Merge branch 'dev' into phase1-validator
This commit is contained in:
commit
d1647c28e0
|
@ -79,16 +79,16 @@ jobs:
|
||||||
# Restore git repo at point close to target branch/revision, to speed up checkout
|
# Restore git repo at point close to target branch/revision, to speed up checkout
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
keys:
|
keys:
|
||||||
- v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
- v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
- v2-specs-repo-{{ .Branch }}-
|
- v3-specs-repo-{{ .Branch }}-
|
||||||
- v2-specs-repo-
|
- v3-specs-repo-
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Clean up git repo to reduce cache size
|
name: Clean up git repo to reduce cache size
|
||||||
command: git gc
|
command: git gc
|
||||||
# Save the git checkout as a cache, to make cloning next time faster.
|
# Save the git checkout as a cache, to make cloning next time faster.
|
||||||
- save_cache:
|
- save_cache:
|
||||||
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
paths:
|
paths:
|
||||||
- ~/specs-repo
|
- ~/specs-repo
|
||||||
install_pyspec_test:
|
install_pyspec_test:
|
||||||
|
@ -97,7 +97,7 @@ jobs:
|
||||||
working_directory: ~/specs-repo
|
working_directory: ~/specs-repo
|
||||||
steps:
|
steps:
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
- restore_pyspec_cached_venv
|
- restore_pyspec_cached_venv
|
||||||
- run:
|
- run:
|
||||||
name: Install pyspec requirements
|
name: Install pyspec requirements
|
||||||
|
@ -109,7 +109,7 @@ jobs:
|
||||||
working_directory: ~/specs-repo
|
working_directory: ~/specs-repo
|
||||||
steps:
|
steps:
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
- restore_pyspec_cached_venv
|
- restore_pyspec_cached_venv
|
||||||
- run:
|
- run:
|
||||||
name: Run py-tests
|
name: Run py-tests
|
||||||
|
@ -140,7 +140,7 @@ jobs:
|
||||||
working_directory: ~/specs-repo
|
working_directory: ~/specs-repo
|
||||||
steps:
|
steps:
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
- restore_pyspec_cached_venv
|
- restore_pyspec_cached_venv
|
||||||
- run:
|
- run:
|
||||||
name: Run linter
|
name: Run linter
|
||||||
|
@ -152,7 +152,7 @@ jobs:
|
||||||
working_directory: ~/specs-repo
|
working_directory: ~/specs-repo
|
||||||
steps:
|
steps:
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
- restore_deposit_contract_compiler_cached_venv
|
- restore_deposit_contract_compiler_cached_venv
|
||||||
- run:
|
- run:
|
||||||
name: Install deposit contract compiler requirements
|
name: Install deposit contract compiler requirements
|
||||||
|
@ -164,7 +164,7 @@ jobs:
|
||||||
working_directory: ~/specs-repo
|
working_directory: ~/specs-repo
|
||||||
steps:
|
steps:
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
- restore_deposit_contract_tester_cached_venv
|
- restore_deposit_contract_tester_cached_venv
|
||||||
- run:
|
- run:
|
||||||
name: Install deposit contract tester requirements
|
name: Install deposit contract tester requirements
|
||||||
|
@ -176,7 +176,7 @@ jobs:
|
||||||
working_directory: ~/specs-repo
|
working_directory: ~/specs-repo
|
||||||
steps:
|
steps:
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
- restore_deposit_contract_compiler_cached_venv
|
- restore_deposit_contract_compiler_cached_venv
|
||||||
- run:
|
- run:
|
||||||
name: Run deposit contract compile test
|
name: Run deposit contract compile test
|
||||||
|
@ -187,7 +187,7 @@ jobs:
|
||||||
working_directory: ~/specs-repo
|
working_directory: ~/specs-repo
|
||||||
steps:
|
steps:
|
||||||
- restore_cache:
|
- restore_cache:
|
||||||
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
|
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||||
- restore_deposit_contract_tester_cached_venv
|
- restore_deposit_contract_tester_cached_venv
|
||||||
- run:
|
- run:
|
||||||
name: Run deposit contract test
|
name: Run deposit contract test
|
||||||
|
|
8
Makefile
8
Makefile
|
@ -75,15 +75,15 @@ install_test:
|
||||||
|
|
||||||
test: pyspec
|
test: pyspec
|
||||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||||
python -m pytest -n 4 --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
python -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||||
|
|
||||||
find_test: pyspec
|
find_test: pyspec
|
||||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||||
python -m pytest -k=$(K) --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
python -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||||
|
|
||||||
citest: pyspec
|
citest: pyspec
|
||||||
mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \
|
mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||||
python -m pytest -n 4 --junitxml=eth2spec/test_results.xml eth2spec
|
python -m pytest -n 4 --disable-bls --junitxml=eth2spec/test_results.xml eth2spec
|
||||||
|
|
||||||
open_cov:
|
open_cov:
|
||||||
((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) &
|
((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) &
|
||||||
|
@ -117,7 +117,7 @@ install_deposit_contract_compiler:
|
||||||
|
|
||||||
compile_deposit_contract:
|
compile_deposit_contract:
|
||||||
cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \
|
cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \
|
||||||
python3.7 deposit_contract/compile.py contracts/validator_registration.vy
|
python3.7 deposit_contract/compile.py ../contracts/validator_registration.vy
|
||||||
|
|
||||||
test_compile_deposit_contract:
|
test_compile_deposit_contract:
|
||||||
cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \
|
cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \
|
||||||
|
|
10
README.md
10
README.md
|
@ -9,11 +9,14 @@ This repository hosts the current Eth2 specifications. Discussions about design
|
||||||
|
|
||||||
## Specs
|
## Specs
|
||||||
|
|
||||||
Core specifications for Eth2 clients be found in [specs/](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
|
[![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec)
|
||||||
|
|
||||||
|
|
||||||
|
Core specifications for Eth2 clients be found in [specs](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
|
||||||
|
|
||||||
### Phase 0
|
### Phase 0
|
||||||
* [The Beacon Chain](specs/phase0/beacon-chain.md)
|
* [The Beacon Chain](specs/phase0/beacon-chain.md)
|
||||||
* [Fork Choice](specs/phase0/fork-choice.md)
|
* [Beacon Chain Fork Choice](specs/phase0/fork-choice.md)
|
||||||
* [Deposit Contract](specs/phase0/deposit-contract.md)
|
* [Deposit Contract](specs/phase0/deposit-contract.md)
|
||||||
* [Honest Validator](specs/phase0/validator.md)
|
* [Honest Validator](specs/phase0/validator.md)
|
||||||
* [P2P Networking](specs/phase0/p2p-interface.md)
|
* [P2P Networking](specs/phase0/p2p-interface.md)
|
||||||
|
@ -22,8 +25,9 @@ Core specifications for Eth2 clients be found in [specs/](specs/). These are div
|
||||||
* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
|
* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
|
||||||
* [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
|
* [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
|
||||||
* [Custody Game](specs/phase1/custody-game.md)
|
* [Custody Game](specs/phase1/custody-game.md)
|
||||||
* [Shard Transition and Fraud Proofs](specs/phase1/fraud-proofs.md)
|
* [Shard Transition and Fraud Proofs](specs/phase1/shard-transition.md)
|
||||||
* [Light client syncing protocol](specs/phase1/light-client-sync.md)
|
* [Light client syncing protocol](specs/phase1/light-client-sync.md)
|
||||||
|
* [Beacon Chain Fork Choice for Shards](specs/phase1/fork-choice.md)
|
||||||
|
|
||||||
### Phase 2
|
### Phase 2
|
||||||
|
|
||||||
|
|
|
@ -76,8 +76,8 @@ BLS_WITHDRAWAL_PREFIX: 0x00
|
||||||
|
|
||||||
# Time parameters
|
# Time parameters
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# 86400 seconds (1 day)
|
# 172800 seconds (2 days)
|
||||||
MIN_GENESIS_DELAY: 86400
|
GENESIS_DELAY: 172800
|
||||||
# 12 seconds
|
# 12 seconds
|
||||||
SECONDS_PER_SLOT: 12
|
SECONDS_PER_SLOT: 12
|
||||||
# 2**0 (= 1) slots 12 seconds
|
# 2**0 (= 1) slots 12 seconds
|
||||||
|
@ -94,8 +94,8 @@ EPOCHS_PER_ETH1_VOTING_PERIOD: 32
|
||||||
SLOTS_PER_HISTORICAL_ROOT: 8192
|
SLOTS_PER_HISTORICAL_ROOT: 8192
|
||||||
# 2**8 (= 256) epochs ~27 hours
|
# 2**8 (= 256) epochs ~27 hours
|
||||||
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
|
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
|
||||||
# 2**11 (= 2,048) epochs 9 days
|
# 2**8 (= 256) epochs ~27 hours
|
||||||
PERSISTENT_COMMITTEE_PERIOD: 2048
|
SHARD_COMMITTEE_PERIOD: 256
|
||||||
# 2**6 (= 64) epochs ~7 hours
|
# 2**6 (= 64) epochs ~7 hours
|
||||||
MAX_EPOCHS_PER_CROSSLINK: 64
|
MAX_EPOCHS_PER_CROSSLINK: 64
|
||||||
# 2**2 (= 4) epochs 25.6 minutes
|
# 2**2 (= 4) epochs 25.6 minutes
|
||||||
|
@ -122,8 +122,8 @@ BASE_REWARD_FACTOR: 64
|
||||||
WHISTLEBLOWER_REWARD_QUOTIENT: 512
|
WHISTLEBLOWER_REWARD_QUOTIENT: 512
|
||||||
# 2**3 (= 8)
|
# 2**3 (= 8)
|
||||||
PROPOSER_REWARD_QUOTIENT: 8
|
PROPOSER_REWARD_QUOTIENT: 8
|
||||||
# 2**25 (= 33,554,432)
|
# 2**24 (= 16,777,216)
|
||||||
INACTIVITY_PENALTY_QUOTIENT: 33554432
|
INACTIVITY_PENALTY_QUOTIENT: 16777216
|
||||||
# 2**5 (= 32)
|
# 2**5 (= 32)
|
||||||
MIN_SLASHING_PENALTY_QUOTIENT: 32
|
MIN_SLASHING_PENALTY_QUOTIENT: 32
|
||||||
|
|
||||||
|
@ -132,8 +132,8 @@ MIN_SLASHING_PENALTY_QUOTIENT: 32
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# 2**4 (= 16)
|
# 2**4 (= 16)
|
||||||
MAX_PROPOSER_SLASHINGS: 16
|
MAX_PROPOSER_SLASHINGS: 16
|
||||||
# 2**0 (= 1)
|
# 2**1 (= 2)
|
||||||
MAX_ATTESTER_SLASHINGS: 1
|
MAX_ATTESTER_SLASHINGS: 2
|
||||||
# 2**7 (= 128)
|
# 2**7 (= 128)
|
||||||
MAX_ATTESTATIONS: 128
|
MAX_ATTESTATIONS: 128
|
||||||
# 2**4 (= 16)
|
# 2**4 (= 16)
|
||||||
|
@ -161,6 +161,8 @@ DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
|
||||||
# Phase 1: Upgrade from Phase 0
|
# Phase 1: Upgrade from Phase 0
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
PHASE_1_FORK_VERSION: 0x01000000
|
PHASE_1_FORK_VERSION: 0x01000000
|
||||||
|
# [STUB]
|
||||||
|
PHASE_1_GENESIS_SLOT: 32
|
||||||
INITIAL_ACTIVE_SHARDS: 64
|
INITIAL_ACTIVE_SHARDS: 64
|
||||||
|
|
||||||
# Phase 1: General
|
# Phase 1: General
|
||||||
|
@ -173,8 +175,6 @@ ONLINE_PERIOD: 8
|
||||||
LIGHT_CLIENT_COMMITTEE_SIZE: 128
|
LIGHT_CLIENT_COMMITTEE_SIZE: 128
|
||||||
# 2**8 (= 256) | epochs | ~27 hours
|
# 2**8 (= 256) | epochs | ~27 hours
|
||||||
LIGHT_CLIENT_COMMITTEE_PERIOD: 256
|
LIGHT_CLIENT_COMMITTEE_PERIOD: 256
|
||||||
# 2**8 (= 256) | epochs | ~27 hours
|
|
||||||
SHARD_COMMITTEE_PERIOD: 256
|
|
||||||
# 2**18 (= 262,144)
|
# 2**18 (= 262,144)
|
||||||
SHARD_BLOCK_CHUNK_SIZE: 262144
|
SHARD_BLOCK_CHUNK_SIZE: 262144
|
||||||
# 2**2 (= 4)
|
# 2**2 (= 4)
|
||||||
|
|
|
@ -77,7 +77,7 @@ BLS_WITHDRAWAL_PREFIX: 0x00
|
||||||
# Time parameters
|
# Time parameters
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# [customized] Faster to spin up testnets, but does not give validator reasonable warning time for genesis
|
# [customized] Faster to spin up testnets, but does not give validator reasonable warning time for genesis
|
||||||
MIN_GENESIS_DELAY: 300
|
GENESIS_DELAY: 300
|
||||||
# [customized] Faster for testing purposes
|
# [customized] Faster for testing purposes
|
||||||
SECONDS_PER_SLOT: 6
|
SECONDS_PER_SLOT: 6
|
||||||
# 2**0 (= 1) slots 6 seconds
|
# 2**0 (= 1) slots 6 seconds
|
||||||
|
@ -89,13 +89,13 @@ MIN_SEED_LOOKAHEAD: 1
|
||||||
# 2**2 (= 4) epochs
|
# 2**2 (= 4) epochs
|
||||||
MAX_SEED_LOOKAHEAD: 4
|
MAX_SEED_LOOKAHEAD: 4
|
||||||
# [customized] higher frequency new deposits from eth1 for testing
|
# [customized] higher frequency new deposits from eth1 for testing
|
||||||
EPOCHS_PER_ETH1_VOTING_PERIOD: 2
|
EPOCHS_PER_ETH1_VOTING_PERIOD: 4
|
||||||
# [customized] smaller state
|
# [customized] smaller state
|
||||||
SLOTS_PER_HISTORICAL_ROOT: 64
|
SLOTS_PER_HISTORICAL_ROOT: 64
|
||||||
# 2**8 (= 256) epochs
|
# 2**8 (= 256) epochs
|
||||||
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
|
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
|
||||||
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
|
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
|
||||||
PERSISTENT_COMMITTEE_PERIOD: 128
|
SHARD_COMMITTEE_PERIOD: 64
|
||||||
# [customized] fast catchup crosslinks
|
# [customized] fast catchup crosslinks
|
||||||
MAX_EPOCHS_PER_CROSSLINK: 4
|
MAX_EPOCHS_PER_CROSSLINK: 4
|
||||||
# 2**2 (= 4) epochs
|
# 2**2 (= 4) epochs
|
||||||
|
@ -122,8 +122,8 @@ BASE_REWARD_FACTOR: 64
|
||||||
WHISTLEBLOWER_REWARD_QUOTIENT: 512
|
WHISTLEBLOWER_REWARD_QUOTIENT: 512
|
||||||
# 2**3 (= 8)
|
# 2**3 (= 8)
|
||||||
PROPOSER_REWARD_QUOTIENT: 8
|
PROPOSER_REWARD_QUOTIENT: 8
|
||||||
# 2**25 (= 33,554,432)
|
# 2**24 (= 16,777,216)
|
||||||
INACTIVITY_PENALTY_QUOTIENT: 33554432
|
INACTIVITY_PENALTY_QUOTIENT: 16777216
|
||||||
# 2**5 (= 32)
|
# 2**5 (= 32)
|
||||||
MIN_SLASHING_PENALTY_QUOTIENT: 32
|
MIN_SLASHING_PENALTY_QUOTIENT: 32
|
||||||
|
|
||||||
|
@ -132,8 +132,8 @@ MIN_SLASHING_PENALTY_QUOTIENT: 32
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# 2**4 (= 16)
|
# 2**4 (= 16)
|
||||||
MAX_PROPOSER_SLASHINGS: 16
|
MAX_PROPOSER_SLASHINGS: 16
|
||||||
# 2**0 (= 1)
|
# 2**1 (= 2)
|
||||||
MAX_ATTESTER_SLASHINGS: 1
|
MAX_ATTESTER_SLASHINGS: 2
|
||||||
# 2**7 (= 128)
|
# 2**7 (= 128)
|
||||||
MAX_ATTESTATIONS: 128
|
MAX_ATTESTATIONS: 128
|
||||||
# 2**4 (= 16)
|
# 2**4 (= 16)
|
||||||
|
@ -162,6 +162,8 @@ DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
# [customized] for testnet distinction
|
# [customized] for testnet distinction
|
||||||
PHASE_1_FORK_VERSION: 0x01000001
|
PHASE_1_FORK_VERSION: 0x01000001
|
||||||
|
# [customized] for testing
|
||||||
|
PHASE_1_GENESIS_SLOT: 8
|
||||||
# [customized] reduced for testing
|
# [customized] reduced for testing
|
||||||
INITIAL_ACTIVE_SHARDS: 4
|
INITIAL_ACTIVE_SHARDS: 4
|
||||||
|
|
||||||
|
@ -176,8 +178,6 @@ ONLINE_PERIOD: 8
|
||||||
LIGHT_CLIENT_COMMITTEE_SIZE: 128
|
LIGHT_CLIENT_COMMITTEE_SIZE: 128
|
||||||
# 2**8 (= 256) | epochs
|
# 2**8 (= 256) | epochs
|
||||||
LIGHT_CLIENT_COMMITTEE_PERIOD: 256
|
LIGHT_CLIENT_COMMITTEE_PERIOD: 256
|
||||||
# 2**8 (= 256) | epochs
|
|
||||||
SHARD_COMMITTEE_PERIOD: 256
|
|
||||||
# 2**18 (= 262,144)
|
# 2**18 (= 262,144)
|
||||||
SHARD_BLOCK_CHUNK_SIZE: 262144
|
SHARD_BLOCK_CHUNK_SIZE: 262144
|
||||||
# 2**2 (= 4)
|
# 2**2 (= 4)
|
||||||
|
|
20
setup.py
20
setup.py
|
@ -108,7 +108,7 @@ SSZObject = TypeVar('SSZObject', bound=View)
|
||||||
PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0
|
PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0
|
||||||
from eth2spec.config.config_util import apply_constants_config
|
from eth2spec.config.config_util import apply_constants_config
|
||||||
from typing import (
|
from typing import (
|
||||||
Any, Dict, Set, Sequence, NewType, Tuple, Optional, TypeVar, Callable
|
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional
|
||||||
)
|
)
|
||||||
|
|
||||||
from dataclasses import (
|
from dataclasses import (
|
||||||
|
@ -146,8 +146,14 @@ _hash = hash
|
||||||
hash_cache: Dict[bytes, Bytes32] = {}
|
hash_cache: Dict[bytes, Bytes32] = {}
|
||||||
|
|
||||||
|
|
||||||
def get_eth1_data(distance: uint64) -> Bytes32:
|
def get_eth1_data(block: Eth1Block) -> Eth1Data:
|
||||||
return hash(distance)
|
"""
|
||||||
|
A stub function return mocking Eth1Data.
|
||||||
|
"""
|
||||||
|
return Eth1Data(
|
||||||
|
deposit_root=block.deposit_root,
|
||||||
|
deposit_count=block.deposit_count,
|
||||||
|
block_hash=hash_tree_root(block))
|
||||||
|
|
||||||
|
|
||||||
def hash(x: bytes) -> Bytes32: # type: ignore
|
def hash(x: bytes) -> Bytes32: # type: ignore
|
||||||
|
@ -376,7 +382,7 @@ class PySpecCommand(Command):
|
||||||
specs/phase0/validator.md
|
specs/phase0/validator.md
|
||||||
specs/phase1/custody-game.md
|
specs/phase1/custody-game.md
|
||||||
specs/phase1/beacon-chain.md
|
specs/phase1/beacon-chain.md
|
||||||
specs/phase1/fraud-proofs.md
|
specs/phase1/shard-transition.md
|
||||||
specs/phase1/fork-choice.md
|
specs/phase1/fork-choice.md
|
||||||
specs/phase1/phase1-fork.md
|
specs/phase1/phase1-fork.md
|
||||||
specs/phase1/validator.md
|
specs/phase1/validator.md
|
||||||
|
@ -480,6 +486,7 @@ setup(
|
||||||
url="https://github.com/ethereum/eth2.0-specs",
|
url="https://github.com/ethereum/eth2.0-specs",
|
||||||
include_package_data=False,
|
include_package_data=False,
|
||||||
package_data={'configs': ['*.yaml'],
|
package_data={'configs': ['*.yaml'],
|
||||||
|
|
||||||
'specs': ['**/*.md'],
|
'specs': ['**/*.md'],
|
||||||
'eth2spec': ['VERSION.txt']},
|
'eth2spec': ['VERSION.txt']},
|
||||||
package_dir={
|
package_dir={
|
||||||
|
@ -499,9 +506,10 @@ setup(
|
||||||
"eth-utils>=1.3.0,<2",
|
"eth-utils>=1.3.0,<2",
|
||||||
"eth-typing>=2.1.0,<3.0.0",
|
"eth-typing>=2.1.0,<3.0.0",
|
||||||
"pycryptodome==3.9.4",
|
"pycryptodome==3.9.4",
|
||||||
"py_ecc==2.0.0",
|
"py_ecc==4.0.0",
|
||||||
|
"milagro_bls_binding==1.3.0",
|
||||||
"dataclasses==0.6",
|
"dataclasses==0.6",
|
||||||
"remerkleable==0.1.12",
|
"remerkleable==0.1.16",
|
||||||
"ruamel.yaml==0.16.5",
|
"ruamel.yaml==0.16.5",
|
||||||
"lru-dict==1.1.6"
|
"lru-dict==1.1.6"
|
||||||
]
|
]
|
||||||
|
|
|
@ -35,7 +35,7 @@
|
||||||
- [`DepositMessage`](#depositmessage)
|
- [`DepositMessage`](#depositmessage)
|
||||||
- [`DepositData`](#depositdata)
|
- [`DepositData`](#depositdata)
|
||||||
- [`BeaconBlockHeader`](#beaconblockheader)
|
- [`BeaconBlockHeader`](#beaconblockheader)
|
||||||
- [`SigningRoot`](#signingroot)
|
- [`SigningData`](#signingdata)
|
||||||
- [Beacon operations](#beacon-operations)
|
- [Beacon operations](#beacon-operations)
|
||||||
- [`ProposerSlashing`](#proposerslashing)
|
- [`ProposerSlashing`](#proposerslashing)
|
||||||
- [`AttesterSlashing`](#attesterslashing)
|
- [`AttesterSlashing`](#attesterslashing)
|
||||||
|
@ -110,6 +110,10 @@
|
||||||
- [Helper functions](#helper-functions-1)
|
- [Helper functions](#helper-functions-1)
|
||||||
- [Justification and finalization](#justification-and-finalization)
|
- [Justification and finalization](#justification-and-finalization)
|
||||||
- [Rewards and penalties](#rewards-and-penalties-1)
|
- [Rewards and penalties](#rewards-and-penalties-1)
|
||||||
|
- [Helpers](#helpers)
|
||||||
|
- [Components of attestation deltas](#components-of-attestation-deltas)
|
||||||
|
- [`get_attestation_deltas`](#get_attestation_deltas)
|
||||||
|
- [`process_rewards_and_penalties`](#process_rewards_and_penalties)
|
||||||
- [Registry updates](#registry-updates)
|
- [Registry updates](#registry-updates)
|
||||||
- [Slashings](#slashings)
|
- [Slashings](#slashings)
|
||||||
- [Final updates](#final-updates)
|
- [Final updates](#final-updates)
|
||||||
|
@ -179,6 +183,7 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
|
|
||||||
| Name | Value |
|
| Name | Value |
|
||||||
| - | - |
|
| - | - |
|
||||||
|
| `ETH1_FOLLOW_DISTANCE` | `2**10` (= 1,024) |
|
||||||
| `MAX_COMMITTEES_PER_SLOT` | `2**6` (= 64) |
|
| `MAX_COMMITTEES_PER_SLOT` | `2**6` (= 64) |
|
||||||
| `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) |
|
| `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) |
|
||||||
| `MAX_VALIDATORS_PER_COMMITTEE` | `2**11` (= 2,048) |
|
| `MAX_VALIDATORS_PER_COMMITTEE` | `2**11` (= 2,048) |
|
||||||
|
@ -191,7 +196,6 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `1` |
|
| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `1` |
|
||||||
| `HYSTERESIS_UPWARD_MULTIPLIER` | `5` |
|
| `HYSTERESIS_UPWARD_MULTIPLIER` | `5` |
|
||||||
|
|
||||||
|
|
||||||
- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
|
- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
|
||||||
|
|
||||||
### Gwei values
|
### Gwei values
|
||||||
|
@ -214,8 +218,9 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
|
|
||||||
| Name | Value | Unit | Duration |
|
| Name | Value | Unit | Duration |
|
||||||
| - | - | :-: | :-: |
|
| - | - | :-: | :-: |
|
||||||
| `MIN_GENESIS_DELAY` | `86400` | seconds | 1 day |
|
| `GENESIS_DELAY` | `172800` | seconds | 2 days |
|
||||||
| `SECONDS_PER_SLOT` | `12` | seconds | 12 seconds |
|
| `SECONDS_PER_SLOT` | `12` | seconds | 12 seconds |
|
||||||
|
| `SECONDS_PER_ETH1_BLOCK` | `14` | seconds | 14 seconds |
|
||||||
| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 12 seconds |
|
| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 12 seconds |
|
||||||
| `SLOTS_PER_EPOCH` | `2**5` (= 32) | slots | 6.4 minutes |
|
| `SLOTS_PER_EPOCH` | `2**5` (= 32) | slots | 6.4 minutes |
|
||||||
| `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes |
|
| `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes |
|
||||||
|
@ -224,7 +229,7 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `2**5` (= 32) | epochs | ~3.4 hours |
|
| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `2**5` (= 32) | epochs | ~3.4 hours |
|
||||||
| `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~27 hours |
|
| `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~27 hours |
|
||||||
| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours |
|
| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours |
|
||||||
| `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days |
|
| `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
|
||||||
|
|
||||||
### State list lengths
|
### State list lengths
|
||||||
|
|
||||||
|
@ -242,17 +247,17 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
| `BASE_REWARD_FACTOR` | `2**6` (= 64) |
|
| `BASE_REWARD_FACTOR` | `2**6` (= 64) |
|
||||||
| `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) |
|
| `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) |
|
||||||
| `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) |
|
| `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) |
|
||||||
| `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) |
|
| `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) |
|
||||||
| `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) |
|
| `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) |
|
||||||
|
|
||||||
- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
|
- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12` epochs (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
|
||||||
|
|
||||||
### Max operations per block
|
### Max operations per block
|
||||||
|
|
||||||
| Name | Value |
|
| Name | Value |
|
||||||
| - | - |
|
| - | - |
|
||||||
| `MAX_PROPOSER_SLASHINGS` | `2**4` (= 16) |
|
| `MAX_PROPOSER_SLASHINGS` | `2**4` (= 16) |
|
||||||
| `MAX_ATTESTER_SLASHINGS` | `2**0` (= 1) |
|
| `MAX_ATTESTER_SLASHINGS` | `2**1` (= 2) |
|
||||||
| `MAX_ATTESTATIONS` | `2**7` (= 128) |
|
| `MAX_ATTESTATIONS` | `2**7` (= 128) |
|
||||||
| `MAX_DEPOSITS` | `2**4` (= 16) |
|
| `MAX_DEPOSITS` | `2**4` (= 16) |
|
||||||
| `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) |
|
| `MAX_VOLUNTARY_EXITS` | `2**4` (= 16) |
|
||||||
|
@ -269,7 +274,6 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
| `DOMAIN_SELECTION_PROOF` | `DomainType('0x05000000')` |
|
| `DOMAIN_SELECTION_PROOF` | `DomainType('0x05000000')` |
|
||||||
| `DOMAIN_AGGREGATE_AND_PROOF` | `DomainType('0x06000000')` |
|
| `DOMAIN_AGGREGATE_AND_PROOF` | `DomainType('0x06000000')` |
|
||||||
|
|
||||||
|
|
||||||
## Containers
|
## Containers
|
||||||
|
|
||||||
The following types are [SimpleSerialize (SSZ)](../../ssz/simple-serialize.md) containers.
|
The following types are [SimpleSerialize (SSZ)](../../ssz/simple-serialize.md) containers.
|
||||||
|
@ -399,10 +403,10 @@ class BeaconBlockHeader(Container):
|
||||||
body_root: Root
|
body_root: Root
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `SigningRoot`
|
#### `SigningData`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class SigningRoot(Container):
|
class SigningData(Container):
|
||||||
object_root: Root
|
object_root: Root
|
||||||
domain: Domain
|
domain: Domain
|
||||||
```
|
```
|
||||||
|
@ -604,16 +608,18 @@ def bytes_to_int(data: bytes) -> uint64:
|
||||||
|
|
||||||
#### BLS Signatures
|
#### BLS Signatures
|
||||||
|
|
||||||
Eth2 makes use of BLS signatures as specified in the [IETF draft BLS specification](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-00). Specifically, eth2 uses the `BLS_SIG_BLS12381G2-SHA256-SSWU-RO-_POP_` ciphersuite which implements the following interfaces:
|
Eth2 makes use of BLS signatures as specified in the [IETF draft BLS specification draft-irtf-cfrg-bls-signature-02](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-02) but uses [Hashing to Elliptic Curves - draft-irtf-cfrg-hash-to-curve-07](https://tools.ietf.org/html/draft-irtf-cfrg-hash-to-curve-07) instead of draft-irtf-cfrg-hash-to-curve-06. Specifically, eth2 uses the `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` ciphersuite which implements the following interfaces:
|
||||||
|
|
||||||
- `def Sign(SK: int, message: Bytes) -> BLSSignature`
|
- `def Sign(SK: int, message: Bytes) -> BLSSignature`
|
||||||
- `def Verify(PK: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
|
- `def Verify(PK: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
|
||||||
- `def Aggregate(signatures: Sequence[BLSSignature]) -> BLSSignature`
|
- `def Aggregate(signatures: Sequence[BLSSignature]) -> BLSSignature`
|
||||||
- `def FastAggregateVerify(PKs: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
|
- `def FastAggregateVerify(PKs: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
|
||||||
- `def AggregateVerify(pairs: Sequence[PK: BLSPubkey, message: Bytes], signature: BLSSignature) -> bool`
|
- `def AggregateVerify(PKs: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool`
|
||||||
|
|
||||||
Within these specifications, BLS signatures are treated as a module for notational clarity, thus to verify a signature `bls.Verify(...)` is used.
|
Within these specifications, BLS signatures are treated as a module for notational clarity, thus to verify a signature `bls.Verify(...)` is used.
|
||||||
|
|
||||||
|
*Note*: The non-standard configuration of the BLS and hash to curve specs is temporary and will be resolved once IETF releases BLS spec draft 3.
|
||||||
|
|
||||||
### Predicates
|
### Predicates
|
||||||
|
|
||||||
#### `is_active_validator`
|
#### `is_active_validator`
|
||||||
|
@ -684,15 +690,11 @@ def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationDa
|
||||||
```python
|
```python
|
||||||
def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
|
def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
|
||||||
"""
|
"""
|
||||||
Check if ``indexed_attestation`` has valid indices and signature.
|
Check if ``indexed_attestation`` is not empty, has sorted and unique indices and has a valid aggregate signature.
|
||||||
"""
|
"""
|
||||||
indices = indexed_attestation.attesting_indices
|
|
||||||
|
|
||||||
# Verify max number of indices
|
|
||||||
if not len(indices) <= MAX_VALIDATORS_PER_COMMITTEE:
|
|
||||||
return False
|
|
||||||
# Verify indices are sorted and unique
|
# Verify indices are sorted and unique
|
||||||
if not indices == sorted(set(indices)):
|
indices = indexed_attestation.attesting_indices
|
||||||
|
if len(indices) == 0 or not indices == sorted(set(indices)):
|
||||||
return False
|
return False
|
||||||
# Verify aggregate signature
|
# Verify aggregate signature
|
||||||
pubkeys = [state.validators[i].pubkey for i in indices]
|
pubkeys = [state.validators[i].pubkey for i in indices]
|
||||||
|
@ -722,9 +724,9 @@ def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint
|
||||||
#### `compute_shuffled_index`
|
#### `compute_shuffled_index`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Bytes32) -> ValidatorIndex:
|
def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64:
|
||||||
"""
|
"""
|
||||||
Return the shuffled validator index corresponding to ``seed`` (and ``index_count``).
|
Return the shuffled index corresponding to ``seed`` (and ``index_count``).
|
||||||
"""
|
"""
|
||||||
assert index < index_count
|
assert index < index_count
|
||||||
|
|
||||||
|
@ -732,14 +734,14 @@ def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Byt
|
||||||
# See the 'generalized domain' algorithm on page 3
|
# See the 'generalized domain' algorithm on page 3
|
||||||
for current_round in range(SHUFFLE_ROUND_COUNT):
|
for current_round in range(SHUFFLE_ROUND_COUNT):
|
||||||
pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count
|
pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count
|
||||||
flip = ValidatorIndex((pivot + index_count - index) % index_count)
|
flip = (pivot + index_count - index) % index_count
|
||||||
position = max(index, flip)
|
position = max(index, flip)
|
||||||
source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4))
|
source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4))
|
||||||
byte = source[(position % 256) // 8]
|
byte = source[(position % 256) // 8]
|
||||||
bit = (byte >> (position % 8)) % 2
|
bit = (byte >> (position % 8)) % 2
|
||||||
index = flip if bit else index
|
index = flip if bit else index
|
||||||
|
|
||||||
return ValidatorIndex(index)
|
return index
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `compute_proposer_index`
|
#### `compute_proposer_index`
|
||||||
|
@ -753,11 +755,11 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
|
||||||
MAX_RANDOM_BYTE = 2**8 - 1
|
MAX_RANDOM_BYTE = 2**8 - 1
|
||||||
i = 0
|
i = 0
|
||||||
while True:
|
while True:
|
||||||
candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)]
|
candidate_index = indices[compute_shuffled_index(i % len(indices), len(indices), seed)]
|
||||||
random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
|
random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
|
||||||
effective_balance = state.validators[candidate_index].effective_balance
|
effective_balance = state.validators[candidate_index].effective_balance
|
||||||
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
|
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
|
||||||
return ValidatorIndex(candidate_index)
|
return candidate_index
|
||||||
i += 1
|
i += 1
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -773,7 +775,7 @@ def compute_committee(indices: Sequence[ValidatorIndex],
|
||||||
"""
|
"""
|
||||||
start = (len(indices) * index) // count
|
start = (len(indices) * index) // count
|
||||||
end = (len(indices) * (index + 1)) // count
|
end = (len(indices) * (index + 1)) // count
|
||||||
return [indices[compute_shuffled_index(ValidatorIndex(i), len(indices), seed)] for i in range(start, end)]
|
return [indices[compute_shuffled_index(i, len(indices), seed)] for i in range(start, end)]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `compute_epoch_at_slot`
|
#### `compute_epoch_at_slot`
|
||||||
|
@ -852,13 +854,12 @@ def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_
|
||||||
```python
|
```python
|
||||||
def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root:
|
def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root:
|
||||||
"""
|
"""
|
||||||
Return the signing root of an object by calculating the root of the object-domain tree.
|
Return the signing root for the corresponding signing data.
|
||||||
"""
|
"""
|
||||||
domain_wrapped_object = SigningRoot(
|
return hash_tree_root(SigningData(
|
||||||
object_root=hash_tree_root(ssz_object),
|
object_root=hash_tree_root(ssz_object),
|
||||||
domain=domain,
|
domain=domain,
|
||||||
)
|
))
|
||||||
return hash_tree_root(domain_wrapped_object)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### Beacon state accessors
|
### Beacon state accessors
|
||||||
|
@ -1125,7 +1126,7 @@ def slash_validator(state: BeaconState,
|
||||||
whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
|
whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
|
||||||
proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
|
proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
|
||||||
increase_balance(state, proposer_index, proposer_reward)
|
increase_balance(state, proposer_index, proposer_reward)
|
||||||
increase_balance(state, whistleblower_index, whistleblower_reward - proposer_reward)
|
increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
|
||||||
```
|
```
|
||||||
|
|
||||||
## Genesis
|
## Genesis
|
||||||
|
@ -1136,6 +1137,8 @@ Before the Ethereum 2.0 genesis has been triggered, and for every Ethereum 1.0 b
|
||||||
- `eth1_timestamp` is the Unix timestamp corresponding to `eth1_block_hash`
|
- `eth1_timestamp` is the Unix timestamp corresponding to `eth1_block_hash`
|
||||||
- `deposits` is the sequence of all deposits, ordered chronologically, up to (and including) the block with hash `eth1_block_hash`
|
- `deposits` is the sequence of all deposits, ordered chronologically, up to (and including) the block with hash `eth1_block_hash`
|
||||||
|
|
||||||
|
Eth1 blocks must only be considered once they are at least `SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE` seconds old (i.e. `eth1_timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time`). Due to this constraint, if `GENESIS_DELAY < SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE`, then the `genesis_time` can happen before the time/state is first known. Values should be configured to avoid this case.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
|
def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
|
||||||
eth1_timestamp: uint64,
|
eth1_timestamp: uint64,
|
||||||
|
@ -1146,7 +1149,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
|
||||||
epoch=GENESIS_EPOCH,
|
epoch=GENESIS_EPOCH,
|
||||||
)
|
)
|
||||||
state = BeaconState(
|
state = BeaconState(
|
||||||
genesis_time=eth1_timestamp - eth1_timestamp % MIN_GENESIS_DELAY + 2 * MIN_GENESIS_DELAY,
|
genesis_time=eth1_timestamp + GENESIS_DELAY,
|
||||||
fork=fork,
|
fork=fork,
|
||||||
eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)),
|
eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)),
|
||||||
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
|
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
|
||||||
|
@ -1174,6 +1177,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
|
||||||
return state
|
return state
|
||||||
```
|
```
|
||||||
|
|
||||||
|
*Note*: The ETH1 block with `eth1_timestamp` meeting the minimum genesis active validator count criteria can also occur before `MIN_GENESIS_TIME`.
|
||||||
|
|
||||||
### Genesis state
|
### Genesis state
|
||||||
|
|
||||||
Let `genesis_state = candidate_state` whenever `is_valid_genesis_state(candidate_state) is True` for the first time.
|
Let `genesis_state = candidate_state` whenever `is_valid_genesis_state(candidate_state) is True` for the first time.
|
||||||
|
@ -1195,7 +1200,7 @@ Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.
|
||||||
|
|
||||||
## Beacon chain state transition function
|
## Beacon chain state transition function
|
||||||
|
|
||||||
The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid.
|
The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> BeaconState:
|
def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> BeaconState:
|
||||||
|
@ -1223,13 +1228,13 @@ def verify_block_signature(state: BeaconState, signed_block: SignedBeaconBlock)
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_slots(state: BeaconState, slot: Slot) -> None:
|
def process_slots(state: BeaconState, slot: Slot) -> None:
|
||||||
assert state.slot <= slot
|
assert state.slot < slot
|
||||||
while state.slot < slot:
|
while state.slot < slot:
|
||||||
process_slot(state)
|
process_slot(state)
|
||||||
# Process epoch on the start slot of the next epoch
|
# Process epoch on the start slot of the next epoch
|
||||||
if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
|
if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
|
||||||
process_epoch(state)
|
process_epoch(state)
|
||||||
state.slot += Slot(1)
|
state.slot = Slot(state.slot + 1)
|
||||||
```
|
```
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -1343,6 +1348,8 @@ def process_justification_and_finalization(state: BeaconState) -> None:
|
||||||
|
|
||||||
#### Rewards and penalties
|
#### Rewards and penalties
|
||||||
|
|
||||||
|
##### Helpers
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
|
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
|
||||||
total_balance = get_total_active_balance(state)
|
total_balance = get_total_active_balance(state)
|
||||||
|
@ -1350,56 +1357,161 @@ def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
|
||||||
return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH)
|
return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
def get_proposer_reward(state: BeaconState, attesting_index: ValidatorIndex) -> Gwei:
|
||||||
|
return Gwei(get_base_reward(state, attesting_index) // PROPOSER_REWARD_QUOTIENT)
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_finality_delay(state: BeaconState) -> uint64:
|
||||||
|
return get_previous_epoch(state) - state.finalized_checkpoint.epoch
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
def is_in_inactivity_leak(state: BeaconState) -> bool:
|
||||||
|
return get_finality_delay(state) > MIN_EPOCHS_TO_INACTIVITY_PENALTY
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_eligible_validator_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
|
||||||
previous_epoch = get_previous_epoch(state)
|
previous_epoch = get_previous_epoch(state)
|
||||||
total_balance = get_total_active_balance(state)
|
return [
|
||||||
rewards = [Gwei(0) for _ in range(len(state.validators))]
|
|
||||||
penalties = [Gwei(0) for _ in range(len(state.validators))]
|
|
||||||
eligible_validator_indices = [
|
|
||||||
ValidatorIndex(index) for index, v in enumerate(state.validators)
|
ValidatorIndex(index) for index, v in enumerate(state.validators)
|
||||||
if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
|
if is_active_validator(v, previous_epoch) or (v.slashed and previous_epoch + 1 < v.withdrawable_epoch)
|
||||||
]
|
]
|
||||||
|
```
|
||||||
|
|
||||||
# Micro-incentives for matching FFG source, FFG target, and head
|
```python
|
||||||
matching_source_attestations = get_matching_source_attestations(state, previous_epoch)
|
def get_attestation_component_deltas(state: BeaconState,
|
||||||
matching_target_attestations = get_matching_target_attestations(state, previous_epoch)
|
attestations: Sequence[PendingAttestation]
|
||||||
matching_head_attestations = get_matching_head_attestations(state, previous_epoch)
|
) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||||
for attestations in (matching_source_attestations, matching_target_attestations, matching_head_attestations):
|
"""
|
||||||
unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations)
|
Helper with shared logic for use by get source, target, and head deltas functions
|
||||||
attesting_balance = get_total_balance(state, unslashed_attesting_indices)
|
"""
|
||||||
for index in eligible_validator_indices:
|
rewards = [Gwei(0)] * len(state.validators)
|
||||||
if index in unslashed_attesting_indices:
|
penalties = [Gwei(0)] * len(state.validators)
|
||||||
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from balance totals to avoid uint64 overflow
|
total_balance = get_total_active_balance(state)
|
||||||
|
unslashed_attesting_indices = get_unslashed_attesting_indices(state, attestations)
|
||||||
|
attesting_balance = get_total_balance(state, unslashed_attesting_indices)
|
||||||
|
for index in get_eligible_validator_indices(state):
|
||||||
|
if index in unslashed_attesting_indices:
|
||||||
|
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from balance totals to avoid uint64 overflow
|
||||||
|
if is_in_inactivity_leak(state):
|
||||||
|
# Since full base reward will be canceled out by inactivity penalty deltas,
|
||||||
|
# optimal participation receives full base reward compensation here.
|
||||||
|
rewards[index] += get_base_reward(state, index)
|
||||||
|
else:
|
||||||
reward_numerator = get_base_reward(state, index) * (attesting_balance // increment)
|
reward_numerator = get_base_reward(state, index) * (attesting_balance // increment)
|
||||||
rewards[index] += reward_numerator // (total_balance // increment)
|
rewards[index] += reward_numerator // (total_balance // increment)
|
||||||
else:
|
else:
|
||||||
penalties[index] += get_base_reward(state, index)
|
penalties[index] += get_base_reward(state, index)
|
||||||
|
return rewards, penalties
|
||||||
|
```
|
||||||
|
|
||||||
# Proposer and inclusion delay micro-rewards
|
##### Components of attestation deltas
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_source_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||||
|
"""
|
||||||
|
Return attester micro-rewards/penalties for source-vote for each validator.
|
||||||
|
"""
|
||||||
|
matching_source_attestations = get_matching_source_attestations(state, get_previous_epoch(state))
|
||||||
|
return get_attestation_component_deltas(state, matching_source_attestations)
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_target_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||||
|
"""
|
||||||
|
Return attester micro-rewards/penalties for target-vote for each validator.
|
||||||
|
"""
|
||||||
|
matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
|
||||||
|
return get_attestation_component_deltas(state, matching_target_attestations)
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_head_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||||
|
"""
|
||||||
|
Return attester micro-rewards/penalties for head-vote for each validator.
|
||||||
|
"""
|
||||||
|
matching_head_attestations = get_matching_head_attestations(state, get_previous_epoch(state))
|
||||||
|
return get_attestation_component_deltas(state, matching_head_attestations)
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_inclusion_delay_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||||
|
"""
|
||||||
|
Return proposer and inclusion delay micro-rewards/penalties for each validator.
|
||||||
|
"""
|
||||||
|
rewards = [Gwei(0) for _ in range(len(state.validators))]
|
||||||
|
matching_source_attestations = get_matching_source_attestations(state, get_previous_epoch(state))
|
||||||
for index in get_unslashed_attesting_indices(state, matching_source_attestations):
|
for index in get_unslashed_attesting_indices(state, matching_source_attestations):
|
||||||
attestation = min([
|
attestation = min([
|
||||||
a for a in matching_source_attestations
|
a for a in matching_source_attestations
|
||||||
if index in get_attesting_indices(state, a.data, a.aggregation_bits)
|
if index in get_attesting_indices(state, a.data, a.aggregation_bits)
|
||||||
], key=lambda a: a.inclusion_delay)
|
], key=lambda a: a.inclusion_delay)
|
||||||
proposer_reward = Gwei(get_base_reward(state, index) // PROPOSER_REWARD_QUOTIENT)
|
rewards[attestation.proposer_index] += get_proposer_reward(state, index)
|
||||||
rewards[attestation.proposer_index] += proposer_reward
|
max_attester_reward = get_base_reward(state, index) - get_proposer_reward(state, index)
|
||||||
max_attester_reward = get_base_reward(state, index) - proposer_reward
|
|
||||||
rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay)
|
rewards[index] += Gwei(max_attester_reward // attestation.inclusion_delay)
|
||||||
|
|
||||||
# Inactivity penalty
|
# No penalties associated with inclusion delay
|
||||||
finality_delay = previous_epoch - state.finalized_checkpoint.epoch
|
penalties = [Gwei(0) for _ in range(len(state.validators))]
|
||||||
if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY:
|
return rewards, penalties
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||||
|
"""
|
||||||
|
Return inactivity reward/penalty deltas for each validator.
|
||||||
|
"""
|
||||||
|
penalties = [Gwei(0) for _ in range(len(state.validators))]
|
||||||
|
if is_in_inactivity_leak(state):
|
||||||
|
matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
|
||||||
matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
|
matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
|
||||||
for index in eligible_validator_indices:
|
for index in get_eligible_validator_indices(state):
|
||||||
penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * get_base_reward(state, index))
|
# If validator is performing optimally this cancels all rewards for a neutral balance
|
||||||
|
base_reward = get_base_reward(state, index)
|
||||||
|
penalties[index] += Gwei(BASE_REWARDS_PER_EPOCH * base_reward - get_proposer_reward(state, index))
|
||||||
if index not in matching_target_attesting_indices:
|
if index not in matching_target_attesting_indices:
|
||||||
effective_balance = state.validators[index].effective_balance
|
effective_balance = state.validators[index].effective_balance
|
||||||
penalties[index] += Gwei(effective_balance * finality_delay // INACTIVITY_PENALTY_QUOTIENT)
|
penalties[index] += Gwei(effective_balance * get_finality_delay(state) // INACTIVITY_PENALTY_QUOTIENT)
|
||||||
|
|
||||||
|
# No rewards associated with inactivity penalties
|
||||||
|
rewards = [Gwei(0) for _ in range(len(state.validators))]
|
||||||
|
return rewards, penalties
|
||||||
|
```
|
||||||
|
|
||||||
|
##### `get_attestation_deltas`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||||
|
"""
|
||||||
|
Return attestation reward/penalty deltas for each validator.
|
||||||
|
"""
|
||||||
|
source_rewards, source_penalties = get_source_deltas(state)
|
||||||
|
target_rewards, target_penalties = get_target_deltas(state)
|
||||||
|
head_rewards, head_penalties = get_head_deltas(state)
|
||||||
|
inclusion_delay_rewards, _ = get_inclusion_delay_deltas(state)
|
||||||
|
_, inactivity_penalties = get_inactivity_penalty_deltas(state)
|
||||||
|
|
||||||
|
rewards = [
|
||||||
|
source_rewards[i] + target_rewards[i] + head_rewards[i] + inclusion_delay_rewards[i]
|
||||||
|
for i in range(len(state.validators))
|
||||||
|
]
|
||||||
|
|
||||||
|
penalties = [
|
||||||
|
source_penalties[i] + target_penalties[i] + head_penalties[i] + inactivity_penalties[i]
|
||||||
|
for i in range(len(state.validators))
|
||||||
|
]
|
||||||
|
|
||||||
return rewards, penalties
|
return rewards, penalties
|
||||||
```
|
```
|
||||||
|
|
||||||
|
##### `process_rewards_and_penalties`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_rewards_and_penalties(state: BeaconState) -> None:
|
def process_rewards_and_penalties(state: BeaconState) -> None:
|
||||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||||
|
@ -1498,6 +1610,8 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||||
def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
|
def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
|
||||||
# Verify that the slots match
|
# Verify that the slots match
|
||||||
assert block.slot == state.slot
|
assert block.slot == state.slot
|
||||||
|
# Verify that the block is newer than latest block header
|
||||||
|
assert block.slot > state.latest_block_header.slot
|
||||||
# Verify that proposer index is the correct index
|
# Verify that proposer index is the correct index
|
||||||
assert block.proposer_index == get_beacon_proposer_index(state)
|
assert block.proposer_index == get_beacon_proposer_index(state)
|
||||||
# Verify that the parent matches
|
# Verify that the parent matches
|
||||||
|
@ -1693,7 +1807,7 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu
|
||||||
# Exits must specify an epoch when they become valid; they are not valid before then
|
# Exits must specify an epoch when they become valid; they are not valid before then
|
||||||
assert get_current_epoch(state) >= voluntary_exit.epoch
|
assert get_current_epoch(state) >= voluntary_exit.epoch
|
||||||
# Verify the validator has been active long enough
|
# Verify the validator has been active long enough
|
||||||
assert get_current_epoch(state) >= validator.activation_epoch + PERSISTENT_COMMITTEE_PERIOD
|
assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
|
||||||
# Verify signature
|
# Verify signature
|
||||||
domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
|
domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
|
||||||
signing_root = compute_signing_root(voluntary_exit, domain)
|
signing_root = compute_signing_root(voluntary_exit, domain)
|
||||||
|
|
|
@ -150,7 +150,7 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
|
||||||
elif block.slot == slot:
|
elif block.slot == slot:
|
||||||
return root
|
return root
|
||||||
else:
|
else:
|
||||||
# root is older than queried slot, thus a skip slot. Return earliest root prior to slot
|
# root is older than queried slot, thus a skip slot. Return most recent root prior to slot
|
||||||
return root
|
return root
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -162,7 +162,7 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
|
||||||
active_indices = get_active_validator_indices(state, get_current_epoch(state))
|
active_indices = get_active_validator_indices(state, get_current_epoch(state))
|
||||||
return Gwei(sum(
|
return Gwei(sum(
|
||||||
state.validators[i].effective_balance for i in active_indices
|
state.validators[i].effective_balance for i in active_indices
|
||||||
if (i in store.latest_messages
|
if (i in store.latest_messages
|
||||||
and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
|
and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
|
||||||
))
|
))
|
||||||
```
|
```
|
||||||
|
@ -273,19 +273,22 @@ def validate_on_attestation(store: Store, attestation: Attestation) -> None:
|
||||||
current_epoch = compute_epoch_at_slot(get_current_slot(store))
|
current_epoch = compute_epoch_at_slot(get_current_slot(store))
|
||||||
# Use GENESIS_EPOCH for previous when genesis to avoid underflow
|
# Use GENESIS_EPOCH for previous when genesis to avoid underflow
|
||||||
previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
|
previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
|
||||||
|
# If attestation target is from a future epoch, delay consideration until the epoch arrives
|
||||||
assert target.epoch in [current_epoch, previous_epoch]
|
assert target.epoch in [current_epoch, previous_epoch]
|
||||||
assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
|
assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
|
||||||
|
|
||||||
# Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
|
# Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
|
||||||
assert target.root in store.blocks
|
assert target.root in store.blocks
|
||||||
# Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
|
|
||||||
assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)
|
|
||||||
|
|
||||||
# Attestations must be for a known block. If block is unknown, delay consideration until the block is found
|
# Attestations must be for a known block. If block is unknown, delay consideration until the block is found
|
||||||
assert attestation.data.beacon_block_root in store.blocks
|
assert attestation.data.beacon_block_root in store.blocks
|
||||||
# Attestations must not be for blocks in the future. If not, the attestation should not be considered
|
# Attestations must not be for blocks in the future. If not, the attestation should not be considered
|
||||||
assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
|
assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
|
||||||
|
|
||||||
|
# LMD vote must be consistent with FFG vote target
|
||||||
|
target_slot = compute_start_slot_at_epoch(target.epoch)
|
||||||
|
assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot)
|
||||||
|
|
||||||
# Attestations can only affect the fork choice of subsequent slots.
|
# Attestations can only affect the fork choice of subsequent slots.
|
||||||
# Delay consideration in the fork choice until their slot is in the past.
|
# Delay consideration in the fork choice until their slot is in the past.
|
||||||
assert get_current_slot(store) >= attestation.data.slot + 1
|
assert get_current_slot(store) >= attestation.data.slot + 1
|
||||||
|
|
|
@ -4,7 +4,7 @@ This document contains the networking specification for Ethereum 2.0 clients.
|
||||||
|
|
||||||
It consists of four main sections:
|
It consists of four main sections:
|
||||||
|
|
||||||
1. A specification of the network fundamentals detailing the two network configurations: interoperability test network and mainnet launch.
|
1. A specification of the network fundamentals.
|
||||||
2. A specification of the three network interaction *domains* of Eth2: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain.
|
2. A specification of the three network interaction *domains* of Eth2: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain.
|
||||||
3. The rationale and further explanation for the design choices made in the previous two sections.
|
3. The rationale and further explanation for the design choices made in the previous two sections.
|
||||||
4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed.
|
4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed.
|
||||||
|
@ -17,14 +17,8 @@ It consists of four main sections:
|
||||||
|
|
||||||
- [Network fundamentals](#network-fundamentals)
|
- [Network fundamentals](#network-fundamentals)
|
||||||
- [Transport](#transport)
|
- [Transport](#transport)
|
||||||
- [Interop](#interop)
|
|
||||||
- [Mainnet](#mainnet)
|
|
||||||
- [Encryption and identification](#encryption-and-identification)
|
- [Encryption and identification](#encryption-and-identification)
|
||||||
- [Interop](#interop-1)
|
|
||||||
- [Mainnet](#mainnet-1)
|
|
||||||
- [Protocol Negotiation](#protocol-negotiation)
|
- [Protocol Negotiation](#protocol-negotiation)
|
||||||
- [Interop](#interop-2)
|
|
||||||
- [Mainnet](#mainnet-2)
|
|
||||||
- [Multiplexing](#multiplexing)
|
- [Multiplexing](#multiplexing)
|
||||||
- [Eth2 network interaction domains](#eth2-network-interaction-domains)
|
- [Eth2 network interaction domains](#eth2-network-interaction-domains)
|
||||||
- [Configuration](#configuration)
|
- [Configuration](#configuration)
|
||||||
|
@ -33,11 +27,8 @@ It consists of four main sections:
|
||||||
- [Topics and messages](#topics-and-messages)
|
- [Topics and messages](#topics-and-messages)
|
||||||
- [Global topics](#global-topics)
|
- [Global topics](#global-topics)
|
||||||
- [Attestation subnets](#attestation-subnets)
|
- [Attestation subnets](#attestation-subnets)
|
||||||
- [Interop](#interop-3)
|
- [Attestations and Aggregation](#attestations-and-aggregation)
|
||||||
- [Mainnet](#mainnet-3)
|
|
||||||
- [Encodings](#encodings)
|
- [Encodings](#encodings)
|
||||||
- [Interop](#interop-4)
|
|
||||||
- [Mainnet](#mainnet-4)
|
|
||||||
- [The Req/Resp domain](#the-reqresp-domain)
|
- [The Req/Resp domain](#the-reqresp-domain)
|
||||||
- [Protocol identification](#protocol-identification)
|
- [Protocol identification](#protocol-identification)
|
||||||
- [Req/Resp interaction](#reqresp-interaction)
|
- [Req/Resp interaction](#reqresp-interaction)
|
||||||
|
@ -56,29 +47,25 @@ It consists of four main sections:
|
||||||
- [Integration into libp2p stacks](#integration-into-libp2p-stacks)
|
- [Integration into libp2p stacks](#integration-into-libp2p-stacks)
|
||||||
- [ENR structure](#enr-structure)
|
- [ENR structure](#enr-structure)
|
||||||
- [Attestation subnet bitfield](#attestation-subnet-bitfield)
|
- [Attestation subnet bitfield](#attestation-subnet-bitfield)
|
||||||
- [Interop](#interop-5)
|
- [`eth2` field](#eth2-field)
|
||||||
- [Mainnet](#mainnet-5)
|
- [General capabilities](#general-capabilities)
|
||||||
- [`eth2` field](#eth2-field)
|
|
||||||
- [General capabilities](#general-capabilities)
|
|
||||||
- [Topic advertisement](#topic-advertisement)
|
- [Topic advertisement](#topic-advertisement)
|
||||||
- [Mainnet](#mainnet-6)
|
|
||||||
- [Design decision rationale](#design-decision-rationale)
|
- [Design decision rationale](#design-decision-rationale)
|
||||||
- [Transport](#transport-1)
|
- [Transport](#transport-1)
|
||||||
- [Why are we defining specific transports?](#why-are-we-defining-specific-transports)
|
- [Why are we defining specific transports?](#why-are-we-defining-specific-transports)
|
||||||
- [Can clients support other transports/handshakes than the ones mandated by the spec?](#can-clients-support-other-transportshandshakes-than-the-ones-mandated-by-the-spec)
|
- [Can clients support other transports/handshakes than the ones mandated by the spec?](#can-clients-support-other-transportshandshakes-than-the-ones-mandated-by-the-spec)
|
||||||
- [What are the advantages of using TCP/QUIC/Websockets?](#what-are-the-advantages-of-using-tcpquicwebsockets)
|
- [What are the advantages of using TCP/QUIC/Websockets?](#what-are-the-advantages-of-using-tcpquicwebsockets)
|
||||||
- [Why do we not just support a single transport?](#why-do-we-not-just-support-a-single-transport)
|
- [Why do we not just support a single transport?](#why-do-we-not-just-support-a-single-transport)
|
||||||
- [Why are we not using QUIC for mainnet from the start?](#why-are-we-not-using-quic-for-mainnet-from-the-start)
|
- [Why are we not using QUIC from the start?](#why-are-we-not-using-quic-from-the-start)
|
||||||
- [Multiplexing](#multiplexing-1)
|
- [Multiplexing](#multiplexing-1)
|
||||||
- [Why are we using mplex/yamux?](#why-are-we-using-mplexyamux)
|
- [Why are we using mplex/yamux?](#why-are-we-using-mplexyamux)
|
||||||
- [Protocol Negotiation](#protocol-negotiation-1)
|
- [Protocol Negotiation](#protocol-negotiation-1)
|
||||||
- [When is multiselect 2.0 due and why are we using it for mainnet?](#when-is-multiselect-20-due-and-why-are-we-using-it-for-mainnet)
|
- [When is multiselect 2.0 due and why do we plan to migrate to it?](#when-is-multiselect-20-due-and-why-do-we-plan-to-migrate-to-it)
|
||||||
- [What is the difference between connection-level and stream-level protocol negotiation?](#what-is-the-difference-between-connection-level-and-stream-level-protocol-negotiation)
|
- [What is the difference between connection-level and stream-level protocol negotiation?](#what-is-the-difference-between-connection-level-and-stream-level-protocol-negotiation)
|
||||||
- [Encryption](#encryption)
|
- [Encryption](#encryption)
|
||||||
- [Why are we using SecIO for interop? Why not for mainnet?](#why-are-we-using-secio-for-interop-why-not-for-mainnet)
|
- [Why are we not supporting SecIO?](#why-are-we-not-supporting-secio)
|
||||||
- [Why are we using Noise/TLS 1.3 for mainnet?](#why-are-we-using-noisetls-13-for-mainnet)
|
- [Why are we using Noise/TLS 1.3?](#why-are-we-using-noisetls-13)
|
||||||
- [Why are we using encryption at all?](#why-are-we-using-encryption-at-all)
|
- [Why are we using encryption at all?](#why-are-we-using-encryption-at-all)
|
||||||
    - [Will mainnet networking be untested when it launches?](#will-mainnnet-networking-be-untested-when-it-launches)
|
|
||||||
- [Gossipsub](#gossipsub)
|
- [Gossipsub](#gossipsub)
|
||||||
- [Why are we using a pub/sub algorithm for block and attestation propagation?](#why-are-we-using-a-pubsub-algorithm-for-block-and-attestation-propagation)
|
- [Why are we using a pub/sub algorithm for block and attestation propagation?](#why-are-we-using-a-pubsub-algorithm-for-block-and-attestation-propagation)
|
||||||
- [Why are we using topics to segregate encodings, yet only support one encoding?](#why-are-we-using-topics-to-segregate-encodings-yet-only-support-one-encoding)
|
- [Why are we using topics to segregate encodings, yet only support one encoding?](#why-are-we-using-topics-to-segregate-encodings-yet-only-support-one-encoding)
|
||||||
|
@ -105,11 +92,13 @@ It consists of four main sections:
|
||||||
- [Discovery](#discovery)
|
- [Discovery](#discovery)
|
||||||
- [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht)
|
- [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht)
|
||||||
- [What is the difference between an ENR and a multiaddr, and why are we using ENRs?](#what-is-the-difference-between-an-enr-and-a-multiaddr-and-why-are-we-using-enrs)
|
- [What is the difference between an ENR and a multiaddr, and why are we using ENRs?](#what-is-the-difference-between-an-enr-and-a-multiaddr-and-why-are-we-using-enrs)
|
||||||
|
- [Why do we not form ENRs and find peers until genesis block/state is known?](#why-do-we-not-form-enrs-and-find-peers-until-genesis-blockstate-is-known)
|
||||||
- [Compression/Encoding](#compressionencoding)
|
- [Compression/Encoding](#compressionencoding)
|
||||||
- [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding)
|
- [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding)
|
||||||
- [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers)
|
- [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers)
|
||||||
    - [Why are we using Snappy for compression?](#why-are-using-snappy-for-compression)
|
    - [Why are we using Snappy for compression?](#why-are-using-snappy-for-compression)
|
||||||
- [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes)
|
- [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes)
|
||||||
|
- [What are SSZ type size bounds?](#what-are-ssz-type-size-bounds)
|
||||||
- [libp2p implementations matrix](#libp2p-implementations-matrix)
|
- [libp2p implementations matrix](#libp2p-implementations-matrix)
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
@ -119,46 +108,22 @@ It consists of four main sections:
|
||||||
|
|
||||||
This section outlines the specification for the networking stack in Ethereum 2.0 clients.
|
This section outlines the specification for the networking stack in Ethereum 2.0 clients.
|
||||||
|
|
||||||
Sections that have differing parameters for mainnet launch and interoperability testing are split into subsections. Sections that are not split have the same parameters for interoperability testing as mainnet launch.
|
|
||||||
|
|
||||||
## Transport
|
## Transport
|
||||||
|
|
||||||
Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability.
|
Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability.
|
||||||
|
|
||||||
#### Interop
|
|
||||||
|
|
||||||
All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously).
|
All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously).
|
||||||
|
|
||||||
To facilitate connectivity and avert possible IPv6 routability/support issues, clients participating in the interoperability testnet MUST expose at least ONE IPv4 endpoint.
|
Clients must support listening on at least one of IPv4 or IPv6. Clients that do _not_ have support for listening on IPv4 SHOULD be cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST be capable of dialing both IPv4 and IPv6 addresses.
|
||||||
|
|
||||||
All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities.
|
All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities. (Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined soon.)
|
||||||
|
|
||||||
Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint.
|
Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint.
|
||||||
|
|
||||||
#### Mainnet
|
|
||||||
|
|
||||||
All requirements from the interoperability testnet apply, except for the IPv4 addressing scheme requirement.
|
|
||||||
|
|
||||||
At this stage, clients are licensed to drop IPv4 support if they wish to do so, cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST retain capability to dial both IPv4 and IPv6 addresses.
|
|
||||||
|
|
||||||
Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined closer to the time.
|
|
||||||
|
|
||||||
## Encryption and identification
|
## Encryption and identification
|
||||||
|
|
||||||
#### Interop
|
|
||||||
|
|
||||||
[SecIO](https://github.com/libp2p/specs/tree/master/secio) with `secp256k1` identities will be used for initial interoperability testing.
|
|
||||||
|
|
||||||
The following SecIO parameters MUST be supported by all stacks:
|
|
||||||
|
|
||||||
- Key agreement: ECDH-P256.
|
|
||||||
- Cipher: AES-128.
|
|
||||||
- Digest: SHA-256.
|
|
||||||
|
|
||||||
#### Mainnet
|
|
||||||
|
|
||||||
The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure
|
The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure
|
||||||
channel handshake with `secp256k1` identities will be used for mainnet.
|
channel handshake with `secp256k1` identities will be used for encryption.
|
||||||
|
|
||||||
As specified in the libp2p specification, clients MUST support the `XX` handshake pattern.
|
As specified in the libp2p specification, clients MUST support the `XX` handshake pattern.
|
||||||
|
|
||||||
|
@ -166,13 +131,7 @@ As specified in the libp2p specification, clients MUST support the `XX` handshak
|
||||||
|
|
||||||
Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers.
|
Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers.
|
||||||
|
|
||||||
#### Interop
|
Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when the spec solidifies. Once all clients have implementations for multiselect 2.0, multistream-select 1.0 MAY be phased out.
|
||||||
|
|
||||||
Connection-level and stream-level (see the [Rationale](#design-decision-rationale) section below for explanations) protocol negotiation MUST be conducted using [multistream-select v1.0](https://github.com/multiformats/multistream-select/). Its protocol ID is: `/multistream/1.0.0`.
|
|
||||||
|
|
||||||
#### Mainnet
|
|
||||||
|
|
||||||
Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95). Depending on the number of clients that have implementations for multiselect 2.0 by mainnet, [multistream-select 1.0](https://github.com/multiformats/multistream-select/) may be phased out.
|
|
||||||
|
|
||||||
## Multiplexing
|
## Multiplexing
|
||||||
|
|
||||||
|
@ -180,7 +139,7 @@ During connection bootstrapping, libp2p dynamically negotiates a mutually suppor
|
||||||
|
|
||||||
Two multiplexers are commonplace in libp2p implementations: [mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`.
|
Two multiplexers are commonplace in libp2p implementations: [mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`.
|
||||||
|
|
||||||
Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux must take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs.
|
Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux MUST take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs.
|
||||||
|
|
||||||
# Eth2 network interaction domains
|
# Eth2 network interaction domains
|
||||||
|
|
||||||
|
@ -191,8 +150,8 @@ This section outlines constants that are used in this spec.
|
||||||
| Name | Value | Description |
|
| Name | Value | Description |
|
||||||
|---|---|---|
|
|---|---|---|
|
||||||
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
|
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
|
||||||
|
| `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
|
||||||
| `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
|
| `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
|
||||||
| `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. |
|
|
||||||
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
|
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
|
||||||
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
|
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
|
||||||
| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. |
|
| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. |
|
||||||
|
@ -218,9 +177,9 @@ Where
|
||||||
|
|
||||||
## The gossip domain: gossipsub
|
## The gossip domain: gossipsub
|
||||||
|
|
||||||
Clients MUST support the [gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) libp2p protocol.
|
Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) libp2p protocol including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) extension.
|
||||||
|
|
||||||
**Protocol ID:** `/meshsub/1.0.0`
|
**Protocol ID:** `/meshsub/1.1.0`
|
||||||
|
|
||||||
**Gossipsub Parameters**
|
**Gossipsub Parameters**
|
||||||
|
|
||||||
|
@ -245,7 +204,9 @@ Topics are plain UTF-8 strings and are encoded on the wire as determined by prot
|
||||||
- `current_fork_version` is the fork version of the epoch of the message to be sent on the topic
|
- `current_fork_version` is the fork version of the epoch of the message to be sent on the topic
|
||||||
- `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
|
- `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
|
||||||
- `Name` - see table below
|
- `Name` - see table below
|
||||||
- `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encoding-strategies) section for further details.
|
- `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encodings) section for further details.
|
||||||
|
|
||||||
|
*Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known.
|
||||||
|
|
||||||
Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit.
|
Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit.
|
||||||
|
|
||||||
|
@ -258,77 +219,74 @@ where `base64` is the [URL-safe base64 alphabet](https://tools.ietf.org/html/rfc
|
||||||
|
|
||||||
The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic:
|
The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic:
|
||||||
|
|
||||||
| Name | Message Type |
|
| Name | Message Type |
|
||||||
|------------------------------------------------|-------------------------|
|
|----------------------------------|---------------------------|
|
||||||
| beacon_block | SignedBeaconBlock |
|
| `beacon_block` | `SignedBeaconBlock` |
|
||||||
| beacon_aggregate_and_proof | SignedAggregateAndProof |
|
| `beacon_aggregate_and_proof` | `SignedAggregateAndProof` |
|
||||||
| beacon_attestation\* | Attestation |
|
| `beacon_attestation_{subnet_id}` | `Attestation` |
|
||||||
| committee_index{subnet_id}\_beacon_attestation | Attestation |
|
| `voluntary_exit` | `SignedVoluntaryExit` |
|
||||||
| voluntary_exit | SignedVoluntaryExit |
|
| `proposer_slashing` | `ProposerSlashing` |
|
||||||
| proposer_slashing | ProposerSlashing |
|
| `attester_slashing` | `AttesterSlashing` |
|
||||||
| attester_slashing | AttesterSlashing |
|
|
||||||
|
|
||||||
Clients MUST reject (fail validation) messages containing an incorrect type, or invalid payload.
|
Clients MUST reject (fail validation) messages containing an incorrect type, or invalid payload.
|
||||||
|
|
||||||
When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints.
|
When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints.
|
||||||
|
|
||||||
\* The `beacon_attestation` topic is only for interop and will be removed prior to mainnet.
|
Gossipsub v1.1 introduces [Extended Validators](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md#extended-validators) for the application to aid in the gossipsub peer-scoring scheme.
|
||||||
|
We utilize `ACCEPT`, `REJECT`, and `IGNORE`. For each gossipsub topic, there are application specific validations. If all validations pass, return `ACCEPT`. If one or more validations fail while processing the items in order, return either `REJECT` or `IGNORE` as specified in the prefix of the particular condition.
|
||||||
|
|
||||||
#### Global topics
|
#### Global topics
|
||||||
|
|
||||||
There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `Name`s are:
|
There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `Name`s are:
|
||||||
|
|
||||||
- `beacon_block` - This topic is used solely for propagating new signed beacon blocks to all nodes on the network. Signed blocks are sent in their entirety. The following validations MUST pass before forwarding the `signed_beacon_block` on the network
|
- `beacon_block` - This topic is used solely for propagating new signed beacon blocks to all nodes on the network. Signed blocks are sent in their entirety. The following validations MUST pass before forwarding the `signed_beacon_block` on the network
|
||||||
- The block is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `signed_beacon_block.message.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot).
|
- _[IGNORE]_ The block is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `signed_beacon_block.message.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot).
|
||||||
- The block is from a slot greater than the latest finalized slot -- i.e. validate that `signed_beacon_block.message.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` (a client MAY choose to validate and store such blocks for additional purposes -- e.g. slashing detection, archive nodes, etc).
|
- _[IGNORE]_ The block is from a slot greater than the latest finalized slot -- i.e. validate that `signed_beacon_block.message.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` (a client MAY choose to validate and store such blocks for additional purposes -- e.g. slashing detection, archive nodes, etc).
|
||||||
- The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`.
|
- _[IGNORE]_ The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`.
|
||||||
- The proposer signature, `signed_beacon_block.signature`, is valid with respect to the `proposer_index` pubkey.
|
- _[REJECT]_ The proposer signature, `signed_beacon_block.signature`, is valid with respect to the `proposer_index` pubkey.
|
||||||
- The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the block MAY be queued for later processing while proposers for the block's branch are calculated.
|
- _[REJECT]_ The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the block MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||||
- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network. (We define the following for convenience -- `aggregate_and_proof = signed_aggregate_and_proof.message` and `aggregate = aggregate_and_proof.aggregate`)
|
- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network. (We define the following for convenience -- `aggregate_and_proof = signed_aggregate_and_proof.message` and `aggregate = aggregate_and_proof.aggregate`)
|
||||||
- `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot).
|
- _[IGNORE]_ `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot).
|
||||||
- The aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally).
|
- _[IGNORE]_ The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
|
||||||
- The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the slot `aggregate.data.slot`.
|
- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
|
||||||
- The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
|
- _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
|
||||||
- `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
|
- _[REJECT]_ The attestation has participants -- that is, `len(get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)) >= 1`.
|
||||||
- The aggregator's validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)`.
|
- _[REJECT]_ `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
|
||||||
- The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate.data.slot` by the validator with index `aggregate_and_proof.aggregator_index`.
|
- _[REJECT]_ The aggregator's validator index is within the committee -- i.e. `aggregate_and_proof.aggregator_index in get_beacon_committee(state, aggregate.data.slot, aggregate.data.index)`.
|
||||||
- The aggregator signature, `signed_aggregate_and_proof.signature`, is valid.
|
- _[REJECT]_ The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate.data.slot` by the validator with index `aggregate_and_proof.aggregator_index`.
|
||||||
- The signature of `aggregate` is valid.
|
- _[REJECT]_ The aggregator signature, `signed_aggregate_and_proof.signature`, is valid.
|
||||||
|
- _[REJECT]_ The signature of `aggregate` is valid.
|
||||||
|
|
||||||
Additional global topics are used to propagate lower frequency validator messages. Their `Name`s are:
|
Additional global topics are used to propagate lower frequency validator messages. Their `Name`s are:
|
||||||
|
|
||||||
- `voluntary_exit` - This topic is used solely for propagating signed voluntary validator exits to proposers on the network. Signed voluntary exits are sent in their entirety. The following validations MUST pass before forwarding the `signed_voluntary_exit` on to the network
|
- `voluntary_exit` - This topic is used solely for propagating signed voluntary validator exits to proposers on the network. Signed voluntary exits are sent in their entirety. The following validations MUST pass before forwarding the `signed_voluntary_exit` on to the network
|
||||||
- The voluntary exit is the first valid voluntary exit received for the validator with index `signed_voluntary_exit.message.validator_index`.
|
- _[IGNORE]_ The voluntary exit is the first valid voluntary exit received for the validator with index `signed_voluntary_exit.message.validator_index`.
|
||||||
- All of the conditions within `process_voluntary_exit` pass validation.
|
- _[REJECT]_ All of the conditions within `process_voluntary_exit` pass validation.
|
||||||
- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. The following validations MUST pass before forwarding the `proposer_slashing` on to the network
|
- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. The following validations MUST pass before forwarding the `proposer_slashing` on to the network
|
||||||
- The proposer slashing is the first valid proposer slashing received for the proposer with index `proposer_slashing.index`.
|
- _[IGNORE]_ The proposer slashing is the first valid proposer slashing received for the proposer with index `proposer_slashing.index`.
|
||||||
- All of the conditions within `process_proposer_slashing` pass validation.
|
- _[REJECT]_ All of the conditions within `process_proposer_slashing` pass validation.
|
||||||
- `attester_slashing` - This topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network.
|
- `attester_slashing` - This topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network.
|
||||||
- At least one index in the intersection of the attesting indices of each attestation has not yet been seen in any prior `attester_slashing` (i.e. `attester_slashed_indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)`, verify if `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`).
|
- _[IGNORE]_ At least one index in the intersection of the attesting indices of each attestation has not yet been seen in any prior `attester_slashing` (i.e. `attester_slashed_indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)`, verify if `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`).
|
||||||
- All of the conditions within `process_attester_slashing` pass validation.
|
- _[REJECT]_ All of the conditions within `process_attester_slashing` pass validation.
|
||||||
|
|
||||||
|
|
||||||
#### Attestation subnets
|
#### Attestation subnets
|
||||||
|
|
||||||
Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `Name`s are:
|
Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `Name`s are:
|
||||||
|
|
||||||
- `committee_index{subnet_id}_beacon_attestation` - These topics are used to propagate unaggregated attestations to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the subnet.
|
- `beacon_attestation_{subnet_id}` - These topics are used to propagate unaggregated attestations to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the subnet.
|
||||||
- The attestation's committee index (`attestation.data.index`) is for the correct subnet.
|
- _[REJECT]_ The attestation is for the correct subnet (i.e. `compute_subnet_for_attestation(state, attestation) == subnet_id`).
|
||||||
- `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (within a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` (a client MAY queue future attestations for processing at the appropriate slot).
|
- _[IGNORE]_ `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (within a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` (a client MAY queue future attestations for processing at the appropriate slot).
|
||||||
- The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`).
|
- _[REJECT]_ The attestation is unaggregated -- that is, it has exactly one participating validator (`len(get_attesting_indices(state, attestation.data, attestation.aggregation_bits)) == 1`).
|
||||||
- The attestation is the first valid attestation received for the participating validator for the slot, `attestation.data.slot`.
|
- _[IGNORE]_ There has been no other valid attestation seen on an attestation subnet that has an identical `attestation.data.target.epoch` and participating validator index.
|
||||||
- The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
||||||
- The signature of `attestation` is valid.
|
- _[REJECT]_ The signature of `attestation` is valid.
|
||||||
|
|
||||||
#### Interop
|
#### Attestations and Aggregation
|
||||||
|
|
||||||
Unaggregated and aggregated attestations from all shards are sent as `Attestation`s to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing.
|
Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. The correct subnet for an attestation can be calculated with `compute_subnet_for_attestation`. `beacon_attestation_{subnet_id}` topics are rotated throughout the epoch in a similar fashion to rotating through shards in committees in Phase 1.
|
||||||
|
|
||||||
#### Mainnet
|
Unaggregated attestations are sent to the subnet topic, `beacon_attestation_{compute_subnet_for_attestation(state, attestation)}` as `Attestation`s.
|
||||||
|
|
||||||
Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. For the `committee_index{subnet_id}_beacon_attestation` topics, `subnet_id` is set to `index % ATTESTATION_SUBNET_COUNT`, where `index` is the `CommitteeIndex` of the given committee.
|
|
||||||
|
|
||||||
Unaggregated attestations are sent to the subnet topic, `committee_index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` as `Attestation`s.
|
|
||||||
|
|
||||||
Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s.
|
Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s.
|
||||||
|
|
||||||
|
@ -336,15 +294,11 @@ Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `A
|
||||||
|
|
||||||
Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded.
|
Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded.
|
||||||
|
|
||||||
#### Interop
|
- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) block compression. Example: The beacon aggregate attestation topic string is `/eth2/446a7232/beacon_aggregate_and_proof/ssz_snappy`, the fork digest is `446a7232` and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy.
|
||||||
|
|
||||||
- `ssz` - All objects are [SSZ-encoded](#ssz-encoding). Example: The beacon block topic string is `/eth2/beacon_block/ssz`, and the data field of a gossipsub message is an ssz-encoded `SignedBeaconBlock`.
|
Snappy has two formats: "block" and "frames" (streaming). Gossip messages remain relatively small (100s of bytes to 100s of kilobytes) so [basic snappy block compression](https://github.com/google/snappy/blob/master/format_description.txt) is used to avoid the additional overhead associated with snappy frames.
|
||||||
|
|
||||||
#### Mainnet
|
Implementations MUST use a single encoding for gossip. Changing an encoding will require coordination between participating implementations.
|
||||||
|
|
||||||
- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). Example: The beacon aggregate attestation topic string is `/eth2/beacon_aggregate_and_proof/ssz_snappy`, and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy.
|
|
||||||
|
|
||||||
Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations.
|
|
||||||
|
|
||||||
## The Req/Resp domain
|
## The Req/Resp domain
|
||||||
|
|
||||||
|
@ -380,9 +334,12 @@ result ::= “0” | “1” | “2” | [“128” ... ”255”]
|
||||||
|
|
||||||
The encoding-dependent header may carry metadata or assertions such as the encoded payload length, for integrity and attack proofing purposes. Because req/resp streams are single-use and stream closures implicitly delimit the boundaries, it is not strictly necessary to length-prefix payloads; however, certain encodings like SSZ do, for added security.
|
The encoding-dependent header may carry metadata or assertions such as the encoded payload length, for integrity and attack proofing purposes. Because req/resp streams are single-use and stream closures implicitly delimit the boundaries, it is not strictly necessary to length-prefix payloads; however, certain encodings like SSZ do, for added security.
|
||||||
|
|
||||||
A `response` is formed by zero or more `response_chunk`s. Responses that consist of a single SSZ-list (such as `BlocksByRange` and `BlocksByRoot`) send each list item as a `response_chunk`. All other response types (non-Lists) send a single `response_chunk`. The encoded-payload of a `response_chunk` has a maximum uncompressed byte size of `MAX_CHUNK_SIZE`.
|
A `response` is formed by zero or more `response_chunk`s. Responses that consist of a single SSZ-list (such as `BlocksByRange` and `BlocksByRoot`) send each list item as a `response_chunk`. All other response types (non-Lists) send a single `response_chunk`.
|
||||||
|
|
||||||
Clients MUST ensure the each encoded payload of a `response_chunk` is less than or equal to `MAX_CHUNK_SIZE`; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance.
|
For both `request`s and `response`s, the `encoding-dependent-header` MUST be valid, and the `encoded-payload` must be valid within the constraints of the `encoding-dependent-header`.
|
||||||
|
This includes type-specific bounds on payload size for some encoding strategies. Regardless of these type specific bounds, a global maximum uncompressed byte size of `MAX_CHUNK_SIZE` MUST be applied to all method response chunks.
|
||||||
|
|
||||||
|
Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance.
|
||||||
|
|
||||||
#### Requesting side
|
#### Requesting side
|
||||||
|
|
||||||
|
@ -390,13 +347,22 @@ Once a new stream with the protocol ID for the request type has been negotiated,
|
||||||
|
|
||||||
The requester MUST close the write side of the stream once it finishes writing the request message. At this point, the stream will be half-closed.
|
The requester MUST close the write side of the stream once it finishes writing the request message. At this point, the stream will be half-closed.
|
||||||
|
|
||||||
The requester MUST wait a maximum of `TTFB_TIMEOUT` for the first response byte to arrive (time to first byte—or TTFB—timeout). On that happening, the requester allows a further `RESP_TIMEOUT` for each subsequent `response_chunk` received. For responses consisting of potentially many `response_chunk`s (an SSZ-list) the requester SHOULD read from the stream until either; a) An error result is received in one of the chunks, b) The responder closes the stream, c) More than `MAX_CHUNK_SIZE` bytes have been read for a single `response_chunk` payload or d) More than the maximum number of requested chunks are read. For requests consisting of a single `response_chunk` and a length-prefix, the requester should read the exact number of bytes defined by the length-prefix before closing the stream.
|
The requester MUST wait a maximum of `TTFB_TIMEOUT` for the first response byte to arrive (time to first byte—or TTFB—timeout). On that happening, the requester allows a further `RESP_TIMEOUT` for each subsequent `response_chunk` received.
|
||||||
|
|
||||||
If any of these timeouts fire, the requester SHOULD reset the stream and deem the req/resp operation to have failed.
|
If any of these timeouts fire, the requester SHOULD reset the stream and deem the req/resp operation to have failed.
|
||||||
|
|
||||||
|
A requester SHOULD read from the stream until either:
|
||||||
|
1. An error result is received in one of the chunks (the error payload MAY be read before stopping).
|
||||||
|
2. The responder closes the stream.
|
||||||
|
3. Any part of the `response_chunk` fails validation.
|
||||||
|
4. The maximum number of requested chunks are read.
|
||||||
|
|
||||||
|
For requests consisting of a single valid `response_chunk`, the requester SHOULD read the chunk fully, as defined by the `encoding-dependent-header`, before closing the stream.
|
||||||
|
|
||||||
#### Responding side
|
#### Responding side
|
||||||
|
|
||||||
Once a new stream with the protocol ID for the request type has been negotiated, the responder must process the incoming request message according to the encoding strategy, until EOF (denoting stream half-closure by the requester).
|
Once a new stream with the protocol ID for the request type has been negotiated, the responder SHOULD process the incoming request and MUST validate it before processing it.
|
||||||
|
Request processing and validation MUST be done according to the encoding strategy, until EOF (denoting stream half-closure by the requester).
|
||||||
|
|
||||||
The responder MUST:
|
The responder MUST:
|
||||||
|
|
||||||
|
@ -426,26 +392,18 @@ The `ErrorMessage` schema is:
|
||||||
|
|
||||||
```
|
```
|
||||||
(
|
(
|
||||||
error_message: String
|
error_message: List[byte, 256]
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
*Note*: The String type is encoded as UTF-8 bytes without NULL terminator when SSZ-encoded. As the `ErrorMessage` is not an SSZ-container, only the UTF-8 bytes will be sent when SSZ-encoded.
|
*Note*: By convention, the `error_message` is a sequence of bytes that MAY be interpreted as a UTF-8 string (for debugging purposes). Clients MUST treat any byte sequence as valid.
|
||||||
|
|
||||||
A response therefore has the form of one or more `response_chunk`s, each structured as follows:
|
|
||||||
```
|
|
||||||
+--------+--------+--------+--------+--------+--------+
|
|
||||||
| result | header (opt) | encoded_response |
|
|
||||||
+--------+--------+--------+--------+--------+--------+
|
|
||||||
```
|
|
||||||
Here, `result` represents the 1-byte response code.
|
|
||||||
|
|
||||||
### Encoding strategies
|
### Encoding strategies
|
||||||
|
|
||||||
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time:
|
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time:
|
||||||
|
|
||||||
- `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s.
|
- `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s.
|
||||||
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet.
|
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) frames compression. This encoding type MUST be supported by all clients.
|
||||||
|
|
||||||
#### SSZ-encoding strategy (with or without Snappy)
|
#### SSZ-encoding strategy (with or without Snappy)
|
||||||
|
|
||||||
|
@ -458,7 +416,7 @@ Snappy has two formats: "block" and "frames" (streaming). To support large reque
|
||||||
Since snappy frame contents [have a maximum size of `65536` bytes](https://github.com/google/snappy/blob/master/framing_format.txt#L104)
|
Since snappy frame contents [have a maximum size of `65536` bytes](https://github.com/google/snappy/blob/master/framing_format.txt#L104)
|
||||||
and frame headers are just `identifier (1) + checksum (4)` bytes, the expected buffering of a single frame is acceptable.
|
and frame headers are just `identifier (1) + checksum (4)` bytes, the expected buffering of a single frame is acceptable.
|
||||||
|
|
||||||
**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST encode the length of the raw SSZ bytes, encoded as an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
|
**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST encode the length of the raw SSZ bytes, encoded as an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
|
||||||
|
|
||||||
*Writing*: By first computing and writing the SSZ byte length, the SSZ encoder can then directly write the chunk contents to the stream.
|
*Writing*: By first computing and writing the SSZ byte length, the SSZ encoder can then directly write the chunk contents to the stream.
|
||||||
If Snappy is applied, it can be passed through a buffered Snappy writer to compress frame by frame.
|
If Snappy is applied, it can be passed through a buffered Snappy writer to compress frame by frame.
|
||||||
|
@ -466,26 +424,29 @@ If Snappy is applied, it can be passed through a buffered Snappy writer to compr
|
||||||
*Reading*: After reading the expected SSZ byte length, the SSZ decoder can directly read the contents from the stream.
|
*Reading*: After reading the expected SSZ byte length, the SSZ decoder can directly read the contents from the stream.
|
||||||
If snappy is applied, it can be passed through a buffered Snappy reader to decompress frame by frame.
|
If snappy is applied, it can be passed through a buffered Snappy reader to decompress frame by frame.
|
||||||
|
|
||||||
A reader SHOULD NOT read more than `max_encoded_len(n)` bytes after reading the SSZ length prefix `n` from the header.
|
Before reading the payload, the header MUST be validated:
|
||||||
|
- The unsigned protobuf varint used for the length-prefix MUST not be longer than 10 bytes, which is sufficient for any `uint64`.
|
||||||
|
- The length-prefix is within the expected [size bounds derived from the payload SSZ type](#what-are-ssz-type-size-bounds).
|
||||||
|
|
||||||
|
After reading a valid header, the payload MAY be read, while maintaining the size constraints from the header.
|
||||||
|
|
||||||
|
A reader SHOULD NOT read more than `max_encoded_len(n)` bytes after reading the SSZ length-prefix `n` from the header.
|
||||||
- For `ssz` this is: `n`
|
- For `ssz` this is: `n`
|
||||||
- For `ssz_snappy` this is: `32 + n + n // 6`. This is considered the [worst-case compression result](https://github.com/google/snappy/blob/537f4ad6240e586970fe554614542e9717df7902/snappy.cc#L98) by Snappy.
|
- For `ssz_snappy` this is: `32 + n + n // 6`. This is considered the [worst-case compression result](https://github.com/google/snappy/blob/537f4ad6240e586970fe554614542e9717df7902/snappy.cc#L98) by Snappy.
|
||||||
|
|
||||||
A reader SHOULD consider the following cases as invalid input:
|
A reader SHOULD consider the following cases as invalid input:
|
||||||
- A SSZ length prefix that, compared against the SSZ type information (vector lengths, list limits, integer sizes, etc.), is:
|
- Any remaining bytes, after having read the `n` SSZ bytes. An EOF is expected if more bytes are read than required.
|
||||||
- Smaller than the expected minimum serialized length.
|
- An early EOF, before fully reading the declared length-prefix worth of SSZ bytes.
|
||||||
- Bigger than the expected maximum serialized length.
|
|
||||||
- Any remaining bytes, after having read the `n` SSZ bytes. An EOF is expected.
|
|
||||||
- An early EOF, before fully reading the declared length prefix worth of SSZ bytes.
|
|
||||||
|
|
||||||
In case of an invalid input, a reader MUST:
|
In case of an invalid input (header or payload), a reader MUST:
|
||||||
- From requests: send back an error message, response code `InvalidRequest`. The request itself is ignored.
|
- From requests: send back an error message, response code `InvalidRequest`. The request itself is ignored.
|
||||||
- From responses: ignore the response, the response MUST be considered bad server behavior.
|
- From responses: ignore the response, the response MUST be considered bad server behavior.
|
||||||
|
|
||||||
All messages that contain only a single field MUST be encoded directly as the type of that field and MUST NOT be encoded as an SSZ container.
|
All messages that contain only a single field MUST be encoded directly as the type of that field and MUST NOT be encoded as an SSZ container.
|
||||||
|
|
||||||
Responses that are SSZ-lists (for example `[]SignedBeaconBlock`) send their
|
Responses that are SSZ-lists (for example `List[SignedBeaconBlock, ...]`) send their
|
||||||
constituents individually as `response_chunk`s. For example, the
|
constituents individually as `response_chunk`s. For example, the
|
||||||
`[]SignedBeaconBlock` response type sends zero or more `response_chunk`s. Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload.
|
`List[SignedBeaconBlock, ...]` response type sends zero or more `response_chunk`s. Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload.
|
||||||
|
|
||||||
### Messages
|
### Messages
|
||||||
|
|
||||||
|
@ -508,9 +469,9 @@ The fields are, as seen by the client at the time of sending the message:
|
||||||
- `fork_digest`: The node's `ForkDigest` (`compute_fork_digest(current_fork_version, genesis_validators_root)`) where
|
- `fork_digest`: The node's `ForkDigest` (`compute_fork_digest(current_fork_version, genesis_validators_root)`) where
|
||||||
- `current_fork_version` is the fork version at the node's current epoch defined by the wall-clock time (not necessarily the epoch to which the node is synced)
|
- `current_fork_version` is the fork version at the node's current epoch defined by the wall-clock time (not necessarily the epoch to which the node is synced)
|
||||||
- `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
|
- `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
|
||||||
- `finalized_root`: `state.finalized_checkpoint.root` for the state corresponding to the head block.
|
- `finalized_root`: `state.finalized_checkpoint.root` for the state corresponding to the head block (Note this defaults to `Root(b'\x00' * 32)` for the genesis finalized checkpoint).
|
||||||
- `finalized_epoch`: `state.finalized_checkpoint.epoch` for the state corresponding to the head block.
|
- `finalized_epoch`: `state.finalized_checkpoint.epoch` for the state corresponding to the head block.
|
||||||
- `head_root`: The hash_tree_root root of the current head block.
|
- `head_root`: The `hash_tree_root` root of the current head block (`BeaconBlock`).
|
||||||
- `head_slot`: The slot of the block corresponding to the `head_root`.
|
- `head_slot`: The slot of the block corresponding to the `head_root`.
|
||||||
|
|
||||||
The dialing client MUST send a `Status` request upon connection.
|
The dialing client MUST send a `Status` request upon connection.
|
||||||
|
@ -568,11 +529,14 @@ Request Content:
|
||||||
Response Content:
|
Response Content:
|
||||||
```
|
```
|
||||||
(
|
(
|
||||||
[]SignedBeaconBlock
|
List[SignedBeaconBlock, MAX_REQUEST_BLOCKS]
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
Requests count beacon blocks from the peer starting from `start_slot`, leading up to the current head block as selected by fork choice. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at slots [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. A step value of 1 returns all blocks on the range `[start_slot, start_slot + count)`.
|
Requests beacon blocks in the slot range `[start_slot, start_slot + count * step)`, leading up to the current head block as selected by fork choice.
|
||||||
|
`step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at slots [2, 4, 6, …].
|
||||||
|
In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …].
|
||||||
|
A request MUST NOT have a 0 slot increment, i.e. `step >= 1`.
|
||||||
|
|
||||||
`BeaconBlocksByRange` is primarily used to sync historical blocks.
|
`BeaconBlocksByRange` is primarily used to sync historical blocks.
|
||||||
|
|
||||||
|
@ -582,13 +546,20 @@ The response MUST consist of zero or more `response_chunk`. Each _successful_ `r
|
||||||
|
|
||||||
Clients MUST keep a record of signed blocks seen since the start of the weak subjectivity period and MUST support serving requests of blocks up to their own `head_block_root`.
|
Clients MUST keep a record of signed blocks seen since the start of the weak subjectivity period and MUST support serving requests of blocks up to their own `head_block_root`.
|
||||||
|
|
||||||
Clients MUST respond with at least one block, if they have it and it exists in the range. Clients MAY limit the number of blocks in the response.
|
Clients MUST respond with at least the first block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOCKS` blocks.
|
||||||
|
|
||||||
|
The following blocks, where they exist, MUST be sent in consecutive order.
|
||||||
|
|
||||||
|
Clients MAY limit the number of blocks in the response.
|
||||||
|
|
||||||
The response MUST contain no more than `count` blocks.
|
The response MUST contain no more than `count` blocks.
|
||||||
|
|
||||||
Clients MUST order blocks by increasing slot number.
|
Clients MUST respond with blocks from their view of the current fork choice -- that is, blocks from the single chain defined by the current head. Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake.
|
||||||
|
|
||||||
Clients MUST respond with blocks from their view of the current fork choice. In particular, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake.
|
Clients MUST respond with blocks that are consistent from a single chain within the context of the request.
|
||||||
|
This applies to any `step` value. In particular when `step == 1`, each `parent_root` MUST match the `hash_tree_root` of the preceding block.
|
||||||
|
|
||||||
|
After the initial block, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request.
|
||||||
|
|
||||||
#### BeaconBlocksByRoot
|
#### BeaconBlocksByRoot
|
||||||
|
|
||||||
|
@ -598,7 +569,7 @@ Request Content:
|
||||||
|
|
||||||
```
|
```
|
||||||
(
|
(
|
||||||
[]Root
|
List[Root, MAX_REQUEST_BLOCKS]
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -606,12 +577,14 @@ Response Content:
|
||||||
|
|
||||||
```
|
```
|
||||||
(
|
(
|
||||||
[]SignedBeaconBlock
|
List[SignedBeaconBlock, MAX_REQUEST_BLOCKS]
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
Requests blocks by block root (= `hash_tree_root(SignedBeaconBlock.message)`). The response is a list of `SignedBeaconBlock` whose length is less than or equal to the number of requested blocks. It may be less in the case that the responding peer is missing blocks.
|
Requests blocks by block root (= `hash_tree_root(SignedBeaconBlock.message)`). The response is a list of `SignedBeaconBlock` whose length is less than or equal to the number of requested blocks. It may be less in the case that the responding peer is missing blocks.
|
||||||
|
|
||||||
|
No more than `MAX_REQUEST_BLOCKS` may be requested at a time.
|
||||||
|
|
||||||
`BeaconBlocksByRoot` is primarily used to recover recent blocks (e.g. when receiving a block or attestation whose parent is unknown).
|
`BeaconBlocksByRoot` is primarily used to recover recent blocks (e.g. when receiving a block or attestation whose parent is unknown).
|
||||||
|
|
||||||
The request MUST be encoded as an SSZ-field.
|
The request MUST be encoded as an SSZ-field.
|
||||||
|
@ -678,7 +651,7 @@ The response MUST consist of a single `response_chunk`.
|
||||||
|
|
||||||
## The discovery domain: discv5
|
## The discovery domain: discv5
|
||||||
|
|
||||||
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery, both in the interoperability testnet and mainnet.
|
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery.
|
||||||
|
|
||||||
`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.
|
`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.
|
||||||
|
|
||||||
|
@ -718,15 +691,7 @@ If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the `a
|
||||||
|
|
||||||
If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely.
|
If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely.
|
||||||
|
|
||||||
#### Interop
|
#### `eth2` field
|
||||||
|
|
||||||
In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry Eth2 capability information, as it would be superfluous.
|
|
||||||
|
|
||||||
Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an Eth2 peer, in order to eschew connecting to Eth 1.0 peers.
|
|
||||||
|
|
||||||
#### Mainnet
|
|
||||||
|
|
||||||
##### `eth2` field
|
|
||||||
|
|
||||||
ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current fork digest, next fork version, and next fork epoch to ensure connections are made with peers on the intended eth2 network.
|
ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current fork digest, next fork version, and next fork epoch to ensure connections are made with peers on the intended eth2 network.
|
||||||
|
|
||||||
|
@ -752,18 +717,18 @@ where the fields of `ENRForkID` are defined as
|
||||||
* `next_fork_version` is the fork version corresponding to the next planned hard fork at a future epoch. If no future fork is planned, set `next_fork_version = current_fork_version` to signal this fact
|
* `next_fork_version` is the fork version corresponding to the next planned hard fork at a future epoch. If no future fork is planned, set `next_fork_version = current_fork_version` to signal this fact
|
||||||
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated. If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact
|
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated. If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact
|
||||||
|
|
||||||
|
*Note*: `fork_digest` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known. One notable exception to this rule is the distribution of bootnode ENRs prior to genesis. In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as `ENRForkID(fork_digest=compute_fork_digest(GENESIS_FORK_VERSION, b'\x00'*32), next_fork_version=GENESIS_FORK_VERSION, next_fork_epoch=FAR_FUTURE_EPOCH)`. After genesis values are known, the bootnodes SHOULD update ENRs to participate in normal discovery operations.
|
||||||
|
|
||||||
Clients SHOULD connect to peers with `fork_digest`, `next_fork_version`, and `next_fork_epoch` that match local values.
|
Clients SHOULD connect to peers with `fork_digest`, `next_fork_version`, and `next_fork_epoch` that match local values.
|
||||||
|
|
||||||
Clients MAY connect to peers with the same `fork_digest` but a different `next_fork_version`/`next_fork_epoch`. Unless `ENRForkID` is manually updated to match prior to the earlier `next_fork_epoch` of the two clients, these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.
|
Clients MAY connect to peers with the same `fork_digest` but a different `next_fork_version`/`next_fork_epoch`. Unless `ENRForkID` is manually updated to match prior to the earlier `next_fork_epoch` of the two clients, these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.
|
||||||
|
|
||||||
##### General capabilities
|
#### General capabilities
|
||||||
|
|
||||||
On mainnet, ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability.
|
ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability.
|
||||||
|
|
||||||
### Topic advertisement
|
### Topic advertisement
|
||||||
|
|
||||||
#### Mainnet
|
|
||||||
|
|
||||||
discv5's topic advertisement feature is not expected to be ready for mainnet launch of Phase 0.
|
discv5's topic advertisement feature is not expected to be ready for mainnet launch of Phase 0.
|
||||||
|
|
||||||
Once this feature is built out and stable, we expect to use topic advertisement as a rendezvous facility for peers on shards. Until then, the ENR [attestation subnet bitfield](#attestation-subnet-bitfield) will be used for discovery of peers on particular subnets.
|
Once this feature is built out and stable, we expect to use topic advertisement as a rendezvous facility for peers on shards. Until then, the ENR [attestation subnet bitfield](#attestation-subnet-bitfield) will be used for discovery of peers on particular subnets.
|
||||||
|
@ -809,7 +774,7 @@ Modeling for upgradeability and dynamic transport selection from the get-go lays
|
||||||
|
|
||||||
Clients can adopt new transports without breaking old ones, and the multi-transport ability enables constrained and sandboxed environments (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers.
|
Clients can adopt new transports without breaking old ones, and the multi-transport ability enables constrained and sandboxed environments (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers.
|
||||||
|
|
||||||
### Why are we not using QUIC for mainnet from the start?
|
### Why are we not using QUIC from the start?
|
||||||
|
|
||||||
The QUIC standard is still not finalized (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic).
|
The QUIC standard is still not finalized (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic).
|
||||||
|
|
||||||
|
@ -825,13 +790,13 @@ Overlay multiplexers are not necessary with QUIC since the protocol provides nat
|
||||||
|
|
||||||
## Protocol Negotiation
|
## Protocol Negotiation
|
||||||
|
|
||||||
### When is multiselect 2.0 due and why are we using it for mainnet?
|
### When is multiselect 2.0 due and why do we plan to migrate to it?
|
||||||
|
|
||||||
multiselect 2.0 is currently being conceptualized. The debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded—as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system.
|
multiselect 2.0 is currently being conceptualized. The debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded—as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system.
|
||||||
|
|
||||||
In the following weeks (August 2019), there will be a renewed initiative to first define the requirements, constraints, assumptions, and features, in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation.
|
At some point in 2020, we expect a renewed initiative to first define the requirements, constraints, assumptions, and features, in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation.
|
||||||
|
|
||||||
We plan to use multiselect 2.0 for mainnet because it will:
|
We plan to eventually migrate to multiselect 2.0 because it will:
|
||||||
|
|
||||||
1. Reduce round trips during connection bootstrapping and stream protocol negotiation.
|
1. Reduce round trips during connection bootstrapping and stream protocol negotiation.
|
||||||
2. Enable efficient one-stream-per-request interaction patterns.
|
2. Enable efficient one-stream-per-request interaction patterns.
|
||||||
|
@ -853,17 +818,15 @@ At present, multistream-select 1.0 is used for both types of negotiation, but mu
|
||||||
|
|
||||||
## Encryption
|
## Encryption
|
||||||
|
|
||||||
### Why are we using SecIO for interop? Why not for mainnet?
|
### Why are we not supporting SecIO?
|
||||||
|
|
||||||
SecIO has been the default encryption layer for libp2p for years. It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale.
|
SecIO has been the default encryption layer for libp2p for years. It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale.
|
||||||
|
|
||||||
SecIO is the common denominator across the various language libraries at this stage. It is widely implemented. That’s why we have chosen to use it for initial interop to minimize overhead in getting to a basic interoperability testnet.
|
Although SecIO has wide language support, we won’t be using it for mainnet because, amongst other things, it requires several round trips to be sound, and doesn’t support early data (0-RTT data), a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping.
|
||||||
|
|
||||||
We won’t be using it for mainnet because, amongst other things, it requires several round trips to be sound, and doesn’t support early data (0-RTT data), a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping.
|
|
||||||
|
|
||||||
SecIO is not considered secure for the purposes of this spec.
|
SecIO is not considered secure for the purposes of this spec.
|
||||||
|
|
||||||
### Why are we using Noise/TLS 1.3 for mainnet?
|
### Why are we using Noise/TLS 1.3?
|
||||||
|
|
||||||
Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org):
|
Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org):
|
||||||
|
|
||||||
|
@ -889,10 +852,6 @@ Transport level encryption secures message exchange and provides properties that
|
||||||
|
|
||||||
Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.).
|
Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.).
|
||||||
|
|
||||||
### Will mainnet networking be untested when it launches?
|
|
||||||
|
|
||||||
Before launching mainnet, the testnet will be switched over to mainnet networking parameters, including Noise handshakes and other new protocols. This gives us an opportunity to drill coordinated network upgrades and verify that there are no significant upgradeability gaps.
|
|
||||||
|
|
||||||
## Gossipsub
|
## Gossipsub
|
||||||
|
|
||||||
### Why are we using a pub/sub algorithm for block and attestation propagation?
|
### Why are we using a pub/sub algorithm for block and attestation propagation?
|
||||||
|
@ -1001,7 +960,7 @@ Requests are segregated by protocol ID to:
|
||||||
2. Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2).
|
2. Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2).
|
||||||
7. Allow us to simplify the payload of requests. Request-id’s and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework.
|
7. Allow us to simplify the payload of requests. Request-id’s and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework.
|
||||||
|
|
||||||
**Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. Multiselect 2.0 will remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to hinder interop testing. More info is to be released from the libp2p community in the coming weeks.
|
**Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. Multiselect 2.0 will eventually remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to significantly hinder this domain.
|
||||||
|
|
||||||
### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?
|
### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?
|
||||||
|
|
||||||
|
@ -1092,6 +1051,12 @@ discv5 uses ENRs and we will presumably need to:
|
||||||
1. Add `multiaddr` to the dictionary, so that nodes can advertise their multiaddr under a reserved namespace in ENRs. – and/or –
|
1. Add `multiaddr` to the dictionary, so that nodes can advertise their multiaddr under a reserved namespace in ENRs. – and/or –
|
||||||
2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Eth 1.0 nodes).
|
2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Eth 1.0 nodes).
|
||||||
|
|
||||||
|
### Why do we not form ENRs and find peers until genesis block/state is known?
|
||||||
|
|
||||||
|
Although client software might very well be running locally prior to the solidification of the eth2 genesis state and block, clients cannot form valid ENRs prior to this point. ENRs contain `fork_digest`, which utilizes the `genesis_validators_root` for a cleaner separation between chains, so prior to knowing genesis, we cannot use `fork_digest` to cleanly find peers on our intended chain. Once genesis data is known, we can then form ENRs and safely find peers.
|
||||||
|
|
||||||
|
When using an eth1 deposit contract for deposits, `fork_digest` will be known `GENESIS_DELAY` (48 hours in mainnet configuration) before `genesis_time`, providing ample time to find peers and form initial connections and gossip subnets prior to genesis.
|
||||||
|
|
||||||
## Compression/Encoding
|
## Compression/Encoding
|
||||||
|
|
||||||
### Why are we using SSZ for encoding?
|
### Why are we using SSZ for encoding?
|
||||||
|
@ -1114,7 +1079,7 @@ For all these reasons, generically negotiating compression algorithms may be tre
|
||||||
|
|
||||||
At this stage, the wisest choice is to consider libp2p a messenger of bytes, and to make application layer participate in compressing those bytes. This looks different depending on the interaction layer:
|
At this stage, the wisest choice is to consider libp2p a messenger of bytes, and to make application layer participate in compressing those bytes. This looks different depending on the interaction layer:
|
||||||
|
|
||||||
- Gossip domain: since gossipsub has a framing protocol and exposes an API, we compress the payload (when dictated by the encoding token in the topic name) prior to publishing the message via the API. No length prefixing is necessary because protobuf takes care of bounding the field in the serialized form.
|
- Gossip domain: since gossipsub has a framing protocol and exposes an API, we compress the payload (when dictated by the encoding token in the topic name) prior to publishing the message via the API. No length-prefixing is necessary because protobuf takes care of bounding the field in the serialized form.
|
||||||
- Req/Resp domain: since we define custom protocols that operate on byte streams, implementers are encouraged to encapsulate the encoding and compression logic behind MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
- Req/Resp domain: since we define custom protocols that operate on byte streams, implementers are encouraged to encapsulate the encoding and compression logic behind MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
||||||
|
|
||||||
### Why are we using Snappy for compression?
|
### Why are we using Snappy for compression?
|
||||||
|
@ -1129,6 +1094,14 @@ If your libp2p library relies on frameworks/runtimes such as Netty (jvm) or Node
|
||||||
|
|
||||||
For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md) (which is essentially no-op encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire.
|
For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md) (which is essentially no-op encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire.
|
||||||
|
|
||||||
|
### What are SSZ type size bounds?
|
||||||
|
|
||||||
|
The SSZ encoding outputs of each type have size bounds: each dynamic type, such as a list, has a "limit", which can be used to compute the maximum valid output size.
|
||||||
|
Note that for some more complex dynamic-length objects, element offsets (4 bytes each) may need to be included.
|
||||||
|
Other types are static, they have a fixed size: no dynamic-length content is involved, and the minimum and maximum bounds are the same.
|
||||||
|
|
||||||
|
For reference, the type bounds can be computed ahead of time, [as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e). It is advisable to derive these lengths from the SSZ type definitions in use, to ensure that version changes do not cause out-of-sync type bounds.
|
||||||
|
|
||||||
# libp2p implementations matrix
|
# libp2p implementations matrix
|
||||||
|
|
||||||
This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed.
|
This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed.
|
||||||
|
|
|
@ -85,11 +85,10 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph
|
||||||
|
|
||||||
| Name | Value | Unit | Duration |
|
| Name | Value | Unit | Duration |
|
||||||
| - | - | :-: | :-: |
|
| - | - | :-: | :-: |
|
||||||
| `ETH1_FOLLOW_DISTANCE` | `2**10` (= 1,024) | blocks | ~4 hours |
|
|
||||||
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | |
|
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | |
|
||||||
| `RANDOM_SUBNETS_PER_VALIDATOR` | `2**0` (= 1) | subnets | |
|
| `RANDOM_SUBNETS_PER_VALIDATOR` | `2**0` (= 1) | subnets | |
|
||||||
| `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
|
| `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
|
||||||
| `SECONDS_PER_ETH1_BLOCK` | `14` | seconds | |
|
| `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. |
|
||||||
|
|
||||||
## Becoming a validator
|
## Becoming a validator
|
||||||
|
|
||||||
|
@ -253,11 +252,13 @@ The `block.body.eth1_data` field is for block proposers to vote on recent Eth1 d
|
||||||
|
|
||||||
###### `Eth1Block`
|
###### `Eth1Block`
|
||||||
|
|
||||||
Let `Eth1Block` be an abstract object representing Eth1 blocks with the `timestamp` field available.
|
Let `Eth1Block` be an abstract object representing Eth1 blocks with the `timestamp` and deposit contract data available.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class Eth1Block(Container):
|
class Eth1Block(Container):
|
||||||
timestamp: uint64
|
timestamp: uint64
|
||||||
|
deposit_root: Root
|
||||||
|
deposit_count: uint64
|
||||||
# All other eth1 block fields
|
# All other eth1 block fields
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -281,8 +282,8 @@ def voting_period_start_time(state: BeaconState) -> uint64:
|
||||||
```python
|
```python
|
||||||
def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool:
|
def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool:
|
||||||
return (
|
return (
|
||||||
block.timestamp <= period_start - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE
|
block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= period_start
|
||||||
and block.timestamp >= period_start - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2
|
and block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 >= period_start
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -290,8 +291,14 @@ def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool:
|
||||||
def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data:
|
def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data:
|
||||||
period_start = voting_period_start_time(state)
|
period_start = voting_period_start_time(state)
|
||||||
# `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height
|
# `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height
|
||||||
votes_to_consider = [get_eth1_data(block) for block in eth1_chain if
|
votes_to_consider = [
|
||||||
is_candidate_block(block, period_start)]
|
get_eth1_data(block) for block in eth1_chain
|
||||||
|
if (
|
||||||
|
is_candidate_block(block, period_start)
|
||||||
|
# Ensure cannot move back to earlier deposit contract states
|
||||||
|
and get_eth1_data(block).deposit_count >= state.eth1_data.deposit_count
|
||||||
|
)
|
||||||
|
]
|
||||||
|
|
||||||
# Valid votes already cast during this period
|
# Valid votes already cast during this period
|
||||||
valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider]
|
valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider]
|
||||||
|
@ -340,9 +347,10 @@ It is useful to be able to run a state transition function (working on a copy of
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
|
def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
|
||||||
process_slots(state, block.slot)
|
temp_state: BeaconState = state.copy()
|
||||||
process_block(state, block)
|
signed_block = SignedBeaconBlock(message=block)
|
||||||
return hash_tree_root(state)
|
temp_state = state_transition(temp_state, signed_block, validate_result=False)
|
||||||
|
return hash_tree_root(temp_state)
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Signature
|
##### Signature
|
||||||
|
@ -350,9 +358,9 @@ def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
|
||||||
`signed_block = SignedBeaconBlock(message=block, signature=block_signature)`, where `block_signature` is obtained from:
|
`signed_block = SignedBeaconBlock(message=block, signature=block_signature)`, where `block_signature` is obtained from:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def get_block_signature(state: BeaconState, header: BeaconBlockHeader, privkey: int) -> BLSSignature:
|
def get_block_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature:
|
||||||
domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(header.slot))
|
domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(block.slot))
|
||||||
signing_root = compute_signing_root(header, domain)
|
signing_root = compute_signing_root(block, domain)
|
||||||
return bls.Sign(privkey, signing_root)
|
return bls.Sign(privkey, signing_root)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -417,7 +425,19 @@ def get_attestation_signature(state: BeaconState, attestation_data: AttestationD
|
||||||
|
|
||||||
#### Broadcast attestation
|
#### Broadcast attestation
|
||||||
|
|
||||||
Finally, the validator broadcasts `attestation` to the associated attestation subnet -- the `committee_index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` pubsub topic.
|
Finally, the validator broadcasts `attestation` to the associated attestation subnet -- the `beacon_attestation_{compute_subnet_for_attestation(state, attestation)}` pubsub topic.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def compute_subnet_for_attestation(state: BeaconState, attestation: Attestation) -> uint64:
|
||||||
|
"""
|
||||||
|
Compute the correct subnet for an attestation for Phase 0.
|
||||||
|
Note, this mimics expected Phase 1 behavior where attestations will be mapped to their shard subnet.
|
||||||
|
"""
|
||||||
|
slots_since_epoch_start = attestation.data.slot % SLOTS_PER_EPOCH
|
||||||
|
committees_since_epoch_start = get_committee_count_at_slot(state, attestation.data.slot) * slots_since_epoch_start
|
||||||
|
|
||||||
|
return (committees_since_epoch_start + attestation.data.index) % ATTESTATION_SUBNET_COUNT
|
||||||
|
```
|
||||||
|
|
||||||
### Attestation aggregation
|
### Attestation aggregation
|
||||||
|
|
||||||
|
@ -445,7 +465,7 @@ def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_si
|
||||||
|
|
||||||
If the validator is selected to aggregate (`is_aggregator()`), they construct an aggregate attestation via the following.
|
If the validator is selected to aggregate (`is_aggregator()`), they construct an aggregate attestation via the following.
|
||||||
|
|
||||||
Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator, and create an `aggregate_attestation: Attestation` with the following fields.
|
Collect `attestations` seen via gossip during the `slot` that have an equivalent `attestation_data` to that constructed by the validator. If `len(attestations) > 0`, create an `aggregate_attestation: Attestation` with the following fields.
|
||||||
|
|
||||||
##### Data
|
##### Data
|
||||||
|
|
||||||
|
@ -518,12 +538,14 @@ class SignedAggregateAndProof(Container):
|
||||||
|
|
||||||
## Phase 0 attestation subnet stability
|
## Phase 0 attestation subnet stability
|
||||||
|
|
||||||
Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`committee_index{subnet_id}_beacon_attestation`). To provide this stability, each validator must:
|
Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each validator must:
|
||||||
|
|
||||||
* Randomly select and remain subscribed to `RANDOM_SUBNETS_PER_VALIDATOR` attestation subnets
|
* Randomly select and remain subscribed to `RANDOM_SUBNETS_PER_VALIDATOR` attestation subnets
|
||||||
* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets
|
* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets
|
||||||
* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION]`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR
|
* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION]`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR
|
||||||
|
|
||||||
|
*Note*: Short-lived beacon committee assignments should not be added into the ENR `attnets` entry.
|
||||||
|
|
||||||
*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
|
*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
|
||||||
|
|
||||||
## How to avoid slashing
|
## How to avoid slashing
|
||||||
|
|
|
@ -17,51 +17,61 @@
|
||||||
- [Extended `AttestationData`](#extended-attestationdata)
|
- [Extended `AttestationData`](#extended-attestationdata)
|
||||||
- [Extended `Attestation`](#extended-attestation)
|
- [Extended `Attestation`](#extended-attestation)
|
||||||
- [Extended `PendingAttestation`](#extended-pendingattestation)
|
- [Extended `PendingAttestation`](#extended-pendingattestation)
|
||||||
- [`IndexedAttestation`](#indexedattestation)
|
- [Extended `IndexedAttestation`](#extended-indexedattestation)
|
||||||
- [Extended `AttesterSlashing`](#extended-attesterslashing)
|
- [Extended `AttesterSlashing`](#extended-attesterslashing)
|
||||||
- [Extended `Validator`](#extended-validator)
|
- [Extended `Validator`](#extended-validator)
|
||||||
- [Extended `BeaconBlockBody`](#extended-beaconblockbody)
|
- [Extended `BeaconBlockBody`](#extended-beaconblockbody)
|
||||||
- [Extended `BeaconBlock`](#extended-beaconblock)
|
- [Extended `BeaconBlock`](#extended-beaconblock)
|
||||||
- [Extended `SignedBeaconBlock`](#extended-signedbeaconblock)
|
- [Extended `SignedBeaconBlock`](#extended-signedbeaconblock)
|
||||||
- [Extended `BeaconState`](#extended-beaconstate)
|
- [Extended `BeaconState`](#extended-beaconstate)
|
||||||
- [New containers](#new-containers)
|
- [New containers](#new-containers)
|
||||||
- [`ShardBlockWrapper`](#shardblockwrapper)
|
- [`ShardBlock`](#shardblock)
|
||||||
- [`ShardSignableHeader`](#shardsignableheader)
|
- [`SignedShardBlock`](#signedshardblock)
|
||||||
|
- [`ShardBlockHeader`](#shardblockheader)
|
||||||
- [`ShardState`](#shardstate)
|
- [`ShardState`](#shardstate)
|
||||||
- [`ShardTransition`](#shardtransition)
|
- [`ShardTransition`](#shardtransition)
|
||||||
- [`CompactCommittee`](#compactcommittee)
|
- [`CompactCommittee`](#compactcommittee)
|
||||||
- [`AttestationCustodyBitWrapper`](#attestationcustodybitwrapper)
|
- [`AttestationCustodyBitWrapper`](#attestationcustodybitwrapper)
|
||||||
- [Helper functions](#helper-functions)
|
- [Helper functions](#helper-functions)
|
||||||
- [Misc](#misc-1)
|
- [Misc](#misc-1)
|
||||||
- [`get_previous_slot`](#get_previous_slot)
|
- [`compute_previous_slot`](#compute_previous_slot)
|
||||||
- [`pack_compact_validator`](#pack_compact_validator)
|
- [`pack_compact_validator`](#pack_compact_validator)
|
||||||
- [`unpack_compact_validator`](#unpack_compact_validator)
|
- [`unpack_compact_validator`](#unpack_compact_validator)
|
||||||
- [`committee_to_compact_committee`](#committee_to_compact_committee)
|
- [`committee_to_compact_committee`](#committee_to_compact_committee)
|
||||||
- [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
|
- [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
|
||||||
|
- [`compute_offset_slots`](#compute_offset_slots)
|
||||||
|
- [`compute_updated_gasprice`](#compute_updated_gasprice)
|
||||||
|
- [`compute_committee_source_epoch`](#compute_committee_source_epoch)
|
||||||
- [Beacon state accessors](#beacon-state-accessors)
|
- [Beacon state accessors](#beacon-state-accessors)
|
||||||
- [`get_active_shard_count`](#get_active_shard_count)
|
- [`get_active_shard_count`](#get_active_shard_count)
|
||||||
- [`get_online_validator_indices`](#get_online_validator_indices)
|
- [`get_online_validator_indices`](#get_online_validator_indices)
|
||||||
- [`get_shard_committee`](#get_shard_committee)
|
- [`get_shard_committee`](#get_shard_committee)
|
||||||
- [`get_shard_proposer_index`](#get_shard_proposer_index)
|
|
||||||
- [`get_light_client_committee`](#get_light_client_committee)
|
- [`get_light_client_committee`](#get_light_client_committee)
|
||||||
|
- [`get_shard_proposer_index`](#get_shard_proposer_index)
|
||||||
- [`get_indexed_attestation`](#get_indexed_attestation)
|
- [`get_indexed_attestation`](#get_indexed_attestation)
|
||||||
- [`get_updated_gasprice`](#get_updated_gasprice)
|
|
||||||
- [`get_start_shard`](#get_start_shard)
|
- [`get_start_shard`](#get_start_shard)
|
||||||
- [`get_shard`](#get_shard)
|
- [`get_shard`](#get_shard)
|
||||||
- [`get_latest_slot_for_shard`](#get_latest_slot_for_shard)
|
- [`get_latest_slot_for_shard`](#get_latest_slot_for_shard)
|
||||||
- [`get_offset_slots`](#get_offset_slots)
|
- [`get_offset_slots`](#get_offset_slots)
|
||||||
- [Predicates](#predicates)
|
- [Predicates](#predicates)
|
||||||
|
- [`verify_attestation_custody`](#verify_attestation_custody)
|
||||||
- [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation)
|
- [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation)
|
||||||
|
- [`is_on_time_attestation`](#is_on_time_attestation)
|
||||||
|
- [`is_winning_attestation`](#is_winning_attestation)
|
||||||
|
- [`optional_aggregate_verify`](#optional_aggregate_verify)
|
||||||
|
- [`optional_fast_aggregate_verify`](#optional_fast_aggregate_verify)
|
||||||
- [Block processing](#block-processing)
|
- [Block processing](#block-processing)
|
||||||
- [Operations](#operations)
|
- [Operations](#operations)
|
||||||
- [New Attestation processing](#new-attestation-processing)
|
- [New Attestation processing](#new-attestation-processing)
|
||||||
- [`validate_attestation`](#validate_attestation)
|
- [`validate_attestation`](#validate_attestation)
|
||||||
|
- [Updated `process_attestation`](#updated-process_attestation)
|
||||||
|
- [Shard transition processing](#shard-transition-processing)
|
||||||
- [`apply_shard_transition`](#apply_shard_transition)
|
- [`apply_shard_transition`](#apply_shard_transition)
|
||||||
- [`process_crosslink_for_shard`](#process_crosslink_for_shard)
|
- [`process_crosslink_for_shard`](#process_crosslink_for_shard)
|
||||||
- [`process_crosslinks`](#process_crosslinks)
|
- [`process_crosslinks`](#process_crosslinks)
|
||||||
- [`process_attestation`](#process_attestation)
|
- [`verify_empty_shard_transition`](#verify_empty_shard_transition)
|
||||||
|
- [`process_shard_transitions`](#process_shard_transitions)
|
||||||
- [New Attester slashing processing](#new-attester-slashing-processing)
|
- [New Attester slashing processing](#new-attester-slashing-processing)
|
||||||
- [Shard transition false positives](#shard-transition-false-positives)
|
|
||||||
- [Light client processing](#light-client-processing)
|
- [Light client processing](#light-client-processing)
|
||||||
- [Epoch transition](#epoch-transition)
|
- [Epoch transition](#epoch-transition)
|
||||||
- [Custody game updates](#custody-game-updates)
|
- [Custody game updates](#custody-game-updates)
|
||||||
|
@ -97,7 +107,6 @@ Configuration is not namespaced. Instead it is strictly an extension;
|
||||||
| `ONLINE_PERIOD` | `OnlineEpochs(2**3)` (= 8) | online epochs | ~51 min |
|
| `ONLINE_PERIOD` | `OnlineEpochs(2**3)` (= 8) | online epochs | ~51 min |
|
||||||
| `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) |
|
| `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) |
|
||||||
| `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
|
| `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
|
||||||
| `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
|
|
||||||
| `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | |
|
| `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | |
|
||||||
| `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | |
|
| `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | |
|
||||||
| `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | |
|
| `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | |
|
||||||
|
@ -105,6 +114,7 @@ Configuration is not namespaced. Instead it is strictly an extension;
|
||||||
| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | |
|
| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | |
|
||||||
| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | |
|
| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | |
|
||||||
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `2**3` (= 8) | |
|
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `2**3` (= 8) | |
|
||||||
|
| `NO_SIGNATURE` | `BLSSignature(b'\x00' * 96)` | |
|
||||||
|
|
||||||
### Domain types
|
### Domain types
|
||||||
|
|
||||||
|
@ -132,7 +142,7 @@ class AttestationData(Container):
|
||||||
source: Checkpoint
|
source: Checkpoint
|
||||||
target: Checkpoint
|
target: Checkpoint
|
||||||
# Current-slot shard block root
|
# Current-slot shard block root
|
||||||
head_shard_root: Root
|
shard_head_root: Root
|
||||||
# Shard transition root
|
# Shard transition root
|
||||||
shard_transition_root: Root
|
shard_transition_root: Root
|
||||||
```
|
```
|
||||||
|
@ -155,10 +165,11 @@ class PendingAttestation(Container):
|
||||||
data: AttestationData
|
data: AttestationData
|
||||||
inclusion_delay: Slot
|
inclusion_delay: Slot
|
||||||
proposer_index: ValidatorIndex
|
proposer_index: ValidatorIndex
|
||||||
|
# Phase 1
|
||||||
crosslink_success: boolean
|
crosslink_success: boolean
|
||||||
```
|
```
|
||||||
|
|
||||||
### `IndexedAttestation`
|
### Extended `IndexedAttestation`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class IndexedAttestation(Container):
|
class IndexedAttestation(Container):
|
||||||
|
@ -166,7 +177,7 @@ class IndexedAttestation(Container):
|
||||||
attestation: Attestation
|
attestation: Attestation
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Extended `AttesterSlashing`
|
### Extended `AttesterSlashing`
|
||||||
|
|
||||||
Note that the `attestation_1` and `attestation_2` have a new `IndexedAttestation` definition.
|
Note that the `attestation_1` and `attestation_2` have a new `IndexedAttestation` definition.
|
||||||
|
|
||||||
|
@ -297,26 +308,35 @@ class BeaconState(Container):
|
||||||
|
|
||||||
The following containers are new in Phase 1.
|
The following containers are new in Phase 1.
|
||||||
|
|
||||||
### `ShardBlockWrapper`
|
### `ShardBlock`
|
||||||
|
|
||||||
_Wrapper for being broadcasted over the network._
|
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class ShardBlockWrapper(Container):
|
class ShardBlock(Container):
|
||||||
shard_parent_root: Root
|
shard_parent_root: Root
|
||||||
beacon_parent_root: Root
|
beacon_parent_root: Root
|
||||||
slot: Slot
|
slot: Slot
|
||||||
|
shard: Shard
|
||||||
|
proposer_index: ValidatorIndex
|
||||||
body: ByteList[MAX_SHARD_BLOCK_SIZE]
|
body: ByteList[MAX_SHARD_BLOCK_SIZE]
|
||||||
|
```
|
||||||
|
|
||||||
|
### `SignedShardBlock`
|
||||||
|
|
||||||
|
```python
|
||||||
|
class SignedShardBlock(Container):
|
||||||
|
message: ShardBlock
|
||||||
signature: BLSSignature
|
signature: BLSSignature
|
||||||
```
|
```
|
||||||
|
|
||||||
### `ShardSignableHeader`
|
### `ShardBlockHeader`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class ShardSignableHeader(Container):
|
class ShardBlockHeader(Container):
|
||||||
shard_parent_root: Root
|
shard_parent_root: Root
|
||||||
beacon_parent_root: Root
|
beacon_parent_root: Root
|
||||||
slot: Slot
|
slot: Slot
|
||||||
|
shard: Shard
|
||||||
|
proposer_index: ValidatorIndex
|
||||||
body_root: Root
|
body_root: Root
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -326,7 +346,7 @@ class ShardSignableHeader(Container):
|
||||||
class ShardState(Container):
|
class ShardState(Container):
|
||||||
slot: Slot
|
slot: Slot
|
||||||
gasprice: Gwei
|
gasprice: Gwei
|
||||||
data: Bytes32
|
transition_digest: Bytes32
|
||||||
latest_block_root: Root
|
latest_block_root: Root
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -367,10 +387,10 @@ class AttestationCustodyBitWrapper(Container):
|
||||||
|
|
||||||
### Misc
|
### Misc
|
||||||
|
|
||||||
#### `get_previous_slot`
|
#### `compute_previous_slot`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def get_previous_slot(slot: Slot) -> Slot:
|
def compute_previous_slot(slot: Slot) -> Slot:
|
||||||
if slot > 0:
|
if slot > 0:
|
||||||
return Slot(slot - 1)
|
return Slot(slot - 1)
|
||||||
else:
|
else:
|
||||||
|
@ -408,7 +428,7 @@ def unpack_compact_validator(compact_validator: uint64) -> Tuple[ValidatorIndex,
|
||||||
```python
|
```python
|
||||||
def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee:
|
def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee:
|
||||||
"""
|
"""
|
||||||
Given a state and a list of validator indices, outputs the CompactCommittee representing them.
|
Given a state and a list of validator indices, outputs the ``CompactCommittee`` representing them.
|
||||||
"""
|
"""
|
||||||
validators = [state.validators[i] for i in committee]
|
validators = [state.validators[i] for i in committee]
|
||||||
compact_validators = [
|
compact_validators = [
|
||||||
|
@ -427,6 +447,43 @@ def compute_shard_from_committee_index(state: BeaconState, index: CommitteeIndex
|
||||||
return Shard((index + get_start_shard(state, slot)) % active_shards)
|
return Shard((index + get_start_shard(state, slot)) % active_shards)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### `compute_offset_slots`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def compute_offset_slots(start_slot: Slot, end_slot: Slot) -> Sequence[Slot]:
|
||||||
|
"""
|
||||||
|
Return the offset slots that are greater than ``start_slot`` and less than ``end_slot``.
|
||||||
|
"""
|
||||||
|
return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < end_slot]
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `compute_updated_gasprice`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint8) -> Gwei:
|
||||||
|
if shard_block_length > TARGET_SHARD_BLOCK_SIZE:
|
||||||
|
delta = (prev_gasprice * (shard_block_length - TARGET_SHARD_BLOCK_SIZE)
|
||||||
|
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
|
||||||
|
return min(prev_gasprice + delta, MAX_GASPRICE)
|
||||||
|
else:
|
||||||
|
delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - shard_block_length)
|
||||||
|
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
|
||||||
|
return max(prev_gasprice, MIN_GASPRICE + delta) - delta
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `compute_committee_source_epoch`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def compute_committee_source_epoch(epoch: Epoch, period: uint64) -> Epoch:
|
||||||
|
"""
|
||||||
|
Return the source epoch for computing the committee.
|
||||||
|
"""
|
||||||
|
source_epoch = epoch - epoch % period
|
||||||
|
if source_epoch >= period:
|
||||||
|
source_epoch -= period # `period` epochs lookahead
|
||||||
|
return source_epoch
|
||||||
|
```
|
||||||
|
|
||||||
### Beacon state accessors
|
### Beacon state accessors
|
||||||
|
|
||||||
#### `get_active_shard_count`
|
#### `get_active_shard_count`
|
||||||
|
@ -448,12 +505,37 @@ def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
|
def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
|
||||||
source_epoch = epoch - epoch % SHARD_COMMITTEE_PERIOD
|
"""
|
||||||
if source_epoch > 0:
|
Return the shard committee of the given ``epoch`` of the given ``shard``.
|
||||||
source_epoch -= SHARD_COMMITTEE_PERIOD
|
"""
|
||||||
|
source_epoch = compute_committee_source_epoch(epoch, SHARD_COMMITTEE_PERIOD)
|
||||||
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
|
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
|
||||||
seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE)
|
seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE)
|
||||||
return compute_committee(active_validator_indices, seed, shard, get_active_shard_count(beacon_state))
|
active_shard_count = get_active_shard_count(beacon_state)
|
||||||
|
return compute_committee(
|
||||||
|
indices=active_validator_indices,
|
||||||
|
seed=seed,
|
||||||
|
index=shard,
|
||||||
|
count=active_shard_count,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `get_light_client_committee`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
|
||||||
|
"""
|
||||||
|
Return the light client committee of no more than ``TARGET_COMMITTEE_SIZE`` validators.
|
||||||
|
"""
|
||||||
|
source_epoch = compute_committee_source_epoch(epoch, LIGHT_CLIENT_COMMITTEE_PERIOD)
|
||||||
|
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
|
||||||
|
seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT)
|
||||||
|
return compute_committee(
|
||||||
|
indices=active_validator_indices,
|
||||||
|
seed=seed,
|
||||||
|
index=0,
|
||||||
|
count=get_active_shard_count(beacon_state),
|
||||||
|
)[:TARGET_COMMITTEE_SIZE]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `get_shard_proposer_index`
|
#### `get_shard_proposer_index`
|
||||||
|
@ -465,19 +547,6 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard
|
||||||
return committee[r % len(committee)]
|
return committee[r % len(committee)]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `get_light_client_committee`
|
|
||||||
|
|
||||||
```python
|
|
||||||
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
|
|
||||||
source_epoch = epoch - epoch % LIGHT_CLIENT_COMMITTEE_PERIOD
|
|
||||||
if source_epoch > 0:
|
|
||||||
source_epoch -= LIGHT_CLIENT_COMMITTEE_PERIOD
|
|
||||||
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
|
|
||||||
seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT)
|
|
||||||
active_shards = get_active_shard_count(beacon_state)
|
|
||||||
return compute_committee(active_validator_indices, seed, 0, active_shards)[:TARGET_COMMITTEE_SIZE]
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `get_indexed_attestation`
|
#### `get_indexed_attestation`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -489,20 +558,6 @@ def get_indexed_attestation(beacon_state: BeaconState, attestation: Attestation)
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `get_updated_gasprice`
|
|
||||||
|
|
||||||
```python
|
|
||||||
def get_updated_gasprice(prev_gasprice: Gwei, length: uint8) -> Gwei:
|
|
||||||
if length > TARGET_SHARD_BLOCK_SIZE:
|
|
||||||
delta = (prev_gasprice * (length - TARGET_SHARD_BLOCK_SIZE)
|
|
||||||
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
|
|
||||||
return min(prev_gasprice + delta, MAX_GASPRICE)
|
|
||||||
else:
|
|
||||||
delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - length)
|
|
||||||
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
|
|
||||||
return max(prev_gasprice, MIN_GASPRICE + delta) - delta
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `get_start_shard`
|
#### `get_start_shard`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -528,12 +583,46 @@ def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot:
|
||||||
#### `get_offset_slots`
|
#### `get_offset_slots`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def get_offset_slots(state: BeaconState, latest_shard_slot: Slot) -> Sequence[Slot]:
|
def get_offset_slots(state: BeaconState, shard: Shard) -> Sequence[Slot]:
|
||||||
return [Slot(latest_shard_slot + x) for x in SHARD_BLOCK_OFFSETS if latest_shard_slot + x < state.slot]
|
"""
|
||||||
|
Return the offset slots of the given ``shard`` between that latest included slot and current slot.
|
||||||
|
"""
|
||||||
|
return compute_offset_slots(get_latest_slot_for_shard(state, shard), state.slot)
|
||||||
```
|
```
|
||||||
|
|
||||||
### Predicates
|
### Predicates
|
||||||
|
|
||||||
|
#### `verify_attestation_custody`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def verify_attestation_custody(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
|
||||||
|
"""
|
||||||
|
Check if ``indexed_attestation`` has valid signature against non-empty custody bits.
|
||||||
|
"""
|
||||||
|
attestation = indexed_attestation.attestation
|
||||||
|
aggregation_bits = attestation.aggregation_bits
|
||||||
|
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
|
||||||
|
all_pubkeys = []
|
||||||
|
all_signing_roots = []
|
||||||
|
for block_index, custody_bits in enumerate(attestation.custody_bits_blocks):
|
||||||
|
assert len(custody_bits) == len(indexed_attestation.committee)
|
||||||
|
for participant, aggregation_bit, custody_bit in zip(
|
||||||
|
indexed_attestation.committee, aggregation_bits, custody_bits
|
||||||
|
):
|
||||||
|
if aggregation_bit:
|
||||||
|
all_pubkeys.append(state.validators[participant].pubkey)
|
||||||
|
# Note: only 2N distinct message hashes
|
||||||
|
attestation_wrapper = AttestationCustodyBitWrapper(
|
||||||
|
attestation_data_root=hash_tree_root(attestation.data),
|
||||||
|
block_index=block_index,
|
||||||
|
bit=custody_bit,
|
||||||
|
)
|
||||||
|
all_signing_roots.append(compute_signing_root(attestation_wrapper, domain))
|
||||||
|
else:
|
||||||
|
assert not custody_bit
|
||||||
|
return bls.AggregateVerify(all_pubkeys, all_signing_roots, signature=attestation.signature)
|
||||||
|
```
|
||||||
|
|
||||||
#### Updated `is_valid_indexed_attestation`
|
#### Updated `is_valid_indexed_attestation`
|
||||||
|
|
||||||
Note that this replaces the Phase 0 `is_valid_indexed_attestation`.
|
Note that this replaces the Phase 0 `is_valid_indexed_attestation`.
|
||||||
|
@ -544,38 +633,83 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
|
||||||
Check if ``indexed_attestation`` has valid indices and signature.
|
Check if ``indexed_attestation`` has valid indices and signature.
|
||||||
"""
|
"""
|
||||||
# Verify aggregate signature
|
# Verify aggregate signature
|
||||||
all_pubkeys = []
|
|
||||||
all_signing_roots = []
|
|
||||||
attestation = indexed_attestation.attestation
|
attestation = indexed_attestation.attestation
|
||||||
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
|
|
||||||
aggregation_bits = attestation.aggregation_bits
|
aggregation_bits = attestation.aggregation_bits
|
||||||
assert len(aggregation_bits) == len(indexed_attestation.committee)
|
if not any(aggregation_bits) or len(aggregation_bits) != len(indexed_attestation.committee):
|
||||||
|
return False
|
||||||
|
|
||||||
if len(attestation.custody_bits_blocks) == 0:
|
if len(attestation.custody_bits_blocks) == 0:
|
||||||
# fall back on phase0 behavior if there is no shard data.
|
# fall back on phase0 behavior if there is no shard data.
|
||||||
for participant, abit in zip(indexed_attestation.committee, aggregation_bits):
|
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
|
||||||
if abit:
|
all_pubkeys = []
|
||||||
|
for participant, aggregation_bit in zip(indexed_attestation.committee, aggregation_bits):
|
||||||
|
if aggregation_bit:
|
||||||
all_pubkeys.append(state.validators[participant].pubkey)
|
all_pubkeys.append(state.validators[participant].pubkey)
|
||||||
signing_root = compute_signing_root(indexed_attestation.attestation.data, domain)
|
signing_root = compute_signing_root(indexed_attestation.attestation.data, domain)
|
||||||
return bls.FastAggregateVerify(all_pubkeys, signing_root, signature=attestation.signature)
|
return bls.FastAggregateVerify(all_pubkeys, signing_root, signature=attestation.signature)
|
||||||
else:
|
else:
|
||||||
for i, custody_bits in enumerate(attestation.custody_bits_blocks):
|
return verify_attestation_custody(state, indexed_attestation)
|
||||||
assert len(custody_bits) == len(indexed_attestation.committee)
|
|
||||||
for participant, abit, cbit in zip(indexed_attestation.committee, aggregation_bits, custody_bits):
|
|
||||||
if abit:
|
|
||||||
all_pubkeys.append(state.validators[participant].pubkey)
|
|
||||||
# Note: only 2N distinct message hashes
|
|
||||||
attestation_wrapper = AttestationCustodyBitWrapper(
|
|
||||||
attestation_data_root=hash_tree_root(attestation.data),
|
|
||||||
block_index=i,
|
|
||||||
bit=cbit
|
|
||||||
)
|
|
||||||
all_signing_roots.append(compute_signing_root(attestation_wrapper, domain))
|
|
||||||
else:
|
|
||||||
assert not cbit
|
|
||||||
return bls.AggregateVerify(zip(all_pubkeys, all_signing_roots), signature=attestation.signature)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### `is_on_time_attestation`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def is_on_time_attestation(state: BeaconState,
|
||||||
|
attestation: Attestation) -> bool:
|
||||||
|
"""
|
||||||
|
Check if the given attestation is on-time.
|
||||||
|
"""
|
||||||
|
# TODO: MIN_ATTESTATION_INCLUSION_DELAY should always be 1
|
||||||
|
return attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `is_winning_attestation`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def is_winning_attestation(state: BeaconState,
|
||||||
|
attestation: PendingAttestation,
|
||||||
|
committee_index: CommitteeIndex,
|
||||||
|
winning_root: Root) -> bool:
|
||||||
|
"""
|
||||||
|
Check if ``attestation`` helped contribute to the successful crosslink of
|
||||||
|
``winning_root`` formed by ``committee_index`` committee at the current slot.
|
||||||
|
"""
|
||||||
|
return (
|
||||||
|
attestation.data.slot == state.slot
|
||||||
|
and attestation.data.index == committee_index
|
||||||
|
and attestation.data.shard_transition_root == winning_root
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `optional_aggregate_verify`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def optional_aggregate_verify(pubkeys: Sequence[BLSPubkey],
|
||||||
|
messages: Sequence[Bytes32],
|
||||||
|
signature: BLSSignature) -> bool:
|
||||||
|
"""
|
||||||
|
If ``pubkeys`` is an empty list, the given ``signature`` should be a stub ``NO_SIGNATURE``.
|
||||||
|
Otherwise, verify it with standard BLS AggregateVerify API.
|
||||||
|
"""
|
||||||
|
if len(pubkeys) == 0:
|
||||||
|
return signature == NO_SIGNATURE
|
||||||
|
else:
|
||||||
|
return bls.AggregateVerify(pubkeys, messages, signature)
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `optional_fast_aggregate_verify`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def optional_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
|
||||||
|
"""
|
||||||
|
If ``pubkeys`` is an empty list, the given ``signature`` should be a stub ``NO_SIGNATURE``.
|
||||||
|
Otherwise, verify it with standard BLS FastAggregateVerify API.
|
||||||
|
"""
|
||||||
|
if len(pubkeys) == 0:
|
||||||
|
return signature == NO_SIGNATURE
|
||||||
|
else:
|
||||||
|
return bls.FastAggregateVerify(pubkeys, message, signature)
|
||||||
|
```
|
||||||
|
|
||||||
### Block processing
|
### Block processing
|
||||||
|
|
||||||
|
@ -584,12 +718,10 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||||
process_block_header(state, block)
|
process_block_header(state, block)
|
||||||
process_randao(state, block.body)
|
process_randao(state, block.body)
|
||||||
process_eth1_data(state, block.body)
|
process_eth1_data(state, block.body)
|
||||||
verify_shard_transition_false_positives(state, block.body)
|
|
||||||
process_light_client_aggregate(state, block.body)
|
process_light_client_aggregate(state, block.body)
|
||||||
process_operations(state, block.body)
|
process_operations(state, block.body)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
#### Operations
|
#### Operations
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -611,7 +743,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||||
# See custody game spec.
|
# See custody game spec.
|
||||||
process_custody_game_operations(state, body)
|
process_custody_game_operations(state, body)
|
||||||
|
|
||||||
process_crosslinks(state, body.shard_transitions, body.attestations)
|
process_shard_transitions(state, body.shard_transitions, body.attestations)
|
||||||
|
|
||||||
# TODO process_operations(body.shard_receipt_proofs, process_shard_receipt_proofs)
|
# TODO process_operations(body.shard_receipt_proofs, process_shard_receipt_proofs)
|
||||||
```
|
```
|
||||||
|
@ -638,17 +770,16 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||||
assert attestation.data.source == state.previous_justified_checkpoint
|
assert attestation.data.source == state.previous_justified_checkpoint
|
||||||
|
|
||||||
shard = get_shard(state, attestation)
|
shard = get_shard(state, attestation)
|
||||||
latest_shard_slot = get_latest_slot_for_shard(state, shard)
|
|
||||||
|
|
||||||
# Type 1: on-time attestations
|
# Type 1: on-time attestations, the custody bits should be non-empty.
|
||||||
if attestation.custody_bits_blocks != []:
|
if attestation.custody_bits_blocks != []:
|
||||||
# Ensure on-time attestation
|
# Ensure on-time attestation
|
||||||
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot
|
assert is_on_time_attestation(state, attestation)
|
||||||
# Correct data root count
|
# Correct data root count
|
||||||
assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, latest_shard_slot))
|
assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard))
|
||||||
# Correct parent block root
|
# Correct parent block root
|
||||||
assert data.beacon_block_root == get_block_root_at_slot(state, get_previous_slot(state.slot))
|
assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))
|
||||||
# Type 2: no shard transition, no custody bits # TODO: could only allow for older attestations.
|
# Type 2: no shard transition, no custody bits
|
||||||
else:
|
else:
|
||||||
# Ensure delayed attestation
|
# Ensure delayed attestation
|
||||||
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY < state.slot
|
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY < state.slot
|
||||||
|
@ -659,15 +790,36 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||||
assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
|
assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
|
||||||
```
|
```
|
||||||
|
|
||||||
|
###### Updated `process_attestation`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||||
|
validate_attestation(state, attestation)
|
||||||
|
# Store pending attestation for epoch processing
|
||||||
|
pending_attestation = PendingAttestation(
|
||||||
|
aggregation_bits=attestation.aggregation_bits,
|
||||||
|
data=attestation.data,
|
||||||
|
inclusion_delay=state.slot - attestation.data.slot,
|
||||||
|
proposer_index=get_beacon_proposer_index(state),
|
||||||
|
crosslink_success=False, # To be filled in during process_shard_transitions
|
||||||
|
)
|
||||||
|
if attestation.data.target.epoch == get_current_epoch(state):
|
||||||
|
state.current_epoch_attestations.append(pending_attestation)
|
||||||
|
else:
|
||||||
|
state.previous_epoch_attestations.append(pending_attestation)
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Shard transition processing
|
||||||
|
|
||||||
###### `apply_shard_transition`
|
###### `apply_shard_transition`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTransition) -> None:
|
def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTransition) -> None:
|
||||||
# Slot the attestation starts counting from
|
# TODO: only need to check it once when phase 1 starts
|
||||||
latest_slot = get_latest_slot_for_shard(state, shard)
|
assert state.slot > PHASE_1_GENESIS_SLOT
|
||||||
|
|
||||||
# Correct data root count
|
# Correct data root count
|
||||||
offset_slots = get_offset_slots(state, latest_slot)
|
offset_slots = get_offset_slots(state, shard)
|
||||||
assert (
|
assert (
|
||||||
len(transition.shard_data_roots)
|
len(transition.shard_data_roots)
|
||||||
== len(transition.shard_states)
|
== len(transition.shard_states)
|
||||||
|
@ -676,28 +828,33 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
|
||||||
)
|
)
|
||||||
assert transition.start_slot == offset_slots[0]
|
assert transition.start_slot == offset_slots[0]
|
||||||
|
|
||||||
# Reconstruct shard headers
|
|
||||||
headers = []
|
headers = []
|
||||||
proposers = []
|
proposers = []
|
||||||
shard_parent_root = state.shard_states[shard].latest_block_root
|
|
||||||
for i in range(len(offset_slots)):
|
|
||||||
if any(transition.shard_data_roots):
|
|
||||||
headers.append(ShardSignableHeader(
|
|
||||||
shard_parent_root=shard_parent_root,
|
|
||||||
parent_hash=get_block_root_at_slot(state, get_previous_slot(state.slot)),
|
|
||||||
slot=offset_slots[i],
|
|
||||||
body_root=transition.shard_data_roots[i]
|
|
||||||
))
|
|
||||||
proposers.append(get_shard_proposer_index(state, shard, offset_slots[i]))
|
|
||||||
shard_parent_root = hash_tree_root(headers[-1])
|
|
||||||
|
|
||||||
# Verify correct calculation of gas prices and slots
|
|
||||||
prev_gasprice = state.shard_states[shard].gasprice
|
prev_gasprice = state.shard_states[shard].gasprice
|
||||||
for i in range(len(offset_slots)):
|
shard_parent_root = state.shard_states[shard].latest_block_root
|
||||||
|
for i, offset_slot in enumerate(offset_slots):
|
||||||
|
shard_block_length = transition.shard_block_lengths[i]
|
||||||
shard_state = transition.shard_states[i]
|
shard_state = transition.shard_states[i]
|
||||||
block_length = transition.shard_block_lengths[i]
|
# Verify correct calculation of gas prices and slots
|
||||||
assert shard_state.gasprice == get_updated_gasprice(prev_gasprice, block_length)
|
assert shard_state.gasprice == compute_updated_gasprice(prev_gasprice, shard_block_length)
|
||||||
assert shard_state.slot == offset_slots[i]
|
assert shard_state.slot == offset_slot
|
||||||
|
# Collect the non-empty proposals result
|
||||||
|
is_empty_proposal = shard_block_length == 0
|
||||||
|
if not is_empty_proposal:
|
||||||
|
proposal_index = get_shard_proposer_index(state, offset_slot, shard)
|
||||||
|
# Reconstruct shard headers
|
||||||
|
header = ShardBlockHeader(
|
||||||
|
shard_parent_root=shard_parent_root,
|
||||||
|
beacon_parent_root=get_block_root_at_slot(state, offset_slot),
|
||||||
|
slot=offset_slot,
|
||||||
|
shard=shard,
|
||||||
|
proposer_index=proposal_index,
|
||||||
|
body_root=transition.shard_data_roots[i]
|
||||||
|
)
|
||||||
|
shard_parent_root = hash_tree_root(header)
|
||||||
|
headers.append(header)
|
||||||
|
proposers.append(proposal_index)
|
||||||
|
|
||||||
prev_gasprice = shard_state.gasprice
|
prev_gasprice = shard_state.gasprice
|
||||||
|
|
||||||
pubkeys = [state.validators[proposer].pubkey for proposer in proposers]
|
pubkeys = [state.validators[proposer].pubkey for proposer in proposers]
|
||||||
|
@ -706,11 +863,11 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
|
||||||
for header in headers
|
for header in headers
|
||||||
]
|
]
|
||||||
# Verify combined proposer signature
|
# Verify combined proposer signature
|
||||||
assert bls.AggregateVerify(zip(pubkeys, signing_roots), signature=transition.proposer_signature_aggregate)
|
assert optional_aggregate_verify(pubkeys, signing_roots, transition.proposer_signature_aggregate)
|
||||||
|
|
||||||
# Save updated state
|
# Save updated state
|
||||||
state.shard_states[shard] = transition.shard_states[-1]
|
state.shard_states[shard] = transition.shard_states[len(transition.shard_states) - 1]
|
||||||
state.shard_states[shard].slot = state.slot - 1
|
state.shard_states[shard].slot = compute_previous_slot(state.slot)
|
||||||
```
|
```
|
||||||
|
|
||||||
###### `process_crosslink_for_shard`
|
###### `process_crosslink_for_shard`
|
||||||
|
@ -732,6 +889,9 @@ def process_crosslink_for_shard(state: BeaconState,
|
||||||
for attestation in transition_attestations:
|
for attestation in transition_attestations:
|
||||||
participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
|
participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
|
||||||
transition_participants = transition_participants.union(participants)
|
transition_participants = transition_participants.union(participants)
|
||||||
|
assert attestation.data.shard_head_root == shard_transition.shard_data_roots[
|
||||||
|
len(shard_transition.shard_data_roots) - 1
|
||||||
|
]
|
||||||
|
|
||||||
enough_online_stake = (
|
enough_online_stake = (
|
||||||
get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >=
|
get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >=
|
||||||
|
@ -743,7 +903,6 @@ def process_crosslink_for_shard(state: BeaconState,
|
||||||
|
|
||||||
# Attestation <-> shard transition consistency
|
# Attestation <-> shard transition consistency
|
||||||
assert shard_transition_root == hash_tree_root(shard_transition)
|
assert shard_transition_root == hash_tree_root(shard_transition)
|
||||||
assert attestation.data.head_shard_root == shard_transition.shard_data_roots[-1]
|
|
||||||
|
|
||||||
# Apply transition
|
# Apply transition
|
||||||
apply_shard_transition(state, shard, shard_transition)
|
apply_shard_transition(state, shard, shard_transition)
|
||||||
|
@ -754,11 +913,11 @@ def process_crosslink_for_shard(state: BeaconState,
|
||||||
increase_balance(state, beacon_proposer_index, proposer_reward)
|
increase_balance(state, beacon_proposer_index, proposer_reward)
|
||||||
states_slots_lengths = zip(
|
states_slots_lengths = zip(
|
||||||
shard_transition.shard_states,
|
shard_transition.shard_states,
|
||||||
get_offset_slots(state, get_latest_slot_for_shard(state, shard)),
|
get_offset_slots(state, shard),
|
||||||
shard_transition.shard_block_lengths
|
shard_transition.shard_block_lengths
|
||||||
)
|
)
|
||||||
for shard_state, slot, length in states_slots_lengths:
|
for shard_state, slot, length in states_slots_lengths:
|
||||||
proposer_index = get_shard_proposer_index(state, shard, slot)
|
proposer_index = get_shard_proposer_index(state, slot, shard)
|
||||||
decrease_balance(state, proposer_index, shard_state.gasprice * length)
|
decrease_balance(state, proposer_index, shard_state.gasprice * length)
|
||||||
|
|
||||||
# Return winning transition root
|
# Return winning transition root
|
||||||
|
@ -781,38 +940,41 @@ def process_crosslinks(state: BeaconState,
|
||||||
# All attestations in the block for this committee/shard and current slot
|
# All attestations in the block for this committee/shard and current slot
|
||||||
shard_attestations = [
|
shard_attestations = [
|
||||||
attestation for attestation in attestations
|
attestation for attestation in attestations
|
||||||
if attestation.data.index == committee_index and attestation.data.slot == state.slot
|
if is_on_time_attestation(state, attestation) and attestation.data.index == committee_index
|
||||||
]
|
]
|
||||||
shard_transition = shard_transitions[shard]
|
|
||||||
winning_root = process_crosslink_for_shard(state, committee_index, shard_transition, shard_attestations)
|
winning_root = process_crosslink_for_shard(state, committee_index, shard_transitions[shard], shard_attestations)
|
||||||
if winning_root != Root():
|
if winning_root != Root():
|
||||||
# Mark relevant pending attestations as creating a successful crosslink
|
# Mark relevant pending attestations as creating a successful crosslink
|
||||||
for pending_attestation in state.current_epoch_attestations:
|
for pending_attestation in state.current_epoch_attestations:
|
||||||
if (
|
if is_winning_attestation(state, pending_attestation, committee_index, winning_root):
|
||||||
pending_attestation.slot == state.slot and pending_attestation
|
|
||||||
and pending_attestation.data.index == committee_index
|
|
||||||
and pending_attestation.data.shard_transition_root == winning_root
|
|
||||||
):
|
|
||||||
pending_attestation.crosslink_success = True
|
pending_attestation.crosslink_success = True
|
||||||
```
|
```
|
||||||
|
|
||||||
###### `process_attestation`
|
###### `verify_empty_shard_transition`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
def verify_empty_shard_transition(state: BeaconState, shard_transitions: Sequence[ShardTransition]) -> bool:
|
||||||
validate_attestation(state, attestation)
|
"""
|
||||||
# Store pending attestation for epoch processing
|
Verify that a `shard_transition` in a block is empty if an attestation was not processed for it.
|
||||||
pending_attestation = PendingAttestation(
|
"""
|
||||||
aggregation_bits=attestation.aggregation_bits,
|
for shard in range(get_active_shard_count(state)):
|
||||||
data=attestation.data,
|
if state.shard_states[shard].slot != compute_previous_slot(state.slot):
|
||||||
inclusion_delay=state.slot - attestation.data.slot,
|
if shard_transitions[shard] != ShardTransition():
|
||||||
crosslink_success=False, # To be filled in during process_crosslinks
|
return False
|
||||||
proposer_index=get_beacon_proposer_index(state),
|
return True
|
||||||
)
|
```
|
||||||
if attestation.data.target.epoch == get_current_epoch(state):
|
|
||||||
state.current_epoch_attestations.append(pending_attestation)
|
###### `process_shard_transitions`
|
||||||
else:
|
|
||||||
state.previous_epoch_attestations.append(pending_attestation)
|
```python
|
||||||
|
def process_shard_transitions(state: BeaconState,
|
||||||
|
shard_transitions: Sequence[ShardTransition],
|
||||||
|
attestations: Sequence[Attestation]) -> None:
|
||||||
|
# Process crosslinks
|
||||||
|
process_crosslinks(state, shard_transitions, attestations)
|
||||||
|
# Verify the empty proposal shard states
|
||||||
|
assert verify_empty_shard_transition(state, shard_transitions)
|
||||||
```
|
```
|
||||||
|
|
||||||
##### New Attester slashing processing
|
##### New Attester slashing processing
|
||||||
|
@ -820,11 +982,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||||
```python
|
```python
|
||||||
def get_indices_from_committee(
|
def get_indices_from_committee(
|
||||||
committee: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE],
|
committee: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE],
|
||||||
bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]:
|
bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> Sequence[ValidatorIndex]:
|
||||||
assert len(bits) == len(committee)
|
assert len(bits) == len(committee)
|
||||||
return List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE](
|
return [validator_index for i, validator_index in enumerate(committee) if bits[i]]
|
||||||
[validator_index for i, validator_index in enumerate(committee) if bits[i]]
|
|
||||||
)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -857,22 +1017,12 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla
|
||||||
assert slashed_any
|
assert slashed_any
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Shard transition false positives
|
|
||||||
|
|
||||||
```python
|
|
||||||
def verify_shard_transition_false_positives(state: BeaconState, block_body: BeaconBlockBody) -> None:
|
|
||||||
# Verify that a `shard_transition` in a block is empty if an attestation was not processed for it
|
|
||||||
for shard in range(get_active_shard_count(state)):
|
|
||||||
if state.shard_states[shard].slot != state.slot - 1:
|
|
||||||
assert block_body.shard_transitions[shard] == ShardTransition()
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Light client processing
|
#### Light client processing
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_light_client_aggregate(state: BeaconState, block_body: BeaconBlockBody) -> None:
|
def process_light_client_aggregate(state: BeaconState, block_body: BeaconBlockBody) -> None:
|
||||||
committee = get_light_client_committee(state, get_current_epoch(state))
|
committee = get_light_client_committee(state, get_current_epoch(state))
|
||||||
previous_slot = get_previous_slot(state.slot)
|
previous_slot = compute_previous_slot(state.slot)
|
||||||
previous_block_root = get_block_root_at_slot(state, previous_slot)
|
previous_block_root = get_block_root_at_slot(state, previous_slot)
|
||||||
|
|
||||||
total_reward = Gwei(0)
|
total_reward = Gwei(0)
|
||||||
|
@ -888,10 +1038,9 @@ def process_light_client_aggregate(state: BeaconState, block_body: BeaconBlockBo
|
||||||
|
|
||||||
signing_root = compute_signing_root(previous_block_root,
|
signing_root = compute_signing_root(previous_block_root,
|
||||||
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(previous_slot)))
|
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(previous_slot)))
|
||||||
assert bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature)
|
assert optional_fast_aggregate_verify(signer_pubkeys, signing_root, block_body.light_client_signature)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### Epoch transition
|
### Epoch transition
|
||||||
|
|
||||||
This epoch transition overrides the phase0 epoch transition:
|
This epoch transition overrides the phase0 epoch transition:
|
||||||
|
@ -911,7 +1060,7 @@ def process_epoch(state: BeaconState) -> None:
|
||||||
|
|
||||||
#### Custody game updates
|
#### Custody game updates
|
||||||
|
|
||||||
`process_reveal_deadlines` and `process_custody_final_updates` are defined in [the Custody Game spec](./1_custody-game.md),
|
`process_reveal_deadlines` and `process_custody_final_updates` are defined in [the Custody Game spec](./custody-game.md),
|
||||||
|
|
||||||
#### Online-tracking
|
#### Online-tracking
|
||||||
|
|
||||||
|
@ -932,7 +1081,9 @@ def process_online_tracking(state: BeaconState) -> None:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_light_client_committee_updates(state: BeaconState) -> None:
|
def process_light_client_committee_updates(state: BeaconState) -> None:
|
||||||
# Update light client committees
|
"""
|
||||||
|
Update light client committees.
|
||||||
|
"""
|
||||||
if get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == 0:
|
if get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == 0:
|
||||||
state.current_light_committee = state.next_light_committee
|
state.current_light_committee = state.next_light_committee
|
||||||
new_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD)
|
new_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD)
|
||||||
|
|
|
@ -300,7 +300,7 @@ def process_early_derived_secret_reveal(state: BeaconState, reveal: EarlyDerived
|
||||||
|
|
||||||
domain = get_domain(state, DOMAIN_RANDAO, reveal.epoch)
|
domain = get_domain(state, DOMAIN_RANDAO, reveal.epoch)
|
||||||
signing_roots = [compute_signing_root(root, domain) for root in [hash_tree_root(reveal.epoch), reveal.mask]]
|
signing_roots = [compute_signing_root(root, domain) for root in [hash_tree_root(reveal.epoch), reveal.mask]]
|
||||||
assert bls.AggregateVerify(zip(pubkeys, signing_roots), reveal.reveal)
|
assert bls.AggregateVerify(pubkeys, signing_roots, reveal.reveal)
|
||||||
|
|
||||||
if reveal.epoch >= get_current_epoch(state) + CUSTODY_PERIOD_TO_RANDAO_PADDING:
|
if reveal.epoch >= get_current_epoch(state) + CUSTODY_PERIOD_TO_RANDAO_PADDING:
|
||||||
# Full slashing when the secret was revealed so early it may be a valid custody
|
# Full slashing when the secret was revealed so early it may be a valid custody
|
||||||
|
|
|
@ -1,70 +0,0 @@
|
||||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
|
||||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
|
||||||
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
|
|
||||||
|
|
||||||
- [Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs](#ethereum-20-phase-1----shard-transition-and-fraud-proofs)
|
|
||||||
- [Table of contents](#table-of-contents)
|
|
||||||
- [Introduction](#introduction)
|
|
||||||
- [Fraud proofs](#fraud-proofs)
|
|
||||||
- [Shard state transition function](#shard-state-transition-function)
|
|
||||||
- [Honest committee member behavior](#honest-committee-member-behavior)
|
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
|
||||||
|
|
||||||
# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs
|
|
||||||
|
|
||||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
|
||||||
|
|
||||||
## Table of contents
|
|
||||||
|
|
||||||
<!-- TOC -->
|
|
||||||
|
|
||||||
TODO
|
|
||||||
|
|
||||||
<!-- /TOC -->
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0.
|
|
||||||
|
|
||||||
## Fraud proofs
|
|
||||||
|
|
||||||
TODO. The intent is to have a single universal fraud proof type, which contains the following parts:
|
|
||||||
|
|
||||||
1. An on-time attestation on some `shard` signing a `ShardTransition`
|
|
||||||
2. An index `i` of a particular position to focus on
|
|
||||||
3. The `ShardTransition` itself
|
|
||||||
4. The full body of the block
|
|
||||||
5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing
|
|
||||||
|
|
||||||
The proof verifies that one of the two conditions is false:
|
|
||||||
|
|
||||||
1. `custody_bits[i][j] != generate_custody_bit(subkey, block_contents)` for any `j`
|
|
||||||
2. `execute_state_transition(shard, slot, transition.shard_states[i-1].data, hash_tree_root(parent), get_shard_proposer_index(state, shard, slot), block_contents) != transition.shard_states[i].data` (if `i=0` then instead use `parent.shard_states[shard][-1].data`)
|
|
||||||
|
|
||||||
## Shard state transition function
|
|
||||||
|
|
||||||
```python
|
|
||||||
def shard_state_transition(shard: Shard,
|
|
||||||
slot: Slot,
|
|
||||||
pre_state: Root,
|
|
||||||
previous_beacon_root: Root,
|
|
||||||
proposer_pubkey: BLSPubkey,
|
|
||||||
block_data: ByteList[MAX_SHARD_BLOCK_SIZE]) -> Root:
|
|
||||||
# We will add something more substantive in phase 2
|
|
||||||
return hash(pre_state + hash_tree_root(previous_beacon_root) + hash_tree_root(block_data))
|
|
||||||
```
|
|
||||||
|
|
||||||
## Honest committee member behavior
|
|
||||||
|
|
||||||
Suppose you are a committee member on shard `shard` at slot `current_slot`. Let `state` be the head beacon state you are building on, and let `QUARTER_PERIOD = SECONDS_PER_SLOT // 4`. `2 * QUARTER_PERIOD` seconds into slot `slot`, run the following procedure:
|
|
||||||
|
|
||||||
* Initialize `proposals = []`, `shard_states = []`, `shard_state = state.shard_states[shard][-1]`, `start_slot = shard_state.slot`.
|
|
||||||
* For `slot in get_offset_slots(state, start_slot)`, do the following:
|
|
||||||
* Look for all valid proposals for `slot`; that is, a Bytes `proposal` where `shard_state_transition(shard, slot, shard_state, get_block_root_at_slot(state, state.slot - 1), get_shard_proposer_index(state, shard, slot), proposal)` returns a result and does not throw an exception. Let `choices` be the set of non-empty valid proposals you discover.
|
|
||||||
* If `len(choices) == 0`, do `proposals.append(make_empty_proposal(shard_state, slot))`
|
|
||||||
* If `len(choices) == 1`, do `proposals.append(choices[0])`
|
|
||||||
* If `len(choices) > 1`, let `winning_proposal` be the proposal with the largest number of total attestations from slots in `state.shard_next_slots[shard]....slot-1` supporting it or any of its descendants, breaking ties by choosing the first proposal locally seen. Do `proposals.append(winning_proposal)`.
|
|
||||||
* If `proposals[-1]` is NOT an empty proposal, set `shard_state = shard_state_transition(shard, slot, shard_state, get_block_root_at_slot(state, state.slot - 1), get_shard_proposer_index(state, shard, slot), proposals[-1])` and do `shard_states.append(shard_state)`. If it is an empty proposal, leave `shard_state` unchanged.
|
|
||||||
|
|
||||||
Make an attestation using `shard_data_roots = [hash_tree_root(proposal) for proposal in proposals]` and `shard_state_roots = shard_states`.
|
|
|
@ -7,7 +7,7 @@
|
||||||
- [Introduction](#introduction)
|
- [Introduction](#introduction)
|
||||||
- [Configuration](#configuration)
|
- [Configuration](#configuration)
|
||||||
- [Fork to Phase 1](#fork-to-phase-1)
|
- [Fork to Phase 1](#fork-to-phase-1)
|
||||||
- [Fork trigger.](#fork-trigger)
|
- [Fork trigger](#fork-trigger)
|
||||||
- [Upgrading the state](#upgrading-the-state)
|
- [Upgrading the state](#upgrading-the-state)
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
@ -35,17 +35,18 @@ Warning: this configuration is not definitive.
|
||||||
| Name | Value |
|
| Name | Value |
|
||||||
| - | - |
|
| - | - |
|
||||||
| `PHASE_1_FORK_VERSION` | `Version('0x01000000')` |
|
| `PHASE_1_FORK_VERSION` | `Version('0x01000000')` |
|
||||||
|
| `PHASE_1_GENESIS_SLOT` | `2**5` **TBD** |
|
||||||
| `INITIAL_ACTIVE_SHARDS` | `2**6` (= 64) |
|
| `INITIAL_ACTIVE_SHARDS` | `2**6` (= 64) |
|
||||||
|
|
||||||
## Fork to Phase 1
|
## Fork to Phase 1
|
||||||
|
|
||||||
### Fork trigger.
|
### Fork trigger
|
||||||
|
|
||||||
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork.
|
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at slot `PHASE_1_GENESIS_SLOT`, where `PHASE_1_GENESIS_SLOT % SLOTS_PER_EPOCH == 0`.
|
||||||
|
|
||||||
### Upgrading the state
|
### Upgrading the state
|
||||||
|
|
||||||
After `process_slots` of Phase 0 finishes, but before the first Phase 1 block is processed, an irregular state change is made to upgrade to Phase 1.
|
After `process_slots` of Phase 0 finishes, if `state.slot == PHASE_1_GENESIS_SLOT`, an irregular state change is made to upgrade to Phase 1.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
|
def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
|
||||||
|
@ -102,7 +103,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
|
||||||
ShardState(
|
ShardState(
|
||||||
slot=pre.slot,
|
slot=pre.slot,
|
||||||
gasprice=MIN_GASPRICE,
|
gasprice=MIN_GASPRICE,
|
||||||
data=Root(),
|
transition_digest=Root(),
|
||||||
latest_block_root=Root(),
|
latest_block_root=Root(),
|
||||||
) for i in range(INITIAL_ACTIVE_SHARDS)
|
) for i in range(INITIAL_ACTIVE_SHARDS)
|
||||||
),
|
),
|
||||||
|
@ -110,7 +111,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
|
||||||
current_light_committee=CompactCommittee(), # computed after state creation
|
current_light_committee=CompactCommittee(), # computed after state creation
|
||||||
next_light_committee=CompactCommittee(),
|
next_light_committee=CompactCommittee(),
|
||||||
# Custody game
|
# Custody game
|
||||||
custody_challenge_index=0,
|
exposed_derived_secrets=[] * EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS,
|
||||||
# exposed_derived_secrets will fully default to zeroes
|
# exposed_derived_secrets will fully default to zeroes
|
||||||
)
|
)
|
||||||
next_epoch = Epoch(epoch + 1)
|
next_epoch = Epoch(epoch + 1)
|
||||||
|
|
|
@ -0,0 +1,294 @@
|
||||||
|
# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs
|
||||||
|
|
||||||
|
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||||
|
|
||||||
|
## Table of contents
|
||||||
|
|
||||||
|
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||||
|
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
|
||||||
|
|
||||||
|
- [Introduction](#introduction)
|
||||||
|
- [Helper functions](#helper-functions)
|
||||||
|
- [Misc](#misc)
|
||||||
|
- [Shard block verification functions](#shard-block-verification-functions)
|
||||||
|
- [Shard state transition](#shard-state-transition)
|
||||||
|
- [Fraud proofs](#fraud-proofs)
|
||||||
|
- [Verifying the proof](#verifying-the-proof)
|
||||||
|
- [Honest committee member behavior](#honest-committee-member-behavior)
|
||||||
|
- [Helper functions](#helper-functions-1)
|
||||||
|
- [Make attestations](#make-attestations)
|
||||||
|
|
||||||
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0.
|
||||||
|
|
||||||
|
## Helper functions
|
||||||
|
|
||||||
|
### Misc
|
||||||
|
|
||||||
|
```python
|
||||||
|
def compute_shard_transition_digest(beacon_state: BeaconState,
|
||||||
|
shard_state: ShardState,
|
||||||
|
beacon_parent_root: Root,
|
||||||
|
shard_body_root: Root) -> Bytes32:
|
||||||
|
# TODO: use SSZ hash tree root
|
||||||
|
return hash(
|
||||||
|
hash_tree_root(shard_state) + beacon_parent_root + shard_body_root
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Shard block verification functions
|
||||||
|
|
||||||
|
```python
|
||||||
|
def verify_shard_block_message(beacon_state: BeaconState,
|
||||||
|
shard_state: ShardState,
|
||||||
|
block: ShardBlock,
|
||||||
|
slot: Slot,
|
||||||
|
shard: Shard) -> bool:
|
||||||
|
assert block.shard_parent_root == shard_state.latest_block_root
|
||||||
|
assert block.slot == slot
|
||||||
|
assert block.shard == shard
|
||||||
|
assert block.proposer_index == get_shard_proposer_index(beacon_state, slot, shard)
|
||||||
|
assert 0 < len(block.body) <= MAX_SHARD_BLOCK_SIZE
|
||||||
|
return True
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def verify_shard_block_signature(beacon_state: BeaconState,
|
||||||
|
signed_block: SignedShardBlock) -> bool:
|
||||||
|
proposer = beacon_state.validators[signed_block.message.proposer_index]
|
||||||
|
domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(signed_block.message.slot))
|
||||||
|
signing_root = compute_signing_root(signed_block.message, domain)
|
||||||
|
return bls.Verify(proposer.pubkey, signing_root, signed_block.signature)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Shard state transition
|
||||||
|
|
||||||
|
```python
|
||||||
|
def shard_state_transition(beacon_state: BeaconState,
|
||||||
|
shard_state: ShardState,
|
||||||
|
block: ShardBlock) -> None:
|
||||||
|
"""
|
||||||
|
Update ``shard_state`` with shard ``block`` and ``beacon_state`.
|
||||||
|
"""
|
||||||
|
shard_state.slot = block.slot
|
||||||
|
prev_gasprice = shard_state.gasprice
|
||||||
|
shard_state.gasprice = compute_updated_gasprice(prev_gasprice, len(block.body))
|
||||||
|
if len(block.body) == 0:
|
||||||
|
latest_block_root = shard_state.latest_block_root
|
||||||
|
else:
|
||||||
|
latest_block_root = hash_tree_root(block)
|
||||||
|
shard_state.latest_block_root = latest_block_root
|
||||||
|
shard_state.transition_digest = compute_shard_transition_digest(
|
||||||
|
beacon_state,
|
||||||
|
shard_state,
|
||||||
|
block.beacon_parent_root,
|
||||||
|
hash_tree_root(block.body),
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
We have a pure function `get_post_shard_state` for describing the fraud proof verification and honest validator behavior.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_post_shard_state(beacon_state: BeaconState,
|
||||||
|
shard_state: ShardState,
|
||||||
|
block: ShardBlock) -> ShardState:
|
||||||
|
"""
|
||||||
|
A pure function that returns a new post ShardState instead of modifying the given `shard_state`.
|
||||||
|
"""
|
||||||
|
post_state = shard_state.copy()
|
||||||
|
shard_state_transition(beacon_state, post_state, block)
|
||||||
|
return post_state
|
||||||
|
```
|
||||||
|
|
||||||
|
## Fraud proofs
|
||||||
|
|
||||||
|
### Verifying the proof
|
||||||
|
|
||||||
|
TODO. The intent is to have a single universal fraud proof type, which contains the following parts:
|
||||||
|
|
||||||
|
1. An on-time attestation `attestation` on some shard `shard` signing a `transition: ShardTransition`
|
||||||
|
2. An index `offset_index` of a particular position to focus on
|
||||||
|
3. The `transition: ShardTransition` itself
|
||||||
|
4. The full body of the shard block `shard_block`
|
||||||
|
5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing
|
||||||
|
6. The `subkey` to generate the custody bit
|
||||||
|
|
||||||
|
Call the following function to verify the proof:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def is_valid_fraud_proof(beacon_state: BeaconState,
|
||||||
|
attestation: Attestation,
|
||||||
|
offset_index: uint64,
|
||||||
|
transition: ShardTransition,
|
||||||
|
block: ShardBlock,
|
||||||
|
subkey: BLSPubkey,
|
||||||
|
beacon_parent_block: BeaconBlock) -> bool:
|
||||||
|
# 1. Check if `custody_bits[offset_index][j] != generate_custody_bit(subkey, block_contents)` for any `j`.
|
||||||
|
custody_bits = attestation.custody_bits_blocks
|
||||||
|
for j in range(len(custody_bits[offset_index])):
|
||||||
|
if custody_bits[offset_index][j] != generate_custody_bit(subkey, block):
|
||||||
|
return True
|
||||||
|
|
||||||
|
# 2. Check if the shard state transition result is wrong between
|
||||||
|
# `transition.shard_states[offset_index - 1]` to `transition.shard_states[offset_index]`.
|
||||||
|
if offset_index == 0:
|
||||||
|
shard = get_shard(beacon_state, attestation)
|
||||||
|
shard_states = beacon_parent_block.body.shard_transitions[shard].shard_states
|
||||||
|
shard_state = shard_states[len(shard_states) - 1]
|
||||||
|
else:
|
||||||
|
shard_state = transition.shard_states[offset_index - 1] # Not doing the actual state updates here.
|
||||||
|
|
||||||
|
shard_state = get_post_shard_state(beacon_state, shard_state, block)
|
||||||
|
if shard_state.transition_digest != transition.shard_states[offset_index].transition_digest:
|
||||||
|
return True
|
||||||
|
|
||||||
|
return False
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def generate_custody_bit(subkey: BLSPubkey, block: ShardBlock) -> bool:
|
||||||
|
# TODO
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Honest committee member behavior
|
||||||
|
|
||||||
|
### Helper functions
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_winning_proposal(beacon_state: BeaconState, proposals: Sequence[SignedShardBlock]) -> SignedShardBlock:
|
||||||
|
# TODO: Let `winning_proposal` be the proposal with the largest number of total attestations from slots in
|
||||||
|
# `state.shard_next_slots[shard]....slot-1` supporting it or any of its descendants, breaking ties by choosing
|
||||||
|
# the first proposal locally seen. Do `proposals.append(winning_proposal)`.
|
||||||
|
return proposals[-1] # stub
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def compute_shard_body_roots(proposals: Sequence[SignedShardBlock]) -> Sequence[Root]:
|
||||||
|
return [hash_tree_root(proposal.message.body) for proposal in proposals]
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_proposal_choices_at_slot(beacon_state: BeaconState,
|
||||||
|
shard_state: ShardState,
|
||||||
|
slot: Slot,
|
||||||
|
shard: Shard,
|
||||||
|
shard_blocks: Sequence[SignedShardBlock],
|
||||||
|
validate_signature: bool=True) -> Sequence[SignedShardBlock]:
|
||||||
|
"""
|
||||||
|
Return the valid shard blocks at the given ``slot``.
|
||||||
|
Note that this function doesn't change the state.
|
||||||
|
"""
|
||||||
|
choices = []
|
||||||
|
shard_blocks_at_slot = [block for block in shard_blocks if block.message.slot == slot]
|
||||||
|
for block in shard_blocks_at_slot:
|
||||||
|
try:
|
||||||
|
# Verify block message and signature
|
||||||
|
# TODO these validations should have been checked upon receiving shard blocks.
|
||||||
|
assert verify_shard_block_message(beacon_state, shard_state, block.message, slot, shard)
|
||||||
|
if validate_signature:
|
||||||
|
assert verify_shard_block_signature(beacon_state, block)
|
||||||
|
|
||||||
|
shard_state = get_post_shard_state(beacon_state, shard_state, block.message)
|
||||||
|
except Exception:
|
||||||
|
pass # TODO: throw error in the test helper
|
||||||
|
else:
|
||||||
|
choices.append(block)
|
||||||
|
return choices
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_proposal_at_slot(beacon_state: BeaconState,
|
||||||
|
shard_state: ShardState,
|
||||||
|
slot: Shard,
|
||||||
|
shard: Shard,
|
||||||
|
shard_blocks: Sequence[SignedShardBlock],
|
||||||
|
validate_signature: bool=True) -> Tuple[SignedShardBlock, ShardState]:
|
||||||
|
"""
|
||||||
|
Return ``proposal``, ``shard_state`` of the given ``slot``.
|
||||||
|
Note that this function doesn't change the state.
|
||||||
|
"""
|
||||||
|
choices = get_proposal_choices_at_slot(
|
||||||
|
beacon_state=beacon_state,
|
||||||
|
shard_state=shard_state,
|
||||||
|
slot=slot,
|
||||||
|
shard=shard,
|
||||||
|
shard_blocks=shard_blocks,
|
||||||
|
validate_signature=validate_signature,
|
||||||
|
)
|
||||||
|
if len(choices) == 0:
|
||||||
|
block = ShardBlock(slot=slot)
|
||||||
|
proposal = SignedShardBlock(message=block)
|
||||||
|
elif len(choices) == 1:
|
||||||
|
proposal = choices[0]
|
||||||
|
else:
|
||||||
|
proposal = get_winning_proposal(beacon_state, choices)
|
||||||
|
|
||||||
|
# Apply state transition
|
||||||
|
shard_state = get_post_shard_state(beacon_state, shard_state, proposal.message)
|
||||||
|
|
||||||
|
return proposal, shard_state
|
||||||
|
```
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_shard_state_transition_result(
|
||||||
|
beacon_state: BeaconState,
|
||||||
|
shard: Shard,
|
||||||
|
shard_blocks: Sequence[SignedShardBlock],
|
||||||
|
validate_signature: bool=True,
|
||||||
|
) -> Tuple[Sequence[SignedShardBlock], Sequence[ShardState], Sequence[Root]]:
|
||||||
|
proposals = []
|
||||||
|
shard_states = []
|
||||||
|
shard_state = beacon_state.shard_states[shard]
|
||||||
|
for slot in get_offset_slots(beacon_state, shard):
|
||||||
|
proposal, shard_state = get_proposal_at_slot(
|
||||||
|
beacon_state=beacon_state,
|
||||||
|
shard_state=shard_state,
|
||||||
|
slot=slot,
|
||||||
|
shard=shard,
|
||||||
|
shard_blocks=shard_blocks,
|
||||||
|
validate_signature=validate_signature,
|
||||||
|
)
|
||||||
|
shard_states.append(shard_state)
|
||||||
|
proposals.append(proposal)
|
||||||
|
|
||||||
|
shard_data_roots = compute_shard_body_roots(proposals)
|
||||||
|
|
||||||
|
return proposals, shard_states, shard_data_roots
|
||||||
|
```
|
||||||
|
|
||||||
|
### Make attestations
|
||||||
|
|
||||||
|
Suppose you are a committee member on shard `shard` at slot `current_slot` and you have received shard blocks `shard_blocks` since the latest successful crosslink for `shard` into the beacon chain. Let `beacon_state` be the head beacon state you are building on, and let `QUARTER_PERIOD = SECONDS_PER_SLOT // 4`. `2 * QUARTER_PERIOD` seconds into slot `current_slot`, run `get_shard_transition(beacon_state, shard, shard_blocks)` to get `shard_transition`.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_shard_transition(beacon_state: BeaconState,
|
||||||
|
shard: Shard,
|
||||||
|
shard_blocks: Sequence[SignedShardBlock]) -> ShardTransition:
|
||||||
|
offset_slots = get_offset_slots(beacon_state, shard)
|
||||||
|
proposals, shard_states, shard_data_roots = get_shard_state_transition_result(beacon_state, shard, shard_blocks)
|
||||||
|
|
||||||
|
shard_block_lengths = []
|
||||||
|
proposer_signatures = []
|
||||||
|
for proposal in proposals:
|
||||||
|
shard_block_lengths.append(len(proposal.message.body))
|
||||||
|
if proposal.signature != NO_SIGNATURE:
|
||||||
|
proposer_signatures.append(proposal.signature)
|
||||||
|
|
||||||
|
if len(proposer_signatures) > 0:
|
||||||
|
proposer_signature_aggregate = bls.Aggregate(proposer_signatures)
|
||||||
|
else:
|
||||||
|
proposer_signature_aggregate = NO_SIGNATURE
|
||||||
|
|
||||||
|
return ShardTransition(
|
||||||
|
start_slot=offset_slots[0],
|
||||||
|
shard_block_lengths=shard_block_lengths,
|
||||||
|
shard_data_roots=shard_data_roots,
|
||||||
|
shard_states=shard_states,
|
||||||
|
proposer_signature_aggregate=proposer_signature_aggregate,
|
||||||
|
)
|
||||||
|
```
|
|
@ -211,8 +211,8 @@ We first define helper functions:
|
||||||
* `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up)
|
* `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up)
|
||||||
* `List[C, N]` and `Vector[C, N]`, where `C` is a composite type: `N`
|
* `List[C, N]` and `Vector[C, N]`, where `C` is a composite type: `N`
|
||||||
* containers: `len(fields)`
|
* containers: `len(fields)`
|
||||||
* `bitfield_bytes(bits)`: return the bits of the bitlist or bitvector, packed in bytes, aligned to the start. Length-delimiting bit for bitlists is excluded.
|
* `pack(value)`: given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
|
||||||
* `pack`: Given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
|
* `pack_bits(bits)`: Given the `bits` of bitlist or bitvector, get `bitfield_bytes` by packing them in bytes and aligning to the start. The length-delimiting bit for bitlists is excluded. And then pack `bitfield_bytes` into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
|
||||||
* `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16`
|
* `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16`
|
||||||
* `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, merkleize the chunks, and return the root:
|
* `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, merkleize the chunks, and return the root:
|
||||||
* The merkleization depends on the effective input, which can be padded/limited:
|
* The merkleization depends on the effective input, which can be padded/limited:
|
||||||
|
@ -228,9 +228,9 @@ We first define helper functions:
|
||||||
We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
|
We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
|
||||||
|
|
||||||
* `merkleize(pack(value))` if `value` is a basic object or a vector of basic objects.
|
* `merkleize(pack(value))` if `value` is a basic object or a vector of basic objects.
|
||||||
* `merkleize(bitfield_bytes(value), limit=chunk_count(type))` if `value` is a bitvector.
|
* `merkleize(pack_bits(value), limit=chunk_count(type))` if `value` is a bitvector.
|
||||||
* `mix_in_length(merkleize(pack(value), limit=chunk_count(type)), len(value))` if `value` is a list of basic objects.
|
* `mix_in_length(merkleize(pack(value), limit=chunk_count(type)), len(value))` if `value` is a list of basic objects.
|
||||||
* `mix_in_length(merkleize(bitfield_bytes(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist.
|
* `mix_in_length(merkleize(pack_bits(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist.
|
||||||
* `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container.
|
* `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container.
|
||||||
* `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects.
|
* `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects.
|
||||||
* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type.
|
* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type.
|
||||||
|
|
|
@ -55,6 +55,11 @@ Run the test command from the `tests/core/pyspec` directory:
|
||||||
pytest --config=minimal eth2spec
|
pytest --config=minimal eth2spec
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Options:
|
||||||
|
- `--config`, to change the config. Defaults to `minimal`, can be set to `mainnet`, or other configs from the configs directory.
|
||||||
|
- `--disable-bls`, to disable BLS (only for tests that can run without)
|
||||||
|
- `--bls-type`, `milagro` or `py_ecc` (default)
|
||||||
|
|
||||||
### How to view code coverage report
|
### How to view code coverage report
|
||||||
|
|
||||||
Run `make open_cov` from the root of the specs repository after running `make test` to open the html code coverage report.
|
Run `make open_cov` from the root of the specs repository after running `make test` to open the html code coverage report.
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
0.11.1
|
0.12.1
|
|
@ -12,7 +12,7 @@ configs_path = 'configs/'
|
||||||
from eth2spec.config import config_util
|
from eth2spec.config import config_util
|
||||||
from eth2spec.phase0 import spec
|
from eth2spec.phase0 import spec
|
||||||
from importlib import reload
|
from importlib import reload
|
||||||
my_presets = config_util.prepare_config(configs_path, 'mainnet')
|
config_util.prepare_config(configs_path, 'mainnet')
|
||||||
# reload spec to make loaded config effective
|
# reload spec to make loaded config effective
|
||||||
reload(spec)
|
reload(spec)
|
||||||
```
|
```
|
||||||
|
|
|
@ -8,23 +8,28 @@ config: Dict[str, Any] = {}
|
||||||
|
|
||||||
# Access to overwrite spec constants based on configuration
|
# Access to overwrite spec constants based on configuration
|
||||||
# This is called by the spec module after declaring its globals, and applies the loaded presets.
|
# This is called by the spec module after declaring its globals, and applies the loaded presets.
|
||||||
def apply_constants_config(spec_globals: Dict[str, Any]) -> None:
|
def apply_constants_config(spec_globals: Dict[str, Any], warn_if_unknown: bool = False) -> None:
|
||||||
global config
|
global config
|
||||||
for k, v in config.items():
|
for k, v in config.items():
|
||||||
if k.startswith('DOMAIN_'):
|
# the spec should have default values for everything, if not, the config key is invalid.
|
||||||
spec_globals[k] = spec_globals['DomainType'](v) # domain types are defined as bytes in the configs
|
if k in spec_globals:
|
||||||
|
# Keep the same type as the default value indicates (which may be an SSZ basic type subclass, e.g. 'Gwei')
|
||||||
|
spec_globals[k] = spec_globals[k].__class__(v)
|
||||||
else:
|
else:
|
||||||
spec_globals[k] = v
|
# Note: Phase 0 spec will not know the phase 1 config values.
|
||||||
|
# Yet, during debugging you can enable explicit warnings.
|
||||||
|
if warn_if_unknown:
|
||||||
|
print(f"WARNING: unknown config key: '{k}' with value: '{v}'")
|
||||||
|
|
||||||
|
|
||||||
# Load presets from a file, and then prepares the global config setting. This does not apply the config.
|
# Load presets from a file, and then prepares the global config setting. This does not apply the config.
|
||||||
# To apply the config, reload the spec module (it will re-initialize with the config taken from here).
|
# To apply the config, reload the spec module (it will re-initialize with the config taken from here).
|
||||||
def prepare_config(configs_path, config_name):
|
def prepare_config(configs_path: str, config_name: str) -> None:
|
||||||
global config
|
global config
|
||||||
config = load_config_file(configs_path, config_name)
|
config = load_config_file(configs_path, config_name)
|
||||||
|
|
||||||
|
|
||||||
def load_config_file(configs_dir, presets_name) -> Dict[str, Any]:
|
def load_config_file(configs_dir: str, presets_name: str) -> Dict[str, Any]:
|
||||||
"""
|
"""
|
||||||
Loads the given preset
|
Loads the given preset
|
||||||
:param presets_name: The name of the presets. (lowercase snake_case)
|
:param presets_name: The name of the presets. (lowercase snake_case)
|
||||||
|
@ -33,7 +38,7 @@ def load_config_file(configs_dir, presets_name) -> Dict[str, Any]:
|
||||||
path = Path(join(configs_dir, presets_name + '.yaml'))
|
path = Path(join(configs_dir, presets_name + '.yaml'))
|
||||||
yaml = YAML(typ='base')
|
yaml = YAML(typ='base')
|
||||||
loaded = yaml.load(path)
|
loaded = yaml.load(path)
|
||||||
out = dict()
|
out: Dict[str, Any] = dict()
|
||||||
for k, v in loaded.items():
|
for k, v in loaded.items():
|
||||||
if isinstance(v, list):
|
if isinstance(v, list):
|
||||||
# Clean up integer values. YAML parser renders lists of ints as list of str
|
# Clean up integer values. YAML parser renders lists of ints as list of str
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
from eth2spec.config import config_util
|
from eth2spec.config import config_util
|
||||||
from eth2spec.test.context import reload_specs
|
from eth2spec.test import context
|
||||||
|
from eth2spec.utils import bls as bls_utils
|
||||||
|
|
||||||
# We import pytest only when it's present, i.e. when we are running tests.
|
# We import pytest only when it's present, i.e. when we are running tests.
|
||||||
# The test-cases themselves can be generated without installing pytest.
|
# The test-cases themselves can be generated without installing pytest.
|
||||||
|
@ -27,7 +27,16 @@ def fixture(*args, **kwargs):
|
||||||
|
|
||||||
def pytest_addoption(parser):
|
def pytest_addoption(parser):
|
||||||
parser.addoption(
|
parser.addoption(
|
||||||
"--config", action="store", default="minimal", help="config: make the pyspec use the specified configuration"
|
"--config", action="store", type=str, default="minimal",
|
||||||
|
help="config: make the pyspec use the specified configuration"
|
||||||
|
)
|
||||||
|
parser.addoption(
|
||||||
|
"--disable-bls", action="store_true",
|
||||||
|
help="bls-default: make tests that are not dependent on BLS run without BLS"
|
||||||
|
)
|
||||||
|
parser.addoption(
|
||||||
|
"--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro"],
|
||||||
|
help="bls-type: use 'pyecc' or 'milagro' implementation for BLS"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -36,4 +45,22 @@ def config(request):
|
||||||
config_name = request.config.getoption("--config")
|
config_name = request.config.getoption("--config")
|
||||||
config_util.prepare_config('../../../configs/', config_name)
|
config_util.prepare_config('../../../configs/', config_name)
|
||||||
# now that the presets are loaded, reload the specs to apply them
|
# now that the presets are loaded, reload the specs to apply them
|
||||||
reload_specs()
|
context.reload_specs()
|
||||||
|
|
||||||
|
|
||||||
|
@fixture(autouse=True)
|
||||||
|
def bls_default(request):
|
||||||
|
disable_bls = request.config.getoption("--disable-bls")
|
||||||
|
if disable_bls:
|
||||||
|
context.DEFAULT_BLS_ACTIVE = False
|
||||||
|
|
||||||
|
|
||||||
|
@fixture(autouse=True)
|
||||||
|
def bls_type(request):
|
||||||
|
bls_type = request.config.getoption("--bls-type")
|
||||||
|
if bls_type == "py_ecc":
|
||||||
|
bls_utils.bls = bls_utils.py_ecc_bls
|
||||||
|
elif bls_type == "milagro":
|
||||||
|
bls_utils.bls = bls_utils.milagro_bls
|
||||||
|
else:
|
||||||
|
raise Exception(f"unrecognized bls type: {bls_type}")
|
||||||
|
|
|
@ -7,7 +7,9 @@ from .helpers.genesis import create_genesis_state
|
||||||
from .utils import vector_test, with_meta_tags
|
from .utils import vector_test, with_meta_tags
|
||||||
|
|
||||||
from random import Random
|
from random import Random
|
||||||
from typing import Any, Callable, Sequence, TypedDict, Protocol
|
from typing import Any, Callable, NewType, Sequence, TypedDict, Protocol
|
||||||
|
|
||||||
|
from lru import LRU
|
||||||
|
|
||||||
from importlib import reload
|
from importlib import reload
|
||||||
|
|
||||||
|
@ -19,48 +21,75 @@ def reload_specs():
|
||||||
|
|
||||||
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
|
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
|
||||||
|
|
||||||
|
SpecForkName = NewType("SpecForkName", str)
|
||||||
|
|
||||||
|
PHASE0 = SpecForkName('phase0')
|
||||||
|
PHASE1 = SpecForkName('phase1')
|
||||||
|
ALL_PHASES = (PHASE0, PHASE1)
|
||||||
|
|
||||||
# TODO: currently phases are defined as python modules.
|
# TODO: currently phases are defined as python modules.
|
||||||
# It would be better if they would be more well-defined interfaces for stronger typing.
|
# It would be better if they would be more well-defined interfaces for stronger typing.
|
||||||
|
|
||||||
|
|
||||||
class Spec(Protocol):
|
class Spec(Protocol):
|
||||||
version: str
|
version: str
|
||||||
|
|
||||||
|
|
||||||
class Phase0(Spec):
|
class SpecPhase0(Spec):
|
||||||
...
|
...
|
||||||
|
|
||||||
|
|
||||||
class Phase1(Spec):
|
class SpecPhase1(Spec):
|
||||||
def upgrade_to_phase1(self, state: spec_phase0.BeaconState) -> spec_phase1.BeaconState:
|
def upgrade_to_phase1(self, state: spec_phase0.BeaconState) -> spec_phase1.BeaconState:
|
||||||
...
|
...
|
||||||
|
|
||||||
|
|
||||||
# add transfer, bridge, etc. as the spec evolves
|
# add transfer, bridge, etc. as the spec evolves
|
||||||
class SpecForks(TypedDict, total=False):
|
class SpecForks(TypedDict, total=False):
|
||||||
phase0: Phase0
|
PHASE0: SpecPhase0
|
||||||
phase1: Phase1
|
PHASE1: SpecPhase1
|
||||||
|
|
||||||
|
|
||||||
|
def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Callable[[Any], int],
|
||||||
|
spec: Spec, phases: SpecForks):
|
||||||
|
|
||||||
|
p0 = phases[PHASE0]
|
||||||
|
balances = balances_fn(p0)
|
||||||
|
activation_threshold = threshold_fn(p0)
|
||||||
|
|
||||||
|
state = create_genesis_state(spec=p0, validator_balances=balances,
|
||||||
|
activation_threshold=activation_threshold)
|
||||||
|
if spec.fork == PHASE1:
|
||||||
|
# TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper.
|
||||||
|
# Decide based on performance/consistency results later.
|
||||||
|
state = phases[PHASE1].upgrade_to_phase1(state)
|
||||||
|
# Shard state slot must lag behind BeaconState slot by at least 1
|
||||||
|
# Will handle this more elegantly with fork mechanics
|
||||||
|
spec.process_slots(state, state.slot + 1)
|
||||||
|
|
||||||
|
return state
|
||||||
|
|
||||||
|
|
||||||
|
_custom_state_cache_dict = LRU(size=10)
|
||||||
|
|
||||||
|
|
||||||
def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
|
def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
|
||||||
threshold_fn: Callable[[Any], int]):
|
threshold_fn: Callable[[Any], int]):
|
||||||
def deco(fn):
|
def deco(fn):
|
||||||
def entry(*args, spec: Spec, phases: SpecForks, **kw):
|
|
||||||
try:
|
|
||||||
p0 = phases["phase0"]
|
|
||||||
balances = balances_fn(p0)
|
|
||||||
activation_threshold = threshold_fn(p0)
|
|
||||||
|
|
||||||
state = create_genesis_state(spec=p0, validator_balances=balances,
|
def entry(*args, spec: Spec, phases: SpecForks, **kw):
|
||||||
activation_threshold=activation_threshold)
|
# make a key for the state
|
||||||
if spec.fork == 'phase1':
|
# genesis fork version separates configs during test-generation runtime.
|
||||||
# TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper.
|
key = (spec.fork, spec.GENESIS_FORK_VERSION, spec.__file__, balances_fn, threshold_fn)
|
||||||
# Decide based on performance/consistency results later.
|
global _custom_state_cache_dict
|
||||||
state = phases["phase1"].upgrade_to_phase1(state)
|
if key not in _custom_state_cache_dict:
|
||||||
# Shard state slot must lag behind BeaconState slot by at least 1
|
state = _prepare_state(balances_fn, threshold_fn, spec, phases)
|
||||||
# Will handle this more elegantly with fork mechanics
|
_custom_state_cache_dict[key] = state.get_backing()
|
||||||
spec.process_slots(state, state.slot + 1)
|
|
||||||
kw['state'] = state
|
# Take an entry out of the LRU.
|
||||||
except KeyError:
|
# No copy is necessary, as we wrap the immutable backing with a new view.
|
||||||
raise TypeError('Spec decorator must come within state decorator to inject spec into state.')
|
state = spec.BeaconState(backing=_custom_state_cache_dict[key])
|
||||||
|
kw['state'] = state
|
||||||
return fn(*args, spec=spec, phases=phases, **kw)
|
return fn(*args, spec=spec, phases=phases, **kw)
|
||||||
return entry
|
return entry
|
||||||
return deco
|
return deco
|
||||||
|
@ -138,14 +167,15 @@ def single_phase(fn):
|
||||||
return entry
|
return entry
|
||||||
|
|
||||||
|
|
||||||
# BLS is turned off by default *for performance purposes during TESTING*.
|
# BLS is turned on by default, it can be disabled in tests by overriding this, or using `--disable-bls`.
|
||||||
|
# *This is for performance purposes during TESTING, DO NOT DISABLE IN PRODUCTION*.
|
||||||
# The runner of the test can indicate the preferred setting (test generators prefer BLS to be ON).
|
# The runner of the test can indicate the preferred setting (test generators prefer BLS to be ON).
|
||||||
# - Some tests are marked as BLS-requiring, and ignore this setting.
|
# - Some tests are marked as BLS-requiring, and ignore this setting.
|
||||||
# (tests that express differences caused by BLS, e.g. invalid signatures being rejected)
|
# (tests that express differences caused by BLS, e.g. invalid signatures being rejected)
|
||||||
# - Some other tests are marked as BLS-ignoring, and ignore this setting.
|
# - Some other tests are marked as BLS-ignoring, and ignore this setting.
|
||||||
# (tests that are heavily performance impacted / require unsigned state transitions)
|
# (tests that are heavily performance impacted / require unsigned state transitions)
|
||||||
# - Most tests respect the BLS setting.
|
# - Most tests respect the BLS setting.
|
||||||
DEFAULT_BLS_ACTIVE = False
|
DEFAULT_BLS_ACTIVE = True
|
||||||
|
|
||||||
|
|
||||||
def spec_test(fn):
|
def spec_test(fn):
|
||||||
|
@ -219,14 +249,11 @@ def bls_switch(fn):
|
||||||
return entry
|
return entry
|
||||||
|
|
||||||
|
|
||||||
all_phases = ['phase0', 'phase1']
|
|
||||||
|
|
||||||
|
|
||||||
def with_all_phases(fn):
|
def with_all_phases(fn):
|
||||||
"""
|
"""
|
||||||
A decorator for running a test with every phase
|
A decorator for running a test with every phase
|
||||||
"""
|
"""
|
||||||
return with_phases(all_phases)(fn)
|
return with_phases(ALL_PHASES)(fn)
|
||||||
|
|
||||||
|
|
||||||
def with_all_phases_except(exclusion_phases):
|
def with_all_phases_except(exclusion_phases):
|
||||||
|
@ -234,7 +261,7 @@ def with_all_phases_except(exclusion_phases):
|
||||||
A decorator factory for running a tests with every phase except the ones listed
|
A decorator factory for running a tests with every phase except the ones listed
|
||||||
"""
|
"""
|
||||||
def decorator(fn):
|
def decorator(fn):
|
||||||
return with_phases([phase for phase in all_phases if phase not in exclusion_phases])(fn)
|
return with_phases([phase for phase in ALL_PHASES if phase not in exclusion_phases])(fn)
|
||||||
return decorator
|
return decorator
|
||||||
|
|
||||||
|
|
||||||
|
@ -260,18 +287,18 @@ def with_phases(phases, other_phases=None):
|
||||||
|
|
||||||
# TODO: test state is dependent on phase0 but is immediately transitioned to phase1.
|
# TODO: test state is dependent on phase0 but is immediately transitioned to phase1.
|
||||||
# A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0
|
# A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0
|
||||||
available_phases.add('phase0')
|
available_phases.add(PHASE0)
|
||||||
|
|
||||||
phase_dir = {}
|
phase_dir = {}
|
||||||
if 'phase0' in available_phases:
|
if PHASE0 in available_phases:
|
||||||
phase_dir['phase0'] = spec_phase0
|
phase_dir[PHASE0] = spec_phase0
|
||||||
if 'phase1' in available_phases:
|
if PHASE1 in available_phases:
|
||||||
phase_dir['phase1'] = spec_phase1
|
phase_dir[PHASE1] = spec_phase1
|
||||||
|
|
||||||
# return is ignored whenever multiple phases are ran. If
|
# return is ignored whenever multiple phases are ran. If
|
||||||
if 'phase0' in run_phases:
|
if PHASE0 in run_phases:
|
||||||
ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
|
ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
|
||||||
if 'phase1' in run_phases:
|
if PHASE1 in run_phases:
|
||||||
ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw)
|
ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw)
|
||||||
return ret
|
return ret
|
||||||
return wrapper
|
return wrapper
|
||||||
|
|
|
@ -183,7 +183,7 @@ def test_filtered_block_tree(spec, state):
|
||||||
for i in range(spec.SLOTS_PER_EPOCH):
|
for i in range(spec.SLOTS_PER_EPOCH):
|
||||||
slot = rogue_block.slot + i
|
slot = rogue_block.slot + i
|
||||||
for index in range(spec.get_committee_count_at_slot(non_viable_state, slot)):
|
for index in range(spec.get_committee_count_at_slot(non_viable_state, slot)):
|
||||||
attestation = get_valid_attestation(spec, non_viable_state, rogue_block.slot + i, index)
|
attestation = get_valid_attestation(spec, non_viable_state, slot, index, signed=True)
|
||||||
attestations.append(attestation)
|
attestations.append(attestation)
|
||||||
|
|
||||||
# tick time forward to be able to include up to the latest attestation
|
# tick time forward to be able to include up to the latest attestation
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
from eth2spec.test.context import PHASE0, with_all_phases, spec_state_test
|
||||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||||
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
|
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
|
||||||
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch
|
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot
|
||||||
|
|
||||||
|
|
||||||
def run_on_attestation(spec, state, store, attestation, valid=True):
|
def run_on_attestation(spec, state, store, attestation, valid=True):
|
||||||
|
@ -16,7 +16,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
|
||||||
indexed_attestation = spec.get_indexed_attestation(state, attestation)
|
indexed_attestation = spec.get_indexed_attestation(state, attestation)
|
||||||
spec.on_attestation(store, attestation)
|
spec.on_attestation(store, attestation)
|
||||||
|
|
||||||
if spec.fork == 'phase0':
|
if spec.fork == PHASE0:
|
||||||
sample_index = indexed_attestation.attesting_indices[0]
|
sample_index = indexed_attestation.attesting_indices[0]
|
||||||
else:
|
else:
|
||||||
attesting_indices = [
|
attesting_indices = [
|
||||||
|
@ -116,6 +116,44 @@ def test_on_attestation_mismatched_target_and_slot(spec, state):
|
||||||
run_on_attestation(spec, state, store, attestation, False)
|
run_on_attestation(spec, state, store, attestation, False)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_on_attestation_inconsistent_target_and_head(spec, state):
|
||||||
|
store = spec.get_forkchoice_store(state)
|
||||||
|
spec.on_tick(store, store.time + 2 * spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH)
|
||||||
|
|
||||||
|
# Create chain 1 as empty chain between genesis and start of 1st epoch
|
||||||
|
target_state_1 = state.copy()
|
||||||
|
next_epoch(spec, target_state_1)
|
||||||
|
|
||||||
|
# Create chain 2 with different block in chain from chain 1 from chain 1 from chain 1 from chain 1
|
||||||
|
target_state_2 = state.copy()
|
||||||
|
diff_block = build_empty_block_for_next_slot(spec, target_state_2)
|
||||||
|
signed_diff_block = state_transition_and_sign_block(spec, target_state_2, diff_block)
|
||||||
|
spec.on_block(store, signed_diff_block)
|
||||||
|
next_epoch(spec, target_state_2)
|
||||||
|
next_slot(spec, target_state_2)
|
||||||
|
|
||||||
|
# Create and store block new head block on target state 1
|
||||||
|
head_block = build_empty_block_for_next_slot(spec, target_state_1)
|
||||||
|
signed_head_block = state_transition_and_sign_block(spec, target_state_1, head_block)
|
||||||
|
spec.on_block(store, signed_head_block)
|
||||||
|
|
||||||
|
# Attest to head of chain 1
|
||||||
|
attestation = get_valid_attestation(spec, target_state_1, slot=head_block.slot, signed=False)
|
||||||
|
epoch = spec.compute_epoch_at_slot(attestation.data.slot)
|
||||||
|
|
||||||
|
# Set attestation target to be from chain 2
|
||||||
|
attestation.data.target = spec.Checkpoint(epoch=epoch, root=spec.get_block_root(target_state_2, epoch))
|
||||||
|
sign_attestation(spec, state, attestation)
|
||||||
|
|
||||||
|
assert attestation.data.target.epoch == spec.GENESIS_EPOCH + 1
|
||||||
|
assert spec.compute_epoch_at_slot(attestation.data.slot) == spec.GENESIS_EPOCH + 1
|
||||||
|
assert spec.get_block_root(target_state_1, epoch) != attestation.data.target.root
|
||||||
|
|
||||||
|
run_on_attestation(spec, state, store, attestation, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_on_attestation_target_not_in_store(spec, state):
|
def test_on_attestation_target_not_in_store(spec, state):
|
||||||
|
|
|
@ -184,7 +184,7 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
|
||||||
def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state):
|
def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state):
|
||||||
# Initialization
|
# Initialization
|
||||||
store = spec.get_forkchoice_store(state)
|
store = spec.get_forkchoice_store(state)
|
||||||
time = 100
|
time = 0
|
||||||
spec.on_tick(store, time)
|
spec.on_tick(store, time)
|
||||||
|
|
||||||
next_epoch(spec, state)
|
next_epoch(spec, state)
|
||||||
|
@ -215,7 +215,7 @@ def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state):
|
||||||
def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
|
def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
|
||||||
# Initialization
|
# Initialization
|
||||||
store = spec.get_forkchoice_store(state)
|
store = spec.get_forkchoice_store(state)
|
||||||
time = 100
|
time = 0
|
||||||
spec.on_tick(store, time)
|
spec.on_tick(store, time)
|
||||||
|
|
||||||
next_epoch(spec, state)
|
next_epoch(spec, state)
|
||||||
|
|
|
@ -1,10 +1,10 @@
|
||||||
from eth2spec.test.context import spec_test, with_phases, single_phase
|
from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase
|
||||||
from eth2spec.test.helpers.deposits import (
|
from eth2spec.test.helpers.deposits import (
|
||||||
prepare_genesis_deposits,
|
prepare_genesis_deposits,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases(([PHASE0]))
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_initialize_beacon_state_from_eth1(spec):
|
def test_initialize_beacon_state_from_eth1(spec):
|
||||||
|
@ -21,7 +21,7 @@ def test_initialize_beacon_state_from_eth1(spec):
|
||||||
# initialize beacon_state
|
# initialize beacon_state
|
||||||
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
|
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
|
||||||
|
|
||||||
assert state.genesis_time == eth1_timestamp - eth1_timestamp % spec.MIN_GENESIS_DELAY + 2 * spec.MIN_GENESIS_DELAY
|
assert state.genesis_time == eth1_timestamp + spec.GENESIS_DELAY
|
||||||
assert len(state.validators) == deposit_count
|
assert len(state.validators) == deposit_count
|
||||||
assert state.eth1_data.deposit_root == deposit_root
|
assert state.eth1_data.deposit_root == deposit_root
|
||||||
assert state.eth1_data.deposit_count == deposit_count
|
assert state.eth1_data.deposit_count == deposit_count
|
||||||
|
@ -32,7 +32,7 @@ def test_initialize_beacon_state_from_eth1(spec):
|
||||||
yield 'state', state
|
yield 'state', state
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_initialize_beacon_state_some_small_balances(spec):
|
def test_initialize_beacon_state_some_small_balances(spec):
|
||||||
|
@ -57,7 +57,7 @@ def test_initialize_beacon_state_some_small_balances(spec):
|
||||||
# initialize beacon_state
|
# initialize beacon_state
|
||||||
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
|
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
|
||||||
|
|
||||||
assert state.genesis_time == eth1_timestamp - eth1_timestamp % spec.MIN_GENESIS_DELAY + 2 * spec.MIN_GENESIS_DELAY
|
assert state.genesis_time == eth1_timestamp + spec.GENESIS_DELAY
|
||||||
assert len(state.validators) == small_deposit_count
|
assert len(state.validators) == small_deposit_count
|
||||||
assert state.eth1_data.deposit_root == deposit_root
|
assert state.eth1_data.deposit_root == deposit_root
|
||||||
assert state.eth1_data.deposit_count == len(deposits)
|
assert state.eth1_data.deposit_count == len(deposits)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
from eth2spec.test.context import spec_test, with_phases, single_phase
|
from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase
|
||||||
from eth2spec.test.helpers.deposits import (
|
from eth2spec.test.helpers.deposits import (
|
||||||
prepare_genesis_deposits,
|
prepare_genesis_deposits,
|
||||||
)
|
)
|
||||||
|
@ -25,7 +25,7 @@ def run_is_valid_genesis_state(spec, state, valid=True):
|
||||||
assert is_valid == valid
|
assert is_valid == valid
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_is_valid_genesis_state_true(spec):
|
def test_is_valid_genesis_state_true(spec):
|
||||||
|
@ -34,7 +34,7 @@ def test_is_valid_genesis_state_true(spec):
|
||||||
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_is_valid_genesis_state_false_invalid_timestamp(spec):
|
def test_is_valid_genesis_state_false_invalid_timestamp(spec):
|
||||||
|
@ -44,7 +44,7 @@ def test_is_valid_genesis_state_false_invalid_timestamp(spec):
|
||||||
yield from run_is_valid_genesis_state(spec, state, valid=False)
|
yield from run_is_valid_genesis_state(spec, state, valid=False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_is_valid_genesis_state_true_more_balance(spec):
|
def test_is_valid_genesis_state_true_more_balance(spec):
|
||||||
|
@ -55,7 +55,7 @@ def test_is_valid_genesis_state_true_more_balance(spec):
|
||||||
|
|
||||||
|
|
||||||
# TODO: not part of the genesis function yet. Erroneously merged.
|
# TODO: not part of the genesis function yet. Erroneously merged.
|
||||||
# @with_phases(['phase0'])
|
# @with_phases([PHASE0])
|
||||||
# @spec_test
|
# @spec_test
|
||||||
# def test_is_valid_genesis_state_false_not_enough_balance(spec):
|
# def test_is_valid_genesis_state_false_not_enough_balance(spec):
|
||||||
# state = create_valid_beacon_state(spec)
|
# state = create_valid_beacon_state(spec)
|
||||||
|
@ -64,7 +64,7 @@ def test_is_valid_genesis_state_true_more_balance(spec):
|
||||||
# yield from run_is_valid_genesis_state(spec, state, valid=False)
|
# yield from run_is_valid_genesis_state(spec, state, valid=False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_is_valid_genesis_state_true_one_more_validator(spec):
|
def test_is_valid_genesis_state_true_one_more_validator(spec):
|
||||||
|
@ -78,7 +78,7 @@ def test_is_valid_genesis_state_true_one_more_validator(spec):
|
||||||
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_is_valid_genesis_state_false_not_enough_validator(spec):
|
def test_is_valid_genesis_state_false_not_enough_validator(spec):
|
||||||
|
|
|
@ -1,11 +1,12 @@
|
||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
from eth2spec.test.context import expect_assertion_error
|
from eth2spec.test.context import expect_assertion_error, PHASE0, PHASE1
|
||||||
from eth2spec.test.helpers.state import next_slot, state_transition_and_sign_block
|
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot, transition_to
|
||||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
from eth2spec.test.helpers.keys import privkeys
|
||||||
from eth2spec.utils import bls
|
from eth2spec.utils import bls
|
||||||
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
||||||
|
from lru import LRU
|
||||||
|
|
||||||
|
|
||||||
def run_attestation_processing(spec, state, attestation, valid=True):
|
def run_attestation_processing(spec, state, attestation, valid=True):
|
||||||
|
@ -43,7 +44,7 @@ def run_attestation_processing(spec, state, attestation, valid=True):
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
|
|
||||||
def build_attestation_data(spec, state, slot, index):
|
def build_attestation_data(spec, state, slot, index, shard_transition=None, on_time=True):
|
||||||
assert state.slot >= slot
|
assert state.slot >= slot
|
||||||
|
|
||||||
if slot == state.slot:
|
if slot == state.slot:
|
||||||
|
@ -66,7 +67,7 @@ def build_attestation_data(spec, state, slot, index):
|
||||||
source_epoch = state.current_justified_checkpoint.epoch
|
source_epoch = state.current_justified_checkpoint.epoch
|
||||||
source_root = state.current_justified_checkpoint.root
|
source_root = state.current_justified_checkpoint.root
|
||||||
|
|
||||||
return spec.AttestationData(
|
attestation_data = spec.AttestationData(
|
||||||
slot=slot,
|
slot=slot,
|
||||||
index=index,
|
index=index,
|
||||||
beacon_block_root=block_root,
|
beacon_block_root=block_root,
|
||||||
|
@ -74,14 +75,34 @@ def build_attestation_data(spec, state, slot, index):
|
||||||
target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root),
|
target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root),
|
||||||
)
|
)
|
||||||
|
|
||||||
|
if spec.fork == PHASE1:
|
||||||
|
if shard_transition is not None:
|
||||||
|
lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
|
||||||
|
attestation_data.shard_head_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
|
||||||
|
attestation_data.shard_transition_root = shard_transition.hash_tree_root()
|
||||||
|
else:
|
||||||
|
# No shard transition
|
||||||
|
shard = spec.get_shard(state, spec.Attestation(data=attestation_data))
|
||||||
|
if on_time:
|
||||||
|
temp_state = state.copy()
|
||||||
|
next_slot(spec, temp_state)
|
||||||
|
shard_transition = spec.get_shard_transition(temp_state, shard, [])
|
||||||
|
lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
|
||||||
|
attestation_data.shard_head_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
|
||||||
|
attestation_data.shard_transition_root = shard_transition.hash_tree_root()
|
||||||
|
else:
|
||||||
|
attestation_data.shard_head_root = state.shard_states[shard].transition_digest
|
||||||
|
attestation_data.shard_transition_root = spec.Root()
|
||||||
|
return attestation_data
|
||||||
|
|
||||||
|
|
||||||
def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False):
|
def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False):
|
||||||
shard = spec.get_shard(state, attestation)
|
shard = spec.get_shard(state, attestation)
|
||||||
|
offset_slots = spec.compute_offset_slots(
|
||||||
next_state = state.copy()
|
spec.get_latest_slot_for_shard(state, shard),
|
||||||
next_slot(spec, next_state)
|
attestation.data.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY,
|
||||||
offset_slots = spec.get_offset_slots(next_state, spec.get_latest_slot_for_shard(next_state, shard))
|
)
|
||||||
for offset_slot in offset_slots:
|
for _ in offset_slots:
|
||||||
attestation.custody_bits_blocks.append(
|
attestation.custody_bits_blocks.append(
|
||||||
Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits])
|
Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits])
|
||||||
)
|
)
|
||||||
|
@ -92,7 +113,7 @@ def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False)
|
||||||
return attestation
|
return attestation
|
||||||
|
|
||||||
|
|
||||||
def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=False):
|
def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_transition=None, signed=False):
|
||||||
'''
|
'''
|
||||||
Construct on-time attestation for next slot
|
Construct on-time attestation for next slot
|
||||||
'''
|
'''
|
||||||
|
@ -101,7 +122,15 @@ def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=Fal
|
||||||
if index is None:
|
if index is None:
|
||||||
index = 0
|
index = 0
|
||||||
|
|
||||||
return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=True)
|
return get_valid_attestation(
|
||||||
|
spec,
|
||||||
|
state,
|
||||||
|
slot=slot,
|
||||||
|
index=index,
|
||||||
|
shard_transition=shard_transition,
|
||||||
|
signed=signed,
|
||||||
|
on_time=True,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False):
|
def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False):
|
||||||
|
@ -116,13 +145,24 @@ def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False)
|
||||||
return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=False)
|
return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=False)
|
||||||
|
|
||||||
|
|
||||||
def get_valid_attestation(spec, state, slot=None, index=None, empty=False, signed=False, on_time=True):
|
def get_valid_attestation(spec,
|
||||||
|
state,
|
||||||
|
slot=None,
|
||||||
|
index=None,
|
||||||
|
filter_participant_set=None,
|
||||||
|
shard_transition=None,
|
||||||
|
signed=False,
|
||||||
|
on_time=True):
|
||||||
|
# If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed.
|
||||||
|
# Thus strictly speaking invalid when no participant is added later.
|
||||||
if slot is None:
|
if slot is None:
|
||||||
slot = state.slot
|
slot = state.slot
|
||||||
if index is None:
|
if index is None:
|
||||||
index = 0
|
index = 0
|
||||||
|
|
||||||
attestation_data = build_attestation_data(spec, state, slot, index)
|
attestation_data = build_attestation_data(
|
||||||
|
spec, state, slot=slot, index=index, shard_transition=shard_transition, on_time=on_time
|
||||||
|
)
|
||||||
|
|
||||||
beacon_committee = spec.get_beacon_committee(
|
beacon_committee = spec.get_beacon_committee(
|
||||||
state,
|
state,
|
||||||
|
@ -136,12 +176,10 @@ def get_valid_attestation(spec, state, slot=None, index=None, empty=False, signe
|
||||||
aggregation_bits=aggregation_bits,
|
aggregation_bits=aggregation_bits,
|
||||||
data=attestation_data,
|
data=attestation_data,
|
||||||
)
|
)
|
||||||
if not empty:
|
# fill the attestation with (optionally filtered) participants, and optionally sign it
|
||||||
fill_aggregate_attestation(spec, state, attestation)
|
fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set)
|
||||||
if signed:
|
|
||||||
sign_attestation(spec, state, attestation)
|
|
||||||
|
|
||||||
if spec.fork == 'phase1' and on_time:
|
if spec.fork == PHASE1 and on_time:
|
||||||
attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed)
|
attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed)
|
||||||
|
|
||||||
return attestation
|
return attestation
|
||||||
|
@ -163,7 +201,7 @@ def sign_aggregate_attestation(spec, state, attestation_data, participants: List
|
||||||
|
|
||||||
|
|
||||||
def sign_indexed_attestation(spec, state, indexed_attestation):
|
def sign_indexed_attestation(spec, state, indexed_attestation):
|
||||||
if spec.fork == 'phase0':
|
if spec.fork == PHASE0:
|
||||||
participants = indexed_attestation.attesting_indices
|
participants = indexed_attestation.attesting_indices
|
||||||
data = indexed_attestation.data
|
data = indexed_attestation.data
|
||||||
indexed_attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
|
indexed_attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
|
||||||
|
@ -173,7 +211,10 @@ def sign_indexed_attestation(spec, state, indexed_attestation):
|
||||||
indexed_attestation.attestation.aggregation_bits,
|
indexed_attestation.attestation.aggregation_bits,
|
||||||
)
|
)
|
||||||
data = indexed_attestation.attestation.data
|
data = indexed_attestation.attestation.data
|
||||||
indexed_attestation.attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
|
if any(indexed_attestation.attestation.custody_bits_blocks):
|
||||||
|
sign_on_time_attestation(spec, state, indexed_attestation.attestation)
|
||||||
|
else:
|
||||||
|
indexed_attestation.attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
|
||||||
|
|
||||||
|
|
||||||
def sign_on_time_attestation(spec, state, attestation):
|
def sign_on_time_attestation(spec, state, attestation):
|
||||||
|
@ -213,7 +254,7 @@ def get_attestation_custody_signature(spec, state, attestation_data, block_index
|
||||||
|
|
||||||
|
|
||||||
def sign_attestation(spec, state, attestation):
|
def sign_attestation(spec, state, attestation):
|
||||||
if spec.fork == 'phase1' and any(attestation.custody_bits_blocks):
|
if spec.fork == PHASE1 and any(attestation.custody_bits_blocks):
|
||||||
sign_on_time_attestation(spec, state, attestation)
|
sign_on_time_attestation(spec, state, attestation)
|
||||||
return
|
return
|
||||||
|
|
||||||
|
@ -232,21 +273,31 @@ def get_attestation_signature(spec, state, attestation_data, privkey):
|
||||||
return bls.Sign(privkey, signing_root)
|
return bls.Sign(privkey, signing_root)
|
||||||
|
|
||||||
|
|
||||||
def fill_aggregate_attestation(spec, state, attestation, signed=False):
|
def fill_aggregate_attestation(spec, state, attestation, signed=False, filter_participant_set=None):
|
||||||
|
"""
|
||||||
|
`signed`: Signing is optional.
|
||||||
|
`filter_participant_set`: Optional, filters the full committee indices set (default) to a subset that participates
|
||||||
|
"""
|
||||||
beacon_committee = spec.get_beacon_committee(
|
beacon_committee = spec.get_beacon_committee(
|
||||||
state,
|
state,
|
||||||
attestation.data.slot,
|
attestation.data.slot,
|
||||||
attestation.data.index,
|
attestation.data.index,
|
||||||
)
|
)
|
||||||
|
# By default, have everyone participate
|
||||||
|
participants = set(beacon_committee)
|
||||||
|
# But optionally filter the participants to a smaller amount
|
||||||
|
if filter_participant_set is not None:
|
||||||
|
participants = filter_participant_set(participants)
|
||||||
for i in range(len(beacon_committee)):
|
for i in range(len(beacon_committee)):
|
||||||
attestation.aggregation_bits[i] = True
|
attestation.aggregation_bits[i] = beacon_committee[i] in participants
|
||||||
|
|
||||||
if signed:
|
if signed and len(participants) > 0:
|
||||||
sign_attestation(spec, state, attestation)
|
sign_attestation(spec, state, attestation)
|
||||||
|
|
||||||
|
|
||||||
def add_attestations_to_state(spec, state, attestations, slot):
|
def add_attestations_to_state(spec, state, attestations, slot):
|
||||||
spec.process_slots(state, slot)
|
if state.slot < slot:
|
||||||
|
spec.process_slots(state, slot)
|
||||||
for attestation in attestations:
|
for attestation in attestations:
|
||||||
spec.process_attestation(state, attestation)
|
spec.process_attestation(state, attestation)
|
||||||
|
|
||||||
|
@ -277,7 +328,82 @@ def next_epoch_with_attestations(spec,
|
||||||
spec, post_state, slot_to_attest, index=index, signed=True, on_time=False)
|
spec, post_state, slot_to_attest, index=index, signed=True, on_time=False)
|
||||||
block.body.attestations.append(prev_attestation)
|
block.body.attestations.append(prev_attestation)
|
||||||
|
|
||||||
|
if spec.fork == PHASE1:
|
||||||
|
fill_block_shard_transitions_by_attestations(spec, post_state, block)
|
||||||
|
|
||||||
signed_block = state_transition_and_sign_block(spec, post_state, block)
|
signed_block = state_transition_and_sign_block(spec, post_state, block)
|
||||||
signed_blocks.append(signed_block)
|
signed_blocks.append(signed_block)
|
||||||
|
|
||||||
return state, signed_blocks, post_state
|
return state, signed_blocks, post_state
|
||||||
|
|
||||||
|
|
||||||
|
def prepare_state_with_attestations(spec, state, participation_fn=None):
|
||||||
|
"""
|
||||||
|
Prepare state with attestations according to the ``participation_fn``.
|
||||||
|
If no ``participation_fn``, default to "full" -- max committee participation at each slot.
|
||||||
|
|
||||||
|
participation_fn: (slot, committee_index, committee_indices_set) -> participants_indices_set
|
||||||
|
"""
|
||||||
|
# Go to start of next epoch to ensure can have full participation
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
start_slot = state.slot
|
||||||
|
start_epoch = spec.get_current_epoch(state)
|
||||||
|
next_epoch_start_slot = spec.compute_start_slot_at_epoch(start_epoch + 1)
|
||||||
|
attestations = []
|
||||||
|
for _ in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
||||||
|
# create an attestation for each index in each slot in epoch
|
||||||
|
if state.slot < next_epoch_start_slot:
|
||||||
|
for committee_index in range(spec.get_committee_count_at_slot(state, state.slot)):
|
||||||
|
def temp_participants_filter(comm):
|
||||||
|
if participation_fn is None:
|
||||||
|
return comm
|
||||||
|
else:
|
||||||
|
return participation_fn(state.slot, committee_index, comm)
|
||||||
|
attestation = get_valid_attestation(spec, state, index=committee_index,
|
||||||
|
filter_participant_set=temp_participants_filter, signed=True)
|
||||||
|
if any(attestation.aggregation_bits): # Only if there is at least 1 participant.
|
||||||
|
attestations.append(attestation)
|
||||||
|
# fill each created slot in state after inclusion delay
|
||||||
|
if state.slot >= start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY:
|
||||||
|
inclusion_slot = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||||
|
include_attestations = [att for att in attestations if att.data.slot == inclusion_slot]
|
||||||
|
add_attestations_to_state(spec, state, include_attestations, state.slot)
|
||||||
|
next_slot(spec, state)
|
||||||
|
|
||||||
|
assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||||
|
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||||
|
|
||||||
|
return attestations
|
||||||
|
|
||||||
|
|
||||||
|
_prep_state_cache_dict = LRU(size=10)
|
||||||
|
|
||||||
|
|
||||||
|
def cached_prepare_state_with_attestations(spec, state):
|
||||||
|
"""
|
||||||
|
Cached version of prepare_state_with_attestations,
|
||||||
|
but does not return anything, and does not support a participation fn argument
|
||||||
|
"""
|
||||||
|
# If the pre-state is not already known in the LRU, then take it,
|
||||||
|
# prepare it with attestations, and put it in the LRU.
|
||||||
|
# The input state is likely already cached, so the hash-tree-root does not affect speed.
|
||||||
|
key = (spec.fork, state.hash_tree_root())
|
||||||
|
global _prep_state_cache_dict
|
||||||
|
if key not in _prep_state_cache_dict:
|
||||||
|
prepare_state_with_attestations(spec, state)
|
||||||
|
_prep_state_cache_dict[key] = state.get_backing() # cache the tree structure, not the view wrapping it.
|
||||||
|
|
||||||
|
# Put the LRU cache result into the state view, as if we transitioned the original view
|
||||||
|
state.set_backing(_prep_state_cache_dict[key])
|
||||||
|
|
||||||
|
|
||||||
|
def fill_block_shard_transitions_by_attestations(spec, state, block):
|
||||||
|
block.body.shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
|
||||||
|
for attestation in block.body.attestations:
|
||||||
|
shard = spec.get_shard(state, attestation)
|
||||||
|
if attestation.data.slot == state.slot:
|
||||||
|
temp_state = state.copy()
|
||||||
|
transition_to(spec, temp_state, slot=block.slot)
|
||||||
|
shard_transition = spec.get_shard_transition(temp_state, shard, [])
|
||||||
|
block.body.shard_transitions[shard] = shard_transition
|
||||||
|
|
|
@ -1,4 +1,5 @@
|
||||||
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
|
from eth2spec.test.context import PHASE1
|
||||||
|
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation, sign_indexed_attestation
|
||||||
|
|
||||||
|
|
||||||
def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
|
def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
|
||||||
|
@ -16,11 +17,31 @@ def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_valid_attester_slashing_by_indices(spec, state, indices_1, indices_2=None, signed_1=False, signed_2=False):
|
||||||
|
if indices_2 is None:
|
||||||
|
indices_2 = indices_1
|
||||||
|
|
||||||
|
assert indices_1 == sorted(indices_1)
|
||||||
|
assert indices_2 == sorted(indices_2)
|
||||||
|
|
||||||
|
attester_slashing = get_valid_attester_slashing(spec, state)
|
||||||
|
|
||||||
|
attester_slashing.attestation_1.attesting_indices = indices_1
|
||||||
|
attester_slashing.attestation_2.attesting_indices = indices_2
|
||||||
|
|
||||||
|
if signed_1:
|
||||||
|
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
|
||||||
|
if signed_2:
|
||||||
|
sign_indexed_attestation(spec, state, attester_slashing.attestation_2)
|
||||||
|
|
||||||
|
return attester_slashing
|
||||||
|
|
||||||
|
|
||||||
def get_indexed_attestation_participants(spec, indexed_att):
|
def get_indexed_attestation_participants(spec, indexed_att):
|
||||||
"""
|
"""
|
||||||
Wrapper around index-attestation to return the list of participant indices, regardless of spec phase.
|
Wrapper around index-attestation to return the list of participant indices, regardless of spec phase.
|
||||||
"""
|
"""
|
||||||
if spec.fork == "phase1":
|
if spec.fork == PHASE1:
|
||||||
return list(spec.get_indices_from_committee(
|
return list(spec.get_indices_from_committee(
|
||||||
indexed_att.committee,
|
indexed_att.committee,
|
||||||
indexed_att.attestation.aggregation_bits,
|
indexed_att.attestation.aggregation_bits,
|
||||||
|
@ -33,21 +54,21 @@ def set_indexed_attestation_participants(spec, indexed_att, participants):
|
||||||
"""
|
"""
|
||||||
Wrapper around index-attestation to return the list of participant indices, regardless of spec phase.
|
Wrapper around index-attestation to return the list of participant indices, regardless of spec phase.
|
||||||
"""
|
"""
|
||||||
if spec.fork == "phase1":
|
if spec.fork == PHASE1:
|
||||||
indexed_att.attestation.aggregation_bits = [bool(i in participants) for i in indexed_att.committee]
|
indexed_att.attestation.aggregation_bits = [bool(i in participants) for i in indexed_att.committee]
|
||||||
else:
|
else:
|
||||||
indexed_att.attesting_indices = participants
|
indexed_att.attesting_indices = participants
|
||||||
|
|
||||||
|
|
||||||
def get_attestation_1_data(spec, att_slashing):
|
def get_attestation_1_data(spec, att_slashing):
|
||||||
if spec.fork == "phase1":
|
if spec.fork == PHASE1:
|
||||||
return att_slashing.attestation_1.attestation.data
|
return att_slashing.attestation_1.attestation.data
|
||||||
else:
|
else:
|
||||||
return att_slashing.attestation_1.data
|
return att_slashing.attestation_1.data
|
||||||
|
|
||||||
|
|
||||||
def get_attestation_2_data(spec, att_slashing):
|
def get_attestation_2_data(spec, att_slashing):
|
||||||
if spec.fork == "phase1":
|
if spec.fork == PHASE1:
|
||||||
return att_slashing.attestation_2.attestation.data
|
return att_slashing.attestation_2.attestation.data
|
||||||
else:
|
else:
|
||||||
return att_slashing.attestation_2.data
|
return att_slashing.attestation_2.data
|
||||||
|
|
|
@ -15,7 +15,8 @@ def get_proposer_index_maybe(spec, state, slot, proposer_index=None):
|
||||||
" Signing block is slow due to transition for proposer index calculation.")
|
" Signing block is slow due to transition for proposer index calculation.")
|
||||||
# use stub state to get proposer index of future slot
|
# use stub state to get proposer index of future slot
|
||||||
stub_state = state.copy()
|
stub_state = state.copy()
|
||||||
spec.process_slots(stub_state, slot)
|
if stub_state.slot < slot:
|
||||||
|
spec.process_slots(stub_state, slot)
|
||||||
proposer_index = spec.get_beacon_proposer_index(stub_state)
|
proposer_index = spec.get_beacon_proposer_index(stub_state)
|
||||||
return proposer_index
|
return proposer_index
|
||||||
|
|
||||||
|
@ -52,15 +53,19 @@ def sign_block(spec, state, block, proposer_index=None):
|
||||||
|
|
||||||
|
|
||||||
def transition_unsigned_block(spec, state, block):
|
def transition_unsigned_block(spec, state, block):
|
||||||
spec.process_slots(state, block.slot)
|
assert state.slot < block.slot # Preserve assertion from state transition to avoid strange pre-states from testing
|
||||||
|
if state.slot < block.slot:
|
||||||
|
spec.process_slots(state, block.slot)
|
||||||
|
assert state.latest_block_header.slot < block.slot # There may not already be a block in this slot or past it.
|
||||||
|
assert state.slot == block.slot # The block must be for this slot
|
||||||
spec.process_block(state, block)
|
spec.process_block(state, block)
|
||||||
|
|
||||||
|
|
||||||
def apply_empty_block(spec, state):
|
def apply_empty_block(spec, state, slot=None):
|
||||||
"""
|
"""
|
||||||
Transition via an empty block (on current slot, assuming no block has been applied yet).
|
Transition via an empty block (on current slot, assuming no block has been applied yet).
|
||||||
"""
|
"""
|
||||||
block = build_empty_block(spec, state)
|
block = build_empty_block(spec, state, slot)
|
||||||
transition_unsigned_block(spec, state, block)
|
transition_unsigned_block(spec, state, block)
|
||||||
|
|
||||||
|
|
||||||
|
@ -73,22 +78,35 @@ def build_empty_block(spec, state, slot=None):
|
||||||
slot = state.slot
|
slot = state.slot
|
||||||
if slot < state.slot:
|
if slot < state.slot:
|
||||||
raise Exception("build_empty_block cannot build blocks for past slots")
|
raise Exception("build_empty_block cannot build blocks for past slots")
|
||||||
if slot > state.slot:
|
if state.slot < slot:
|
||||||
# transition forward in copied state to grab relevant data from state
|
# transition forward in copied state to grab relevant data from state
|
||||||
state = state.copy()
|
state = state.copy()
|
||||||
spec.process_slots(state, slot)
|
spec.process_slots(state, slot)
|
||||||
|
|
||||||
|
state, parent_block_root = get_state_and_beacon_parent_root_at_slot(spec, state, slot)
|
||||||
empty_block = spec.BeaconBlock()
|
empty_block = spec.BeaconBlock()
|
||||||
empty_block.slot = slot
|
empty_block.slot = slot
|
||||||
empty_block.proposer_index = spec.get_beacon_proposer_index(state)
|
empty_block.proposer_index = spec.get_beacon_proposer_index(state)
|
||||||
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
|
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
|
||||||
previous_block_header = state.latest_block_header.copy()
|
empty_block.parent_root = parent_block_root
|
||||||
if previous_block_header.state_root == spec.Root():
|
|
||||||
previous_block_header.state_root = hash_tree_root(state)
|
|
||||||
empty_block.parent_root = hash_tree_root(previous_block_header)
|
|
||||||
apply_randao_reveal(spec, state, empty_block)
|
apply_randao_reveal(spec, state, empty_block)
|
||||||
return empty_block
|
return empty_block
|
||||||
|
|
||||||
|
|
||||||
def build_empty_block_for_next_slot(spec, state):
|
def build_empty_block_for_next_slot(spec, state):
|
||||||
return build_empty_block(spec, state, state.slot + 1)
|
return build_empty_block(spec, state, state.slot + 1)
|
||||||
|
|
||||||
|
|
||||||
|
def get_state_and_beacon_parent_root_at_slot(spec, state, slot):
|
||||||
|
if slot < state.slot:
|
||||||
|
raise Exception("Cannot build blocks for past slots")
|
||||||
|
if slot > state.slot:
|
||||||
|
# transition forward in copied state to grab relevant data from state
|
||||||
|
state = state.copy()
|
||||||
|
spec.process_slots(state, slot)
|
||||||
|
|
||||||
|
previous_block_header = state.latest_block_header.copy()
|
||||||
|
if previous_block_header.state_root == spec.Root():
|
||||||
|
previous_block_header.state_root = hash_tree_root(state)
|
||||||
|
beacon_parent_root = hash_tree_root(previous_block_header)
|
||||||
|
return state, beacon_parent_root
|
||||||
|
|
|
@ -5,6 +5,17 @@ from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||||
from eth2spec.utils.ssz.ssz_typing import List
|
from eth2spec.utils.ssz.ssz_typing import List
|
||||||
|
|
||||||
|
|
||||||
|
def mock_deposit(spec, state, index):
|
||||||
|
"""
|
||||||
|
Mock validator at ``index`` as having just made a deposit
|
||||||
|
"""
|
||||||
|
assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
|
||||||
|
state.validators[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
state.validators[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
|
||||||
|
assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
|
||||||
|
|
||||||
|
|
||||||
def build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=False):
|
def build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=False):
|
||||||
deposit_data = spec.DepositData(
|
deposit_data = spec.DepositData(
|
||||||
pubkey=pubkey,
|
pubkey=pubkey,
|
||||||
|
|
|
@ -2,5 +2,5 @@ from py_ecc.bls import G2ProofOfPossession as bls
|
||||||
from eth2spec.phase0 import spec
|
from eth2spec.phase0 import spec
|
||||||
|
|
||||||
privkeys = [i + 1 for i in range(spec.SLOTS_PER_EPOCH * 256)]
|
privkeys = [i + 1 for i in range(spec.SLOTS_PER_EPOCH * 256)]
|
||||||
pubkeys = [bls.PrivToPub(privkey) for privkey in privkeys]
|
pubkeys = [bls.SkToPk(privkey) for privkey in privkeys]
|
||||||
pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
|
pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
|
||||||
|
|
|
@ -1,66 +0,0 @@
|
||||||
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
|
||||||
from eth2spec.utils import bls
|
|
||||||
|
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
|
||||||
import eth2spec.test.helpers.attestations as phase0_attestations
|
|
||||||
from eth2spec.test.helpers.state import next_slot
|
|
||||||
|
|
||||||
|
|
||||||
def get_valid_on_time_attestation(spec, state, index=None, signed=False):
|
|
||||||
'''
|
|
||||||
Construct on-time attestation for next slot
|
|
||||||
'''
|
|
||||||
if index is None:
|
|
||||||
index = 0
|
|
||||||
|
|
||||||
attestation = phase0_attestations.get_valid_attestation(spec, state, state.slot, index, False)
|
|
||||||
shard = spec.get_shard(state, attestation)
|
|
||||||
|
|
||||||
next_state = state.copy()
|
|
||||||
next_slot(spec, next_state)
|
|
||||||
offset_slots = spec.get_offset_slots(next_state, spec.get_latest_slot_for_shard(next_state, shard))
|
|
||||||
for offset_slot in offset_slots:
|
|
||||||
attestation.custody_bits_blocks.append(
|
|
||||||
Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits])
|
|
||||||
)
|
|
||||||
|
|
||||||
if signed:
|
|
||||||
sign_attestation(spec, state, attestation)
|
|
||||||
|
|
||||||
return attestation
|
|
||||||
|
|
||||||
|
|
||||||
def sign_attestation(spec, state, attestation):
|
|
||||||
if not any(attestation.custody_bits_blocks):
|
|
||||||
phase0_attestations.sign_attestation(spec, state, attestation)
|
|
||||||
return
|
|
||||||
|
|
||||||
committee = spec.get_beacon_committee(state, attestation.data.slot, attestation.data.index)
|
|
||||||
signatures = []
|
|
||||||
for block_index, custody_bits in enumerate(attestation.custody_bits_blocks):
|
|
||||||
for participant, abit, cbit in zip(committee, attestation.aggregation_bits, custody_bits):
|
|
||||||
if not abit:
|
|
||||||
continue
|
|
||||||
signatures.append(get_attestation_custody_signature(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
attestation.data,
|
|
||||||
block_index,
|
|
||||||
cbit,
|
|
||||||
privkeys[participant]
|
|
||||||
))
|
|
||||||
|
|
||||||
attestation.signature = bls.Aggregate(signatures)
|
|
||||||
|
|
||||||
|
|
||||||
def get_attestation_custody_signature(spec, state, attestation_data, block_index, bit, privkey):
|
|
||||||
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
|
|
||||||
signing_root = spec.compute_signing_root(
|
|
||||||
spec.AttestationCustodyBitWrapper(
|
|
||||||
attestation_data.hash_tree_root(),
|
|
||||||
block_index,
|
|
||||||
bit,
|
|
||||||
),
|
|
||||||
domain,
|
|
||||||
)
|
|
||||||
return bls.Sign(privkey, signing_root)
|
|
|
@ -1,71 +0,0 @@
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
|
||||||
from eth2spec.utils import bls
|
|
||||||
from eth2spec.utils.bls import only_with_bls
|
|
||||||
from eth2spec.utils.ssz.ssz_impl import (
|
|
||||||
hash_tree_root,
|
|
||||||
)
|
|
||||||
|
|
||||||
from .attestations import (
|
|
||||||
sign_shard_attestation,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@only_with_bls()
|
|
||||||
def sign_shard_block(spec, beacon_state, shard_state, block, proposer_index=None):
|
|
||||||
if proposer_index is None:
|
|
||||||
proposer_index = spec.get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
|
|
||||||
|
|
||||||
privkey = privkeys[proposer_index]
|
|
||||||
domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_PROPOSER, spec.compute_epoch_of_shard_slot(block.slot))
|
|
||||||
signing_root = spec.compute_signing_root(block, domain)
|
|
||||||
block.signature = bls.Sign(privkey, signing_root)
|
|
||||||
|
|
||||||
|
|
||||||
def build_empty_shard_block(spec,
|
|
||||||
beacon_state,
|
|
||||||
shard_state,
|
|
||||||
slot,
|
|
||||||
signed=False,
|
|
||||||
full_attestation=False):
|
|
||||||
if slot is None:
|
|
||||||
slot = shard_state.slot
|
|
||||||
|
|
||||||
previous_beacon_header = beacon_state.latest_block_header.copy()
|
|
||||||
if previous_beacon_header.state_root == spec.Bytes32():
|
|
||||||
previous_beacon_header.state_root = beacon_state.hash_tree_root()
|
|
||||||
beacon_block_root = hash_tree_root(previous_beacon_header)
|
|
||||||
|
|
||||||
previous_block_header = shard_state.latest_block_header.copy()
|
|
||||||
if previous_block_header.state_root == spec.Bytes32():
|
|
||||||
previous_block_header.state_root = shard_state.hash_tree_root()
|
|
||||||
parent_root = hash_tree_root(previous_block_header)
|
|
||||||
|
|
||||||
block = spec.ShardBlock(
|
|
||||||
shard=shard_state.shard,
|
|
||||||
slot=slot,
|
|
||||||
beacon_block_root=beacon_block_root,
|
|
||||||
parent_root=parent_root,
|
|
||||||
block_size_sum=shard_state.block_size_sum + spec.SHARD_HEADER_SIZE,
|
|
||||||
)
|
|
||||||
|
|
||||||
if full_attestation:
|
|
||||||
shard_committee = spec.get_shard_committee(beacon_state, shard_state.shard, block.slot)
|
|
||||||
block.aggregation_bits = list(
|
|
||||||
(True,) * len(shard_committee) +
|
|
||||||
(False,) * (spec.MAX_PERIOD_COMMITTEE_SIZE * 2 - len(shard_committee))
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
shard_committee = []
|
|
||||||
|
|
||||||
block.attestations = sign_shard_attestation(
|
|
||||||
spec,
|
|
||||||
beacon_state,
|
|
||||||
shard_state,
|
|
||||||
block,
|
|
||||||
participants=shard_committee,
|
|
||||||
)
|
|
||||||
|
|
||||||
if signed:
|
|
||||||
sign_shard_block(spec, beacon_state, shard_state, block)
|
|
||||||
|
|
||||||
return block
|
|
|
@ -1,18 +0,0 @@
|
||||||
from eth2spec.test.helpers.phase1.shard_block import sign_shard_block
|
|
||||||
|
|
||||||
|
|
||||||
def configure_shard_state(spec, beacon_state, shard=0):
|
|
||||||
beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH)
|
|
||||||
shard_state = spec.get_genesis_shard_state(spec.Shard(shard))
|
|
||||||
shard_state.slot = spec.ShardSlot(spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH)
|
|
||||||
return beacon_state, shard_state
|
|
||||||
|
|
||||||
|
|
||||||
def shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block):
|
|
||||||
"""
|
|
||||||
Shard state transition via the provided ``block``
|
|
||||||
then package the block with the state root and signature.
|
|
||||||
"""
|
|
||||||
spec.shard_state_transition(beacon_state, shard_state, block)
|
|
||||||
block.state_root = shard_state.hash_tree_root()
|
|
||||||
sign_shard_block(spec, beacon_state, shard_state, block)
|
|
|
@ -1,22 +1,55 @@
|
||||||
from eth2spec.test.helpers.block_header import sign_block_header
|
from eth2spec.test.helpers.block_header import sign_block_header
|
||||||
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
||||||
|
from eth2spec.test.helpers.state import get_balance
|
||||||
|
|
||||||
|
|
||||||
def get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False):
|
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
||||||
current_epoch = spec.get_current_epoch(state)
|
slashed_validator = state.validators[slashed_index]
|
||||||
validator_index = spec.get_active_validator_indices(state, current_epoch)[-1]
|
assert slashed_validator.slashed
|
||||||
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
|
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
proposer_index = spec.get_beacon_proposer_index(state)
|
||||||
|
slash_penalty = state.validators[slashed_index].effective_balance // spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||||
|
whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
|
||||||
|
if proposer_index != slashed_index:
|
||||||
|
# slashed validator lost initial slash penalty
|
||||||
|
assert (
|
||||||
|
get_balance(state, slashed_index)
|
||||||
|
== get_balance(pre_state, slashed_index) - slash_penalty
|
||||||
|
)
|
||||||
|
# block proposer gained whistleblower reward
|
||||||
|
# >= because proposer could have reported multiple
|
||||||
|
assert (
|
||||||
|
get_balance(state, proposer_index)
|
||||||
|
>= get_balance(pre_state, proposer_index) + whistleblower_reward
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
# proposer reported themself so get penalty and reward
|
||||||
|
# >= because proposer could have reported multiple
|
||||||
|
assert (
|
||||||
|
get_balance(state, slashed_index)
|
||||||
|
>= get_balance(pre_state, slashed_index) - slash_penalty + whistleblower_reward
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_valid_proposer_slashing(spec, state, random_root=b'\x99' * 32,
|
||||||
|
slashed_index=None, signed_1=False, signed_2=False):
|
||||||
|
if slashed_index is None:
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
slashed_index = spec.get_active_validator_indices(state, current_epoch)[-1]
|
||||||
|
privkey = pubkey_to_privkey[state.validators[slashed_index].pubkey]
|
||||||
slot = state.slot
|
slot = state.slot
|
||||||
|
|
||||||
header_1 = spec.BeaconBlockHeader(
|
header_1 = spec.BeaconBlockHeader(
|
||||||
slot=slot,
|
slot=slot,
|
||||||
proposer_index=validator_index,
|
proposer_index=slashed_index,
|
||||||
parent_root=b'\x33' * 32,
|
parent_root=b'\x33' * 32,
|
||||||
state_root=b'\x44' * 32,
|
state_root=b'\x44' * 32,
|
||||||
body_root=b'\x55' * 32,
|
body_root=b'\x55' * 32,
|
||||||
)
|
)
|
||||||
header_2 = header_1.copy()
|
header_2 = header_1.copy()
|
||||||
header_2.parent_root = b'\x99' * 32
|
header_2.parent_root = random_root
|
||||||
|
|
||||||
if signed_1:
|
if signed_1:
|
||||||
signed_header_1 = sign_block_header(spec, state, header_1, privkey)
|
signed_header_1 = sign_block_header(spec, state, header_1, privkey)
|
||||||
|
|
|
@ -0,0 +1,457 @@
|
||||||
|
from random import Random
|
||||||
|
from lru import LRU
|
||||||
|
|
||||||
|
from eth2spec.phase0 import spec as spec_phase0
|
||||||
|
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
|
||||||
|
from eth2spec.test.helpers.deposits import mock_deposit
|
||||||
|
from eth2spec.test.helpers.state import next_epoch
|
||||||
|
from eth2spec.utils.ssz.ssz_typing import Container, uint64, List
|
||||||
|
|
||||||
|
|
||||||
|
class Deltas(Container):
|
||||||
|
rewards: List[uint64, spec_phase0.VALIDATOR_REGISTRY_LIMIT]
|
||||||
|
penalties: List[uint64, spec_phase0.VALIDATOR_REGISTRY_LIMIT]
|
||||||
|
|
||||||
|
|
||||||
|
def has_enough_for_reward(spec, state, index):
|
||||||
|
"""
|
||||||
|
Check if base_reward will be non-zero.
|
||||||
|
|
||||||
|
At very low balances, it is possible for a validator have a positive effective_balance
|
||||||
|
but a zero base reward.
|
||||||
|
"""
|
||||||
|
return (
|
||||||
|
state.validators[index].effective_balance * spec.BASE_REWARD_FACTOR
|
||||||
|
> spec.integer_squareroot(spec.get_total_active_balance(state)) // spec.BASE_REWARDS_PER_EPOCH
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def run_deltas(spec, state):
|
||||||
|
"""
|
||||||
|
Run all deltas functions yielding:
|
||||||
|
- pre-state ('pre')
|
||||||
|
- source deltas ('source_deltas')
|
||||||
|
- target deltas ('target_deltas')
|
||||||
|
- head deltas ('head_deltas')
|
||||||
|
- inclusion delay deltas ('inclusion_delay_deltas')
|
||||||
|
- inactivity penalty deltas ('inactivity_penalty_deltas')
|
||||||
|
"""
|
||||||
|
yield 'pre', state
|
||||||
|
yield from run_attestation_component_deltas(
|
||||||
|
spec,
|
||||||
|
state,
|
||||||
|
spec.get_source_deltas,
|
||||||
|
spec.get_matching_source_attestations,
|
||||||
|
'source_deltas',
|
||||||
|
)
|
||||||
|
yield from run_attestation_component_deltas(
|
||||||
|
spec,
|
||||||
|
state,
|
||||||
|
spec.get_target_deltas,
|
||||||
|
spec.get_matching_target_attestations,
|
||||||
|
'target_deltas',
|
||||||
|
)
|
||||||
|
yield from run_attestation_component_deltas(
|
||||||
|
spec,
|
||||||
|
state,
|
||||||
|
spec.get_head_deltas,
|
||||||
|
spec.get_matching_head_attestations,
|
||||||
|
'head_deltas',
|
||||||
|
)
|
||||||
|
yield from run_get_inclusion_delay_deltas(spec, state)
|
||||||
|
yield from run_get_inactivity_penalty_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_attestation_component_deltas(spec, state, component_delta_fn, matching_att_fn, deltas_name):
|
||||||
|
"""
|
||||||
|
Run ``component_delta_fn``, yielding:
|
||||||
|
- deltas ('{``deltas_name``}')
|
||||||
|
"""
|
||||||
|
rewards, penalties = component_delta_fn(state)
|
||||||
|
|
||||||
|
yield deltas_name, Deltas(rewards=rewards, penalties=penalties)
|
||||||
|
|
||||||
|
matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
|
||||||
|
matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||||
|
eligible_indices = spec.get_eligible_validator_indices(state)
|
||||||
|
for index in range(len(state.validators)):
|
||||||
|
if index not in eligible_indices:
|
||||||
|
assert rewards[index] == 0
|
||||||
|
assert penalties[index] == 0
|
||||||
|
continue
|
||||||
|
|
||||||
|
validator = state.validators[index]
|
||||||
|
enough_for_reward = has_enough_for_reward(spec, state, index)
|
||||||
|
if index in matching_indices and not validator.slashed:
|
||||||
|
if enough_for_reward:
|
||||||
|
assert rewards[index] > 0
|
||||||
|
else:
|
||||||
|
assert rewards[index] == 0
|
||||||
|
assert penalties[index] == 0
|
||||||
|
else:
|
||||||
|
assert rewards[index] == 0
|
||||||
|
if enough_for_reward:
|
||||||
|
assert penalties[index] > 0
|
||||||
|
else:
|
||||||
|
assert penalties[index] == 0
|
||||||
|
|
||||||
|
|
||||||
|
def run_get_inclusion_delay_deltas(spec, state):
|
||||||
|
"""
|
||||||
|
Run ``get_inclusion_delay_deltas``, yielding:
|
||||||
|
- inclusion delay deltas ('inclusion_delay_deltas')
|
||||||
|
"""
|
||||||
|
rewards, penalties = spec.get_inclusion_delay_deltas(state)
|
||||||
|
|
||||||
|
yield 'inclusion_delay_deltas', Deltas(rewards=rewards, penalties=penalties)
|
||||||
|
|
||||||
|
eligible_attestations = spec.get_matching_source_attestations(state, spec.get_previous_epoch(state))
|
||||||
|
attesting_indices = spec.get_unslashed_attesting_indices(state, eligible_attestations)
|
||||||
|
|
||||||
|
rewarded_indices = set()
|
||||||
|
rewarded_proposer_indices = set()
|
||||||
|
# Ensure attesters with enough balance are rewarded for attestations
|
||||||
|
# Track those that are rewarded and track proposers that should be rewarded
|
||||||
|
for index in range(len(state.validators)):
|
||||||
|
if index in attesting_indices and has_enough_for_reward(spec, state, index):
|
||||||
|
assert rewards[index] > 0
|
||||||
|
rewarded_indices.add(index)
|
||||||
|
|
||||||
|
# Track proposer of earliest included attestation for the validator defined by index
|
||||||
|
earliest_attestation = min([
|
||||||
|
a for a in eligible_attestations
|
||||||
|
if index in spec.get_attesting_indices(state, a.data, a.aggregation_bits)
|
||||||
|
], key=lambda a: a.inclusion_delay)
|
||||||
|
rewarded_proposer_indices.add(earliest_attestation.proposer_index)
|
||||||
|
|
||||||
|
# Ensure all expected proposers have been rewarded
|
||||||
|
# Track rewarde indices
|
||||||
|
proposing_indices = [a.proposer_index for a in eligible_attestations]
|
||||||
|
for index in proposing_indices:
|
||||||
|
if index in rewarded_proposer_indices:
|
||||||
|
assert rewards[index] > 0
|
||||||
|
rewarded_indices.add(index)
|
||||||
|
|
||||||
|
# Ensure all expected non-rewarded indices received no reward
|
||||||
|
for index in range(len(state.validators)):
|
||||||
|
assert penalties[index] == 0
|
||||||
|
if index not in rewarded_indices:
|
||||||
|
assert rewards[index] == 0
|
||||||
|
|
||||||
|
|
||||||
|
def run_get_inactivity_penalty_deltas(spec, state):
|
||||||
|
"""
|
||||||
|
Run ``get_inactivity_penalty_deltas``, yielding:
|
||||||
|
- inactivity penalty deltas ('inactivity_penalty_deltas')
|
||||||
|
"""
|
||||||
|
rewards, penalties = spec.get_inactivity_penalty_deltas(state)
|
||||||
|
|
||||||
|
yield 'inactivity_penalty_deltas', Deltas(rewards=rewards, penalties=penalties)
|
||||||
|
|
||||||
|
matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
|
||||||
|
matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||||
|
|
||||||
|
eligible_indices = spec.get_eligible_validator_indices(state)
|
||||||
|
for index in range(len(state.validators)):
|
||||||
|
assert rewards[index] == 0
|
||||||
|
if index not in eligible_indices:
|
||||||
|
assert penalties[index] == 0
|
||||||
|
continue
|
||||||
|
|
||||||
|
if spec.is_in_inactivity_leak(state):
|
||||||
|
base_reward = spec.get_base_reward(state, index)
|
||||||
|
base_penalty = spec.BASE_REWARDS_PER_EPOCH * base_reward - spec.get_proposer_reward(state, index)
|
||||||
|
if not has_enough_for_reward(spec, state, index):
|
||||||
|
assert penalties[index] == 0
|
||||||
|
elif index in matching_attesting_indices:
|
||||||
|
assert penalties[index] == base_penalty
|
||||||
|
else:
|
||||||
|
assert penalties[index] > base_penalty
|
||||||
|
else:
|
||||||
|
assert penalties[index] == 0
|
||||||
|
|
||||||
|
|
||||||
|
def transition_state_to_leak(spec, state, epochs=None):
|
||||||
|
if epochs is None:
|
||||||
|
epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY
|
||||||
|
assert epochs >= spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY
|
||||||
|
|
||||||
|
for _ in range(epochs):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
_cache_dict = LRU(size=10)
|
||||||
|
|
||||||
|
|
||||||
|
def leaking(epochs=None):
|
||||||
|
|
||||||
|
def deco(fn):
|
||||||
|
def entry(*args, spec, state, **kw):
|
||||||
|
# If the pre-state is not already known in the LRU, then take it,
|
||||||
|
# transition it to leak, and put it in the LRU.
|
||||||
|
# The input state is likely already cached, so the hash-tree-root does not affect speed.
|
||||||
|
key = (state.hash_tree_root(), spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY, spec.SLOTS_PER_EPOCH, epochs)
|
||||||
|
global _cache_dict
|
||||||
|
if key not in _cache_dict:
|
||||||
|
transition_state_to_leak(spec, state, epochs=epochs)
|
||||||
|
_cache_dict[key] = state.get_backing() # cache the tree structure, not the view wrapping it.
|
||||||
|
|
||||||
|
# Take an entry out of the LRU.
|
||||||
|
# No copy is necessary, as we wrap the immutable backing with a new view.
|
||||||
|
state = spec.BeaconState(backing=_cache_dict[key])
|
||||||
|
return fn(*args, spec=spec, state=state, **kw)
|
||||||
|
return entry
|
||||||
|
return deco
|
||||||
|
|
||||||
|
|
||||||
|
def set_some_new_deposits(spec, state, rng):
|
||||||
|
num_validators = len(state.validators)
|
||||||
|
# Set ~1/10 to just recently deposited
|
||||||
|
for index in range(num_validators):
|
||||||
|
# If not already active, skip
|
||||||
|
if not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)):
|
||||||
|
continue
|
||||||
|
if rng.randrange(num_validators) < num_validators // 10:
|
||||||
|
mock_deposit(spec, state, index)
|
||||||
|
# Set ~half of selected to eligible for activation
|
||||||
|
if rng.choice([True, False]):
|
||||||
|
state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state)
|
||||||
|
|
||||||
|
|
||||||
|
def exit_random_validators(spec, state, rng):
|
||||||
|
if spec.get_current_epoch(state) < 5:
|
||||||
|
# Move epochs forward to allow for some validators already exited/withdrawable
|
||||||
|
for _ in range(5):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
# Exit ~1/2 of validators
|
||||||
|
for index in spec.get_active_validator_indices(state, current_epoch):
|
||||||
|
if rng.choice([True, False]):
|
||||||
|
continue
|
||||||
|
|
||||||
|
validator = state.validators[index]
|
||||||
|
validator.exit_epoch = rng.choice([current_epoch - 1, current_epoch - 2, current_epoch - 3])
|
||||||
|
# ~1/2 are withdrawable
|
||||||
|
if rng.choice([True, False]):
|
||||||
|
validator.withdrawable_epoch = current_epoch
|
||||||
|
else:
|
||||||
|
validator.withdrawable_epoch = current_epoch + 1
|
||||||
|
|
||||||
|
|
||||||
|
def slash_random_validators(spec, state, rng):
|
||||||
|
# Slash ~1/2 of validators
|
||||||
|
for index in range(len(state.validators)):
|
||||||
|
# slash at least one validator
|
||||||
|
if index == 0 or rng.choice([True, False]):
|
||||||
|
spec.slash_validator(state, index)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_empty(spec, state):
|
||||||
|
# Do not add any attestations to state
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_full_all_correct(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_full_but_partial_participation(spec, state, rng=Random(5522)):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
for a in state.previous_epoch_attestations:
|
||||||
|
a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_partial(spec, state, fraction_filled):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
# Remove portion of attestations
|
||||||
|
num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
|
||||||
|
state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_half_full(spec, state):
|
||||||
|
yield from run_test_partial(spec, state, 0.5)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_one_attestation_one_correct(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
# Remove all attestations except for the first one
|
||||||
|
state.previous_epoch_attestations = state.previous_epoch_attestations[:1]
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_with_not_yet_activated_validators(spec, state, rng=Random(5555)):
|
||||||
|
set_some_new_deposits(spec, state, rng)
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_with_exited_validators(spec, state, rng=Random(1337)):
|
||||||
|
exit_random_validators(spec, state, rng)
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_with_slashed_validators(spec, state, rng=Random(3322)):
|
||||||
|
exit_random_validators(spec, state, rng)
|
||||||
|
slash_random_validators(spec, state, rng)
|
||||||
|
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_some_very_low_effective_balances_that_attested(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
# Set some balances to be very low (including 0)
|
||||||
|
assert len(state.validators) >= 5
|
||||||
|
for i, index in enumerate(range(5)):
|
||||||
|
state.validators[index].effective_balance = i
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_some_very_low_effective_balances_that_did_not_attest(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
# Remove attestation
|
||||||
|
attestation = state.previous_epoch_attestations[0]
|
||||||
|
state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
|
||||||
|
# Set removed indices effective balance to very low amount
|
||||||
|
indices = spec.get_unslashed_attesting_indices(state, [attestation])
|
||||||
|
for i, index in enumerate(indices):
|
||||||
|
state.validators[index].effective_balance = i
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_full_fraction_incorrect(spec, state, correct_target, correct_head, fraction_incorrect):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
# Make fraction_incorrect of pending attestations have bad target/head as specified
|
||||||
|
num_incorrect = int(fraction_incorrect * len(state.previous_epoch_attestations))
|
||||||
|
for pending_attestation in state.previous_epoch_attestations[:num_incorrect]:
|
||||||
|
if not correct_target:
|
||||||
|
pending_attestation.data.target.root = b'\x55' * 32
|
||||||
|
if not correct_head:
|
||||||
|
pending_attestation.data.beacon_block_root = b'\x66' * 32
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_full_delay_one_slot(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
for a in state.previous_epoch_attestations:
|
||||||
|
a.inclusion_delay += 1
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_full_delay_max_slots(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
for a in state.previous_epoch_attestations:
|
||||||
|
a.inclusion_delay += spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_full_mixed_delay(spec, state, rng=Random(1234)):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
for a in state.previous_epoch_attestations:
|
||||||
|
a.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_proposer_not_in_attestations(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
# Get an attestation where the proposer is not in the committee
|
||||||
|
non_proposer_attestations = []
|
||||||
|
for a in state.previous_epoch_attestations:
|
||||||
|
if a.proposer_index not in spec.get_unslashed_attesting_indices(state, [a]):
|
||||||
|
non_proposer_attestations.append(a)
|
||||||
|
|
||||||
|
assert any(non_proposer_attestations)
|
||||||
|
state.previous_epoch_attestations = non_proposer_attestations
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_duplicate_attestations_at_later_slots(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
# Remove 2/3 of attestations to make it more interesting
|
||||||
|
num_attestations = int(len(state.previous_epoch_attestations) * 0.33)
|
||||||
|
state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
|
||||||
|
|
||||||
|
# Get map of the proposer at each slot to make valid-looking duplicate attestations
|
||||||
|
per_slot_proposers = {
|
||||||
|
(a.data.slot + a.inclusion_delay): a.proposer_index
|
||||||
|
for a in state.previous_epoch_attestations
|
||||||
|
}
|
||||||
|
max_slot = max([a.data.slot + a.inclusion_delay for a in state.previous_epoch_attestations])
|
||||||
|
later_attestations = []
|
||||||
|
for a in state.previous_epoch_attestations:
|
||||||
|
# Only have proposers for previous epoch so do not create later
|
||||||
|
# duplicate if slot exceeds the max slot in previous_epoch_attestations
|
||||||
|
if a.data.slot + a.inclusion_delay >= max_slot:
|
||||||
|
continue
|
||||||
|
later_a = a.copy()
|
||||||
|
later_a.inclusion_delay += 1
|
||||||
|
later_a.proposer_index = per_slot_proposers[later_a.data.slot + later_a.inclusion_delay]
|
||||||
|
later_attestations.append(later_a)
|
||||||
|
|
||||||
|
assert any(later_attestations)
|
||||||
|
|
||||||
|
state.previous_epoch_attestations = sorted(
|
||||||
|
state.previous_epoch_attestations + later_attestations,
|
||||||
|
key=lambda a: a.data.slot + a.inclusion_delay
|
||||||
|
)
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_all_balances_too_low_for_reward(spec, state):
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
for index in range(len(state.validators)):
|
||||||
|
state.validators[index].effective_balance = 10
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_test_full_random(spec, state, rng=Random(8020)):
|
||||||
|
set_some_new_deposits(spec, state, rng)
|
||||||
|
exit_random_validators(spec, state, rng)
|
||||||
|
slash_random_validators(spec, state, rng)
|
||||||
|
|
||||||
|
cached_prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
|
for pending_attestation in state.previous_epoch_attestations:
|
||||||
|
# ~1/3 have bad target
|
||||||
|
if rng.randint(0, 2) == 0:
|
||||||
|
pending_attestation.data.target.root = b'\x55' * 32
|
||||||
|
# ~1/3 have bad head
|
||||||
|
if rng.randint(0, 2) == 0:
|
||||||
|
pending_attestation.data.beacon_block_root = b'\x66' * 32
|
||||||
|
# ~50% participation
|
||||||
|
pending_attestation.aggregation_bits = [rng.choice([True, False]) for _ in pending_attestation.aggregation_bits]
|
||||||
|
# Random inclusion delay
|
||||||
|
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
|
||||||
|
|
||||||
|
yield from run_deltas(spec, state)
|
|
@ -0,0 +1,86 @@
|
||||||
|
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
|
||||||
|
from eth2spec.test.helpers.block import get_state_and_beacon_parent_root_at_slot
|
||||||
|
from eth2spec.test.helpers.state import transition_to
|
||||||
|
from eth2spec.test.helpers.keys import privkeys
|
||||||
|
from eth2spec.utils import bls
|
||||||
|
from eth2spec.utils.bls import only_with_bls
|
||||||
|
|
||||||
|
|
||||||
|
@only_with_bls()
|
||||||
|
def sign_shard_block(spec, beacon_state, shard, block, proposer_index=None):
|
||||||
|
slot = block.message.slot
|
||||||
|
if proposer_index is None:
|
||||||
|
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
|
||||||
|
|
||||||
|
privkey = privkeys[proposer_index]
|
||||||
|
domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_PROPOSAL, spec.compute_epoch_at_slot(slot))
|
||||||
|
signing_root = spec.compute_signing_root(block.message, domain)
|
||||||
|
block.signature = bls.Sign(privkey, signing_root)
|
||||||
|
|
||||||
|
|
||||||
|
def build_shard_block(spec,
|
||||||
|
beacon_state,
|
||||||
|
shard,
|
||||||
|
slot=None,
|
||||||
|
body=None,
|
||||||
|
signed=False):
|
||||||
|
shard_state = beacon_state.shard_states[shard]
|
||||||
|
if slot is None:
|
||||||
|
slot = shard_state.slot + 1
|
||||||
|
|
||||||
|
if body is None:
|
||||||
|
body = b'\x56' * 128
|
||||||
|
|
||||||
|
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
|
||||||
|
beacon_state, beacon_parent_root = get_state_and_beacon_parent_root_at_slot(spec, beacon_state, slot)
|
||||||
|
|
||||||
|
block = spec.ShardBlock(
|
||||||
|
shard_parent_root=shard_state.latest_block_root,
|
||||||
|
beacon_parent_root=beacon_parent_root,
|
||||||
|
slot=slot,
|
||||||
|
shard=shard,
|
||||||
|
proposer_index=proposer_index,
|
||||||
|
body=body,
|
||||||
|
)
|
||||||
|
signed_block = spec.SignedShardBlock(
|
||||||
|
message=block,
|
||||||
|
)
|
||||||
|
|
||||||
|
if signed:
|
||||||
|
sign_shard_block(spec, beacon_state, shard, signed_block, proposer_index=proposer_index)
|
||||||
|
|
||||||
|
return signed_block
|
||||||
|
|
||||||
|
|
||||||
|
def build_shard_transitions_till_slot(spec, state, shard_blocks, on_time_slot):
|
||||||
|
temp_state = state.copy()
|
||||||
|
transition_to(spec, temp_state, on_time_slot)
|
||||||
|
shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
|
||||||
|
for shard, blocks in shard_blocks.items():
|
||||||
|
offset_slots = spec.get_offset_slots(temp_state, shard)
|
||||||
|
len_offset_slots = len(offset_slots)
|
||||||
|
assert len_offset_slots == on_time_slot - state.shard_states[shard].slot - 1
|
||||||
|
shard_transition = spec.get_shard_transition(temp_state, shard, blocks)
|
||||||
|
if len(blocks) > 0:
|
||||||
|
shard_block_root = blocks[-1].message.hash_tree_root()
|
||||||
|
assert shard_transition.shard_states[len_offset_slots - 1].latest_block_root == shard_block_root
|
||||||
|
assert shard_transition.shard_states[len_offset_slots - 1].slot == offset_slots[-1]
|
||||||
|
shard_transitions[shard] = shard_transition
|
||||||
|
|
||||||
|
return shard_transitions
|
||||||
|
|
||||||
|
|
||||||
|
def build_attestation_with_shard_transition(spec, state, index, on_time_slot, shard_transition=None):
|
||||||
|
temp_state = state.copy()
|
||||||
|
transition_to(spec, temp_state, on_time_slot - 1)
|
||||||
|
attestation = get_valid_on_time_attestation(
|
||||||
|
spec,
|
||||||
|
temp_state,
|
||||||
|
index=index,
|
||||||
|
shard_transition=shard_transition,
|
||||||
|
signed=True,
|
||||||
|
)
|
||||||
|
assert attestation.data.slot == temp_state.slot
|
||||||
|
if shard_transition is not None:
|
||||||
|
assert attestation.data.shard_transition_root == shard_transition.hash_tree_root()
|
||||||
|
return attestation
|
|
@ -0,0 +1,28 @@
|
||||||
|
from eth2spec.test.context import expect_assertion_error
|
||||||
|
|
||||||
|
|
||||||
|
def run_shard_transitions_processing(spec, state, shard_transitions, attestations, valid=True):
|
||||||
|
"""
|
||||||
|
Run ``process_shard_transitions``, yielding:
|
||||||
|
- pre-state ('pre')
|
||||||
|
- shard_transitions ('shard_transitions')
|
||||||
|
- attestations ('attestations')
|
||||||
|
- post-state ('post').
|
||||||
|
If ``valid == False``, run expecting ``AssertionError``
|
||||||
|
"""
|
||||||
|
# yield pre-state
|
||||||
|
yield 'pre', state
|
||||||
|
yield 'shard_transitions', shard_transitions
|
||||||
|
yield 'attestations', attestations
|
||||||
|
|
||||||
|
# If the attestation is invalid, processing is aborted, and there is no post-state.
|
||||||
|
if not valid:
|
||||||
|
expect_assertion_error(lambda: spec.process_shard_transitions(state, shard_transitions, attestations))
|
||||||
|
yield 'post', None
|
||||||
|
return
|
||||||
|
|
||||||
|
# process crosslinks
|
||||||
|
spec.process_shard_transitions(state, shard_transitions, attestations)
|
||||||
|
|
||||||
|
# yield post-state
|
||||||
|
yield 'post', state
|
|
@ -1,5 +1,5 @@
|
||||||
from eth2spec.test.context import expect_assertion_error
|
from eth2spec.test.context import expect_assertion_error
|
||||||
from eth2spec.test.helpers.block import sign_block, transition_unsigned_block
|
from eth2spec.test.helpers.block import apply_empty_block, sign_block, transition_unsigned_block
|
||||||
|
|
||||||
|
|
||||||
def get_balance(state, index):
|
def get_balance(state, index):
|
||||||
|
@ -17,7 +17,8 @@ def next_slots(spec, state, slots):
|
||||||
"""
|
"""
|
||||||
Transition given slots forward.
|
Transition given slots forward.
|
||||||
"""
|
"""
|
||||||
spec.process_slots(state, state.slot + slots)
|
if slots > 0:
|
||||||
|
spec.process_slots(state, state.slot + slots)
|
||||||
|
|
||||||
|
|
||||||
def transition_to(spec, state, slot):
|
def transition_to(spec, state, slot):
|
||||||
|
@ -30,12 +31,39 @@ def transition_to(spec, state, slot):
|
||||||
assert state.slot == slot
|
assert state.slot == slot
|
||||||
|
|
||||||
|
|
||||||
|
def transition_to_slot_via_block(spec, state, slot):
|
||||||
|
"""
|
||||||
|
Transition to ``slot`` via an empty block transition
|
||||||
|
"""
|
||||||
|
assert state.slot < slot
|
||||||
|
apply_empty_block(spec, state, slot)
|
||||||
|
assert state.slot == slot
|
||||||
|
|
||||||
|
|
||||||
|
def transition_to_valid_shard_slot(spec, state):
|
||||||
|
"""
|
||||||
|
Transition to slot `spec.PHASE_1_GENESIS_SLOT + 1` and fork at `spec.PHASE_1_GENESIS_SLOT`.
|
||||||
|
"""
|
||||||
|
transition_to(spec, state, spec.PHASE_1_GENESIS_SLOT)
|
||||||
|
state = spec.upgrade_to_phase1(state) # `upgrade_to_phase1` is a pure function
|
||||||
|
next_slot(spec, state)
|
||||||
|
return state
|
||||||
|
|
||||||
|
|
||||||
def next_epoch(spec, state):
|
def next_epoch(spec, state):
|
||||||
"""
|
"""
|
||||||
Transition to the start slot of the next epoch
|
Transition to the start slot of the next epoch
|
||||||
"""
|
"""
|
||||||
slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
|
slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
|
||||||
spec.process_slots(state, slot)
|
if slot > state.slot:
|
||||||
|
spec.process_slots(state, slot)
|
||||||
|
|
||||||
|
|
||||||
|
def next_epoch_via_block(spec, state):
|
||||||
|
"""
|
||||||
|
Transition to the start slot of the next epoch via a full block transition
|
||||||
|
"""
|
||||||
|
apply_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
|
||||||
|
|
||||||
|
|
||||||
def get_state_root(spec, state, slot) -> bytes:
|
def get_state_root(spec, state, slot) -> bytes:
|
||||||
|
|
|
@ -13,12 +13,10 @@ from eth2spec.test.helpers.attestations import (
|
||||||
sign_attestation,
|
sign_attestation,
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.state import (
|
from eth2spec.test.helpers.state import (
|
||||||
next_slot,
|
|
||||||
next_slots,
|
next_slots,
|
||||||
next_epoch,
|
next_epoch_via_block,
|
||||||
transition_to,
|
transition_to_slot_via_block,
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.block import apply_empty_block
|
|
||||||
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
||||||
|
|
||||||
|
|
||||||
|
@ -47,9 +45,7 @@ def test_success_multi_proposer_index_iterations(spec, state):
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_success_previous_epoch(spec, state):
|
def test_success_previous_epoch(spec, state):
|
||||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||||
transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
next_epoch_via_block(spec, state)
|
||||||
next_epoch(spec, state)
|
|
||||||
apply_empty_block(spec, state)
|
|
||||||
|
|
||||||
yield from run_attestation_processing(spec, state, attestation)
|
yield from run_attestation_processing(spec, state, attestation)
|
||||||
|
|
||||||
|
@ -64,6 +60,29 @@ def test_invalid_attestation_signature(spec, state):
|
||||||
yield from run_attestation_processing(spec, state, attestation, False)
|
yield from run_attestation_processing(spec, state, attestation, False)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_empty_participants_zeroes_sig(spec, state):
|
||||||
|
attestation = get_valid_attestation(spec, state, filter_participant_set=lambda comm: []) # 0 participants
|
||||||
|
attestation.signature = spec.BLSSignature(b'\x00' * 96)
|
||||||
|
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||||
|
|
||||||
|
yield from run_attestation_processing(spec, state, attestation, False)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_empty_participants_seemingly_valid_sig(spec, state):
|
||||||
|
attestation = get_valid_attestation(spec, state, filter_participant_set=lambda comm: []) # 0 participants
|
||||||
|
# Special BLS value, valid for zero pubkeys on some (but not all) BLS implementations.
|
||||||
|
attestation.signature = spec.BLSSignature(b'\xc0' + b'\x00' * 95)
|
||||||
|
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||||
|
|
||||||
|
yield from run_attestation_processing(spec, state, attestation, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_before_inclusion_delay(spec, state):
|
def test_before_inclusion_delay(spec, state):
|
||||||
|
@ -79,8 +98,7 @@ def test_after_epoch_slots(spec, state):
|
||||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||||
|
|
||||||
# increment past latest inclusion slot
|
# increment past latest inclusion slot
|
||||||
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH + 1)
|
transition_to_slot_via_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH + 1)
|
||||||
apply_empty_block(spec, state)
|
|
||||||
|
|
||||||
yield from run_attestation_processing(spec, state, attestation, False)
|
yield from run_attestation_processing(spec, state, attestation, False)
|
||||||
|
|
||||||
|
@ -151,8 +169,8 @@ def test_invalid_index(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_mismatched_target_and_slot(spec, state):
|
def test_mismatched_target_and_slot(spec, state):
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
|
|
||||||
attestation = get_valid_attestation(spec, state, on_time=False)
|
attestation = get_valid_attestation(spec, state, on_time=False)
|
||||||
attestation.data.slot = attestation.data.slot - spec.SLOTS_PER_EPOCH
|
attestation.data.slot = attestation.data.slot - spec.SLOTS_PER_EPOCH
|
||||||
|
@ -260,19 +278,6 @@ def test_bad_source_root(spec, state):
|
||||||
yield from run_attestation_processing(spec, state, attestation, False)
|
yield from run_attestation_processing(spec, state, attestation, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
|
||||||
@spec_state_test
|
|
||||||
def test_empty_aggregation_bits(spec, state):
|
|
||||||
next_slot(spec, state)
|
|
||||||
attestation = get_valid_attestation(spec, state, empty=True)
|
|
||||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
|
||||||
|
|
||||||
assert attestation.aggregation_bits == Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](
|
|
||||||
*([0b0] * len(attestation.aggregation_bits)))
|
|
||||||
|
|
||||||
yield from run_attestation_processing(spec, state, attestation)
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_too_many_aggregation_bits(spec, state):
|
def test_too_many_aggregation_bits(spec, state):
|
||||||
|
|
|
@ -1,11 +1,13 @@
|
||||||
from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases
|
from eth2spec.test.context import (
|
||||||
|
PHASE0, PHASE1,
|
||||||
|
spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases
|
||||||
|
)
|
||||||
from eth2spec.test.helpers.attestations import sign_indexed_attestation
|
from eth2spec.test.helpers.attestations import sign_indexed_attestation
|
||||||
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, \
|
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, \
|
||||||
get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data
|
get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data
|
||||||
from eth2spec.test.helpers.block import apply_empty_block
|
|
||||||
from eth2spec.test.helpers.state import (
|
from eth2spec.test.helpers.state import (
|
||||||
get_balance,
|
get_balance,
|
||||||
next_epoch,
|
next_epoch_via_block,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -88,8 +90,7 @@ def test_success_double(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_success_surround(spec, state):
|
def test_success_surround(spec, state):
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
apply_empty_block(spec, state)
|
|
||||||
|
|
||||||
state.current_justified_checkpoint.epoch += 1
|
state.current_justified_checkpoint.epoch += 1
|
||||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
|
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
|
||||||
|
@ -161,7 +162,7 @@ def test_same_data(spec, state):
|
||||||
|
|
||||||
indexed_att_1 = attester_slashing.attestation_1
|
indexed_att_1 = attester_slashing.attestation_1
|
||||||
att_2_data = get_attestation_2_data(spec, attester_slashing)
|
att_2_data = get_attestation_2_data(spec, attester_slashing)
|
||||||
if spec.fork == 'phase1':
|
if spec.fork == PHASE1:
|
||||||
indexed_att_1.attestation.data = att_2_data
|
indexed_att_1.attestation.data = att_2_data
|
||||||
else:
|
else:
|
||||||
indexed_att_1.data = att_2_data
|
indexed_att_1.data = att_2_data
|
||||||
|
@ -199,7 +200,7 @@ def test_participants_already_slashed(spec, state):
|
||||||
# Some of the following tests are phase0 only: phase 1 lists participants with bitfields instead of index list.
|
# Some of the following tests are phase0 only: phase 1 lists participants with bitfields instead of index list.
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_att1_bad_extra_index(spec, state):
|
def test_att1_bad_extra_index(spec, state):
|
||||||
|
@ -215,7 +216,7 @@ def test_att1_bad_extra_index(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_att1_bad_replaced_index(spec, state):
|
def test_att1_bad_replaced_index(spec, state):
|
||||||
|
@ -231,7 +232,7 @@ def test_att1_bad_replaced_index(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_att2_bad_extra_index(spec, state):
|
def test_att2_bad_extra_index(spec, state):
|
||||||
|
@ -247,7 +248,7 @@ def test_att2_bad_extra_index(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_att2_bad_replaced_index(spec, state):
|
def test_att2_bad_replaced_index(spec, state):
|
||||||
|
@ -263,7 +264,7 @@ def test_att2_bad_replaced_index(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_att1_duplicate_index_normal_signed(spec, state):
|
def test_att1_duplicate_index_normal_signed(spec, state):
|
||||||
|
@ -283,7 +284,7 @@ def test_att1_duplicate_index_normal_signed(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_att2_duplicate_index_normal_signed(spec, state):
|
def test_att2_duplicate_index_normal_signed(spec, state):
|
||||||
|
@ -303,7 +304,7 @@ def test_att2_duplicate_index_normal_signed(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_att1_duplicate_index_double_signed(spec, state):
|
def test_att1_duplicate_index_double_signed(spec, state):
|
||||||
|
@ -318,7 +319,7 @@ def test_att1_duplicate_index_double_signed(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_att2_duplicate_index_double_signed(spec, state):
|
def test_att2_duplicate_index_double_signed(spec, state):
|
||||||
|
@ -333,7 +334,7 @@ def test_att2_duplicate_index_double_signed(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_unsorted_att_1(spec, state):
|
def test_unsorted_att_1(spec, state):
|
||||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
|
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
|
||||||
|
@ -346,7 +347,7 @@ def test_unsorted_att_1(spec, state):
|
||||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_unsorted_att_2(spec, state):
|
def test_unsorted_att_2(spec, state):
|
||||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
|
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
|
||||||
|
|
|
@ -9,7 +9,7 @@ def prepare_state_for_header_processing(spec, state):
|
||||||
spec.process_slots(state, state.slot + 1)
|
spec.process_slots(state, state.slot + 1)
|
||||||
|
|
||||||
|
|
||||||
def run_block_header_processing(spec, state, block, valid=True):
|
def run_block_header_processing(spec, state, block, prepare_state=True, valid=True):
|
||||||
"""
|
"""
|
||||||
Run ``process_block_header``, yielding:
|
Run ``process_block_header``, yielding:
|
||||||
- pre-state ('pre')
|
- pre-state ('pre')
|
||||||
|
@ -17,7 +17,8 @@ def run_block_header_processing(spec, state, block, valid=True):
|
||||||
- post-state ('post').
|
- post-state ('post').
|
||||||
If ``valid == False``, run expecting ``AssertionError``
|
If ``valid == False``, run expecting ``AssertionError``
|
||||||
"""
|
"""
|
||||||
prepare_state_for_header_processing(spec, state)
|
if prepare_state:
|
||||||
|
prepare_state_for_header_processing(spec, state)
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
yield 'block', block
|
yield 'block', block
|
||||||
|
@ -68,6 +69,22 @@ def test_invalid_parent_root(spec, state):
|
||||||
yield from run_block_header_processing(spec, state, block, valid=False)
|
yield from run_block_header_processing(spec, state, block, valid=False)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_invalid_multiple_blocks_single_slot(spec, state):
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
|
||||||
|
prepare_state_for_header_processing(spec, state)
|
||||||
|
spec.process_block_header(state, block)
|
||||||
|
|
||||||
|
assert state.latest_block_header.slot == state.slot
|
||||||
|
|
||||||
|
child_block = block.copy()
|
||||||
|
child_block.parent_root = block.hash_tree_root()
|
||||||
|
|
||||||
|
yield from run_block_header_processing(spec, state, child_block, prepare_state=False, valid=False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_proposer_slashed(spec, state):
|
def test_proposer_slashed(spec, state):
|
||||||
|
|
|
@ -1,8 +1,9 @@
|
||||||
from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
|
from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
|
||||||
|
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||||
from eth2spec.test.helpers.block_header import sign_block_header
|
from eth2spec.test.helpers.block_header import sign_block_header
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
from eth2spec.test.helpers.keys import privkeys
|
||||||
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
|
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect
|
||||||
from eth2spec.test.helpers.state import get_balance, next_epoch
|
from eth2spec.test.helpers.state import next_epoch
|
||||||
|
|
||||||
|
|
||||||
def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True):
|
def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True):
|
||||||
|
@ -14,6 +15,8 @@ def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True)
|
||||||
If ``valid == False``, run expecting ``AssertionError``
|
If ``valid == False``, run expecting ``AssertionError``
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
pre_state = state.copy()
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
yield 'proposer_slashing', proposer_slashing
|
yield 'proposer_slashing', proposer_slashing
|
||||||
|
|
||||||
|
@ -22,25 +25,31 @@ def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True)
|
||||||
yield 'post', None
|
yield 'post', None
|
||||||
return
|
return
|
||||||
|
|
||||||
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
|
|
||||||
pre_proposer_balance = get_balance(state, proposer_index)
|
|
||||||
|
|
||||||
spec.process_proposer_slashing(state, proposer_slashing)
|
spec.process_proposer_slashing(state, proposer_slashing)
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
# check if slashed
|
slashed_proposer_index = proposer_slashing.signed_header_1.message.proposer_index
|
||||||
slashed_validator = state.validators[proposer_index]
|
check_proposer_slashing_effect(spec, pre_state, state, slashed_proposer_index)
|
||||||
assert slashed_validator.slashed
|
|
||||||
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
|
|
||||||
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
|
||||||
|
|
||||||
# lost whistleblower reward
|
|
||||||
assert get_balance(state, proposer_index) < pre_proposer_balance
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_success(spec, state):
|
def test_success(spec, state):
|
||||||
|
# Get proposer for next slot
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
proposer_index = block.proposer_index
|
||||||
|
|
||||||
|
# Create slashing for same proposer
|
||||||
|
proposer_slashing = get_valid_proposer_slashing(spec, state,
|
||||||
|
slashed_index=proposer_index,
|
||||||
|
signed_1=True, signed_2=True)
|
||||||
|
|
||||||
|
yield from run_proposer_slashing_processing(spec, state, proposer_slashing)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_success_slashed_and_proposer_index_the_same(spec, state):
|
||||||
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
|
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
|
||||||
|
|
||||||
yield from run_proposer_slashing_processing(spec, state, proposer_slashing)
|
yield from run_proposer_slashing_processing(spec, state, proposer_slashing)
|
||||||
|
|
|
@ -34,8 +34,8 @@ def run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=True
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_success(spec, state):
|
def test_success(spec, state):
|
||||||
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
current_epoch = spec.get_current_epoch(state)
|
||||||
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
@ -53,8 +53,8 @@ def test_success(spec, state):
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_invalid_signature(spec, state):
|
def test_invalid_signature(spec, state):
|
||||||
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
current_epoch = spec.get_current_epoch(state)
|
||||||
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
@ -71,8 +71,8 @@ def test_invalid_signature(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_success_exit_queue(spec, state):
|
def test_success_exit_queue(spec, state):
|
||||||
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
|
||||||
|
@ -115,8 +115,8 @@ def test_success_exit_queue(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_default_exit_epoch_subsequent_exit(spec, state):
|
def test_default_exit_epoch_subsequent_exit(spec, state):
|
||||||
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
current_epoch = spec.get_current_epoch(state)
|
||||||
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
@ -137,8 +137,8 @@ def test_default_exit_epoch_subsequent_exit(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_validator_exit_in_future(spec, state):
|
def test_validator_exit_in_future(spec, state):
|
||||||
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
current_epoch = spec.get_current_epoch(state)
|
||||||
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
@ -156,8 +156,8 @@ def test_validator_exit_in_future(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_validator_invalid_validator_index(spec, state):
|
def test_validator_invalid_validator_index(spec, state):
|
||||||
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
current_epoch = spec.get_current_epoch(state)
|
||||||
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
@ -190,8 +190,8 @@ def test_validator_not_active(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_validator_already_exited(spec, state):
|
def test_validator_already_exited(spec, state):
|
||||||
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow validator able to exit
|
||||||
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
current_epoch = spec.get_current_epoch(state)
|
||||||
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
@ -218,7 +218,7 @@ def test_validator_not_active_long_enough(spec, state):
|
||||||
|
|
||||||
assert (
|
assert (
|
||||||
current_epoch - state.validators[validator_index].activation_epoch <
|
current_epoch - state.validators[validator_index].activation_epoch <
|
||||||
spec.PERSISTENT_COMMITTEE_PERIOD
|
spec.SHARD_COMMITTEE_PERIOD
|
||||||
)
|
)
|
||||||
|
|
||||||
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
|
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
|
||||||
|
|
|
@ -18,7 +18,8 @@ def run_epoch_processing_to(spec, state, process_name: str):
|
||||||
slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
|
slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
|
||||||
|
|
||||||
# transition state to slot before epoch state transition
|
# transition state to slot before epoch state transition
|
||||||
spec.process_slots(state, slot - 1)
|
if state.slot < slot - 1:
|
||||||
|
spec.process_slots(state, slot - 1)
|
||||||
|
|
||||||
# start transitioning, do one slot update before the epoch itself.
|
# start transitioning, do one slot update before the epoch itself.
|
||||||
spec.process_slot(state)
|
spec.process_slot(state)
|
||||||
|
|
|
@ -24,7 +24,7 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
|
||||||
raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
|
raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
|
||||||
|
|
||||||
total_balance = spec.get_total_active_balance(state)
|
total_balance = spec.get_total_active_balance(state)
|
||||||
remaining_balance = total_balance * 2 // 3
|
remaining_balance = int(total_balance * 2 // 3) # can become negative
|
||||||
|
|
||||||
start_slot = spec.compute_start_slot_at_epoch(epoch)
|
start_slot = spec.compute_start_slot_at_epoch(epoch)
|
||||||
for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH):
|
for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH):
|
||||||
|
@ -42,7 +42,7 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
|
||||||
aggregation_bits = [0] * len(committee)
|
aggregation_bits = [0] * len(committee)
|
||||||
for v in range(len(committee) * 2 // 3 + 1):
|
for v in range(len(committee) * 2 // 3 + 1):
|
||||||
if remaining_balance > 0:
|
if remaining_balance > 0:
|
||||||
remaining_balance -= state.validators[v].effective_balance
|
remaining_balance -= int(state.validators[v].effective_balance)
|
||||||
aggregation_bits[v] = 1
|
aggregation_bits[v] = 1
|
||||||
else:
|
else:
|
||||||
break
|
break
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
from eth2spec.test.helpers.deposits import mock_deposit
|
||||||
from eth2spec.test.helpers.state import next_epoch, next_slots
|
from eth2spec.test.helpers.state import next_epoch, next_slots
|
||||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||||
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||||
|
@ -7,14 +8,6 @@ def run_process_registry_updates(spec, state):
|
||||||
yield from run_epoch_processing_with(spec, state, 'process_registry_updates')
|
yield from run_epoch_processing_with(spec, state, 'process_registry_updates')
|
||||||
|
|
||||||
|
|
||||||
def mock_deposit(spec, state, index):
|
|
||||||
assert spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
|
|
||||||
state.validators[index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
|
|
||||||
state.validators[index].activation_epoch = spec.FAR_FUTURE_EPOCH
|
|
||||||
state.validators[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
|
|
||||||
assert not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state))
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_add_to_activation_queue(spec, state):
|
def test_add_to_activation_queue(spec, state):
|
||||||
|
|
|
@ -12,39 +12,18 @@ from eth2spec.test.helpers.state import (
|
||||||
from eth2spec.test.helpers.attestations import (
|
from eth2spec.test.helpers.attestations import (
|
||||||
add_attestations_to_state,
|
add_attestations_to_state,
|
||||||
get_valid_attestation,
|
get_valid_attestation,
|
||||||
|
prepare_state_with_attestations,
|
||||||
)
|
)
|
||||||
|
from eth2spec.test.helpers.rewards import leaking
|
||||||
from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants
|
from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants
|
||||||
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||||
|
from random import Random
|
||||||
|
|
||||||
|
|
||||||
def run_process_rewards_and_penalties(spec, state):
|
def run_process_rewards_and_penalties(spec, state):
|
||||||
yield from run_epoch_processing_with(spec, state, 'process_rewards_and_penalties')
|
yield from run_epoch_processing_with(spec, state, 'process_rewards_and_penalties')
|
||||||
|
|
||||||
|
|
||||||
def prepare_state_with_full_attestations(spec, state, empty=False):
|
|
||||||
start_slot = state.slot
|
|
||||||
start_epoch = spec.get_current_epoch(state)
|
|
||||||
next_start_epoch = spec.compute_start_slot_at_epoch(start_epoch + 1)
|
|
||||||
attestations = []
|
|
||||||
for slot in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
|
||||||
# create an attestation for each index in each slot in epoch
|
|
||||||
if state.slot < next_start_epoch:
|
|
||||||
for committee_index in range(spec.get_committee_count_at_slot(state, state.slot)):
|
|
||||||
attestation = get_valid_attestation(spec, state, index=committee_index, empty=empty, signed=True)
|
|
||||||
attestations.append(attestation)
|
|
||||||
# fill each created slot in state after inclusion delay
|
|
||||||
if state.slot >= start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY:
|
|
||||||
inclusion_slot = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY
|
|
||||||
include_attestations = [att for att in attestations if att.data.slot == inclusion_slot]
|
|
||||||
add_attestations_to_state(spec, state, include_attestations, state.slot)
|
|
||||||
next_slot(spec, state)
|
|
||||||
|
|
||||||
assert spec.compute_epoch_at_slot(state.slot) == start_epoch + 1
|
|
||||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
|
||||||
|
|
||||||
return attestations
|
|
||||||
|
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases(['phase0'])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_genesis_epoch_no_attestations_no_penalties(spec, state):
|
def test_genesis_epoch_no_attestations_no_penalties(spec, state):
|
||||||
|
@ -84,30 +63,10 @@ def test_genesis_epoch_full_attestations_no_rewards(spec, state):
|
||||||
assert state.balances[index] == pre_state.balances[index]
|
assert state.balances[index] == pre_state.balances[index]
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
|
||||||
@spec_state_test
|
|
||||||
def test_full_attestations(spec, state):
|
|
||||||
# Go to start of next epoch to ensure can have full participation
|
|
||||||
next_epoch(spec, state)
|
|
||||||
attestations = prepare_state_with_full_attestations(spec, state)
|
|
||||||
|
|
||||||
pre_state = state.copy()
|
|
||||||
|
|
||||||
yield from run_process_rewards_and_penalties(spec, state)
|
|
||||||
|
|
||||||
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
|
||||||
assert len(attesting_indices) == len(pre_state.validators)
|
|
||||||
for index in range(len(pre_state.validators)):
|
|
||||||
if index in attesting_indices:
|
|
||||||
assert state.balances[index] > pre_state.balances[index]
|
|
||||||
else:
|
|
||||||
assert state.balances[index] < pre_state.balances[index]
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_full_attestations_random_incorrect_fields(spec, state):
|
def test_full_attestations_random_incorrect_fields(spec, state):
|
||||||
attestations = prepare_state_with_full_attestations(spec, state)
|
attestations = prepare_state_with_attestations(spec, state)
|
||||||
for i, attestation in enumerate(state.previous_epoch_attestations):
|
for i, attestation in enumerate(state.previous_epoch_attestations):
|
||||||
if i % 3 == 0:
|
if i % 3 == 0:
|
||||||
# Mess up some head votes
|
# Mess up some head votes
|
||||||
|
@ -132,9 +91,7 @@ def test_full_attestations_random_incorrect_fields(spec, state):
|
||||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.MAX_EFFECTIVE_BALANCE // 2)
|
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.MAX_EFFECTIVE_BALANCE // 2)
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_full_attestations_misc_balances(spec, state):
|
def test_full_attestations_misc_balances(spec, state):
|
||||||
# Go to start of next epoch to ensure can have full participation
|
attestations = prepare_state_with_attestations(spec, state)
|
||||||
next_epoch(spec, state)
|
|
||||||
attestations = prepare_state_with_full_attestations(spec, state)
|
|
||||||
|
|
||||||
pre_state = state.copy()
|
pre_state = state.copy()
|
||||||
|
|
||||||
|
@ -166,7 +123,7 @@ def test_full_attestations_misc_balances(spec, state):
|
||||||
@with_custom_state(balances_fn=low_single_balance, threshold_fn=zero_activation_threshold)
|
@with_custom_state(balances_fn=low_single_balance, threshold_fn=zero_activation_threshold)
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_full_attestations_one_validaor_one_gwei(spec, state):
|
def test_full_attestations_one_validaor_one_gwei(spec, state):
|
||||||
attestations = prepare_state_with_full_attestations(spec, state)
|
attestations = prepare_state_with_attestations(spec, state)
|
||||||
|
|
||||||
yield from run_process_rewards_and_penalties(spec, state)
|
yield from run_process_rewards_and_penalties(spec, state)
|
||||||
|
|
||||||
|
@ -178,6 +135,7 @@ def test_full_attestations_one_validaor_one_gwei(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_no_attestations_all_penalties(spec, state):
|
def test_no_attestations_all_penalties(spec, state):
|
||||||
|
# Move to next epoch to ensure rewards/penalties are processed
|
||||||
next_epoch(spec, state)
|
next_epoch(spec, state)
|
||||||
pre_state = state.copy()
|
pre_state = state.copy()
|
||||||
|
|
||||||
|
@ -189,20 +147,97 @@ def test_no_attestations_all_penalties(spec, state):
|
||||||
assert state.balances[index] < pre_state.balances[index]
|
assert state.balances[index] < pre_state.balances[index]
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
def run_with_participation(spec, state, participation_fn):
|
||||||
@spec_state_test
|
participated = set()
|
||||||
def test_empty_attestations(spec, state):
|
|
||||||
attestations = prepare_state_with_full_attestations(spec, state, empty=True)
|
def participation_tracker(slot, comm_index, comm):
|
||||||
|
att_participants = participation_fn(slot, comm_index, comm)
|
||||||
|
participated.update(att_participants)
|
||||||
|
return att_participants
|
||||||
|
|
||||||
|
attestations = prepare_state_with_attestations(spec, state, participation_fn=participation_tracker)
|
||||||
|
proposer_indices = [a.proposer_index for a in state.previous_epoch_attestations]
|
||||||
|
|
||||||
pre_state = state.copy()
|
pre_state = state.copy()
|
||||||
|
|
||||||
yield from run_process_rewards_and_penalties(spec, state)
|
yield from run_process_rewards_and_penalties(spec, state)
|
||||||
|
|
||||||
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
||||||
assert len(attesting_indices) == 0
|
assert len(attesting_indices) == len(participated)
|
||||||
|
|
||||||
for index in range(len(pre_state.validators)):
|
for index in range(len(pre_state.validators)):
|
||||||
assert state.balances[index] < pre_state.balances[index]
|
if spec.is_in_inactivity_leak(state):
|
||||||
|
# Proposers can still make money during a leak
|
||||||
|
if index in proposer_indices and index in participated:
|
||||||
|
assert state.balances[index] > pre_state.balances[index]
|
||||||
|
# If not proposer but participated optimally, should have exactly neutral balance
|
||||||
|
elif index in attesting_indices:
|
||||||
|
assert state.balances[index] == pre_state.balances[index]
|
||||||
|
else:
|
||||||
|
assert state.balances[index] < pre_state.balances[index]
|
||||||
|
else:
|
||||||
|
if index in participated:
|
||||||
|
assert state.balances[index] > pre_state.balances[index]
|
||||||
|
else:
|
||||||
|
assert state.balances[index] < pre_state.balances[index]
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_almost_empty_attestations(spec, state):
|
||||||
|
rng = Random(1234)
|
||||||
|
yield from run_with_participation(spec, state, lambda slot, comm_index, comm: rng.sample(comm, 1))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_almost_empty_attestations_with_leak(spec, state):
|
||||||
|
rng = Random(1234)
|
||||||
|
yield from run_with_participation(spec, state, lambda slot, comm_index, comm: rng.sample(comm, 1))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_random_fill_attestations(spec, state):
|
||||||
|
rng = Random(4567)
|
||||||
|
yield from run_with_participation(spec, state, lambda slot, comm_index, comm: rng.sample(comm, len(comm) // 3))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_random_fill_attestations_with_leak(spec, state):
|
||||||
|
rng = Random(4567)
|
||||||
|
yield from run_with_participation(spec, state, lambda slot, comm_index, comm: rng.sample(comm, len(comm) // 3))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_almost_full_attestations(spec, state):
|
||||||
|
rng = Random(8901)
|
||||||
|
yield from run_with_participation(spec, state, lambda slot, comm_index, comm: rng.sample(comm, len(comm) - 1))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_almost_full_attestations_with_leak(spec, state):
|
||||||
|
rng = Random(8901)
|
||||||
|
yield from run_with_participation(spec, state, lambda slot, comm_index, comm: rng.sample(comm, len(comm) - 1))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_attestation_participation(spec, state):
|
||||||
|
yield from run_with_participation(spec, state, lambda slot, comm_index, comm: comm)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_full_attestation_participation_with_leak(spec, state):
|
||||||
|
yield from run_with_participation(spec, state, lambda slot, comm_index, comm: comm)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
|
@ -247,14 +282,13 @@ def test_duplicate_attestation(spec, state):
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
# Case when some eligible attestations are slashed. Modifies attesting_balance and consequently rewards/penalties.
|
# Case when some eligible attestations are slashed. Modifies attesting_balance and consequently rewards/penalties.
|
||||||
def test_attestations_some_slashed(spec, state):
|
def test_attestations_some_slashed(spec, state):
|
||||||
attestations = prepare_state_with_full_attestations(spec, state)
|
attestations = prepare_state_with_attestations(spec, state)
|
||||||
attesting_indices_before_slashings = list(spec.get_unslashed_attesting_indices(state, attestations))
|
attesting_indices_before_slashings = list(spec.get_unslashed_attesting_indices(state, attestations))
|
||||||
|
|
||||||
# Slash maximum amount of validators allowed per epoch.
|
# Slash maximum amount of validators allowed per epoch.
|
||||||
for i in range(spec.MIN_PER_EPOCH_CHURN_LIMIT):
|
for i in range(spec.MIN_PER_EPOCH_CHURN_LIMIT):
|
||||||
spec.slash_validator(state, attesting_indices_before_slashings[i])
|
spec.slash_validator(state, attesting_indices_before_slashings[i])
|
||||||
|
|
||||||
assert spec.compute_epoch_at_slot(state.slot) == spec.GENESIS_EPOCH + 1
|
|
||||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||||
|
|
||||||
pre_state = state.copy()
|
pre_state = state.copy()
|
||||||
|
|
|
@ -0,0 +1,155 @@
|
||||||
|
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||||
|
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_empty(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_empty(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_all_correct(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_all_correct(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_half_full(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_half_full(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_quarter_full(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_partial(spec, state, 0.25)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_but_partial_participation(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_one_attestation_one_correct(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_with_not_yet_activated_validators(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_with_not_yet_activated_validators(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_with_exited_validators(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_with_exited_validators(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_with_slashed_validators(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_with_slashed_validators(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_some_very_low_effective_balances_that_attested(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_some_very_low_effective_balances_that_attested(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_some_very_low_effective_balances_that_did_not_attest(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_some_very_low_effective_balances_that_did_not_attest(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# NOTE: No source incorrect tests
|
||||||
|
# All PendingAttestations in state have source validated
|
||||||
|
# We choose to keep this invariant in these tests to not force clients to test with degenerate states
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_half_correct_target_incorrect_head(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||||
|
spec, state,
|
||||||
|
correct_target=True,
|
||||||
|
correct_head=False,
|
||||||
|
fraction_incorrect=0.5,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_correct_target_incorrect_head(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||||
|
spec, state,
|
||||||
|
correct_target=True,
|
||||||
|
correct_head=False,
|
||||||
|
fraction_incorrect=1.0,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_half_incorrect_target_incorrect_head(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||||
|
spec, state,
|
||||||
|
correct_target=False,
|
||||||
|
correct_head=False,
|
||||||
|
fraction_incorrect=0.5,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_half_incorrect_target_correct_head(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||||
|
spec, state,
|
||||||
|
correct_target=False,
|
||||||
|
correct_head=True,
|
||||||
|
fraction_incorrect=0.5,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_delay_one_slot(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_delay_one_slot(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_delay_max_slots(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_delay_max_slots(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_mixed_delay(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_mixed_delay(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_proposer_not_in_attestations(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_proposer_not_in_attestations(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_duplicate_attestations_at_later_slots(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_duplicate_attestations_at_later_slots(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_all_balances_too_low_for_reward(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_all_balances_too_low_for_reward(spec, state)
|
|
@ -0,0 +1,156 @@
|
||||||
|
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||||
|
from eth2spec.test.helpers.rewards import leaking
|
||||||
|
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_empty_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_empty(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_full_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_all_correct(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_half_full_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_half_full(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_quarter_full_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_partial(spec, state, 0.25)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_full_but_partial_participation_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_one_attestation_one_correct_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_with_not_yet_activated_validators_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_with_not_yet_activated_validators(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_with_exited_validators_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_with_exited_validators(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_with_slashed_validators_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_with_slashed_validators(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_some_very_low_effective_balances_that_attested_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_some_very_low_effective_balances_that_attested(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_some_very_low_effective_balances_that_did_not_attest_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_some_very_low_effective_balances_that_did_not_attest(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# NOTE: No source incorrect tests
|
||||||
|
# All PendingAttestations in state have source validated
|
||||||
|
# We choose to keep this invariant in these tests to not force clients to test with degenerate states
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_full_half_correct_target_incorrect_head_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||||
|
spec, state,
|
||||||
|
correct_target=True,
|
||||||
|
correct_head=False,
|
||||||
|
fraction_incorrect=0.5,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_full_correct_target_incorrect_head_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||||
|
spec, state,
|
||||||
|
correct_target=True,
|
||||||
|
correct_head=False,
|
||||||
|
fraction_incorrect=1.0,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_full_half_incorrect_target_incorrect_head_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||||
|
spec, state,
|
||||||
|
correct_target=False,
|
||||||
|
correct_head=False,
|
||||||
|
fraction_incorrect=0.5,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_full_half_incorrect_target_correct_head_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||||
|
spec, state,
|
||||||
|
correct_target=False,
|
||||||
|
correct_head=True,
|
||||||
|
fraction_incorrect=0.5,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_full_random_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_random(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking(epochs=5)
|
||||||
|
def test_full_random_five_epoch_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_random(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@leaking(epochs=10)
|
||||||
|
def test_full_random_ten_epoch_leak(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_random(spec, state)
|
|
@ -0,0 +1,45 @@
|
||||||
|
from random import Random
|
||||||
|
|
||||||
|
from eth2spec.test.context import (
|
||||||
|
with_all_phases,
|
||||||
|
spec_test,
|
||||||
|
spec_state_test,
|
||||||
|
with_custom_state,
|
||||||
|
single_phase,
|
||||||
|
low_balances, misc_balances,
|
||||||
|
)
|
||||||
|
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_random_0(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(1010))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_random_1(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(2020))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_full_random_2(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(3030))
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_full_random_low_balances(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_random(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_full_random_misc_balances(spec, state):
|
||||||
|
yield from rewards_helpers.run_test_full_random(spec, state)
|
|
@ -1,17 +1,28 @@
|
||||||
from copy import deepcopy
|
|
||||||
|
|
||||||
from eth2spec.utils import bls
|
from eth2spec.utils import bls
|
||||||
|
|
||||||
from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block, next_slot, next_epoch
|
from eth2spec.test.helpers.state import (
|
||||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block, \
|
get_balance, state_transition_and_sign_block,
|
||||||
transition_unsigned_block
|
next_slot, next_epoch, next_epoch_via_block,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.block import (
|
||||||
|
build_empty_block_for_next_slot, build_empty_block,
|
||||||
|
sign_block,
|
||||||
|
transition_unsigned_block,
|
||||||
|
)
|
||||||
from eth2spec.test.helpers.keys import privkeys, pubkeys
|
from eth2spec.test.helpers.keys import privkeys, pubkeys
|
||||||
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, get_indexed_attestation_participants
|
from eth2spec.test.helpers.attester_slashings import (
|
||||||
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
|
get_valid_attester_slashing_by_indices,
|
||||||
from eth2spec.test.helpers.attestations import get_valid_attestation
|
get_valid_attester_slashing,
|
||||||
|
get_indexed_attestation_participants,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect
|
||||||
|
from eth2spec.test.helpers.attestations import get_valid_attestation, fill_block_shard_transitions_by_attestations
|
||||||
from eth2spec.test.helpers.deposits import prepare_state_and_deposit
|
from eth2spec.test.helpers.deposits import prepare_state_and_deposit
|
||||||
|
|
||||||
from eth2spec.test.context import spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases
|
from eth2spec.test.context import (
|
||||||
|
spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases,
|
||||||
|
PHASE1
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
|
@ -45,10 +56,12 @@ def test_same_slot_block_transition(spec, state):
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
assert state.slot == block.slot
|
||||||
|
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||||
|
|
||||||
yield 'blocks', [signed_block]
|
yield 'blocks', [signed_block]
|
||||||
yield 'post', state
|
yield 'post', None
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
|
@ -71,6 +84,81 @@ def test_empty_block_transition(spec, state):
|
||||||
assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Bytes32()
|
assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != spec.Bytes32()
|
||||||
|
|
||||||
|
|
||||||
|
def process_and_sign_block_without_header_validations(spec, state, block):
|
||||||
|
"""
|
||||||
|
Artificially bypass the restrictions in the state transition to transition and sign block
|
||||||
|
|
||||||
|
WARNING UNSAFE: Only use when generating valid-looking invalid blocks for test vectors
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Perform single mutation in `process_block_header`
|
||||||
|
state.latest_block_header = spec.BeaconBlockHeader(
|
||||||
|
slot=block.slot,
|
||||||
|
proposer_index=block.proposer_index,
|
||||||
|
parent_root=block.parent_root,
|
||||||
|
state_root=spec.Bytes32(),
|
||||||
|
body_root=block.body.hash_tree_root(),
|
||||||
|
)
|
||||||
|
|
||||||
|
# Perform rest of process_block transitions
|
||||||
|
spec.process_randao(state, block.body)
|
||||||
|
spec.process_eth1_data(state, block.body)
|
||||||
|
spec.process_operations(state, block.body)
|
||||||
|
|
||||||
|
# Insert post-state rot
|
||||||
|
block.state_root = state.hash_tree_root()
|
||||||
|
|
||||||
|
# Sign block
|
||||||
|
return sign_block(spec, state, block)
|
||||||
|
|
||||||
|
|
||||||
|
@with_phases(['phase0'])
|
||||||
|
@spec_state_test
|
||||||
|
def test_proposal_for_genesis_slot(spec, state):
|
||||||
|
assert state.slot == spec.GENESIS_SLOT
|
||||||
|
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
block = build_empty_block(spec, state, spec.GENESIS_SLOT)
|
||||||
|
block.parent_root = state.latest_block_header.hash_tree_root()
|
||||||
|
|
||||||
|
# Show that normal path through transition fails
|
||||||
|
failed_state = state.copy()
|
||||||
|
expect_assertion_error(
|
||||||
|
lambda: spec.state_transition(failed_state, spec.SignedBeaconBlock(message=block), validate_result=False)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Artificially bypass the restriction in the state transition to transition and sign block for test vectors
|
||||||
|
signed_block = process_and_sign_block_without_header_validations(spec, state, block)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_block]
|
||||||
|
yield 'post', None
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_parent_from_same_slot(spec, state):
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
parent_block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
signed_parent_block = state_transition_and_sign_block(spec, state, parent_block)
|
||||||
|
|
||||||
|
child_block = parent_block.copy()
|
||||||
|
child_block.parent_root = state.latest_block_header.hash_tree_root()
|
||||||
|
|
||||||
|
# Show that normal path through transition fails
|
||||||
|
failed_state = state.copy()
|
||||||
|
expect_assertion_error(
|
||||||
|
lambda: spec.state_transition(failed_state, spec.SignedBeaconBlock(message=child_block), validate_result=False)
|
||||||
|
)
|
||||||
|
|
||||||
|
# Artificially bypass the restriction in the state transition to transition and sign block for test vectors
|
||||||
|
signed_child_block = process_and_sign_block_without_header_validations(spec, state, child_block)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_parent_block, signed_child_block]
|
||||||
|
yield 'post', None
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_invalid_state_root(spec, state):
|
def test_invalid_state_root(spec, state):
|
||||||
|
@ -228,11 +316,11 @@ def test_empty_epoch_transition_not_finalizing(spec, state):
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_proposer_slashing(spec, state):
|
def test_proposer_slashing(spec, state):
|
||||||
# copy for later balance lookups.
|
# copy for later balance lookups.
|
||||||
pre_state = deepcopy(state)
|
pre_state = state.copy()
|
||||||
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
|
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
|
||||||
validator_index = proposer_slashing.signed_header_1.message.proposer_index
|
slashed_index = proposer_slashing.signed_header_1.message.proposer_index
|
||||||
|
|
||||||
assert not state.validators[validator_index].slashed
|
assert not state.validators[slashed_index].slashed
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
|
@ -247,25 +335,108 @@ def test_proposer_slashing(spec, state):
|
||||||
yield 'blocks', [signed_block]
|
yield 'blocks', [signed_block]
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
# check if slashed
|
check_proposer_slashing_effect(spec, pre_state, state, slashed_index)
|
||||||
slashed_validator = state.validators[validator_index]
|
|
||||||
assert slashed_validator.slashed
|
|
||||||
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
|
@with_all_phases
|
||||||
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
@spec_state_test
|
||||||
# lost whistleblower reward
|
def test_double_same_proposer_slashings_same_block(spec, state):
|
||||||
assert get_balance(state, validator_index) < get_balance(pre_state, validator_index)
|
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
|
||||||
|
slashed_index = proposer_slashing.signed_header_1.message.proposer_index
|
||||||
|
assert not state.validators[slashed_index].slashed
|
||||||
|
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
block.body.proposer_slashings = [proposer_slashing, proposer_slashing]
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_block]
|
||||||
|
yield 'post', None
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_double_similar_proposer_slashings_same_block(spec, state):
|
||||||
|
slashed_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
|
||||||
|
|
||||||
|
# Same validator, but different slashable offences in the same block
|
||||||
|
proposer_slashing_1 = get_valid_proposer_slashing(spec, state, random_root=b'\xaa' * 32,
|
||||||
|
slashed_index=slashed_index,
|
||||||
|
signed_1=True, signed_2=True)
|
||||||
|
proposer_slashing_2 = get_valid_proposer_slashing(spec, state, random_root=b'\xbb' * 32,
|
||||||
|
slashed_index=slashed_index,
|
||||||
|
signed_1=True, signed_2=True)
|
||||||
|
assert not state.validators[slashed_index].slashed
|
||||||
|
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
block.body.proposer_slashings = [proposer_slashing_1, proposer_slashing_2]
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_block]
|
||||||
|
yield 'post', None
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_multiple_different_proposer_slashings_same_block(spec, state):
|
||||||
|
pre_state = state.copy()
|
||||||
|
|
||||||
|
num_slashings = 3
|
||||||
|
proposer_slashings = []
|
||||||
|
for i in range(num_slashings):
|
||||||
|
slashed_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[i]
|
||||||
|
assert not state.validators[slashed_index].slashed
|
||||||
|
|
||||||
|
proposer_slashing = get_valid_proposer_slashing(spec, state,
|
||||||
|
slashed_index=slashed_index,
|
||||||
|
signed_1=True, signed_2=True)
|
||||||
|
proposer_slashings.append(proposer_slashing)
|
||||||
|
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
#
|
||||||
|
# Add to state via block transition
|
||||||
|
#
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
block.body.proposer_slashings = proposer_slashings
|
||||||
|
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_block]
|
||||||
|
yield 'post', state
|
||||||
|
|
||||||
|
for proposer_slashing in proposer_slashings:
|
||||||
|
slashed_index = proposer_slashing.signed_header_1.message.proposer_index
|
||||||
|
check_proposer_slashing_effect(spec, pre_state, state, slashed_index)
|
||||||
|
|
||||||
|
|
||||||
|
def check_attester_slashing_effect(spec, pre_state, state, slashed_indices):
|
||||||
|
for slashed_index in slashed_indices:
|
||||||
|
slashed_validator = state.validators[slashed_index]
|
||||||
|
assert slashed_validator.slashed
|
||||||
|
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
# lost whistleblower reward
|
||||||
|
assert get_balance(state, slashed_index) < get_balance(pre_state, slashed_index)
|
||||||
|
|
||||||
|
proposer_index = spec.get_beacon_proposer_index(state)
|
||||||
|
# gained whistleblower reward
|
||||||
|
assert get_balance(state, proposer_index) > get_balance(pre_state, proposer_index)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_attester_slashing(spec, state):
|
def test_attester_slashing(spec, state):
|
||||||
# copy for later balance lookups.
|
# copy for later balance lookups.
|
||||||
pre_state = deepcopy(state)
|
pre_state = state.copy()
|
||||||
|
|
||||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||||
validator_index = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)[0]
|
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
|
||||||
|
|
||||||
assert not state.validators[validator_index].slashed
|
assert not any(state.validators[i].slashed for i in slashed_indices)
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
|
@ -280,19 +451,118 @@ def test_attester_slashing(spec, state):
|
||||||
yield 'blocks', [signed_block]
|
yield 'blocks', [signed_block]
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
slashed_validator = state.validators[validator_index]
|
check_attester_slashing_effect(spec, pre_state, state, slashed_indices)
|
||||||
assert slashed_validator.slashed
|
|
||||||
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
|
|
||||||
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
|
||||||
# lost whistleblower reward
|
|
||||||
assert get_balance(state, validator_index) < get_balance(pre_state, validator_index)
|
|
||||||
|
|
||||||
proposer_index = spec.get_beacon_proposer_index(state)
|
|
||||||
# gained whistleblower reward
|
@with_all_phases
|
||||||
assert (
|
@spec_state_test
|
||||||
get_balance(state, proposer_index) >
|
def test_duplicate_attester_slashing(spec, state):
|
||||||
get_balance(pre_state, proposer_index)
|
# Skip test if config cannot handle multiple AttesterSlashings per block
|
||||||
|
if spec.MAX_ATTESTER_SLASHINGS < 2:
|
||||||
|
return
|
||||||
|
|
||||||
|
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||||
|
attester_slashings = [attester_slashing, attester_slashing.copy()]
|
||||||
|
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
|
||||||
|
|
||||||
|
assert not any(state.validators[i].slashed for i in slashed_indices)
|
||||||
|
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
#
|
||||||
|
# Add to state via block transition
|
||||||
|
#
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
block.body.attester_slashings = attester_slashings
|
||||||
|
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_block]
|
||||||
|
yield 'post', None
|
||||||
|
|
||||||
|
|
||||||
|
# All AttesterSlashing tests should be adopted for Phase 1 but helper support is not yet there
|
||||||
|
|
||||||
|
@with_phases(['phase0'])
|
||||||
|
@spec_state_test
|
||||||
|
def test_multiple_attester_slashings_no_overlap(spec, state):
|
||||||
|
# Skip test if config cannot handle multiple AttesterSlashings per block
|
||||||
|
if spec.MAX_ATTESTER_SLASHINGS < 2:
|
||||||
|
return
|
||||||
|
|
||||||
|
# copy for later balance lookups.
|
||||||
|
pre_state = state.copy()
|
||||||
|
|
||||||
|
full_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[:8]
|
||||||
|
half_length = len(full_indices) // 2
|
||||||
|
|
||||||
|
attester_slashing_1 = get_valid_attester_slashing_by_indices(
|
||||||
|
spec, state,
|
||||||
|
full_indices[:half_length], signed_1=True, signed_2=True,
|
||||||
)
|
)
|
||||||
|
attester_slashing_2 = get_valid_attester_slashing_by_indices(
|
||||||
|
spec, state,
|
||||||
|
full_indices[half_length:], signed_1=True, signed_2=True,
|
||||||
|
)
|
||||||
|
attester_slashings = [attester_slashing_1, attester_slashing_2]
|
||||||
|
|
||||||
|
assert not any(state.validators[i].slashed for i in full_indices)
|
||||||
|
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
#
|
||||||
|
# Add to state via block transition
|
||||||
|
#
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
block.body.attester_slashings = attester_slashings
|
||||||
|
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_block]
|
||||||
|
yield 'post', state
|
||||||
|
|
||||||
|
check_attester_slashing_effect(spec, pre_state, state, full_indices)
|
||||||
|
|
||||||
|
|
||||||
|
@with_phases(['phase0'])
|
||||||
|
@spec_state_test
|
||||||
|
def test_multiple_attester_slashings_partial_overlap(spec, state):
|
||||||
|
# Skip test if config cannot handle multiple AttesterSlashings per block
|
||||||
|
if spec.MAX_ATTESTER_SLASHINGS < 2:
|
||||||
|
return
|
||||||
|
|
||||||
|
# copy for later balance lookups.
|
||||||
|
pre_state = state.copy()
|
||||||
|
|
||||||
|
full_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[:8]
|
||||||
|
one_third_length = len(full_indices) // 3
|
||||||
|
|
||||||
|
attester_slashing_1 = get_valid_attester_slashing_by_indices(
|
||||||
|
spec, state,
|
||||||
|
full_indices[:one_third_length * 2], signed_1=True, signed_2=True,
|
||||||
|
)
|
||||||
|
attester_slashing_2 = get_valid_attester_slashing_by_indices(
|
||||||
|
spec, state,
|
||||||
|
full_indices[one_third_length:], signed_1=True, signed_2=True,
|
||||||
|
)
|
||||||
|
attester_slashings = [attester_slashing_1, attester_slashing_2]
|
||||||
|
|
||||||
|
assert not any(state.validators[i].slashed for i in full_indices)
|
||||||
|
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
#
|
||||||
|
# Add to state via block transition
|
||||||
|
#
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
block.body.attester_slashings = attester_slashings
|
||||||
|
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_block]
|
||||||
|
yield 'post', state
|
||||||
|
|
||||||
|
check_attester_slashing_effect(spec, pre_state, state, full_indices)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
|
@ -303,22 +573,19 @@ def test_proposer_after_inactive_index(spec, state):
|
||||||
state.validators[inactive_index].exit_epoch = spec.get_current_epoch(state)
|
state.validators[inactive_index].exit_epoch = spec.get_current_epoch(state)
|
||||||
|
|
||||||
# skip forward, get brand new proposers
|
# skip forward, get brand new proposers
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
block = build_empty_block_for_next_slot(spec, state)
|
|
||||||
state_transition_and_sign_block(spec, state, block)
|
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
next_slot(spec, state)
|
|
||||||
proposer_index = spec.get_beacon_proposer_index(state)
|
proposer_index = spec.get_beacon_proposer_index(state)
|
||||||
if proposer_index > inactive_index:
|
if proposer_index > inactive_index:
|
||||||
# found a proposer that has a higher index than a disabled validator
|
# found a proposer that has a higher index than a disabled validator
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
# test if the proposer can be recognized correctly after the inactive validator
|
# test if the proposer can be recognized correctly after the inactive validator
|
||||||
signed_block = state_transition_and_sign_block(spec, state, build_empty_block(spec, state))
|
signed_block = state_transition_and_sign_block(spec, state, build_empty_block_for_next_slot(spec, state))
|
||||||
yield 'blocks', [signed_block]
|
yield 'blocks', [signed_block]
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
break
|
break
|
||||||
|
next_slot(spec, state)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
|
@ -336,16 +603,16 @@ def test_high_proposer_index(spec, state):
|
||||||
|
|
||||||
active_count = len(spec.get_active_validator_indices(state, current_epoch))
|
active_count = len(spec.get_active_validator_indices(state, current_epoch))
|
||||||
while True:
|
while True:
|
||||||
next_slot(spec, state)
|
|
||||||
proposer_index = spec.get_beacon_proposer_index(state)
|
proposer_index = spec.get_beacon_proposer_index(state)
|
||||||
if proposer_index >= active_count:
|
if proposer_index >= active_count:
|
||||||
# found a proposer that has a higher index than the active validator count
|
# found a proposer that has a higher index than the active validator count
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
# test if the proposer can be recognized correctly, even while it has a high index.
|
# test if the proposer can be recognized correctly, even while it has a high index.
|
||||||
signed_block = state_transition_and_sign_block(spec, state, build_empty_block(spec, state))
|
signed_block = state_transition_and_sign_block(spec, state, build_empty_block_for_next_slot(spec, state))
|
||||||
yield 'blocks', [signed_block]
|
yield 'blocks', [signed_block]
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
break
|
break
|
||||||
|
next_slot(spec, state)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
|
@ -420,12 +687,14 @@ def test_attestation(spec, state):
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
attestation = get_valid_attestation(spec, state, signed=True)
|
attestation = get_valid_attestation(spec, state, signed=True, on_time=True)
|
||||||
|
|
||||||
# Add to state via block transition
|
# Add to state via block transition
|
||||||
pre_current_attestations_len = len(state.current_epoch_attestations)
|
pre_current_attestations_len = len(state.current_epoch_attestations)
|
||||||
attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||||
attestation_block.body.attestations.append(attestation)
|
attestation_block.body.attestations.append(attestation)
|
||||||
|
if spec.fork == PHASE1:
|
||||||
|
fill_block_shard_transitions_by_attestations(spec, state, attestation_block)
|
||||||
signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)
|
signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)
|
||||||
|
|
||||||
assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
|
assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
|
||||||
|
@ -443,35 +712,38 @@ def test_attestation(spec, state):
|
||||||
assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
|
assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
|
||||||
|
|
||||||
|
|
||||||
# In phase1 a committee is computed for PERSISTENT_COMMITTEE_PERIOD slots ago,
|
def prepare_signed_exits(spec, state, indices):
|
||||||
|
domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)
|
||||||
|
|
||||||
|
def create_signed_exit(index):
|
||||||
|
exit = spec.VoluntaryExit(
|
||||||
|
epoch=spec.get_current_epoch(state),
|
||||||
|
validator_index=index,
|
||||||
|
)
|
||||||
|
signing_root = spec.compute_signing_root(exit, domain)
|
||||||
|
return spec.SignedVoluntaryExit(message=exit, signature=bls.Sign(privkeys[index], signing_root))
|
||||||
|
|
||||||
|
return [create_signed_exit(index) for index in indices]
|
||||||
|
|
||||||
|
|
||||||
|
# In phase1 a committee is computed for SHARD_COMMITTEE_PERIOD slots ago,
|
||||||
# exceeding the minimal-config randao mixes memory size.
|
# exceeding the minimal-config randao mixes memory size.
|
||||||
|
# Applies to all voluntary-exit sanity block tests.
|
||||||
|
|
||||||
@with_phases(['phase0'])
|
@with_phases(['phase0'])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_voluntary_exit(spec, state):
|
def test_voluntary_exit(spec, state):
|
||||||
validator_index = spec.get_active_validator_indices(
|
validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
|
||||||
state,
|
|
||||||
spec.get_current_epoch(state)
|
|
||||||
)[-1]
|
|
||||||
|
|
||||||
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
|
signed_exits = prepare_signed_exits(spec, state, [validator_index])
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
voluntary_exit = spec.VoluntaryExit(
|
|
||||||
epoch=spec.get_current_epoch(state),
|
|
||||||
validator_index=validator_index,
|
|
||||||
)
|
|
||||||
domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)
|
|
||||||
signing_root = spec.compute_signing_root(voluntary_exit, domain)
|
|
||||||
signed_voluntary_exit = spec.SignedVoluntaryExit(
|
|
||||||
message=voluntary_exit,
|
|
||||||
signature=bls.Sign(privkeys[validator_index], signing_root)
|
|
||||||
)
|
|
||||||
|
|
||||||
# Add to state via block transition
|
# Add to state via block transition
|
||||||
initiate_exit_block = build_empty_block_for_next_slot(spec, state)
|
initiate_exit_block = build_empty_block_for_next_slot(spec, state)
|
||||||
initiate_exit_block.body.voluntary_exits.append(signed_voluntary_exit)
|
initiate_exit_block.body.voluntary_exits = signed_exits
|
||||||
signed_initiate_exit_block = state_transition_and_sign_block(spec, state, initiate_exit_block)
|
signed_initiate_exit_block = state_transition_and_sign_block(spec, state, initiate_exit_block)
|
||||||
|
|
||||||
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
@ -486,6 +758,59 @@ def test_voluntary_exit(spec, state):
|
||||||
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
|
||||||
|
@with_phases(['phase0'])
|
||||||
|
@spec_state_test
|
||||||
|
def test_double_validator_exit_same_block(spec, state):
|
||||||
|
validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
|
||||||
|
|
||||||
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
|
# Same index tries to exit twice, but should only be able to do so once.
|
||||||
|
signed_exits = prepare_signed_exits(spec, state, [validator_index, validator_index])
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
# Add to state via block transition
|
||||||
|
initiate_exit_block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
initiate_exit_block.body.voluntary_exits = signed_exits
|
||||||
|
signed_initiate_exit_block = state_transition_and_sign_block(spec, state, initiate_exit_block, expect_fail=True)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_initiate_exit_block]
|
||||||
|
yield 'post', None
|
||||||
|
|
||||||
|
|
||||||
|
@with_phases(['phase0'])
|
||||||
|
@spec_state_test
|
||||||
|
def test_multiple_different_validator_exits_same_block(spec, state):
|
||||||
|
validator_indices = [
|
||||||
|
spec.get_active_validator_indices(state, spec.get_current_epoch(state))[i]
|
||||||
|
for i in range(3)
|
||||||
|
]
|
||||||
|
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||||
|
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||||
|
|
||||||
|
signed_exits = prepare_signed_exits(spec, state, validator_indices)
|
||||||
|
yield 'pre', state
|
||||||
|
|
||||||
|
# Add to state via block transition
|
||||||
|
initiate_exit_block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
initiate_exit_block.body.voluntary_exits = signed_exits
|
||||||
|
signed_initiate_exit_block = state_transition_and_sign_block(spec, state, initiate_exit_block)
|
||||||
|
|
||||||
|
for index in validator_indices:
|
||||||
|
assert state.validators[index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
# Process within epoch transition
|
||||||
|
exit_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
|
||||||
|
signed_exit_block = state_transition_and_sign_block(spec, state, exit_block)
|
||||||
|
|
||||||
|
yield 'blocks', [signed_initiate_exit_block, signed_exit_block]
|
||||||
|
yield 'post', state
|
||||||
|
|
||||||
|
for index in validator_indices:
|
||||||
|
assert state.validators[index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_balance_driven_status_transitions(spec, state):
|
def test_balance_driven_status_transitions(spec, state):
|
|
@ -51,7 +51,8 @@ def test_double_empty_epoch(spec, state):
|
||||||
@with_all_phases
|
@with_all_phases
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_over_epoch_boundary(spec, state):
|
def test_over_epoch_boundary(spec, state):
|
||||||
spec.process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH // 2))
|
if spec.SLOTS_PER_EPOCH > 1:
|
||||||
|
spec.process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH // 2))
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
slots = spec.SLOTS_PER_EPOCH
|
slots = spec.SLOTS_PER_EPOCH
|
||||||
yield 'slots', slots
|
yield 'slots', slots
|
|
@ -1,5 +1,6 @@
|
||||||
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
|
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
|
||||||
from eth2spec.test.context import (
|
from eth2spec.test.context import (
|
||||||
|
PHASE0,
|
||||||
with_all_phases_except,
|
with_all_phases_except,
|
||||||
spec_state_test,
|
spec_state_test,
|
||||||
expect_assertion_error,
|
expect_assertion_error,
|
||||||
|
@ -54,7 +55,7 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_success(spec, state):
|
def test_success(spec, state):
|
||||||
|
@ -64,7 +65,7 @@ def test_success(spec, state):
|
||||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)
|
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_reveal_too_early(spec, state):
|
def test_reveal_too_early(spec, state):
|
||||||
|
@ -73,7 +74,7 @@ def test_reveal_too_early(spec, state):
|
||||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
|
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_wrong_period(spec, state):
|
def test_wrong_period(spec, state):
|
||||||
|
@ -82,7 +83,7 @@ def test_wrong_period(spec, state):
|
||||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
|
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_late_reveal(spec, state):
|
def test_late_reveal(spec, state):
|
||||||
|
@ -92,7 +93,7 @@ def test_late_reveal(spec, state):
|
||||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)
|
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_double_reveal(spec, state):
|
def test_double_reveal(spec, state):
|
||||||
|
@ -104,7 +105,7 @@ def test_double_reveal(spec, state):
|
||||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
|
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_max_decrement(spec, state):
|
def test_max_decrement(spec, state):
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
|
from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
|
||||||
from eth2spec.test.helpers.block import apply_empty_block
|
from eth2spec.test.helpers.state import next_epoch_via_block, get_balance
|
||||||
from eth2spec.test.helpers.state import next_epoch, get_balance
|
|
||||||
from eth2spec.test.context import (
|
from eth2spec.test.context import (
|
||||||
|
PHASE0,
|
||||||
with_all_phases_except,
|
with_all_phases_except,
|
||||||
spec_state_test,
|
spec_state_test,
|
||||||
expect_assertion_error,
|
expect_assertion_error,
|
||||||
|
@ -41,7 +41,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_success(spec, state):
|
def test_success(spec, state):
|
||||||
|
@ -50,7 +50,7 @@ def test_success(spec, state):
|
||||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal)
|
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@never_bls
|
@never_bls
|
||||||
def test_reveal_from_current_epoch(spec, state):
|
def test_reveal_from_current_epoch(spec, state):
|
||||||
|
@ -59,18 +59,17 @@ def test_reveal_from_current_epoch(spec, state):
|
||||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@never_bls
|
@never_bls
|
||||||
def test_reveal_from_past_epoch(spec, state):
|
def test_reveal_from_past_epoch(spec, state):
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
apply_empty_block(spec, state)
|
|
||||||
randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state) - 1)
|
randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state) - 1)
|
||||||
|
|
||||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_reveal_with_custody_padding(spec, state):
|
def test_reveal_with_custody_padding(spec, state):
|
||||||
|
@ -82,7 +81,7 @@ def test_reveal_with_custody_padding(spec, state):
|
||||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
|
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
def test_reveal_with_custody_padding_minus_one(spec, state):
|
def test_reveal_with_custody_padding_minus_one(spec, state):
|
||||||
|
@ -94,7 +93,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state):
|
||||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
|
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@never_bls
|
@never_bls
|
||||||
def test_double_reveal(spec, state):
|
def test_double_reveal(spec, state):
|
||||||
|
@ -115,7 +114,7 @@ def test_double_reveal(spec, state):
|
||||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False)
|
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@never_bls
|
@never_bls
|
||||||
def test_revealer_is_slashed(spec, state):
|
def test_revealer_is_slashed(spec, state):
|
||||||
|
@ -125,7 +124,7 @@ def test_revealer_is_slashed(spec, state):
|
||||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except(['phase0'])
|
@with_all_phases_except([PHASE0])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@never_bls
|
@never_bls
|
||||||
def test_far_future_epoch(spec, state):
|
def test_far_future_epoch(spec, state):
|
||||||
|
|
|
@ -0,0 +1,73 @@
|
||||||
|
from eth2spec.test.context import (
|
||||||
|
PHASE0,
|
||||||
|
with_all_phases_except,
|
||||||
|
spec_state_test,
|
||||||
|
always_bls,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.shard_transitions import run_shard_transitions_processing
|
||||||
|
from eth2spec.test.helpers.shard_block import (
|
||||||
|
build_attestation_with_shard_transition,
|
||||||
|
build_shard_block,
|
||||||
|
build_shard_transitions_till_slot,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
|
||||||
|
|
||||||
|
|
||||||
|
def run_basic_crosslink_tests(spec, state, target_len_offset_slot, valid=True):
|
||||||
|
state = transition_to_valid_shard_slot(spec, state)
|
||||||
|
# At the beginning, let `x = state.slot`, `state.shard_states[shard].slot == x - 1`
|
||||||
|
slot_x = state.slot
|
||||||
|
committee_index = spec.CommitteeIndex(0)
|
||||||
|
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
|
||||||
|
assert state.shard_states[shard].slot == slot_x - 1
|
||||||
|
|
||||||
|
# Create SignedShardBlock
|
||||||
|
body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE
|
||||||
|
shard_block = build_shard_block(spec, state, shard, body=body, signed=True)
|
||||||
|
shard_blocks = [shard_block]
|
||||||
|
# Create a shard_transitions that would be included at beacon block `state.slot + target_len_offset_slot`
|
||||||
|
shard_transitions = build_shard_transitions_till_slot(
|
||||||
|
spec,
|
||||||
|
state,
|
||||||
|
shard_blocks={shard: shard_blocks},
|
||||||
|
on_time_slot=state.slot + target_len_offset_slot,
|
||||||
|
)
|
||||||
|
shard_transition = shard_transitions[shard]
|
||||||
|
# Create an attestation that would be included at beacon block `state.slot + target_len_offset_slot`
|
||||||
|
attestation = build_attestation_with_shard_transition(
|
||||||
|
spec,
|
||||||
|
state,
|
||||||
|
index=committee_index,
|
||||||
|
on_time_slot=state.slot + target_len_offset_slot,
|
||||||
|
shard_transition=shard_transition,
|
||||||
|
)
|
||||||
|
pre_gasprice = state.shard_states[shard].gasprice
|
||||||
|
|
||||||
|
transition_to(spec, state, state.slot + target_len_offset_slot)
|
||||||
|
pre_shard_state = state.shard_states[shard]
|
||||||
|
|
||||||
|
yield from run_shard_transitions_processing(spec, state, shard_transitions, [attestation], valid=valid)
|
||||||
|
|
||||||
|
if valid:
|
||||||
|
# After state transition,
|
||||||
|
assert state.slot == slot_x + target_len_offset_slot
|
||||||
|
shard_state = state.shard_states[shard]
|
||||||
|
assert shard_state != pre_shard_state
|
||||||
|
assert shard_state == shard_transition.shard_states[len(shard_transition.shard_states) - 1]
|
||||||
|
|
||||||
|
if target_len_offset_slot == 1:
|
||||||
|
assert shard_state.gasprice > pre_gasprice
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases_except([PHASE0])
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_basic_crosslinks(spec, state):
|
||||||
|
yield from run_basic_crosslink_tests(spec, state, target_len_offset_slot=1, valid=True)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases_except([PHASE0])
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_multiple_offset_slots(spec, state):
|
||||||
|
yield from run_basic_crosslink_tests(spec, state, target_len_offset_slot=3, valid=True)
|
|
@ -0,0 +1,107 @@
|
||||||
|
from typing import Dict, Sequence
|
||||||
|
|
||||||
|
from eth2spec.test.context import (
|
||||||
|
PHASE0,
|
||||||
|
with_all_phases_except,
|
||||||
|
spec_state_test,
|
||||||
|
always_bls,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.block import build_empty_block
|
||||||
|
from eth2spec.test.helpers.shard_block import (
|
||||||
|
build_attestation_with_shard_transition,
|
||||||
|
build_shard_block,
|
||||||
|
build_shard_transitions_till_slot,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.state import state_transition_and_sign_block, transition_to_valid_shard_slot
|
||||||
|
|
||||||
|
|
||||||
|
def run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index, valid=True):
|
||||||
|
shard_transitions = build_shard_transitions_till_slot(
|
||||||
|
spec, state, shard_blocks, on_time_slot=state.slot + target_len_offset_slot
|
||||||
|
)
|
||||||
|
attestations = [
|
||||||
|
build_attestation_with_shard_transition(
|
||||||
|
spec,
|
||||||
|
state,
|
||||||
|
on_time_slot=state.slot + target_len_offset_slot,
|
||||||
|
index=committee_index,
|
||||||
|
shard_transition=shard_transitions[shard],
|
||||||
|
)
|
||||||
|
for shard in shard_blocks.keys()
|
||||||
|
]
|
||||||
|
|
||||||
|
# Propose beacon block at slot `x + 1`
|
||||||
|
beacon_block = build_empty_block(spec, state, slot=state.slot + target_len_offset_slot)
|
||||||
|
beacon_block.body.attestations = attestations
|
||||||
|
beacon_block.body.shard_transitions = shard_transitions
|
||||||
|
|
||||||
|
pre_shard_states = state.shard_states.copy()
|
||||||
|
yield 'pre', state.copy()
|
||||||
|
yield 'block', beacon_block
|
||||||
|
state_transition_and_sign_block(spec, state, beacon_block)
|
||||||
|
if valid:
|
||||||
|
yield 'post', state
|
||||||
|
else:
|
||||||
|
yield 'post', None
|
||||||
|
return
|
||||||
|
|
||||||
|
for shard in range(spec.get_active_shard_count(state)):
|
||||||
|
post_shard_state = state.shard_states[shard]
|
||||||
|
if shard in shard_blocks:
|
||||||
|
# Shard state has been changed to state_transition result
|
||||||
|
assert post_shard_state == shard_transitions[shard].shard_states[
|
||||||
|
len(shard_transitions[shard].shard_states) - 1
|
||||||
|
]
|
||||||
|
assert beacon_block.slot == shard_transitions[shard].shard_states[0].slot + target_len_offset_slot
|
||||||
|
assert post_shard_state.slot == state.slot - 1
|
||||||
|
if len(shard_blocks[shard]) == 0:
|
||||||
|
# `latest_block_root` is the same
|
||||||
|
assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases_except([PHASE0])
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_process_beacon_block_with_normal_shard_transition(spec, state):
|
||||||
|
state = transition_to_valid_shard_slot(spec, state)
|
||||||
|
|
||||||
|
target_len_offset_slot = 1
|
||||||
|
committee_index = spec.CommitteeIndex(0)
|
||||||
|
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
|
||||||
|
assert state.shard_states[shard].slot == state.slot - 1
|
||||||
|
|
||||||
|
pre_gasprice = state.shard_states[shard].gasprice
|
||||||
|
|
||||||
|
# Create SignedShardBlock at slot `shard_state.slot + 1`
|
||||||
|
body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE
|
||||||
|
shard_block = build_shard_block(spec, state, shard, body=body, signed=True)
|
||||||
|
shard_blocks: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
|
||||||
|
|
||||||
|
yield from run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index)
|
||||||
|
|
||||||
|
shard_state = state.shard_states[shard]
|
||||||
|
|
||||||
|
if target_len_offset_slot == 1 and len(shard_blocks) > 0:
|
||||||
|
assert shard_state.gasprice > pre_gasprice
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases_except([PHASE0])
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_process_beacon_block_with_empty_proposal_transition(spec, state):
|
||||||
|
state = transition_to_valid_shard_slot(spec, state)
|
||||||
|
|
||||||
|
target_len_offset_slot = 1
|
||||||
|
committee_index = spec.CommitteeIndex(0)
|
||||||
|
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
|
||||||
|
assert state.shard_states[shard].slot == state.slot - 1
|
||||||
|
|
||||||
|
# No new shard block
|
||||||
|
shard_blocks = {}
|
||||||
|
|
||||||
|
pre_gasprice = state.shard_states[shard].gasprice
|
||||||
|
|
||||||
|
yield from run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index)
|
||||||
|
|
||||||
|
if target_len_offset_slot == 1 and len(shard_blocks) > 0:
|
||||||
|
assert state.shard_states[shard].gasprice > pre_gasprice
|
|
@ -1,7 +1,6 @@
|
||||||
from eth2spec.test.context import spec_state_test, never_bls, with_all_phases, with_phases
|
from eth2spec.test.context import spec_state_test, never_bls, with_all_phases, with_phases
|
||||||
from eth2spec.test.helpers.state import next_epoch
|
from eth2spec.test.helpers.state import next_epoch_via_block
|
||||||
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
|
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
|
||||||
from eth2spec.test.helpers.block import apply_empty_block
|
|
||||||
|
|
||||||
|
|
||||||
def check_finality(spec,
|
def check_finality(spec,
|
||||||
|
@ -58,10 +57,8 @@ def test_finality_no_updates_at_genesis(spec, state):
|
||||||
@never_bls
|
@never_bls
|
||||||
def test_finality_rule_4(spec, state):
|
def test_finality_rule_4(spec, state):
|
||||||
# get past first two epochs that finality does not run on
|
# get past first two epochs that finality does not run on
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
apply_empty_block(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
next_epoch(spec, state)
|
|
||||||
apply_empty_block(spec, state)
|
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
|
@ -86,10 +83,8 @@ def test_finality_rule_4(spec, state):
|
||||||
@never_bls
|
@never_bls
|
||||||
def test_finality_rule_1(spec, state):
|
def test_finality_rule_1(spec, state):
|
||||||
# get past first two epochs that finality does not run on
|
# get past first two epochs that finality does not run on
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
apply_empty_block(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
next_epoch(spec, state)
|
|
||||||
apply_empty_block(spec, state)
|
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
|
@ -116,10 +111,8 @@ def test_finality_rule_1(spec, state):
|
||||||
@never_bls
|
@never_bls
|
||||||
def test_finality_rule_2(spec, state):
|
def test_finality_rule_2(spec, state):
|
||||||
# get past first two epochs that finality does not run on
|
# get past first two epochs that finality does not run on
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
apply_empty_block(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
next_epoch(spec, state)
|
|
||||||
apply_empty_block(spec, state)
|
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
|
@ -152,10 +145,8 @@ def test_finality_rule_3(spec, state):
|
||||||
https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892
|
https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892
|
||||||
"""
|
"""
|
||||||
# get past first two epochs that finality does not run on
|
# get past first two epochs that finality does not run on
|
||||||
next_epoch(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
apply_empty_block(spec, state)
|
next_epoch_via_block(spec, state)
|
||||||
next_epoch(spec, state)
|
|
||||||
apply_empty_block(spec, state)
|
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,444 @@
|
||||||
|
from eth2spec.test.context import spec_state_test, always_bls, with_all_phases
|
||||||
|
from eth2spec.test.helpers.attestations import build_attestation_data
|
||||||
|
from eth2spec.test.helpers.block import build_empty_block
|
||||||
|
from eth2spec.test.helpers.deposits import prepare_state_and_deposit
|
||||||
|
from eth2spec.test.helpers.keys import privkeys, pubkeys
|
||||||
|
from eth2spec.test.helpers.state import next_epoch
|
||||||
|
from eth2spec.utils import bls
|
||||||
|
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
||||||
|
|
||||||
|
|
||||||
|
def run_get_signature_test(spec, state, obj, domain, get_signature_fn, privkey, pubkey, signing_ssz_object=None):
|
||||||
|
if signing_ssz_object is None:
|
||||||
|
signing_ssz_object = obj
|
||||||
|
signature = get_signature_fn(state, obj, privkey)
|
||||||
|
signing_root = spec.compute_signing_root(signing_ssz_object, domain)
|
||||||
|
assert bls.Verify(pubkey, signing_root, signature)
|
||||||
|
|
||||||
|
|
||||||
|
def run_get_committee_assignment(spec, state, epoch, validator_index, valid=True):
|
||||||
|
try:
|
||||||
|
assignment = spec.get_committee_assignment(state, epoch, validator_index)
|
||||||
|
committee, committee_index, slot = assignment
|
||||||
|
assert spec.compute_epoch_at_slot(slot) == epoch
|
||||||
|
assert committee == spec.get_beacon_committee(state, slot, committee_index)
|
||||||
|
assert committee_index < spec.get_committee_count_at_slot(state, slot)
|
||||||
|
assert validator_index in committee
|
||||||
|
assert valid
|
||||||
|
except AssertionError:
|
||||||
|
assert not valid
|
||||||
|
else:
|
||||||
|
assert valid
|
||||||
|
|
||||||
|
|
||||||
|
def run_is_candidate_block(spec, eth1_block, period_start, success=True):
|
||||||
|
assert success == spec.is_candidate_block(eth1_block, period_start)
|
||||||
|
|
||||||
|
|
||||||
|
def get_min_new_period_epochs(spec):
|
||||||
|
return int(
|
||||||
|
spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 # to seconds
|
||||||
|
/ spec.SECONDS_PER_SLOT / spec.SLOTS_PER_EPOCH
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def get_mock_aggregate(spec):
|
||||||
|
return spec.Attestation(
|
||||||
|
data=spec.AttestationData(
|
||||||
|
slot=10,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Becoming a validator
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_check_if_validator_active(spec, state):
|
||||||
|
active_validator_index = len(state.validators) - 1
|
||||||
|
assert spec.check_if_validator_active(state, active_validator_index)
|
||||||
|
new_validator_index = len(state.validators)
|
||||||
|
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||||
|
deposit = prepare_state_and_deposit(spec, state, new_validator_index, amount, signed=True)
|
||||||
|
spec.process_deposit(state, deposit)
|
||||||
|
assert not spec.check_if_validator_active(state, new_validator_index)
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Validator assignments
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_get_committee_assignment_current_epoch(spec, state):
|
||||||
|
epoch = spec.get_current_epoch(state)
|
||||||
|
validator_index = len(state.validators) - 1
|
||||||
|
run_get_committee_assignment(spec, state, epoch, validator_index, valid=True)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_get_committee_assignment_next_epoch(spec, state):
|
||||||
|
epoch = spec.get_current_epoch(state) + 1
|
||||||
|
validator_index = len(state.validators) - 1
|
||||||
|
run_get_committee_assignment(spec, state, epoch, validator_index, valid=True)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_get_committee_assignment_out_bound_epoch(spec, state):
|
||||||
|
epoch = spec.get_current_epoch(state) + 2
|
||||||
|
validator_index = len(state.validators) - 1
|
||||||
|
run_get_committee_assignment(spec, state, epoch, validator_index, valid=False)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_is_proposer(spec, state):
|
||||||
|
proposer_index = spec.get_beacon_proposer_index(state)
|
||||||
|
assert spec.is_proposer(state, proposer_index)
|
||||||
|
|
||||||
|
proposer_index = proposer_index + 1 % len(state.validators)
|
||||||
|
assert not spec.is_proposer(state, proposer_index)
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Beacon chain responsibilities
|
||||||
|
#
|
||||||
|
|
||||||
|
|
||||||
|
# Block proposal
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_get_epoch_signature(spec, state):
|
||||||
|
block = spec.BeaconBlock()
|
||||||
|
privkey = privkeys[0]
|
||||||
|
pubkey = pubkeys[0]
|
||||||
|
domain = spec.get_domain(state, spec.DOMAIN_RANDAO, spec.compute_epoch_at_slot(block.slot))
|
||||||
|
run_get_signature_test(
|
||||||
|
spec=spec,
|
||||||
|
state=state,
|
||||||
|
obj=block,
|
||||||
|
domain=domain,
|
||||||
|
get_signature_fn=spec.get_epoch_signature,
|
||||||
|
privkey=privkey,
|
||||||
|
pubkey=pubkey,
|
||||||
|
signing_ssz_object=spec.compute_epoch_at_slot(block.slot),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_is_candidate_block(spec, state):
|
||||||
|
period_start = spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 + 1000
|
||||||
|
run_is_candidate_block(
|
||||||
|
spec,
|
||||||
|
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE),
|
||||||
|
period_start,
|
||||||
|
success=True,
|
||||||
|
)
|
||||||
|
run_is_candidate_block(
|
||||||
|
spec,
|
||||||
|
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE + 1),
|
||||||
|
period_start,
|
||||||
|
success=False,
|
||||||
|
)
|
||||||
|
run_is_candidate_block(
|
||||||
|
spec,
|
||||||
|
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2),
|
||||||
|
period_start,
|
||||||
|
success=True,
|
||||||
|
)
|
||||||
|
run_is_candidate_block(
|
||||||
|
spec,
|
||||||
|
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 - 1),
|
||||||
|
period_start,
|
||||||
|
success=False,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_get_eth1_vote_default_vote(spec, state):
|
||||||
|
min_new_period_epochs = get_min_new_period_epochs(spec)
|
||||||
|
for _ in range(min_new_period_epochs):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
state.eth1_data_votes = ()
|
||||||
|
eth1_chain = []
|
||||||
|
eth1_data = spec.get_eth1_vote(state, eth1_chain)
|
||||||
|
assert eth1_data == state.eth1_data
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_get_eth1_vote_consensus_vote(spec, state):
|
||||||
|
min_new_period_epochs = get_min_new_period_epochs(spec)
|
||||||
|
for _ in range(min_new_period_epochs + 2):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
period_start = spec.voting_period_start_time(state)
|
||||||
|
votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD
|
||||||
|
assert votes_length >= 3 # We need to have the majority vote
|
||||||
|
state.eth1_data_votes = ()
|
||||||
|
|
||||||
|
block_1 = spec.Eth1Block(
|
||||||
|
timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE - 1,
|
||||||
|
deposit_count=state.eth1_data.deposit_count,
|
||||||
|
deposit_root=b'\x04' * 32,
|
||||||
|
)
|
||||||
|
block_2 = spec.Eth1Block(
|
||||||
|
timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE,
|
||||||
|
deposit_count=state.eth1_data.deposit_count + 1,
|
||||||
|
deposit_root=b'\x05' * 32,
|
||||||
|
)
|
||||||
|
eth1_chain = [block_1, block_2]
|
||||||
|
eth1_data_votes = []
|
||||||
|
|
||||||
|
# Only the first vote is for block_1
|
||||||
|
eth1_data_votes.append(spec.get_eth1_data(block_1))
|
||||||
|
# Other votes are for block_2
|
||||||
|
for _ in range(votes_length - 1):
|
||||||
|
eth1_data_votes.append(spec.get_eth1_data(block_2))
|
||||||
|
|
||||||
|
state.eth1_data_votes = eth1_data_votes
|
||||||
|
eth1_data = spec.get_eth1_vote(state, eth1_chain)
|
||||||
|
assert eth1_data.block_hash == block_2.hash_tree_root()
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_get_eth1_vote_tie(spec, state):
|
||||||
|
min_new_period_epochs = get_min_new_period_epochs(spec)
|
||||||
|
for _ in range(min_new_period_epochs + 1):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
period_start = spec.voting_period_start_time(state)
|
||||||
|
votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD
|
||||||
|
assert votes_length > 0 and votes_length % 2 == 0
|
||||||
|
|
||||||
|
state.eth1_data_votes = ()
|
||||||
|
block_1 = spec.Eth1Block(
|
||||||
|
timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE - 1,
|
||||||
|
deposit_count=state.eth1_data.deposit_count,
|
||||||
|
deposit_root=b'\x04' * 32,
|
||||||
|
)
|
||||||
|
block_2 = spec.Eth1Block(
|
||||||
|
timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE,
|
||||||
|
deposit_count=state.eth1_data.deposit_count + 1,
|
||||||
|
deposit_root=b'\x05' * 32,
|
||||||
|
)
|
||||||
|
eth1_chain = [block_1, block_2]
|
||||||
|
eth1_data_votes = []
|
||||||
|
# Half votes are for block_1, another half votes are for block_2
|
||||||
|
for i in range(votes_length):
|
||||||
|
if i % 2 == 0:
|
||||||
|
block = block_1
|
||||||
|
else:
|
||||||
|
block = block_2
|
||||||
|
eth1_data_votes.append(spec.get_eth1_data(block))
|
||||||
|
|
||||||
|
state.eth1_data_votes = eth1_data_votes
|
||||||
|
eth1_data = spec.get_eth1_vote(state, eth1_chain)
|
||||||
|
|
||||||
|
# Tiebreak by smallest distance -> eth1_chain[0]
|
||||||
|
assert eth1_data.block_hash == eth1_chain[0].hash_tree_root()
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_get_eth1_vote_chain_in_past(spec, state):
|
||||||
|
min_new_period_epochs = get_min_new_period_epochs(spec)
|
||||||
|
for _ in range(min_new_period_epochs + 1):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
period_start = spec.voting_period_start_time(state)
|
||||||
|
votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD
|
||||||
|
assert votes_length > 0 and votes_length % 2 == 0
|
||||||
|
|
||||||
|
state.eth1_data_votes = ()
|
||||||
|
block_1 = spec.Eth1Block(
|
||||||
|
timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE,
|
||||||
|
deposit_count=state.eth1_data.deposit_count - 1, # Chain prior to current eth1data
|
||||||
|
deposit_root=b'\x42' * 32,
|
||||||
|
)
|
||||||
|
eth1_chain = [block_1]
|
||||||
|
eth1_data_votes = []
|
||||||
|
|
||||||
|
state.eth1_data_votes = eth1_data_votes
|
||||||
|
eth1_data = spec.get_eth1_vote(state, eth1_chain)
|
||||||
|
|
||||||
|
# Should be default vote
|
||||||
|
assert eth1_data == state.eth1_data
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_compute_new_state_root(spec, state):
|
||||||
|
pre_state = state.copy()
|
||||||
|
post_state = state.copy()
|
||||||
|
block = build_empty_block(spec, state, state.slot + 1)
|
||||||
|
state_root = spec.compute_new_state_root(state, block)
|
||||||
|
|
||||||
|
assert state_root != pre_state.hash_tree_root()
|
||||||
|
assert state == pre_state
|
||||||
|
|
||||||
|
# dumb verification
|
||||||
|
spec.process_slots(post_state, block.slot)
|
||||||
|
spec.process_block(post_state, block)
|
||||||
|
assert state_root == post_state.hash_tree_root()
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_get_block_signature(spec, state):
|
||||||
|
privkey = privkeys[0]
|
||||||
|
pubkey = pubkeys[0]
|
||||||
|
block = build_empty_block(spec, state)
|
||||||
|
domain = spec.get_domain(state, spec.DOMAIN_BEACON_PROPOSER, spec.compute_epoch_at_slot(block.slot))
|
||||||
|
run_get_signature_test(
|
||||||
|
spec=spec,
|
||||||
|
state=state,
|
||||||
|
obj=block,
|
||||||
|
domain=domain,
|
||||||
|
get_signature_fn=spec.get_block_signature,
|
||||||
|
privkey=privkey,
|
||||||
|
pubkey=pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Attesting
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_get_attestation_signature(spec, state):
|
||||||
|
privkey = privkeys[0]
|
||||||
|
pubkey = pubkeys[0]
|
||||||
|
attestation_data = spec.AttestationData(slot=10)
|
||||||
|
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
|
||||||
|
run_get_signature_test(
|
||||||
|
spec=spec,
|
||||||
|
state=state,
|
||||||
|
obj=attestation_data,
|
||||||
|
domain=domain,
|
||||||
|
get_signature_fn=spec.get_attestation_signature,
|
||||||
|
privkey=privkey,
|
||||||
|
pubkey=pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
# Attestation aggregation
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_get_slot_signature(spec, state):
|
||||||
|
privkey = privkeys[0]
|
||||||
|
pubkey = pubkeys[0]
|
||||||
|
slot = spec.Slot(10)
|
||||||
|
domain = spec.get_domain(state, spec.DOMAIN_SELECTION_PROOF, spec.compute_epoch_at_slot(slot))
|
||||||
|
run_get_signature_test(
|
||||||
|
spec=spec,
|
||||||
|
state=state,
|
||||||
|
obj=slot,
|
||||||
|
domain=domain,
|
||||||
|
get_signature_fn=spec.get_slot_signature,
|
||||||
|
privkey=privkey,
|
||||||
|
pubkey=pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_is_aggregator(spec, state):
|
||||||
|
# TODO: we can test the probabilistic result against `TARGET_AGGREGATORS_PER_COMMITTEE`
|
||||||
|
# if we have more validators and larger committeee size
|
||||||
|
slot = state.slot
|
||||||
|
committee_index = 0
|
||||||
|
has_aggregator = False
|
||||||
|
beacon_committee = spec.get_beacon_committee(state, slot, committee_index)
|
||||||
|
for validator_index in beacon_committee:
|
||||||
|
privkey = privkeys[validator_index]
|
||||||
|
slot_signature = spec.get_slot_signature(state, slot, privkey)
|
||||||
|
if spec.is_aggregator(state, slot, committee_index, slot_signature):
|
||||||
|
has_aggregator = True
|
||||||
|
break
|
||||||
|
assert has_aggregator
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_get_aggregate_signature(spec, state):
|
||||||
|
attestations = []
|
||||||
|
attesting_pubkeys = []
|
||||||
|
slot = state.slot
|
||||||
|
committee_index = 0
|
||||||
|
attestation_data = build_attestation_data(spec, state, slot=slot, index=committee_index)
|
||||||
|
beacon_committee = spec.get_beacon_committee(
|
||||||
|
state,
|
||||||
|
attestation_data.slot,
|
||||||
|
attestation_data.index,
|
||||||
|
)
|
||||||
|
committee_size = len(beacon_committee)
|
||||||
|
aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
|
||||||
|
for i, validator_index in enumerate(beacon_committee):
|
||||||
|
bits = aggregation_bits.copy()
|
||||||
|
bits[i] = True
|
||||||
|
attestations.append(
|
||||||
|
spec.Attestation(
|
||||||
|
data=attestation_data,
|
||||||
|
aggregation_bits=bits,
|
||||||
|
signature=spec.get_attestation_signature(state, attestation_data, privkeys[validator_index]),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
attesting_pubkeys.append(state.validators[validator_index].pubkey)
|
||||||
|
assert len(attestations) > 0
|
||||||
|
signature = spec.get_aggregate_signature(attestations)
|
||||||
|
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
|
||||||
|
signing_root = spec.compute_signing_root(attestation_data, domain)
|
||||||
|
assert bls.FastAggregateVerify(attesting_pubkeys, signing_root, signature)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_get_aggregate_and_proof(spec, state):
|
||||||
|
privkey = privkeys[0]
|
||||||
|
aggregator_index = spec.ValidatorIndex(10)
|
||||||
|
aggregate = get_mock_aggregate(spec)
|
||||||
|
aggregate_and_proof = spec.get_aggregate_and_proof(state, aggregator_index, aggregate, privkey)
|
||||||
|
assert aggregate_and_proof.aggregator_index == aggregator_index
|
||||||
|
assert aggregate_and_proof.aggregate == aggregate
|
||||||
|
assert aggregate_and_proof.selection_proof == spec.get_slot_signature(state, aggregate.data.slot, privkey)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_get_aggregate_and_proof_signature(spec, state):
|
||||||
|
privkey = privkeys[0]
|
||||||
|
pubkey = pubkeys[0]
|
||||||
|
aggregate = get_mock_aggregate(spec)
|
||||||
|
aggregate_and_proof = spec.get_aggregate_and_proof(state, spec.ValidatorIndex(1), aggregate, privkey)
|
||||||
|
domain = spec.get_domain(state, spec.DOMAIN_AGGREGATE_AND_PROOF, spec.compute_epoch_at_slot(aggregate.data.slot))
|
||||||
|
run_get_signature_test(
|
||||||
|
spec=spec,
|
||||||
|
state=state,
|
||||||
|
obj=aggregate_and_proof,
|
||||||
|
domain=domain,
|
||||||
|
get_signature_fn=spec.get_aggregate_and_proof_signature,
|
||||||
|
privkey=privkey,
|
||||||
|
pubkey=pubkey,
|
||||||
|
)
|
|
@ -1,12 +1,17 @@
|
||||||
from py_ecc.bls import G2ProofOfPossession as bls
|
from py_ecc.bls import G2ProofOfPossession as py_ecc_bls
|
||||||
from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2
|
from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2
|
||||||
|
import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option
|
||||||
|
|
||||||
# Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing.
|
# Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing.
|
||||||
bls_active = True
|
bls_active = True
|
||||||
|
|
||||||
|
# To change bls implementation, default to PyECC for correctness. Milagro is a good faster alternative.
|
||||||
|
bls = py_ecc_bls
|
||||||
|
|
||||||
STUB_SIGNATURE = b'\x11' * 96
|
STUB_SIGNATURE = b'\x11' * 96
|
||||||
STUB_PUBKEY = b'\x22' * 48
|
STUB_PUBKEY = b'\x22' * 48
|
||||||
STUB_COORDINATES = _signature_to_G2(bls.Sign(0, b""))
|
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
|
||||||
|
STUB_COORDINATES = _signature_to_G2(Z2_SIGNATURE)
|
||||||
|
|
||||||
|
|
||||||
def only_with_bls(alt_return=None):
|
def only_with_bls(alt_return=None):
|
||||||
|
@ -25,17 +30,32 @@ def only_with_bls(alt_return=None):
|
||||||
|
|
||||||
@only_with_bls(alt_return=True)
|
@only_with_bls(alt_return=True)
|
||||||
def Verify(PK, message, signature):
|
def Verify(PK, message, signature):
|
||||||
return bls.Verify(PK, message, signature)
|
try:
|
||||||
|
result = bls.Verify(PK, message, signature)
|
||||||
|
except Exception:
|
||||||
|
result = False
|
||||||
|
finally:
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
@only_with_bls(alt_return=True)
|
@only_with_bls(alt_return=True)
|
||||||
def AggregateVerify(pairs, signature):
|
def AggregateVerify(pubkeys, messages, signature):
|
||||||
return bls.AggregateVerify(pairs, signature)
|
try:
|
||||||
|
result = bls.AggregateVerify(list(pubkeys), list(messages), signature)
|
||||||
|
except Exception:
|
||||||
|
result = False
|
||||||
|
finally:
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
@only_with_bls(alt_return=True)
|
@only_with_bls(alt_return=True)
|
||||||
def FastAggregateVerify(PKs, message, signature):
|
def FastAggregateVerify(pubkeys, message, signature):
|
||||||
return bls.FastAggregateVerify(PKs, message, signature)
|
try:
|
||||||
|
result = bls.FastAggregateVerify(list(pubkeys), message, signature)
|
||||||
|
except Exception:
|
||||||
|
result = False
|
||||||
|
finally:
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
@only_with_bls(alt_return=STUB_SIGNATURE)
|
@only_with_bls(alt_return=STUB_SIGNATURE)
|
||||||
|
@ -45,9 +65,22 @@ def Aggregate(signatures):
|
||||||
|
|
||||||
@only_with_bls(alt_return=STUB_SIGNATURE)
|
@only_with_bls(alt_return=STUB_SIGNATURE)
|
||||||
def Sign(SK, message):
|
def Sign(SK, message):
|
||||||
return bls.Sign(SK, message)
|
if bls == py_ecc_bls:
|
||||||
|
return bls.Sign(SK, message)
|
||||||
|
else:
|
||||||
|
return bls.Sign(SK.to_bytes(32, 'big'), message)
|
||||||
|
|
||||||
|
|
||||||
@only_with_bls(alt_return=STUB_COORDINATES)
|
@only_with_bls(alt_return=STUB_COORDINATES)
|
||||||
def signature_to_G2(signature):
|
def signature_to_G2(signature):
|
||||||
return _signature_to_G2(signature)
|
return _signature_to_G2(signature)
|
||||||
|
|
||||||
|
|
||||||
|
@only_with_bls(alt_return=STUB_PUBKEY)
|
||||||
|
def AggregatePKs(pubkeys):
|
||||||
|
return bls._AggregatePKs(list(pubkeys))
|
||||||
|
|
||||||
|
|
||||||
|
@only_with_bls(alt_return=STUB_SIGNATURE)
|
||||||
|
def SkToPk(SK):
|
||||||
|
return bls.SkToPk(SK)
|
||||||
|
|
|
@ -1,28 +1,17 @@
|
||||||
from hashlib import sha256
|
from hashlib import sha256
|
||||||
|
from typing import Dict, Union
|
||||||
|
|
||||||
ZERO_BYTES32 = b'\x00' * 32
|
ZERO_BYTES32 = b'\x00' * 32
|
||||||
|
|
||||||
|
|
||||||
def _hash(x):
|
def _hash(x: Union[bytes, bytearray, memoryview]) -> bytes:
|
||||||
return sha256(x).digest()
|
return sha256(x).digest()
|
||||||
|
|
||||||
|
|
||||||
# Minimal collection of (key, value) pairs, for fast hash-retrieval, to save on repetitive computation cost.
|
hash_cache: Dict[bytes, bytes] = {}
|
||||||
# Key = the hash input
|
|
||||||
# Value = the hash output
|
|
||||||
hash_cache = []
|
|
||||||
|
|
||||||
|
|
||||||
def add_zero_hashes_to_cache():
|
def hash(x: bytes) -> bytes:
|
||||||
zerohashes = [(None, ZERO_BYTES32)]
|
if x in hash_cache:
|
||||||
for layer in range(1, 32):
|
return hash_cache[x]
|
||||||
k = zerohashes[layer - 1][1] + zerohashes[layer - 1][1]
|
|
||||||
zerohashes.append((k, _hash(k)))
|
|
||||||
hash_cache.extend(zerohashes[1:])
|
|
||||||
|
|
||||||
|
|
||||||
def hash(x):
|
|
||||||
for (k, h) in hash_cache:
|
|
||||||
if x == k:
|
|
||||||
return h
|
|
||||||
return _hash(x)
|
return _hash(x)
|
||||||
|
|
|
@ -5,11 +5,10 @@ We do not recommend rolling your own crypto or using an untested BLS library.
|
||||||
|
|
||||||
The BLS test suite runner has the following handlers:
|
The BLS test suite runner has the following handlers:
|
||||||
|
|
||||||
- [`aggregate_pubkeys`](./aggregate_pubkeys.md)
|
- [`aggregate_verify`](./aggregate_verify.md)
|
||||||
- [`aggregate_sigs`](./aggregate_sigs.md)
|
- [`aggregate`](./aggregate.md)
|
||||||
- [`msg_hash_g2_compressed`](./msg_hash_g2_compressed.md)
|
- [`fast_aggregate_verify`](./fast_aggregate_verify.md)
|
||||||
- [`msg_hash_g2_uncompressed`](./msg_hash_g2_uncompressed.md)
|
- [`sign`](./sign.md)
|
||||||
- [`priv_to_pub`](./priv_to_pub.md)
|
- [`verify`](./verify.md)
|
||||||
- [`sign_msg`](./sign_msg.md)
|
|
||||||
|
|
||||||
*Note*: Signature-verification and aggregate-verify test cases are not yet supported.
|
*Note*: Signature-verification and aggregate-verify test cases are not yet supported.
|
||||||
|
|
|
@ -0,0 +1,19 @@
|
||||||
|
# Test format: BLS signature aggregation
|
||||||
|
|
||||||
|
A BLS signature aggregation combines a series of signatures into a single signature.
|
||||||
|
|
||||||
|
## Test case format
|
||||||
|
|
||||||
|
The test data is declared in a `data.yaml` file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
input: List[BLS Signature] -- list of input BLS signatures
|
||||||
|
output: BLS Signature -- expected output, single BLS signature or empty.
|
||||||
|
```
|
||||||
|
|
||||||
|
- `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`.
|
||||||
|
- No output value if the input is invalid.
|
||||||
|
|
||||||
|
## Condition
|
||||||
|
|
||||||
|
The `aggregate` handler should aggregate the signatures in the `input`, and the result should match the expected `output`.
|
|
@ -1,19 +0,0 @@
|
||||||
# Test format: BLS pubkey aggregation
|
|
||||||
|
|
||||||
A BLS pubkey aggregation combines a series of pubkeys into a single pubkey.
|
|
||||||
|
|
||||||
## Test case format
|
|
||||||
|
|
||||||
The test data is declared in a `data.yaml` file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
input: List[BLS Pubkey] -- list of input BLS pubkeys
|
|
||||||
output: BLS Pubkey -- expected output, single BLS pubkey
|
|
||||||
```
|
|
||||||
|
|
||||||
`BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`.
|
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
|
||||||
|
|
||||||
The `aggregate_pubkeys` handler should aggregate the keys in the `input`, and the result should match the expected `output`.
|
|
|
@ -1,19 +0,0 @@
|
||||||
# Test format: BLS signature aggregation
|
|
||||||
|
|
||||||
A BLS signature aggregation combines a series of signatures into a single signature.
|
|
||||||
|
|
||||||
## Test case format
|
|
||||||
|
|
||||||
The test data is declared in a `data.yaml` file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
input: List[BLS Signature] -- list of input BLS signatures
|
|
||||||
output: BLS Signature -- expected output, single BLS signature
|
|
||||||
```
|
|
||||||
|
|
||||||
`BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`.
|
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
|
||||||
|
|
||||||
The `aggregate_sigs` handler should aggregate the signatures in the `input`, and the result should match the expected `output`.
|
|
|
@ -0,0 +1,17 @@
|
||||||
|
# Test format: BLS sign message
|
||||||
|
|
||||||
|
Verify the signature against the given pubkeys and one messages.
|
||||||
|
|
||||||
|
## Test case format
|
||||||
|
|
||||||
|
The test data is declared in a `data.yaml` file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
input:
|
||||||
|
pubkeys: List[bytes48] -- the pubkeys
|
||||||
|
messages: List[bytes32] -- the messages
|
||||||
|
signature: bytes96 -- the signature to verify against pubkeys and messages
|
||||||
|
output: bool -- VALID or INVALID
|
||||||
|
```
|
||||||
|
|
||||||
|
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
|
@ -0,0 +1,17 @@
|
||||||
|
# Test format: BLS sign message
|
||||||
|
|
||||||
|
Verify the signature against the given pubkeys and one message.
|
||||||
|
|
||||||
|
## Test case format
|
||||||
|
|
||||||
|
The test data is declared in a `data.yaml` file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
input:
|
||||||
|
pubkeys: List[bytes48] -- the pubkey
|
||||||
|
message: bytes32 -- the message
|
||||||
|
signature: bytes96 -- the signature to verify against pubkeys and message
|
||||||
|
output: bool -- VALID or INVALID
|
||||||
|
```
|
||||||
|
|
||||||
|
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
|
@ -1,21 +0,0 @@
|
||||||
# Test format: BLS hash-compressed
|
|
||||||
|
|
||||||
A BLS compressed-hash to G2.
|
|
||||||
|
|
||||||
## Test case format
|
|
||||||
|
|
||||||
The test data is declared in a `data.yaml` file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
input:
|
|
||||||
message: bytes32
|
|
||||||
domain: bytes8 -- the BLS domain
|
|
||||||
output: List[bytes48] -- length of two
|
|
||||||
```
|
|
||||||
|
|
||||||
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
|
||||||
|
|
||||||
The `msg_hash_g2_compressed` handler should hash the `message`, with the given `domain`, to G2 with compression, and the result should match the expected `output`.
|
|
|
@ -1,21 +0,0 @@
|
||||||
# Test format: BLS hash-uncompressed
|
|
||||||
|
|
||||||
A BLS uncompressed-hash to G2.
|
|
||||||
|
|
||||||
## Test case format
|
|
||||||
|
|
||||||
The test data is declared in a `data.yaml` file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
input:
|
|
||||||
message: bytes32
|
|
||||||
domain: bytes8 -- the BLS domain
|
|
||||||
output: List[List[bytes48]] -- 3 lists, each a length of two
|
|
||||||
```
|
|
||||||
|
|
||||||
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
|
||||||
|
|
||||||
The `msg_hash_g2_uncompressed` handler should hash the `message`, with the given `domain`, to G2, without compression, and the result should match the expected `output`.
|
|
|
@ -1,19 +0,0 @@
|
||||||
# Test format: BLS private key to pubkey
|
|
||||||
|
|
||||||
A BLS private key to public key conversion.
|
|
||||||
|
|
||||||
## Test case format
|
|
||||||
|
|
||||||
The test data is declared in a `data.yaml` file:
|
|
||||||
|
|
||||||
```yaml
|
|
||||||
input: bytes32 -- the private key
|
|
||||||
output: bytes48 -- the public key
|
|
||||||
```
|
|
||||||
|
|
||||||
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
|
||||||
|
|
||||||
The `priv_to_pub` handler should compute the public key for the given private key `input`, and the result should match the expected `output`.
|
|
|
@ -10,13 +10,7 @@ The test data is declared in a `data.yaml` file:
|
||||||
input:
|
input:
|
||||||
privkey: bytes32 -- the private key used for signing
|
privkey: bytes32 -- the private key used for signing
|
||||||
message: bytes32 -- input message to sign (a hash)
|
message: bytes32 -- input message to sign (a hash)
|
||||||
domain: bytes8 -- the BLS domain
|
|
||||||
output: bytes96 -- expected signature
|
output: bytes96 -- expected signature
|
||||||
```
|
```
|
||||||
|
|
||||||
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
||||||
|
|
||||||
|
|
||||||
## Condition
|
|
||||||
|
|
||||||
The `sign_msg` handler should sign the given `message`, with `domain`, using the given `privkey`, and the result should match the expected `output`.
|
|
|
@ -0,0 +1,17 @@
|
||||||
|
# Test format: BLS sign message
|
||||||
|
|
||||||
|
Verify the signature against the given one pubkey and one message.
|
||||||
|
|
||||||
|
## Test case format
|
||||||
|
|
||||||
|
The test data is declared in a `data.yaml` file:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
input:
|
||||||
|
pubkey: bytes48 -- the pubkey
|
||||||
|
message: bytes32 -- the message
|
||||||
|
signature: bytes96 -- the signature to verify against pubkey and message
|
||||||
|
output: bool -- VALID or INVALID
|
||||||
|
```
|
||||||
|
|
||||||
|
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
|
|
@ -38,7 +38,7 @@ The provided pre-state is already transitioned to just before the specific sub-t
|
||||||
Sub-transitions:
|
Sub-transitions:
|
||||||
|
|
||||||
- `justification_and_finalization`
|
- `justification_and_finalization`
|
||||||
- *`rewards_and_penalties` - planned testing extension*
|
- `rewards_and_penalties` (limited to `minimal` config)
|
||||||
- `registry_updates`
|
- `registry_updates`
|
||||||
- `slashings`
|
- `slashings`
|
||||||
- `final_updates`
|
- `final_updates`
|
||||||
|
|
|
@ -0,0 +1,75 @@
|
||||||
|
# Rewards tests
|
||||||
|
|
||||||
|
All rewards deltas sub-functions are tested for each test case.
|
||||||
|
There is no "change" factor, the rewards/penalties outputs are pure functions with just the pre-state as input.
|
||||||
|
(See test condition documentation on how to run the tests.)
|
||||||
|
|
||||||
|
`Deltas` is defined as:
|
||||||
|
```python
|
||||||
|
class Deltas(Container):
|
||||||
|
rewards: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
|
||||||
|
penalties: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Test case format
|
||||||
|
|
||||||
|
### `meta.yaml`
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
description: string -- Optional description of test case, purely for debugging purposes.
|
||||||
|
Tests should use the directory name of the test case as identifier, not the description.
|
||||||
|
```
|
||||||
|
|
||||||
|
_Note_: No signature verification happens within rewards sub-functions. These
|
||||||
|
tests can safely be run with or without BLS enabled.
|
||||||
|
|
||||||
|
### `pre.yaml`
|
||||||
|
|
||||||
|
A YAML-encoded `BeaconState`, the state before running the rewards sub-function.
|
||||||
|
|
||||||
|
Also available as `pre.ssz`.
|
||||||
|
|
||||||
|
### `source_deltas.yaml`
|
||||||
|
|
||||||
|
A YAML-encoded `Deltas` representing the rewards and penalties returned by the rewards the `get_source_deltas` function
|
||||||
|
|
||||||
|
Also available as `source_deltas.ssz`.
|
||||||
|
|
||||||
|
### `target_deltas.yaml`
|
||||||
|
|
||||||
|
A YAML-encoded `Deltas` representing the rewards and penalties returned by the rewards the `get_target_deltas` function
|
||||||
|
|
||||||
|
Also available as `target_deltas.ssz`.
|
||||||
|
|
||||||
|
### `head_deltas.yaml`
|
||||||
|
|
||||||
|
A YAML-encoded `Deltas` representing the rewards and penalties returned by the rewards the `get_head_deltas` function
|
||||||
|
|
||||||
|
Also available as `head_deltas.ssz`.
|
||||||
|
|
||||||
|
### `inclusion_delay_deltas.yaml`
|
||||||
|
|
||||||
|
A YAML-encoded `Deltas` representing the rewards and penalties returned by the rewards the `get_inclusion_delay_deltas` function
|
||||||
|
|
||||||
|
Also available as `inclusion_delay_deltas.ssz`.
|
||||||
|
|
||||||
|
### `inactivity_penalty_deltas.yaml`
|
||||||
|
|
||||||
|
A YAML-encoded `Deltas` representing the rewards and penalties returned by the rewards the `get_inactivity_penalty_deltas` function
|
||||||
|
|
||||||
|
Also available as `inactivity_penalty_deltas.ssz`.
|
||||||
|
|
||||||
|
## Condition
|
||||||
|
|
||||||
|
A handler of the `rewards` test-runner should process these cases,
|
||||||
|
calling the corresponding rewards deltas function for each set of deltas.
|
||||||
|
|
||||||
|
The provided pre-state is ready to be input into each rewards deltas function.
|
||||||
|
|
||||||
|
The provided `deltas` should match the return values of the
|
||||||
|
deltas function. Specifically the following must hold true for each set of deltas:
|
||||||
|
|
||||||
|
```python
|
||||||
|
deltas.rewards == deltas_function(state)[0]
|
||||||
|
deltas.penalties == deltas_function(state)[1]
|
||||||
|
```
|
|
@ -184,7 +184,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
gen_runner.run_generator("epoch_processing", [
|
gen_runner.run_generator("epoch_processing", [
|
||||||
create_provider('crosslinks', test_process_crosslinks, 'minimal'),
|
create_provider('final_updates', test_process_final_updates, 'minimal'),
|
||||||
...
|
...
|
||||||
])
|
])
|
||||||
|
|
||||||
|
|
|
@ -1,21 +1,11 @@
|
||||||
# BLS Test Generator
|
# BLS Test Generator
|
||||||
|
|
||||||
Explanation of BLS12-381 type hierarchy
|
The [BLS Signature APIs](../../../specs/phase0/beacon-chain.md#bls-signatures)
|
||||||
The base unit is bytes48 of which only 381 bits are used
|
|
||||||
|
|
||||||
- FQ: uint381 modulo field modulus
|
Information on the format of the tests can be found in the [BLS test formats documentation](../../formats/bls/README.md).
|
||||||
- FQ2: (FQ, FQ)
|
|
||||||
- G2: (FQ2, FQ2, FQ2)
|
|
||||||
|
|
||||||
## Resources
|
## Resources
|
||||||
|
|
||||||
- [Eth2 spec](../../../specs/phase0/beacon-chain.md#bls-signatures)
|
- [IETF BLS Signature Scheme](https://datatracker.ietf.org/doc/draft-irtf-cfrg-bls-signature/)
|
||||||
- [Finite Field Arithmetic](http://www.springeronline.com/sgw/cda/pageitems/document/cda_downloaddocument/0,11996,0-0-45-110359-0,00.pdf)
|
- [Finite Field Arithmetic](http://www.springeronline.com/sgw/cda/pageitems/document/cda_downloaddocument/0,11996,0-0-45-110359-0,00.pdf)
|
||||||
- Chapter 2 of [Elliptic Curve Cryptography](http://cacr.uwaterloo.ca/ecc/). Darrel Hankerson, Alfred Menezes, and Scott Vanstone
|
- Chapter 2 of [Elliptic Curve Cryptography](http://cacr.uwaterloo.ca/ecc/). Darrel Hankerson, Alfred Menezes, and Scott Vanstone
|
||||||
- [Zcash BLS parameters](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381)
|
|
||||||
- [Trinity implementation](https://github.com/ethereum/trinity/blob/master/eth2/_utils/bls.py)
|
|
||||||
|
|
||||||
## Comments
|
|
||||||
|
|
||||||
Compared to Zcash, Ethereum specs always requires the compressed form (c_flag / most significant bit always set).
|
|
||||||
Also note that pubkeys and privkeys are reversed.
|
|
||||||
|
|
|
@ -2,27 +2,28 @@
|
||||||
BLS test vectors generator
|
BLS test vectors generator
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
from hashlib import sha256
|
||||||
from typing import Tuple, Iterable, Any, Callable, Dict
|
from typing import Tuple, Iterable, Any, Callable, Dict
|
||||||
|
|
||||||
from eth_utils import (
|
from eth_utils import (
|
||||||
encode_hex,
|
encode_hex,
|
||||||
int_to_big_endian,
|
int_to_big_endian,
|
||||||
)
|
)
|
||||||
|
import milagro_bls_binding as milagro_bls
|
||||||
|
|
||||||
|
from eth2spec.utils import bls
|
||||||
|
from eth2spec.test.context import PHASE0
|
||||||
from gen_base import gen_runner, gen_typing
|
from gen_base import gen_runner, gen_typing
|
||||||
|
|
||||||
from py_ecc import bls
|
|
||||||
from hashlib import sha256
|
def to_bytes(i):
|
||||||
|
return i.to_bytes(32, "big")
|
||||||
|
|
||||||
|
|
||||||
def hash(x):
|
def hash(x):
|
||||||
return sha256(x).digest()
|
return sha256(x).digest()
|
||||||
|
|
||||||
|
|
||||||
F2Q_COEFF_LEN = 48
|
|
||||||
G2_COMPRESSED_Z_LEN = 48
|
|
||||||
DST = bls.G2ProofOfPossession.DST
|
|
||||||
|
|
||||||
|
|
||||||
def int_to_hex(n: int, byte_length: int = None) -> str:
|
def int_to_hex(n: int, byte_length: int = None) -> str:
|
||||||
byte_value = int_to_big_endian(n)
|
byte_value = int_to_big_endian(n)
|
||||||
if byte_length:
|
if byte_length:
|
||||||
|
@ -48,11 +49,15 @@ PRIVKEYS = [
|
||||||
hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'),
|
hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'),
|
||||||
]
|
]
|
||||||
|
|
||||||
|
Z1_PUBKEY = b'\xc0' + b'\x00' * 47
|
||||||
|
NO_SIGNATURE = b'\x00' * 96
|
||||||
|
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
|
||||||
|
|
||||||
|
|
||||||
def case01_sign():
|
def case01_sign():
|
||||||
for privkey in PRIVKEYS:
|
for privkey in PRIVKEYS:
|
||||||
for message in MESSAGES:
|
for message in MESSAGES:
|
||||||
sig = bls.G2ProofOfPossession.Sign(privkey, message)
|
sig = bls.Sign(privkey, message)
|
||||||
identifier = f'{int_to_hex(privkey)}_{encode_hex(message)}'
|
identifier = f'{int_to_hex(privkey)}_{encode_hex(message)}'
|
||||||
yield f'sign_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
yield f'sign_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
||||||
'input': {
|
'input': {
|
||||||
|
@ -67,9 +72,17 @@ def case02_verify():
|
||||||
for i, privkey in enumerate(PRIVKEYS):
|
for i, privkey in enumerate(PRIVKEYS):
|
||||||
for message in MESSAGES:
|
for message in MESSAGES:
|
||||||
# Valid signature
|
# Valid signature
|
||||||
signature = bls.G2ProofOfPossession.Sign(privkey, message)
|
signature = bls.Sign(privkey, message)
|
||||||
pubkey = bls.G2ProofOfPossession.PrivToPub(privkey)
|
pubkey = bls.SkToPk(privkey)
|
||||||
|
|
||||||
|
assert milagro_bls.SkToPk(to_bytes(privkey)) == pubkey
|
||||||
|
assert milagro_bls.Sign(to_bytes(privkey), message) == signature
|
||||||
|
|
||||||
identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'
|
identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'
|
||||||
|
|
||||||
|
assert bls.Verify(pubkey, message, signature)
|
||||||
|
assert milagro_bls.Verify(pubkey, message, signature)
|
||||||
|
|
||||||
yield f'verify_valid_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
yield f'verify_valid_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
||||||
'input': {
|
'input': {
|
||||||
'pubkey': encode_hex(pubkey),
|
'pubkey': encode_hex(pubkey),
|
||||||
|
@ -80,8 +93,10 @@ def case02_verify():
|
||||||
}
|
}
|
||||||
|
|
||||||
# Invalid signatures -- wrong pubkey
|
# Invalid signatures -- wrong pubkey
|
||||||
wrong_pubkey = bls.G2ProofOfPossession.PrivToPub(PRIVKEYS[(i + 1) % len(PRIVKEYS)])
|
wrong_pubkey = bls.SkToPk(PRIVKEYS[(i + 1) % len(PRIVKEYS)])
|
||||||
identifier = f'{encode_hex(wrong_pubkey)}_{encode_hex(message)}'
|
identifier = f'{encode_hex(wrong_pubkey)}_{encode_hex(message)}'
|
||||||
|
assert not bls.Verify(wrong_pubkey, message, signature)
|
||||||
|
assert not milagro_bls.Verify(wrong_pubkey, message, signature)
|
||||||
yield f'verify_wrong_pubkey_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
yield f'verify_wrong_pubkey_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
||||||
'input': {
|
'input': {
|
||||||
'pubkey': encode_hex(wrong_pubkey),
|
'pubkey': encode_hex(wrong_pubkey),
|
||||||
|
@ -94,6 +109,8 @@ def case02_verify():
|
||||||
# Invalid signature -- tampered with signature
|
# Invalid signature -- tampered with signature
|
||||||
tampered_signature = signature[:-4] + b'\xFF\xFF\xFF\xFF'
|
tampered_signature = signature[:-4] + b'\xFF\xFF\xFF\xFF'
|
||||||
identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'
|
identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'
|
||||||
|
assert not bls.Verify(pubkey, message, tampered_signature)
|
||||||
|
assert not milagro_bls.Verify(pubkey, message, tampered_signature)
|
||||||
yield f'verify_tampered_signature_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
yield f'verify_tampered_signature_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
||||||
'input': {
|
'input': {
|
||||||
'pubkey': encode_hex(pubkey),
|
'pubkey': encode_hex(pubkey),
|
||||||
|
@ -103,26 +120,53 @@ def case02_verify():
|
||||||
'output': False,
|
'output': False,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Valid pubkey and signature with the point at infinity
|
||||||
|
assert bls.Verify(Z1_PUBKEY, message, Z2_SIGNATURE)
|
||||||
|
assert milagro_bls.Verify(Z1_PUBKEY, message, Z2_SIGNATURE)
|
||||||
|
yield f'verify_infinity_pubkey_and_infinity_signature', {
|
||||||
|
'input': {
|
||||||
|
'pubkey': encode_hex(Z1_PUBKEY),
|
||||||
|
'message': encode_hex(message),
|
||||||
|
'signature': encode_hex(Z2_SIGNATURE),
|
||||||
|
},
|
||||||
|
'output': True,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def case03_aggregate():
|
def case03_aggregate():
|
||||||
for message in MESSAGES:
|
for message in MESSAGES:
|
||||||
sigs = [bls.G2ProofOfPossession.Sign(privkey, message) for privkey in PRIVKEYS]
|
sigs = [bls.Sign(privkey, message) for privkey in PRIVKEYS]
|
||||||
yield f'aggregate_{encode_hex(message)}', {
|
yield f'aggregate_{encode_hex(message)}', {
|
||||||
'input': [encode_hex(sig) for sig in sigs],
|
'input': [encode_hex(sig) for sig in sigs],
|
||||||
'output': encode_hex(bls.G2ProofOfPossession.Aggregate(sigs)),
|
'output': encode_hex(bls.Aggregate(sigs)),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Invalid pubkeys -- len(pubkeys) == 0
|
||||||
|
try:
|
||||||
|
bls.Aggregate([])
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise Exception("Should have been INVALID")
|
||||||
|
|
||||||
|
yield f'aggregate_na_pubkeys', {
|
||||||
|
'input': [],
|
||||||
|
'output': None,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def case04_fast_aggregate_verify():
|
def case04_fast_aggregate_verify():
|
||||||
for i, message in enumerate(MESSAGES):
|
for i, message in enumerate(MESSAGES):
|
||||||
privkeys = PRIVKEYS[:i + 1]
|
privkeys = PRIVKEYS[:i + 1]
|
||||||
sigs = [bls.G2ProofOfPossession.Sign(privkey, message) for privkey in privkeys]
|
sigs = [bls.Sign(privkey, message) for privkey in privkeys]
|
||||||
aggregate_signature = bls.G2ProofOfPossession.Aggregate(sigs)
|
aggregate_signature = bls.Aggregate(sigs)
|
||||||
pubkeys = [bls.G2ProofOfPossession.PrivToPub(privkey) for privkey in privkeys]
|
pubkeys = [bls.SkToPk(privkey) for privkey in privkeys]
|
||||||
pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys]
|
pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys]
|
||||||
|
|
||||||
# Valid signature
|
# Valid signature
|
||||||
identifier = f'{pubkeys_serial}_{encode_hex(message)}'
|
identifier = f'{pubkeys_serial}_{encode_hex(message)}'
|
||||||
|
assert bls.FastAggregateVerify(pubkeys, message, aggregate_signature)
|
||||||
|
assert milagro_bls.FastAggregateVerify(pubkeys, message, aggregate_signature)
|
||||||
yield f'fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
yield f'fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
||||||
'input': {
|
'input': {
|
||||||
'pubkeys': pubkeys_serial,
|
'pubkeys': pubkeys_serial,
|
||||||
|
@ -133,9 +177,11 @@ def case04_fast_aggregate_verify():
|
||||||
}
|
}
|
||||||
|
|
||||||
# Invalid signature -- extra pubkey
|
# Invalid signature -- extra pubkey
|
||||||
pubkeys_extra = pubkeys + [bls.G2ProofOfPossession.PrivToPub(PRIVKEYS[-1])]
|
pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])]
|
||||||
pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra]
|
pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra]
|
||||||
identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}'
|
identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}'
|
||||||
|
assert not bls.FastAggregateVerify(pubkeys_extra, message, aggregate_signature)
|
||||||
|
assert not milagro_bls.FastAggregateVerify(pubkeys_extra, message, aggregate_signature)
|
||||||
yield f'fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
yield f'fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
||||||
'input': {
|
'input': {
|
||||||
'pubkeys': pubkeys_extra_serial,
|
'pubkeys': pubkeys_extra_serial,
|
||||||
|
@ -148,6 +194,8 @@ def case04_fast_aggregate_verify():
|
||||||
# Invalid signature -- tampered with signature
|
# Invalid signature -- tampered with signature
|
||||||
tampered_signature = aggregate_signature[:-4] + b'\xff\xff\xff\xff'
|
tampered_signature = aggregate_signature[:-4] + b'\xff\xff\xff\xff'
|
||||||
identifier = f'{pubkeys_serial}_{encode_hex(message)}'
|
identifier = f'{pubkeys_serial}_{encode_hex(message)}'
|
||||||
|
assert not bls.FastAggregateVerify(pubkeys, message, tampered_signature)
|
||||||
|
assert not milagro_bls.FastAggregateVerify(pubkeys, message, tampered_signature)
|
||||||
yield f'fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
yield f'fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
|
||||||
'input': {
|
'input': {
|
||||||
'pubkeys': pubkeys_serial,
|
'pubkeys': pubkeys_serial,
|
||||||
|
@ -157,37 +205,94 @@ def case04_fast_aggregate_verify():
|
||||||
'output': False,
|
'output': False,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE
|
||||||
|
assert not bls.FastAggregateVerify([], message, Z2_SIGNATURE)
|
||||||
|
assert not milagro_bls.FastAggregateVerify([], message, Z2_SIGNATURE)
|
||||||
|
yield f'fast_aggregate_verify_na_pubkeys_and_infinity_signature', {
|
||||||
|
'input': {
|
||||||
|
'pubkeys': [],
|
||||||
|
'message': encode_hex(message),
|
||||||
|
'signature': encode_hex(Z2_SIGNATURE),
|
||||||
|
},
|
||||||
|
'output': False,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00...
|
||||||
|
assert not bls.FastAggregateVerify([], message, NO_SIGNATURE)
|
||||||
|
assert not milagro_bls.FastAggregateVerify([], message, NO_SIGNATURE)
|
||||||
|
yield f'fast_aggregate_verify_na_pubkeys_and_na_signature', {
|
||||||
|
'input': {
|
||||||
|
'pubkeys': [],
|
||||||
|
'message': encode_hex(message),
|
||||||
|
'signature': encode_hex(NO_SIGNATURE),
|
||||||
|
},
|
||||||
|
'output': False,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def case05_aggregate_verify():
|
def case05_aggregate_verify():
|
||||||
pairs = []
|
pubkeys = []
|
||||||
|
pubkeys_serial = []
|
||||||
|
messages = []
|
||||||
|
messages_serial = []
|
||||||
sigs = []
|
sigs = []
|
||||||
for privkey, message in zip(PRIVKEYS, MESSAGES):
|
for privkey, message in zip(PRIVKEYS, MESSAGES):
|
||||||
sig = bls.G2ProofOfPossession.Sign(privkey, message)
|
sig = bls.Sign(privkey, message)
|
||||||
pubkey = bls.G2ProofOfPossession.PrivToPub(privkey)
|
pubkey = bls.SkToPk(privkey)
|
||||||
pairs.append({
|
pubkeys.append(pubkey)
|
||||||
'pubkey': encode_hex(pubkey),
|
pubkeys_serial.append(encode_hex(pubkey))
|
||||||
'message': encode_hex(message),
|
messages.append(message)
|
||||||
})
|
messages_serial.append(encode_hex(message))
|
||||||
sigs.append(sig)
|
sigs.append(sig)
|
||||||
|
|
||||||
aggregate_signature = bls.G2ProofOfPossession.Aggregate(sigs)
|
aggregate_signature = bls.Aggregate(sigs)
|
||||||
|
assert bls.AggregateVerify(pubkeys, messages, aggregate_signature)
|
||||||
|
assert milagro_bls.AggregateVerify(pubkeys, messages, aggregate_signature)
|
||||||
yield f'aggregate_verify_valid', {
|
yield f'aggregate_verify_valid', {
|
||||||
'input': {
|
'input': {
|
||||||
'pairs': pairs,
|
'pubkeys': pubkeys_serial,
|
||||||
|
'messages': messages_serial,
|
||||||
'signature': encode_hex(aggregate_signature),
|
'signature': encode_hex(aggregate_signature),
|
||||||
},
|
},
|
||||||
'output': True,
|
'output': True,
|
||||||
}
|
}
|
||||||
|
|
||||||
tampered_signature = aggregate_signature[:4] + b'\xff\xff\xff\xff'
|
tampered_signature = aggregate_signature[:4] + b'\xff\xff\xff\xff'
|
||||||
|
assert not bls.AggregateVerify(pubkey, messages, tampered_signature)
|
||||||
|
assert not milagro_bls.AggregateVerify(pubkeys, messages, tampered_signature)
|
||||||
yield f'aggregate_verify_tampered_signature', {
|
yield f'aggregate_verify_tampered_signature', {
|
||||||
'input': {
|
'input': {
|
||||||
'pairs': pairs,
|
'pubkeys': pubkeys_serial,
|
||||||
|
'messages': messages_serial,
|
||||||
'signature': encode_hex(tampered_signature),
|
'signature': encode_hex(tampered_signature),
|
||||||
},
|
},
|
||||||
'output': False,
|
'output': False,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE
|
||||||
|
assert not bls.AggregateVerify([], [], Z2_SIGNATURE)
|
||||||
|
assert not milagro_bls.AggregateVerify([], [], Z2_SIGNATURE)
|
||||||
|
yield f'aggregate_verify_na_pubkeys_and_infinity_signature', {
|
||||||
|
'input': {
|
||||||
|
'pubkeys': [],
|
||||||
|
'messages': [],
|
||||||
|
'signature': encode_hex(Z2_SIGNATURE),
|
||||||
|
},
|
||||||
|
'output': False,
|
||||||
|
}
|
||||||
|
|
||||||
|
# Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00...
|
||||||
|
assert not bls.AggregateVerify([], [], NO_SIGNATURE)
|
||||||
|
assert not milagro_bls.AggregateVerify([], [], NO_SIGNATURE)
|
||||||
|
yield f'aggregate_verify_na_pubkeys_and_na_signature', {
|
||||||
|
'input': {
|
||||||
|
'pubkeys': [],
|
||||||
|
'messages': [],
|
||||||
|
'signature': encode_hex(NO_SIGNATURE),
|
||||||
|
},
|
||||||
|
'output': False,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def create_provider(handler_name: str,
|
def create_provider(handler_name: str,
|
||||||
test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:
|
test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:
|
||||||
|
@ -202,7 +307,7 @@ def create_provider(handler_name: str,
|
||||||
print(data)
|
print(data)
|
||||||
(case_name, case_content) = data
|
(case_name, case_content) = data
|
||||||
yield gen_typing.TestCase(
|
yield gen_typing.TestCase(
|
||||||
fork_name='phase0',
|
fork_name=PHASE0,
|
||||||
runner_name='bls',
|
runner_name='bls',
|
||||||
handler_name=handler_name,
|
handler_name=handler_name,
|
||||||
suite_name='small',
|
suite_name='small',
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
py_ecc==2.0.0
|
py_ecc==4.0.0
|
||||||
eth-utils==1.6.0
|
eth-utils==1.6.0
|
||||||
../../core/gen_helpers
|
../../core/gen_helpers
|
||||||
|
../../../
|
||||||
|
|
|
@ -13,6 +13,7 @@ from gen_base import gen_runner, gen_typing
|
||||||
from gen_from_tests.gen import generate_from_tests
|
from gen_from_tests.gen import generate_from_tests
|
||||||
from importlib import reload
|
from importlib import reload
|
||||||
from eth2spec.config import config_util
|
from eth2spec.config import config_util
|
||||||
|
from eth2spec.test.context import PHASE0
|
||||||
|
|
||||||
|
|
||||||
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
|
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
|
||||||
|
@ -28,7 +29,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
|
||||||
runner_name='epoch_processing',
|
runner_name='epoch_processing',
|
||||||
handler_name=handler_name,
|
handler_name=handler_name,
|
||||||
src=tests_src,
|
src=tests_src,
|
||||||
fork_name='phase0'
|
fork_name=PHASE0,
|
||||||
)
|
)
|
||||||
|
|
||||||
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||||
|
|
|
@ -1,5 +1,6 @@
|
||||||
from typing import Iterable
|
from typing import Iterable
|
||||||
|
|
||||||
|
from eth2spec.test.context import PHASE0
|
||||||
from eth2spec.test.genesis import test_initialization, test_validity
|
from eth2spec.test.genesis import test_initialization, test_validity
|
||||||
|
|
||||||
from gen_base import gen_runner, gen_typing
|
from gen_base import gen_runner, gen_typing
|
||||||
|
@ -21,7 +22,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
|
||||||
runner_name='genesis',
|
runner_name='genesis',
|
||||||
handler_name=handler_name,
|
handler_name=handler_name,
|
||||||
src=tests_src,
|
src=tests_src,
|
||||||
fork_name='phase0'
|
fork_name=PHASE0,
|
||||||
)
|
)
|
||||||
|
|
||||||
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||||
|
|
|
@ -15,6 +15,7 @@ from importlib import reload
|
||||||
from eth2spec.config import config_util
|
from eth2spec.config import config_util
|
||||||
from eth2spec.phase0 import spec as spec_phase0
|
from eth2spec.phase0 import spec as spec_phase0
|
||||||
from eth2spec.phase1 import spec as spec_phase1
|
from eth2spec.phase1 import spec as spec_phase1
|
||||||
|
from eth2spec.test.context import PHASE0
|
||||||
|
|
||||||
|
|
||||||
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
|
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
|
||||||
|
@ -30,7 +31,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
|
||||||
runner_name='operations',
|
runner_name='operations',
|
||||||
handler_name=handler_name,
|
handler_name=handler_name,
|
||||||
src=tests_src,
|
src=tests_src,
|
||||||
fork_name='phase0'
|
fork_name=PHASE0,
|
||||||
)
|
)
|
||||||
|
|
||||||
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||||
|
|
|
@ -0,0 +1,8 @@
|
||||||
|
# Rewards
|
||||||
|
|
||||||
|
Rewards covers the sub-functions of `process_rewards_and_penalties` for granular testing of components of the rewards function.
|
||||||
|
|
||||||
|
A rewards test-runner can consume these sub-transition test-suites,
|
||||||
|
and handle different kinds of epoch sub-transitions by processing the cases using the specified test handler.
|
||||||
|
|
||||||
|
Information on the format of the tests can be found in the [rewards test formats documentation](../../formats/rewards/README.md).
|
|
@ -0,0 +1,44 @@
|
||||||
|
from typing import Iterable
|
||||||
|
|
||||||
|
from eth2spec.phase0 import spec as spec_phase0
|
||||||
|
from eth2spec.phase1 import spec as spec_phase1
|
||||||
|
from eth2spec.test.phase_0.rewards import (
|
||||||
|
test_basic,
|
||||||
|
test_leak,
|
||||||
|
test_random,
|
||||||
|
)
|
||||||
|
from gen_base import gen_runner, gen_typing
|
||||||
|
from gen_from_tests.gen import generate_from_tests
|
||||||
|
from importlib import reload
|
||||||
|
from eth2spec.config import config_util
|
||||||
|
from eth2spec.test.context import PHASE0
|
||||||
|
|
||||||
|
|
||||||
|
def create_provider(tests_src, config_name: str) -> gen_typing.TestProvider:
|
||||||
|
|
||||||
|
def prepare_fn(configs_path: str) -> str:
|
||||||
|
config_util.prepare_config(configs_path, config_name)
|
||||||
|
reload(spec_phase0)
|
||||||
|
reload(spec_phase1)
|
||||||
|
return config_name
|
||||||
|
|
||||||
|
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
||||||
|
return generate_from_tests(
|
||||||
|
runner_name='rewards',
|
||||||
|
handler_name='core',
|
||||||
|
src=tests_src,
|
||||||
|
fork_name=PHASE0,
|
||||||
|
)
|
||||||
|
|
||||||
|
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
gen_runner.run_generator("rewards", [
|
||||||
|
create_provider(test_basic, 'minimal'),
|
||||||
|
create_provider(test_basic, 'mainnet'),
|
||||||
|
create_provider(test_leak, 'minimal'),
|
||||||
|
create_provider(test_leak, 'mainnet'),
|
||||||
|
create_provider(test_random, 'minimal'),
|
||||||
|
create_provider(test_random, 'mainnet'),
|
||||||
|
])
|
|
@ -0,0 +1,2 @@
|
||||||
|
../../core/gen_helpers
|
||||||
|
../../../
|
|
@ -4,7 +4,8 @@ from importlib import reload
|
||||||
from gen_base import gen_runner, gen_typing
|
from gen_base import gen_runner, gen_typing
|
||||||
from gen_from_tests.gen import generate_from_tests
|
from gen_from_tests.gen import generate_from_tests
|
||||||
|
|
||||||
from eth2spec.test.sanity import test_blocks, test_slots
|
from eth2spec.test.context import PHASE0
|
||||||
|
from eth2spec.test.phase_0.sanity import test_blocks, test_slots
|
||||||
from eth2spec.config import config_util
|
from eth2spec.config import config_util
|
||||||
from eth2spec.phase0 import spec as spec_phase0
|
from eth2spec.phase0 import spec as spec_phase0
|
||||||
from eth2spec.phase1 import spec as spec_phase1
|
from eth2spec.phase1 import spec as spec_phase1
|
||||||
|
@ -23,7 +24,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
|
||||||
runner_name='sanity',
|
runner_name='sanity',
|
||||||
handler_name=handler_name,
|
handler_name=handler_name,
|
||||||
src=tests_src,
|
src=tests_src,
|
||||||
fork_name='phase0'
|
fork_name=PHASE0,
|
||||||
)
|
)
|
||||||
|
|
||||||
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||||
|
|
|
@ -6,6 +6,7 @@ from gen_base import gen_runner, gen_typing
|
||||||
|
|
||||||
from eth2spec.config import config_util
|
from eth2spec.config import config_util
|
||||||
from eth2spec.phase0 import spec as spec
|
from eth2spec.phase0 import spec as spec
|
||||||
|
from eth2spec.test.context import PHASE0
|
||||||
|
|
||||||
|
|
||||||
def shuffling_case_fn(seed, count):
|
def shuffling_case_fn(seed, count):
|
||||||
|
@ -37,7 +38,7 @@ def create_provider(config_name: str) -> gen_typing.TestProvider:
|
||||||
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
||||||
for (case_name, case_fn) in shuffling_test_cases():
|
for (case_name, case_fn) in shuffling_test_cases():
|
||||||
yield gen_typing.TestCase(
|
yield gen_typing.TestCase(
|
||||||
fork_name='phase0',
|
fork_name=PHASE0,
|
||||||
runner_name='shuffling',
|
runner_name='shuffling',
|
||||||
handler_name='core',
|
handler_name='core',
|
||||||
suite_name='shuffle',
|
suite_name='shuffle',
|
||||||
|
|
|
@ -6,6 +6,7 @@ import ssz_bitvector
|
||||||
import ssz_boolean
|
import ssz_boolean
|
||||||
import ssz_uints
|
import ssz_uints
|
||||||
import ssz_container
|
import ssz_container
|
||||||
|
from eth2spec.test.context import PHASE0
|
||||||
|
|
||||||
|
|
||||||
def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider:
|
def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider:
|
||||||
|
@ -16,7 +17,7 @@ def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typin
|
||||||
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
||||||
for (case_name, case_fn) in case_maker():
|
for (case_name, case_fn) in case_maker():
|
||||||
yield gen_typing.TestCase(
|
yield gen_typing.TestCase(
|
||||||
fork_name='phase0',
|
fork_name=PHASE0,
|
||||||
runner_name='ssz_generic',
|
runner_name='ssz_generic',
|
||||||
handler_name=handler_name,
|
handler_name=handler_name,
|
||||||
suite_name=suite_name,
|
suite_name=suite_name,
|
||||||
|
|
|
@ -8,6 +8,7 @@ from gen_base import gen_runner, gen_typing
|
||||||
from eth2spec.debug import random_value, encode
|
from eth2spec.debug import random_value, encode
|
||||||
from eth2spec.config import config_util
|
from eth2spec.config import config_util
|
||||||
from eth2spec.phase0 import spec
|
from eth2spec.phase0 import spec
|
||||||
|
from eth2spec.test.context import PHASE0
|
||||||
from eth2spec.utils.ssz.ssz_typing import Container
|
from eth2spec.utils.ssz.ssz_typing import Container
|
||||||
from eth2spec.utils.ssz.ssz_impl import (
|
from eth2spec.utils.ssz.ssz_impl import (
|
||||||
hash_tree_root,
|
hash_tree_root,
|
||||||
|
@ -44,7 +45,7 @@ def ssz_static_cases(seed: int, name, ssz_type, mode: random_value.Randomization
|
||||||
|
|
||||||
for i in range(count):
|
for i in range(count):
|
||||||
yield gen_typing.TestCase(
|
yield gen_typing.TestCase(
|
||||||
fork_name='phase0',
|
fork_name=PHASE0,
|
||||||
runner_name='ssz_static',
|
runner_name='ssz_static',
|
||||||
handler_name=name,
|
handler_name=name,
|
||||||
suite_name=f"ssz_{random_mode_name}{'_chaos' if chaos else ''}",
|
suite_name=f"ssz_{random_mode_name}{'_chaos' if chaos else ''}",
|
||||||
|
|
Loading…
Reference in New Issue