Merge pull request #987 from ethereum/dev

v0.6.0 Release
Danny Ryan 2019-04-24 14:04:13 -06:00 committed by GitHub
commit 7a06df6766
107 changed files with 6850 additions and 3186 deletions


@@ -1,41 +1,97 @@
# Python CircleCI 2.0 configuration file
version: 2
jobs:
build:
docker:
- image: circleci/python:3.6
working_directory: ~/repo
version: 2.1
commands:
restore_cached_venv:
description: "Restores a cached venv"
parameters:
reqs_checksum:
type: string
default: "1234"
venv_name:
type: string
default: "default-name"
steps:
- checkout
# Download and cache dependencies
- restore_cache:
keys:
- v1-dependencies-{{ checksum "requirements.txt" }}
- << parameters.venv_name >>-venv-<< parameters.reqs_checksum >>
# fallback to using the latest cache if no exact match is found
- v1-dependencies-
- run:
name: install dependencies
command: |
python3 -m venv venv
. venv/bin/activate
pip install -r requirements.txt
- run:
name: build phase0 spec
command: make build/phase0
- << parameters.venv_name >>-venv-
save_cached_venv:
description: "Saves a venv into a cache"
parameters:
reqs_checksum:
type: string
default: "1234"
venv_path:
type: string
default: "venv"
venv_name:
type: string
default: "default-name"
steps:
- save_cache:
paths:
- ./venv
key: v1-dependencies-{{ checksum "requirements.txt" }}
key: << parameters.venv_name >>-venv-<< parameters.reqs_checksum >>
paths: << parameters.venv_path >>
jobs:
checkout_specs:
docker:
- image: circleci/python:3.6
working_directory: ~/specs-repo
steps:
# Restore git repo at point close to target branch/revision, to speed up checkout
- restore_cache:
keys:
- v1-specs-repo-{{ .Branch }}-{{ .Revision }}
- v1-specs-repo-{{ .Branch }}-
- v1-specs-repo-
- checkout
- run:
name: run tests
command: |
. venv/bin/activate
pytest tests
- store_artifacts:
path: test-reports
destination: test-reports
name: Clean up git repo to reduce cache size
command: git gc
# Save the git checkout as a cache, to make cloning next time faster.
- save_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
paths:
- ~/specs-repo
install_test:
docker:
- image: circleci/python:3.6
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_cached_venv:
venv_name: v1-test_libs
reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}'
- run:
name: Install pyspec requirements
command: make install_test
- save_cached_venv:
venv_name: v1-test_libs
reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}'
venv_path: ./test_libs/venv
test:
docker:
- image: circleci/python:3.6
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_cached_venv:
venv_name: v1-test_libs
reqs_checksum: '{{ checksum "test_libs/pyspec/setup.py" }}'
- run:
name: Run py-tests
command: make citest
- store_test_results:
path: test_libs/pyspec/test-reports
workflows:
version: 2.1
test_spec:
jobs:
- checkout_specs
- install_test:
requires:
- checkout_specs
- test:
requires:
- install_test

.gitignore

@@ -1,7 +1,22 @@
*.pyc
/__pycache__
/venv
venv
.venvs
.venv
/.pytest_cache
*.egg
*.egg-info
eggs
.eggs
build/
output/
eth2.0-spec-tests/
.pytest_cache
# Dynamically built from Markdown spec
test_libs/pyspec/eth2spec/phase0/spec.py
# vscode
.vscode/**


@@ -1,29 +1,91 @@
SPEC_DIR = ./specs
SCRIPT_DIR = ./scripts
BUILD_DIR = ./build
UTILS_DIR = ./utils
TEST_LIBS_DIR = ./test_libs
PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
CONFIG_HELPERS_DIR = $(TEST_LIBS_DIR)/config_helpers
YAML_TEST_DIR = ./eth2.0-spec-tests/tests
GENERATOR_DIR = ./test_generators
CONFIGS_DIR = ./configs
# Collect a list of generator names
GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/)))
# Map this list of generator paths to a list of test output paths
YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS))
GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS))
PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py
PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS)
.PHONY: clean all test
all: $(BUILD_DIR)/phase0
.PHONY: clean all test citest gen_yaml_tests pyspec phase0 install_test
all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS)
clean:
rm -rf $(BUILD_DIR)
rm -rf $(YAML_TEST_DIR)
rm -rf $(GENERATOR_VENVS)
rm -rf $(TEST_LIBS_DIR)/venv
rm -rf $(PY_SPEC_DIR)/.pytest_cache
rm -rf $(PY_SPEC_ALL_TARGETS)
# "make gen_yaml_tests" to run generators
gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_TARGETS)
# installs the packages to run pyspec tests
install_test:
cd $(TEST_LIBS_DIR); python3 -m venv venv; . venv/bin/activate; \
cd ..; cd $(CONFIG_HELPERS_DIR); pip3 install -e .; \
cd ../..; cd $(PY_SPEC_DIR); pip3 install -e .[dev];
test: $(PY_SPEC_ALL_TARGETS)
cd $(TEST_LIBS_DIR); . venv/bin/activate; \
cd ..; cd $(PY_SPEC_DIR); python -m pytest .;
citest: $(PY_SPEC_ALL_TARGETS)
cd $(TEST_LIBS_DIR); . venv/bin/activate; \
cd ..; cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml .
# "make pyspec" to create the pyspec for all phases.
pyspec: $(PY_SPEC_ALL_TARGETS)
# "make phase0" to create pyspec for phase0
phase0: $(PY_SPEC_PHASE_0_TARGETS)
# runs a limited set of tests against a minimal config
# run pytest without the `-m` option to run the full suite
test:
pytest -m minimal_config tests/
$(PY_SPEC_DIR)/eth2spec/phase0/spec.py:
python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@
$(BUILD_DIR)/phase0:
CURRENT_DIR = ${CURDIR}
# The function that builds a set of suite files, by calling a generator for the given type (param 1)
define build_yaml_tests
# Started!
# Create output directory
# Navigate to the generator
# Create a virtual environment, if it does not exist already
# Activate the venv, this is where dependencies are installed for the generator
# Install all the necessary requirements
# Run the generator. The generator is assumed to have a "main.py" file.
# We output to the tests dir (the generator program should accept a "-o <filepath>" argument).
echo "generator $(1) started"; \
mkdir -p $(YAML_TEST_DIR)$(1); \
cd $(GENERATOR_DIR)$(1); \
if ! test -d venv; then python3 -m venv venv; fi; \
. venv/bin/activate; \
pip3 install -r requirements.txt; \
python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) -c $(CURRENT_DIR)/$(CONFIGS_DIR); \
echo "generator $(1) finished"
endef
# The tests dir itself is simply built by creating the directory (recursively creating deeper directories if necessary)
$(YAML_TEST_DIR):
$(info creating directory, to output yaml targets to: ${YAML_TEST_TARGETS})
mkdir -p $@
python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py
mkdir -p $@/utils
cp $(UTILS_DIR)/phase0/* $@/utils
cp $(UTILS_DIR)/phase0/state_transition.py $@
touch $@/__init__.py $@/utils/__init__.py
$(YAML_TEST_DIR)/:
$(info ignoring duplicate yaml tests dir)
# For any target within the tests dir, build it using the build_yaml_tests function.
# (creation of output dir is a dependency)
$(YAML_TEST_DIR)%: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR)
$(call build_yaml_tests,$*)


@@ -2,26 +2,47 @@
[![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQs) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
This repo hosts the current eth2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed upon changes to spec can be made through pull requests.
# Specs
## Specs
Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
* [Phase 0 -- The Beacon Chain](specs/core/0_beacon-chain.md)
* [Phase 1 -- Shard Data Chains](specs/core/1_shard-data-chains.md)
Accompanying documents can be found in [specs](specs) and include
* [SimpleSerialize (SSZ) spec](specs/simple-serialize.md)
* [BLS signature verification](specs/bls_signature.md)
* [General test format](specs/test-format.md)
### Phase 0
* [The Beacon Chain](specs/core/0_beacon-chain.md)
* [Fork Choice](specs/core/0_fork-choice.md)
* [Deposit Contract](specs/core/0_deposit-contract.md)
* [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md)
## Design goals
### Phase 1
* [Custody Game](specs/core/1_custody-game.md)
* [Shard Data Chains](specs/core/1_shard-data-chains.md)
### Accompanying documents can be found in [specs](specs) and include:
* [SimpleSerialize (SSZ) spec](specs/simple-serialize.md)
* [BLS signature verification](specs/bls_signature.md)
* [General test format](specs/test_formats/README.md)
* [Merkle proof formats](specs/light_client/merkle_proofs.md)
* [Light client syncing protocol](specs/light_client/sync_protocol.md)
### Design goals
The following are the broad design goals for Ethereum 2.0:
* to minimize complexity, even at the cost of some losses in efficiency
* to remain live through major network partitions and when very large portions of nodes go offline
* to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available
* to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time
* to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain)
## For spec contributors
Documentation on the different components used during spec writing can be found here:
* [YAML Test Generators](test_generators/README.md)
* [Executable Python Spec, with Py-tests](test_libs/pyspec/README.md)


@@ -0,0 +1,20 @@
# Constant Presets
This directory contains a set of constant presets used for testing, testnets, and mainnet.
A preset file contains all the constants known for its target.
Later-fork constants can be ignored; e.g. a client that only supports phase 0 can currently ignore the phase 1 constants.
## Format
Each preset is a key-value mapping.
**Key**: an `UPPER_SNAKE_CASE` (a.k.a. "macro case") formatted string, the name of the constant.
**Value**: can be any of:
- an unsigned integer, up to 64 bits (inclusive)
- a hexadecimal string, prefixed with `0x`
Presets may contain comments to describe the values.
See `mainnet.yaml` for a complete example.
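For illustration, loading such a preset and feeding it into the executable spec could look roughly as follows; this is a minimal sketch assuming PyYAML is installed (`apply_constants_preset` is the hook built into the pyspec, shown later in this diff):
```python
import yaml  # PyYAML, assumed installed for this sketch

def load_preset(path: str) -> dict:
    # A preset file is a flat UPPER_SNAKE_CASE-name -> value mapping.
    with open(path) as f:
        preset = yaml.safe_load(f)
    # Values are unsigned integers or 0x-prefixed hex strings; note that
    # PyYAML (YAML 1.1) already parses unquoted 0x... literals into ints.
    assert all(isinstance(v, (int, str)) for v in preset.values())
    return preset

# e.g.: apply_constants_preset(load_preset('mainnet.yaml'))
```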


@@ -0,0 +1,123 @@
# Mainnet preset
# Note: the intention of this file (for now) is to illustrate what a mainnet configuration could look like.
# Some of these constants may still change before the launch of Phase 0.
# Misc
# ---------------------------------------------------------------
# 2**10 (= 1,024)
SHARD_COUNT: 1024
# 2**7 (= 128)
TARGET_COMMITTEE_SIZE: 128
# 2**12 (= 4,096)
MAX_INDICES_PER_ATTESTATION: 4096
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# See issue 563
SHUFFLE_ROUND_COUNT: 90
# Deposit contract
# ---------------------------------------------------------------
# **TBD**
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
# 2**5 (= 32)
DEPOSIT_CONTRACT_TREE_DEPTH: 32
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei
MIN_DEPOSIT_AMOUNT: 1000000000
# 2**5 * 10**9 (= 32,000,000,000) Gwei
MAX_EFFECTIVE_BALANCE: 32000000000
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**0 * 10**9 (= 1,000,000,000) Gwei
HIGH_BALANCE_INCREMENT: 1000000000
# Initial values
# ---------------------------------------------------------------
GENESIS_FORK_VERSION: 0x00000000
# 0, GENESIS_EPOCH is derived from this constant
GENESIS_SLOT: 0
# 2**64 - 1
FAR_FUTURE_EPOCH: 18446744073709551615
BLS_WITHDRAWAL_PREFIX_BYTE: 0x00
# Time parameters
# ---------------------------------------------------------------
# 6 seconds 6 seconds
SECONDS_PER_SLOT: 6
# 2**2 (= 4) slots 24 seconds
MIN_ATTESTATION_INCLUSION_DELAY: 4
# 2**6 (= 64) slots 6.4 minutes
SLOTS_PER_EPOCH: 64
# 2**0 (= 1) epochs 6.4 minutes
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs 25.6 minutes
ACTIVATION_EXIT_DELAY: 4
# 2**10 (= 1,024) slots ~1.7 hours
SLOTS_PER_ETH1_VOTING_PERIOD: 1024
# 2**13 (= 8,192) slots ~13 hours
SLOTS_PER_HISTORICAL_ROOT: 8192
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**11 (= 2,048) epochs 9 days
PERSISTENT_COMMITTEE_PERIOD: 2048
# 2**6 (= 64) epochs ~7 hours
MAX_CROSSLINK_EPOCHS: 64
# State list lengths
# ---------------------------------------------------------------
# 2**13 (= 8,192) epochs ~36 days
LATEST_RANDAO_MIXES_LENGTH: 8192
# 2**13 (= 8,192) epochs ~36 days
LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 8192
# 2**13 (= 8,192) epochs ~36 days
LATEST_SLASHED_EXIT_LENGTH: 8192
# Reward and penalty quotients
# ---------------------------------------------------------------
# 2**5 (= 32)
BASE_REWARD_QUOTIENT: 32
# 2**9 (= 512)
WHISTLEBLOWING_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 16777216
# Max operations per block
# ---------------------------------------------------------------
# 2**5 (= 32)
MIN_PENALTY_QUOTIENT: 32
# 2**4 (= 16)
MAX_PROPOSER_SLASHINGS: 16
# 2**0 (= 1)
MAX_ATTESTER_SLASHINGS: 1
# 2**7 (= 128)
MAX_ATTESTATIONS: 128
# 2**4 (= 16)
MAX_DEPOSITS: 16
# 2**4 (= 16)
MAX_VOLUNTARY_EXITS: 16
# Originally 2**4 (= 16), disabled for now.
MAX_TRANSFERS: 0
# Signature domains
# ---------------------------------------------------------------
DOMAIN_BEACON_PROPOSER: 0
DOMAIN_RANDAO: 1
DOMAIN_ATTESTATION: 2
DOMAIN_DEPOSIT: 3
DOMAIN_VOLUNTARY_EXIT: 4
DOMAIN_TRANSFER: 5


@@ -0,0 +1,123 @@
# Minimal preset
# Misc
# ---------------------------------------------------------------
# [customized] Just 8 shards for testing purposes
SHARD_COUNT: 8
# [customized] insecure, but fast
TARGET_COMMITTEE_SIZE: 4
# 2**12 (= 4,096)
MAX_INDICES_PER_ATTESTATION: 4096
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
# 2**16 (= 65,536)
CHURN_LIMIT_QUOTIENT: 65536
# [customized] Faster, but insecure.
SHUFFLE_ROUND_COUNT: 10
# Deposit contract
# ---------------------------------------------------------------
# **TBD**
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
# 2**5 (= 32)
DEPOSIT_CONTRACT_TREE_DEPTH: 32
# Gwei values
# ---------------------------------------------------------------
# 2**0 * 10**9 (= 1,000,000,000) Gwei
MIN_DEPOSIT_AMOUNT: 1000000000
# 2**5 * 10**9 (= 32,000,000,000) Gwei
MAX_EFFECTIVE_BALANCE: 32000000000
# 2**4 * 10**9 (= 16,000,000,000) Gwei
EJECTION_BALANCE: 16000000000
# 2**0 * 10**9 (= 1,000,000,000) Gwei
HIGH_BALANCE_INCREMENT: 1000000000
# Initial values
# ---------------------------------------------------------------
GENESIS_FORK_VERSION: 0x00000000
# 0, GENESIS_EPOCH is derived from this constant
GENESIS_SLOT: 0
# 2**64 - 1
FAR_FUTURE_EPOCH: 18446744073709551615
BLS_WITHDRAWAL_PREFIX_BYTE: 0x00
# Time parameters
# ---------------------------------------------------------------
# 6 seconds 6 seconds
SECONDS_PER_SLOT: 6
# [customized] 2 slots
MIN_ATTESTATION_INCLUSION_DELAY: 2
# [customized] fast epochs
SLOTS_PER_EPOCH: 8
# 2**0 (= 1) epochs 6.4 minutes
MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs 25.6 minutes
ACTIVATION_EXIT_DELAY: 4
# [customized] higher frequency new deposits from eth1 for testing
SLOTS_PER_ETH1_VOTING_PERIOD: 16
# [customized] smaller state
SLOTS_PER_HISTORICAL_ROOT: 64
# 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**11 (= 2,048) epochs 9 days
PERSISTENT_COMMITTEE_PERIOD: 2048
# 2**6 (= 64) epochs ~7 hours
MAX_CROSSLINK_EPOCHS: 64
# State list lengths
# ---------------------------------------------------------------
# [customized] smaller state
LATEST_RANDAO_MIXES_LENGTH: 64
# [customized] smaller state
LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 64
# [customized] smaller state
LATEST_SLASHED_EXIT_LENGTH: 64
# Reward and penalty quotients
# ---------------------------------------------------------------
# 2**5 (= 32)
BASE_REWARD_QUOTIENT: 32
# 2**9 (= 512)
WHISTLEBLOWING_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 16777216
# Max operations per block
# ---------------------------------------------------------------
# 2**5 (= 32)
MIN_PENALTY_QUOTIENT: 32
# 2**4 (= 16)
MAX_PROPOSER_SLASHINGS: 16
# 2**0 (= 1)
MAX_ATTESTER_SLASHINGS: 1
# 2**7 (= 128)
MAX_ATTESTATIONS: 128
# 2**4 (= 16)
MAX_DEPOSITS: 16
# 2**4 (= 16)
MAX_VOLUNTARY_EXITS: 16
# Originally 2**4 (= 16), disabled for now.
MAX_TRANSFERS: 0
# Signature domains
# ---------------------------------------------------------------
DOMAIN_BEACON_PROPOSER: 0
DOMAIN_RANDAO: 1
DOMAIN_ATTESTATION: 2
DOMAIN_DEPOSIT: 3
DOMAIN_VOLUNTARY_EXIT: 4
DOMAIN_TRANSFER: 5


@@ -0,0 +1,18 @@
# Fork timelines
This directory contains a set of fork timelines used for testing, testnets, and mainnet.
A timeline file contains all the forks known for its target.
Later forks can be ignored; e.g. a client that only supports phase 0 can currently ignore the `phase1` fork.
## Format
Each preset is a key-value mapping.
**Key**: a `lower_snake_case` (a.k.a. "python case") formatted string, the name of the fork.
**Value**: an unsigned integer, the epoch at which the fork activates.
Timelines may contain comments to describe the values.
See `mainnet.yaml` for a complete example.
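For example, the fork active at a given epoch is the entry with the highest activation epoch that does not exceed it; a minimal sketch, where `timeline` is the parsed key-value mapping:
```python
def active_fork(timeline: dict, epoch: int) -> str:
    # Keep only forks that have already activated at `epoch`...
    started = {name: activation for name, activation in timeline.items() if activation <= epoch}
    assert started, "epoch precedes genesis"
    # ...and return the most recently activated one.
    return max(started, key=started.get)

# active_fork({'phase0': 67108864}, 70000000) == 'phase0'
```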


@@ -0,0 +1,12 @@
# Mainnet fork timeline
# Equal to GENESIS_EPOCH
phase0: 67108864
# Example 1:
# phase0_funny_fork_name: 67116000
# Example 2:
# Should be equal to PHASE_1_GENESIS_EPOCH
# (placeholder in example value here)
# phase1: 67163000


@@ -0,0 +1,6 @@
# Testing fork timeline
# Equal to GENESIS_EPOCH
phase0: 536870912
# No other forks considered in testing yet (to be implemented)


@@ -1,6 +0,0 @@
eth-utils>=1.3.0,<2
eth-typing>=2.1.0,<3.0.0
oyaml==0.7
pycryptodome==3.7.3
py_ecc>=1.6.0
pytest>=3.6,<3.7


@@ -2,24 +2,32 @@ import sys
import function_puller
def build_spec(sourcefile, outfile):
def build_phase0_spec(sourcefile, outfile):
code_lines = []
code_lines.append("from build.phase0.utils.minimal_ssz import *")
code_lines.append("from build.phase0.utils.bls_stub import *")
for i in (1, 2, 3, 4, 8, 32, 48, 96):
code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i))
code_lines.append("SLOTS_PER_EPOCH = 64") # stub, will get overwritten by real var
code_lines.append("def slot_to_epoch(x): return x // SLOTS_PER_EPOCH")
code_lines.append("""
from typing import (
Any,
Callable,
Dict,
List,
NewType,
Tuple,
)
from eth2spec.utils.minimal_ssz import *
from eth2spec.utils.bls_stub import *
""")
for i in (1, 2, 3, 4, 8, 32, 48, 96):
code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i))
code_lines.append("""
# stub, will get overwritten by real var
SLOTS_PER_EPOCH = 64
def slot_to_epoch(x): return x // SLOTS_PER_EPOCH
Slot = NewType('Slot', int) # uint64
@@ -34,31 +42,36 @@ Any = None
Store = None
""")
code_lines += function_puller.get_lines(sourcefile)
code_lines += function_puller.get_spec(sourcefile)
code_lines.append("""
# Monkey patch validator shuffling cache
_get_shuffling = get_shuffling
shuffling_cache = {}
def get_shuffling(seed: Bytes32,
validators: List[Validator],
epoch: Epoch) -> List[List[ValidatorIndex]]:
# Monkey patch validator get committee code
_compute_committee = compute_committee
committee_cache = {}
param_hash = (seed, hash_tree_root(validators, [Validator]), epoch)
if param_hash in shuffling_cache:
def compute_committee(validator_indices: List[ValidatorIndex],
seed: Bytes32,
index: int,
total_committees: int) -> List[ValidatorIndex]:
param_hash = (hash_tree_root(validator_indices), seed, index, total_committees)
if param_hash in committee_cache:
# print("Cache hit, epoch={0}".format(epoch))
return shuffling_cache[param_hash]
return committee_cache[param_hash]
else:
# print("Cache miss, epoch={0}".format(epoch))
ret = _get_shuffling(seed, validators, epoch)
shuffling_cache[param_hash] = ret
ret = _compute_committee(validator_indices, seed, index, total_committees)
committee_cache[param_hash] = ret
return ret
# Monkey patch hash cache
_hash = hash
hash_cache = {}
def hash(x):
if x in hash_cache:
return hash_cache[x]
@@ -66,7 +79,19 @@ def hash(x):
ret = _hash(x)
hash_cache[x] = ret
return ret
""")
# Access to overwrite spec constants based on configuration
def apply_constants_preset(preset: Dict[str, Any]):
global_vars = globals()
for k, v in preset.items():
global_vars[k] = v
# Deal with derived constants
global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)
# Initialize SSZ types again, to account for changed lengths
init_SSZ_types()
""")
with open(outfile, 'w') as out:
out.write("\n".join(code_lines))
@@ -74,5 +99,6 @@ def hash(x):
if __name__ == '__main__':
if len(sys.argv) < 3:
print("Error: spec source and outfile must defined")
build_spec(sys.argv[1], sys.argv[2])
print("Usage: <source phase0> <output phase0 pyspec>")
build_phase0_spec(sys.argv[1], sys.argv[2])


@@ -1,11 +1,13 @@
import sys
from typing import List
def get_lines(file_name):
def get_spec(file_name: str) -> List[str]:
code_lines = []
pulling_from = None
current_name = None
processing_typedef = False
current_typedef = None
type_defs = []
for linenum, line in enumerate(open(file_name).readlines()):
line = line.rstrip()
if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
@@ -17,17 +19,26 @@ def get_lines(file_name):
if pulling_from is None:
pulling_from = linenum
else:
if processing_typedef:
if current_typedef is not None:
assert code_lines[-1] == '}'
code_lines[-1] = '})'
current_typedef[-1] = '})'
type_defs.append((current_name, current_typedef))
pulling_from = None
processing_typedef = False
current_typedef = None
else:
if pulling_from == linenum and line == '{':
code_lines.append('%s = SSZType({' % current_name)
processing_typedef = True
current_typedef = ['global_vars["%s"] = SSZType({' % current_name]
elif pulling_from is not None:
# Add some whitespace between functions
if line[:3] == 'def':
code_lines.append('')
code_lines.append('')
code_lines.append(line)
# Remember type def lines
if current_typedef is not None:
current_typedef.append(line)
elif pulling_from is None and len(line) > 0 and line[0] == '|':
row = line[1:].split('|')
if len(row) >= 2:
@@ -42,5 +53,18 @@ def get_lines(file_name):
if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
eligible = False
if eligible:
code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123567890123456789012357890')))
code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')))
# Build type-def re-initialization
code_lines.append('')
code_lines.append('def init_SSZ_types():')
code_lines.append(' global_vars = globals()')
for ssz_type_name, ssz_type in type_defs:
code_lines.append('')
for type_line in ssz_type:
code_lines.append(' ' + type_line)
code_lines.append('')
code_lines.append('ssz_types = [' + ', '.join([f'\'{ssz_type_name}\'' for (ssz_type_name, _) in type_defs]) + ']')
code_lines.append('')
code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType: return globals()[name]')
code_lines.append('')
return code_lines


@@ -86,9 +86,9 @@ def hash_to_G2(message_hash: Bytes32, domain: uint64) -> [uint384]:
### `modular_squareroot`
`modular_squareroot(x)` returns a solution `y` to `y**2 % q == x`, and `None` if none exists. If there are two solutions the one with higher imaginary component is favored; if both solutions have equal imaginary component the one with higher real component is favored (note that this is equivalent to saying that the single solution with either imaginary component > p/2 or imaginary component zero and real component > p/2 is favored).
`modular_squareroot(x)` returns a solution `y` to `y**2 % q == x`, and `None` if none exists. If there are two solutions, the one with higher imaginary component is favored; if both solutions have equal imaginary component, the one with higher real component is favored (note that this is equivalent to saying that the single solution with either imaginary component > p/2 or imaginary component zero and real component > p/2 is favored).
The following is a sample implementation; implementers are free to implement modular square roots as they wish. Note that `x2 = -x1` is an _additive modular inverse_ so real and imaginary coefficients remain in `[0 .. q-1]`. `coerce_to_int(element: Fq) -> int` is a function that takes Fq element `element` (ie. integers `mod q`) and converts it to a regular integer.
The following is a sample implementation; implementers are free to implement modular square roots as they wish. Note that `x2 = -x1` is an _additive modular inverse_ so real and imaginary coefficients remain in `[0 .. q-1]`. `coerce_to_int(element: Fq) -> int` is a function that takes Fq element `element` (i.e. integers `mod q`) and converts it to a regular integer.
```python
Fq2_order = q ** 2 - 1
@@ -110,11 +110,11 @@ def modular_squareroot(value: Fq2) -> Fq2:
### `bls_aggregate_pubkeys`
Let `bls_aggregate_pubkeys(pubkeys: List[Bytes48]) -> Bytes48` return `pubkeys[0] + .... + pubkeys[len(pubkeys)-1]`, where `+` is the elliptic curve addition operation over the G1 curve.
Let `bls_aggregate_pubkeys(pubkeys: List[Bytes48]) -> Bytes48` return `pubkeys[0] + .... + pubkeys[len(pubkeys)-1]`, where `+` is the elliptic curve addition operation over the G1 curve. (When `len(pubkeys) == 0` the empty sum is the G1 point at infinity.)
### `bls_aggregate_signatures`
Let `bls_aggregate_signatures(signatures: List[Bytes96]) -> Bytes96` return `signatures[0] + .... + signatures[len(signatures)-1]`, where `+` is the elliptic curve addition operation over the G2 curve.
Let `bls_aggregate_signatures(signatures: List[Bytes96]) -> Bytes96` return `signatures[0] + .... + signatures[len(signatures)-1]`, where `+` is the elliptic curve addition operation over the G2 curve. (When `len(signatures) == 0` the empty sum is the G2 point at infinity.)
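A sketch of both aggregations over raw curve points, assuming py_ecc's `optimized_bls12_381` module is available (the (de)compression between points and the `Bytes48`/`Bytes96` wire encodings is omitted here):
```python
from py_ecc.optimized_bls12_381 import add, Z1, Z2  # Z1/Z2: points at infinity

def aggregate(points, identity):
    # Repeated elliptic-curve point addition; the empty sum is the identity.
    result = identity
    for point in points:
        result = add(result, point)
    return result

# Pubkeys aggregate over G1 and signatures over G2:
#   agg_pubkey    = aggregate(pubkey_points, Z1)
#   agg_signature = aggregate(signature_points, Z2)
```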
## Signature verification

File diff suppressed because it is too large.


@@ -0,0 +1,76 @@
# Ethereum 2.0 Phase 0 -- Deposit Contract
**NOTICE**: This document is a work in progress for researchers and implementers.
## Table of contents
<!-- TOC -->
- [Ethereum 2.0 Phase 0 -- Deposit Contract](#ethereum-20-phase-0----deposit-contract)
- [Table of contents](#table-of-contents)
- [Introduction](#introduction)
- [Constants](#constants)
- [Deposit contract](#deposit-contract)
- [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract)
- [Deposit arguments](#deposit-arguments)
- [Withdrawal credentials](#withdrawal-credentials)
- [`Deposit` logs](#deposit-logs)
- [`Eth2Genesis` log](#eth2genesis-log)
- [Vyper code](#vyper-code)
<!-- /TOC -->
## Introduction
This document is the specification for the beacon chain deposit contract, part of Ethereum 2.0 phase 0.
## Constants
### Deposit contract
| Name | Value |
| - | - |
| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
| `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
## Ethereum 1.0 deposit contract
The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in phase 2, i.e. when the EVM2.0 is deployed and the shards have state.
### Deposit arguments
The deposit contract has a single `deposit` function which takes as argument a SimpleSerialize'd `DepositData`.
### Withdrawal credentials
One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawals to shards. The first byte of `withdrawal_credentials` is a version number. As of now the only expected format is as follows:
* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX_BYTE`
* `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey
The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage.
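As an illustrative sketch, assuming 48-byte BLS pubkeys and using SHA-256 purely as a stand-in for the spec's `hash` function:
```python
from hashlib import sha256

BLS_WITHDRAWAL_PREFIX_BYTE = b'\x00'

def withdrawal_credentials(withdrawal_pubkey: bytes) -> bytes:
    assert len(withdrawal_pubkey) == 48  # BLS12-381 G1 pubkey
    # First byte: version prefix; remaining 31 bytes: hash of the pubkey.
    return BLS_WITHDRAWAL_PREFIX_BYTE + sha256(withdrawal_pubkey).digest()[1:]
```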
### `Deposit` logs
Every Ethereum 1.0 deposit, of size between `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, emits a `Deposit` log for consumption by the beacon chain. The deposit contract does little validation, pushing most of the validator onboarding logic to the beacon chain. In particular, the proof of possession (a BLS12 signature) is not verified by the deposit contract.
### `Eth2Genesis` log
When a sufficient number of full deposits have been made, the deposit contract emits the `Eth2Genesis` log. The beacon chain state may then be initialized by calling the `get_genesis_beacon_state` function (defined below) where:
* `genesis_time` equals `time` in the `Eth2Genesis` log
* `latest_eth1_data.deposit_root` equals `deposit_root` in the `Eth2Genesis` log
* `latest_eth1_data.deposit_count` equals `deposit_count` in the `Eth2Genesis` log
* `latest_eth1_data.block_hash` equals the hash of the block that included the log
* `genesis_validator_deposits` is a list of `Deposit` objects built according to the `Deposit` logs up to the deposit that triggered the `Eth2Genesis` log, processed in the order in which they were emitted (oldest to newest)
### Vyper code
The source for the Vyper contract lives in a [separate repository](https://github.com/ethereum/deposit_contract) at [https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py](https://github.com/ethereum/deposit_contract/blob/master/deposit_contract/contracts/validator_registration.v.py).
Note: to save ~10x on gas, this contract uses a somewhat unintuitive progressive Merkle root calculation algorithm that requires only O(log(n)) storage. See https://github.com/ethereum/research/blob/master/beacon_chain_impl/progressive_merkle_tree.py for an implementation of the same algorithm in Python, tested for correctness.
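The idea is to cache one node per tree level: each cached node is the root of a completed left subtree waiting for its right sibling. A minimal Python sketch of the progressive algorithm (SHA-256 assumed as the hash; error handling and the contract's serialization details omitted):
```python
from hashlib import sha256

TREE_DEPTH = 32  # DEPOSIT_CONTRACT_TREE_DEPTH

def hash(data: bytes) -> bytes:
    return sha256(data).digest()

# zero_hashes[h] is the root of a subtree of 2**h empty leaves.
zero_hashes = [b'\x00' * 32]
for h in range(1, TREE_DEPTH):
    zero_hashes.append(hash(zero_hashes[h - 1] + zero_hashes[h - 1]))

branch = list(zero_hashes)  # one cached node per height: O(log n) storage
deposit_count = 0

def insert(leaf: bytes) -> None:
    # Fold the new leaf into the cached branch, carrying like binary addition.
    global deposit_count
    deposit_count += 1
    size, node = deposit_count, leaf
    for h in range(TREE_DEPTH):
        if size % 2 == 1:
            branch[h] = node
            return
        node = hash(branch[h] + node)
        size //= 2

def get_root() -> bytes:
    # Combine cached left siblings with empty-subtree roots on the right.
    size, node = deposit_count, b'\x00' * 32
    for h in range(TREE_DEPTH):
        node = hash(branch[h] + node) if size % 2 == 1 else hash(node + zero_hashes[h])
        size //= 2
    return node
```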
For convenience, we provide the interface to the contract here:
* `__init__()`: initializes the contract
* `get_deposit_root() -> bytes32`: returns the current root of the deposit tree
* `deposit(bytes[512])`: adds a deposit instance to the deposit tree, incorporating the input argument and the value transferred in the given call. Note: the amount of value transferred *must* be within `MIN_DEPOSIT_AMOUNT` and `MAX_DEPOSIT_AMOUNT`, inclusive. Each of these constants is specified in units of Gwei.

specs/core/0_fork-choice.md

@@ -0,0 +1,101 @@
# Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice
**NOTICE**: This document is a work in progress for researchers and implementers.
## Table of contents
<!-- TOC -->
- [Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice](#ethereum-20-phase-0----beacon-chain-fork-choice)
- [Table of contents](#table-of-contents)
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Constants](#constants)
- [Time parameters](#time-parameters)
- [Beacon chain processing](#beacon-chain-processing)
- [Beacon chain fork choice rule](#beacon-chain-fork-choice-rule)
<!-- /TOC -->
## Introduction
This document is the specification for the beacon chain fork choice rule, part of Ethereum 2.0 phase 0.
## Prerequisites
All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](./0_beacon-chain.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use it as a reference throughout.
## Constants
### Time parameters
| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `SECONDS_PER_SLOT` | `6` | seconds | 6 seconds |
## Beacon chain processing
Processing the beacon chain is similar to processing the Ethereum 1.0 chain. Clients download and process blocks and maintain a view of the current "canonical chain", terminating at the current "head". For a beacon block, `block`, to be processed by a node, the following conditions must be met:
* The parent block with root `block.previous_block_root` has been processed and accepted.
* An Ethereum 1.0 block pointed to by the `state.latest_eth1_data.block_hash` has been processed and accepted.
* The node's Unix time is greater than or equal to `state.genesis_time + block.slot * SECONDS_PER_SLOT`.
Note: Leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year.
Note: Nodes need to have a clock that is roughly (i.e. within `SECONDS_PER_SLOT` seconds) synchronized with the other nodes.
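Putting the three conditions together, a client-side check could look like the following sketch; `store.has_block` and `eth1_chain.has_block` are hypothetical helpers standing in for a client's own block storage and eth1 tracking:
```python
import time

SECONDS_PER_SLOT = 6

def is_processable(store, eth1_chain, state, block) -> bool:
    return (
        store.has_block(block.previous_block_root)                    # parent processed and accepted
        and eth1_chain.has_block(state.latest_eth1_data.block_hash)   # referenced eth1 block known
        and time.time() >= state.genesis_time + block.slot * SECONDS_PER_SLOT  # slot has started
    )
```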
### Beacon chain fork choice rule
The beacon chain fork choice rule is a hybrid that combines justification and finality with Latest Message Driven (LMD) Greedy Heaviest Observed SubTree (GHOST). At any point in time a [validator](#dfn-validator) `v` subjectively calculates the beacon chain head as follows.
* Abstractly define `Store` as the type of storage object for the chain data, and let `store` be the set of attestations and blocks that the [validator](#dfn-validator) `v` has observed and verified (in particular, block ancestors must be recursively verified). Attestations not yet included in any chain are still included in `store`.
* Let `finalized_head` be the finalized block with the highest epoch. (A block `B` is finalized if there is a descendant of `B` in `store` the processing of which sets `B` as finalized.)
* Let `justified_head` be the descendant of `finalized_head` with the highest epoch that has been justified for at least 1 epoch. (A block `B` is justified if there is a descendant of `B` in `store` the processing of which sets `B` as justified.) If no such descendant exists set `justified_head` to `finalized_head`.
* Let `get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock` be the ancestor of `block` with slot number `slot`. The `get_ancestor` function can be defined recursively as:
```python
def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock:
"""
Get the ancestor of ``block`` with slot number ``slot``; return ``None`` if not found.
"""
if block.slot == slot:
return block
elif block.slot < slot:
return None
else:
return get_ancestor(store, store.get_parent(block), slot)
```
* Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. If several such attestations exist, use the one the [validator](#dfn-validator) `v` observed first.
* Let `get_latest_attestation_target(store: Store, index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, index)`.
* Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` return the child blocks of the given `block`.
* Let `justified_head_state` be the resulting `BeaconState` object from processing the chain up to the `justified_head`.
* The `head` is `lmd_ghost(store, justified_head_state, justified_head)` where the function `lmd_ghost` is defined below. Note that the implementation below is suboptimal; there are implementations that compute the head in time logarithmic in slot count.
```python
def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) -> BeaconBlock:
"""
Execute the LMD-GHOST algorithm to find the head ``BeaconBlock``.
"""
validators = start_state.validator_registry
active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot))
attestation_targets = [(i, get_latest_attestation_target(store, i)) for i in active_validator_indices]
# Use the rounded-balance-with-hysteresis supplied by the protocol for fork
# choice voting. This reduces the number of recomputations that need to be
# made for optimized implementations that precompute and save data
def get_vote_count(block: BeaconBlock) -> int:
return sum(
start_state.validator_registry[validator_index].high_balance
for validator_index, target in attestation_targets
if get_ancestor(store, target, block.slot) == block
)
head = start_block
while 1:
children = get_children(store, head)
if len(children) == 0:
return head
# Ties broken by favoring block with lexicographically higher root
head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x)))
```


@@ -0,0 +1,529 @@
# Ethereum 2.0 Phase 1 -- Custody Game
**NOTICE**: This spec is a work-in-progress for researchers and implementers.
## Table of contents
<!-- TOC -->
- [Ethereum 2.0 Phase 1 -- Custody Game](#ethereum-20-phase-1----custody-game)
- [Table of contents](#table-of-contents)
- [Introduction](#introduction)
- [Terminology](#terminology)
- [Constants](#constants)
- [Misc](#misc)
- [Time parameters](#time-parameters)
- [Max operations per block](#max-operations-per-block)
- [Signature domains](#signature-domains)
- [Data structures](#data-structures)
- [Custody objects](#custody-objects)
- [`CustodyChunkChallenge`](#custodychunkchallenge)
- [`CustodyBitChallenge`](#custodybitchallenge)
- [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord)
- [`CustodyBitChallengeRecord`](#custodybitchallengerecord)
- [`CustodyResponse`](#custodyresponse)
- [`CustodyKeyReveal`](#custodykeyreveal)
- [Phase 0 container updates](#phase-0-container-updates)
- [`Validator`](#validator)
- [`BeaconState`](#beaconstate)
- [`BeaconBlockBody`](#beaconblockbody)
- [Helpers](#helpers)
- [`typeof`](#typeof)
- [`empty`](#empty)
- [`get_custody_chunk_count`](#get_custody_chunk_count)
- [`get_custody_chunk_bit`](#get_custody_chunk_bit)
- [`epoch_to_custody_period`](#epoch_to_custody_period)
- [`replace_empty_or_append`](#replace_empty_or_append)
- [`verify_custody_key`](#verify_custody_key)
- [Per-block processing](#per-block-processing)
- [Operations](#operations)
- [Custody reveals](#custody-reveals)
- [Chunk challenges](#chunk-challenges)
- [Bit challenges](#bit-challenges)
- [Custody responses](#custody-responses)
- [Per-epoch processing](#per-epoch-processing)
<!-- /TOC -->
## Introduction
This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [phase 0](0_beacon-chain.md) specification.
## Terminology
* **Custody game**:
* **Custody period**:
* **Custody chunk**:
* **Custody chunk bit**:
* **Custody chunk challenge**:
* **Custody bit**:
* **Custody bit challenge**:
* **Custody key**:
* **Custody key reveal**:
* **Custody key mask**:
* **Custody response**:
* **Custody response deadline**:
## Constants
### Misc
| Name | Value |
| - | - |
| `BYTES_PER_SHARD_BLOCK` | `2**14` (= 16,384) |
| `BYTES_PER_CUSTODY_CHUNK` | `2**9` (= 512) |
| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) |
### Time parameters
| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `MAX_CHUNK_CHALLENGE_DELAY` | `2**11` (= 2,048) | epochs | ~9 days |
| `EPOCHS_PER_CUSTODY_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days |
| `CUSTODY_RESPONSE_DEADLINE` | `2**14` (= 16,384) | epochs | ~73 days |
### Max operations per block
| Name | Value |
| - | - |
| `MAX_CUSTODY_KEY_REVEALS` | `2**4` (= 16) |
| `MAX_CUSTODY_CHUNK_CHALLENGES` | `2**2` (= 4) |
| `MAX_CUSTODY_BIT_CHALLENGES` | `2**2` (= 4) |
| `MAX_CUSTODY_RESPONSES` | `2**5` (= 32) |
### Signature domains
| Name | Value |
| - | - |
| `DOMAIN_CUSTODY_KEY_REVEAL` | `6` |
| `DOMAIN_CUSTODY_BIT_CHALLENGE` | `7` |
## Data structures
### Custody objects
#### `CustodyChunkChallenge`
```python
{
'responder_index': ValidatorIndex,
'attestation': Attestation,
'chunk_index': 'uint64',
}
```
#### `CustodyBitChallenge`
```python
{
'responder_index': ValidatorIndex,
'attestation': Attestation,
'challenger_index': ValidatorIndex,
'responder_key': BLSSignature,
'chunk_bits': Bitfield,
'signature': BLSSignature,
}
```
#### `CustodyChunkChallengeRecord`
```python
{
'challenge_index': 'uint64',
'challenger_index': ValidatorIndex,
'responder_index': ValidatorIndex,
'deadline': Epoch,
'crosslink_data_root': Hash,
'depth': 'uint64',
'chunk_index': 'uint64',
}
```
#### `CustodyBitChallengeRecord`
```python
{
'challenge_index': 'uint64',
'challenger_index': ValidatorIndex,
'responder_index': ValidatorIndex,
'deadline': Epoch,
'crosslink_data_root': Hash,
'chunk_bits': Bitfield,
'responder_key': BLSSignature,
}
```
#### `CustodyResponse`
```python
{
'challenge_index': 'uint64',
'chunk_index': 'uint64',
'chunk': ['byte', BYTES_PER_CUSTODY_CHUNK],
'branch': [Hash],
}
```
#### `CustodyKeyReveal`
```python
{
'revealer_index': ValidatorIndex,
'period': 'uint64',
'key': BLSSignature,
'masker_index': ValidatorIndex,
'mask': Hash,
}
```
### Phase 0 container updates
Add the following fields to the end of the specified container objects. Fields with underlying type `uint64` are initialized to `0` and list fields are initialized to `[]`.
#### `Validator`
```python
'custody_reveal_index': 'uint64',
'max_reveal_lateness': 'uint64',
```
#### `BeaconState`
```python
'custody_chunk_challenge_records': [CustodyChunkChallengeRecord],
'custody_bit_challenge_records': [CustodyBitChallengeRecord],
'custody_challenge_index': 'uint64',
```
#### `BeaconBlockBody`
```python
'custody_key_reveals': [CustodyKeyReveal],
'custody_chunk_challenges': [CustodyChunkChallenge],
'custody_bit_challenges': [CustodyBitChallenge],
'custody_responses': [CustodyResponse],
```
## Helpers
### `typeof`
The `typeof` function accepts an SSZ object as its single input and returns the corresponding SSZ type.
### `empty`
The `empty` function accepts an SSZ type as input and returns an object of that type with all fields initialized to default values.
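Both helpers are left abstract by the spec; a minimal sketch of the intent, assuming (for this sketch only) that SSZ objects are instances of their SSZ type and that calling a type with no arguments default-initializes it:
```python
from typing import Any, Type

def typeof(obj: Any) -> Type:
    # Assumption: every SSZ object is an instance of its SSZ type.
    return type(obj)

def empty(ssz_type: Type) -> Any:
    # Assumption: a no-argument call yields an object with default fields.
    return ssz_type()
```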
### `get_custody_chunk_count`
```python
def get_custody_chunk_count(attestation: Attestation) -> int:
    crosslink_start_epoch = attestation.data.latest_crosslink.epoch
    crosslink_end_epoch = slot_to_epoch(attestation.data.slot)
    crosslink_length = min(MAX_CROSSLINK_EPOCHS, crosslink_end_epoch - crosslink_start_epoch)
    chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK
    return crosslink_length * chunks_per_epoch
```
### `get_custody_chunk_bit`
```python
def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool:
    # TODO: Replace with something MPC-friendly, e.g. the Legendre symbol
    return get_bitfield_bit(hash(key + chunk), 0)
```
### `epoch_to_custody_period`
```python
def epoch_to_custody_period(epoch: Epoch) -> int:
return epoch // EPOCHS_PER_CUSTODY_PERIOD
```
### `replace_empty_or_append`
```python
def replace_empty_or_append(lst: List[Any], new_element: Any) -> int:
    for i in range(len(lst)):
        if lst[i] == empty(typeof(new_element)):
            lst[i] = new_element
            return i
    lst.append(new_element)
    return len(lst) - 1
```
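For example (hypothetical values), calling `replace_empty_or_append` on `[r0, EMPTY, r2]` overwrites the empty slot and returns `1`, while on `[r0, r2]` it appends and returns `2`; existing records never shift position, so stored challenge indices stay valid.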
### `verify_custody_key`
```python
def verify_custody_key(state: BeaconState, reveal: CustodyKeyReveal) -> bool:
# Case 1: non-masked non-punitive non-early reveal
pubkeys = [state.validator_registry[reveal.revealer_index].pubkey]
message_hashes = [hash_tree_root(reveal.period)]
# Case 2: masked punitive early reveal
# Masking prevents proposer stealing the whistleblower reward
# Secure under the aggregate extraction infeasibility assumption
# See pages 11-12 of https://crypto.stanford.edu/~dabo/pubs/papers/aggreg.pdf
if reveal.mask != ZERO_HASH:
pubkeys.append(state.validator_registry[reveal.masker_index].pubkey)
message_hashes.append(reveal.mask)
return bls_verify_multiple(
pubkeys=pubkeys,
message_hashes=message_hashes,
signature=reveal.key,
domain=get_domain(
fork=state.fork,
epoch=reveal.period * EPOCHS_PER_CUSTODY_PERIOD,
domain_type=DOMAIN_CUSTODY_KEY_REVEAL,
),
)
```
## Per-block processing
### Operations
Add the following operations to the per-block processing, in the order given below and after all other operations in phase 0.
#### Custody reveals
Verify that `len(block.body.custody_key_reveals) <= MAX_CUSTODY_KEY_REVEALS`.
For each `reveal` in `block.body.custody_key_reveals`, run the following function:
```python
def process_custody_reveal(state: BeaconState,
                           reveal: CustodyKeyReveal) -> None:
    assert verify_custody_key(state, reveal)
    revealer = state.validator_registry[reveal.revealer_index]
    current_custody_period = epoch_to_custody_period(get_current_epoch(state))

    # Case 1: non-masked non-punitive non-early reveal
    if reveal.mask == ZERO_HASH:
        assert reveal.period == epoch_to_custody_period(revealer.activation_epoch) + revealer.custody_reveal_index
        # Revealer is active or exited
        assert is_active_validator(revealer, get_current_epoch(state)) or revealer.exit_epoch > get_current_epoch(state)
        revealer.custody_reveal_index += 1
        revealer.max_reveal_lateness = max(revealer.max_reveal_lateness, current_custody_period - reveal.period)
        proposer_index = get_beacon_proposer_index(state)
        increase_balance(state, proposer_index, base_reward(state, reveal.revealer_index) // MINOR_REWARD_QUOTIENT)

    # Case 2: masked punitive early reveal
    else:
        assert reveal.period > current_custody_period
        assert revealer.slashed is False
        slash_validator(state, reveal.revealer_index, reveal.masker_index)
```
#### Chunk challenges
Verify that `len(block.body.custody_chunk_challenges) <= MAX_CUSTODY_CHUNK_CHALLENGES`.
For each `challenge` in `block.body.custody_chunk_challenges`, run the following function:
```python
def process_chunk_challenge(state: BeaconState,
                            challenge: CustodyChunkChallenge) -> None:
    # Verify the attestation
    assert verify_standalone_attestation(state, convert_to_standalone(state, challenge.attestation))
    # Verify it is not too late to challenge
    assert slot_to_epoch(challenge.attestation.data.slot) >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY
    responder = state.validator_registry[challenge.responder_index]
    assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY
    # Verify the responder participated in the attestation
    attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bitfield)
    assert challenge.responder_index in attesters
    # Verify the challenge is not a duplicate
    for record in state.custody_chunk_challenge_records:
        assert (
            record.crosslink_data_root != challenge.attestation.data.crosslink_data_root or
            record.chunk_index != challenge.chunk_index
        )
    # Verify depth
    depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation)))
    assert challenge.chunk_index < 2**depth
    # Add new chunk challenge record
    new_record = CustodyChunkChallengeRecord(
        challenge_index=state.custody_challenge_index,
        challenger_index=get_beacon_proposer_index(state),
        responder_index=challenge.responder_index,
        deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE,
        crosslink_data_root=challenge.attestation.data.crosslink_data_root,
        depth=depth,
        chunk_index=challenge.chunk_index,
    )
    replace_empty_or_append(state.custody_chunk_challenge_records, new_record)
    state.custody_challenge_index += 1
    # Postpone responder withdrawability
    responder.withdrawable_epoch = FAR_FUTURE_EPOCH
```
#### Bit challenges
Verify that `len(block.body.custody_bit_challenges) <= MAX_CUSTODY_BIT_CHALLENGES`.
For each `challenge` in `block.body.custody_bit_challenges`, run the following function:
```python
def process_bit_challenge(state: BeaconState,
                          challenge: CustodyBitChallenge) -> None:
    # Verify challenge signature
    challenger = state.validator_registry[challenge.challenger_index]
    assert bls_verify(
        pubkey=challenger.pubkey,
        message_hash=signing_root(challenge),
        signature=challenge.signature,
        domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_BIT_CHALLENGE),
    )
    # Verify the challenger is not slashed
    assert challenger.slashed is False
    # Verify the attestation
    assert verify_standalone_attestation(state, convert_to_standalone(state, challenge.attestation))
    # Verify the attestation is eligible for challenging
    responder = state.validator_registry[challenge.responder_index]
    min_challengeable_epoch = responder.exit_epoch - EPOCHS_PER_CUSTODY_PERIOD * (1 + responder.max_reveal_lateness)
    assert min_challengeable_epoch <= slot_to_epoch(challenge.attestation.data.slot)
    # Verify the responder participated in the attestation
    attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bitfield)
    assert challenge.responder_index in attesters
    # A validator can be the challenger or responder for at most one challenge at a time
    for record in state.custody_bit_challenge_records:
        assert record.challenger_index != challenge.challenger_index
        assert record.responder_index != challenge.responder_index
    # Verify the responder key
    assert verify_custody_key(state, CustodyKeyReveal(
        revealer_index=challenge.responder_index,
        period=epoch_to_custody_period(slot_to_epoch(challenge.attestation.data.slot)),
        key=challenge.responder_key,
        masker_index=0,
        mask=ZERO_HASH,
    ))
    # Verify the chunk count
    chunk_count = get_custody_chunk_count(challenge.attestation)
    assert verify_bitfield(challenge.chunk_bits, chunk_count)
    # Verify the xor of the chunk bits does not equal the custody bit
    chunk_bits_xor = 0b0
    for i in range(chunk_count):
        chunk_bits_xor ^= get_bitfield_bit(challenge.chunk_bits, i)
    custody_bit = get_bitfield_bit(challenge.attestation.custody_bitfield, attesters.index(challenge.responder_index))
    assert custody_bit != chunk_bits_xor
    # Add new bit challenge record
    new_record = CustodyBitChallengeRecord(
        challenge_index=state.custody_challenge_index,
        challenger_index=challenge.challenger_index,
        responder_index=challenge.responder_index,
        deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE,
        crosslink_data_root=challenge.attestation.data.crosslink_data_root,
        chunk_bits=challenge.chunk_bits,
        responder_key=challenge.responder_key,
    )
    replace_empty_or_append(state.custody_bit_challenge_records, new_record)
    state.custody_challenge_index += 1
    # Postpone responder withdrawability
    responder.withdrawable_epoch = FAR_FUTURE_EPOCH
```
#### Custody responses
Verify that `len(block.body.custody_responses) <= MAX_CUSTODY_RESPONSES`.
For each `response` in `block.body.custody_responses`, run the following function:
```python
def process_custody_response(state: BeaconState,
                             response: CustodyResponse) -> None:
    chunk_challenge = next((record for record in state.custody_chunk_challenge_records if record.challenge_index == response.challenge_index), None)
    if chunk_challenge is not None:
        return process_chunk_challenge_response(state, response, chunk_challenge)

    bit_challenge = next((record for record in state.custody_bit_challenge_records if record.challenge_index == response.challenge_index), None)
    if bit_challenge is not None:
        return process_bit_challenge_response(state, response, bit_challenge)

    assert False
```
```python
def process_chunk_challenge_response(state: BeaconState,
                                     response: CustodyResponse,
                                     challenge: CustodyChunkChallengeRecord) -> None:
    # Verify chunk index
    assert response.chunk_index == challenge.chunk_index
    # Verify the chunk matches the crosslink data root
    assert verify_merkle_branch(
        leaf=hash_tree_root(response.chunk),
        branch=response.branch,
        depth=challenge.depth,
        index=response.chunk_index,
        root=challenge.crosslink_data_root,
    )
    # Clear the challenge
    records = state.custody_chunk_challenge_records
    records[records.index(challenge)] = CustodyChunkChallengeRecord()
    # Reward the proposer
    proposer_index = get_beacon_proposer_index(state)
    increase_balance(state, proposer_index, base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT)
```
```python
def process_bit_challenge_response(state: BeaconState,
response: CustodyResponse,
challenge: CustodyBitChallengeRecord) -> None:
# Verify chunk index
assert response.chunk_index < len(challenge.chunk_bits)
# Verify the chunk matches the crosslink data root
assert verify_merkle_branch(
leaf=hash_tree_root(response.chunk),
branch=response.branch,
depth=math.log2(next_power_of_two(len(challenge.chunk_bits))),
index=response.chunk_index,
root=challenge.crosslink_data_root,
)
# Verify the chunk bit does not match the challenge chunk bit
assert get_custody_chunk_bit(challenge.responder_key, response.chunk) != get_bitfield_bit(challenge.chunk_bits, response.chunk_index)
# Clear the challenge
records = state.custody_bit_challenge_records
records[records.index(challenge)] = CustodyBitChallengeRecord()
# Slash challenger
slash_validator(state, challenge.challenger_index, challenge.responder_index)
```
## Per-epoch processing
Run `process_challenge_deadlines(state)` immediately after `process_ejections(state)`:
```python
def process_challenge_deadlines(state: BeaconState) -> None:
for challenge in state.custody_chunk_challenge_records:
if get_current_epoch(state) > challenge.deadline:
slash_validator(state, challenge.responder_index, challenge.challenger_index)
records = state.custody_chunk_challenge_records
records[records.index(challenge)] = CustodyChunkChallengeRecord()
for challenge in state.custody_bit_challenge_records:
if get_current_epoch(state) > challenge.deadline:
slash_validator(state, challenge.responder_index, challenge.challenger_index)
records = state.custody_bit_challenge_records
records[records.index(challenge)] = CustodyBitChallengeRecord()
```
In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope):
```python
def eligible(index):
validator = state.validator_registry[index]
# Cannot exit if there are still open chunk challenges
if len([record for record in state.custody_chunk_challenge_records if record.responder_index == index]) > 0:
return False
# Cannot exit if you have not revealed all of your custody keys
elif epoch_to_custody_period(validator.activation_epoch) + validator.custody_reveal_index <= epoch_to_custody_period(validator.exit_epoch):
return False
# Cannot exit if you already have
elif validator.withdrawable_epoch < FAR_FUTURE_EPOCH:
return False
# Return minimum time
else:
return current_epoch >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWAL_EPOCHS
```

File diff suppressed because it is too large.


@@ -1,4 +1,26 @@
### Generalized Merkle tree index
**NOTICE**: This document is a work-in-progress for researchers and implementers.
## Table of Contents
<!-- TOC -->
- [Table of Contents](#table-of-contents)
- [Constants](#constants)
- [Generalized Merkle tree index](#generalized-merkle-tree-index)
- [SSZ object to index](#ssz-object-to-index)
- [Merkle multiproofs](#merkle-multiproofs)
- [MerklePartial](#merklepartial)
- [`SSZMerklePartial`](#sszmerklepartial)
- [Proofs for execution](#proofs-for-execution)
<!-- /TOC -->
## Constants
| Name | Value |
| - | - |
| `LENGTH_FLAG` | `2**64 - 1` |
## Generalized Merkle tree index
In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. Visually, this looks as follows:
@@ -12,16 +34,16 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth
Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function:
```python
def merkle_tree(leaves):
def merkle_tree(leaves: List[Bytes32]) -> List[Bytes32]:
o = [0] * len(leaves) + leaves
for i in range(len(leaves)-1, 0, -1):
o[i] = hash(o[i*2] + o[i*2+1])
for i in range(len(leaves) - 1, 0, -1):
o[i] = hash(o[i * 2] + o[i * 2 + 1])
return o
```
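As a quick check of the `2k`/`2k+1` property, here is a small usage sketch of the `merkle_tree` function above, with SHA-256 standing in for `hash` (an assumption of this sketch):
```python
from hashlib import sha256

def hash(data: bytes) -> bytes:
    return sha256(data).digest()

# merkle_tree(...) as defined above
leaves = [bytes([i]) * 32 for i in range(4)]
o = merkle_tree(leaves)
assert o[1] == hash(o[2] + o[3])  # generalized index 1 is the root
assert o[2] == hash(o[4] + o[5])  # children of node k sit at 2k and 2k+1
```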
We will define Merkle proofs in terms of generalized indices.
### SSZ object to index
## SSZ object to index
We can describe the hash tree of any SSZ object, rooted in `hash_tree_root(object)`, as a binary Merkle tree whose depth may vary. For example, an object `{x: bytes32, y: List[uint64]}` would look as follows:
@@ -36,34 +58,51 @@ y_data_root len(y)
.......
```
We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo[5]`. We'll describe paths as lists: in these three cases they are `["x"]`, `["y", "len"]` and `["y", 5]` respectively. We can now define a function `get_generalized_indices(object: Any, path: List[str OR int], root=1: int) -> int` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access.
We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`.
```python
def path_to_encoded_form(obj: Any, path: List[Union[str, int]]) -> List[int]:
    if len(path) == 0:
        return []
    elif path[0] == "__len__":
        assert len(path) == 1
        return [LENGTH_FLAG]
    elif isinstance(path[0], str) and hasattr(obj, "fields"):
        return [list(obj.fields.keys()).index(path[0])] + path_to_encoded_form(getattr(obj, path[0]), path[1:])
    elif isinstance(obj, (Vector, List)):
        return [path[0]] + path_to_encoded_form(obj[path[0]], path[1:])
    else:
        raise Exception("Unknown type / path")
```
We can now define a function `get_generalized_indices(object: Any, path: List[int], root: int=1) -> List[int]` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access.
```python
def get_generalized_indices(obj: Any, path: List[int], root: int=1) -> List[int]:
    if len(path) == 0:
        return [root]
    elif isinstance(obj, Vector):
        items_per_chunk = (32 // len(serialize(obj[0]))) if isinstance(obj[0], int) else 1
        new_root = root * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk
        return get_generalized_indices(obj[path[0]], path[1:], new_root)
    elif isinstance(obj, List) and path[0] == LENGTH_FLAG:
        return [root * 2 + 1]
    elif isinstance(obj, List) and isinstance(path[0], int):
        assert path[0] < len(obj)
        items_per_chunk = (32 // len(serialize(obj[0]))) if isinstance(obj[0], int) else 1
        new_root = root * 2 * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk
        return [root * 2 + 1] + get_generalized_indices(obj[path[0]], path[1:], new_root)
    elif hasattr(obj, "fields"):
        field = list(obj.fields.keys())[path[0]]
        new_root = root * next_power_of_2(len(obj.fields)) + path[0]
        return get_generalized_indices(getattr(obj, field), path[1:], new_root)
    else:
        raise Exception("Unknown type / path")
```
## Merkle multiproofs
We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (i.e. generalized indices 8, 9, 14):
```
.
@ -74,19 +113,12 @@ x x . . . . x *
. are unused nodes, * are used nodes, x are the values we are trying to prove. Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary nodes, only one node more than would be required to prove a single value. Normally the efficiency gains are not quite that extreme, but the savings relative to individual Merkle proofs are still significant. As a rule of thumb, a multiproof for k nodes at the same level of an n-node tree has size `k * (n/k + log(n/k))`.
Here is code for creating and verifying a multiproof. First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require:
```python
def get_proof_indices(tree_indices: List[int]) -> List[int]:
    # Get all indices touched by the proof
    maximal_indices = set()
    for i in tree_indices:
        x = i
        while x > 1:
@ -94,7 +126,7 @@ def get_proof_indices(tree_indices: List[int]) -> List[int]:
            x //= 2
    maximal_indices = tree_indices + sorted(list(maximal_indices))[::-1]
    # Get indices that cannot be recalculated from earlier indices
    redundant_indices = set()
    proof = []
    for index in maximal_indices:
        if index not in redundant_indices:
@ -105,30 +137,48 @@ def get_proof_indices(tree_indices: List[int]) -> List[int]:
                    break
                index //= 2
    return [i for i in proof if i not in tree_indices]
```
Generating a proof is simply a matter of taking the node of the SSZ hash tree with the union of the given generalized indices for each index given by `get_proof_indices`, and outputting the list of nodes in the same order.
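Concretely, given the linear tree representation produced by `merkle_tree` above, proof generation could look like this sketch (the helper name `get_multi_proof` is illustrative, not part of the spec):
```python
def get_multi_proof(tree: List[Bytes32], indices: List[int]) -> List[Bytes32]:
    # Read off the auxiliary nodes, in the order prescribed by get_proof_indices
    return [tree[i] for i in get_proof_indices(indices)]
```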
Here is the verification function:
```python
def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], proof: List[Bytes32]) -> bool:
    tree = {}
    for index, leaf in zip(indices, leaves):
        tree[index] = leaf
    for index, proof_item in zip(get_proof_indices(indices), proof):
        tree[index] = proof_item
    index_queue = sorted(tree.keys())[:-1]
    i = 0
    while i < len(index_queue):
        index = index_queue[i]
        if index >= 2 and index ^ 1 in tree:
            tree[index // 2] = hash(tree[index - index % 2] + tree[index - index % 2 + 1])
            index_queue.append(index // 2)
        i += 1
    return (indices == []) or (1 in tree and tree[1] == root)
```
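Putting the pieces together, the 8-leaf example from above can be checked end to end; this sketch reuses `merkle_tree` and the illustrative `get_multi_proof` from earlier:
```python
leaves = [bytes([i]) * 32 for i in range(8)]
tree = merkle_tree(leaves)
indices = [8, 9, 14]  # generalized indices of leaves 0, 1 and 6
proof = get_multi_proof(tree, indices)
assert verify_multi_proof(tree[1], indices, [tree[i] for i in indices], proof)
```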
## MerklePartial
We define:
### `SSZMerklePartial`
```python
{
"root": "bytes32",
"indices": ["uint64"],
"values": ["bytes32"],
"proof": ["bytes32"]
}
```
### Proofs for execution
We define `MerklePartial(f, arg1, arg2..., focus=0)` as being a `SSZMerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`.
Ideally, any function which accepts an SSZ object should also be able to accept a `SSZMerklePartial` object as a substitute.

View File

@ -1,64 +1,91 @@
# Beacon Chain Light Client Syncing
__NOTICE__: This document is a work-in-progress for researchers and implementers. One of the design goals of the eth2 beacon chain is light-client friendliness, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains.
## Table of Contents
<!-- TOC -->
- [Beacon Chain Light Client Syncing](#beacon-chain-light-client-syncing)
    - [Table of Contents](#table-of-contents)
    - [Preliminaries](#preliminaries)
        - [Expansions](#expansions)
        - [`get_active_validator_indices`](#get_active_validator_indices)
        - [`MerklePartial`](#merklepartial)
        - [`PeriodData`](#perioddata)
        - [`get_earlier_start_epoch`](#get_earlier_start_epoch)
        - [`get_later_start_epoch`](#get_later_start_epoch)
        - [`get_period_data`](#get_period_data)
    - [Light client state](#light-client-state)
    - [Updating the shuffled committee](#updating-the-shuffled-committee)
    - [Computing the current committee](#computing-the-current-committee)
    - [Verifying blocks](#verifying-blocks)
<!-- /TOC -->
## Preliminaries
### Expansions
We define an "expansion" of an object as an object where a field in an object that is meant to represent the `hash_tree_root` of another object is replaced by the object. Note that defining expansions is not a consensus-layer-change; it is merely a "re-interpretation" of the object. Particularly, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (e.g. `BeaconBlockHeader` is a summary of `BeaconBlock`).
We define two expansions:
* `ExtendedBeaconState`, which is identical to a `BeaconState` except `latest_active_index_roots: List[Bytes32]` is replaced by `latest_active_indices: List[List[ValidatorIndex]]`, where `BeaconState.latest_active_index_roots[i] = hash_tree_root(ExtendedBeaconState.latest_active_indices[i])`.
* `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState`.
### `get_active_validator_indices`
Note that there is now a new way to compute `get_active_validator_indices`:
```python
def get_active_validator_indices(state: ExtendedBeaconState, epoch: Epoch) -> List[ValidatorIndex]:
    return state.latest_active_indices[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH]
```
Note that it takes `state` instead of `state.validator_registry` as an argument. This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments.
### `MerklePartial`
A `MerklePartial(f, *args)` is an object that contains a minimal Merkle proof needed to compute `f(*args)`. A `MerklePartial` can be used in place of a regular SSZ object, though a computation would return an error if it attempts to access part of the object that is not contained in the proof.
We add a data type `PeriodData` and four helpers:
### `PeriodData`
```python
{
    'validator_count': 'uint64',
    'seed': 'bytes32',
    'committee': [Validator],
}
```
### `get_earlier_start_epoch`
```python
def get_earlier_start_epoch(slot: Slot) -> int:
    return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD * 2
```
### `get_later_start_epoch`
```python
def get_later_start_epoch(slot: Slot) -> int:
    return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD
```
### `get_period_data`
```python
def get_period_data(block: ExtendedBeaconBlock, shard_id: Shard, later: bool) -> PeriodData:
    period_start = get_later_start_epoch(block.slot) if later else get_earlier_start_epoch(block.slot)
    validator_count = len(get_active_validator_indices(block.state, period_start))
    committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1
    indices = get_period_committee(block.state, shard_id, period_start, 0, committee_count)
    return PeriodData(
        validator_count,
        generate_seed(block.state, period_start),
        [block.state.validator_registry[i] for i in indices],
    )
```
@ -68,48 +95,49 @@ A light client will keep track of:
* A random `shard_id` in `[0...SHARD_COUNT-1]` (selected once and retained forever)
* A block header that they consider to be finalized (`finalized_header`) and do not expect to revert.
* `later_period_data = get_period_data(finalized_header, shard_id, later=True)`
* `earlier_period_data = get_period_data(finalized_header, shard_id, later=False)`
We use the struct `ValidatorMemory` to keep track of these variables.
### Updating the shuffled committee
If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_period_data, validator_memory.finalized_header, shard_id, later=True)`. It can then compute:
```python
earlier_period_data = later_period_data
later_period_data = get_period_data(new_committee_proof, finalized_header, shard_id, later=True)
```
The maximum size of a proof is `128 * ((22-7) * 32 + 110) = 75520` bytes for validator records and `(22-7) * 32 + 128 * 8 = 1504` for the active index proof (much smaller because the relevant active indices are all beside each other in the Merkle tree). This needs to be done once per `PERSISTENT_COMMITTEE_PERIOD` epochs (2048 epochs / 9 days), or ~38 bytes per epoch.
## Computing the current committee
Here is a helper to compute the committee at a slot given the maximal earlier and later committees:
```python
def compute_committee(header: BeaconBlockHeader,
                      validator_memory: ValidatorMemory) -> List[ValidatorIndex]:
    earlier_validator_count = validator_memory.earlier_period_data.validator_count
    later_validator_count = validator_memory.later_period_data.validator_count
    maximal_earlier_committee = validator_memory.earlier_period_data.committee
    maximal_later_committee = validator_memory.later_period_data.committee
    earlier_start_epoch = get_earlier_start_epoch(header.slot)
    later_start_epoch = get_later_start_epoch(header.slot)
    epoch = slot_to_epoch(header.slot)

    committee_count = max(
        earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE),
        later_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE),
    ) + 1

    def get_offset(count: int, end: bool) -> int:
        return get_split_offset(
            count,
            SHARD_COUNT * committee_count,
            validator_memory.shard_id * committee_count + (1 if end else 0),
        )

    actual_earlier_committee = maximal_earlier_committee[
        0:get_offset(earlier_validator_count, True) - get_offset(earlier_validator_count, False)
    ]
@ -118,31 +146,30 @@ def compute_committee(header: BeaconBlockHeader,
    ]

    def get_switchover_epoch(index):
        return (
            bytes_to_int(hash(validator_memory.earlier_period_data.seed + int_to_bytes3(index))[0:8]) %
            PERSISTENT_COMMITTEE_PERIOD
        )

    # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from
    # later committee; return a sorted list of the union of the two, deduplicated
    return sorted(list(set(
        [i for i in actual_earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] +
        [i for i in actual_later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)]
    )))
```
Note that this method makes use of the fact that the committee for any given shard always starts and ends at the same validator index independently of the committee count (this is because the validator set is split into `SHARD_COUNT * committee_count` slices but the first slice of a shard is a multiple `committee_count * i`, so the start of the slice is `n * committee_count * i // (SHARD_COUNT * committee_count) = n * i // SHARD_COUNT`, using the slightly nontrivial algebraic identity `(x * a) // (a * b) == x // b`).
## Verifying blocks
If a client wants to update its `finalized_header` it asks the network for a `BlockValidityProof`, which is simply:
```python
{
    'header': BeaconBlockHeader,
    'shard_aggregate_signature': 'bytes96',
    'shard_bitfield': 'bytes',
    'shard_parent_block': ShardBlock,
}
```
@ -150,23 +177,23 @@ The verification procedure is as follows:
```python
def verify_block_validity_proof(proof: BlockValidityProof, validator_memory: ValidatorMemory) -> bool:
    assert proof.shard_parent_block.beacon_chain_root == hash_tree_root(proof.header)
    committee = compute_committee(proof.header, validator_memory)
    # Verify that we have >=50% support
    support_balance = sum([v.effective_balance for i, v in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True])
    total_balance = sum([v.effective_balance for i, v in enumerate(committee)])
    assert support_balance * 2 > total_balance
    # Verify shard attestations
    group_public_key = bls_aggregate_pubkeys([
        v.pubkey for index, v in enumerate(committee)
        if get_bitfield_bit(proof.shard_bitfield, index) is True
    ])
    assert bls_verify(
        pubkey=group_public_key,
        message_hash=hash_tree_root(proof.shard_parent_block),
        signature=proof.shard_aggregate_signature,
        domain=get_domain(state, slot_to_epoch(proof.shard_parent_block.slot), DOMAIN_SHARD_ATTESTER),
    )
```
The size of this proof is only 200 (header) + 96 (signature) + 16 (bitfield) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_root, ShardBlock)`, which would cut off ~220 bytes.

View File

@ -0,0 +1,46 @@
ETH 2.0 Networking Spec - Messaging
===
# Abstract
This specification describes how individual Ethereum 2.0 messages are represented on the wire.
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119.
# Motivation
This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the ETH 2.0 specification evolves.
Note that while `libp2p` is the chosen networking stack for Ethereum 2.0, as of this writing some clients do not have workable `libp2p` implementations. To allow those clients to communicate, we define a message envelope that includes the body's compression, encoding, and body length. Once `libp2p` is available across all implementations, this message envelope will be removed because `libp2p` will negotiate the values defined in the envelope upfront.
# Specification
## Message Structure
An ETH 2.0 message consists of an envelope that defines the message's compression, encoding, and length followed by the body itself.
Visually, a message looks like this:
```
+--------------------------+
|    compression nibble    |
+--------------------------+
|     encoding nibble      |
+--------------------------+
|   body length (uint64)   |
+--------------------------+
|                          |
|           body           |
|                          |
+--------------------------+
Clients MUST ignore messages with malformed bodies. The compression/encoding nibbles MUST be one of the following values:
## Compression Nibble Values
- `0x0`: no compression
## Encoding Nibble Values
- `0x1`: SSZ
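For illustration, a minimal sketch of envelope encoding and decoding follows. The packing of the two nibbles into a single byte and the little-endian length encoding are assumptions for the sketch; the structure above only fixes the field order:
```python
import struct

def encode_message(body: bytes, compression: int = 0x0, encoding: int = 0x1) -> bytes:
    # Assumption: compression in the high nibble, encoding in the low nibble
    header = bytes([(compression << 4) | encoding])
    # Assumption: little-endian uint64 body length, matching SSZ's endianness
    return header + struct.pack("<Q", len(body)) + body

def decode_message(data: bytes) -> tuple:
    compression, encoding = data[0] >> 4, data[0] & 0x0F
    (length,) = struct.unpack("<Q", data[1:9])
    body = data[9:9 + length]
    assert len(body) == length  # ignore messages with malformed bodies
    return compression, encoding, body
```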

View File

@ -0,0 +1,32 @@
ETH 2.0 Networking Spec - Node Identification
===
# Abstract
This specification describes how Ethereum 2.0 nodes identify and address each other on the network.
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119.
# Specification
Clients use Ethereum Node Records (as described in [EIP-778](http://eips.ethereum.org/EIPS/eip-778)) to discover one another. Each ENR includes, among other things, the following keys:
- The node's IP.
- The node's TCP port.
- The node's public key.
For clients to be addressable, their ENR responses MUST contain all of the above keys. Clients MUST verify the signature of any received ENRs, and disconnect from peers whose ENR signatures are invalid. Each node's public key MUST be unique.
The keys above are enough to construct a [multiaddr](https://github.com/multiformats/multiaddr) for use with the rest of the `libp2p` stack.
It is RECOMMENDED that clients set their TCP port to the default of `9000`.
## Peer ID Generation
The `libp2p` networking stack identifies peers via a "peer ID." Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key struct (serialized in protobuf, refer to the [Peer ID spec](https://github.com/libp2p/specs/pull/100)). `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID.
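As a rough sketch (not a substitute for the canonical `go-libp2p-crypto` behavior), the multihash computation looks like this; the input is assumed to already be the protobuf-serialized public key struct:
```python
import hashlib

def peer_id_multihash(pubkey_protobuf: bytes) -> bytes:
    # multihash layout: <hash function code> <digest length> <digest>;
    # 0x12 is the multihash code for SHA2-256 and 0x20 (32) the digest length
    digest = hashlib.sha256(pubkey_protobuf).digest()
    return bytes([0x12, 0x20]) + digest
```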
# See Also
- [multiaddr](https://github.com/multiformats/multiaddr)
- [multihash](https://multiformats.io/multihash/)
- [go-libp2p-crypto](https://github.com/libp2p/go-libp2p-crypto)

View File

@ -0,0 +1,292 @@
ETH 2.0 Networking Spec - RPC Interface
===
# Abstract
The Ethereum 2.0 networking stack uses two modes of communication: a broadcast protocol that gossips information to interested parties via GossipSub, and an RPC protocol that retrieves information from specific clients. This specification defines the RPC protocol.
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL", NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119.
# Dependencies
This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification.md), and [Beacon Chain](../core/0_beacon-chain.md) specifications.
# Specification
## Message Schemas
Message body schemas are notated like this:
```
(
    field_name_1: type
    field_name_2: type
)
```
Embedded types are serialized as SSZ Containers unless otherwise noted.
All referenced data structures can be found in the [0-beacon-chain](../core/0_beacon-chain.md#data-structures) specification.
## `libp2p` Protocol Names
A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. Peers exchange supported protocol IDs via the `Identify` protocol upon connection. When opening a new stream, peers pin a particular protocol ID to it, and the stream remains contextualised thereafter. Since messages are sent inside a stream, they do not need to bear the protocol ID.
## RPC-Over-`libp2p`
To facilitate RPC-over-`libp2p`, a single protocol name is used: `/eth/serenity/beacon/rpc/1`. The version number in the protocol name is neither backwards nor forwards compatible, and will be incremented whenever changes to the below structures are required.
Remote method calls are wrapped in a "request" structure:
```
(
    id: uint64
    method_id: uint16
    body: Request
)
```
and their corresponding responses are wrapped in a "response" structure:
```
(
    id: uint64
    response_code: uint16
    result: bytes
)
```
If an error occurs, a variant of the response structure is returned:
```
(
    id: uint64
    response_code: uint16
    result: bytes
)
```
The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](https://www.jsonrpc.org/specification). Specifically:
1. The `id` member is REQUIRED.
2. The `id` member in the response MUST be the same as the value of the `id` in the request.
3. The `id` member MUST be unique within the context of a single connection. Monotonically increasing `id`s are RECOMMENDED.
4. The `method_id` member is REQUIRED.
5. The `result` member is REQUIRED on success.
6. The `result` member is OPTIONAL on errors, and MAY contain additional information about the error.
7. `response_code` MUST be `0` on success.
Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. Note that this implies that responses MAY arrive in a different order than requests.
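A sketch of per-connection `id` management follows, with illustrative names; responses are matched back to requests purely by `id`:
```python
from itertools import count

class RpcSession:
    def __init__(self):
        self._ids = count()  # monotonically increasing ids, as RECOMMENDED
        self._pending = {}   # id -> callback awaiting the matching response

    def send(self, method_id: int, body: bytes, on_response) -> dict:
        request = {"id": next(self._ids), "method_id": method_id, "body": body}
        self._pending[request["id"]] = on_response
        return request  # to be serialized and written to the stream

    def handle_response(self, id: int, response_code: int, result: bytes) -> None:
        # Responses MAY arrive out of order; the id ties them back to a request
        self._pending.pop(id)(response_code, result)
```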
The "method ID" fields in the below messages refer to the `method` field in the request structure above.
The first 1,000 values in `response_code` are reserved for system use. The following response codes are predefined:
1. `0`: No error.
2. `10`: Parse error.
3. `20`: Invalid request.
4. `30`: Method not found.
5. `40`: Server error.
### Alternative for Non-`libp2p` Clients
Some clients are waiting for `libp2p` implementations in their respective languages. As such, they MAY listen for raw TCP messages on port `9000`. To distinguish RPC messages from other messages on that port, a byte prefix of `ETH` (`0x455448`) MUST be prepended to all messages. This option will be removed once `libp2p` is ready in all supported languages.
## Messages
### Hello
**Method ID:** `0`
**Body**:
```
(
    network_id: uint8
    chain_id: uint64
    latest_finalized_root: bytes32
    latest_finalized_epoch: uint64
    best_root: bytes32
    best_slot: uint64
)
```
Clients exchange `hello` messages upon connection, forming a two-phase handshake. The first message the initiating client sends MUST be the `hello` message. In response, the receiving client MUST respond with its own `hello` message.
Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions:
1. If `network_id` belongs to a different chain, since the client definitionally cannot sync with this peer.
2. If the `latest_finalized_root` shared by the peer is not in the client's chain at the expected epoch. For example, if Peer 1 in the diagram below has `(root, epoch)` of `(A, 5)` and Peer 2 has `(B, 3)`, Peer 1 would disconnect because it knows that `B` is not the root in their chain at epoch 3:
```
Root A
+---+
|xxx| +----+ Epoch 5
+-+-+
^
|
+-+-+
| | +----+ Epoch 4
+-+-+
Root B ^
|
+---+ +-+-+
|xxx+<---+--->+ | +----+ Epoch 3
+---+ | +---+
|
+-+-+
| | +-----------+ Epoch 2
+-+-+
^
|
+-+-+
| | +-----------+ Epoch 1
+---+
```
Once the handshake completes, the client with the higher `latest_finalized_epoch` or `best_slot` (if the clients have equal `latest_finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e., RPC method `10`).
### Goodbye
**Method ID:** `1`
**Body:**
```
(
    reason: uint64
)
```
Clients MAY send `goodbye` messages upon disconnection. The `reason` field MAY be one of the following values:
- `1`: Client shut down.
- `2`: Irrelevant network.
- `3`: Fault/error.
Clients MAY define custom goodbye reasons as long as the value is larger than `1000`.
### Get Status
**Method ID:** `2`
**Request Body:**
```
(
    sha: bytes32
    user_agent: bytes
    timestamp: uint64
)
```
**Response Body:**
```
(
    sha: bytes32
    user_agent: bytes
    timestamp: uint64
)
```
Returns metadata about the remote node.
### Request Beacon Block Roots
**Method ID:** `10`
**Request Body**
```
(
    start_slot: uint64
    count: uint64
)
```
**Response Body:**
```
# BlockRootSlot
(
    block_root: bytes32
    slot: uint64
)

(
    roots: []BlockRootSlot
)
```
Requests a list of block roots and slots from the peer. The `count` parameter MUST be less than or equal to `32768`. The slots MUST be returned in ascending slot order.
### Beacon Block Headers
**Method ID:** `11`
**Request Body**
```
(
    start_root: HashTreeRoot
    start_slot: uint64
    max_headers: uint64
    skip_slots: uint64
)
```
**Response Body:**
```
(
    headers: []BeaconBlockHeader
)
```
Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slot `2` with a `skip_slots` value of `1` would return the blocks at slots `[2, 4, 6, 8, 10]`. In cases where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot `3` were also empty, the array would contain `[2, 6, 8, 10]` - i.e., duplicate blocks MUST be collapsed. A `skip_slots` value of `0` returns all blocks.
The `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each peer has a different starting block in order to populate block data.
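Ignoring the empty-slot fallback described above, the set of slots a request targets can be sketched as:
```python
from typing import List

def requested_slots(start_slot: int, max_headers: int, skip_slots: int) -> List[int]:
    # skip_slots slots are skipped between consecutive blocks, so the stride is skip_slots + 1
    return [start_slot + i * (skip_slots + 1) for i in range(max_headers)]

assert requested_slots(2, 5, 1) == [2, 4, 6, 8, 10]
```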
### Beacon Block Bodies
**Method ID:** `12`
**Request Body:**
```
(
    block_roots: []HashTreeRoot
)
```
**Response Body:**
```
(
    block_bodies: []BeaconBlockBody
)
```
Requests the `block_bodies` associated with the provided `block_roots` from the peer. Responses MUST return `block_roots` in the order provided in the request. If the receiver does not have a particular `block_root`, it MUST return a zero-value `block_body` (i.e., a `block_body` container with all zero fields).
### Beacon Chain State
**Note:** This section is preliminary, pending the definition of the data structures to be transferred over the wire during fast sync operations.
**Method ID:** `13`
**Request Body:**
```
(
    hashes: []HashTreeRoot
)
```
**Response Body:** TBD
Requests contain the hashes of Merkle tree nodes that, when merkleized, yield the block's `state_root`.
The response will contain the values that, when hashed, yield the hashes inside the request body.

View File

@ -1,4 +1,4 @@
# SimpleSerialize (SSZ)
This is a **work in progress** describing typing, serialization and Merkleization of Ethereum 2.0 objects.
@ -9,10 +9,11 @@ This is a **work in progress** describing typing, serialization and Merkleizatio
    - [Basic types](#basic-types)
    - [Composite types](#composite-types)
    - [Aliases](#aliases)
    - [Default values](#default-values)
- [Serialization](#serialization)
    - [`"uintN"`](#uintn)
    - [`"bool"`](#bool)
    - [Containers, vectors, lists](#containers-vectors-lists)
- [Deserialization](#deserialization)
- [Merkleization](#merkleization)
- [Self-signed containers](#self-signed-containers)
@ -22,8 +23,9 @@ This is a **work in progress** describing typing, serialization and Merkleizatio
| Name | Value | Description |
|-|-|-|
| `BYTES_PER_CHUNK` | `32` | Number of bytes per chunk. |
| `BYTES_PER_LENGTH_OFFSET` | `4` | Number of bytes per serialized length offset. |
| `BITS_PER_BYTE` | `8` | Number of bits per byte. |
## Typing
### Basic types
@ -33,13 +35,15 @@ This is a **work in progress** describing typing, serialization and Merkleizatio
### Composite types
* **container**: ordered heterogeneous collection of values
    * key-pair curly bracket notation `{}`, e.g. `{"foo": "uint64", "bar": "bool"}`
* **vector**: ordered fixed-length homogeneous collection of values
    * angle bracket notation `[type, N]`, e.g. `["uint64", N]`
* **list**: ordered variable-length homogeneous collection of values
    * angle bracket notation `[type]`, e.g. `["uint64"]`
We recursively define "variable-size" types to be lists and all types that contains a variable-size type. All other types are said to be "fixed-size".
### Aliases
For convenience we alias:
@ -48,41 +52,49 @@ For convenience we alias:
* `"bytes"` to `["byte"]` (this is *not* a basic type)
* `"bytesN"` to `["byte", N]` (this is *not* a basic type)
### Default values
The default value of a type upon initialization is recursively defined using `0` for `"uintN"`, `False` for `"bool"`, and `[]` for lists.
## Serialization
We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`.
> *Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signing_root`, `is_variable_size`, etc.) objects implicitly carry their type.
### `"uintN"`
```python
assert N in [8, 16, 32, 64, 128, 256]
return value.to_bytes(N // 8, "little")
```
### `"bool"`
```python
assert value in (True, False)
return b"\x01" if value is True else b"\x00"
```
### Containers, vectors, lists
```python
# Recursively serialize
fixed_parts = [serialize(element) if not is_variable_size(element) else None for element in value]
variable_parts = [serialize(element) if is_variable_size(element) else b"" for element in value]

# Compute and check lengths
fixed_lengths = [len(part) if part != None else BYTES_PER_LENGTH_OFFSET for part in fixed_parts]
variable_lengths = [len(part) for part in variable_parts]
assert sum(fixed_lengths + variable_lengths) < 2**(BYTES_PER_LENGTH_OFFSET * BITS_PER_BYTE)

# Interleave offsets of variable-size parts with fixed-size parts
variable_offsets = [serialize(sum(fixed_lengths + variable_lengths[:i])) for i in range(len(value))]
fixed_parts = [part if part != None else variable_offsets[i] for i, part in enumerate(fixed_parts)]

# Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts
return b"".join(fixed_parts + variable_parts)
```
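As a hand-worked example of the offset scheme above (comments only, not runnable code), consider a container `{"a": "uint64", "b": ["uint8"], "c": "uint64"}` with `a=1`, `b=[2, 3]`, `c=4`:
```python
# fixed_parts       = [serialize(a), None, serialize(c)]  # 8 bytes, offset placeholder, 8 bytes
# fixed_lengths     = [8, BYTES_PER_LENGTH_OFFSET, 8]     # = [8, 4, 8]
# variable_parts    = [b"", serialize(b), b""]            # b serializes to 2 bytes
# offset of b       = sum(fixed_lengths) + 0 = 20, encoded as a 4-byte little-endian offset
# result (22 bytes) = serialize(a) + offset(20) + serialize(c) + serialize(b)
```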
## Deserialization
@ -99,24 +111,24 @@ We first define helper functions:
We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
* `merkleize(pack(value))` if `value` is a basic object or a vector of basic objects
* `mix_in_length(merkleize(pack(value)), len(value))` if `value` is a list of basic objects
* `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container
* `mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))` if `value` is a list of composite objects
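The helper definitions are elided in this diff; a minimal sketch consistent with the rules above (with `hash`, `next_power_of_2` and `BYTES_PER_CHUNK` taken from the spec) could read:
```python
def merkleize(chunks: List[bytes]) -> bytes:
    # Pad with zero chunks to the next power of two, then hash pairwise up to the root
    chunks = list(chunks) + [b"\x00" * BYTES_PER_CHUNK] * (next_power_of_2(len(chunks)) - len(chunks))
    while len(chunks) > 1:
        chunks = [hash(chunks[i] + chunks[i + 1]) for i in range(0, len(chunks), 2)]
    return chunks[0]

def mix_in_length(root: bytes, length: int) -> bytes:
    # Hash the root together with the little-endian uint256 length
    return hash(root + length.to_bytes(32, "little"))
```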
## Self-signed containers
Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signing_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`.
## Implementations
| Language | Project | Maintainer | Implementation |
|-|-|-|-|
| Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) |
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) |
| Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) |
| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) |
| Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) |
| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz](https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz) |
| Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) |

View File

@ -1,71 +0,0 @@
# General test format [WIP]
This document defines the general YAML format to which all tests should conform. Testing specifications in Eth2.0 are still a work in progress. _Expect breaking changes_
## ToC
* [About](#about)
* [YAML Fields](#yaml-fields)
* [Example test suite](#example-test-suite)
## About
Ethereum 2.0 uses YAML as the format for all cross client tests. This document describes at a high level the general format to which all test files should conform.
The particular formats of specific types of tests (test suites) are defined in separate documents.
## YAML fields
`title` _(required)_
`summary` _(optional)_
`test_suite` _(required)_ string defining the test suite to which the test cases conform
`fork` _(required)_ production release versioning
`version` _(required)_ version for particular test document
`test_cases` _(required)_ list of test cases each of which is formatted to conform to the `test_case` standard defined by `test_suite`. All test cases have optional `name` and `description` string fields.
## Example test suite
`shuffle` is a test suite that defines test cases for the `shuffle()` helper function defined in the `beacon-chain` spec.
Test cases that conform to the `shuffle` test suite have the following fields:
* `input` _(required)_ the list of items passed into `shuffle()`
* `output` _(required)_ the expected list returned by `shuffle()`
* `seed` _(required)_ the seed of entropy passed into `shuffle()`
As for all test cases, `name` and `description` are optional string fields.
The following is a sample YAML document for the `shuffle` test suite:
```yaml
title: Shuffling Algorithm Tests
summary: Test vectors for shuffling a list based upon a seed using `shuffle`
test_suite: shuffle
fork: tchaikovsky
version: 1.0
test_cases:
- input: []
output: []
seed: !!binary ""
- name: boring_list
description: List with a single element, 0
input: [0]
output: [0]
seed: !!binary ""
- input: [255]
output: [255]
seed: !!binary ""
- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5]
output: [1, 6, 4, 1, 6, 6, 2, 2, 4, 5]
seed: !!binary ""
- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
output: [4, 7, 10, 13, 3, 1, 2, 9, 12, 6, 11, 8, 5]
seed: !!binary ""
- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5]
output: [6, 65, 2, 5, 4, 2, 6, 6, 1, 1]
seed: !!binary |
JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
```

View File

@ -0,0 +1,198 @@
# General test format
This document defines the YAML format and structure used for ETH 2.0 testing.
## ToC
* [About](#about)
* [Glossary](#glossary)
* [Test format philosophy](#test-format-philosophy)
* [Test Suite](#test-suite)
* [Config](#config)
* [Fork-timeline](#fork-timeline)
* [Config sourcing](#config-sourcing)
* [Test structure](#test-structure)
## About
Ethereum 2.0 uses YAML as the format for all cross client tests. This document describes at a high level the general format to which all test files should conform.
### Test-case formats
The particular formats of specific types of tests (test suites) are defined in separate documents.
Test formats:
- [`bls`](./bls/README.md)
- [`operations`](./operations/README.md)
- [`shuffling`](./shuffling/README.md)
- [`ssz`](./ssz/README.md)
- More formats are planned, see tracking issues for CI/testing
## Glossary
- `generator`: a program that outputs one or more `suite` files.
- A generator should only output one `type` of test.
- A generator is free to output multiple `suite` files, optionally with different `handler`s.
- `type`: the specialization of one single `generator`.
- `suite`: a YAML file with:
- a header: describes the `suite`, and defines what the `suite` is for
- a list of test cases
- `runner`: where a generator is a *"producer"*, this is the *"consumer"*.
- A `runner` focuses on *only one* `type`, and each type has *only one* `runner`.
- `handler`: a `runner` may be too limited sometimes, you may have a `suite` with a specific focus that requires a different format.
To facilitate this, you specify a `handler`: the runner can deal with the format by using the specified handler.
Using a `handler` in a `runner` is optional.
- `case`: a test case, an entry in the `test_cases` list of a `suite`. A case can be anything in general,
but its format should be well-defined in the documentation corresponding to the `type` (and `handler`).\
A test has the same exact configuration and fork context as the other entries in the `case` list of its `suite`.
- `forks_timeline`: a fork timeline definition, a YAML file containing a key for each fork-name, and an epoch number as value.
## Test format philosophy
### Config design
After long discussion, the following types of configured constants were identified:
- Never changing: genesis data
- Changing, but reliant on old value: e.g. an epoch time may change, but if you want to do the conversion
`(genesis data, timestamp) -> epoch number` you end up needing both constants.
- Changing, but kept around during fork transition: finalization may take a while,
e.g. an executable has to deal with new deposits and old deposits at the same time. Another example may be economic constants.
- Additional, backwards compatible: new constants are introduced for later phases
- Changing: there is a very small chance some constant may really be *replaced*.
In this off-chance, it is likely better to include it as an additional variable,
and some clients may simply stop supporting the old one, if they do not want to sync from genesis.
Based on these types of changes, we model the config as a list of key-value pairs
that only grows with every fork (they may change in development versions of forks, however; git manages this).
With this approach, configurations are backwards compatible (older clients ignore unknown variables) and easy to maintain.
### Fork config design
There are two types of fork-data:
1) timeline: when does a fork take place?
2) coverage: what forks are covered by a test?
The first is neat to have as a separate form: we prevent duplication, and can run with different presets
(e.g. fork timeline for a minimal local test, for a public testnet, or for mainnet)
The second does not affect the result of the tests, it just states what is covered by the tests,
so that the right suites can be executed to see coverage for a certain fork.
For some types of tests, it may be beneficial to ensure it runs exactly the same, with any given fork "active".
Test-formats can be explicit on the need to repeat a test with different forks being "active",
but generally tests run only once.
### Test completeness
Tests should be independent of any sync-data. If one wants to run a test, the input data should be available from the YAML.
The aim is to provide clients with a well-defined scope of work to run a particular set of test-suites.
- Clients that are complete are expected to contribute to testing, seeking out better resources to achieve conformance with the spec and with other clients.
- Clients that are not complete in functionality can choose to ignore suites that use certain test-runners, or specific handlers of these test-runners.
- Clients that are on older versions can test their work based on older releases of the generated tests, and catch up with newer releases when possible.
## Test Suite
```
title: <string, short, one line> -- Display name for the test suite
summary: <string, average, 1-3 lines> -- Summarizes the test suite
forks_timeline: <string, reference to a fork definition file, without extension> -- Used to determine the forking timeline
forks: <list of strings> -- Defines the coverage. Test-runner code may decide to re-run with the different forks "activated", when applicable.
config: <string, reference to a config file, without extension> -- Used to determine which set of constants to run (possibly compile time) with
runner: <string, no spaces, python-like naming format> *MUST be consistent with folder structure*
handler: <string, no spaces, python-like naming format> *MUST be consistent with folder structure*
test_cases: <list, values being maps defining a test case each>
...
```
## Config
A configuration is a separate YAML file.
Separation of configuration and tests aims to:
- Prevent duplication of configuration
- Make all tests easy to upgrade (e.g. when a new config constant is introduced)
- Clearly define which constants to use
- Be shareable between clients, for cross-client short- or long-lived testnets
- Minimize the number of different constant permutations to compile as a client
Note: Some clients prefer compile-time constants and optimizations.
They should compile for each configuration once, and run the corresponding tests per build target.
The format is described in [`configs/constant_presets`](../../configs/constant_presets/README.md#format).
## Fork-timeline
A fork timeline is (preferably) loaded in as a configuration object into a client, as opposed to the constants configuration:
- we do not allocate or optimize any code based on epoch numbers
- when we transition from one fork to the other, it is preferred to stay online.
- we may decide on an epoch number for a fork based on external events (e.g. Eth1 log event),
a client should be able to activate a fork dynamically.
The format is described in [`configs/fork_timelines`](../../configs/fork_timelines/README.md#format).
## Config sourcing
The constants configurations are located in:
```
<specs repo root>/configs/constant_presets/<config name>.yaml
```
And copied by CI for testing purposes to:
```
<tests repo root>/configs/constant_presets/<config name>.yaml
```
The fork timelines are located in:
```
<specs repo root>/configs/fork_timelines/<timeline name>.yaml
```
And copied by CI for testing purposes to:
```
<tests repo root>/configs/fork_timelines/<timeline name>.yaml
```
## Test structure
To prevent the parsing of hundreds of different YAML files to test a specific test type,
or, even more specifically, just a handler, tests should be structured in the following nested form:
```
. <--- root of eth2.0 tests repository
├── bls <--- collection of handler for a specific test-runner, example runner: "bls"
│   ├── verify_msg <--- collection of test suites for a specific handler, example handler: "verify_msg". If no multiple handlers, use a dummy folder (e.g. "core"), and specify that in the yaml.
│   │   ├── verify_valid.yml .
│   │   ├── special_cases.yml . a list of test suites
│   │   ├── domains.yml .
│   │   ├── invalid.yml .
│   │   ... <--- more suite files (optional)
│   ... <--- more handlers
... <--- more test types
```
## Note for implementers
The basic pattern for test-suite loading and running is:
Iterate suites for given test-type, or sub-type (e.g. `operations > deposits`):
1. Filter test-suite, options:
   - Config: Load first few lines, load into YAML, and check `config`, either:
     - Pass the suite to the correct compiled target
     - Ignore the suite if running tests as part of a compiled target with different configuration
     - Load the correct configuration for the suite dynamically before running the suite
   - Select by file name
   - Filter for specific suites (e.g. for a specific fork)
2. Load the YAML
   - Optionally translate the data into applicable naming, e.g. `snake_case` to `PascalCase`
3. Iterate through the `test_cases`
4. Ask test-runner to allocate a new test-case (i.e. objectify the test-case, generalize it with a `TestCase` interface).
   Optionally pass raw test-case data to enable dynamic test-case allocation.
   1. Load test-case data into it.
   2. Make the test-case run.
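As an illustrative sketch of this pattern (assuming PyYAML; the names are not prescribed by this document), the loading boils down to:
```python
import yaml

def run_suite(path: str, runners: dict) -> None:
    with open(path) as f:
        suite = yaml.safe_load(f)
    # Dispatch on the runner (and optional handler) declared in the suite header
    run_case = runners[suite["runner"]]
    for case in suite["test_cases"]:
        run_case(suite.get("handler", "core"), suite["config"], case)
```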

View File

@ -0,0 +1,15 @@
# BLS tests
A test type for BLS. Primarily geared towards verifying the *integration* of any BLS library.
We do not recommend rolling your own crypto or using an untested BLS library.
The BLS test suite runner has the following handlers:
- [`aggregate_pubkeys`](./aggregate_pubkeys.md)
- [`aggregate_sigs`](./aggregate_sigs.md)
- [`msg_hash_g2_compressed`](./msg_hash_g2_compressed.md)
- [`msg_hash_g2_uncompressed`](./msg_hash_g2_uncompressed.md)
- [`priv_to_pub`](./priv_to_pub.md)
- [`sign_msg`](./sign_msg.md)
Note: signature-verification and aggregate-verify test cases are not yet supported.

View File

@ -0,0 +1,17 @@
# Test format: BLS pubkey aggregation
A BLS pubkey aggregation combines a series of pubkeys into a single pubkey.
## Test case format
```yaml
input: List[BLS Pubkey] -- list of input BLS pubkeys
output: BLS Pubkey -- expected output, single BLS pubkey
```
`BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`.
## Condition
The `aggregate_pubkeys` handler should aggregate the keys in the `input`, and the result should match the expected `output`.
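A test runner for this handler could be as simple as the following sketch, where `bls` stands for whatever library is under test and the aggregation function name is illustrative:
```python
def run_aggregate_pubkeys_case(bls, case: dict) -> None:
    pubkeys = [bytes.fromhex(p[2:]) for p in case["input"]]  # strip the 0x prefix
    expected = bytes.fromhex(case["output"][2:])
    assert bls.aggregate_pubkeys(pubkeys) == expected
```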

View File

@ -0,0 +1,17 @@
# Test format: BLS signature aggregation
A BLS signature aggregation combines a series of signatures into a single signature.
## Test case format
```yaml
input: List[BLS Signature] -- list of input BLS signatures
output: BLS Signature -- expected output, single BLS signature
```
`BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`.
## Condition
The `aggregate_sigs` handler should aggregate the signatures in the `input`, and the result should match the expected `output`.

View File

@ -0,0 +1,19 @@
# Test format: BLS hash-compressed
A BLS compressed-hash to G2.
## Test case format
```yaml
input:
    message: bytes32,
    domain: bytes -- any number
output: List[bytes48] -- length of two
```
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `msg_hash_g2_compressed` handler should hash the `message`, with the given `domain`, to G2 with compression, and the result should match the expected `output`.

View File

@ -0,0 +1,19 @@
# Test format: BLS hash-uncompressed
A BLS uncompressed-hash to G2.
## Test case format
```yaml
input:
    message: bytes32,
    domain: bytes -- any number
output: List[List[bytes48]] -- 3 lists, each a length of two
```
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `msg_hash_g2_uncompressed` handler should hash the `message`, with the given `domain`, to G2, without compression, and the result should match the expected `output`.

View File

@ -0,0 +1,17 @@
# Test format: BLS private key to pubkey
A BLS private key to public key conversion.
## Test case format
```yaml
input: bytes32 -- the private key
output: bytes48 -- the public key
```
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `priv_to_pub` handler should compute the public key for the given private key `input`, and the result should match the expected `output`.

View File

@ -0,0 +1,20 @@
# Test format: BLS sign message
Message signing with BLS should produce a signature.
## Test case format
```yaml
input:
    privkey: bytes32 -- the private key used for signing
    message: bytes32 -- input message to sign (a hash)
    domain: bytes -- BLS domain
output: bytes96 -- expected signature
```
All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
## Condition
The `sign_msg` handler should sign the given `message`, with `domain`, using the given `privkey`, and the result should match the expected `output`.

View File

@ -0,0 +1,10 @@
# Operations tests
The different kinds of operations ("transactions") are tested individually with test handlers.
The tested operation kinds are:
- [`deposits`](./deposits.md)
- More tests are work-in-progress.

View File

@ -0,0 +1,18 @@
# Test format: Deposit operations
A deposit is a form of an operation (or "transaction"), modifying the state.
## Test case format
```yaml
description: string -- description of test case, purely for debugging purposes
pre: BeaconState -- state before applying the deposit
deposit: Deposit -- the deposit
post: BeaconState -- state after applying the deposit. No value if deposit processing is aborted.
```
## Condition
A `deposits` handler of the `operations` runner should process these cases,
calling the implementation of the `process_deposit(state, deposit)` functionality described in the spec.
The resulting state should match the expected `post` state, or if the `post` state is left blank, the handler should reject the inputs as invalid.
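A sketch of such a handler, assuming the pyspec's `process_deposit` raises `AssertionError` on invalid input and that state objects expose `hash_tree_root()` (as the `minimal_ssz.py` containers used elsewhere in this PR do; names are illustrative):

```python
import copy

from eth2spec.phase0 import spec


def run_deposit_case(pre_state, deposit, post_state):
    state = copy.deepcopy(pre_state)
    try:
        spec.process_deposit(state, deposit)
    except AssertionError:
        # Processing aborted: only valid if no post state is expected
        assert post_state is None
        return
    assert post_state is not None
    assert state.hash_tree_root() == post_state.hash_tree_root()
```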

View File

@ -0,0 +1,32 @@
# Test format: shuffling
The runner of the Shuffling test type has only one handler: `core`.
This does not mean, however, that testing is limited.
Clients may take different approaches to shuffling, e.g. to optimize,
or to support advanced lookups back into older history.
For implementers, possible test runners implementing testing can include:
1) just test permute-index: run it for each index `i` in `range(count)`, and check against the expected `output[i]` (default spec implementation)
2) test un-permute-index (the reverse lookup, implemented by running the shuffling rounds in reverse: from `round_count - 1` down to `0`)
3) test the optimized complete shuffle, where all indices are shuffled at once; check the output in one go
4) test the complete shuffle in reverse (reverse rounds, same as 2)
## Test case format
```yaml
seed: bytes32
count: int
shuffled: List[int]
```
- The `bytes32` is encoded as a string, hexadecimal encoding, prefixed with `0x`.
- Integers are validator indices. These are `uint64`, but realistically they will not be that large.
The `count` specifies the validator registry size. One should compute the shuffling for indices `0, 1, 2, ..., count - 1`.
The `seed` is the raw shuffling seed, passed to permute-index (or to an optimized shuffling approach).
## Condition
The resulting list should match the expected output `shuffled` after shuffling the implied input, using the given `seed`.
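For reference, a self-contained sketch of approach (1), permute-index, following the phase-0 `get_permuted_index` definition (SHA-256 is the spec's hash function; `SHUFFLE_ROUND_COUNT = 90` is the mainnet preset, the minimal config uses fewer rounds):

```python
from hashlib import sha256

SHUFFLE_ROUND_COUNT = 90  # mainnet preset; minimal-config tests use fewer rounds


def hash(data: bytes) -> bytes:
    return sha256(data).digest()


def get_permuted_index(index: int, list_size: int, seed: bytes) -> int:
    # Swap-or-not shuffle: each round, `index` is swapped with its mirror
    # position around a hash-derived pivot iff a hash-derived bit is set.
    assert index < list_size
    for current_round in range(SHUFFLE_ROUND_COUNT):
        round_byte = current_round.to_bytes(1, 'little')
        pivot = int.from_bytes(hash(seed + round_byte)[0:8], 'little') % list_size
        flip = (pivot - index) % list_size
        position = max(index, flip)
        source = hash(seed + round_byte + (position // 256).to_bytes(4, 'little'))
        byte = source[(position % 256) // 8]
        bit = (byte >> (position % 8)) % 2
        index = flip if bit else index
    return index
```

Checking a case then amounts to asserting `get_permuted_index(i, count, seed) == shuffled[i]` for every `i` in `range(count)`.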

View File

@ -0,0 +1,20 @@
# SSZ, generic tests
This set of test-suites provides general testing for SSZ:
to instantiate any container/list/vector/other type from binary data.
Since SSZ is still in a development phase, the full suite of features is not covered yet.
Note that these tests are based on the older SSZ package.
The tests are still relevant, but limited in scope:
more complex object encodings have changed since the original SSZ testing.
A minimal but useful series of tests covering `uint` encoding and decoding is provided.
This is a direct port of the older SSZ `uint` tests (minus outdated test cases).
[uint test format](./uint.md).
Note: the current phase-0 spec does not use larger uints, and uses byte vectors (fixed length) instead to represent roots etc.
The exact uint lengths to support may be redefined in the future.
Extension of the SSZ test collection is planned, with an update to the new spec-maintained `minimal_ssz.py`;
see CI/testing issues for progress tracking.

View File

@ -0,0 +1,19 @@
# Test format: SSZ uints
SSZ supports encoding of uints up to 32 bytes. These are considered to be basic types.
## Test case format
```yaml
type: "uintN" -- string, where N is one of [8, 16, 32, 64, 128, 256]
valid: bool -- expected validity of the input data
value: string -- string, decimal encoding, to support up to 256 bit integers
ssz: bytes -- string, input data, hex encoded, with prefix 0x
tags: List[string] -- description of test case, in the form of a list of labels
```
## Condition
Two-way testing can be implemented in the test-runner:
- Encoding: After encoding the given input number `value`, the output should match `ssz`
- Decoding: After decoding the given `ssz` bytes, it should match the input number `value`
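A two-way check might look like this sketch (SSZ basic uints are fixed-size little-endian; the helper name and `case` dict are illustrative):

```python
def check_uint_case(case):
    n_bytes = int(case['type'][len('uint'):]) // 8
    ssz_bytes = bytes.fromhex(case['ssz'][2:])
    value = int(case['value'])
    if case['valid']:
        # Encoding: the output should match `ssz`
        assert value.to_bytes(n_bytes, 'little') == ssz_bytes
        # Decoding: round-trips back to the input number
        assert int.from_bytes(ssz_bytes, 'little') == value
    else:
        # Invalid cases (e.g. wrong length, out-of-range value) should be
        # rejected by the encoder/decoder under test
        pass
```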

View File

@ -0,0 +1,8 @@
# SSZ, static tests
This set of test-suites provides static testing for SSZ:
to instantiate just the known ETH-2.0 SSZ types from binary data.
This series of tests is based on the spec-maintained `minimal_ssz.py`, i.e. fully consistent with the SSZ spec.
Test format documentation can be found here: [core test format](./core.md).

View File

@ -0,0 +1,32 @@
# Test format: SSZ static types
The goal of this test type is to provide clients with a solid reference for how the known SSZ objects should be encoded.
Each object described in the Phase-0 spec is covered.
This is important, as many of the clients aiming to serialize/deserialize objects directly into structs/classes
do not support (or have alternatives for) generic SSZ encoding/decoding.
This test-format ensures these direct serializations are covered.
## Test case format
```yaml
type_name: string -- string, object name, formatted as in spec. E.g. "BeaconBlock"
value: dynamic -- the YAML-encoded value, of the type specified by type_name.
serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x
root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x
signing_root: bytes32 -- string, signing-root of the value, hex encoded, with prefix 0x. Optional, present if the type contains a `signature` field
```
## Condition
A test-runner can implement the following assertions:
- Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized`
- Hash-tree-root: After parsing the `value`, Hash-tree-root it: the output should match `root`
- Optionally also check signing-root, if present.
- Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value`
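As a sketch of those assertions (a hypothetical `spec_types` mapping from `type_name` to the pyspec SSZ class is assumed, along with hypothetical `serialize`/`hash_tree_root`/`signing_root`/`deserialize` helpers in the style of `minimal_ssz.py`):

```python
def check_static_case(case, spec_types):
    typ = spec_types[case['type_name']]
    value = typ(**case['value'])  # parse the YAML value into the SSZ type
    # Serialization and hash-tree-root must match the expected encodings
    assert '0x' + serialize(value).hex() == case['serialized']
    assert '0x' + hash_tree_root(value).hex() == case['root']
    # Signing-root is only present for types with a `signature` field
    if 'signing_root' in case:
        assert '0x' + signing_root(value).hex() == case['signing_root']
    # Deserialization: decoding `serialized` should yield the parsed value
    assert deserialize(typ, bytes.fromhex(case['serialized'][2:])) == value
```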
## References
**`serialized`**: [SSZ serialization](../../simple-serialize.md#serialization)
**`root`**: [hash_tree_root](../../simple-serialize.md#merkleization) function
**`signing_root`**: [signing_root](../../simple-serialize.md#self-signed-containers) function

View File

@ -1,6 +1,6 @@
# Ethereum 2.0 Phase 0 -- Honest Validator
__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md) that describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md) that describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
## Table of Contents
@ -38,13 +38,13 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers
- [Attestations](#attestations-1)
- [Attestation data](#attestation-data)
- [Slot](#slot-1)
- [Shard](#shard)
- [Beacon block root](#beacon-block-root)
- [Target root](#target-root)
- [Crosslink data root](#crosslink-data-root)
- [Latest crosslink](#latest-crosslink)
- [Source epoch](#source-epoch)
- [Source root](#source-root)
- [Target root](#target-root)
- [Shard](#shard)
- [Previous crosslink root](#previous-crosslink-root)
- [Crosslink data root](#crosslink-data-root)
- [Construct attestation](#construct-attestation)
- [Data](#data)
- [Aggregation bitfield](#aggregation-bitfield)
@ -60,13 +60,13 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers
## Introduction
This document represents the expected behavior of an "honest validator" with respect to Phase 0 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (ie. the functionality of following and reading the beacon chain) and a "validator client" (ie. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope.
This document represents the expected behavior of an "honest validator" with respect to Phase 0 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (i.e. the functionality of following and reading the beacon chain) and a "validator client" (i.e. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope.
A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof of work networks in which a miner provides collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol.
## Prerequisites
All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout.
All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md) and [Phase 0 -- Deposit Contract](../core/0_deposit-contract.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout.
## Constants
@ -84,7 +84,7 @@ A validator must initialize many parameters locally before submitting a deposit
#### BLS public key
Validator public keys are [G1 points](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#g1-points) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator.
Validator public keys are [G1 points](../bls_signature.md#g1-points) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator.
#### BLS withdrawal key
@ -96,15 +96,15 @@ The validator constructs their `withdrawal_credentials` via the following:
### Submit deposit
In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. Deposits are made to the [deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) located at `DEPOSIT_CONTRACT_ADDRESS`.
In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. Deposits are made to the [deposit contract](../core/0_deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`.
To submit a deposit:
* Pack the validator's [initialization parameters](#initialization) into `deposit_input`, a [`DepositInput`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#depositinput) SSZ object.
* Let `proof_of_possession` be the result of `bls_sign` of the `signed_root(deposit_input)` with `domain=DOMAIN_DEPOSIT`.
* Set `deposit_input.proof_of_possession = proof_of_possession`.
* Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object.
* Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`.
* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit` along with `serialize(deposit_input)` as the singular `bytes` input along with a deposit `amount` in Gwei.
* Set `deposit_data.amount = amount`.
* Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`.
* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of `amount` Gwei.
_Note_: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_DEPOSIT_AMOUNT`.
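As a rough sketch of the construction above (pyspec-style types and the py_ecc `bls` API are assumed; `privkey`, `pubkey`, `withdrawal_credentials`, and `amount` are prepared locally as described):

```python
deposit_data = spec.DepositData(
    pubkey=pubkey,
    withdrawal_credentials=withdrawal_credentials,
    amount=amount,
)
deposit_data.signature = bls.sign(
    message_hash=signing_root(deposit_data),
    privkey=privkey,
    domain=spec.DOMAIN_DEPOSIT,  # simplified; consensus verification uses get_domain(...)
)
# deposit_data's fields are then passed to the eth1 deposit() contract call
```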
@ -114,13 +114,13 @@ Deposits cannot be processed into the beacon chain until the eth1.0 block in whi
### Validator index
Once a validator has been processed and added to the beacon state's `validator_registry`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally.
Once a validator has been processed and added to the beacon state's `validator_registry`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](../core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally.
### Activation
In normal operation, the validator is quickly activated at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes).
The function [`is_active_validator`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken. Usage is as follows:
The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken. Usage is as follows:
```python
shuffling_epoch = state.current_shuffling_epoch
@ -138,9 +138,9 @@ A validator has two primary responsibilities to the beacon chain -- [proposing b
### Block proposal
A validator is expected to propose a [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function).
A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (eg. at 312500 validators = 10 million ETH, that's once per ~3 weeks).
There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312500 validators = 10 million ETH, that's once per ~3 weeks).
#### Block header
@ -152,7 +152,7 @@ _Note:_ there might be "skipped" slots between the `parent` and `block`. These s
##### Parent root
Set `block.previous_block_root = hash_tree_root(parent)`.
Set `block.previous_block_root = signing_root(parent)`.
##### State root
@ -182,15 +182,15 @@ epoch_signature = bls_sign(
* Let `D` be the set of `Eth1DataVote` objects `vote` in `state.eth1_data_votes` where:
* `vote.eth1_data.block_hash` is the hash of an eth1.0 block that is (i) part of the canonical chain, (ii) >= `ETH1_FOLLOW_DISTANCE` blocks behind the head, and (iii) newer than `state.latest_eth1_data.block_hash`.
* `vote.eth1_data.deposit_count` is the deposit count of the eth1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`.
* `vote.eth1_data.deposit_root` is the deposit root of the eth1.0 deposit contract at the block defined by `vote.eth1_data.block_hash`.
* If `D` is empty:
* Let `block_hash` be the block hash of the `ETH1_FOLLOW_DISTANCE`'th ancestor of the head of the canonical eth1.0 chain.
* Let `deposit_root` be the deposit root of the eth1.0 deposit contract in the post-state of the block referenced by `block_hash`
* Let `deposit_root` and `deposit_count` be the deposit root and deposit count of the eth1.0 deposit contract in the post-state of the block referenced by `block_hash`
* Let `best_vote_data = Eth1Data(block_hash=block_hash, deposit_root=deposit_root, deposit_count=deposit_count)`.
* If `D` is nonempty:
* Let `best_vote` be the member of `D` that has the highest `vote.vote_count`, breaking ties by favoring block hashes with higher associated block height.
* Let `block_hash = best_vote.eth1_data.block_hash`.
* Let `deposit_root = best_vote.eth1_data.deposit_root`.
* Set `block.eth1_data = Eth1Data(deposit_root=deposit_root, block_hash=block_hash)`.
* Let `best_vote_data` be the `eth1_data` of the member of `D` that has the highest `vote.vote_count`, breaking ties by favoring block hashes with higher associated block height.
* Set `block.eth1_data = best_vote_data`.
##### Signature
@ -199,7 +199,7 @@ Set `block.signature = block_signature` where `block_signature` is defined as:
```python
block_signature = bls_sign(
privkey=validator.privkey, # privkey store locally, not in state
message_hash=signed_root(block),
message_hash=signing_root(block),
domain=get_domain(
fork=fork, # `fork` is the fork object at the slot `block.slot`
epoch=slot_to_epoch(block.slot),
@ -212,25 +212,25 @@ block_signature = bls_sign(
##### Proposer slashings
Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposer-slashings-1). The validator receives a small "whistleblower" reward for each proposer slashing found and included.
Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](../core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](../core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included.
##### Attester slashings
Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attester-slashings-1). The validator receives a small "whistleblower" reward for each attester slashing found and included.
Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](../core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](../core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included.
##### Attestations
Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations-1). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain.
Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](../core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain.
##### Deposits
Up to `MAX_DEPOSITS` [`Deposit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) objects can be included in the `block`. These deposits are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposits-1).
If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](../core/0_deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits).
The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation.
##### Voluntary exits
Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#exits-1).
Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](../core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](../core/0_beacon-chain.md#voluntary-exits).
### Attestations
@ -240,7 +240,7 @@ A validator should create and broadcast the attestation halfway through the `slo
#### Attestation data
First the validator should construct `attestation_data`, an [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot.
First the validator should construct `attestation_data`, an [`AttestationData`](../core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot.
* Let `head_block` be the result of running the fork choice during the assigned slot.
* Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot.
@ -249,31 +249,9 @@ First the validator should construct `attestation_data`, an [`AttestationData`](
Set `attestation_data.slot = head_state.slot`.
##### Shard
Set `attestation_data.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`.
##### Beacon block root
Set `attestation_data.beacon_block_root = hash_tree_root(head_block)`.
##### Target root
Set `attestation_data.target_root = hash_tree_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary.
_Note:_ This can be looked up in the state using:
* Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`.
* Set `epoch_boundary = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`.
##### Crosslink data root
Set `attestation_data.crosslink_data_root = ZERO_HASH`.
_Note:_ This is a stub for phase 0.
##### Latest crosslink
Set `attestation_data.previous_crosslink = head_state.latest_crosslinks[shard]`.
Set `attestation_data.beacon_block_root = signing_root(head_block)`.
##### Source epoch
@ -283,9 +261,31 @@ Set `attestation_data.source_epoch = head_state.justified_epoch`.
Set `attestation_data.source_root = head_state.current_justified_root`.
##### Target root
Set `attestation_data.target_root = signing_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary.
_Note:_ This can be looked up in the state using:
* Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`.
* Set `epoch_boundary = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`.
##### Shard
Set `attestation_data.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`.
##### Previous crosslink root
Set `attestation_data.previous_crosslink_root = hash_tree_root(head_state.current_crosslinks[shard])`.
##### Crosslink data root
Set `attestation_data.crosslink_data_root = ZERO_HASH`.
_Note:_ This is a stub for phase 0.
#### Construct attestation
Next the validator creates `attestation`, an [`Attestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestation) object.
Next the validator creates `attestation`, an [`Attestation`](../core/0_beacon-chain.md#attestation) object.
##### Data
@ -298,7 +298,7 @@ Set `attestation.data = attestation_data` where `attestation_data` is the `Attes
* Set `aggregation_bitfield[index_into_committee // 8] |= 2 ** (index_into_committee % 8)`.
* Set `attestation.aggregation_bitfield = aggregation_bitfield`.
_Note_: Calling `get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`.
_Note_: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`.
##### Custody bitfield
@ -331,22 +331,19 @@ signed_attestation_data = bls_sign(
## Validator assignments
A validator can get the current and previous epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= current_epoch`.
A validator can get the current, previous, and next epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= next_epoch`.
```python
def get_committee_assignment(
state: BeaconState,
epoch: Epoch,
validator_index: ValidatorIndex,
registry_change: bool=False) -> Tuple[List[ValidatorIndex], Shard, Slot, bool]:
validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]:
"""
Return the committee assignment in the ``epoch`` for ``validator_index`` and ``registry_change``.
Return the committee assignment in the ``epoch`` for ``validator_index``.
``assignment`` returned is a tuple of the following form:
* ``assignment[0]`` is the list of validators in the committee
* ``assignment[1]`` is the shard to which the committee is assigned
* ``assignment[2]`` is the slot at which the committee is assigned
* ``assignment[3]`` is a bool signalling if the validator is expected to propose
a beacon block at the assigned slot.
"""
previous_epoch = get_previous_epoch(state)
next_epoch = get_current_epoch(state) + 1
@ -357,7 +354,6 @@ def get_committee_assignment(
crosslink_committees = get_crosslink_committees_at_slot(
state,
slot,
registry_change=registry_change,
)
selected_committees = [
committee # Tuple[List[ValidatorIndex], Shard]
@ -367,28 +363,32 @@ def get_committee_assignment(
if len(selected_committees) > 0:
validators = selected_committees[0][0]
shard = selected_committees[0][1]
is_proposer = validator_index == get_beacon_proposer_index(state, slot, registry_change=registry_change)
assignment = (validators, shard, slot, is_proposer)
assignment = (validators, shard, slot)
return assignment
```
A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the slot in question. Proposer selection is only stable within the context of the current epoch.
```python
def is_proposer_at_slot(state: BeaconState,
slot: Slot,
validator_index: ValidatorIndex) -> bool:
assert state.slot == slot
return get_beacon_proposer_index(state) == validator_index
```
_Note_: To see if a validator is assigned to propose during the slot, the validator must run an empty slot transition from the previous state to the current slot.
### Lookahead
The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming assignments of proposing and attesting dictated by the shuffling and slot.
The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting, dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the slot in question.
There are three possibilities for the shuffling at the next epoch:
1. The shuffling changes due to a "validator registry change".
2. The shuffling changes due to `epochs_since_last_registry_update` being an exact power of 2 greater than 1.
3. The shuffling remains the same (i.e. the validator is in the same shard committee).
`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments which involves noting at which future slot one will have to attest and also which shard one should begin syncing (in phase 1+).
Either (2) or (3) occurs if (1) fails. The choice between (2) and (3) is deterministic based upon `epochs_since_last_registry_update`.
When querying for assignments in the next epoch there are two options -- with and without a `registry_change` -- which is the optional fourth parameter of the `get_committee_assignment`.
`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should always plan for assignments from both values of `registry_change` unless the validator can concretely eliminate one of the options. Planning for future assignments involves noting at which future slot one might have to attest and propose and also which shard one should begin syncing (in phase 1+).
Specifically, a validator should call both `get_committee_assignment(state, next_epoch, validator_index, registry_change=True)` and `get_committee_assignment(state, next_epoch, validator_index, registry_change=False)` when checking for next epoch assignments.
Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
## How to avoid slashing
@ -398,7 +398,7 @@ _Note_: Signed data must be within a sequential `Fork` context to conflict. Mess
### Proposer slashing
To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposalsigneddata) where conflicting is defined as two distinct blocks within the same epoch.
To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same epoch.
_In phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings._
@ -410,7 +410,7 @@ If the software crashes at some point within this routine, then when the validat
### Attester slashing
To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) objects where conflicting is defined as a set of two attestations that satisfy either [`is_double_vote`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_double_vote) or [`is_surround_vote`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_surround_vote).
To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](../core/0_beacon-chain.md#attestationdata) objects where conflicting is defined as a set of two attestations that satisfy either [`is_double_vote`](../core/0_beacon-chain.md#is_double_vote) or [`is_surround_vote`](../core/0_beacon-chain.md#is_surround_vote).
Specifically, when signing an `Attestation`, a validator should perform the following steps in the following order:
1. Save a record to hard disk that an attestation has been signed for source -- `attestation_data.source_epoch` -- and target -- `slot_to_epoch(attestation_data.slot)`.

173
test_generators/README.md Normal file
View File

@ -0,0 +1,173 @@
# Eth2.0 Test Generators
This directory contains all the generators for YAML tests, consumed by Eth 2.0 client implementations.
Any issues with the generators and/or generated tests should be filed
in the repository that hosts the generator outputs, here: [ethereum/eth2.0-tests](https://github.com/ethereum/eth2.0-tests/).
Whenever a release is made, the new tests are automatically built and
[eth2TestGenBot](https://github.com/eth2TestGenBot) commits the changes to the test repository.
## How to run generators
Prerequisites:
- Python 3 installed
- pip3
- GNU make
### Cleaning
This removes the existing virtual environments (`/test_generators/<generator>/venv`), and generated tests (`/yaml_tests/`).
```bash
make clean
```
### Running all test generators
This runs all the generators.
```bash
make -j 4 gen_yaml_tests
```
The `-j N` flag makes the generators run in parallel, with `N` being the number of cores to use.
### Running a single generator
The makefile auto-detects generators in the `test_generators/` directory
and provides a tests-gen target for each generator; see the example below.
```bash
make ./yaml_tests/shuffling/
```
## Developing a generator
Open up the generator of your choice (one at a time) in your favorite IDE/editor, and run:
```bash
# From the root of the generator directory:
# Create a virtual environment (any venv/.venv/.venvs is git-ignored)
python3 -m venv venv
# Activate the venv, this is where dependencies are installed for the generator
. venv/bin/activate
```
Now that you have a virtual environment, write your generator.
It's recommended to extend the base-generator.
Create a `requirements.txt` in the root of your generator directory:
```
eth-utils==1.4.1
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
```
The config helper and pyspec are optional, but preferred. We encourage generators to derive tests from the spec itself, to prevent code duplication and outdated tests.
Applying configurations to the spec is simple, and enables you to create test suites with different contexts.
Note: make sure to run `make pyspec` from the root of the specs repository, to build the pyspec requirement.
Install all the necessary requirements (re-run when you add more):
```bash
pip3 install -r requirements.txt
```
Now write your initial test generator, extending the base generator.
Write a `main.py` file; here's an example:
```python
from gen_base import gen_runner, gen_suite, gen_typing
from eth_utils import (
to_dict, to_tuple
)
from preset_loader import loader
from eth2spec.phase0 import spec
@to_dict
def example_test_case(v: int):
yield "spec_SHARD_COUNT", spec.SHARD_COUNT
yield "example", v
@to_tuple
def generate_example_test_cases():
for i in range(10):
yield example_test_case(i)
def example_minimal_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
presets = loader.load_presets(configs_path, 'minimal')
spec.apply_constants_preset(presets)
return ("mini", "core", gen_suite.render_suite(
title="example_minimal",
summary="Minimal example suite, testing bar.",
forks_timeline="testing",
forks=["phase0"],
config="minimal",
handler="main",
test_cases=generate_example_test_cases()))
def example_mainnet_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
presets = loader.load_presets(configs_path, 'mainnet')
spec.apply_constants_preset(presets)
return ("full", "core", gen_suite.render_suite(
title="example_main_net",
summary="Main net based example suite.",
        forks_timeline="mainnet",
forks=["phase0"],
        config="mainnet",
handler="main",
test_cases=generate_example_test_cases()))
if __name__ == "__main__":
gen_runner.run_generator("example", [example_minimal_suite, example_mainnet_suite])
```
Recommendations:
- you can have more than just one suite creator, e.g. `gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])`
- you can concatenate lists of test cases if you don't want to split them up into suites; however, make sure they can be run with one handler.
- you can split your suite creators into different python files/packages, good for code organization.
- use config "minimal" for performance. But also implement a suite with the default config where necessary.
- you may be able to write your test suite creator in a way where it does not make assumptions about constants.
If so, you can generate test suites with different configurations for the same scenario (see example).
- the test-generator accepts `--output` and `--force` (overwrite output)
## How to add a new test generator
In order to add a new test generator that builds `New Tests`:
1. Create a new directory `new_tests`, within the `test_generators` directory.
Note that `new_tests` is also the name of the directory in which the tests will appear in the tests repository later.
2. Your generator is assumed to have a `requirements.txt` file,
with any dependencies it may need. Leave it empty if your generator has none.
3. Your generator is assumed to have a `main.py` file in its root.
By adding the base generator to your requirements, you can make a generator really easily. See docs below.
4. Your generator is called with `-o some/file/path/for_testing/can/be_anything -c some/other/path/to_configs/`.
The base generator helps you handle this; you only have to define suite headers,
and a list of tests for each suite you generate.
5. Finally, add any linting or testing commands to the
[circleci config file](https://github.com/ethereum/eth2.0-test-generators/blob/master/.circleci/config.yml)
if desired to increase code quality.
Note: you do not have to change the makefile.
However, if necessary (e.g. not using python, or mixing in other languages), submit an issue, and it can be a special case.
Do note that generators should be easy to maintain, lean, and based on the spec.
## How to remove a test generator
If a test generator is not needed anymore, undo the steps described above and make a new release:
1. remove the generator directory
2. remove the generated tests in the `eth2.0-tests` repository by opening a PR there.
3. make a new release

View File

@ -0,0 +1,21 @@
# BLS Test Generator
Explanation of BLS12-381 type hierarchy
The base unit is bytes48, of which only 381 bits are used.
- FQ: uint381 modulo field modulus
- FQ2: (FQ, FQ)
- G2: (FQ2, FQ2, FQ2)
## Resources
- [Eth2.0 spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md)
- [Finite Field Arithmetic](http://www.springeronline.com/sgw/cda/pageitems/document/cda_downloaddocument/0,11996,0-0-45-110359-0,00.pdf)
- Chapter 2 of [Elliptic Curve Cryptography](http://cacr.uwaterloo.ca/ecc/). Darrel Hankerson, Alfred Menezes, and Scott Vanstone
- [Zcash BLS parameters](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381)
- [Trinity implementation](https://github.com/ethereum/trinity/blob/master/eth2/_utils/bls.py)
## Comments
Compared to Zcash, the Ethereum spec always requires the compressed form (c_flag / most significant bit always set).
Also note that, compared to Zcash, the G1/G2 roles of pubkeys and signatures are reversed.

243
test_generators/bls/main.py Normal file
View File

@ -0,0 +1,243 @@
"""
BLS test vectors generator
"""
from typing import Tuple
from eth_utils import (
to_tuple, int_to_big_endian
)
from gen_base import gen_runner, gen_suite, gen_typing
from py_ecc import bls
def int_to_hex(n: int) -> str:
return '0x' + int_to_big_endian(n).hex()
def hex_to_int(x: str) -> int:
return int(x, 16)
# Note: even though a domain is only a uint64,
# it is serialized as a hex string as well, to avoid issues
# with YAML parsers that are limited to 53-bit integers (a JS language limit).
DOMAINS = [
0,
1,
1234,
2**32-1,
2**64-1
]
MESSAGES = [
bytes(b'\x00' * 32),
bytes(b'\x56' * 32),
bytes(b'\xab' * 32),
]
PRIVKEYS = [
    # The curve order is a 255-bit number, so private keys are 32 bytes at most.
    # Also, not every integer is a valid private key, so we use pre-generated keys.
hex_to_int('0x00000000000000000000000000000000263dbd792f5b1be47ed85f8938c0f29586af0d3ac7b977f21c278fe1462040e3'),
hex_to_int('0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138'),
hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'),
]
def hash_message(msg: bytes,
                 domain: int) -> Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]]:
"""
Hash message
Input:
- Message as bytes
- domain as uint64
Output:
- Message hash as a G2 point
"""
return [
[
int_to_hex(fq2.coeffs[0]),
int_to_hex(fq2.coeffs[1]),
]
for fq2 in bls.utils.hash_to_G2(msg, domain)
]
def hash_message_compressed(msg: bytes, domain: int) -> Tuple[str, str]:
"""
Hash message
Input:
- Message as bytes
- domain as uint64
Output:
- Message hash as a compressed G2 point
"""
z1, z2 = bls.utils.compress_G2(bls.utils.hash_to_G2(msg, domain))
return [int_to_hex(z1), int_to_hex(z2)]
@to_tuple
def case01_message_hash_G2_uncompressed():
for msg in MESSAGES:
for domain in DOMAINS:
yield {
'input': {
'message': '0x' + msg.hex(),
'domain': int_to_hex(domain)
},
'output': hash_message(msg, domain)
}
@to_tuple
def case02_message_hash_G2_compressed():
for msg in MESSAGES:
for domain in DOMAINS:
yield {
'input': {
'message': '0x' + msg.hex(),
'domain': int_to_hex(domain)
},
'output': hash_message_compressed(msg, domain)
}
@to_tuple
def case03_private_to_public_key():
pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS]
pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys]
for privkey, pubkey_serial in zip(PRIVKEYS, pubkeys_serial):
yield {
'input': int_to_hex(privkey),
'output': pubkey_serial,
}
@to_tuple
def case04_sign_messages():
for privkey in PRIVKEYS:
for message in MESSAGES:
for domain in DOMAINS:
sig = bls.sign(message, privkey, domain)
yield {
'input': {
'privkey': int_to_hex(privkey),
'message': '0x' + message.hex(),
'domain': int_to_hex(domain)
},
'output': '0x' + sig.hex()
}
# TODO: case05_verify_messages: Verify messages signed in case04
# It takes too long, empty for now
@to_tuple
def case06_aggregate_sigs():
for domain in DOMAINS:
for message in MESSAGES:
sigs = [bls.sign(message, privkey, domain) for privkey in PRIVKEYS]
yield {
'input': ['0x' + sig.hex() for sig in sigs],
'output': '0x' + bls.aggregate_signatures(sigs).hex(),
}
@to_tuple
def case07_aggregate_pubkeys():
pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS]
pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys]
yield {
'input': pubkeys_serial,
'output': '0x' + bls.aggregate_pubkeys(pubkeys).hex(),
}
# TODO
# Aggregate verify
# TODO
# Proof-of-possession
def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("g2_uncompressed", "msg_hash_g2_uncompressed", gen_suite.render_suite(
title="BLS G2 Uncompressed msg hash",
summary="BLS G2 Uncompressed msg hash",
forks_timeline="mainnet",
forks=["phase0"],
config="mainnet",
runner="bls",
handler="msg_hash_uncompressed",
test_cases=case01_message_hash_G2_uncompressed()))
def bls_msg_hash_compressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("g2_compressed", "msg_hash_g2_compressed", gen_suite.render_suite(
title="BLS G2 Compressed msg hash",
summary="BLS G2 Compressed msg hash",
forks_timeline="mainnet",
forks=["phase0"],
config="mainnet",
runner="bls",
handler="msg_hash_compressed",
test_cases=case02_message_hash_G2_compressed()))
def bls_priv_to_pub_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("priv_to_pub", "priv_to_pub", gen_suite.render_suite(
title="BLS private key to pubkey",
summary="BLS Convert private key to public key",
forks_timeline="mainnet",
forks=["phase0"],
config="mainnet",
runner="bls",
handler="priv_to_pub",
test_cases=case03_private_to_public_key()))
def bls_sign_msg_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("sign_msg", "sign_msg", gen_suite.render_suite(
title="BLS sign msg",
summary="BLS Sign a message",
forks_timeline="mainnet",
forks=["phase0"],
config="mainnet",
runner="bls",
handler="sign_msg",
test_cases=case04_sign_messages()))
def bls_aggregate_sigs_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("aggregate_sigs", "aggregate_sigs", gen_suite.render_suite(
title="BLS aggregate sigs",
summary="BLS Aggregate signatures",
forks_timeline="mainnet",
forks=["phase0"],
config="mainnet",
runner="bls",
handler="aggregate_sigs",
test_cases=case06_aggregate_sigs()))
def bls_aggregate_pubkeys_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("aggregate_pubkeys", "aggregate_pubkeys", gen_suite.render_suite(
title="BLS aggregate pubkeys",
summary="BLS Aggregate public keys",
forks_timeline="mainnet",
forks=["phase0"],
config="mainnet",
runner="bls",
handler="aggregate_pubkeys",
test_cases=case07_aggregate_pubkeys()))
if __name__ == "__main__":
gen_runner.run_generator("bls", [
bls_msg_hash_compressed_suite,
bls_msg_hash_uncompressed_suite,
bls_priv_to_pub_suite,
bls_sign_msg_suite,
bls_aggregate_sigs_suite,
bls_aggregate_pubkeys_suite
])

View File

@ -0,0 +1,3 @@
py-ecc==1.6.0
eth-utils==1.4.1
../../test_libs/gen_helpers

View File

@ -0,0 +1,13 @@
# Operations
Operations (or "transactions" in previous spec iterations)
are atomic changes to the state, introduced by embedding them in blocks.
This generator provides a series of test suites, divided into handlers, one for each operation type.
An operation test-runner can consume these operation test-suites,
and handle different kinds of operations by processing the cases using the specified test handler.
Information on the format of the tests can be found in the [operations test formats documentation](../../specs/test_formats/operations/README.md).

View File

@ -0,0 +1,180 @@
from eth2spec.phase0 import spec
from eth_utils import (
to_dict, to_tuple
)
from gen_base import gen_suite, gen_typing
from preset_loader import loader
from eth2spec.debug.encode import encode
from eth2spec.utils.minimal_ssz import signing_root
from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof
from typing import List, Tuple
import genesis
import keys
from py_ecc import bls
def build_deposit_data(state,
pubkey: spec.BLSPubkey,
withdrawal_cred: spec.Bytes32,
privkey: int,
amount: int):
deposit_data = spec.DepositData(
pubkey=pubkey,
withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[1:],
amount=amount,
)
deposit_data.proof_of_possession = bls.sign(
message_hash=signing_root(deposit_data),
privkey=privkey,
domain=spec.get_domain(
state,
spec.get_current_epoch(state),
spec.DOMAIN_DEPOSIT,
)
)
return deposit_data
def build_deposit(state,
deposit_data_leaves: List[spec.Bytes32],
pubkey: spec.BLSPubkey,
withdrawal_cred: spec.Bytes32,
privkey: int,
amount: int) -> spec.Deposit:
deposit_data = build_deposit_data(state, pubkey, withdrawal_cred, privkey, amount)
item = deposit_data.hash_tree_root()
index = len(deposit_data_leaves)
deposit_data_leaves.append(item)
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
proof = list(get_merkle_proof(tree, item_index=index))
deposit = spec.Deposit(
proof=list(proof),
index=index,
data=deposit_data,
)
assert spec.verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, get_merkle_root(tuple(deposit_data_leaves)))
return deposit
def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[spec.Deposit, spec.BeaconState]:
genesis_deposits = genesis.create_deposits(
keys.pubkeys[:initial_validator_count],
keys.withdrawal_creds[:initial_validator_count]
)
state = genesis.create_genesis_state(genesis_deposits)
deposit_data_leaves = [dep.data.hash_tree_root() for dep in genesis_deposits]
deposit = build_deposit(
state,
deposit_data_leaves,
keys.pubkeys[index],
keys.withdrawal_creds[index],
keys.privkeys[index],
spec.MAX_DEPOSIT_AMOUNT,
)
state.latest_eth1_data.deposit_root = get_merkle_root(tuple(deposit_data_leaves))
state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
return deposit, state
@to_dict
def valid_deposit():
new_dep, state = build_deposit_for_index(10, 10)
yield 'description', 'valid deposit to add new validator'
yield 'pre', encode(state, spec.BeaconState)
yield 'deposit', encode(new_dep, spec.Deposit)
spec.process_deposit(state, new_dep)
yield 'post', encode(state, spec.BeaconState)
@to_dict
def valid_topup():
new_dep, state = build_deposit_for_index(10, 3)
yield 'description', 'valid deposit to top-up existing validator'
yield 'pre', encode(state, spec.BeaconState)
yield 'deposit', encode(new_dep, spec.Deposit)
spec.process_deposit(state, new_dep)
yield 'post', encode(state, spec.BeaconState)
@to_dict
def invalid_deposit_index():
new_dep, state = build_deposit_for_index(10, 10)
# Mess up deposit index, 1 too small
state.deposit_index = 9
yield 'description', 'invalid deposit index'
yield 'pre', encode(state, spec.BeaconState)
yield 'deposit', encode(new_dep, spec.Deposit)
try:
spec.process_deposit(state, new_dep)
except AssertionError:
# expected
yield 'post', None
return
raise Exception('invalid_deposit_index has unexpectedly allowed deposit')
@to_dict
def invalid_deposit_proof():
new_dep, state = build_deposit_for_index(10, 10)
# Make deposit proof invalid (at bottom of proof)
new_dep.proof[-1] = spec.ZERO_HASH
yield 'description', 'invalid deposit proof'
yield 'pre', encode(state, spec.BeaconState)
yield 'deposit', encode(new_dep, spec.Deposit)
try:
spec.process_deposit(state, new_dep)
except AssertionError:
# expected
yield 'post', None
return
    raise Exception('invalid_deposit_proof has unexpectedly allowed deposit')
@to_tuple
def deposit_cases():
yield valid_deposit()
yield valid_topup()
yield invalid_deposit_index()
yield invalid_deposit_proof()
def mini_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
presets = loader.load_presets(configs_path, 'minimal')
spec.apply_constants_preset(presets)
return ("deposit_minimal", "deposits", gen_suite.render_suite(
title="deposit operation",
summary="Test suite for deposit type operation processing",
forks_timeline="testing",
forks=["phase0"],
config="minimal",
runner="operations",
handler="deposits",
test_cases=deposit_cases()))
def full_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
presets = loader.load_presets(configs_path, 'mainnet')
spec.apply_constants_preset(presets)
return ("deposit_full", "deposits", gen_suite.render_suite(
title="deposit operation",
summary="Test suite for deposit type operation processing",
forks_timeline="mainnet",
forks=["phase0"],
config="mainnet",
runner="operations",
handler="deposits",
test_cases=deposit_cases()))

View File

@ -0,0 +1,44 @@
from eth2spec.phase0 import spec
from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof
from typing import List
def create_genesis_state(deposits: List[spec.Deposit]) -> spec.BeaconState:
    deposit_root = get_merkle_root(tuple(dep.data.hash_tree_root() for dep in deposits))
return spec.get_genesis_beacon_state(
deposits,
genesis_time=0,
genesis_eth1_data=spec.Eth1Data(
deposit_root=deposit_root,
deposit_count=len(deposits),
block_hash=spec.ZERO_HASH,
),
)
def create_deposits(pubkeys: List[spec.BLSPubkey], withdrawal_cred: List[spec.Bytes32]) -> List[spec.Deposit]:
# Mock proof of possession
proof_of_possession = b'\x33' * 96
deposit_data = [
spec.DepositData(
pubkey=pubkeys[i],
withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[i][1:],
amount=spec.MAX_DEPOSIT_AMOUNT,
proof_of_possession=proof_of_possession,
) for i in range(len(pubkeys))
]
# Fill tree with existing deposits
deposit_data_leaves = [data.hash_tree_root() for data in deposit_data]
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
return [
spec.Deposit(
proof=list(get_merkle_proof(tree, item_index=i)),
index=i,
data=deposit_data[i]
) for i in range(len(deposit_data))
]

View File

@ -0,0 +1,7 @@
from py_ecc import bls
from eth2spec.phase0.spec import hash
privkeys = list(range(1, 101))
pubkeys = [bls.privtopub(k) for k in privkeys]
# Insecure, but easier to follow
withdrawal_creds = [hash(bls.privtopub(k)) for k in privkeys]

View File

@ -0,0 +1,9 @@
from gen_base import gen_runner
from deposits import mini_deposits_suite, full_deposits_suite
if __name__ == "__main__":
gen_runner.run_generator("operations", [
mini_deposits_suite,
full_deposits_suite
])

View File

@ -0,0 +1,5 @@
eth-utils==1.4.1
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
py_ecc

View File

@ -0,0 +1,10 @@
# Shuffling Tests
Tests for the swap-or-not shuffling in ETH 2.0.
Tips for an initial shuffling implementation:
- run with `round_count = 1` first, and do the same with the pyspec.
- start with permute-index
- optimized shuffling implementations:
- vitalik, Python: https://github.com/ethereum/eth2.0-specs/pull/576#issue-250741806
- protolambda, Go: https://github.com/protolambda/eth2-shuffle

View File

@ -0,0 +1,54 @@
from eth2spec.phase0 import spec
from eth_utils import (
to_dict, to_tuple
)
from gen_base import gen_runner, gen_suite, gen_typing
from preset_loader import loader
@to_dict
def shuffling_case(seed: spec.Bytes32, count: int):
yield 'seed', '0x' + seed.hex()
yield 'count', count
yield 'shuffled', [spec.get_permuted_index(i, count, seed) for i in range(count)]
@to_tuple
def shuffling_test_cases():
for seed in [spec.hash(spec.int_to_bytes4(seed_init_value)) for seed_init_value in range(30)]:
for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000]:
yield shuffling_case(seed, count)
def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
presets = loader.load_presets(configs_path, 'minimal')
spec.apply_constants_preset(presets)
return ("shuffling_minimal", "core", gen_suite.render_suite(
title="Swap-or-Not Shuffling tests with minimal config",
summary="Swap or not shuffling, with minimally configured testing round-count",
forks_timeline="testing",
forks=["phase0"],
config="minimal",
runner="shuffling",
handler="core",
test_cases=shuffling_test_cases()))
def full_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
presets = loader.load_presets(configs_path, 'mainnet')
spec.apply_constants_preset(presets)
return ("shuffling_full", "core", gen_suite.render_suite(
title="Swap-or-Not Shuffling tests with mainnet config",
summary="Swap or not shuffling, with normal configured (secure) mainnet round-count",
forks_timeline="mainnet",
forks=["phase0"],
config="mainnet",
runner="shuffling",
handler="core",
test_cases=shuffling_test_cases()))
if __name__ == "__main__":
gen_runner.run_generator("shuffling", [mini_shuffling_suite, full_shuffling_suite])

View File

@ -0,0 +1,4 @@
eth-utils==1.4.1
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -0,0 +1,47 @@
from uint_test_cases import (
generate_random_uint_test_cases,
generate_uint_wrong_length_test_cases,
generate_uint_bounds_test_cases,
generate_uint_out_of_bounds_test_cases
)
from gen_base import gen_runner, gen_suite, gen_typing
def ssz_random_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("uint_random", "uint", gen_suite.render_suite(
title="UInt Random",
summary="Random integers chosen uniformly over the allowed value range",
forks_timeline= "mainnet",
forks=["phase0"],
config="mainnet",
runner="ssz",
handler="uint",
test_cases=generate_random_uint_test_cases()))
def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("uint_wrong_length", "uint", gen_suite.render_suite(
title="UInt Wrong Length",
summary="Serialized integers that are too short or too long",
forks_timeline= "mainnet",
forks=["phase0"],
config="mainnet",
runner="ssz",
handler="uint",
test_cases=generate_uint_wrong_length_test_cases()))
def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
return ("uint_bounds", "uint", gen_suite.render_suite(
title="UInt Bounds",
summary="Integers right at or beyond the bounds of the allowed value range",
forks_timeline= "mainnet",
forks=["phase0"],
config="mainnet",
runner="ssz",
handler="uint",
test_cases=generate_uint_bounds_test_cases() + generate_uint_out_of_bounds_test_cases()))
if __name__ == "__main__":
gen_runner.run_generator("ssz_generic", [ssz_random_uint_suite, ssz_wrong_uint_suite, ssz_uint_bounds_suite])

View File

@ -0,0 +1,93 @@
from collections.abc import (
Mapping,
Sequence,
)
from eth_utils import (
encode_hex,
to_dict,
)
from ssz.sedes import (
BaseSedes,
Boolean,
Bytes,
BytesN,
Container,
List,
UInt,
)
def render_value(value):
if isinstance(value, bool):
return value
elif isinstance(value, int):
return str(value)
elif isinstance(value, bytes):
return encode_hex(value)
elif isinstance(value, Sequence):
return tuple(render_value(element) for element in value)
elif isinstance(value, Mapping):
return render_dict_value(value)
else:
raise ValueError(f"Cannot render value {value}")
@to_dict
def render_dict_value(value):
for key, value in value.items():
yield key, render_value(value)
def render_type_definition(sedes):
if isinstance(sedes, Boolean):
return "bool"
elif isinstance(sedes, UInt):
return f"uint{sedes.length * 8}"
elif isinstance(sedes, BytesN):
return f"bytes{sedes.length}"
elif isinstance(sedes, Bytes):
return f"bytes"
elif isinstance(sedes, List):
return [render_type_definition(sedes.element_sedes)]
elif isinstance(sedes, Container):
return {
field_name: render_type_definition(field_sedes)
for field_name, field_sedes in sedes.fields
}
elif isinstance(sedes, BaseSedes):
raise Exception("Unreachable: All sedes types have been checked")
else:
raise TypeError("Expected BaseSedes")
@to_dict
def render_test_case(*, sedes, valid, value=None, serial=None, description=None, tags=None):
value_and_serial_given = value is not None and serial is not None
if valid:
if not value_and_serial_given:
raise ValueError("For valid test cases, both value and ssz must be present")
else:
if value_and_serial_given:
raise ValueError("For invalid test cases, one of either value or ssz must not be present")
if tags is None:
tags = []
yield "type", render_type_definition(sedes)
yield "valid", valid
if value is not None:
yield "value", render_value(value)
if serial is not None:
yield "ssz", encode_hex(serial)
if description is not None:
yield "description", description
yield "tags", tags

View File

@ -0,0 +1,4 @@
eth-utils==1.4.1
../../test_libs/gen_helpers
../../test_libs/config_helpers
ssz==0.1.0a2

View File

@ -0,0 +1,98 @@
import random
from eth_utils import (
to_tuple,
)
import ssz
from ssz.sedes import (
UInt,
)
from renderers import (
render_test_case,
)
random.seed(0)
BIT_SIZES = [8, 16, 32, 64, 128, 256]
RANDOM_TEST_CASES_PER_BIT_SIZE = 10
RANDOM_TEST_CASES_PER_LENGTH = 3
def get_random_bytes(length):
return bytes(random.randint(0, 255) for _ in range(length))
@to_tuple
def generate_random_uint_test_cases():
for bit_size in BIT_SIZES:
sedes = UInt(bit_size)
for _ in range(RANDOM_TEST_CASES_PER_BIT_SIZE):
value = random.randrange(0, 2**bit_size)
serial = ssz.encode(value, sedes)
# Note: create the tags anew in each loop cycle; otherwise ruamel emits YAML
# references, which make the resulting file harder to read
tags = tuple(["atomic", "uint", "random"])
yield render_test_case(
sedes=sedes,
valid=True,
value=value,
serial=serial,
tags=tags,
)
@to_tuple
def generate_uint_wrong_length_test_cases():
for bit_size in BIT_SIZES:
sedes = UInt(bit_size)
lengths = sorted({
0,
sedes.length // 2,
sedes.length - 1,
sedes.length + 1,
sedes.length * 2,
})
for length in lengths:
for _ in range(RANDOM_TEST_CASES_PER_LENGTH):
tags = tuple(["atomic", "uint", "wrong_length"])
yield render_test_case(
sedes=sedes,
valid=False,
serial=get_random_bytes(length),
tags=tags,
)
@to_tuple
def generate_uint_bounds_test_cases():
common_tags = ("atomic", "uint")
for bit_size in BIT_SIZES:
sedes = UInt(bit_size)
for value, tag in ((0, "uint_lower_bound"), (2 ** bit_size - 1, "uint_upper_bound")):
serial = ssz.encode(value, sedes)
yield render_test_case(
sedes=sedes,
valid=True,
value=value,
serial=serial,
tags=common_tags + (tag,),
)
@to_tuple
def generate_uint_out_of_bounds_test_cases():
common_tags = ("atomic", "uint")
for bit_size in BIT_SIZES:
sedes = UInt(bit_size)
for value, tag in ((-1, "uint_underflow"), (2 ** bit_size, "uint_overflow")):
yield render_test_case(
sedes=sedes,
valid=False,
value=value,
tags=common_tags + (tag,),
)

View File

@ -0,0 +1,6 @@
# SSZ-static
The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ:
the serialization and hashing of ETH 2.0 data types.
Test-format documentation can be found [here](../../specs/test_formats/ssz_static/README.md).

View File

@ -0,0 +1,85 @@
from random import Random
from eth2spec.debug import random_value, encode
from eth2spec.phase0 import spec
from eth2spec.utils.minimal_ssz import (
hash_tree_root,
signing_root,
serialize,
)
from eth_utils import (
to_tuple, to_dict
)
from gen_base import gen_runner, gen_suite, gen_typing
from preset_loader import loader
MAX_BYTES_LENGTH = 100
MAX_LIST_LENGTH = 10
@to_dict
def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMode, chaos: bool):
typ = spec.get_ssz_type_by_name(name)
value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos)
yield "type_name", name
yield "value", encode.encode(value, typ)
yield "serialized", '0x' + serialize(value).hex()
yield "root", '0x' + hash_tree_root(value).hex()
if hasattr(value, "signature"):
yield "signing_root", '0x' + signing_root(value).hex()
@to_tuple
def ssz_static_cases(rng: Random, mode: random_value.RandomizationMode, chaos: bool, count: int):
for type_name in spec.ssz_types:
for i in range(count):
yield create_test_case(rng, type_name, mode, chaos)
def get_ssz_suite(seed: int, config_name: str, mode: random_value.RandomizationMode, chaos: bool, cases_if_random: int):
def ssz_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
# Apply changes to presets, this affects some of the vector types.
presets = loader.load_presets(configs_path, config_name)
spec.apply_constants_preset(presets)
# Reproducible RNG
rng = Random(seed)
random_mode_name = mode.to_name()
suite_name = f"ssz_{config_name}_{random_mode_name}{'_chaos' if chaos else ''}"
count = cases_if_random if chaos or mode.is_changing() else 1
print(f"generating SSZ-static suite ({count} cases per ssz type): {suite_name}")
return (suite_name, "core", gen_suite.render_suite(
title=f"ssz testing, with {config_name} config, randomized with mode {random_mode_name}{' and with chaos applied' if chaos else ''}",
summary="Test suite for ssz serialization and hash-tree-root",
forks_timeline="testing",
forks=["phase0"],
config=config_name,
runner="ssz",
handler="static",
test_cases=ssz_static_cases(rng, mode, chaos, count)))
return ssz_suite
if __name__ == "__main__":
# [(seed, config name, randomization mode, chaos on/off, cases_if_random)]
settings = []
seed = 1
for mode in random_value.RandomizationMode:
settings.append((seed, "minimal", mode, False, 30))
seed += 1
settings.append((seed, "minimal", random_value.RandomizationMode.mode_random, True, 30))
seed += 1
settings.append((seed, "mainnet", random_value.RandomizationMode.mode_random, False, 5))
seed += 1
print("Settings: %d, SSZ-types: %d" % (len(settings), len(spec.ssz_types)))
gen_runner.run_generator("ssz_static", [
get_ssz_suite(seed, config_name, mode, chaos, cases_if_random)
for (seed, config_name, mode, chaos, cases_if_random) in settings
])

View File

@ -0,0 +1,4 @@
eth-utils==1.4.1
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -0,0 +1,19 @@
# ETH 2.0 config helpers
`preset_loader`: A utility for loading constants presets.
See [Constants-presets documentation](../../configs/constants_presets/README.md).
Usage:
```python
configs_path = 'configs/'
...
import preset_loader
from eth2spec.phase0 import spec
my_presets = preset_loader.load_presets(configs_path, 'mainnet')
spec.apply_constants_preset(my_presets)
```
WARNING: this overwrites globals; make sure to prevent accidental collisions with other code that uses the same imported spec package.

View File

@ -0,0 +1,25 @@
from typing import Dict, Any
from ruamel.yaml import (
YAML,
)
from pathlib import Path
from os.path import join
def load_presets(configs_dir, presets_name) -> Dict[str, Any]:
"""
Loads the given preset.
:param configs_dir: The directory containing the configs, with a "constant_presets" subdirectory.
:param presets_name: The name of the presets file to load, without extension. (lowercase snake_case)
:return: Dictionary, mapping of constant-name -> constant-value
"""
path = Path(join(configs_dir, 'constant_presets', presets_name+'.yaml'))
yaml = YAML(typ='base')
loaded = yaml.load(path)
out = dict()
for k, v in loaded.items():
if v.startswith("0x"):
out[k] = bytes.fromhex(v[2:])
else:
out[k] = int(v)
return out
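A self-contained usage sketch (assumed behavior, not part of the diff): with the `base` loader every scalar is read as a string, so plain integers round-trip through `int(...)` and `0x`-prefixed values through `bytes.fromhex(...)`.

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as configs_dir:
    os.makedirs(os.path.join(configs_dir, 'constant_presets'))
    with open(os.path.join(configs_dir, 'constant_presets', 'tiny.yaml'), 'w') as f:
        f.write('SHARD_COUNT: 8\nGENESIS_FORK_VERSION: 0x00000000\n')
    presets = load_presets(configs_dir, 'tiny')
    assert presets == {'SHARD_COUNT': 8, 'GENESIS_FORK_VERSION': b'\x00' * 4}
```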

View File

@ -0,0 +1,20 @@
from setuptools import setup, find_packages
deps = {
'preset_loader': [
"ruamel.yaml==0.15.87",
],
}
deps['dev'] = (
deps['preset_loader']
)
install_requires = deps['preset_loader']
setup(
name='config_helpers',
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=install_requires,
)

View File

@ -0,0 +1,5 @@
# ETH 2.0 test generator helpers
`gen_base`: A utility for quickly writing new test-suite generators.
See [Generators documentation](../../test_generators/README.md).
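A minimal generator sketch (illustrative, built only from the `gen_base` APIs added below); the suite content is made up, and the script is invoked with the `-o`/`-c` flags that `run_generator` defines:

```python
from gen_base import gen_runner, gen_suite, gen_typing

def example_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    # A single hard-coded test case, just to show the plumbing.
    return ("example", "core", gen_suite.render_suite(
        title="Example suite",
        summary="One trivial test case",
        forks_timeline="testing",
        forks=["phase0"],
        config="minimal",
        runner="example",
        handler="core",
        test_cases=[{"input": 1, "output": 1}]))

if __name__ == "__main__":
    gen_runner.run_generator("example", [example_suite])
```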

View File

@ -0,0 +1,115 @@
import argparse
from pathlib import Path
import sys
from typing import List
from ruamel.yaml import (
YAML,
)
from gen_base.gen_typing import TestSuiteCreator
def validate_output_dir(path_str):
path = Path(path_str)
if not path.exists():
raise argparse.ArgumentTypeError("Output directory must exist")
if not path.is_dir():
raise argparse.ArgumentTypeError("Output path must lead to a directory")
return path
def validate_configs_dir(path_str):
path = Path(path_str)
if not path.exists():
raise argparse.ArgumentTypeError("Configs directory must exist")
if not path.is_dir():
raise argparse.ArgumentTypeError("Config path must lead to a directory")
if not Path(path, "constant_presets").exists():
raise argparse.ArgumentTypeError("Constant Presets directory must exist")
if not Path(path, "constant_presets").is_dir():
raise argparse.ArgumentTypeError("Constant Presets path must lead to a directory")
if not Path(path, "fork_timelines").exists():
raise argparse.ArgumentTypeError("Fork Timelines directory must exist")
if not Path(path, "fork_timelines").is_dir():
raise argparse.ArgumentTypeError("Fork Timelines path must lead to a directory")
return path
def run_generator(generator_name, suite_creators: List[TestSuiteCreator]):
"""
Implementation for a general test generator.
:param generator_name: The name of the generator. (lowercase snake_case)
:param suite_creators: A list of suite creators, each of these builds a list of test cases.
:return:
"""
parser = argparse.ArgumentParser(
prog="gen-" + generator_name,
description=f"Generate YAML test suite files for {generator_name}",
)
parser.add_argument(
"-o",
"--output-dir",
dest="output_dir",
required=True,
type=validate_output_dir,
help="directory into which the generated YAML files will be dumped"
)
parser.add_argument(
"-f",
"--force",
action="store_true",
default=False,
help="if set overwrite test files if they exist",
)
parser.add_argument(
"-c",
"--configs-path",
dest="configs_path",
required=True,
type=validate_configs_dir,
help="specify the path of the configs directory (containing constants_presets and fork_timelines)",
)
args = parser.parse_args()
output_dir = args.output_dir
if not args.force:
file_mode = "x"
else:
file_mode = "w"
yaml = YAML(pure=True)
yaml.default_flow_style = None
print(f"Generating tests for {generator_name}, creating {len(suite_creators)} test suite files...")
print(f"Reading config presets and fork timelines from {args.configs_path}")
for suite_creator in suite_creators:
(output_name, handler, suite) = suite_creator(args.configs_path)
handler_output_dir = Path(output_dir) / Path(handler)
try:
if not handler_output_dir.exists():
handler_output_dir.mkdir()
except FileNotFoundError as e:
sys.exit(f'Error when creating handler dir {handler} for test "{suite["title"]}" ({e})')
out_path = handler_output_dir / Path(output_name + '.yaml')
try:
with out_path.open(file_mode) as f:
yaml.dump(suite, f)
except IOError as e:
sys.exit(f'Error when dumping test "{suite["title"]}" ({e})')
print("done.")

View File

@ -0,0 +1,22 @@
from typing import Iterable
from eth_utils import to_dict
from gen_base.gen_typing import TestCase
@to_dict
def render_suite(*,
title: str, summary: str,
forks_timeline: str, forks: Iterable[str],
config: str,
runner: str,
handler: str,
test_cases: Iterable[TestCase]):
yield "title", title
yield "summary", summary
yield "forks_timeline", forks_timeline,
yield "forks", forks
yield "config", config
yield "runner", runner
yield "handler", handler
yield "test_cases", test_cases

View File

@ -0,0 +1,8 @@
from typing import Callable, Dict, Tuple, Any
TestCase = Dict[str, Any]
TestSuite = Dict[str, Any]
# Tuple: (output name, handler name, suite) -- output name excl. ".yaml"
TestSuiteOutput = Tuple[str, str, TestSuite]
# Args: <presets path>
TestSuiteCreator = Callable[[str], TestSuiteOutput]

View File

@ -0,0 +1,21 @@
from setuptools import setup, find_packages
deps = {
'gen_base': [
"ruamel.yaml==0.15.87",
"eth-utils==1.4.1",
],
}
deps['dev'] = (
deps['gen_base']
)
install_requires = deps['gen_base']
setup(
name='gen_helpers',
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=install_requires,
)

View File

@ -0,0 +1,61 @@
# ETH 2.0 PySpec
The Python executable spec is built from the ETH 2.0 specification,
complemented with the necessary helper functions for hashing, BLS, and more.
With this executable spec,
test generators can easily create test vectors for client implementations,
and the spec itself can be verified to be consistent and coherent through sanity tests implemented with pytest.
## Building
All the dynamic parts of the spec can be built at once with `make pyspec`.
Alternatively, you can build a subset of the pyspec: `make phase0`.
Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2spec/phase0/spec.py`.
## Py-tests
After building, you can install the dependencies for running the `pyspec` tests with `make install_test`.
These tests are not intended for client consumption; they are sanity checks that verify the spec itself is consistent.
### How to run tests
#### Automated
Run `make test` from the root of the spec repository.
#### Manual
From within the `pyspec` folder:
Install dependencies:
```bash
python3 -m venv venv
. venv/bin/activate
pip3 install -e .[dev]
```
Note: make sure to run `make -B pyspec` from the root of the specs repository,
to build the parts of the pyspec module derived from the markdown specs.
The `-B` flag may be helpful to force-overwrite the `pyspec` output after you have made a change to the markdown source files.
Run the tests:
```bash
pytest --config=minimal
```
## Contributing
Contributions are welcome, but consider implementing your idea as part of the spec itself first.
The pyspec is not a replacement for the spec.
## License
Same as the spec itself, see [LICENSE](../../LICENSE) file in spec repository root.

View File

@ -0,0 +1,28 @@
from eth2spec.utils.minimal_ssz import hash_tree_root
def decode(json, typ):
if isinstance(typ, str) and typ[:4] == 'uint':
return json
elif typ == 'bool':
assert json in (True, False)
return json
elif isinstance(typ, list):
return [decode(element, typ[0]) for element in json]
elif isinstance(typ, str) and typ[:4] == 'byte':
return bytes.fromhex(json[2:])
elif hasattr(typ, 'fields'):
temp = {}
for field, subtype in typ.fields.items():
temp[field] = decode(json[field], subtype)
if field + "_hash_tree_root" in json:
assert(json[field + "_hash_tree_root"][2:] ==
hash_tree_root(temp[field], subtype).hex())
ret = typ(**temp)
if "hash_tree_root" in json:
assert(json["hash_tree_root"][2:] ==
hash_tree_root(ret, typ).hex())
return ret
else:
print(json, typ)
raise Exception("Type not recognized")

View File

@ -0,0 +1,28 @@
from eth2spec.utils.minimal_ssz import hash_tree_root
def encode(value, typ, include_hash_tree_roots=False):
if isinstance(typ, str) and typ[:4] == 'uint':
if typ[4:] in ('128', '256'):
return str(value)
return value
elif typ == 'bool':
assert value in (True, False)
return value
elif isinstance(typ, list):
return [encode(element, typ[0], include_hash_tree_roots) for element in value]
elif isinstance(typ, str) and typ[:4] == 'byte':
return '0x' + value.hex()
elif hasattr(typ, 'fields'):
ret = {}
for field, subtype in typ.fields.items():
ret[field] = encode(getattr(value, field), subtype, include_hash_tree_roots)
if include_hash_tree_roots:
ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex()
if include_hash_tree_roots:
ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex()
return ret
else:
print(value, typ)
raise Exception("Type not recognized")

View File

@ -0,0 +1,137 @@
from random import Random
from typing import Any
from enum import Enum
UINT_SIZES = [8, 16, 32, 64, 128, 256]
basic_types = ["uint%d" % v for v in UINT_SIZES] + ['bool', 'byte']
random_mode_names = ["random", "zero", "max", "nil", "one", "lengthy"]
class RandomizationMode(Enum):
# random content / length
mode_random = 0
# Zero-value
mode_zero = 1
# Maximum value; count limited to 1, however
mode_max = 2
# Return 0 values, i.e. empty
mode_nil_count = 3
# Return 1 value, random content
mode_one_count = 4
# Return max amount of values, random content
mode_max_count = 5
def to_name(self):
return random_mode_names[self.value]
def is_changing(self):
return self.value in [0, 4, 5]
def get_random_ssz_object(rng: Random, typ: Any, max_bytes_length: int, max_list_length: int, mode: RandomizationMode, chaos: bool) -> Any:
"""
Create an object for a given type, filled with random data.
:param rng: The random number generator to use.
:param typ: The type to instantiate
:param max_bytes_length: the max. length for a random bytes array
:param max_list_length: the max. length for a random list
:param mode: how to randomize
:param chaos: if true, the randomization-mode will be randomly changed
:return: the random object instance, of the given type.
"""
if chaos:
mode = rng.choice(list(RandomizationMode))
if isinstance(typ, str):
# Bytes array
if typ == 'bytes':
if mode == RandomizationMode.mode_nil_count:
return b''
if mode == RandomizationMode.mode_max_count:
return get_random_bytes_list(rng, max_bytes_length)
if mode == RandomizationMode.mode_one_count:
return get_random_bytes_list(rng, 1)
if mode == RandomizationMode.mode_zero:
return b'\x00'
if mode == RandomizationMode.mode_max:
return b'\xff'
return get_random_bytes_list(rng, rng.randint(0, max_bytes_length))
elif typ[:5] == 'bytes' and len(typ) > 5:
length = int(typ[5:])
# Sanity, don't generate absurdly big random values
# If a client is aiming to performance-test, they should create a benchmark suite.
assert length <= max_bytes_length
if mode == RandomizationMode.mode_zero:
return b'\x00' * length
if mode == RandomizationMode.mode_max:
return b'\xff' * length
return get_random_bytes_list(rng, length)
# Basic types
else:
if mode == RandomizationMode.mode_zero:
return get_min_basic_value(typ)
if mode == RandomizationMode.mode_max:
return get_max_basic_value(typ)
return get_random_basic_value(rng, typ)
# Vector:
elif isinstance(typ, list) and len(typ) == 2:
return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) for _ in range(typ[1])]
# List:
elif isinstance(typ, list) and len(typ) == 1:
length = rng.randint(0, max_list_length)
if mode == RandomizationMode.mode_one_count:
length = 1
if mode == RandomizationMode.mode_max_count:
length = max_list_length
return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) for _ in range(length)]
# Container:
elif hasattr(typ, 'fields'):
return typ(**{field: get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode, chaos) for field, subtype in typ.fields.items()})
else:
print(typ)
raise Exception("Type not recognized")
def get_random_bytes_list(rng: Random, length: int) -> bytes:
return bytes(rng.getrandbits(8) for _ in range(length))
def get_random_basic_value(rng: Random, typ: str) -> Any:
if typ == 'bool':
return rng.choice((True, False))
if typ[:4] == 'uint':
size = int(typ[4:])
assert size in UINT_SIZES
return rng.randint(0, 2**size - 1)
if typ == 'byte':
return rng.randint(0, 255)
else:
raise ValueError("Not a basic type")
def get_min_basic_value(typ: str) -> Any:
if typ == 'bool':
return False
if typ[:4] == 'uint':
size = int(typ[4:])
assert size in UINT_SIZES
return 0
if typ == 'byte':
return 0x00
else:
raise ValueError("Not a basic type")
def get_max_basic_value(typ: str) -> Any:
if typ == 'bool':
return True
if typ[:4] == 'uint':
size = int(typ[4:])
assert size in UINT_SIZES
return 2**size - 1
if typ == 'byte':
return 0xff
else:
raise ValueError("Not a basic type")

View File

@ -1,62 +1,74 @@
from . import spec
from typing import ( # noqa: F401
from typing import (
Any,
Callable,
List,
NewType,
Tuple,
List
)
from .spec import (
BeaconState,
BeaconBlock,
Slot,
)
def process_transaction_type(state: BeaconState,
transactions: List[Any],
max_transactions: int,
tx_fn: Callable[[BeaconState, Any], None]) -> None:
assert len(transactions) <= max_transactions
for transaction in transactions:
tx_fn(state, transaction)
def expected_deposit_count(state: BeaconState) -> int:
return min(
spec.MAX_DEPOSITS,
state.latest_eth1_data.deposit_count - state.deposit_index
)
def process_transactions(state: BeaconState, block: BeaconBlock) -> None:
process_transaction_type(
def process_operation_type(state: BeaconState,
operations: List[Any],
max_operations: int,
tx_fn: Callable[[BeaconState, Any], None]) -> None:
assert len(operations) <= max_operations
for operation in operations:
tx_fn(state, operation)
def process_operations(state: BeaconState, block: BeaconBlock) -> None:
process_operation_type(
state,
block.body.proposer_slashings,
spec.MAX_PROPOSER_SLASHINGS,
spec.process_proposer_slashing,
)
process_transaction_type(
process_operation_type(
state,
block.body.attester_slashings,
spec.MAX_ATTESTER_SLASHINGS,
spec.process_attester_slashing,
)
process_transaction_type(
process_operation_type(
state,
block.body.attestations,
spec.MAX_ATTESTATIONS,
spec.process_attestation,
)
process_transaction_type(
assert len(block.body.deposits) == expected_deposit_count(state)
process_operation_type(
state,
block.body.deposits,
spec.MAX_DEPOSITS,
spec.process_deposit,
)
process_transaction_type(
process_operation_type(
state,
block.body.voluntary_exits,
spec.MAX_VOLUNTARY_EXITS,
spec.process_voluntary_exit,
)
assert len(block.body.transfers) == len(set(block.body.transfers))
process_transaction_type(
process_operation_type(
state,
block.body.transfers,
spec.MAX_TRANSFERS,
@ -71,30 +83,30 @@ def process_block(state: BeaconState,
spec.process_randao(state, block)
spec.process_eth1_data(state, block)
process_transactions(state, block)
process_operations(state, block)
if verify_state_root:
spec.verify_block_state_root(state, block)
def process_epoch_transition(state: BeaconState) -> None:
spec.update_justification_and_finalization(state)
spec.process_justification_and_finalization(state)
spec.process_crosslinks(state)
spec.maybe_reset_eth1_period(state)
spec.apply_rewards(state)
spec.process_ejections(state)
spec.update_registry_and_shuffling_data(state)
spec.process_rewards_and_penalties(state)
spec.process_registry_updates(state)
spec.process_slashings(state)
spec.process_exit_queue(state)
spec.finish_epoch_update(state)
spec.process_final_updates(state)
def state_transition_to(state: BeaconState, up_to: Slot) -> BeaconState:
while state.slot < up_to:
spec.cache_state(state)
if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:
process_epoch_transition(state)
spec.advance_slot(state)
def state_transition(state: BeaconState,
block: BeaconBlock,
verify_state_root: bool=False) -> BeaconState:
while state.slot < block.slot:
spec.cache_state(state)
if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:
process_epoch_transition(state)
spec.advance_slot(state)
if block.slot == state.slot:
process_block(state, block, verify_state_root)
state_transition_to(state, block.slot)
process_block(state, block, verify_state_root)

View File

@ -0,0 +1,6 @@
from hashlib import sha256
# from eth_utils import keccak
def hash(x): return sha256(x).digest()
# def hash(x): return keccak(x)

View File

@ -1,5 +1,6 @@
from .hash_function import hash
from typing import Any
from .hash_function import hash
BYTES_PER_CHUNK = 32
BYTES_PER_LENGTH_PREFIX = 4
@ -9,16 +10,14 @@ ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK
def SSZType(fields):
class SSZObject():
def __init__(self, **kwargs):
for f in fields:
for f, t in fields.items():
if f not in kwargs:
raise Exception("Missing constructor argument: %s" % f)
setattr(self, f, kwargs[f])
setattr(self, f, get_zero_value(t))
else:
setattr(self, f, kwargs[f])
def __eq__(self, other):
return (
self.fields == other.fields and
self.serialize() == other.serialize()
)
return self.fields == other.fields and self.serialize() == other.serialize()
def __hash__(self):
return int.from_bytes(self.hash_tree_root(), byteorder="little")
@ -58,18 +57,40 @@ class Vector():
def is_basic(typ):
return isinstance(typ, str) and (typ[:4] in ('uint', 'bool') or typ == 'byte')
# if not a string, it is a complex, and cannot be basic
if not isinstance(typ, str):
return False
# "uintN": N-bit unsigned integer (where N in [8, 16, 32, 64, 128, 256])
elif typ[:4] == 'uint' and typ[4:] in ['8', '16', '32', '64', '128', '256']:
return True
# "bool": True or False
elif typ == 'bool':
return True
# alias: "byte" -> "uint8"
elif typ == 'byte':
return True
# default
else:
return False
def is_constant_sized(typ):
# basic objects are fixed size by definition
if is_basic(typ):
return True
# dynamic size array type, "list": [elem_type].
# Not constant size by definition.
elif isinstance(typ, list) and len(typ) == 1:
return is_constant_sized(typ[0])
elif isinstance(typ, list) and len(typ) == 2:
return False
# fixed size array type, "vector": [elem_type, length]
# Constant size, but only if the elements are.
elif isinstance(typ, list) and len(typ) == 2:
return is_constant_sized(typ[0])
# bytes array (fixed or dynamic size)
elif isinstance(typ, str) and typ[:5] == 'bytes':
return len(typ) > 5
# if no length suffix, it has a dynamic size
return typ != 'bytes'
# containers are only constant-size if all of the fields are constant size.
elif hasattr(typ, 'fields'):
for subtype in typ.fields.values():
if not is_constant_sized(subtype):
@ -90,40 +111,98 @@ def coerce_to_bytes(x):
raise Exception("Expecting bytes")
def encode_bytes(value):
serialized_bytes = coerce_to_bytes(value)
assert len(serialized_bytes) < 2 ** (8 * BYTES_PER_LENGTH_PREFIX)
serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little')
return serialized_length + serialized_bytes
def encode_variable_size_container(values, types):
return encode_bytes(encode_fixed_size_container(values, types))
def encode_fixed_size_container(values, types):
return b''.join([serialize_value(v, typ) for (v, typ) in zip(values, types)])
def serialize_value(value, typ=None):
if typ is None:
typ = infer_type(value)
# "uintN"
if isinstance(typ, str) and typ[:4] == 'uint':
length = int(typ[4:])
assert length in (8, 16, 32, 64, 128, 256)
return value.to_bytes(length // 8, 'little')
elif typ == 'bool':
# "bool"
elif isinstance(typ, str) and typ == 'bool':
assert value in (True, False)
return b'\x01' if value is True else b'\x00'
elif (isinstance(typ, list) and len(typ) == 1) or typ == 'bytes':
serialized_bytes = coerce_to_bytes(value) if typ == 'bytes' else b''.join([serialize_value(element, typ[0]) for element in value])
assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX)
serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little')
return serialized_length + serialized_bytes
# Vector
elif isinstance(typ, list) and len(typ) == 2:
# (regardless of element type, sanity-check if the length reported in the vector type matches the value length)
assert len(value) == typ[1]
return b''.join([serialize_value(element, typ[0]) for element in value])
# If value is fixed-size (i.e. element type is fixed-size):
if is_constant_sized(typ):
return encode_fixed_size_container(value, [typ[0]] * len(value))
# If value is variable-size (i.e. element type is variable-size)
else:
return encode_variable_size_container(value, [typ[0]] * len(value))
# "bytes" (variable size)
elif isinstance(typ, str) and typ == 'bytes':
return encode_bytes(value)
# List
elif isinstance(typ, list) and len(typ) == 1:
return encode_variable_size_container(value, [typ[0]] * len(value))
# "bytesN" (fixed size)
elif isinstance(typ, str) and len(typ) > 5 and typ[:5] == 'bytes':
assert len(value) == int(typ[5:]), (value, int(typ[5:]))
return coerce_to_bytes(value)
# containers
elif hasattr(typ, 'fields'):
serialized_bytes = b''.join([serialize_value(getattr(value, field), subtype) for field, subtype in typ.fields.items()])
values = [getattr(value, field) for field in typ.fields.keys()]
types = list(typ.fields.values())
if is_constant_sized(typ):
return serialized_bytes
return encode_fixed_size_container(values, types)
else:
assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX)
serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little')
return serialized_length + serialized_bytes
return encode_variable_size_container(values, types)
else:
print(value, typ)
raise Exception("Type not recognized")
def get_zero_value(typ: Any) -> Any:
if isinstance(typ, str):
# Bytes array
if typ == 'bytes':
return b''
# bytesN
elif typ[:5] == 'bytes' and len(typ) > 5:
length = int(typ[5:])
return b'\x00' * length
# Basic types
elif typ == 'bool':
return False
elif typ[:4] == 'uint':
return 0
elif typ == 'byte':
return 0x00
else:
raise ValueError("Type not recognized")
# Vector:
elif isinstance(typ, list) and len(typ) == 2:
return [get_zero_value(typ[0]) for _ in range(typ[1])]
# List:
elif isinstance(typ, list) and len(typ) == 1:
return []
# Container:
elif hasattr(typ, 'fields'):
return typ(**{field: get_zero_value(subtype) for field, subtype in typ.fields.items()})
else:
print(typ)
raise Exception("Type not recognized")
def chunkify(bytez):
bytez += b'\x00' * (-len(bytez) % BYTES_PER_CHUNK)
return [bytez[i:i + 32] for i in range(0, len(bytez), 32)]
@ -152,12 +231,27 @@ def mix_in_length(root, length):
def infer_type(value):
"""
Note: defaults to uint64 for integer type inference due to lack of information.
Other integer sizes are still supported, see spec.
:param value: The value to infer a SSZ type for.
:return: The SSZ type.
"""
if hasattr(value.__class__, 'fields'):
return value.__class__
elif isinstance(value, Vector):
return [infer_type(value[0]) if len(value) > 0 else 'uint64', len(value)]
if len(value) > 0:
return [infer_type(value[0]), len(value)]
else:
# Element type does not matter too much,
# assumed to be a basic type for size-encoding purposes, vector is empty.
return ['uint64']
elif isinstance(value, list):
return [infer_type(value[0])] if len(value) > 0 else ['uint64']
if len(value) > 0:
return [infer_type(value[0])]
else:
# Element type does not matter, list-content size will be encoded regardless, list is empty.
return ['uint64']
elif isinstance(value, (bytes, str)):
return 'bytes'
elif isinstance(value, int):
@ -169,24 +263,41 @@ def infer_type(value):
def hash_tree_root(value, typ=None):
if typ is None:
typ = infer_type(value)
# -------------------------------------
# merkleize(pack(value))
# basic object: merkleize packed version (merkleization pads it to 32 bytes if it is not already)
if is_basic(typ):
return merkleize(pack([value], typ))
elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]):
return mix_in_length(merkleize(pack(value, typ[0])), len(value))
elif isinstance(typ, list) and len(typ) == 1 and not is_basic(typ[0]):
return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value))
# or a vector of basic objects
elif isinstance(typ, list) and len(typ) == 2 and is_basic(typ[0]):
assert len(value) == typ[1]
return merkleize(pack(value, typ[0]))
# -------------------------------------
# mix_in_length(merkleize(pack(value)), len(value))
# if value is a list of basic objects
elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]):
return mix_in_length(merkleize(pack(value, typ[0])), len(value))
# (needs some extra work for non-fixed-sized bytes array)
elif typ == 'bytes':
return mix_in_length(merkleize(chunkify(coerce_to_bytes(value))), len(value))
# -------------------------------------
# merkleize([hash_tree_root(element) for element in value])
# if value is a vector of composite objects
elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]):
return merkleize([hash_tree_root(element, typ[0]) for element in value])
# (needs some extra work for fixed-sized bytes array)
elif isinstance(typ, str) and typ[:5] == 'bytes' and len(typ) > 5:
assert len(value) == int(typ[5:])
return merkleize(chunkify(coerce_to_bytes(value)))
elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]):
return merkleize([hash_tree_root(element, typ[0]) for element in value])
# or a container
elif hasattr(typ, 'fields'):
return merkleize([hash_tree_root(getattr(value, field), subtype) for field, subtype in typ.fields.items()])
# -------------------------------------
# mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value))
# if value is a list of composite objects
elif isinstance(typ, list) and len(typ) == 1 and not is_basic(typ[0]):
return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value))
# -------------------------------------
else:
raise Exception("Type not recognized")
@ -205,7 +316,7 @@ def truncate(container):
return truncated_class(**kwargs)
def signed_root(container):
def signing_root(container):
return hash_tree_root(truncate(container))

test_libs/pyspec/setup.py Normal file
View File

@ -0,0 +1,28 @@
from setuptools import setup, find_packages
deps = {
'pyspec': [
"eth-utils>=1.3.0,<2",
"eth-typing>=2.1.0,<3.0.0",
"pycryptodome==3.7.3",
"py_ecc>=1.6.0",
],
'test': [
"pytest>=3.6,<3.7",
],
}
deps['dev'] = (
deps['pyspec'] +
deps['test']
)
install_requires = deps['pyspec']
setup(
name='pyspec',
packages=find_packages(exclude=["tests", "tests.*"]),
install_requires=install_requires,
extras_require=deps,
)

View File

View File

@ -0,0 +1,156 @@
from copy import deepcopy
import pytest
import eth2spec.phase0.spec as spec
from eth2spec.phase0.state_transition import (
state_transition,
)
from eth2spec.phase0.spec import (
get_current_epoch,
process_attestation,
slot_to_epoch,
)
from tests.helpers import (
build_empty_block_for_next_slot,
get_valid_attestation,
next_epoch,
next_slot,
)
# mark entire file as 'attestations'
pytestmark = pytest.mark.attestations
def run_attestation_processing(state, attestation, valid=True):
"""
Run ``process_attestation`` returning the pre and post state.
If ``valid == False``, run expecting ``AssertionError``
"""
post_state = deepcopy(state)
if not valid:
with pytest.raises(AssertionError):
process_attestation(post_state, attestation)
return state, None
process_attestation(post_state, attestation)
current_epoch = get_current_epoch(state)
target_epoch = slot_to_epoch(attestation.data.slot)
if target_epoch == current_epoch:
assert len(post_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1
else:
assert len(post_state.previous_epoch_attestations) == len(state.previous_epoch_attestations) + 1
return state, post_state
def test_success(state):
attestation = get_valid_attestation(state)
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
pre_state, post_state = run_attestation_processing(state, attestation)
return pre_state, attestation, post_state
def test_success_previous_epoch(state):
attestation = get_valid_attestation(state)
block = build_empty_block_for_next_slot(state)
block.slot = state.slot + spec.SLOTS_PER_EPOCH
state_transition(state, block)
pre_state, post_state = run_attestation_processing(state, attestation)
return pre_state, attestation, post_state
def test_before_inclusion_delay(state):
attestation = get_valid_attestation(state)
# do not increment slot, so the inclusion delay is not satisfied
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_after_epoch_slots(state):
attestation = get_valid_attestation(state)
block = build_empty_block_for_next_slot(state)
# increment past latest inclusion slot
block.slot = state.slot + spec.SLOTS_PER_EPOCH + 1
state_transition(state, block)
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_bad_source_epoch(state):
attestation = get_valid_attestation(state)
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
attestation.data.source_epoch += 10
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_bad_source_root(state):
attestation = get_valid_attestation(state)
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
attestation.data.source_root = b'\x42' * 32
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_non_zero_crosslink_data_root(state):
attestation = get_valid_attestation(state)
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
attestation.data.crosslink_data_root = b'\x42' * 32
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_bad_previous_crosslink(state):
next_epoch(state)
attestation = get_valid_attestation(state)
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
next_slot(state)
state.current_crosslinks[attestation.data.shard].epoch += 10
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_non_empty_custody_bitfield(state):
attestation = get_valid_attestation(state)
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield)
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state
def test_empty_aggregation_bitfield(state):
attestation = get_valid_attestation(state)
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield)
pre_state, post_state = run_attestation_processing(state, attestation, False)
return pre_state, attestation, post_state

View File

@ -0,0 +1,117 @@
from copy import deepcopy
import pytest
import eth2spec.phase0.spec as spec
from eth2spec.phase0.spec import (
get_beacon_proposer_index,
process_attester_slashing,
)
from tests.helpers import (
get_balance,
get_valid_attester_slashing,
next_epoch,
)
# mark entire file as 'attester_slashing'
pytestmark = pytest.mark.attester_slashings
def run_attester_slashing_processing(state, attester_slashing, valid=True):
"""
Run ``process_attester_slashing`` returning the pre and post state.
If ``valid == False``, run expecting ``AssertionError``
"""
post_state = deepcopy(state)
if not valid:
with pytest.raises(AssertionError):
process_attester_slashing(post_state, attester_slashing)
return state, None
process_attester_slashing(post_state, attester_slashing)
slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0]
slashed_validator = post_state.validator_registry[slashed_index]
assert slashed_validator.slashed
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
# lost whistleblower reward
assert (
get_balance(post_state, slashed_index) <
get_balance(state, slashed_index)
)
proposer_index = get_beacon_proposer_index(state)
# gained whistleblower reward
assert (
get_balance(post_state, proposer_index) >
get_balance(state, proposer_index)
)
return state, post_state
def test_success_double(state):
attester_slashing = get_valid_attester_slashing(state)
pre_state, post_state = run_attester_slashing_processing(state, attester_slashing)
return pre_state, attester_slashing, post_state
def test_success_surround(state):
next_epoch(state)
state.current_justified_epoch += 1
attester_slashing = get_valid_attester_slashing(state)
# set attestation_1 to surround attestation_2
attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1
attester_slashing.attestation_1.data.slot = attester_slashing.attestation_2.data.slot + spec.SLOTS_PER_EPOCH
pre_state, post_state = run_attester_slashing_processing(state, attester_slashing)
return pre_state, attester_slashing, post_state
def test_same_data(state):
attester_slashing = get_valid_attester_slashing(state)
attester_slashing.attestation_1.data = attester_slashing.attestation_2.data
pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)
return pre_state, attester_slashing, post_state
def test_no_double_or_surround(state):
attester_slashing = get_valid_attester_slashing(state)
attester_slashing.attestation_1.data.slot += spec.SLOTS_PER_EPOCH
pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)
return pre_state, attester_slashing, post_state
def test_participants_already_slashed(state):
attester_slashing = get_valid_attester_slashing(state)
# set all indices to slashed
attestation_1 = attester_slashing.attestation_1
validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
for index in validator_indices:
state.validator_registry[index].slashed = True
pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)
return pre_state, attester_slashing, post_state
def test_custody_bit_0_and_1(state):
attester_slashing = get_valid_attester_slashing(state)
attester_slashing.attestation_1.custody_bit_1_indices = (
attester_slashing.attestation_1.custody_bit_0_indices
)
pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)
return pre_state, attester_slashing, post_state

View File

@ -2,14 +2,15 @@ from copy import deepcopy
import pytest
from build.phase0.spec import (
from eth2spec.phase0.spec import (
get_beacon_proposer_index,
cache_state,
advance_slot,
process_block_header,
)
from tests.phase0.helpers import (
from tests.helpers import (
build_empty_block_for_next_slot,
next_slot,
)
# mark entire file as 'header'
@ -54,7 +55,22 @@ def test_invalid_slot(state):
def test_invalid_previous_block_root(state):
block = build_empty_block_for_next_slot(state)
block.previous_block_root = b'\12'*32 # invalid prev root
block.previous_block_root = b'\12' * 32 # invalid prev root
pre_state, post_state = run_block_header_processing(state, block, valid=False)
return pre_state, block, None
def test_proposer_slashed(state):
# use stub state to get proposer index of next slot
stub_state = deepcopy(state)
next_slot(stub_state)
proposer_index = get_beacon_proposer_index(stub_state)
# set proposer to slashed
state.validator_registry[proposer_index].slashed = True
block = build_empty_block_for_next_slot(state)
pre_state, post_state = run_block_header_processing(state, block, valid=False)
return pre_state, block, None

View File

@ -1,21 +1,22 @@
from copy import deepcopy
import pytest
import build.phase0.spec as spec
import eth2spec.phase0.spec as spec
from build.phase0.spec import (
from eth2spec.phase0.spec import (
ZERO_HASH,
process_deposit,
)
from tests.phase0.helpers import (
from tests.helpers import (
get_balance,
build_deposit,
privkeys,
pubkeys,
)
# mark entire file as 'voluntary_exits'
pytestmark = pytest.mark.voluntary_exits
# mark entire file as 'deposits'
pytestmark = pytest.mark.deposits
def test_success(state):
@ -31,7 +32,7 @@ def test_success(state):
deposit_data_leaves,
pubkey,
privkey,
spec.MAX_DEPOSIT_AMOUNT,
spec.MAX_EFFECTIVE_BALANCE,
)
pre_state.latest_eth1_data.deposit_root = root
@ -42,8 +43,9 @@ def test_success(state):
process_deposit(post_state, deposit)
assert len(post_state.validator_registry) == len(state.validator_registry) + 1
assert len(post_state.validator_balances) == len(state.validator_balances) + 1
assert len(post_state.balances) == len(state.balances) + 1
assert post_state.validator_registry[index].pubkey == pubkeys[index]
assert get_balance(post_state, index) == spec.MAX_EFFECTIVE_BALANCE
assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
return pre_state, deposit, post_state
@ -54,7 +56,7 @@ def test_success_top_up(state):
deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
validator_index = 0
amount = spec.MAX_DEPOSIT_AMOUNT // 4
amount = spec.MAX_EFFECTIVE_BALANCE // 4
pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
deposit, root, deposit_data_leaves = build_deposit(
@ -67,16 +69,16 @@ def test_success_top_up(state):
pre_state.latest_eth1_data.deposit_root = root
pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
pre_balance = pre_state.validator_balances[validator_index]
pre_balance = get_balance(pre_state, validator_index)
post_state = deepcopy(pre_state)
process_deposit(post_state, deposit)
assert len(post_state.validator_registry) == len(state.validator_registry)
assert len(post_state.validator_balances) == len(state.validator_balances)
assert len(post_state.balances) == len(state.balances)
assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
assert post_state.validator_balances[validator_index] == pre_balance + amount
assert get_balance(post_state, validator_index) == pre_balance + amount
return pre_state, deposit, post_state
@ -85,7 +87,6 @@ def test_wrong_index(state):
pre_state = deepcopy(state)
deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
index = len(deposit_data_leaves)
pubkey = pubkeys[index]
privkey = privkeys[index]
@ -94,7 +95,7 @@ def test_wrong_index(state):
deposit_data_leaves,
pubkey,
privkey,
spec.MAX_DEPOSIT_AMOUNT,
spec.MAX_EFFECTIVE_BALANCE,
)
# mess up deposit_index
@ -123,7 +124,7 @@ def test_bad_merkle_proof(state):
deposit_data_leaves,
pubkey,
privkey,
spec.MAX_DEPOSIT_AMOUNT,
spec.MAX_EFFECTIVE_BALANCE,
)
# mess up merkle branch

View File

@ -0,0 +1,96 @@
from copy import deepcopy
import pytest
import eth2spec.phase0.spec as spec
from eth2spec.phase0.spec import (
get_current_epoch,
process_proposer_slashing,
)
from tests.helpers import (
get_balance,
get_valid_proposer_slashing,
)
# mark entire file as 'proposer_slashings'
pytestmark = pytest.mark.proposer_slashings
def run_proposer_slashing_processing(state, proposer_slashing, valid=True):
"""
Run ``process_proposer_slashing`` returning the pre and post state.
If ``valid == False``, run expecting ``AssertionError``
"""
post_state = deepcopy(state)
if not valid:
with pytest.raises(AssertionError):
process_proposer_slashing(post_state, proposer_slashing)
return state, None
process_proposer_slashing(post_state, proposer_slashing)
slashed_validator = post_state.validator_registry[proposer_slashing.proposer_index]
assert slashed_validator.slashed
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
# lost whistleblower reward
assert (
get_balance(post_state, proposer_slashing.proposer_index) <
get_balance(state, proposer_slashing.proposer_index)
)
return state, post_state
def test_success(state):
proposer_slashing = get_valid_proposer_slashing(state)
pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing)
return pre_state, proposer_slashing, post_state
def test_epochs_are_different(state):
proposer_slashing = get_valid_proposer_slashing(state)
# set slots to be in different epochs
proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH
pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False)
return pre_state, proposer_slashing, post_state
def test_headers_are_same(state):
proposer_slashing = get_valid_proposer_slashing(state)
# set headers to be the same
proposer_slashing.header_2 = proposer_slashing.header_1
pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False)
return pre_state, proposer_slashing, post_state
def test_proposer_is_slashed(state):
proposer_slashing = get_valid_proposer_slashing(state)
# set proposer to slashed
state.validator_registry[proposer_slashing.proposer_index].slashed = True
pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False)
return pre_state, proposer_slashing, post_state
def test_proposer_is_withdrawn(state):
proposer_slashing = get_valid_proposer_slashing(state)
# set proposer withdrawable_epoch in past
current_epoch = get_current_epoch(state)
proposer_index = proposer_slashing.proposer_index
state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1
pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False)
return pre_state, proposer_slashing, post_state

View File

@ -0,0 +1,141 @@
from copy import deepcopy
import pytest
import eth2spec.phase0.spec as spec
from eth2spec.phase0.spec import (
get_active_validator_indices,
get_beacon_proposer_index,
get_current_epoch,
process_transfer,
)
from tests.helpers import (
get_valid_transfer,
next_epoch,
)
# mark entire file as 'transfers'
pytestmark = pytest.mark.transfers
def run_transfer_processing(state, transfer, valid=True):
"""
Run ``process_transfer`` returning the pre and post state.
If ``valid == False``, run expecting ``AssertionError``
"""
post_state = deepcopy(state)
if not valid:
with pytest.raises(AssertionError):
process_transfer(post_state, transfer)
return state, None
process_transfer(post_state, transfer)
proposer_index = get_beacon_proposer_index(state)
pre_transfer_sender_balance = state.balances[transfer.sender]
pre_transfer_recipient_balance = state.balances[transfer.recipient]
pre_transfer_proposer_balance = state.balances[proposer_index]
sender_balance = post_state.balances[transfer.sender]
recipient_balance = post_state.balances[transfer.recipient]
assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee
assert recipient_balance == pre_transfer_recipient_balance + transfer.amount
assert post_state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee
return state, post_state
def test_success_non_activated(state):
transfer = get_valid_transfer(state)
# un-activate so validator can transfer
state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
pre_state, post_state = run_transfer_processing(state, transfer)
return pre_state, transfer, post_state
def test_success_withdrawable(state):
next_epoch(state)
transfer = get_valid_transfer(state)
# withdrawable_epoch in past so can transfer
state.validator_registry[transfer.sender].withdrawable_epoch = get_current_epoch(state) - 1
pre_state, post_state = run_transfer_processing(state, transfer)
return pre_state, transfer, post_state
def test_success_active_above_max_effective(state):
sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
amount = spec.MAX_EFFECTIVE_BALANCE // 32
state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + amount
transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0)
pre_state, post_state = run_transfer_processing(state, transfer)
return pre_state, transfer, post_state
def test_active_but_transfer_past_effective_balance(state):
sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
amount = spec.MAX_EFFECTIVE_BALANCE // 32
state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0)
pre_state, post_state = run_transfer_processing(state, transfer, False)
return pre_state, transfer, post_state
def test_incorrect_slot(state):
transfer = get_valid_transfer(state, slot=state.slot+1)
# un-activate so validator can transfer
state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
pre_state, post_state = run_transfer_processing(state, transfer, False)
return pre_state, transfer, post_state
def test_insufficient_balance(state):
sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
amount = spec.MAX_EFFECTIVE_BALANCE
state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount + 1, fee=0)
# un-activate so validator can transfer
state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
pre_state, post_state = run_transfer_processing(state, transfer, False)
return pre_state, transfer, post_state
def test_no_dust(state):
sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
balance = state.balances[sender_index]
transfer = get_valid_transfer(state, sender_index=sender_index, amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, fee=0)
# un-activate so validator can transfer
state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
pre_state, post_state = run_transfer_processing(state, transfer, False)
return pre_state, transfer, post_state
def test_invalid_pubkey(state):
transfer = get_valid_transfer(state)
state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH
# un-activate so validator can transfer
state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
pre_state, post_state = run_transfer_processing(state, transfer, False)
return pre_state, transfer, post_state

View File

@ -0,0 +1,163 @@
from copy import deepcopy
import pytest
import eth2spec.phase0.spec as spec
from eth2spec.phase0.spec import (
get_active_validator_indices,
get_churn_limit,
get_current_epoch,
process_voluntary_exit,
)
from tests.helpers import (
build_voluntary_exit,
pubkey_to_privkey,
)
# mark entire file as 'voluntary_exits'
pytestmark = pytest.mark.voluntary_exits
def run_voluntary_exit_processing(state, voluntary_exit, valid=True):
"""
Run ``process_voluntary_exit`` returning the pre and post state.
If ``valid == False``, run expecting ``AssertionError``
"""
post_state = deepcopy(state)
if not valid:
with pytest.raises(AssertionError):
process_voluntary_exit(post_state, voluntary_exit)
return state, None
process_voluntary_exit(post_state, voluntary_exit)
validator_index = voluntary_exit.validator_index
assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
return state, post_state
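# Typical use: run with defaults for valid exits; pass ``valid=False`` to assert the operation is rejected.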
def test_success(state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = get_current_epoch(state)
validator_index = get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
voluntary_exit = build_voluntary_exit(
state,
current_epoch,
validator_index,
privkey,
)
pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit)
return pre_state, voluntary_exit, post_state
def test_success_exit_queue(state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = get_current_epoch(state)
# exit as many validators as the churn limit allows in one epoch
initial_indices = get_active_validator_indices(state, current_epoch)[:get_churn_limit(state)]
post_state = state
for index in initial_indices:
privkey = pubkey_to_privkey[state.validator_registry[index].pubkey]
voluntary_exit = build_voluntary_exit(
state,
current_epoch,
index,
privkey,
)
pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit)
# exit an additional validator
validator_index = get_active_validator_indices(state, current_epoch)[-1]
privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
voluntary_exit = build_voluntary_exit(
state,
current_epoch,
validator_index,
privkey,
)
pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit)
assert (
post_state.validator_registry[validator_index].exit_epoch ==
post_state.validator_registry[initial_indices[0]].exit_epoch + 1
)
return pre_state, voluntary_exit, post_state
def test_validator_not_active(state):
current_epoch = get_current_epoch(state)
validator_index = get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
#
# build and test voluntary exit
#
voluntary_exit = build_voluntary_exit(
state,
current_epoch,
validator_index,
privkey,
)
pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False)
return pre_state, voluntary_exit, post_state
def test_validator_already_exited(state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow the validator to exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = get_current_epoch(state)
validator_index = get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
# but the validator has already initiated an exit
state.validator_registry[validator_index].exit_epoch = current_epoch + 2
voluntary_exit = build_voluntary_exit(
state,
current_epoch,
validator_index,
privkey,
)
pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False)
return pre_state, voluntary_exit, post_state
def test_validator_not_active_long_enough(state):
current_epoch = get_current_epoch(state)
validator_index = get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
voluntary_exit = build_voluntary_exit(
state,
current_epoch,
validator_index,
privkey,
)
assert (
current_epoch - state.validator_registry[validator_index].activation_epoch <
spec.PERSISTENT_COMMITTEE_PERIOD
)
pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False)
return pre_state, voluntary_exit, post_state

View File

@@ -0,0 +1,36 @@
import pytest
from eth2spec.phase0 import spec
from preset_loader import loader
from .helpers import (
create_genesis_state,
)
def pytest_addoption(parser):
parser.addoption(
"--config", action="store", default="minimal", help="config: make the pyspec use the specified configuration"
)
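# Example invocation (paths illustrative): pytest --config=mainnet test_libs/pyspec/tests
# Falls back to the 'minimal' preset when --config is omitted.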
@pytest.fixture(autouse=True)
def config(request):
config_name = request.config.getoption("--config")
presets = loader.load_presets('../../configs/', config_name)
spec.apply_constants_preset(presets)
@pytest.fixture
def num_validators(config):
return spec.SLOTS_PER_EPOCH * 8
@pytest.fixture
def deposit_data_leaves():
return list()
@pytest.fixture
def state(num_validators, deposit_data_leaves):
return create_genesis_state(num_validators, deposit_data_leaves)

View File

@@ -0,0 +1,136 @@
from copy import deepcopy
import pytest
import eth2spec.phase0.spec as spec
from eth2spec.phase0.state_transition import (
state_transition,
)
from eth2spec.phase0.spec import (
cache_state,
get_crosslink_deltas,
process_crosslinks,
)
from tests.helpers import (
add_attestation_to_state,
build_empty_block_for_next_slot,
fill_aggregate_attestation,
get_crosslink_committee_for_attestation,
get_valid_attestation,
next_epoch,
next_slot,
set_bitfield_bit,
)
# mark entire file as 'crosslinks'
pytestmark = pytest.mark.crosslinks
def run_process_crosslinks(state, valid=True):
# transition state to the last slot of the current epoch, just before the epoch transition
slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1
block = build_empty_block_for_next_slot(state)
block.slot = slot
state_transition(state, block)
# cache state before epoch transition
cache_state(state)
post_state = deepcopy(state)
process_crosslinks(post_state)
return state, post_state
def test_no_attestations(state):
pre_state, post_state = run_process_crosslinks(state)
for shard in range(spec.SHARD_COUNT):
assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard]
return pre_state, post_state
def test_single_crosslink_update_from_current_epoch(state):
next_epoch(state)
attestation = get_valid_attestation(state)
fill_aggregate_attestation(state, attestation)
add_attestation_to_state(state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
assert len(state.current_epoch_attestations) == 1
pre_state, post_state = run_process_crosslinks(state)
shard = attestation.data.shard
assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard]
assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard]
return pre_state, post_state
def test_single_crosslink_update_from_previous_epoch(state):
next_epoch(state)
attestation = get_valid_attestation(state)
fill_aggregate_attestation(state, attestation)
add_attestation_to_state(state, attestation, state.slot + spec.SLOTS_PER_EPOCH)
assert len(state.previous_epoch_attestations) == 1
pre_state, post_state = run_process_crosslinks(state)
crosslink_deltas = get_crosslink_deltas(state)
shard = attestation.data.shard
assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard]
assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard]
# ensure committee members were rewarded and not penalized
for index in get_crosslink_committee_for_attestation(state, attestation.data):
assert crosslink_deltas[0][index] > 0
assert crosslink_deltas[1][index] == 0
return pre_state, post_state
def test_double_late_crosslink(state):
next_epoch(state)
state.slot += 4
attestation_1 = get_valid_attestation(state)
fill_aggregate_attestation(state, attestation_1)
# add attestation_1 in the next epoch
next_epoch(state)
add_attestation_to_state(state, attestation_1, state.slot + 1)
for _ in range(spec.SLOTS_PER_EPOCH):
attestation_2 = get_valid_attestation(state)
if attestation_2.data.shard == attestation_1.data.shard:
break
next_slot(state)
fill_aggregate_attestation(state, attestation_2)
# add attestation_2 in the next epoch after attestation_1 has
# already updated the relevant crosslink
next_epoch(state)
add_attestation_to_state(state, attestation_2, state.slot + 1)
assert len(state.previous_epoch_attestations) == 1
assert len(state.current_epoch_attestations) == 0
pre_state, post_state = run_process_crosslinks(state)
crosslink_deltas = get_crosslink_deltas(state)
shard = attestation_2.data.shard
# ensure that the current crosslinks were not updated by the second attestation
assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard]
# ensure no reward, only penalties for the failed crosslink
for index in get_crosslink_committee_for_attestation(state, attestation_2.data):
assert crosslink_deltas[0][index] == 0
assert crosslink_deltas[1][index] > 0
return pre_state, post_state

View File

@@ -0,0 +1,412 @@
from copy import deepcopy
from py_ecc import bls
from eth2spec.phase0.state_transition import (
state_transition,
)
import eth2spec.phase0.spec as spec
from eth2spec.utils.minimal_ssz import signing_root
from eth2spec.phase0.spec import (
# constants
ZERO_HASH,
# SSZ
Attestation,
AttestationData,
AttestationDataAndCustodyBit,
AttesterSlashing,
BeaconBlock,
BeaconBlockHeader,
Deposit,
DepositData,
Eth1Data,
ProposerSlashing,
Transfer,
VoluntaryExit,
# functions
convert_to_indexed,
get_active_validator_indices,
get_attesting_indices,
get_block_root,
get_block_root_at_slot,
get_crosslink_committees_at_slot,
get_current_epoch,
get_domain,
get_epoch_start_slot,
get_genesis_beacon_state,
get_previous_epoch,
get_shard_delta,
hash_tree_root,
slot_to_epoch,
verify_merkle_branch,
hash,
)
from eth2spec.utils.merkle_minimal import (
calc_merkle_tree_from_leaves,
get_merkle_proof,
get_merkle_root,
)
privkeys = [i + 1 for i in range(1024)]
pubkeys = [bls.privtopub(privkey) for privkey in privkeys]
pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
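# Deterministic, insecure test keys: validator ``i`` uses privkey ``i + 1``; never use outside tests.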
def get_balance(state, index):
return state.balances[index]
def set_bitfield_bit(bitfield, i):
"""
Set the bit in ``bitfield`` at position ``i`` to ``1``.
"""
byte_index = i // 8
bit_index = i % 8
return (
bitfield[:byte_index] +
bytes([bitfield[byte_index] | (1 << bit_index)]) +
bitfield[byte_index+1:]
)
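# Worked example: set_bitfield_bit(b'\x00\x00', 9) == b'\x00\x02' (bit 9 lands in byte 1, bit 1, LSB-first within each byte).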
def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None):
if not deposit_data_leaves:
deposit_data_leaves = []
signature = b'\x33' * 96
deposit_data_list = []
for i in range(num_validators):
pubkey = pubkeys[i]
deposit_data = DepositData(
pubkey=pubkey,
# insecurely use pubkey as withdrawal key as well
withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:],
amount=spec.MAX_EFFECTIVE_BALANCE,
signature=signature,
)
item = deposit_data.hash_tree_root()
deposit_data_leaves.append(item)
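# Rebuild the tree after every append; O(n^2) overall, but each deposit's proof is verified against the growing root.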
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
root = get_merkle_root(tuple(deposit_data_leaves))
proof = list(get_merkle_proof(tree, item_index=i))
assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, i, root)
deposit_data_list.append(deposit_data)
genesis_validator_deposits = []
for i in range(num_validators):
genesis_validator_deposits.append(Deposit(
proof=list(get_merkle_proof(tree, item_index=i)),
index=i,
data=deposit_data_list[i]
))
return genesis_validator_deposits, root
def create_genesis_state(num_validators, deposit_data_leaves=None):
initial_deposits, deposit_root = create_mock_genesis_validator_deposits(
num_validators,
deposit_data_leaves,
)
return get_genesis_beacon_state(
initial_deposits,
genesis_time=0,
genesis_eth1_data=Eth1Data(
deposit_root=deposit_root,
deposit_count=len(initial_deposits),
block_hash=spec.ZERO_HASH,
),
)
def build_empty_block_for_next_slot(state):
empty_block = BeaconBlock()
empty_block.slot = state.slot + 1
empty_block.body.eth1_data.deposit_count = state.deposit_index
previous_block_header = deepcopy(state.latest_block_header)
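# Mirror the backfill done during slot processing: a header's state_root stays ZERO_HASH until the
# following slot, so fill it in here before computing the signing root of the previous block.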
if previous_block_header.state_root == spec.ZERO_HASH:
previous_block_header.state_root = state.hash_tree_root()
empty_block.previous_block_root = signing_root(previous_block_header)
return empty_block
def build_deposit_data(state, pubkey, privkey, amount):
deposit_data = DepositData(
pubkey=pubkey,
# insecurely use pubkey as withdrawal key as well
withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:],
amount=amount,
)
signature = bls.sign(
message_hash=signing_root(deposit_data),
privkey=privkey,
domain=get_domain(
state,
spec.DOMAIN_DEPOSIT,
)
)
deposit_data.signature = signature
return deposit_data
def build_attestation_data(state, slot, shard):
assert state.slot >= slot
if slot == state.slot:
block_root = build_empty_block_for_next_slot(state).previous_block_root
else:
block_root = get_block_root_at_slot(state, slot)
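# Derive the target (epoch boundary) root and the source (justified) checkpoint relative to the attestation slot.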
current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state))
if slot < current_epoch_start_slot:
epoch_boundary_root = get_block_root(state, get_previous_epoch(state))
elif slot == current_epoch_start_slot:
epoch_boundary_root = block_root
else:
epoch_boundary_root = get_block_root(state, get_current_epoch(state))
if slot < current_epoch_start_slot:
justified_epoch = state.previous_justified_epoch
justified_block_root = state.previous_justified_root
else:
justified_epoch = state.current_justified_epoch
justified_block_root = state.current_justified_root
crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks
return AttestationData(
slot=slot,
shard=shard,
beacon_block_root=block_root,
source_epoch=justified_epoch,
source_root=justified_block_root,
target_root=epoch_boundary_root,
crosslink_data_root=spec.ZERO_HASH,
previous_crosslink_root=hash_tree_root(crosslinks[shard]),
)
def build_voluntary_exit(state, epoch, validator_index, privkey):
voluntary_exit = VoluntaryExit(
epoch=epoch,
validator_index=validator_index,
)
voluntary_exit.signature = bls.sign(
message_hash=signing_root(voluntary_exit),
privkey=privkey,
domain=get_domain(
state=state,
domain_type=spec.DOMAIN_VOLUNTARY_EXIT,
message_epoch=epoch,
)
)
return voluntary_exit
def build_deposit(state,
deposit_data_leaves,
pubkey,
privkey,
amount):
deposit_data = build_deposit_data(state, pubkey, privkey, amount)
item = deposit_data.hash_tree_root()
index = len(deposit_data_leaves)
deposit_data_leaves.append(item)
tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
root = get_merkle_root(tuple(deposit_data_leaves))
proof = list(get_merkle_proof(tree, item_index=index))
assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root)
deposit = Deposit(
proof=proof,
index=index,
data=deposit_data,
)
return deposit, root, deposit_data_leaves
def get_valid_proposer_slashing(state):
current_epoch = get_current_epoch(state)
validator_index = get_active_validator_indices(state, current_epoch)[-1]
privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
slot = state.slot
header_1 = BeaconBlockHeader(
slot=slot,
previous_block_root=ZERO_HASH,
state_root=ZERO_HASH,
block_body_root=ZERO_HASH,
)
header_2 = deepcopy(header_1)
header_2.previous_block_root = b'\x02' * 32
header_2.slot = slot + 1
domain = get_domain(
state=state,
domain_type=spec.DOMAIN_BEACON_PROPOSER,
)
header_1.signature = bls.sign(
message_hash=signing_root(header_1),
privkey=privkey,
domain=domain,
)
header_2.signature = bls.sign(
message_hash=signing_root(header_2),
privkey=privkey,
domain=domain,
)
return ProposerSlashing(
proposer_index=validator_index,
header_1=header_1,
header_2=header_2,
)
def get_valid_attester_slashing(state):
attestation_1 = get_valid_attestation(state)
attestation_2 = deepcopy(attestation_1)
attestation_2.data.target_root = b'\x01' * 32
return AttesterSlashing(
attestation_1=convert_to_indexed(state, attestation_1),
attestation_2=convert_to_indexed(state, attestation_2),
)
def get_crosslink_committee_for_attestation(state, attestation_data):
"""
Return the crosslink committee corresponding to ``attestation_data``.
"""
crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot)
return [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0]
def get_valid_attestation(state, slot=None):
if slot is None:
slot = state.slot
if slot_to_epoch(slot) == get_current_epoch(state):
shard = (state.latest_start_shard + slot) % spec.SHARD_COUNT  # shard indices wrap mod SHARD_COUNT, not SLOTS_PER_EPOCH
else:
previous_shard_delta = get_shard_delta(state, get_previous_epoch(state))
shard = (state.latest_start_shard - previous_shard_delta + slot) % spec.SHARD_COUNT
attestation_data = build_attestation_data(state, slot, shard)
crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data)
committee_size = len(crosslink_committee)
bitfield_length = (committee_size + 7) // 8
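# b'\xC0' sets bits 6 and 7 of the first byte (bit i maps to 1 << (i % 8)), i.e. exactly two attesters, matching the assert below.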
aggregation_bitfield = b'\xC0' + b'\x00' * (bitfield_length - 1)
custody_bitfield = b'\x00' * bitfield_length
attestation = Attestation(
aggregation_bitfield=aggregation_bitfield,
data=attestation_data,
custody_bitfield=custody_bitfield,
)
participants = get_attesting_indices(
state,
attestation.data,
attestation.aggregation_bitfield,
)
assert len(participants) == 2
signatures = []
for validator_index in participants:
privkey = privkeys[validator_index]
signatures.append(
get_attestation_signature(
state,
attestation.data,
privkey
)
)
attestation.aggregation_signature = bls.aggregate_signatures(signatures)
return attestation
def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None):
if slot is None:
slot = state.slot
current_epoch = get_current_epoch(state)
if sender_index is None:
sender_index = get_active_validator_indices(state, current_epoch)[-1]
recipient_index = get_active_validator_indices(state, current_epoch)[0]
transfer_pubkey = pubkeys[-1]
transfer_privkey = privkeys[-1]
if fee is None:
fee = get_balance(state, sender_index) // 32
if amount is None:
amount = get_balance(state, sender_index) - fee
transfer = Transfer(
sender=sender_index,
recipient=recipient_index,
amount=amount,
fee=fee,
slot=slot,
pubkey=transfer_pubkey,
signature=ZERO_HASH,
)
transfer.signature = bls.sign(
message_hash=signing_root(transfer),
privkey=transfer_privkey,
domain=get_domain(
state=state,
domain_type=spec.DOMAIN_TRANSFER,
message_epoch=get_current_epoch(state),
)
)
# ensure withdrawal_credentials are reproducible
state.validator_registry[transfer.sender].withdrawal_credentials = (
spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:]
)
return transfer
def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0):
message_hash = AttestationDataAndCustodyBit(
data=attestation_data,
custody_bit=custody_bit,
).hash_tree_root()
return bls.sign(
message_hash=message_hash,
privkey=privkey,
domain=get_domain(
state=state,
domain_type=spec.DOMAIN_ATTESTATION,
message_epoch=slot_to_epoch(attestation_data.slot),
)
)
def fill_aggregate_attestation(state, attestation):
crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data)
for i in range(len(crosslink_committee)):
attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i)
def add_attestation_to_state(state, attestation, slot):
block = build_empty_block_for_next_slot(state)
block.slot = slot
block.body.attestations.append(attestation)
state_transition(state, block)
def next_slot(state):
block = build_empty_block_for_next_slot(state)
state_transition(state, block)
def next_epoch(state):
block = build_empty_block_for_next_slot(state)
block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
state_transition(state, block)

View File

@@ -0,0 +1,198 @@
from copy import deepcopy
import pytest
import eth2spec.phase0.spec as spec
from eth2spec.phase0.state_transition import (
state_transition,
)
from .helpers import (
build_empty_block_for_next_slot,
fill_aggregate_attestation,
get_current_epoch,
get_epoch_start_slot,
get_valid_attestation,
next_epoch,
)
# mark entire file as 'state'
pytestmark = pytest.mark.state
def check_finality(state,
prev_state,
current_justified_changed,
previous_justified_changed,
finalized_changed):
if current_justified_changed:
assert state.current_justified_epoch > prev_state.current_justified_epoch
assert state.current_justified_root != prev_state.current_justified_root
else:
assert state.current_justified_epoch == prev_state.current_justified_epoch
assert state.current_justified_root == prev_state.current_justified_root
if previous_justified_changed:
assert state.previous_justified_epoch > prev_state.previous_justified_epoch
assert state.previous_justified_root != prev_state.previous_justified_root
else:
assert state.previous_justified_epoch == prev_state.previous_justified_epoch
assert state.previous_justified_root == prev_state.previous_justified_root
if finalized_changed:
assert state.finalized_epoch > prev_state.finalized_epoch
assert state.finalized_root != prev_state.finalized_root
else:
assert state.finalized_epoch == prev_state.finalized_epoch
assert state.finalized_root == prev_state.finalized_root
def next_epoch_with_attestations(state,
fill_cur_epoch,
fill_prev_epoch):
post_state = deepcopy(state)
blocks = []
for _ in range(spec.SLOTS_PER_EPOCH):
block = build_empty_block_for_next_slot(post_state)
if fill_cur_epoch:
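# attest to the most recent slot whose attestation can already be included (MIN_ATTESTATION_INCLUSION_DELAY satisfied)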
slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)):
cur_attestation = get_valid_attestation(post_state, slot_to_attest)
fill_aggregate_attestation(post_state, cur_attestation)
block.body.attestations.append(cur_attestation)
if fill_prev_epoch:
slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
prev_attestation = get_valid_attestation(post_state, slot_to_attest)
fill_aggregate_attestation(post_state, prev_attestation)
block.body.attestations.append(prev_attestation)
state_transition(post_state, block)
blocks.append(block)
return state, blocks, post_state
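# NOTE: the input ``state`` is returned untouched as the pre-state; a deep copy is advanced and returned as the post-state.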
def test_finality_rule_4(state):
test_state = deepcopy(state)
blocks = []
for epoch in range(4):
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
blocks += new_blocks
# justification/finalization skipped at GENESIS_EPOCH
if epoch == 0:
check_finality(test_state, prev_state, False, False, False)
# justification/finalization skipped at GENESIS_EPOCH + 1
elif epoch == 1:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 2:
check_finality(test_state, prev_state, True, False, False)
elif epoch >= 3:
# rule 4 of finality
check_finality(test_state, prev_state, True, True, True)
assert test_state.finalized_epoch == prev_state.current_justified_epoch
assert test_state.finalized_root == prev_state.current_justified_root
return state, blocks, test_state
def test_finality_rule_1(state):
# get past the first two epochs, during which finality is not computed
next_epoch(state)
next_epoch(state)
pre_state = deepcopy(state)
test_state = deepcopy(state)
blocks = []
for epoch in range(3):
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
blocks += new_blocks
if epoch == 0:
check_finality(test_state, prev_state, True, False, False)
elif epoch == 1:
check_finality(test_state, prev_state, True, True, False)
elif epoch == 2:
# finalized by rule 1
check_finality(test_state, prev_state, True, True, True)
assert test_state.finalized_epoch == prev_state.previous_justified_epoch
assert test_state.finalized_root == prev_state.previous_justified_root
return pre_state, blocks, test_state
def test_finality_rule_2(state):
# get past the first two epochs, during which finality is not computed
next_epoch(state)
next_epoch(state)
pre_state = deepcopy(state)
test_state = deepcopy(state)
blocks = []
for epoch in range(3):
if epoch == 0:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
check_finality(test_state, prev_state, True, False, False)
elif epoch == 1:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False)
check_finality(test_state, prev_state, False, True, False)
elif epoch == 2:
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
# finalized by rule 2
check_finality(test_state, prev_state, True, False, True)
assert test_state.finalized_epoch == prev_state.previous_justified_epoch
assert test_state.finalized_root == prev_state.previous_justified_root
blocks += new_blocks
return pre_state, blocks, test_state
def test_finality_rule_3(state):
"""
Test scenario described here
https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892
"""
# get past the first two epochs, during which finality is not computed
next_epoch(state)
next_epoch(state)
pre_state = deepcopy(state)
test_state = deepcopy(state)
blocks = []
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
blocks += new_blocks
check_finality(test_state, prev_state, True, False, False)
# In epoch N, JE is set to N, prev JE is set to N-1
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
blocks += new_blocks
check_finality(test_state, prev_state, True, True, True)
# In epoch N+1, JE is N, prev JE is N-1, and not enough messages get in to do anything
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False)
blocks += new_blocks
check_finality(test_state, prev_state, False, True, False)
# In epoch N+2, JE is N, prev JE is N, and enough messages from the previous epoch get in to justify N+1.
# N+1 now becomes the JE. Not enough messages from epoch N+2 itself get in to justify N+2
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
blocks += new_blocks
# rule 2
check_finality(test_state, prev_state, True, False, True)
# In epoch N+3, JE is N+1, prev JE is N, and enough messages get in to justify epochs N+2 and N+3.
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, True)
blocks += new_blocks
# rule 3
check_finality(test_state, prev_state, True, True, True)
assert test_state.finalized_epoch == prev_state.current_justified_epoch
assert test_state.finalized_root == prev_state.current_justified_root
return pre_state, blocks, test_state

Some files were not shown because too many files have changed in this diff.