diff --git a/.circleci/config.yml b/.circleci/config.yml index 02871530e..5be6ed500 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,5 +1,4 @@ -# Python CircleCI 2.0 configuration file -version: 2 +version: 2.1 jobs: build: docker: @@ -8,34 +7,83 @@ jobs: steps: - checkout - # Download and cache dependencies - - restore_cache: - keys: - - v1-dependencies-{{ checksum "requirements.txt" }} - # fallback to using the latest cache if no exact match is found - - v1-dependencies- + - run: + name: Build pyspec + command: make pyspec - run: - name: install dependencies - command: | - python3 -m venv venv - . venv/bin/activate - pip install -r requirements.txt - - run: - name: build phase0 spec - command: make build/phase0 + name: Run py-tests + command: make test - - save_cache: - paths: - - ./venv - key: v1-dependencies-{{ checksum "requirements.txt" }} - - - run: - name: run tests - command: | - . venv/bin/activate - pytest tests - - - store_artifacts: - path: test-reports - destination: test-reports +# TODO see #928: decide on CI triggering of yaml tests building, +# and destination of output (new yaml tests LFS-configured repository) +# +# - run: +# name: Generate YAML tests +# command: make gen_yaml_tests +# +# - store_artifacts: +# path: test-reports +# destination: test-reports +# +# - run: +# name: Save YAML tests for deployment +# command: | +# mkdir /tmp/workspace +# cp -r yaml_tests /tmp/workspace/ +# git log -1 >> /tmp/workspace/latest_commit_message +# - persist_to_workspace: +# root: /tmp/workspace +# paths: +# - yaml_tests +# - latest_commit_message +# commit: +# docker: +# - image: circleci/python:3.6 +# steps: +# - attach_workspace: +# at: /tmp/workspace +# - add_ssh_keys: +# fingerprints: +# - "01:85:b6:36:96:a6:84:72:e4:9b:4e:38:ee:21:97:fa" +# - run: +# name: Checkout test repository +# command: | +# ssh-keyscan -H github.com >> ~/.ssh/known_hosts +# git clone git@github.com:ethereum/eth2.0-tests.git +# - run: +# name: Commit and push generated YAML tests +# command: | +# cd eth2.0-tests +# git config user.name 'eth2TestGenBot' +# git config user.email '47188154+eth2TestGenBot@users.noreply.github.com' +# for filename in /tmp/workspace/yaml_tests/*; do +# rm -rf $(basename $filename) +# cp -r $filename . +# done +# git add . 
+# if git diff --cached --exit-code >& /dev/null; then
+# echo "No changes to commit"
+# else
+# echo -e "Update generated tests\n\nLatest commit message from eth2.0-specs:\n" > commit_message
+# cat /tmp/workspace/latest_commit_message >> commit_message
+# git commit -F commit_message
+# git push origin master
+# fi
+#workflows:
+# version: 2.1
+#
+# build_and_commit:
+# jobs:
+# - build:
+# filters:
+# tags:
+# only: /.*/
+# - commit:
+# requires:
+# - build
+# filters:
+# tags:
+# only: /.*/
+# branches:
+# ignore: /.*/
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
index f33dd5256..ce047240a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,7 +1,15 @@
 *.pyc
 /__pycache__
-/venv
+venv
+.venvs
+.venv
 /.pytest_cache
 
 build/
 output/
+
+yaml_tests/
+.pytest_cache
+
+# Dynamically built from Markdown spec
+test_libs/pyspec/eth2spec/phase0/spec.py
diff --git a/Makefile b/Makefile
index 88f17dcf9..b39538791 100644
--- a/Makefile
+++ b/Makefile
@@ -1,29 +1,73 @@
 SPEC_DIR = ./specs
 SCRIPT_DIR = ./scripts
-BUILD_DIR = ./build
-UTILS_DIR = ./utils
+TEST_LIBS_DIR = ./test_libs
+PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
+YAML_TEST_DIR = ./yaml_tests
+GENERATOR_DIR = ./test_generators
+CONFIGS_DIR = ./configs
+
+# Collect a list of generator names
+GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/)))
+# Map this list of generator paths to a list of test output paths
+YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS))
+GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS))
+
+PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py
+PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS)
 
-.PHONY: clean all test
-
-
-all: $(BUILD_DIR)/phase0
+.PHONY: clean all test gen_yaml_tests pyspec phase0
 
+all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS)
 
 clean:
-	rm -rf $(BUILD_DIR)
+	rm -rf $(YAML_TEST_DIR)
+	rm -rf $(GENERATOR_VENVS)
+	rm -rf $(PY_SPEC_DIR)/venv $(PY_SPEC_DIR)/.pytest_cache
+	rm -rf $(PY_SPEC_ALL_TARGETS)
 
+# "make gen_yaml_tests" to run generators
+gen_yaml_tests: $(YAML_TEST_DIR) $(YAML_TEST_TARGETS)
 
 # runs a limited set of tests against a minimal config
-# run pytest with `-m` option to full suite
-test:
-	pytest -m minimal_config tests/
+test: $(PY_SPEC_ALL_TARGETS)
+	cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python -m pytest -m minimal_config .
+
+# "make pyspec" to create the pyspec for all phases.
+pyspec: $(PY_SPEC_ALL_TARGETS)
+
+# "make phase0" to create pyspec for phase0
+phase0: $(PY_SPEC_PHASE_0_TARGETS)
 
-$(BUILD_DIR)/phase0:
+$(PY_SPEC_DIR)/eth2spec/phase0/spec.py:
+	python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@
+
+
+CURRENT_DIR = ${CURDIR}
+
+# The function that builds a set of suite files, by calling a generator for the given type (param 1)
+define build_yaml_tests
+	$(info running generator $(1))
+	# Create the output
+	mkdir -p $(YAML_TEST_DIR)$(1)
+
+	# 1) Create a virtual environment
+	# 2) Activate the venv; this is where dependencies are installed for the generator
+	# 3) Install all the necessary requirements
+	# 4) Run the generator. The generator is assumed to have a "main.py" file.
+	# 5) Output to the tests dir (the generator program should accept a "-o <output dir>" argument).
+	cd $(GENERATOR_DIR)$(1); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt; python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) -c $(CURRENT_DIR)/$(CONFIGS_DIR)
+
+	$(info generator $(1) finished)
+endef
+
+# The tests dir itself is simply built by creating the directory (recursively creating deeper directories if necessary)
+$(YAML_TEST_DIR):
+	$(info creating directory, to output yaml targets to: ${YAML_TEST_TARGETS})
 	mkdir -p $@
-	python3 $(SCRIPT_DIR)/phase0/build_spec.py $(SPEC_DIR)/core/0_beacon-chain.md $@/spec.py
-	mkdir -p $@/utils
-	cp $(UTILS_DIR)/phase0/* $@/utils
-	cp $(UTILS_DIR)/phase0/state_transition.py $@
-	touch $@/__init__.py $@/utils/__init__.py
+
+# For any target within the tests dir, build it using the build_yaml_tests function.
+# (creation of output dir is a dependency)
+$(YAML_TEST_DIR)%: $(YAML_TEST_DIR)
+	$(call build_yaml_tests,$*)
diff --git a/README.md b/README.md
index ce0ae8738..aa5b7e302 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,8 @@ To learn more about sharding and eth2.0/Serenity, see the [sharding FAQ](https:/
 This repo hosts the current eth2.0 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed upon changes to spec can be made through pull requests.
 
-# Specs
+
+## Specs
 
 Core specifications for eth2.0 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
 * [Phase 0 -- The Beacon Chain](specs/core/0_beacon-chain.md)
@@ -16,13 +17,25 @@ Core specifications for eth2.0 client validation can be found in [specs/core](sp
 Accompanying documents can be found in [specs](specs) and include
 * [SimpleSerialize (SSZ) spec](specs/simple-serialize.md)
 * [BLS signature verification](specs/bls_signature.md)
-* [General test format](specs/test-format.md)
+* [General test format](specs/test_formats/README.md)
 * [Honest validator implementation doc](specs/validator/0_beacon-chain-validator.md)
+* [Merkle proof formats](specs/light_client/merkle_proofs.md)
+* [Light client syncing protocol](specs/light_client/sync_protocol.md)
+
+
+### Design goals
 
-## Design goals
 The following are the broad design goals for Ethereum 2.0:
 * to minimize complexity, even at the cost of some losses in efficiency
 * to remain live through major network partitions and when very large portions of nodes go offline
 * to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available
 * to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time
 * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain)
+
+
+## For spec contributors
+
+Documentation on the different components used during spec writing can be found here:
+* [YAML Test Generators](test_generators/README.md)
+* [Executable Python Spec, with Py-tests](test_libs/pyspec/README.md)
+
diff --git a/configs/constant_presets/README.md b/configs/constant_presets/README.md
new file mode 100644
index 000000000..45148862e
--- /dev/null
+++ b/configs/constant_presets/README.md
@@ -0,0 +1,20 @@
+# Constant Presets
+
+This directory contains a set of constant presets used for testing, testnets, and mainnet.
+
+A preset file contains all the constants known for its target.
+Later-fork constants can be ignored; e.g., a client that only supports phase 0 can ignore the phase 1 constants for now.
+
+## Format
+
+Each preset is a key-value mapping.
+
+**Key**: the name of the constant, an `UPPER_SNAKE_CASE` (a.k.a. "macro case") formatted string.
+**Value**: one of:
+ - an unsigned integer, up to 64 bits (inclusive)
+ - a hexadecimal string, prefixed with `0x`
+
+Presets may contain comments to describe the values.
+
+See `mainnet.yaml` for a complete example.
+
diff --git a/configs/constant_presets/mainnet.yaml b/configs/constant_presets/mainnet.yaml
new file mode 100644
index 000000000..d06febb77
--- /dev/null
+++ b/configs/constant_presets/mainnet.yaml
@@ -0,0 +1,124 @@
+# Mainnet preset
+# Note: the intention of this file (for now) is to illustrate what a mainnet configuration could look like.
+# Some of these constants may still change before the launch of Phase 0.
+
+
+# Misc
+# ---------------------------------------------------------------
+# 2**10 (= 1,024)
+SHARD_COUNT: 1024
+# 2**7 (= 128)
+TARGET_COMMITTEE_SIZE: 128
+# 2**12 (= 4,096)
+MAX_ATTESTATION_PARTICIPANTS: 4096
+# 2**2 (= 4)
+MIN_PER_EPOCH_CHURN_LIMIT: 4
+# 2**16 (= 65,536)
+CHURN_LIMIT_QUOTIENT: 65536
+# See issue 563
+SHUFFLE_ROUND_COUNT: 90
+
+
+# Deposit contract
+# ---------------------------------------------------------------
+# **TBD**
+DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
+# 2**5 (= 32)
+DEPOSIT_CONTRACT_TREE_DEPTH: 32
+
+
+# Gwei values
+# ---------------------------------------------------------------
+# 2**0 * 10**9 (= 1,000,000,000) Gwei
+MIN_DEPOSIT_AMOUNT: 1000000000
+# 2**5 * 10**9 (= 32,000,000,000) Gwei
+MAX_DEPOSIT_AMOUNT: 32000000000
+# 2**4 * 10**9 (= 16,000,000,000) Gwei
+EJECTION_BALANCE: 16000000000
+# 2**0 * 10**9 (= 1,000,000,000) Gwei
+HIGH_BALANCE_INCREMENT: 1000000000
+
+
+# Initial values
+# ---------------------------------------------------------------
+GENESIS_FORK_VERSION: 0x00000000
+# 2**32, GENESIS_EPOCH is derived from this constant
+GENESIS_SLOT: 4294967296
+GENESIS_START_SHARD: 0
+# 2**64 - 1
+FAR_FUTURE_EPOCH: 18446744073709551615
+BLS_WITHDRAWAL_PREFIX_BYTE: 0x00
+
+
+# Time parameters
+# ---------------------------------------------------------------
+# 6 seconds 6 seconds
+SECONDS_PER_SLOT: 6
+# 2**2 (= 4) slots 24 seconds
+MIN_ATTESTATION_INCLUSION_DELAY: 4
+# 2**6 (= 64) slots 6.4 minutes
+SLOTS_PER_EPOCH: 64
+# 2**0 (= 1) epochs 6.4 minutes
+MIN_SEED_LOOKAHEAD: 1
+# 2**2 (= 4) epochs 25.6 minutes
+ACTIVATION_EXIT_DELAY: 4
+# 2**10 (= 1,024) slots ~1.7 hours
+SLOTS_PER_ETH1_VOTING_PERIOD: 1024
+# 2**13 (= 8,192) slots ~13 hours
+SLOTS_PER_HISTORICAL_ROOT: 8192
+# 2**8 (= 256) epochs ~27 hours
+MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
+# 2**11 (= 2,048) epochs 9 days
+PERSISTENT_COMMITTEE_PERIOD: 2048
+# 2**6 (= 64) epochs ~7 hours
+MAX_CROSSLINK_EPOCHS: 64
+
+
+# State list lengths
+# ---------------------------------------------------------------
+# 2**13 (= 8,192) epochs ~36 days
+LATEST_RANDAO_MIXES_LENGTH: 8192
+# 2**13 (= 8,192) epochs ~36 days
+LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 8192
+# 2**13 (= 8,192) epochs ~36 days
+LATEST_SLASHED_EXIT_LENGTH: 8192
+
+
+# Reward and penalty quotients
+# ---------------------------------------------------------------
+# 2**5 (= 32)
+BASE_REWARD_QUOTIENT: 32
+# 2**9 (= 512)
+WHISTLEBLOWING_REWARD_QUOTIENT: 512
+# 2**3 (= 8)
+PROPOSER_REWARD_QUOTIENT: 8
+# 2**24 (= 16,777,216)
+INACTIVITY_PENALTY_QUOTIENT: 16777216
+
+
+# Max operations per block
+# ---------------------------------------------------------------
+# 2**5 (= 32)
+MIN_PENALTY_QUOTIENT: 32
+# 2**4 (= 16)
+MAX_PROPOSER_SLASHINGS: 16
+# 2**0 (= 1)
+MAX_ATTESTER_SLASHINGS: 1
+# 2**7 (= 128)
+MAX_ATTESTATIONS: 128
+# 2**4 (= 16)
+MAX_DEPOSITS: 16
+# 2**4 (= 16)
+MAX_VOLUNTARY_EXITS: 16
+# 2**4 (= 16)
+MAX_TRANSFERS: 16
+
+
+# Signature domains
+# ---------------------------------------------------------------
+DOMAIN_BEACON_BLOCK: 0
+DOMAIN_RANDAO: 1
+DOMAIN_ATTESTATION: 2
+DOMAIN_DEPOSIT: 3
+DOMAIN_VOLUNTARY_EXIT: 4
+DOMAIN_TRANSFER: 5
\ No newline at end of file
diff --git a/configs/constant_presets/minimal.yaml b/configs/constant_presets/minimal.yaml
new file mode 100644
index 000000000..80af5398c
--- /dev/null
+++ b/configs/constant_presets/minimal.yaml
@@ -0,0 +1,124 @@
+# Minimal preset
+
+
+# Misc
+# ---------------------------------------------------------------
+
+# [customized] Just 8 shards for testing purposes
+SHARD_COUNT: 8
+
+# [customized] insecure, but fast
+TARGET_COMMITTEE_SIZE: 4
+# 2**12 (= 4,096)
+MAX_ATTESTATION_PARTICIPANTS: 4096
+# 2**2 (= 4)
+MIN_PER_EPOCH_CHURN_LIMIT: 4
+# 2**16 (= 65,536)
+CHURN_LIMIT_QUOTIENT: 65536
+# [customized] Faster, but insecure.
+SHUFFLE_ROUND_COUNT: 10
+
+
+# Deposit contract
+# ---------------------------------------------------------------
+# **TBD**
+DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
+# 2**5 (= 32)
+DEPOSIT_CONTRACT_TREE_DEPTH: 32
+
+
+# Gwei values
+# ---------------------------------------------------------------
+# 2**0 * 10**9 (= 1,000,000,000) Gwei
+MIN_DEPOSIT_AMOUNT: 1000000000
+# 2**5 * 10**9 (= 32,000,000,000) Gwei
+MAX_DEPOSIT_AMOUNT: 32000000000
+# 2**4 * 10**9 (= 16,000,000,000) Gwei
+EJECTION_BALANCE: 16000000000
+# 2**0 * 10**9 (= 1,000,000,000) Gwei
+HIGH_BALANCE_INCREMENT: 1000000000
+
+
+# Initial values
+# ---------------------------------------------------------------
+GENESIS_FORK_VERSION: 0x00000000
+# 2**32, GENESIS_EPOCH is derived from this constant
+GENESIS_SLOT: 4294967296
+GENESIS_START_SHARD: 0
+# 2**64 - 1
+FAR_FUTURE_EPOCH: 18446744073709551615
+BLS_WITHDRAWAL_PREFIX_BYTE: 0x00
+
+
+# Time parameters
+# ---------------------------------------------------------------
+# 6 seconds 6 seconds
+SECONDS_PER_SLOT: 6
+# [customized] 2 slots
+MIN_ATTESTATION_INCLUSION_DELAY: 2
+# [customized] fast epochs
+SLOTS_PER_EPOCH: 8
+# 2**0 (= 1) epochs 6.4 minutes
+MIN_SEED_LOOKAHEAD: 1
+# 2**2 (= 4) epochs 25.6 minutes
+ACTIVATION_EXIT_DELAY: 4
+# [customized] higher frequency new deposits from eth1 for testing
+SLOTS_PER_ETH1_VOTING_PERIOD: 16
+# [customized] smaller state
+SLOTS_PER_HISTORICAL_ROOT: 64
+# 2**8 (= 256) epochs ~27 hours
+MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
+# 2**11 (= 2,048) epochs 9 days
+PERSISTENT_COMMITTEE_PERIOD: 2048
+# 2**6 (= 64) epochs ~7 hours
+MAX_CROSSLINK_EPOCHS: 64
+
+
+# State list lengths
+# ---------------------------------------------------------------
+# [customized] smaller state
+LATEST_RANDAO_MIXES_LENGTH: 64
+# [customized] smaller state
+LATEST_ACTIVE_INDEX_ROOTS_LENGTH: 64
+# [customized] smaller state
+LATEST_SLASHED_EXIT_LENGTH: 64
+
+
+# Reward and penalty quotients
+# ---------------------------------------------------------------
+# 2**5 (= 32)
+BASE_REWARD_QUOTIENT: 32
+# 2**9 (= 512)
+WHISTLEBLOWING_REWARD_QUOTIENT: 512
+# 2**3 (= 8)
+PROPOSER_REWARD_QUOTIENT: 8
+# 2**24 (= 16,777,216)
+INACTIVITY_PENALTY_QUOTIENT: 16777216
+
+
+# Max operations per block
+# ---------------------------------------------------------------
+# 2**5 (= 32)
+MIN_PENALTY_QUOTIENT: 32
+# 2**4 (= 16)
+MAX_PROPOSER_SLASHINGS: 16
+# 2**0 (= 1)
+MAX_ATTESTER_SLASHINGS: 1
+# 2**7 (= 128)
+MAX_ATTESTATIONS: 128
+# 2**4 (= 16)
+MAX_DEPOSITS: 16
+# 2**4 (= 16)
+MAX_VOLUNTARY_EXITS: 16
+# 2**4 (= 16)
+MAX_TRANSFERS: 16
+
+
+# Signature domains
+# ---------------------------------------------------------------
+DOMAIN_BEACON_BLOCK: 0
+DOMAIN_RANDAO: 1
+DOMAIN_ATTESTATION: 2
+DOMAIN_DEPOSIT: 3
+DOMAIN_VOLUNTARY_EXIT: 4
+DOMAIN_TRANSFER: 5
\ No newline at end of file
diff --git a/configs/fork_timelines/README.md b/configs/fork_timelines/README.md
new file mode 100644
index 000000000..c93b415f5
--- /dev/null
+++ b/configs/fork_timelines/README.md
@@ -0,0 +1,18 @@
+# Fork timelines
+
+This directory contains a set of fork timelines used for testing, testnets, and mainnet.
+
+A timeline file contains all the forks known for its target.
+Later forks can be ignored; e.g., a client that only supports phase 0 can ignore the `phase1` fork for now.
+
+## Format
+
+Each timeline file is a key-value mapping.
+
+**Key**: the name of the fork, a `lower_snake_case` (a.k.a. "python case") formatted string.
+**Value**: an unsigned integer, the epoch number at which the fork activates.
+
+Timelines may contain comments to describe the values.
+
+See `mainnet.yaml` for a complete example.
+
diff --git a/configs/fork_timelines/mainnet.yaml b/configs/fork_timelines/mainnet.yaml
new file mode 100644
index 000000000..8d51d6582
--- /dev/null
+++ b/configs/fork_timelines/mainnet.yaml
@@ -0,0 +1,12 @@
+# Mainnet fork timeline
+
+# Equal to GENESIS_EPOCH
+phase0: 67108864
+
+# Example 1:
+# phase0_funny_fork_name: 67116000
+
+# Example 2:
+# Should be equal to PHASE_1_GENESIS_EPOCH
+# (placeholder example value)
+# phase1: 67163000
diff --git a/configs/fork_timelines/testing.yaml b/configs/fork_timelines/testing.yaml
new file mode 100644
index 000000000..957a53b8c
--- /dev/null
+++ b/configs/fork_timelines/testing.yaml
@@ -0,0 +1,6 @@
+# Testing fork timeline
+
+# Equal to GENESIS_EPOCH
+phase0: 536870912
+
+# No other forks considered in testing yet (to be implemented)
diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py
index 6116f1ffe..54adfdde7 100644
--- a/scripts/phase0/build_spec.py
+++ b/scripts/phase0/build_spec.py
@@ -2,24 +2,32 @@ import sys
 import function_puller
 
 
-def build_spec(sourcefile, outfile):
+def build_phase0_spec(sourcefile, outfile):
     code_lines = []
-
-    code_lines.append("from build.phase0.utils.minimal_ssz import *")
-    code_lines.append("from build.phase0.utils.bls_stub import *")
-    for i in (1, 2, 3, 4, 8, 32, 48, 96):
-        code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i))
-    code_lines.append("SLOTS_PER_EPOCH = 64")  # stub, will get overwritten by real var
-    code_lines.append("def slot_to_epoch(x): return x // SLOTS_PER_EPOCH")
-    code_lines.append("""
+
 from typing import (
     Any,
     Callable,
+    Dict,
     List,
     NewType,
     Tuple,
 )
+from eth2spec.utils.minimal_ssz import *
+from eth2spec.utils.bls_stub import *
+
+
 """)
+    for i in (1, 2, 3, 4, 8, 32, 48, 96):
+        code_lines.append("def int_to_bytes%d(x): return x.to_bytes(%d, 'little')" % (i, i))
+
+    code_lines.append("""
+# stub, will get overwritten by real var
+SLOTS_PER_EPOCH = 64
+
+
+def slot_to_epoch(x): return x // SLOTS_PER_EPOCH
 
 Slot = NewType('Slot', int)  # uint64
@@ -34,12 +42,14 @@ Any = None
 Store = None
 """)
-    code_lines += function_puller.get_lines(sourcefile)
+    code_lines += function_puller.get_spec(sourcefile)
 
     code_lines.append("""
 # Monkey patch validator get committee code
 _compute_committee = compute_committee
 committee_cache = {}
+
+
 def compute_committee(validator_indices: List[ValidatorIndex],
                       seed: Bytes32,
                       index: int,
@@ -60,6 +70,8 @@ def compute_committee(validator_indices: List[ValidatorIndex],
 # Monkey patch hash cache
 _hash = hash
 hash_cache = {}
+
+
 def hash(x):
     if x in hash_cache:
         return hash_cache[x]
@@ -67,7 +79,19 @@ def hash(x):
     ret = _hash(x)
     hash_cache[x] = ret
     return ret
-    """)
+
+# Access to overwrite spec constants based on configuration
+def apply_constants_preset(preset: Dict[str, Any]):
+    global_vars = globals()
+    for k, v in preset.items():
+        global_vars[k] = v
+
+    # Deal with derived constants
+    global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)
+
+    # Initialize SSZ types again, to account for changed lengths
+    init_SSZ_types()
+""")
     with open(outfile, 'w') as out:
         out.write("\n".join(code_lines))
@@ -75,5 +99,6 @@ def hash(x):
 if __name__ == '__main__':
     if len(sys.argv) < 3:
-        print("Error: spec source and outfile must defined")
-    build_spec(sys.argv[1], sys.argv[2])
+        print("Usage: <source phase0> <output phase0 pyspec>")
+    build_phase0_spec(sys.argv[1], sys.argv[2])
+
diff --git a/scripts/phase0/function_puller.py b/scripts/phase0/function_puller.py
index 7d5796fc7..635797d39 100644
--- a/scripts/phase0/function_puller.py
+++ b/scripts/phase0/function_puller.py
@@ -1,11 +1,13 @@
 import sys
+from typing import List
 
-def get_lines(file_name):
+def get_spec(file_name: str) -> List[str]:
     code_lines = []
     pulling_from = None
     current_name = None
-    processing_typedef = False
+    current_typedef = None
+    type_defs = []
     for linenum, line in enumerate(open(sys.argv[1]).readlines()):
         line = line.rstrip()
         if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
@@ -17,17 +19,26 @@
             if pulling_from is None:
                 pulling_from = linenum
             else:
-                if processing_typedef:
+                if current_typedef is not None:
                     assert code_lines[-1] == '}'
                     code_lines[-1] = '})'
+                    current_typedef[-1] = '})'
+                    type_defs.append((current_name, current_typedef))
                 pulling_from = None
-                processing_typedef = False
+                current_typedef = None
         else:
             if pulling_from == linenum and line == '{':
                 code_lines.append('%s = SSZType({' % current_name)
-                processing_typedef = True
+                current_typedef = ['global_vars["%s"] = SSZType({' % current_name]
             elif pulling_from is not None:
+                # Add some whitespace between functions
+                if line[:3] == 'def':
+                    code_lines.append('')
+                    code_lines.append('')
                 code_lines.append(line)
+                # Remember type def lines
+                if current_typedef is not None:
+                    current_typedef.append(line)
             elif pulling_from is None and len(line) > 0 and line[0] == '|':
                 row = line[1:].split('|')
                 if len(row) >= 2:
@@ -42,5 +53,18 @@
                     if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
                         eligible = False
                 if eligible:
-                    code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123567890123456789012357890')))
+                    code_lines.append(row[0] + ' = ' + (row[1].replace('**TBD**', '0x1234567890123456789012345678901234567890')))
+    # Build type-def re-initialization
+    code_lines.append('')
+    code_lines.append('def init_SSZ_types():')
+    code_lines.append('    global_vars = globals()')
+    for ssz_type_name, ssz_type in type_defs:
+        code_lines.append('')
+        for type_line in ssz_type:
+            code_lines.append('    ' + type_line)
+    code_lines.append('')
+    code_lines.append('ssz_types = [' + ', '.join([f'\'{ssz_type_name}\'' for (ssz_type_name, _) in type_defs]) + ']')
+    code_lines.append('')
+    code_lines.append('def get_ssz_type_by_name(name: str) -> SSZType: return globals()[name]')
+
code_lines.append('') return code_lines diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md index dc359b056..f04a04877 100644 --- a/specs/core/0_beacon-chain.md +++ b/specs/core/0_beacon-chain.md @@ -25,7 +25,6 @@ - [`Fork`](#fork) - [`Crosslink`](#crosslink) - [`Eth1Data`](#eth1data) - - [`Eth1DataVote`](#eth1datavote) - [`AttestationData`](#attestationdata) - [`AttestationDataAndCustodyBit`](#attestationdataandcustodybit) - [`IndexedAttestation`](#indexedattestation) @@ -51,7 +50,7 @@ - [`xor`](#xor) - [`hash`](#hash) - [`hash_tree_root`](#hash_tree_root) - - [`signed_root`](#signed_root) + - [`signing_root`](#signing_root) - [`get_temporary_block_header`](#get_temporary_block_header) - [`slot_to_epoch`](#slot_to_epoch) - [`get_previous_epoch`](#get_previous_epoch) @@ -67,8 +66,8 @@ - [`get_permuted_index`](#get_permuted_index) - [`get_split_offset`](#get_split_offset) - [`get_epoch_committee_count`](#get_epoch_committee_count) + - [`get_shard_delta`](#get_shard_delta) - [`compute_committee`](#compute_committee) - - [`get_current_epoch_committee_count`](#get_current_epoch_committee_count) - [`get_crosslink_committees_at_slot`](#get_crosslink_committees_at_slot) - [`get_block_root`](#get_block_root) - [`get_state_root`](#get_state_root) @@ -77,8 +76,7 @@ - [`generate_seed`](#generate_seed) - [`get_beacon_proposer_index`](#get_beacon_proposer_index) - [`verify_merkle_branch`](#verify_merkle_branch) - - [`get_crosslink_committee_for_attestation`](#get_crosslink_committee_for_attestation) - - [`get_attestation_participants`](#get_attestation_participants) + - [`get_attesting_indices`](#get_attesting_indices) - [`int_to_bytes1`, `int_to_bytes2`, ...](#int_to_bytes1-int_to_bytes2-) - [`bytes_to_int`](#bytes_to_int) - [`get_effective_balance`](#get_effective_balance) @@ -92,16 +90,14 @@ - [`is_surround_vote`](#is_surround_vote) - [`integer_squareroot`](#integer_squareroot) - [`get_delayed_activation_exit_epoch`](#get_delayed_activation_exit_epoch) + - [`get_churn_limit`](#get_churn_limit) - [`bls_verify`](#bls_verify) - [`bls_verify_multiple`](#bls_verify_multiple) - [`bls_aggregate_pubkeys`](#bls_aggregate_pubkeys) - - [`process_deposit`](#process_deposit) - [Routines for updating validator status](#routines-for-updating-validator-status) - [`activate_validator`](#activate_validator) - [`initiate_validator_exit`](#initiate_validator_exit) - - [`exit_validator`](#exit_validator) - [`slash_validator`](#slash_validator) - - [`prepare_validator_for_withdrawal`](#prepare_validator_for_withdrawal) - [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract) - [Deposit arguments](#deposit-arguments) - [Withdrawal credentials](#withdrawal-credentials) @@ -115,22 +111,17 @@ - [State caching](#state-caching) - [Per-epoch processing](#per-epoch-processing) - [Helper functions](#helper-functions-1) - - [Justification](#justification) + - [Justification and finalization](#justification-and-finalization) - [Crosslinks](#crosslinks) - - [Eth1 data](#eth1-data) - [Rewards and penalties](#rewards-and-penalties) - - [Justification and finalization](#justification-and-finalization) - - [Crosslinks](#crosslinks-1) - - [Apply rewards](#apply-rewards) - - [Ejections](#ejections) - - [Validator registry and shuffling seed data](#validator-registry-and-shuffling-seed-data) - - [Slashings and exit queue](#slashings-and-exit-queue) + - [Registry updates](#registry-updates) + - [Slashings](#slashings) - [Final updates](#final-updates) - [Per-slot processing](#per-slot-processing) - [Per-block 
processing](#per-block-processing) - [Block header](#block-header) - [RANDAO](#randao) - - [Eth1 data](#eth1-data-1) + - [Eth1 data](#eth1-data) - [Operations](#operations) - [Proposer slashings](#proposer-slashings) - [Attester slashings](#attester-slashings) @@ -139,10 +130,6 @@ - [Voluntary exits](#voluntary-exits) - [Transfers](#transfers) - [State root verification](#state-root-verification) -- [References](#references) - - [Normative](#normative) - - [Informative](#informative) -- [Copyright](#copyright) @@ -171,21 +158,25 @@ Code snippets appearing in `this style` are to be interpreted as Python code. * **Crosslink** - a set of signatures from a committee attesting to a block in a shard chain that can be included into the beacon chain. Crosslinks are the main means by which the beacon chain "learns about" the updated state of shard chains. * **Slot** - a period during which one proposer has the ability to create a beacon chain block and some attesters have the ability to make attestations. * **Epoch** - an aligned span of slots during which all [validators](#dfn-validator) get exactly one chance to make an attestation. -* **Finalized**, **justified** - see Casper FFG finalization [[casper-ffg]](#ref-casper-ffg). +* **Finalized**, **justified** - see the [Casper FFG paper](https://arxiv.org/abs/1710.09437). * **Withdrawal period** - the number of slots between a [validator](#dfn-validator) exit and the [validator](#dfn-validator) balance being withdrawable. * **Genesis time** - the Unix time of the genesis beacon chain block at slot 0. ## Constants +Note: the default mainnet values for the constants are included here for spec-design purposes. +The different configurations for mainnet, testnets, and yaml-based testing can be found in the `configs/constant_presets/` directory. +These configurations are updated for releases, but may be out of sync during `dev` changes. + ### Misc | Name | Value | | - | - | | `SHARD_COUNT` | `2**10` (= 1,024) | | `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) | -| `MAX_BALANCE_CHURN_QUOTIENT` | `2**5` (= 32) | -| `MAX_ATTESTATION_PARTICIPANTS` | `2**12` (= 4,096) | -| `MAX_EXIT_DEQUEUES_PER_EPOCH` | `2**2` (= 4) | +| `MAX_INDICES_PER_ATTESTATION` | `2**12` (= 4,096) | +| `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) | +| `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) | | `SHUFFLE_ROUND_COUNT` | 90 | * For the safety of crosslinks `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) @@ -211,16 +202,14 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | Name | Value | | - | - | | `GENESIS_FORK_VERSION` | `int_to_bytes4(0)` | -| `GENESIS_SLOT` | `2**32` | -| `GENESIS_EPOCH` | `slot_to_epoch(GENESIS_SLOT)` | +| `GENESIS_SLOT` | `0` | +| `GENESIS_EPOCH` | `0` | | `GENESIS_START_SHARD` | `0` | | `FAR_FUTURE_EPOCH` | `2**64 - 1` | | `ZERO_HASH` | `int_to_bytes32(0)` | | `EMPTY_SIGNATURE` | `int_to_bytes96(0)` | | `BLS_WITHDRAWAL_PREFIX_BYTE` | `int_to_bytes1(0)` | -* `GENESIS_SLOT` should be at least as large in terms of time as the largest of the time parameters or state list lengths below (ie. 
it should be at least as large as any value measured in slots, and at least `SLOTS_PER_EPOCH` times as large as any value measured in epochs). - ### Time parameters | Name | Value | Unit | Duration | @@ -230,15 +219,14 @@ Code snippets appearing in `this style` are to be interpreted as Python code. | `SLOTS_PER_EPOCH` | `2**6` (= 64) | slots | 6.4 minutes | | `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes | | `ACTIVATION_EXIT_DELAY` | `2**2` (= 4) | epochs | 25.6 minutes | -| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `2**4` (= 16) | epochs | ~1.7 hours | +| `SLOTS_PER_ETH1_VOTING_PERIOD` | `2**10` (= 1,024) | slots | ~1.7 hours | | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~13 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | -| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | +| `MAX_CROSSLINK_EPOCHS` | `2**6` (= 64) | epochs | ~7 hours | * `MAX_CROSSLINK_EPOCHS` should be a small constant times `SHARD_COUNT // SLOTS_PER_EPOCH` - ### State list lengths | Name | Value | Unit | Duration | @@ -284,7 +272,7 @@ Code snippets appearing in `this style` are to be interpreted as Python code. ## Data structures -The following data structures are defined as [SimpleSerialize (SSZ)](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md) objects. +The following data structures are defined as [SimpleSerialize (SSZ)](../simple-serialize.md) objects. The types are defined topologically to aid in facilitating an executable version of the spec. @@ -309,6 +297,8 @@ The types are defined topologically to aid in facilitating an executable version { # Epoch number 'epoch': 'uint64', + # Root of the previous crosslink + 'previous_crosslink_root': 'bytes32', # Shard data since the previous crosslink 'crosslink_data_root': 'bytes32', } @@ -327,17 +317,6 @@ The types are defined topologically to aid in facilitating an executable version } ``` -#### `Eth1DataVote` - -```python -{ - # Data being voted for - 'eth1_data': Eth1Data, - # Vote count - 'vote_count': 'uint64', -} -``` - #### `AttestationData` ```python @@ -353,7 +332,7 @@ The types are defined topologically to aid in facilitating an executable version # Crosslink vote 'shard': 'uint64', - 'previous_crosslink': Crosslink, + 'previous_crosslink_root': 'bytes32', 'crosslink_data_root': 'bytes32', } ``` @@ -417,14 +396,14 @@ The types are defined topologically to aid in facilitating an executable version 'pubkey': 'bytes48', # Withdrawal credentials 'withdrawal_credentials': 'bytes32', + # Epoch when became eligible for activation + 'activation_eligibility_epoch': 'uint64', # Epoch when validator activated 'activation_epoch': 'uint64', # Epoch when validator exited 'exit_epoch': 'uint64', # Epoch when validator is eligible to withdraw 'withdrawable_epoch': 'uint64', - # Did the validator initiate an exit - 'initiated_exit': 'bool', # Was the validator slashed 'slashed': 'bool', # Rounded balance @@ -440,8 +419,6 @@ The types are defined topologically to aid in facilitating an executable version 'aggregation_bitfield': 'bytes', # Attestation data 'data': AttestationData, - # Custody bitfield - 'custody_bitfield': 'bytes', # Inclusion slot 'inclusion_slot': 'uint64', } @@ -590,7 +567,6 @@ The types are defined topologically to aid in facilitating an executable version # Validator registry 'validator_registry': [Validator], 'balances': ['uint64'], - 'validator_registry_update_epoch': 'uint64', # Randomness and 
committees
     'latest_randao_mixes': ['bytes32', LATEST_RANDAO_MIXES_LENGTH],
@@ -608,7 +584,8 @@ The types are defined topologically to aid in facilitating an executable version
     'finalized_root': 'bytes32',
 
     # Recent state
-    'latest_crosslinks': [Crosslink, SHARD_COUNT],
+    'current_crosslinks': [Crosslink, SHARD_COUNT],
+    'previous_crosslinks': [Crosslink, SHARD_COUNT],
     'latest_block_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT],
     'latest_state_roots': ['bytes32', SLOTS_PER_HISTORICAL_ROOT],
     'latest_active_index_roots': ['bytes32', LATEST_ACTIVE_INDEX_ROOTS_LENGTH],
@@ -618,7 +595,7 @@ The types are defined topologically to aid in facilitating an executable version
 
     # Ethereum 1.0 chain data
     'latest_eth1_data': Eth1Data,
-    'eth1_data_votes': [Eth1DataVote],
+    'eth1_data_votes': [Eth1Data],
     'deposit_index': 'uint64',
 }
 ```
@@ -657,25 +634,25 @@ Note: We aim to migrate to a S[T/N]ARK-friendly hash function in a future Ethere
 
 ### `hash_tree_root`
 
-`def hash_tree_root(object: SSZSerializable) -> Bytes32` is a function for hashing objects into a single root utilizing a hash tree structure. `hash_tree_root` is defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#tree-hash).
+`def hash_tree_root(object: SSZSerializable) -> Bytes32` is a function for hashing objects into a single root utilizing a hash tree structure. `hash_tree_root` is defined in the [SimpleSerialize spec](../simple-serialize.md#merkleization).
 
-### `signed_root`
+### `signing_root`
 
-`def signed_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md#signed-roots) to compute signed messages.
+`def signing_root(object: SSZContainer) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages.
 
 ### `get_temporary_block_header`
 
 ```python
 def get_temporary_block_header(block: BeaconBlock) -> BeaconBlockHeader:
     """
-    Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``. 
+    Return the block header corresponding to a block with ``state_root`` set to ``ZERO_HASH``.
     """
     return BeaconBlockHeader(
         slot=block.slot,
         previous_block_root=block.previous_block_root,
         state_root=ZERO_HASH,
         block_body_root=hash_tree_root(block.body),
-        # signed_root(block) is used for block id purposes so signature is a stub
+        # signing_root(block) is used for block id purposes so signature is a stub
         signature=EMPTY_SIGNATURE,
     )
 ```
@@ -696,8 +673,10 @@ def slot_to_epoch(slot: Slot) -> Epoch:
 def get_previous_epoch(state: BeaconState) -> Epoch:
     """
     Return the previous epoch of the given ``state``.
+    Return the current epoch if it's the genesis epoch.
     """
-    return get_current_epoch(state) - 1
+    current_epoch = get_current_epoch(state)
+    return (current_epoch - 1) if current_epoch > GENESIS_EPOCH else current_epoch
 ```
 
 ### `get_current_epoch`
@@ -744,11 +723,11 @@ def is_slashable_validator(validator: Validator, epoch: Epoch) -> bool:
 ### `get_active_validator_indices`
 
 ```python
-def get_active_validator_indices(validators: List[Validator], epoch: Epoch) -> List[ValidatorIndex]:
+def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[ValidatorIndex]:
     """
-    Get indices of active validators from ``validators``.
+    Get active validator indices at ``epoch``.
""" - return [i for i, v in enumerate(validators) if is_active_validator(v, epoch)] + return [i for i, v in enumerate(state.validator_registry) if is_active_validator(v, epoch)] ``` ### `get_balance` @@ -811,7 +790,7 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: """ assert index < list_size assert list_size <= 2**40 - + for round in range(SHUFFLE_ROUND_COUNT): pivot = bytes_to_int(hash(seed + int_to_bytes1(round))[0:8]) % list_size flip = (pivot - index) % list_size @@ -828,29 +807,37 @@ def get_permuted_index(index: int, list_size: int, seed: Bytes32) -> int: ```python def get_split_offset(list_size: int, chunks: int, index: int) -> int: - """ - Returns a value such that for a list L, chunk count k and index i, - split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] - """ - return (list_size * index) // chunks + """ + Returns a value such that for a list L, chunk count k and index i, + split(L, k)[i] == L[get_split_offset(len(L), k, i): get_split_offset(len(L), k, i+1)] + """ + return (list_size * index) // chunks ``` ### `get_epoch_committee_count` ```python -def get_epoch_committee_count(active_validator_count: int) -> int: +def get_epoch_committee_count(state: BeaconState, epoch: Epoch) -> int: """ Return the number of committees in one epoch. """ + active_validators = get_active_validator_indices(state, epoch) return max( 1, min( SHARD_COUNT // SLOTS_PER_EPOCH, - active_validator_count // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, + len(active_validators) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE, ) ) * SLOTS_PER_EPOCH ``` +### `get_shard_delta` + +```python +def get_shard_delta(state: BeaconState, epoch: Epoch) -> int: + return min(get_epoch_committee_count(state, epoch), SHARD_COUNT - SHARD_COUNT // SLOTS_PER_EPOCH) +``` + ### `compute_committee` ```python @@ -872,20 +859,6 @@ def compute_committee(validator_indices: List[ValidatorIndex], **Note**: this definition and the next few definitions are highly inefficient as algorithms, as they re-calculate many sub-expressions. Production implementations are expected to appropriately use caching/memoization to avoid redoing work. -### `get_current_epoch_committee_count` - -```python -def get_current_epoch_committee_count(state: BeaconState) -> int: - """ - Return the number of committees in the current epoch of the given ``state``. 
- """ - current_active_validators = get_active_validator_indices( - state.validator_registry, - get_current_epoch(state), - ) - return get_epoch_committee_count(len(current_active_validators)) -``` - ### `get_crosslink_committees_at_slot` ```python @@ -900,20 +873,18 @@ def get_crosslink_committees_at_slot(state: BeaconState, next_epoch = current_epoch + 1 assert previous_epoch <= epoch <= next_epoch - indices = get_active_validator_indices( - state.validator_registry, - epoch, - ) - committees_per_epoch = get_epoch_committee_count(len(indices)) + indices = get_active_validator_indices(state, epoch) if epoch == current_epoch: start_shard = state.latest_start_shard elif epoch == previous_epoch: - start_shard = (state.latest_start_shard - committees_per_epoch) % SHARD_COUNT + previous_shard_delta = get_shard_delta(state, previous_epoch) + start_shard = (state.latest_start_shard - previous_shard_delta) % SHARD_COUNT elif epoch == next_epoch: - current_epoch_committees = get_current_epoch_committee_count(state) - start_shard = (state.latest_start_shard + current_epoch_committees) % SHARD_COUNT + current_shard_delta = get_shard_delta(state, current_epoch) + start_shard = (state.latest_start_shard + current_shard_delta) % SHARD_COUNT + committees_per_epoch = get_epoch_committee_count(state, epoch) committees_per_slot = committees_per_epoch // SLOTS_PER_EPOCH offset = slot % SLOTS_PER_EPOCH slot_start_shard = (start_shard + committees_per_slot * offset) % SHARD_COUNT @@ -940,7 +911,7 @@ def get_block_root(state: BeaconState, return state.latest_block_roots[slot % SLOTS_PER_HISTORICAL_ROOT] ``` -`get_block_root(_, s)` should always return `hash_tree_root` of the block in the beacon chain at slot `s`, and `get_crosslink_committees_at_slot(_, s)` should not change unless the [validator](#dfn-validator) registry changes. +`get_block_root(_, s)` should always return `signing_root` of the block in the beacon chain at slot `s`, and `get_crosslink_committees_at_slot(_, s)` should not change unless the [validator](#dfn-validator) registry changes. ### `get_state_root` @@ -995,25 +966,18 @@ def generate_seed(state: BeaconState, ### `get_beacon_proposer_index` ```python -def get_beacon_proposer_index(state: BeaconState, - slot: Slot) -> ValidatorIndex: +def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex: """ - Return the beacon proposer index for the ``slot``. - Due to proposer selection being based upon the validator balances during - the epoch in question, this can only be run for the current epoch. + Return the beacon proposer index at ``state.slot``. 
""" current_epoch = get_current_epoch(state) - assert slot_to_epoch(slot) == current_epoch - first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0] + first_committee, _ = get_crosslink_committees_at_slot(state, state.slot)[0] i = 0 while True: - rand_byte = hash( - generate_seed(state, current_epoch) + - int_to_bytes8(i // 32) - )[i % 32] candidate = first_committee[(current_epoch + i) % len(first_committee)] - if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * rand_byte: + random_byte = hash(generate_seed(state, current_epoch) + int_to_bytes8(i // 32))[i % 32] + if get_effective_balance(state, candidate) * 256 > MAX_DEPOSIT_AMOUNT * random_byte: return candidate i += 1 ``` @@ -1035,40 +999,19 @@ def verify_merkle_branch(leaf: Bytes32, proof: List[Bytes32], depth: int, index: return value == root ``` -### `get_crosslink_committee_for_attestation` +### `get_attesting_indices` ```python -def get_crosslink_committee_for_attestation(state: BeaconState, - attestation_data: AttestationData) -> List[ValidatorIndex]: - # Find the committee in the list with the desired shard +def get_attesting_indices(state: BeaconState, + attestation_data: AttestationData, + bitfield: bytes) -> List[ValidatorIndex]: + """ + Return the sorted attesting indices corresponding to ``attestation_data`` and ``bitfield``. + """ crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) - - assert attestation_data.shard in [shard for _, shard in crosslink_committees] crosslink_committee = [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] - - return crosslink_committee -``` - -### `get_attestation_participants` - -```python -def get_attestation_participants(state: BeaconState, - attestation_data: AttestationData, - bitfield: bytes) -> List[ValidatorIndex]: - """ - Return the sorted participant indices corresponding to ``attestation_data`` and ``bitfield``. - """ - crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) - assert verify_bitfield(bitfield, len(crosslink_committee)) - - # Find the participating attesters in the committee - participants = [] - for i, validator_index in enumerate(crosslink_committee): - aggregation_bit = get_bitfield_bit(bitfield, i) - if aggregation_bit == 0b1: - participants.append(validator_index) - return sorted(participants) + return sorted([index for i, index in enumerate(crosslink_committee) if get_bitfield_bit(bitfield, i) == 0b1]) ``` ### `int_to_bytes1`, `int_to_bytes2`, ... @@ -1147,19 +1090,19 @@ def verify_bitfield(bitfield: bytes, committee_size: int) -> bool: ### `convert_to_indexed` ```python -def convert_to_indexed(state: BeaconState, attestation: Attestation): +def convert_to_indexed(state: BeaconState, attestation: Attestation) -> IndexedAttestation: """ - Convert an attestation to (almost) indexed-verifiable form + Convert ``attestation`` to (almost) indexed-verifiable form. 
""" - attesting_indices = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) - custody_bit_1_indices = get_attestation_participants(state, attestation.data, attestation.custody_bitfield) + attesting_indices = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) + custody_bit_1_indices = get_attesting_indices(state, attestation.data, attestation.custody_bitfield) custody_bit_0_indices = [index for index in attesting_indices if index not in custody_bit_1_indices] return IndexedAttestation( custody_bit_0_indices=custody_bit_0_indices, custody_bit_1_indices=custody_bit_1_indices, data=attestation.data, - signature=attestation.signature + signature=attestation.signature, ) ``` @@ -1173,14 +1116,13 @@ def verify_indexed_attestation(state: BeaconState, indexed_attestation: IndexedA custody_bit_0_indices = indexed_attestation.custody_bit_0_indices custody_bit_1_indices = indexed_attestation.custody_bit_1_indices - # ensure no duplicate indices across custody bits + # Ensure no duplicate indices across custody bits assert len(set(custody_bit_0_indices).intersection(set(custody_bit_1_indices))) == 0 if len(custody_bit_1_indices) > 0: # [TO BE REMOVED IN PHASE 1] return False - total_attesting_indices = len(custody_bit_0_indices + custody_bit_1_indices) - if not (1 <= total_attesting_indices <= MAX_ATTESTATION_PARTICIPANTS): + if not (1 <= len(custody_bit_0_indices) + len(custody_bit_1_indices) <= MAX_INDICES_PER_ATTESTATION): return False if custody_bit_0_indices != sorted(custody_bit_0_indices): @@ -1258,77 +1200,27 @@ def get_delayed_activation_exit_epoch(epoch: Epoch) -> Epoch: return epoch + 1 + ACTIVATION_EXIT_DELAY ``` +### `get_churn_limit` + +```python +def get_churn_limit(state: BeaconState) -> int: + return max( + MIN_PER_EPOCH_CHURN_LIMIT, + len(get_active_validator_indices(state, get_current_epoch(state))) // CHURN_LIMIT_QUOTIENT + ) +``` + ### `bls_verify` -`bls_verify` is a function for verifying a BLS signature, defined in the [BLS Signature spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#bls_verify). +`bls_verify` is a function for verifying a BLS signature, defined in the [BLS Signature spec](../bls_signature.md#bls_verify). ### `bls_verify_multiple` -`bls_verify_multiple` is a function for verifying a BLS signature constructed from multiple messages, defined in the [BLS Signature spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#bls_verify_multiple). +`bls_verify_multiple` is a function for verifying a BLS signature constructed from multiple messages, defined in the [BLS Signature spec](../bls_signature.md#bls_verify_multiple). ### `bls_aggregate_pubkeys` -`bls_aggregate_pubkeys` is a function for aggregating multiple BLS public keys into a single aggregate key, defined in the [BLS Signature spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#bls_aggregate_pubkeys). - -### `process_deposit` - -Used to add a [validator](#dfn-validator) or top up an existing [validator](#dfn-validator)'s balance by some `deposit` amount: - -```python -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - """ - Process a deposit from Ethereum 1.0. - Note that this function mutates ``state``. 
- """ - # Deposits must be processed in order - assert deposit.index == state.deposit_index - - # Verify the Merkle branch - merkle_branch_is_valid = verify_merkle_branch( - leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization - proof=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH, - index=deposit.index, - root=state.latest_eth1_data.deposit_root, - ) - assert merkle_branch_is_valid - - # Increment the next deposit index we are expecting. Note that this - # needs to be done here because while the deposit contract will never - # create an invalid Merkle branch, it may admit an invalid deposit - # object, and we need to be able to skip over it - state.deposit_index += 1 - - validator_pubkeys = [v.pubkey for v in state.validator_registry] - pubkey = deposit.data.pubkey - amount = deposit.data.amount - - if pubkey not in validator_pubkeys: - # Verify the deposit signature (proof of possession) - if not bls_verify(pubkey, signed_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)): - return - - # Add new validator - validator = Validator( - pubkey=pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - activation_epoch=FAR_FUTURE_EPOCH, - exit_epoch=FAR_FUTURE_EPOCH, - withdrawable_epoch=FAR_FUTURE_EPOCH, - initiated_exit=False, - slashed=False, - high_balance=0 - ) - - # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. - state.validator_registry.append(validator) - state.balances.append(0) - set_balance(state, len(state.validator_registry) - 1, amount) - else: - # Increase balance by deposit amount - index = validator_pubkeys.index(pubkey) - increase_balance(state, index, amount) -``` +`bls_aggregate_pubkeys` is a function for aggregating multiple BLS public keys into a single aggregate key, defined in the [BLS Signature spec](../bls_signature.md#bls_aggregate_pubkeys). ### Routines for updating validator status @@ -1337,14 +1229,17 @@ Note: All functions in this section mutate `state`. #### `activate_validator` ```python -def activate_validator(state: BeaconState, index: ValidatorIndex, is_genesis: bool) -> None: +def activate_validator(state: BeaconState, index: ValidatorIndex) -> None: """ Activate the validator of the given ``index``. Note that this function mutates ``state``. """ validator = state.validator_registry[index] - - validator.activation_epoch = GENESIS_EPOCH if is_genesis else get_delayed_activation_exit_epoch(get_current_epoch(state)) + if state.slot == GENESIS_SLOT: + validator.activation_eligibility_epoch = GENESIS_EPOCH + validator.activation_epoch = GENESIS_EPOCH + else: + validator.activation_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) ``` #### `initiate_validator_exit` @@ -1355,23 +1250,21 @@ def initiate_validator_exit(state: BeaconState, index: ValidatorIndex) -> None: Initiate the validator of the given ``index``. Note that this function mutates ``state``. 
""" + # Return if validator already initiated exit validator = state.validator_registry[index] - validator.initiated_exit = True -``` + if validator.exit_epoch != FAR_FUTURE_EPOCH: + return -#### `exit_validator` + # Compute exit queue epoch + exit_epochs = [v.exit_epoch for v in state.validator_registry if v.exit_epoch != FAR_FUTURE_EPOCH] + exit_queue_epoch = sorted(exit_epochs + [get_delayed_activation_exit_epoch(get_current_epoch(state))])[-1] + exit_queue_churn = len([v for v in state.validator_registry if v.exit_epoch == exit_queue_epoch]) + if exit_queue_churn >= get_churn_limit(state): + exit_queue_epoch += 1 -```python -def exit_validator(state: BeaconState, index: ValidatorIndex) -> None: - """ - Exit the validator with the given ``index``. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - - # Update validator exit epoch if not previously exited - if validator.exit_epoch == FAR_FUTURE_EPOCH: - validator.exit_epoch = get_delayed_activation_exit_epoch(get_current_epoch(state)) + # Set validator exit epoch and withdrawable epoch + validator.exit_epoch = exit_queue_epoch + validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY ``` #### `slash_validator` @@ -1382,13 +1275,13 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl Slash the validator with index ``slashed_index``. Note that this function mutates ``state``. """ - exit_validator(state, slashed_index) + initiate_validator_exit(state, slashed_index) state.validator_registry[slashed_index].slashed = True state.validator_registry[slashed_index].withdrawable_epoch = get_current_epoch(state) + LATEST_SLASHED_EXIT_LENGTH slashed_balance = get_effective_balance(state, slashed_index) state.latest_slashed_balances[get_current_epoch(state) % LATEST_SLASHED_EXIT_LENGTH] += slashed_balance - proposer_index = get_beacon_proposer_index(state, state.slot) + proposer_index = get_beacon_proposer_index(state) if whistleblower_index is None: whistleblower_index = proposer_index whistleblowing_reward = slashed_balance // WHISTLEBLOWING_REWARD_QUOTIENT @@ -1398,19 +1291,6 @@ def slash_validator(state: BeaconState, slashed_index: ValidatorIndex, whistlebl decrease_balance(state, slashed_index, whistleblowing_reward) ``` -#### `prepare_validator_for_withdrawal` - -```python -def prepare_validator_for_withdrawal(state: BeaconState, index: ValidatorIndex) -> None: - """ - Set the validator with the given ``index`` as withdrawable - ``MIN_VALIDATOR_WITHDRAWABILITY_DELAY`` after the current epoch. - Note that this function mutates ``state``. - """ - validator = state.validator_registry[index] - validator.withdrawable_epoch = get_current_epoch(state) + MIN_VALIDATOR_WITHDRAWABILITY_DELAY -``` - ## Ethereum 1.0 deposit contract The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in phase 2, i.e. when the EVM2.0 is deployed and the shards have state. 
@@ -1515,7 +1395,6 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Validator registry validator_registry=[], balances=[], - validator_registry_update_epoch=GENESIS_EPOCH, # Randomness and committees latest_randao_mixes=Vector([ZERO_HASH for _ in range(LATEST_RANDAO_MIXES_LENGTH)]), @@ -1524,7 +1403,7 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], # Finality previous_epoch_attestations=[], current_epoch_attestations=[], - previous_justified_epoch=GENESIS_EPOCH - 1, + previous_justified_epoch=GENESIS_EPOCH, current_justified_epoch=GENESIS_EPOCH, previous_justified_root=ZERO_HASH, current_justified_root=ZERO_HASH, @@ -1533,7 +1412,8 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], finalized_root=ZERO_HASH, # Recent state - latest_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + current_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), + previous_crosslinks=Vector([Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH) for _ in range(SHARD_COUNT)]), latest_block_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), latest_state_roots=Vector([ZERO_HASH for _ in range(SLOTS_PER_HISTORICAL_ROOT)]), latest_active_index_roots=Vector([ZERO_HASH for _ in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH)]), @@ -1552,11 +1432,11 @@ def get_genesis_beacon_state(genesis_validator_deposits: List[Deposit], process_deposit(state, deposit) # Process genesis activations - for validator_index, _ in enumerate(state.validator_registry): - if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT: - activate_validator(state, validator_index, is_genesis=True) + for index in range(len(state.validator_registry)): + if get_effective_balance(state, index) >= MAX_DEPOSIT_AMOUNT: + activate_validator(state, index) - genesis_active_index_root = hash_tree_root(get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)) + genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, GENESIS_EPOCH)) for index in range(LATEST_ACTIVE_INDEX_ROOTS_LENGTH): state.latest_active_index_roots[index] = genesis_active_index_root @@ -1577,7 +1457,7 @@ For a beacon chain block, `block`, to be processed by a node, the following cond * The parent block with root `block.previous_block_root` has been processed and accepted. * An Ethereum 1.0 block pointed to by the `state.latest_eth1_data.block_hash` has been processed and accepted. -* The node's Unix time is greater than or equal to `state.genesis_time + (block.slot - GENESIS_SLOT) * SECONDS_PER_SLOT`. (Note that leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year.) +* The node's Unix time is greater than or equal to `state.genesis_time + block.slot * SECONDS_PER_SLOT`. (Note that leap seconds mean that slots will occasionally last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds, possibly several times a year.) If these conditions are not met, the client should delay processing the beacon block until the conditions are all satisfied. 
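
With `GENESIS_SLOT` now 0, the timing condition above reduces to simple arithmetic: slot `N` starts `N * SECONDS_PER_SLOT` seconds after genesis. A small illustrative sketch, with made-up timestamps:

```python
# Sketch of the block-timing condition above (GENESIS_SLOT = 0 after this change).
SECONDS_PER_SLOT = 6


def slot_start_time(genesis_time: int, slot: int) -> int:
    # Slot N starts N * SECONDS_PER_SLOT seconds after genesis
    return genesis_time + slot * SECONDS_PER_SLOT


def may_process_block(unix_time: int, genesis_time: int, block_slot: int) -> bool:
    # A node may process a block once the block's slot has started
    return unix_time >= slot_start_time(genesis_time, block_slot)


assert may_process_block(unix_time=1_600_000_060, genesis_time=1_600_000_000, block_slot=10)
assert not may_process_block(unix_time=1_600_000_059, genesis_time=1_600_000_000, block_slot=10)
```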
@@ -1605,8 +1485,8 @@ def get_ancestor(store: Store, block: BeaconBlock, slot: Slot) -> BeaconBlock: return get_ancestor(store, store.get_parent(block), slot) ``` -* Let `get_latest_attestation(store: Store, validator_index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `validator_index`. If several such attestations exist, use the one the [validator](#dfn-validator) `v` observed first. -* Let `get_latest_attestation_target(store: Store, validator_index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, validator_index)`. +* Let `get_latest_attestation(store: Store, index: ValidatorIndex) -> Attestation` be the attestation with the highest slot number in `store` from the validator with the given `index`. If several such attestations exist, use the one the [validator](#dfn-validator) `v` observed first. +* Let `get_latest_attestation_target(store: Store, index: ValidatorIndex) -> BeaconBlock` be the target block in the attestation `get_latest_attestation(store, index)`. * Let `get_children(store: Store, block: BeaconBlock) -> List[BeaconBlock]` return the child blocks of the given `block`. * Let `justified_head_state` be the resulting `BeaconState` object from processing the chain up to the `justified_head`. * The `head` is `lmd_ghost(store, justified_head_state, justified_head)` where the function `lmd_ghost` is defined below. Note that the implementation below is suboptimal; there are implementations that compute the head in time logarithmic in slot count. @@ -1618,10 +1498,7 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) """ validators = start_state.validator_registry active_validator_indices = get_active_validator_indices(validators, slot_to_epoch(start_state.slot)) - attestation_targets = [ - (validator_index, get_latest_attestation_target(store, validator_index)) - for validator_index in active_validator_indices - ] + attestation_targets = [(i, get_latest_attestation_target(store, i)) for i in active_validator_indices] # Use the rounded-balance-with-hysteresis supplied by the protocol for fork # choice voting. This reduces the number of recomputations that need to be @@ -1638,6 +1515,7 @@ def lmd_ghost(store: Store, start_state: BeaconState, start_block: BeaconBlock) children = get_children(store, head) if len(children) == 0: return head + # Ties broken by favoring block with lexicographically higher root head = max(children, key=lambda x: (get_vote_count(x), hash_tree_root(x))) ``` @@ -1651,9 +1529,9 @@ We now define the state transition function. At a high level, the state transiti 4. The per-block transitions, which happen at every block. Transition section notes: -* The state caching caches the state root of the previous slot. +* The state caching caches the state root of the previous slot and updates block and state roots records. * The per-epoch transitions focus on the [validator](#dfn-validator) registry, including adjusting balances and activating and exiting [validators](#dfn-validator), as well as processing crosslinks and managing block justification/finalization. -* The per-slot transitions focus on the slot counter and block roots records updates. +* The per-slot transitions focus on the slot counter. * The per-block transitions generally focus on verifying aggregate signatures and saving temporary records relating to the per-block activity in the `BeaconState`.
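As a rough, non-normative sketch of how these stages compose (`state_transition`, `process_epoch_transition`, and `process_block` are illustrative names standing in for the per-epoch and per-block functions defined below):

```python
def state_transition(state, block):
    # Advance slot-by-slot up to the block's slot
    while state.slot < block.slot:
        # 1. State caching, at every slot
        cache_state(state)
        # 2. Per-epoch transitions, at the last slot of every epoch
        if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
            process_epoch_transition(state)  # illustrative wrapper over the per-epoch functions
        # 3. Per-slot transitions: advance the slot counter
        state.slot += 1
    # 4. Per-block transitions at the block's slot
    process_block(state, block)  # illustrative wrapper over the per-block functions
```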
Beacon blocks that trigger unhandled Python exceptions (e.g. out-of-range list accesses) and failed `assert`s during the state transition are considered invalid. @@ -1666,17 +1544,17 @@ At every `slot > GENESIS_SLOT` run the following function: ```python def cache_state(state: BeaconState) -> None: - previous_slot_state_root = hash_tree_root(state) + # Cache latest known state root (for previous slot) + latest_state_root = hash_tree_root(state) + state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_state_root - # store the previous slot's post state transition root - state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_slot_state_root - - # cache state root in stored latest_block_header if empty + # Store latest known state root (for previous slot) in latest_block_header if it is empty if state.latest_block_header.state_root == ZERO_HASH: - state.latest_block_header.state_root = previous_slot_state_root + state.latest_block_header.state_root = latest_state_root - # store latest known block for previous slot - state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = signed_root(state.latest_block_header) + # Cache latest known block root (for previous slot) + latest_block_root = signing_root(state.latest_block_header) + state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_block_root ``` ### Per-epoch processing @@ -1685,29 +1563,29 @@ The steps below happen when `state.slot > GENESIS_SLOT and (state.slot + 1) % SL #### Helper functions -We define some helper functions utilized when processing an epoch transition: +We define epoch transition helper functions: ```python def get_current_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_current_epoch(state))) + return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state))) ``` ```python def get_previous_total_balance(state: BeaconState) -> Gwei: - return get_total_balance(state, get_active_validator_indices(state.validator_registry, get_previous_epoch(state))) + return get_total_balance(state, get_active_validator_indices(state, get_previous_epoch(state))) ``` ```python -def get_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: +def get_unslashed_attesting_indices(state: BeaconState, attestations: List[PendingAttestation]) -> List[ValidatorIndex]: output = set() for a in attestations: - output = output.union(get_attestation_participants(state, a.data, a.aggregation_bitfield)) - return sorted(list(output)) + output = output.union(get_attesting_indices(state, a.data, a.aggregation_bitfield)) + return sorted(filter(lambda index: not state.validator_registry[index].slashed, list(output))) ``` ```python def get_attesting_balance(state: BeaconState, attestations: List[PendingAttestation]) -> Gwei: - return get_total_balance(state, get_attesting_indices(state, attestations)) + return get_total_balance(state, get_unslashed_attesting_indices(state, attestations)) ``` ```python @@ -1737,93 +1615,89 @@ def get_previous_epoch_matching_head_attestations(state: BeaconState) -> List[Pe **Note**: Total balances computed for the previous epoch might be marginally different than the actual total balances during the previous epoch transition. Due to the tight bound on validator churn each epoch and small per-epoch rewards/penalties, the potential balance difference is very low and only marginally affects consensus safety. 
```python -def get_winning_root_and_participants(state: BeaconState, shard: Shard) -> Tuple[Bytes32, List[ValidatorIndex]]: - all_attestations = state.current_epoch_attestations + state.previous_epoch_attestations - valid_attestations = [ - a for a in all_attestations if a.data.previous_crosslink == state.latest_crosslinks[shard] - ] - all_roots = [a.data.crosslink_data_root for a in valid_attestations] - - # handle when no attestations for shard available - if len(all_roots) == 0: - return ZERO_HASH, [] - - def get_attestations_for(root) -> List[PendingAttestation]: - return [a for a in valid_attestations if a.data.crosslink_data_root == root] - - # Winning crosslink root is the root with the most votes for it, ties broken in favor of - # lexicographically higher hash - winning_root = max(all_roots, key=lambda r: (get_attesting_balance(state, get_attestations_for(r)), r)) - - return winning_root, get_attesting_indices(state, get_attestations_for(winning_root)) +def get_crosslink_from_attestation_data(state: BeaconState, data: AttestationData) -> Crosslink: + return Crosslink( + epoch=min(slot_to_epoch(data.slot), state.current_crosslinks[data.shard].epoch + MAX_CROSSLINK_EPOCHS), + previous_crosslink_root=data.previous_crosslink_root, + crosslink_data_root=data.crosslink_data_root, + ) ``` ```python -def earliest_attestation(state: BeaconState, validator_index: ValidatorIndex) -> PendingAttestation: +def get_winning_crosslink_and_attesting_indices(state: BeaconState, epoch: Epoch, shard: Shard) -> Tuple[Crosslink, List[ValidatorIndex]]: + pending_attestations = state.current_epoch_attestations if epoch == get_current_epoch(state) else state.previous_epoch_attestations + shard_attestations = [a for a in pending_attestations if a.data.shard == shard] + shard_crosslinks = [get_crosslink_from_attestation_data(state, a.data) for a in shard_attestations] + candidate_crosslinks = [ + c for c in shard_crosslinks + if hash_tree_root(state.current_crosslinks[shard]) in (c.previous_crosslink_root, hash_tree_root(c)) + ] + if len(candidate_crosslinks) == 0: + return Crosslink(epoch=GENESIS_EPOCH, previous_crosslink_root=ZERO_HASH, crosslink_data_root=ZERO_HASH), [] + + def get_attestations_for(crosslink: Crosslink) -> List[PendingAttestation]: + return [a for a in shard_attestations if get_crosslink_from_attestation_data(state, a.data) == crosslink] + # Winning crosslink has the crosslink data root with the most balance voting for it (ties broken lexicographically) + winning_crosslink = max(candidate_crosslinks, key=lambda crosslink: ( + get_attesting_balance(state, get_attestations_for(crosslink)), crosslink.crosslink_data_root + )) + + return winning_crosslink, get_unslashed_attesting_indices(state, get_attestations_for(winning_crosslink)) +``` + +```python +def get_earliest_attestation(state: BeaconState, attestations: List[PendingAttestation], index: ValidatorIndex) -> PendingAttestation: return min([ - a for a in state.previous_epoch_attestations if - validator_index in get_attestation_participants(state, a.data, a.aggregation_bitfield) + a for a in attestations if index in get_attesting_indices(state, a.data, a.aggregation_bitfield) ], key=lambda a: a.inclusion_slot) ``` -```python -def inclusion_slot(state: BeaconState, validator_index: ValidatorIndex) -> Slot: - return earliest_attestation(state, validator_index).inclusion_slot -``` - -```python -def inclusion_distance(state: BeaconState, validator_index: ValidatorIndex) -> int: - attestation = earliest_attestation(state, validator_index) - 
return attestation.inclusion_slot - attestation.data.slot -``` - -#### Justification +#### Justification and finalization Run the following function: ```python -def update_justification_and_finalization(state: BeaconState) -> None: - new_justified_epoch = state.current_justified_epoch - new_finalized_epoch = state.finalized_epoch +def process_justification_and_finalization(state: BeaconState) -> None: + if get_current_epoch(state) <= GENESIS_EPOCH + 1: + return - # Rotate the justification bitfield up one epoch to make room for the current epoch - state.justification_bitfield <<= 1 - # If the previous epoch gets justified, fill the second last bit + old_previous_justified_epoch = state.previous_justified_epoch + old_current_justified_epoch = state.current_justified_epoch + + # Process justifications + state.previous_justified_epoch = state.current_justified_epoch + state.previous_justified_root = state.current_justified_root + state.justification_bitfield = (state.justification_bitfield << 1) % 2**64 previous_boundary_attesting_balance = get_attesting_balance(state, get_previous_epoch_boundary_attestations(state)) if previous_boundary_attesting_balance * 3 >= get_previous_total_balance(state) * 2: - new_justified_epoch = get_current_epoch(state) - 1 - state.justification_bitfield |= 2 - # If the current epoch gets justified, fill the last bit + state.current_justified_epoch = get_previous_epoch(state) + state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) + state.justification_bitfield |= (1 << 1) current_boundary_attesting_balance = get_attesting_balance(state, get_current_epoch_boundary_attestations(state)) if current_boundary_attesting_balance * 3 >= get_current_total_balance(state) * 2: - new_justified_epoch = get_current_epoch(state) - state.justification_bitfield |= 1 + state.current_justified_epoch = get_current_epoch(state) + state.current_justified_root = get_block_root(state, get_epoch_start_slot(state.current_justified_epoch)) + state.justification_bitfield |= (1 << 0) # Process finalizations bitfield = state.justification_bitfield current_epoch = get_current_epoch(state) - # The 2nd/3rd/4th most recent epochs are all justified, the 2nd using the 4th as source - if (bitfield >> 1) % 8 == 0b111 and state.previous_justified_epoch == current_epoch - 3: - new_finalized_epoch = state.previous_justified_epoch - # The 2nd/3rd most recent epochs are both justified, the 2nd using the 3rd as source - if (bitfield >> 1) % 4 == 0b11 and state.previous_justified_epoch == current_epoch - 2: - new_finalized_epoch = state.previous_justified_epoch - # The 1st/2nd/3rd most recent epochs are all justified, the 1st using the 3rd as source - if (bitfield >> 0) % 8 == 0b111 and state.current_justified_epoch == current_epoch - 2: - new_finalized_epoch = state.current_justified_epoch - # The 1st/2nd most recent epochs are both justified, the 1st using the 2nd as source - if (bitfield >> 0) % 4 == 0b11 and state.current_justified_epoch == current_epoch - 1: - new_finalized_epoch = state.current_justified_epoch - - # Update state jusification/finality fields - state.previous_justified_epoch = state.current_justified_epoch - state.previous_justified_root = state.current_justified_root - if new_justified_epoch != state.current_justified_epoch: - state.current_justified_epoch = new_justified_epoch - state.current_justified_root = get_block_root(state, get_epoch_start_slot(new_justified_epoch)) - if new_finalized_epoch != state.finalized_epoch: - 
state.finalized_epoch = new_finalized_epoch - state.finalized_root = get_block_root(state, get_epoch_start_slot(new_finalized_epoch)) + # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source + if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3: + state.finalized_epoch = old_previous_justified_epoch + state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source + if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch == current_epoch - 2: + state.finalized_epoch = old_previous_justified_epoch + state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source + if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch == current_epoch - 2: + state.finalized_epoch = old_current_justified_epoch + state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) + # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source + if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch == current_epoch - 1: + state.finalized_epoch = old_current_justified_epoch + state.finalized_root = get_block_root(state, get_epoch_start_slot(state.finalized_epoch)) ``` #### Crosslinks @@ -1832,47 +1706,32 @@ Run the following function: ```python def process_crosslinks(state: BeaconState) -> None: - current_epoch = get_current_epoch(state) - previous_epoch = max(current_epoch - 1, GENESIS_EPOCH) - next_epoch = current_epoch + 1 + state.previous_crosslinks = [c for c in state.current_crosslinks] + previous_epoch = get_previous_epoch(state) + next_epoch = get_current_epoch(state) + 1 for slot in range(get_epoch_start_slot(previous_epoch), get_epoch_start_slot(next_epoch)): for crosslink_committee, shard in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, shard) - participating_balance = get_total_balance(state, participants) - total_balance = get_total_balance(state, crosslink_committee) - if 3 * participating_balance >= 2 * total_balance: - state.latest_crosslinks[shard] = Crosslink( - epoch=min(slot_to_epoch(slot), state.latest_crosslinks[shard].epoch + MAX_CROSSLINK_EPOCHS), - crosslink_data_root=winning_root - ) -``` - -#### Eth1 data - -Run the following function: - -```python -def maybe_reset_eth1_period(state: BeaconState) -> None: - if (get_current_epoch(state) + 1) % EPOCHS_PER_ETH1_VOTING_PERIOD == 0: - for eth1_data_vote in state.eth1_data_votes: - # If a majority of all votes were for a particular eth1_data value, - # then set that as the new canonical value - if eth1_data_vote.vote_count * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH: - state.latest_eth1_data = eth1_data_vote.eth1_data - state.eth1_data_votes = [] + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) + if 3 * get_total_balance(state, attesting_indices) >= 2 * get_total_balance(state, crosslink_committee): + state.current_crosslinks[shard] = winning_crosslink ``` #### Rewards and penalties -First, we define some additional helpers: +First, we define additional helpers: + +```python +def get_base_reward_from_total_balance(state: BeaconState, total_balance: Gwei, index: ValidatorIndex) -> Gwei: + if total_balance == 0: + return 0 + + adjusted_quotient = 
integer_squareroot(total_balance) // BASE_REWARD_QUOTIENT + return get_effective_balance(state, index) // adjusted_quotient // 5 +``` ```python def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei: - if get_previous_total_balance(state) == 0: - return 0 - - adjusted_quotient = integer_squareroot(get_previous_total_balance(state)) // BASE_REWARD_QUOTIENT - return get_effective_balance(state, index) // adjusted_quotient // 5 + return get_base_reward_from_total_balance(state, get_previous_total_balance(state), index) ``` ```python @@ -1884,10 +1743,6 @@ def get_inactivity_penalty(state: BeaconState, index: ValidatorIndex, epochs_sin return get_base_reward(state, index) + extra_penalty ``` -Note: When applying penalties in the following balance recalculations, implementers should make sure the `uint64` does not underflow. - -##### Justification and finalization - ```python def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: current_epoch = get_current_epoch(state) @@ -1912,169 +1767,94 @@ def get_justification_and_finalization_deltas(state: BeaconState) -> Tuple[List[ for index in eligible_validators: base_reward = get_base_reward(state, index) # Expected FFG source - if index in get_attesting_indices(state, state.previous_epoch_attestations): + if index in get_unslashed_attesting_indices(state, state.previous_epoch_attestations): rewards[index] += base_reward * total_attesting_balance // total_balance # Inclusion speed bonus - rewards[index] += ( - base_reward * MIN_ATTESTATION_INCLUSION_DELAY // - inclusion_distance(state, index) - ) + earliest_attestation = get_earliest_attestation(state, state.previous_epoch_attestations, index) + inclusion_delay = earliest_attestation.inclusion_slot - earliest_attestation.data.slot + rewards[index] += base_reward * MIN_ATTESTATION_INCLUSION_DELAY // inclusion_delay else: penalties[index] += base_reward # Expected FFG target - if index in get_attesting_indices(state, boundary_attestations): + if index in get_unslashed_attesting_indices(state, boundary_attestations): rewards[index] += base_reward * boundary_attesting_balance // total_balance else: penalties[index] += get_inactivity_penalty(state, index, epochs_since_finality) # Expected head - if index in get_attesting_indices(state, matching_head_attestations): + if index in get_unslashed_attesting_indices(state, matching_head_attestations): rewards[index] += base_reward * matching_head_balance // total_balance else: penalties[index] += base_reward - # Proposer bonus - if index in get_attesting_indices(state, state.previous_epoch_attestations): - proposer_index = get_beacon_proposer_index(state, inclusion_slot(state, index)) - rewards[proposer_index] += base_reward // PROPOSER_REWARD_QUOTIENT # Take away max rewards if we're not finalizing if epochs_since_finality > 4: penalties[index] += base_reward * 4 return [rewards, penalties] ``` -##### Crosslinks - ```python def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]: rewards = [0 for index in range(len(state.validator_registry))] penalties = [0 for index in range(len(state.validator_registry))] - previous_epoch_start_slot = get_epoch_start_slot(get_previous_epoch(state)) - current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state)) - for slot in range(previous_epoch_start_slot, current_epoch_start_slot): + for slot in range(get_epoch_start_slot(get_previous_epoch(state)), get_epoch_start_slot(get_current_epoch(state))): for crosslink_committee, shard 
in get_crosslink_committees_at_slot(state, slot): - winning_root, participants = get_winning_root_and_participants(state, shard) - participating_balance = get_total_balance(state, participants) - total_balance = get_total_balance(state, crosslink_committee) + winning_crosslink, attesting_indices = get_winning_crosslink_and_attesting_indices(state, slot_to_epoch(slot), shard) + attesting_balance = get_total_balance(state, attesting_indices) + committee_balance = get_total_balance(state, crosslink_committee) for index in crosslink_committee: - if index in participants: - rewards[index] += get_base_reward(state, index) * participating_balance // total_balance + if index in attesting_indices: + rewards[index] += get_base_reward(state, index) * attesting_balance // committee_balance else: penalties[index] += get_base_reward(state, index) return [rewards, penalties] ``` -#### Apply rewards - -Run the following: +Run the following function: ```python -def apply_rewards(state: BeaconState) -> None: +def process_rewards_and_penalties(state: BeaconState) -> None: + if get_current_epoch(state) == GENESIS_EPOCH: + return + rewards1, penalties1 = get_justification_and_finalization_deltas(state) rewards2, penalties2 = get_crosslink_deltas(state) for i in range(len(state.validator_registry)): - set_balance( - state, - i, - max( - 0, - get_balance(state, i) + rewards1[i] + rewards2[i] - penalties1[i] - penalties2[i], - ), - ) + increase_balance(state, i, rewards1[i] + rewards2[i]) + decrease_balance(state, i, penalties1[i] + penalties2[i]) ``` -#### Ejections - -Run `process_ejections(state)`. - -```python -def process_ejections(state: BeaconState) -> None: - """ - Iterate through the validator registry - and eject active validators with balance below ``EJECTION_BALANCE``. - """ - for index in get_active_validator_indices(state.validator_registry, get_current_epoch(state)): - if get_balance(state, index) < EJECTION_BALANCE: - initiate_validator_exit(state, index) -``` - -#### Validator registry and shuffling seed data - -```python -def update_validator_registry(state: BeaconState) -> None: - """ - Update validator registry. - Note that this function mutates ``state``. 
- """ - current_epoch = get_current_epoch(state) - # The active validators - active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) - # The total effective balance of active validators - total_balance = get_total_balance(state, active_validator_indices) - - # The maximum balance churn in Gwei (for deposits and exits separately) - max_balance_churn = max( - MAX_DEPOSIT_AMOUNT, - total_balance // (2 * MAX_BALANCE_CHURN_QUOTIENT) - ) - - # Activate validators within the allowable balance churn - balance_churn = 0 - for index, validator in enumerate(state.validator_registry): - if validator.activation_epoch == FAR_FUTURE_EPOCH and get_balance(state, index) >= MAX_DEPOSIT_AMOUNT: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break - - # Activate validator - activate_validator(state, index, is_genesis=False) - - # Exit validators within the allowable balance churn - if current_epoch < state.validator_registry_update_epoch + LATEST_SLASHED_EXIT_LENGTH: - balance_churn = ( - state.latest_slashed_balances[state.validator_registry_update_epoch % LATEST_SLASHED_EXIT_LENGTH] - - state.latest_slashed_balances[current_epoch % LATEST_SLASHED_EXIT_LENGTH] - ) - - for index, validator in enumerate(state.validator_registry): - if validator.exit_epoch == FAR_FUTURE_EPOCH and validator.initiated_exit: - # Check the balance churn would be within the allowance - balance_churn += get_effective_balance(state, index) - if balance_churn > max_balance_churn: - break - - # Exit validator - exit_validator(state, index) - - state.validator_registry_update_epoch = current_epoch -``` +#### Registry updates Run the following function: ```python -def update_registry(state: BeaconState) -> None: - # Check if we should update, and if so, update - if state.finalized_epoch > state.validator_registry_update_epoch: - update_validator_registry(state) - state.latest_start_shard = ( - state.latest_start_shard + - get_current_epoch_committee_count(state) - ) % SHARD_COUNT +def process_registry_updates(state: BeaconState) -> None: + # Process activation eligibility and ejections + for index, validator in enumerate(state.validator_registry): + balance = get_balance(state, index) + if validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and balance >= MAX_DEPOSIT_AMOUNT: + validator.activation_eligibility_epoch = get_current_epoch(state) + + if is_active_validator(validator, get_current_epoch(state)) and balance < EJECTION_BALANCE: + initiate_validator_exit(state, index) + + # Process activations + activation_queue = sorted([ + index for index, validator in enumerate(state.validator_registry) if + validator.activation_eligibility_epoch != FAR_FUTURE_EPOCH and + validator.activation_epoch >= get_delayed_activation_exit_epoch(state.finalized_epoch) + ], key=lambda index: state.validator_registry[index].activation_eligibility_epoch) + for index in activation_queue[:get_churn_limit(state)]: + activate_validator(state, index) ``` -**Invariant**: the active index root that is hashed into the shuffling seed actually is the `hash_tree_root` of the validator set that is used for that epoch. +#### Slashings -#### Slashings and exit queue - -Run `process_slashings(state)` and `process_exit_queue(state)`: +Run the following function: ```python def process_slashings(state: BeaconState) -> None: - """ - Process the slashings. - Note that this function mutates ``state``. 
- """ current_epoch = get_current_epoch(state) - active_validator_indices = get_active_validator_indices(state.validator_registry, current_epoch) + active_validator_indices = get_active_validator_indices(state, current_epoch) total_balance = get_total_balance(state, active_validator_indices) # Compute `total_penalties` @@ -2091,42 +1871,23 @@ def process_slashings(state: BeaconState) -> None: decrease_balance(state, index, penalty) ``` -```python -def process_exit_queue(state: BeaconState) -> None: - """ - Process the exit queue. - Note that this function mutates ``state``. - """ - def eligible(index): - validator = state.validator_registry[index] - # Filter out dequeued validators - if validator.withdrawable_epoch != FAR_FUTURE_EPOCH: - return False - # Dequeue if the minimum amount of time has passed - else: - return get_current_epoch(state) >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY - - eligible_indices = filter(eligible, list(range(len(state.validator_registry)))) - # Sort in order of exit epoch, and validators that exit within the same epoch exit in order of validator index - sorted_indices = sorted(eligible_indices, key=lambda index: state.validator_registry[index].exit_epoch) - for dequeues, index in enumerate(sorted_indices): - if dequeues >= MAX_EXIT_DEQUEUES_PER_EPOCH: - break - prepare_validator_for_withdrawal(state, index) -``` - #### Final updates Run the following function: ```python -def finish_epoch_update(state: BeaconState) -> None: +def process_final_updates(state: BeaconState) -> None: current_epoch = get_current_epoch(state) next_epoch = current_epoch + 1 + # Reset eth1 data votes + if state.slot % SLOTS_PER_ETH1_VOTING_PERIOD == 0: + state.eth1_data_votes = [] + # Update start shard + state.latest_start_shard = (state.latest_start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT # Set active index root index_root_position = (next_epoch + ACTIVATION_EXIT_DELAY) % LATEST_ACTIVE_INDEX_ROOTS_LENGTH state.latest_active_index_roots[index_root_position] = hash_tree_root( - get_active_validator_indices(state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY) + get_active_validator_indices(state, next_epoch + ACTIVATION_EXIT_DELAY) ) # Set total slashed balances state.latest_slashed_balances[next_epoch % LATEST_SLASHED_EXIT_LENGTH] = ( @@ -2166,21 +1927,21 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None: # Verify that the slots match assert block.slot == state.slot # Verify that the parent matches - assert block.previous_block_root == signed_root(state.latest_block_header) + assert block.previous_block_root == signing_root(state.latest_block_header) # Save current block as the new latest block state.latest_block_header = get_temporary_block_header(block) # Verify proposer is not slashed - proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + proposer = state.validator_registry[get_beacon_proposer_index(state)] assert not proposer.slashed # Verify proposer signature - assert bls_verify(proposer.pubkey, signed_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER)) + assert bls_verify(proposer.pubkey, signing_root(block), block.signature, get_domain(state, DOMAIN_BEACON_PROPOSER)) ``` #### RANDAO ```python def process_randao(state: BeaconState, block: BeaconBlock) -> None: - proposer = state.validator_registry[get_beacon_proposer_index(state, state.slot)] + proposer = state.validator_registry[get_beacon_proposer_index(state)] # Verify that the provided randao value is 
valid assert bls_verify(proposer.pubkey, hash_tree_root(get_current_epoch(state)), block.body.randao_reveal, get_domain(state, DOMAIN_RANDAO)) # Mix it in @@ -2194,13 +1955,9 @@ def process_randao(state: BeaconState, block: BeaconBlock) -> None: ```python def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None: - for eth1_data_vote in state.eth1_data_votes: - # If someone else has already voted for the same hash, add to its counter - if eth1_data_vote.eth1_data == block.body.eth1_data: - eth1_data_vote.vote_count += 1 - return - # If we're seeing this hash for the first time, make a new counter - state.eth1_data_votes.append(Eth1DataVote(eth1_data=block.body.eth1_data, vote_count=1)) + state.eth1_data_votes.append(block.body.eth1_data) + if state.eth1_data_votes.count(block.body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD: + state.latest_eth1_data = block.body.eth1_data ``` #### Operations @@ -2228,7 +1985,8 @@ def process_proposer_slashing(state: BeaconState, # Signatures are valid for header in (proposer_slashing.header_1, proposer_slashing.header_2): domain = get_domain(state, DOMAIN_BEACON_PROPOSER, slot_to_epoch(header.slot)) - assert bls_verify(proposer.pubkey, signed_root(header), header.signature, domain) + assert bls_verify(proposer.pubkey, signing_root(header), header.signature, domain) + slash_validator(state, proposer_slashing.proposer_index) ``` @@ -2282,35 +2040,27 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: Process ``Attestation`` operation. Note that this function mutates ``state``. """ - assert max(GENESIS_SLOT, state.slot - SLOTS_PER_EPOCH) <= attestation.data.slot - assert attestation.data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY + data = attestation.data + min_slot = state.slot - SLOTS_PER_EPOCH if get_current_epoch(state) > GENESIS_EPOCH else GENESIS_SLOT + assert min_slot <= data.slot <= state.slot - MIN_ATTESTATION_INCLUSION_DELAY - # Check target epoch, source epoch, and source root - target_epoch = slot_to_epoch(attestation.data.slot) - assert (target_epoch, attestation.data.source_epoch, attestation.data.source_root) in { - (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root), - (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root), + # Check target epoch, source epoch, source root, and source crosslink + target_epoch = slot_to_epoch(data.slot) + assert (target_epoch, data.source_epoch, data.source_root, data.previous_crosslink_root) in { + (get_current_epoch(state), state.current_justified_epoch, state.current_justified_root, hash_tree_root(state.current_crosslinks[data.shard])), + (get_previous_epoch(state), state.previous_justified_epoch, state.previous_justified_root, hash_tree_root(state.previous_crosslinks[data.shard])), } - # Check crosslink data - assert attestation.data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] - assert state.latest_crosslinks[attestation.data.shard] in { - attestation.data.previous_crosslink, # Case 1: latest crosslink matches previous crosslink - Crosslink( # Case 2: latest crosslink matches current crosslink - crosslink_data_root=attestation.data.crosslink_data_root, - epoch=min(slot_to_epoch(attestation.data.slot), - attestation.data.previous_crosslink.epoch + MAX_CROSSLINK_EPOCHS) - ), - } + # Check crosslink data root + assert data.crosslink_data_root == ZERO_HASH # [to be removed in phase 1] # Check signature and bitfields assert verify_indexed_attestation(state, 
convert_to_indexed(state, attestation)) # Cache pending attestation pending_attestation = PendingAttestation( - data=attestation.data, + data=data, aggregation_bitfield=attestation.aggregation_bitfield, - custody_bitfield=attestation.custody_bitfield, inclusion_slot=state.slot ) if target_epoch == get_current_epoch(state): @@ -2319,11 +2069,82 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: state.previous_epoch_attestations.append(pending_attestation) ``` +Run `process_proposer_attestation_rewards(state)`. + +```python +def process_proposer_attestation_rewards(state: BeaconState) -> None: + proposer_index = get_beacon_proposer_index(state) + for pending_attestations in (state.previous_epoch_attestations, state.current_epoch_attestations): + for index in get_unslashed_attesting_indices(state, pending_attestations): + if get_earliest_attestation(state, pending_attestations, index).inclusion_slot == state.slot: + base_reward = get_base_reward_from_total_balance(state, get_current_total_balance(state), index) + increase_balance(state, proposer_index, base_reward // PROPOSER_REWARD_QUOTIENT) +``` + ##### Deposits -Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. +Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)`. -For each `deposit` in `block.body.deposits`, run `process_deposit(state, deposit)`. +For each `deposit` in `block.body.deposits`, run the following function: + +```python +def process_deposit(state: BeaconState, deposit: Deposit) -> None: + """ + Process a deposit from Ethereum 1.0. + Used to add a validator or top up an existing validator's + balance by some ``deposit`` amount. + + Note that this function mutates ``state``. + """ + # Deposits must be processed in order + assert deposit.index == state.deposit_index + + # Verify the Merkle branch + merkle_branch_is_valid = verify_merkle_branch( + leaf=hash(serialize(deposit.data)), # 48 + 32 + 8 + 96 = 184 bytes serialization + proof=deposit.proof, + depth=DEPOSIT_CONTRACT_TREE_DEPTH, + index=deposit.index, + root=state.latest_eth1_data.deposit_root, + ) + assert merkle_branch_is_valid + + # Increment the next deposit index we are expecting. Note that this + # needs to be done here because while the deposit contract will never + # create an invalid Merkle branch, it may admit an invalid deposit + # object, and we need to be able to skip over it + state.deposit_index += 1 + + validator_pubkeys = [v.pubkey for v in state.validator_registry] + pubkey = deposit.data.pubkey + amount = deposit.data.amount + + if pubkey not in validator_pubkeys: + # Verify the deposit signature (proof of possession) + if not bls_verify(pubkey, signing_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)): + return + + # Add new validator + validator = Validator( + pubkey=pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + activation_eligibility_epoch=FAR_FUTURE_EPOCH, + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + slashed=False, + high_balance=0 + ) + + # Note: In phase 2 registry indices that have been withdrawn for a long time will be recycled. 
+ state.validator_registry.append(validator) + state.balances.append(0) + set_balance(state, len(state.validator_registry) - 1, amount) + else: + # Increase balance by deposit amount + index = validator_pubkeys.index(pubkey) + increase_balance(state, index, amount) +``` ##### Voluntary exits @@ -2342,15 +2163,13 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None: assert is_active_validator(validator, get_current_epoch(state)) # Verify the validator has not yet exited assert validator.exit_epoch == FAR_FUTURE_EPOCH - # Verify the validator has not initiated an exit - assert validator.initiated_exit is False # Exits must specify an epoch when they become valid; they are not valid before then assert get_current_epoch(state) >= exit.epoch # Verify the validator has been active long enough assert get_current_epoch(state) - validator.activation_epoch >= PERSISTENT_COMMITTEE_PERIOD # Verify signature domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, exit.epoch) - assert bls_verify(validator.pubkey, signed_root(exit), exit.signature, domain) + assert bls_verify(validator.pubkey, signing_root(exit), exit.signature, domain) # Initiate exit initiate_validator_exit(state, exit.validator_index) ``` @@ -2371,12 +2190,6 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: """ # Verify the amount and fee aren't individually too big (for anti-overflow purposes) assert get_balance(state, transfer.sender) >= max(transfer.amount, transfer.fee) - # Verify that we have enough ETH to send, and that after the transfer the balance will be either - # exactly zero or at least MIN_DEPOSIT_AMOUNT - assert ( - get_balance(state, transfer.sender) == transfer.amount + transfer.fee or - get_balance(state, transfer.sender) >= transfer.amount + transfer.fee + MIN_DEPOSIT_AMOUNT - ) # A transfer is valid in only one slot assert state.slot == transfer.slot # Only withdrawn or not-yet-deposited accounts can transfer @@ -2390,11 +2203,14 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None: BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer.pubkey)[1:] ) # Verify that the signature is valid - assert bls_verify(transfer.pubkey, signed_root(transfer), transfer.signature, get_domain(state, DOMAIN_TRANSFER)) + assert bls_verify(transfer.pubkey, signing_root(transfer), transfer.signature, get_domain(state, DOMAIN_TRANSFER)) # Process the transfer decrease_balance(state, transfer.sender, transfer.amount + transfer.fee) increase_balance(state, transfer.recipient, transfer.amount) - increase_balance(state, get_beacon_proposer_index(state, state.slot), transfer.fee) + increase_balance(state, get_beacon_proposer_index(state), transfer.fee) + # Verify balances are not dust + assert not (0 < get_balance(state, transfer.sender) < MIN_DEPOSIT_AMOUNT) + assert not (0 < get_balance(state, transfer.recipient) < MIN_DEPOSIT_AMOUNT) ``` #### State root verification @@ -2405,17 +2221,3 @@ Verify the block's `state_root` by running the following function: def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None: assert block.state_root == hash_tree_root(state) ``` - -# References - -This section is divided into Normative and Informative references. Normative references are those that must be read in order to implement this specification, while Informative references are merely helpful information. 
An example of the former might be the details of a required consensus algorithm, and an example of the latter might be a pointer to research that demonstrates why a particular consensus algorithm might be better suited for inclusion in the standard than another. - -## Normative - -## Informative - _**casper-ffg**_
  _Casper the Friendly Finality Gadget_. V. Buterin and V. Griffith. URL: https://arxiv.org/abs/1710.09437 - - _**python-poc**_
  _Python proof-of-concept implementation_. Ethereum Foundation. URL: https://github.com/ethereum/beacon_chain - -# Copyright -Copyright and related rights waived via [CC0](https://creativecommons.org/publicdomain/zero/1.0/). diff --git a/specs/core/1_custody-game.md b/specs/core/1_custody-game.md index e28536d34..74b086219 100644 --- a/specs/core/1_custody-game.md +++ b/specs/core/1_custody-game.md @@ -283,7 +283,7 @@ def process_custody_reveal(state: BeaconState, assert is_active_validator(revealer, get_current_epoch(state)) or revealer.exit_epoch > get_current_epoch(state) revealer.custody_reveal_index += 1 revealer.max_reveal_lateness = max(revealer.max_reveal_lateness, current_custody_period - reveal.period) - proposer_index = get_beacon_proposer_index(state, state.slot) + proposer_index = get_beacon_proposer_index(state) increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) # Case 2: masked punitive early reveal @@ -309,7 +309,7 @@ def process_chunk_challenge(state: BeaconState, responder = state.validator_registry[challenge.responder_index] assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY # Verify the responder participated in the attestation - attesters = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) assert challenge.responder_index in attesters # Verify the challenge is not a duplicate for record in state.custody_chunk_challenge_records: @@ -323,7 +323,7 @@ # Add new chunk challenge record state.custody_chunk_challenge_records.append(CustodyChunkChallengeRecord( challenge_index=state.custody_challenge_index, - challenger_index=get_beacon_proposer_index(state, state.slot), + challenger_index=get_beacon_proposer_index(state), responder_index=challenge.responder_index, deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE, crosslink_data_root=challenge.attestation.data.crosslink_data_root, @@ -348,7 +348,7 @@ def process_bit_challenge(state: BeaconState, challenger = state.validator_registry[challenge.challenger_index] assert bls_verify( pubkey=challenger.pubkey, - message_hash=signed_root(challenge), + message_hash=signing_root(challenge), signature=challenge.signature, domain=get_domain(state, get_current_epoch(state), DOMAIN_CUSTODY_BIT_CHALLENGE), ) @@ -359,9 +359,9 @@ def process_bit_challenge(state: BeaconState, # Verify the attestation is eligible for challenging responder = state.validator_registry[challenge.responder_index] min_challengeable_epoch = responder.exit_epoch - EPOCHS_PER_CUSTODY_PERIOD * (1 + responder.max_reveal_lateness) - assert min_challengeable_epoch <= slot_to_epoch(challenge.attestation.data.slot) + assert min_challengeable_epoch <= slot_to_epoch(challenge.attestation.data.slot) # Verify the responder participated in the attestation - attesters = get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield) + attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield) assert challenge.responder_index in attesters # A validator can be the challenger or responder for at most one challenge at a time for record in state.custody_bit_challenge_records: @@ -436,7 +436,7 @@ def process_chunk_challenge_response(state: BeaconState, # Clear the challenge state.custody_chunk_challenge_records.remove(challenge) # Reward the proposer - proposer_index =
get_beacon_proposer_index(state, state.slot) + proposer_index = get_beacon_proposer_index(state) increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT) ``` diff --git a/specs/core/1_shard-data-chains.md b/specs/core/1_shard-data-chains.md index 8f2d12a91..1e1a232fe 100644 --- a/specs/core/1_shard-data-chains.md +++ b/specs/core/1_shard-data-chains.md @@ -20,6 +20,7 @@ - [`ShardAttestation`](#shardattestation) - [Helper functions](#helper-functions) - [`get_period_committee`](#get_period_committee) + - [`get_switchover_epoch`](#get_switchover_epoch) - [`get_persistent_committee`](#get_persistent_committee) - [`get_shard_proposer_index`](#get_shard_proposer_index) - [`get_shard_header`](#get_shard_header) @@ -137,6 +138,14 @@ def get_period_committee(state: BeaconState, ) ``` +### `get_switchover_epoch` + +```python +def get_switchover_epoch(state: BeaconState, epoch: Epoch, index: ValidatorIndex) -> int: + earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 + return bytes_to_int(hash(generate_seed(state, earlier_start_epoch) + bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD +``` + ### `get_persistent_committee` ```python @@ -146,6 +155,7 @@ def get_persistent_committee(state: BeaconState, """ Return the persistent committee for the given ``shard`` at the given ``slot``. """ + epoch = slot_to_epoch(slot) earlier_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD * 2 later_start_epoch = epoch - (epoch % PERSISTENT_COMMITTEE_PERIOD) - PERSISTENT_COMMITTEE_PERIOD @@ -160,14 +170,11 @@ def get_persistent_committee(state: BeaconState, earlier_committee = get_period_committee(state, shard, earlier_start_epoch, index, committee_count) later_committee = get_period_committee(state, shard, later_start_epoch, index, committee_count) - def get_switchover_epoch(index): - return bytes_to_int(hash(earlier_seed + bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD - # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated return sorted(list(set( - [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + - [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] + [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(state, epoch, i)] + + [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(state, epoch, i)] ))) ``` @@ -287,7 +294,7 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], # Check beacon block beacon_block = beacon_blocks[block.slot] - assert block.beacon_block_root == signed_root(beacon_block) + assert block.beacon_block_root == signing_root(beacon_block) assert beacon_block.slot <= block.slot # Check state root @@ -299,19 +306,19 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], else: parent_block = next( block for block in valid_shard_blocks if - signed_root(block) == candidate.previous_block_root + signing_root(block) == candidate.previous_block_root , None) assert parent_block != None assert parent_block.shard == block.shard assert parent_block.slot < block.slot - assert signed_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root + assert signing_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root # Check attestations assert
len(block.attestations) <= MAX_SHARD_ATTESTIONS for _, attestation in enumerate(block.attestations): assert max(GENESIS_SHARD_SLOT, block.slot - SLOTS_PER_EPOCH) <= attestation.data.slot - assert attesation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY - assert attetation.data.shart == block.shard + assert attestation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY + assert attestation.data.shard == block.shard verify_shard_attestation_signature(beacon_state, attestation) # Check signature @@ -319,7 +326,7 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock], assert proposer_index is not None assert bls_verify( pubkey=validators[proposer_index].pubkey, - message_hash=signed_root(block), + message_hash=signing_root(block), signature=block.signature, domain=get_domain(beacon_state, slot_to_epoch(block.slot), DOMAIN_SHARD_PROPOSER) ) @@ -342,7 +349,7 @@ def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock], # Check shard block shard_block = next( block for block in valid_shard_blocks if - signed_root(block) == candidate.attestation.data.shard_block_root + signing_root(block) == candidate.attestation.data.shard_block_root , None) assert shard_block != None assert shard_block.slot == attestation.data.slot @@ -399,4 +406,4 @@ def is_valid_beacon_attestation(shard: Shard, ## Shard fork choice rule -The fork choice rule for any shard is LMD GHOST using the shard attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than a slot.) +The fork choice rule for any shard is LMD GHOST using the shard attestations of the persistent committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_chain_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot.) diff --git a/specs/light_client/merkle_proofs.md b/specs/light_client/merkle_proofs.md index cf4dad2e3..63c018f2f 100644 --- a/specs/light_client/merkle_proofs.md +++ b/specs/light_client/merkle_proofs.md @@ -1,4 +1,26 @@ -### Generalized Merkle tree index +**NOTICE**: This document is a work-in-progress for researchers and implementers. 
+ +## Table of Contents + + +- [Table of Contents](#table-of-contents) +- [Constants](#constants) +- [Generalized Merkle tree index](#generalized-merkle-tree-index) +- [SSZ object to index](#ssz-object-to-index) +- [Merkle multiproofs](#merkle-multiproofs) +- [MerklePartial](#merklepartial) + - [`SSZMerklePartial`](#sszmerklepartial) + - [Proofs for execution](#proofs-for-execution) + + + +## Constants + +| Name | Value | +| - | - | +| `LENGTH_FLAG` | `2**64 - 1` | + +## Generalized Merkle tree index In a binary Merkle tree, we define a "generalized index" of a node as `2**depth + index`. Visually, this looks as follows: @@ -12,16 +34,16 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function: ```python -def merkle_tree(leaves): +def merkle_tree(leaves: List[Bytes32]) -> List[Bytes32]: o = [0] * len(leaves) + leaves - for i in range(len(leaves)-1, 0, -1): - o[i] = hash(o[i*2] + o[i*2+1]) + for i in range(len(leaves) - 1, 0, -1): + o[i] = hash(o[i * 2] + o[i * 2 + 1]) return o ``` We will define Merkle proofs in terms of generalized indices. -### SSZ object to index +## SSZ object to index We can describe the hash tree of any SSZ object, rooted in `hash_tree_root(object)`, as a binary Merkle tree whose depth may vary. For example, an object `{x: bytes32, y: List[uint64]}` would look as follows: @@ -36,32 +58,49 @@ y_data_root len(y) ....... ``` -We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo[5]`. We'll describe paths as lists: in these three cases they are `["x"]`, `["y", "len"]` and `["y", 5]` respectively. We can now define a function `get_generalized_indices(object: Any, path: List[str OR int], root=1: int) -> int` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. +We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. 
```python -def get_generalized_indices(obj: Any, path: List[str or int], root=1) -> List[int]: +def path_to_encoded_form(obj: Any, path: List[Union[str, int]]) -> List[int]: + if len(path) == 0: + return [] + elif path[0] == "__len__": + assert len(path) == 1 + return [LENGTH_FLAG] + elif isinstance(path[0], str) and hasattr(obj, "fields"): + return [list(obj.fields.keys()).index(path[0])] + path_to_encoded_form(getattr(obj, path[0]), path[1:]) + elif isinstance(obj, (Vector, List)): + return [path[0]] + path_to_encoded_form(obj[path[0]], path[1:]) + else: + raise Exception("Unknown type / path") +``` + +We can now define a function `get_generalized_indices(object: Any, path: List[int], root: int=1) -> List[int]` that converts an object and a path to a set of generalized indices (note that for constant-sized objects, there is only one generalized index and it only depends on the path, but for dynamically sized objects the indices may depend on the object itself too). For dynamically-sized objects, the set of indices will have more than one member because of the need to access an array's length to determine the correct generalized index for some array access. + +```python +def get_generalized_indices(obj: Any, path: List[int], root: int=1) -> List[int]: if len(path) == 0: return [root] - elif isinstance(obj, StaticList): + elif isinstance(obj, Vector): items_per_chunk = (32 // len(serialize(obj[0]))) if isinstance(obj[0], int) else 1 new_root = root * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk return get_generalized_indices(obj[path[0]], path[1:], new_root) - elif isinstance(obj, DynamicList) and path[0] == "len": + elif isinstance(obj, List) and path[0] == LENGTH_FLAG: return [root * 2 + 1] - elif isinstance(obj, DynamicList) and isinstance(path[0], int): + elif isinstance(obj, List) and isinstance(path[0], int): assert path[0] < len(obj) items_per_chunk = (32 // len(serialize(obj[0]))) if isinstance(obj[0], int) else 1 new_root = root * 2 * next_power_of_2(len(obj) // items_per_chunk) + path[0] // items_per_chunk return [root * 2 + 1] + get_generalized_indices(obj[path[0]], path[1:], new_root) elif hasattr(obj, "fields"): - index = list(fields.keys()).index(path[0]) - new_root = root * next_power_of_2(len(fields)) + index - return get_generalized_indices(getattr(obj, path[0]), path[1:], new_root) + field = list(obj.fields.keys())[path[0]] + new_root = root * next_power_of_2(len(obj.fields)) + path[0] + return get_generalized_indices(getattr(obj, field), path[1:], new_root) else: raise Exception("Unknown type / path") ``` -### Merkle multiproofs +## Merkle multiproofs We define a Merkle multiproof as a minimal subset of nodes in a Merkle tree needed to fully authenticate that a set of nodes actually are part of a Merkle tree with some specified root, at a particular set of generalized indices. For example, here is the Merkle multiproof for positions 0, 1, 6 in an 8-node Merkle tree (ie. generalized indices 8, 9, 14): @@ -74,19 +113,12 @@ x x . . . . x * . are unused nodes, * are used nodes, x are the values we are trying to prove. Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary nodes, only one node more than would be required to prove a single value. Normally the efficiency gains are not quite that extreme, but the savings relative to individual Merkle proofs are still significant. As a rule of thumb, a multiproof for k nodes at the same level of an n-node tree has size `k * (n/k + log(n/k))`.
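As a non-normative sanity check of the diagram above: the three auxiliary nodes are the generalized indices 5, 6 and 15, and together with the proven leaves 8, 9 and 14 they reconstruct the root (this sketch assumes the `merkle_tree` helper and the 32-byte `hash` function defined earlier):

```python
leaves = [bytes([i]) * 32 for i in range(8)]
tree = merkle_tree(leaves)

proven = {8: tree[8], 9: tree[9], 14: tree[14]}  # the values being proven
proof = {5: tree[5], 6: tree[6], 15: tree[15]}   # the three auxiliary nodes

node4 = hash(proven[8] + proven[9])
node7 = hash(proven[14] + proof[15])
node2 = hash(node4 + proof[5])
node3 = hash(proof[6] + node7)
assert hash(node2 + node3) == tree[1]  # recovers the root
```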
-Here is code for creating and verifying a multiproof. First a helper:
-
-```python
-def log2(x):
-    return 0 if x == 1 else 1 + log2(x//2)
-```
-
-First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require:
+Here is code for creating and verifying a multiproof. First, a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require:

```python
def get_proof_indices(tree_indices: List[int]) -> List[int]:
    # Get all indices touched by the proof
-    maximal_indices = set({})
+    maximal_indices = set()
    for i in tree_indices:
        x = i
        while x > 1:
@@ -94,7 +126,7 @@ def get_proof_indices(tree_indices: List[int]) -> List[int]:
        x //= 2
    maximal_indices = tree_indices + sorted(list(maximal_indices))[::-1]
    # Get indices that cannot be recalculated from earlier indices
-    redundant_indices = set({})
+    redundant_indices = set()
    proof = []
    for index in maximal_indices:
        if index not in redundant_indices:
@@ -105,30 +137,48 @@ def get_proof_indices(tree_indices: List[int]) -> List[int]:
                break
            index //= 2
    return [i for i in proof if i not in tree_indices]
-````
+```

Generating a proof is simply a matter of taking, for each index returned by `get_proof_indices` (applied to the union of the given generalized indices), the corresponding node of the SSZ hash tree, and outputting the list of nodes in that order.

+Here is the verification function:
+
```python
-def verify_multi_proof(root, indices, leaves, proof):
+def verify_multi_proof(root: Bytes32, indices: List[int], leaves: List[Bytes32], proof: List[Bytes32]) -> bool:
    tree = {}
    for index, leaf in zip(indices, leaves):
        tree[index] = leaf
-    for index, proofitem in zip(get_proof_indices(indices), proof):
-        tree[index] = proofitem
-    indexqueue = sorted(tree.keys())[:-1]
+    for index, proof_item in zip(get_proof_indices(indices), proof):
+        tree[index] = proof_item
+    index_queue = sorted(tree.keys())[:-1]
    i = 0
-    while i < len(indexqueue):
-        index = indexqueue[i]
-        if index >= 2 and index^1 in tree:
-            tree[index//2] = hash(tree[index - index%2] + tree[index - index%2 + 1])
-            indexqueue.append(index//2)
+    while i < len(index_queue):
+        index = index_queue[i]
+        if index >= 2 and index ^ 1 in tree:
+            tree[index // 2] = hash(tree[index - index % 2] + tree[index - index % 2 + 1])
+            index_queue.append(index // 2)
        i += 1
    return (indices == []) or (1 in tree and tree[1] == root)
```

-#### Proofs for execution
+## MerklePartial

-We define `MerklePartial(f, arg1, arg2...)` as being a list of Merkle multiproofs of the sets of nodes in the hash trees of the SSZ objects that are needed to authenticate the values needed to compute some function `f(arg1, arg2...)`. An individual Merkle multiproof is given as a dynamic sized list of `bytes32` values, a `MerklePartial` is a fixed-size list of objects `{proof: ["bytes32"], value: "bytes32"}`, one for each `arg` to `f` (if some `arg` is a base type, then the multiproof is empty).
+We define:

-Ideally, any function which accepts an SSZ object should also be able to accept a `MerklePartial` object as a substitute.
+### `SSZMerklePartial`
+
+
+```python
+{
+    "root": "bytes32",
+    "indices": ["uint64"],
+    "values": ["bytes32"],
+    "proof": ["bytes32"]
+}
+```
+
+### Proofs for execution
+
+We define `MerklePartial(f, arg1, arg2..., focus=0)` as being an `SSZMerklePartial` object wrapping a Merkle multiproof of the set of nodes in the hash tree of the SSZ object `arg[focus]` that is needed to authenticate the parts of the object needed to compute `f(arg1, arg2...)`.
+
+Ideally, any function which accepts an SSZ object should also be able to accept an `SSZMerklePartial` object as a substitute.
diff --git a/specs/light_client/sync_protocol.md b/specs/light_client/sync_protocol.md
index 6f020af95..900b2e64f 100644
--- a/specs/light_client/sync_protocol.md
+++ b/specs/light_client/sync_protocol.md
@@ -1,76 +1,91 @@
# Beacon Chain Light Client Syncing

-__NOTICE__: This document is a work-in-progress for researchers and implementers. One of the design goals of the eth2 beacon chain is light-client friendlines, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains.
+__NOTICE__: This document is a work-in-progress for researchers and implementers. One of the design goals of the eth2 beacon chain is light-client friendliness, both to allow low-resource clients (mobile phones, IoT, etc) to maintain access to the blockchain in a reasonably safe way, but also to facilitate the development of "bridges" between the eth2 beacon chain and other chains.

## Table of Contents
+
- [Beacon Chain Light Client Syncing](#beacon-chain-light-client-syncing)
  - [Table of Contents](#table-of-contents)
-  - [Light client state](#light-client-state)
-  - [Updating the shuffled committee](#updating-the-shuffled-committee)
-  - [Computing the current committee](#computing-the-current-committee)
-  - [Verifying blocks](#verifying-blocks)
+  - [Preliminaries](#preliminaries)
+    - [Expansions](#expansions)
+    - [`get_active_validator_indices`](#get_active_validator_indices)
+    - [`MerklePartial`](#merklepartial)
+    - [`PeriodData`](#perioddata)
+    - [`get_earlier_start_epoch`](#get_earlier_start_epoch)
+    - [`get_later_start_epoch`](#get_later_start_epoch)
+    - [`get_period_data`](#get_period_data)
+  - [Light client state](#light-client-state)
+  - [Updating the shuffled committee](#updating-the-shuffled-committee)
+  - [Computing the current committee](#computing-the-current-committee)
+  - [Verifying blocks](#verifying-blocks)
+
+## Preliminaries

-### Preliminaries
+### Expansions

We define an "expansion" of an object as an object in which a field that is meant to represent the `hash_tree_root` of another object is replaced by that object itself. Note that defining expansions is not a consensus-layer change; it is merely a "re-interpretation" of the object. In particular, the `hash_tree_root` of an expansion of an object is identical to that of the original object, and we can define expansions where, given a complete history, it is always possible to compute the expansion of any object in the history. The opposite of an expansion is a "summary" (eg. `BeaconBlockHeader` is a summary of `BeaconBlock`).
We define two expansions: -* `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState` -* `ExtendedBeaconState`, which is identical to a `BeaconState` except `latest_active_index_roots: List[Bytes32]` is replaced by `latest_active_indices: List[List[ValidatorIndex]]`, where `BeaconState.latest_active_index_roots[i] = hash_tree_root(ExtendedBeaconState.latest_active_indices[i])` +* `ExtendedBeaconState`, which is identical to a `BeaconState` except `latest_active_index_roots: List[Bytes32]` is replaced by `latest_active_indices: List[List[ValidatorIndex]]`, where `BeaconState.latest_active_index_roots[i] = hash_tree_root(ExtendedBeaconState.latest_active_indices[i])`. +* `ExtendedBeaconBlock`, which is identical to a `BeaconBlock` except `state_root` is replaced with the corresponding `state: ExtendedBeaconState`. + +### `get_active_validator_indices` Note that there is now a new way to compute `get_active_validator_indices`: ```python -def get_active_validator_indices(state: BeaconState, epoch: Epoch) -> List[ValidatorIndex]: +def get_active_validator_indices(state: ExtendedBeaconState, epoch: Epoch) -> List[ValidatorIndex]: return state.latest_active_indices[epoch % LATEST_ACTIVE_INDEX_ROOTS_LENGTH] ``` Note that it takes `state` instead of `state.validator_registry` as an argument. This does not affect its use in `get_shuffled_committee`, because `get_shuffled_committee` has access to the full `state` as one of its arguments. + +### `MerklePartial` + A `MerklePartial(f, *args)` is an object that contains a minimal Merkle proof needed to compute `f(*args)`. A `MerklePartial` can be used in place of a regular SSZ object, though a computation would return an error if it attempts to access part of the object that is not contained in the proof. 
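As a purely illustrative sketch of this access rule (the class and method names below are hypothetical, not part of the spec), a `MerklePartial`-backed object can be modeled as a lookup over only the tree nodes covered by the proof, erroring on anything else:

```python
class PartialObject:
    """Hypothetical model: holds only the tree nodes (generalized index -> bytes32)
    that a MerklePartial's multiproof covers."""

    def __init__(self, nodes: dict):
        self.nodes = nodes

    def get_node(self, generalized_index: int) -> bytes:
        if generalized_index not in self.nodes:
            # The computation touched a part of the object the proof does not contain
            raise KeyError("node not covered by this MerklePartial")
        return self.nodes[generalized_index]
```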
-We add a data type `PeriodData` and four helpers:
+### `PeriodData`

```python
{
    'validator_count': 'uint64',
    'seed': 'bytes32',
-    'committee': [Validator]
+    'committee': [Validator],
}
```

+### `get_earlier_start_epoch`
+
```python
def get_earlier_start_epoch(slot: Slot) -> int:
    return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD * 2
+```

+### `get_later_start_epoch`
+
+```python
def get_later_start_epoch(slot: Slot) -> int:
    return slot - slot % PERSISTENT_COMMITTEE_PERIOD - PERSISTENT_COMMITTEE_PERIOD
-
-def get_earlier_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData:
-    period_start = get_earlier_start_epoch(block.slot)
-    validator_count = len(get_active_validator_indices(block.state, period_start))
+```
+
+### `get_period_data`
+
+```python
+def get_period_data(block: ExtendedBeaconBlock, shard_id: Shard, later: bool) -> PeriodData:
+    period_start = get_later_start_epoch(block.slot) if later else get_earlier_start_epoch(block.slot)
+    validator_count = len(get_active_validator_indices(block.state, period_start))
    committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1
-    indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count)
+    indices = get_period_committee(block.state, shard_id, period_start, 0, committee_count)
    return PeriodData(
        validator_count,
        generate_seed(block.state, period_start),
-        [block.state.validator_registry[i] for i in indices]
-    )
-
-def get_later_period_data(block: ExtendedBeaconBlock, shard_id: Shard) -> PeriodData:
-    period_start = get_later_start_epoch(block.slot)
-    validator_count = len(get_active_validator_indices(block.state, period_start))
-    committee_count = validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE) + 1
-    indices = get_shuffled_committee(block.state, shard_id, period_start, 0, committee_count)
-    return PeriodData(
-        validator_count,
-        generate_seed(block.state, period_start),
-        [block.state.validator_registry[i] for i in indices],
+        [block.state.validator_registry[i] for i in indices],
    )
```

@@ -80,48 +95,49 @@ A light client will keep track of:

* A random `shard_id` in `[0...SHARD_COUNT-1]` (selected once and retained forever)
* A block header that they consider to be finalized (`finalized_header`) and do not expect to revert.
-* `later_period_data = get_maximal_later_committee(finalized_header, shard_id)`
-* `earlier_period_data = get_maximal_earlier_committee(finalized_header, shard_id)`
+* `later_period_data = get_period_data(finalized_header, shard_id, later=True)`
+* `earlier_period_data = get_period_data(finalized_header, shard_id, later=False)`

-We use the struct `validator_memory` to keep track of these variables.
+We use the struct `ValidatorMemory` to keep track of these variables.

### Updating the shuffled committee

-If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_maximal_later_committee, validator_memory.finalized_header, shard_id)`. It can then compute:
+If a client's `validator_memory.finalized_header` changes so that `header.slot // PERSISTENT_COMMITTEE_PERIOD` increases, then the client can ask the network for a `new_committee_proof = MerklePartial(get_period_data, validator_memory.finalized_header, shard_id, later=True)`.
It can then compute: ```python earlier_period_data = later_period_data -later_period_data = get_later_period_data(new_committee_proof, finalized_header, shard_id) +later_period_data = get_period_data(new_committee_proof, finalized_header, shard_id, later=True) ``` The maximum size of a proof is `128 * ((22-7) * 32 + 110) = 75520` bytes for validator records and `(22-7) * 32 + 128 * 8 = 1504` for the active index proof (much smaller because the relevant active indices are all beside each other in the Merkle tree). This needs to be done once per `PERSISTENT_COMMITTEE_PERIOD` epochs (2048 epochs / 9 days), or ~38 bytes per epoch. -### Computing the current committee +## Computing the current committee Here is a helper to compute the committee at a slot given the maximal earlier and later committees: ```python def compute_committee(header: BeaconBlockHeader, - validator_memory: ValidatorMemory): - + validator_memory: ValidatorMemory) -> List[ValidatorIndex]: earlier_validator_count = validator_memory.earlier_period_data.validator_count later_validator_count = validator_memory.later_period_data.validator_count - earlier_committee = validator_memory.earlier_period_data.committee - later_committee = validator_memory.later_period_data.committee + maximal_earlier_committee = validator_memory.earlier_period_data.committee + maximal_later_committee = validator_memory.later_period_data.committee earlier_start_epoch = get_earlier_start_epoch(header.slot) later_start_epoch = get_later_start_epoch(header.slot) epoch = slot_to_epoch(header.slot) - - actual_committee_count = max( + + committee_count = max( earlier_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), later_validator_count // (SHARD_COUNT * TARGET_COMMITTEE_SIZE), ) + 1 - - def get_offset(count, end:bool): - return get_split_offset(count, - SHARD_COUNT * committee_count, - validator_memory.shard_id * committee_count + (1 if end else 0)) - + + def get_offset(count: int, end: bool) -> int: + return get_split_offset( + count, + SHARD_COUNT * committee_count, + validator_memory.shard_id * committee_count + (1 if end else 0), + ) + actual_earlier_committee = maximal_earlier_committee[ 0:get_offset(earlier_validator_count, True) - get_offset(earlier_validator_count, False) ] @@ -130,31 +146,30 @@ def compute_committee(header: BeaconBlockHeader, ] def get_switchover_epoch(index): return ( - bytes_to_int(hash(validator_memory.earlier_period_data.seed + bytes3(index))[0:8]) % + bytes_to_int(hash(validator_memory.earlier_period_data.seed + int_to_bytes3(index))[0:8]) % PERSISTENT_COMMITTEE_PERIOD ) # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from # later committee; return a sorted list of the union of the two, deduplicated return sorted(list(set( - [i for i in earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + - [i for i in later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] + [i for i in actual_earlier_committee if epoch % PERSISTENT_COMMITTEE_PERIOD < get_switchover_epoch(i)] + + [i for i in actual_later_committee if epoch % PERSISTENT_COMMITTEE_PERIOD >= get_switchover_epoch(i)] ))) - ``` Note that this method makes use of the fact that the committee for any given shard always starts and ends at the same validator index independently of the committee count (this is because the validator set is split into `SHARD_COUNT * committee_count` slices but the first slice of a shard is a multiple `committee_count * i`, so the start of the 
slice is `n * committee_count * i // (SHARD_COUNT * committee_count) = n * i // SHARD_COUNT`, using the slightly nontrivial algebraic identity `(x * a) // (a * b) == x // b`).

-### Verifying blocks
+## Verifying blocks

If a client wants to update its `finalized_header`, it asks the network for a `BlockValidityProof`, which is simply:

```python
{
-    'header': BlockHeader,
+    'header': BeaconBlockHeader,
    'shard_aggregate_signature': 'bytes96',
    'shard_bitfield': 'bytes',
-    'shard_parent_block': ShardBlock
+    'shard_parent_block': ShardBlock,
}
```

@@ -162,23 +177,23 @@ The verification procedure is as follows:

```python
def verify_block_validity_proof(proof: BlockValidityProof, validator_memory: ValidatorMemory) -> bool:
-    assert proof.shard_parent_block.beacon_chain_ref == hash_tree_root(proof.header)
+    assert proof.shard_parent_block.beacon_chain_root == hash_tree_root(proof.header)
    committee = compute_committee(proof.header, validator_memory)
    # Verify that we have >=50% support
-    support_balance = sum([c.high_balance for i, c in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True])
-    total_balance = sum([c.high_balance for i, c in enumerate(committee)]
+    support_balance = sum([v.high_balance for i, v in enumerate(committee) if get_bitfield_bit(proof.shard_bitfield, i) is True])
+    total_balance = sum([v.high_balance for i, v in enumerate(committee)])
    assert support_balance * 2 > total_balance
    # Verify shard attestations
    group_public_key = bls_aggregate_pubkeys([
-        v.pubkey for v, index in enumerate(committee) if
-        get_bitfield_bit(proof.shard_bitfield, i) is True
+        v.pubkey for index, v in enumerate(committee)
+        if get_bitfield_bit(proof.shard_bitfield, index) is True
    ])
    assert bls_verify(
        pubkey=group_public_key,
        message_hash=hash_tree_root(proof.shard_parent_block),
-        signature=shard_aggregate_signature,
-        domain=get_domain(state, slot_to_epoch(shard_block.slot), DOMAIN_SHARD_ATTESTER)
+        signature=proof.shard_aggregate_signature,
+        domain=get_domain(state, slot_to_epoch(proof.shard_parent_block.slot), DOMAIN_SHARD_ATTESTER),
    )
```

-The size of this proof is only 200 (header) + 96 (signature) + 16 (bitfield) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_ref, ShardBlock)`, which would cut off ~220 bytes.
+The size of this proof is only 200 (header) + 96 (signature) + 16 (bitfield) + 352 (shard block) = 664 bytes. It can be reduced further by replacing `ShardBlock` with `MerklePartial(lambda x: x.beacon_chain_root, ShardBlock)`, which would cut off ~220 bytes.
diff --git a/specs/networking/rpc-interface.md b/specs/networking/rpc-interface.md
index fa49bcd75..5d408b5a0 100644
--- a/specs/networking/rpc-interface.md
+++ b/specs/networking/rpc-interface.md
@@ -9,7 +9,7 @@ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD",

# Dependencies

-This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification), and [Beacon Chain](../core/0_beacon-chain.md) specifications.
+This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification.md), and [Beacon Chain](../core/0_beacon-chain.md) specifications.

# Specification

@@ -26,7 +26,7 @@ Message body schemas are notated like this:

Embedded types are serialized as SSZ Containers unless otherwise noted.
-All referenced data structures can be found in the [0-beacon-chain](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/core/0_beacon-chain.md#data-structures) specification. +All referenced data structures can be found in the [0-beacon-chain](../core/0_beacon-chain.md#data-structures) specification. ## `libp2p` Protocol Names diff --git a/specs/simple-serialize.md b/specs/simple-serialize.md index 378a1a7cb..804c66d70 100644 --- a/specs/simple-serialize.md +++ b/specs/simple-serialize.md @@ -1,4 +1,4 @@ -# SimpleSerialiZe (SSZ) +# SimpleSerialize (SSZ) This is a **work in progress** describing typing, serialization and Merkleization of Ethereum 2.0 objects. @@ -54,7 +54,7 @@ For convenience we alias: We recursively define the `serialize` function which consumes an object `value` (of the type specified) and returns a bytestring of type `"bytes"`. -*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signed_root`, etc.) objects implicitly carry their type. +*Note*: In the function definitions below (`serialize`, `hash_tree_root`, `signing_root`, etc.) objects implicitly carry their type. ### `"uintN"` @@ -108,17 +108,17 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi ## Self-signed containers -Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signed_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`. +Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signing_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`. 
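For illustration, here is a minimal non-normative sketch of this convention; it assumes a `hash_tree_root` function from an SSZ implementation and models a container as an ordered mapping from field name to value:

```python
def truncate_last(value: dict) -> dict:
    # Drop the final field which, by convention, is the signature
    fields = list(value.items())
    return dict(fields[:-1])

def signing_root(value: dict) -> bytes:
    # hash_tree_root is assumed to be provided by an SSZ implementation
    return hash_tree_root(truncate_last(value))
```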
## Implementations | Language | Project | Maintainer | Implementation | |-|-|-|-| | Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) | -| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz](https://github.com/sigp/lighthouse/tree/master/beacon_chain/utils/ssz) | +| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) | | Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) | | Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/util/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) | -| Javascript | Lodestart | Chain Safe Systems | [https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js](https://github.com/ChainSafeSystems/ssz-js/blob/master/src/index.js) | +| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) | | Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) | | Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz](https://github.com/prysmaticlabs/prysm/tree/master/shared/ssz) | | Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) | diff --git a/specs/test-format.md b/specs/test-format.md deleted file mode 100644 index d4256ef72..000000000 --- a/specs/test-format.md +++ /dev/null @@ -1,71 +0,0 @@ -# General test format [WIP] - -This document defines the general YAML format to which all tests should conform. Testing specifications in Eth2.0 are still a work in progress. _Expect breaking changes_ - -## ToC - -* [About](#about) -* [YAML Fields](#yaml-fields) -* [Example test suite](#example-test-suite) - -## About -Ethereum 2.0 uses YAML as the format for all cross client tests. This document describes at a high level the general format to which all test files should conform. - -The particular formats of specific types of tests (test suites) are defined in separate documents. - -## YAML fields -`title` _(required)_ - -`summary` _(optional)_ - -`test_suite` _(required)_ string defining the test suite to which the test cases conform - -`fork` _(required)_ production release versioning - -`version` _(required)_ version for particular test document - -`test_cases` _(required)_ list of test cases each of which is formatted to conform to the `test_case` standard defined by `test_suite`. All test cases have optional `name` and `description` string fields. - -## Example test suite -`shuffle` is a test suite that defines test cases for the `shuffle()` helper function defined in the `beacon-chain` spec. - -Test cases that conform to the `shuffle` test suite have the following fields: - -* `input` _(required)_ the list of items passed into `shuffle()` -* `output` _(required)_ the expected list returned by `shuffle()` -* `seed` _(required)_ the seed of entropy passed into `shuffle()` - -As for all test cases, `name` and `description` are optional string fields. 
-
-The following is a sample YAML document for the `shuffle` test suite:
-
-```yaml
-title: Shuffling Algorithm Tests
-summary: Test vectors for shuffling a list based upon a seed using `shuffle`
-test_suite: shuffle
-fork: tchaikovsky
-version: 1.0
-
-test_cases:
-- input: []
-  output: []
-  seed: !!binary ""
-- name: boring_list
-  description: List with a single element, 0
-  input: [0]
-  output: [0]
-  seed: !!binary ""
-- input: [255]
-  output: [255]
-  seed: !!binary ""
-- input: [4, 6, 2, 6, 1, 4, 6, 2, 1, 5]
-  output: [1, 6, 4, 1, 6, 6, 2, 2, 4, 5]
-  seed: !!binary ""
-- input: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
-  output: [4, 7, 10, 13, 3, 1, 2, 9, 12, 6, 11, 8, 5]
-  seed: !!binary ""
-- input: [65, 6, 2, 6, 1, 4, 6, 2, 1, 5]
-  output: [6, 65, 2, 5, 4, 2, 6, 6, 1, 1]
-  seed: !!binary |
-    JlAYJ5H2j8g7PLiPHZI/rTS1uAvKiieOrifPN6Moso0=
-```
diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md
new file mode 100644
index 000000000..da2e38c01
--- /dev/null
+++ b/specs/test_formats/README.md
@@ -0,0 +1,198 @@
+# General test format
+
+This document defines the YAML format and structure used for ETH 2.0 testing.
+
+## ToC
+
+* [About](#about)
+* [Glossary](#glossary)
+* [Test format philosophy](#test-format-philosophy)
+* [Test Suite](#test-suite)
+* [Config](#config)
+* [Fork-timeline](#fork-timeline)
+* [Config sourcing](#config-sourcing)
+* [Test structure](#test-structure)
+
+## About
+
+Ethereum 2.0 uses YAML as the format for all cross-client tests. This document describes at a high level the general format to which all test files should conform.
+
+### Test-case formats
+
+The particular formats of specific types of tests (test suites) are defined in separate documents.
+
+Test formats:
+- [`bls`](./bls/README.md)
+- [`operations`](./operations/README.md)
+- [`shuffling`](./shuffling/README.md)
+- [`ssz`](./ssz/README.md)
+- More formats are planned; see tracking issues for CI/testing
+
+## Glossary
+
+- `generator`: a program that outputs one or more `suite` files.
+  - A generator should only output one `type` of test.
+  - A generator is free to output multiple `suite` files, optionally with different `handler`s.
+- `type`: the specialization of one single `generator`.
+- `suite`: a YAML file with:
+  - a header: describes the `suite`, and defines what the `suite` is for
+  - a list of test cases
+- `runner`: where a generator is a *"producer"*, this is the *"consumer"*.
+  - A `runner` focuses on *only one* `type`, and each type has *only one* `runner`.
+- `handler`: a `runner` may sometimes be too limited; you may have a `suite` with a specific focus that requires a different format.
+  To facilitate this, you specify a `handler`: the runner can deal with the format by using the specified handler.
+  Using a `handler` in a `runner` is optional.
+- `case`: a test case, an entry in the `test_cases` list of a `suite`. A case can be anything in general,
+  but its format should be well-defined in the documentation corresponding to the `type` (and `handler`).\
+  A test case has exactly the same configuration and fork context as the other entries in the `test_cases` list of its `suite`.
+- `forks_timeline`: a fork timeline definition, a YAML file containing a key for each fork-name, and an epoch number as value.
+
+## Test format philosophy
+
+### Config design
+
+After long discussion, the following types of configured constants were identified:
+- Never changing: genesis data
+- Changing, but reliant on old value: e.g.
an epoch time may change, but if you want to do the conversion
+  `(genesis data, timestamp) -> epoch number` you end up needing both constants.
+- Changing, but kept around during fork transition: finalization may take a while,
+  e.g. an executable has to deal with new deposits and old deposits at the same time. Another example may be economic constants.
+- Additional, backwards compatible: new constants are introduced for later phases
+- Changing: there is a very small chance some constant may really be *replaced*.
+  In this off-chance, it is likely better to include it as an additional variable,
+  and some clients may simply stop supporting the old one, if they do not want to sync from genesis.
+
+Based on these types of changes, we model the config as a list of key-value pairs
+  that only grows with every fork (they may change in development versions of forks, however; git manages this).
+With this approach, configurations are backwards compatible (older clients ignore unknown variables), and easy to maintain.
+
+### Fork config design
+
+There are two types of fork-data:
+1) timeline: when does a fork take place?
+2) coverage: what forks are covered by a test?
+
+The first is neat to have as a separate form: we prevent duplication, and can run with different presets
+  (e.g. fork timeline for a minimal local test, for a public testnet, or for mainnet).
+
+The second does not affect the result of the tests; it just states what is covered by the tests,
+  so that the right suites can be executed to see coverage for a certain fork.
+For some types of tests, it may be beneficial to ensure a test runs exactly the same with any given fork "active".
+Test-formats can be explicit on the need to repeat a test with different forks being "active",
+  but generally tests run only once.
+
+### Test completeness
+
+Tests should be independent of any sync-data. If one wants to run a test, the input data should be available from the YAML.
+The aim is to provide clients with a well-defined scope of work to run a particular set of test-suites.
+
+- Clients that are complete are expected to contribute to testing, seeking better resources to achieve conformance with the spec and with other clients.
+- Clients that are not complete in functionality can choose to ignore suites that use certain test-runners, or specific handlers of these test-runners.
+- Clients that are on older versions can test their work based on older releases of the generated tests, and catch up with newer releases when possible.
+
+## Test Suite
+
+```
+title: -- Display name for the test suite
+summary: -- Summarizes the test suite
+forks_timeline: -- Used to determine the forking timeline
+forks: -- Defines the coverage. Test-runner code may decide to re-run with the different forks "activated", when applicable.
+config: -- Used to determine which set of constants to run (possibly compile time) with
+runner: *MUST be consistent with folder structure*
+handler: *MUST be consistent with folder structure*
+
+test_cases:
+  ...
+
+```
+
+## Config
+
+A configuration is a separate YAML file.
+Separation of configuration and tests aims to:
+- Prevent duplication of configuration
+- Make all tests easy to upgrade (e.g. when a new config constant is introduced)
+- Clearly define which constants to use
+- Be shareable between clients, for cross-client short- or long-lived testnets
+- Minimize the number of different constant permutations to compile as a client.
+  Note: Some clients prefer compile-time constants and optimizations.
+ They should compile for each configuration once, and run the corresponding tests per build target.
+
+The format is described in `configs/constant_presets`.
+
+
+## Fork-timeline
+
+A fork timeline is (preferably) loaded into a client as a configuration object, as opposed to the constants configuration:
+ - we do not allocate or optimize any code based on epoch numbers
+ - when we transition from one fork to another, it is preferred to stay online.
+ - we may decide on an epoch number for a fork based on external events (e.g. an Eth1 log event);
+   a client should be able to activate a fork dynamically.
+
+The format is described in `configs/fork_timelines`.
+
+## Config sourcing
+
+The constants configurations are located in:
+
+```
+/configs/constant_presets/<config name>.yaml
+```
+
+And copied by CI for testing purposes to:
+
+```
+<tests repo root>/configs/constant_presets/<config name>.yaml
+```
+
+
+The fork timelines are located in:
+
+```
+/configs/fork_timelines/<fork timeline name>.yaml
+```
+
+And copied by CI for testing purposes to:
+
+```
+<tests repo root>/configs/fork_timelines/<fork timeline name>.yaml
+```
+
+## Test structure
+
+To avoid parsing hundreds of different YAML files to test a specific test type
+ (or, more specifically, just one handler), tests should be structured in the following nested form:
+
+```
+. <--- root of eth2.0 tests repository
+├── bls <--- collection of handlers for a specific test-runner, example runner: "bls"
+│   ├── verify_msg <--- collection of test suites for a specific handler, example handler: "verify_msg". If there is only one handler, use a dummy folder (e.g. "core"), and specify that in the yaml.
+│   │   ├── verify_valid.yml .
+│   │   ├── special_cases.yml . a list of test suites
+│   │   ├── domains.yml .
+│   │   ├── invalid.yml .
+│   │   ... <--- more suite files (optional)
+│   ... <--- more handlers
+... <--- more test types
+```
+
+
+## Note for implementers
+
+The basic pattern for test-suite loading and running is:
+
+Iterate suites for a given test-type, or sub-type (e.g. `operations > deposits`):
+1. Filter test-suite, options:
+  - Config: Load first few lines, load into YAML, and check `config`, either:
+    - Pass the suite to the correct compiled target
+    - Ignore the suite if running tests as part of a compiled target with different configuration
+    - Load the correct configuration for the suite dynamically before running the suite
+  - Select by file name
+  - Filter for specific suites (e.g. for a specific fork)
+2. Load the YAML
+  - Optionally translate the data into applicable naming, e.g. `snake_case` to `PascalCase`
+3. Iterate through the `test_cases`
+4. Ask test-runner to allocate a new test-case (i.e. objectify the test-case, generalize it with a `TestCase` interface)
+   Optionally pass raw test-case data to enable dynamic test-case allocation.
+   1. Load test-case data into it.
+   2. Make the test-case run.
diff --git a/specs/test_formats/bls/README.md b/specs/test_formats/bls/README.md
new file mode 100644
index 000000000..db63bba1d
--- /dev/null
+++ b/specs/test_formats/bls/README.md
@@ -0,0 +1,15 @@
+# BLS tests
+
+A test type for BLS. Primarily geared towards verifying the *integration* of any BLS library.
+We do not recommend rolling your own crypto or using an untested BLS library.
+
+The BLS test suite runner has the following handlers:
+
+- [`aggregate_pubkeys`](./aggregate_pubkeys.md)
+- [`aggregate_sigs`](./aggregate_sigs.md)
+- [`msg_hash_g2_compressed`](./msg_hash_g2_compressed.md)
+- [`msg_hash_g2_uncompressed`](./msg_hash_g2_uncompressed.md)
+- [`priv_to_pub`](./priv_to_pub.md)
+- [`sign_msg`](./sign_msg.md)
+
+Note: signature-verification and aggregate-verify test cases are not yet supported.
diff --git a/specs/test_formats/bls/aggregate_pubkeys.md b/specs/test_formats/bls/aggregate_pubkeys.md
new file mode 100644
index 000000000..43c7d6c6d
--- /dev/null
+++ b/specs/test_formats/bls/aggregate_pubkeys.md
@@ -0,0 +1,17 @@
+# Test format: BLS pubkey aggregation
+
+A BLS pubkey aggregation combines a series of pubkeys into a single pubkey.
+
+## Test case format
+
+```yaml
+input: List[BLS Pubkey] -- list of input BLS pubkeys
+output: BLS Pubkey -- expected output, single BLS pubkey
+```
+
+`BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`.
+
+
+## Condition
+
+The `aggregate_pubkeys` handler should aggregate the keys in the `input`, and the result should match the expected `output`.
diff --git a/specs/test_formats/bls/aggregate_sigs.md b/specs/test_formats/bls/aggregate_sigs.md
new file mode 100644
index 000000000..6690c3344
--- /dev/null
+++ b/specs/test_formats/bls/aggregate_sigs.md
@@ -0,0 +1,17 @@
+# Test format: BLS signature aggregation
+
+A BLS signature aggregation combines a series of signatures into a single signature.
+
+## Test case format
+
+```yaml
+input: List[BLS Signature] -- list of input BLS signatures
+output: BLS Signature -- expected output, single BLS signature
+```
+
+`BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`.
+
+
+## Condition
+
+The `aggregate_sigs` handler should aggregate the signatures in the `input`, and the result should match the expected `output`.
diff --git a/specs/test_formats/bls/msg_hash_g2_compressed.md b/specs/test_formats/bls/msg_hash_g2_compressed.md
new file mode 100644
index 000000000..4e194e90b
--- /dev/null
+++ b/specs/test_formats/bls/msg_hash_g2_compressed.md
@@ -0,0 +1,19 @@
+# Test format: BLS hash-compressed
+
+A BLS compressed-hash to G2.
+
+## Test case format
+
+```yaml
+input:
+  message: bytes32,
+  domain: bytes -- any number
+output: List[bytes48] -- of length two
+```
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`
+
+
+## Condition
+
+The `msg_hash_g2_compressed` handler should hash the `message`, with the given `domain`, to G2 with compression, and the result should match the expected `output`.
diff --git a/specs/test_formats/bls/msg_hash_g2_uncompressed.md b/specs/test_formats/bls/msg_hash_g2_uncompressed.md
new file mode 100644
index 000000000..f42ea9998
--- /dev/null
+++ b/specs/test_formats/bls/msg_hash_g2_uncompressed.md
@@ -0,0 +1,19 @@
+# Test format: BLS hash-uncompressed
+
+A BLS uncompressed-hash to G2.
+
+## Test case format
+
+```yaml
+input:
+  message: bytes32,
+  domain: bytes -- any number
+output: List[List[bytes48]] -- 3 lists, each of length two
+```
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`
+
+
+## Condition
+
+The `msg_hash_g2_uncompressed` handler should hash the `message`, with the given `domain`, to G2, without compression, and the result should match the expected `output`.
diff --git a/specs/test_formats/bls/priv_to_pub.md b/specs/test_formats/bls/priv_to_pub.md
new file mode 100644
index 000000000..7af148d0f
--- /dev/null
+++ b/specs/test_formats/bls/priv_to_pub.md
@@ -0,0 +1,17 @@
+# Test format: BLS private key to pubkey
+
+A BLS private key to public key conversion.
+
+## Test case format
+
+```yaml
+input: bytes32 -- the private key
+output: bytes48 -- the public key
+```
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`
+
+
+## Condition
+
+The `priv_to_pub` handler should compute the public key for the given private key `input`, and the result should match the expected `output`.
diff --git a/specs/test_formats/bls/sign_msg.md b/specs/test_formats/bls/sign_msg.md
new file mode 100644
index 000000000..dd93174f2
--- /dev/null
+++ b/specs/test_formats/bls/sign_msg.md
@@ -0,0 +1,20 @@
+# Test format: BLS sign message
+
+Message signing with BLS should produce a signature.
+
+## Test case format
+
+```yaml
+input:
+  privkey: bytes32 -- the private key used for signing
+  message: bytes32 -- input message to sign (a hash)
+  domain: bytes -- BLS domain
+output: bytes96 -- expected signature
+```
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`
+
+
+## Condition
+
+The `sign_msg` handler should sign the given `message`, with `domain`, using the given `privkey`, and the result should match the expected `output`.
diff --git a/specs/test_formats/operations/README.md b/specs/test_formats/operations/README.md
new file mode 100644
index 000000000..842dc3615
--- /dev/null
+++ b/specs/test_formats/operations/README.md
@@ -0,0 +1,10 @@
+# Operations tests
+
+The different kinds of operations ("transactions") are tested individually with test handlers.
+
+The tested operation kinds are:
+- [`deposits`](./deposits.md)
+- More tests are work-in-progress.
+
+
+
diff --git a/specs/test_formats/operations/deposits.md b/specs/test_formats/operations/deposits.md
new file mode 100644
index 000000000..8f44ebb22
--- /dev/null
+++ b/specs/test_formats/operations/deposits.md
@@ -0,0 +1,18 @@
+# Test format: Deposit operations
+
+A deposit is a form of operation (or "transaction") that modifies the state.
+
+## Test case format
+
+```yaml
+description: string -- description of test case, purely for debugging purposes
+pre: BeaconState -- state before applying the deposit
+deposit: Deposit -- the deposit
+post: BeaconState -- state after applying the deposit. No value if deposit processing is aborted.
+```
+
+## Condition
+
+A `deposits` handler of the `operations` runner should process these cases,
+ calling the implementation of the `process_deposit(state, deposit)` functionality described in the spec.
+The resulting state should match the expected `post` state, or, if the `post` state is left blank, the handler should reject the inputs as invalid.
diff --git a/specs/test_formats/shuffling/README.md b/specs/test_formats/shuffling/README.md
new file mode 100644
index 000000000..57be96565
--- /dev/null
+++ b/specs/test_formats/shuffling/README.md
@@ -0,0 +1,32 @@
+# Test format: shuffling
+
+The runner of the Shuffling test type has only one handler: `core`.
+
+This does not mean, however, that testing is limited.
+Clients may take different approaches to shuffling, e.g. for optimization,
+ or to support advanced lookups back into older history.
+
+For implementers, possible test-runner implementations include:
+1) just test permute-index, run it for each index `i` in `range(count)`, and check against expected `output[i]` (default spec implementation)
+2) test un-permute-index (the reverse lookup, implemented by running the shuffling rounds in reverse: from `round_count-1` to `0`)
+3) test the optimized complete shuffle, where all indices are shuffled at once, and check the output in one go.
+4) test complete shuffle in reverse (reverse rounds, same as 2)
+
+## Test case format
+
+```yaml
+seed: bytes32
+count: int
+shuffled: List[int]
+```
+
+- The `bytes32` is encoded as a string, hexadecimal encoding, prefixed with `0x`.
+- Integers are validator indices. These are `uint64` values, but realistically they never get that big.
+
+The `count` specifies the validator registry size. One should compute the shuffling for indices `0, 1, 2, 3, ..., count (exclusive)`.
+The `seed` is the raw shuffling seed, passed to permute-index (or the optimized shuffling approach).
+
+## Condition
+
+The resulting list should match the expected output `shuffled` after shuffling the implied input, using the given `seed`.
+
diff --git a/specs/test_formats/ssz_generic/README.md b/specs/test_formats/ssz_generic/README.md
new file mode 100644
index 000000000..9fda0c368
--- /dev/null
+++ b/specs/test_formats/ssz_generic/README.md
@@ -0,0 +1,20 @@
+# SSZ, generic tests
+
+This set of test-suites provides general testing for SSZ:
+ to instantiate any container/list/vector/other type from binary data.
+
+Since SSZ is still in a development phase, the full suite of features is not covered yet.
+Note that these tests are based on the older SSZ package.
+The tests are still relevant, but limited in scope:
+ more complex object encodings have changed since the original SSZ testing.
+
+A minimal but useful series of tests covering `uint` encoding and decoding is provided.
+This is a direct port of the older SSZ `uint` tests (minus outdated test cases).
+
+[uint test format](./uint.md).
+
+Note: the current phase-0 spec does not use larger uints, and uses byte vectors (fixed length) instead to represent roots etc.
+The exact uint lengths to support may be redefined in the future.
+
+Extension of the SSZ tests collection is planned, with an update to the new spec-maintained `minimal_ssz.py`;
+ see CI/testing issues for progress tracking.
diff --git a/specs/test_formats/ssz_generic/uint.md b/specs/test_formats/ssz_generic/uint.md
new file mode 100644
index 000000000..fd7cf3221
--- /dev/null
+++ b/specs/test_formats/ssz_generic/uint.md
@@ -0,0 +1,19 @@
+# Test format: SSZ uints
+
+SSZ supports encoding of uints up to 32 bytes. These are considered to be basic types.
+
+## Test case format
+
+```yaml
+type: "uintN" -- string, where N is one of [8, 16, 32, 64, 128, 256]
+valid: bool -- expected validity of the input data
+value: string -- string, decimal encoding, to support up to 256-bit integers
+ssz: bytes -- string, input data, hex encoded, with prefix 0x
+tags: List[string] -- description of test case, in the form of a list of labels
+```
+
+## Condition
+
+Two-way testing can be implemented in the test-runner:
+- Encoding: After encoding the given input number `value`, the output should match `ssz`
+- Decoding: After decoding the given `ssz` bytes, it should match the input number `value`
diff --git a/specs/test_formats/ssz_static/README.md b/specs/test_formats/ssz_static/README.md
new file mode 100644
index 000000000..413b00c75
--- /dev/null
+++ b/specs/test_formats/ssz_static/README.md
@@ -0,0 +1,8 @@
+# SSZ, static tests
+
+This set of test-suites provides static testing for SSZ:
+ to instantiate just the known ETH-2.0 SSZ types from binary data.
+
+This series of tests is based on the spec-maintained `minimal_ssz.py`, i.e. fully consistent with the SSZ spec.
+
+Test format documentation can be found here: [core test format](./core.md).
diff --git a/specs/test_formats/ssz_static/core.md b/specs/test_formats/ssz_static/core.md
new file mode 100644
index 000000000..8a5067f03
--- /dev/null
+++ b/specs/test_formats/ssz_static/core.md
@@ -0,0 +1,23 @@
+# Test format: SSZ static types
+
+The goal of this type is to provide clients with a solid reference for how the known SSZ objects should be encoded.
+Each object described in the Phase-0 spec is covered.
+This is important, as many of the clients aiming to serialize/deserialize objects directly into structs/classes
+do not support (or have alternatives for) generic SSZ encoding/decoding.
+This test-format ensures these direct serializations are covered.
+
+## Test case format
+
+```yaml
+type_name: string -- string, object name, formatted as in spec. E.g. "BeaconBlock"
+value: dynamic -- the YAML-encoded value, of the type specified by type_name.
+serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x
+root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x
+```
+
+## Condition
+
+A test-runner can implement the following assertions:
+- Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized`
+- Hash-tree-root: After parsing the `value`, hash-tree-root it: the output should match `root`
+- Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value`
diff --git a/specs/validator/0_beacon-chain-validator.md b/specs/validator/0_beacon-chain-validator.md
index 0d6033acd..632bf2b62 100644
--- a/specs/validator/0_beacon-chain-validator.md
+++ b/specs/validator/0_beacon-chain-validator.md
@@ -1,6 +1,6 @@
# Ethereum 2.0 Phase 0 -- Honest Validator

-__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md) that describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
+__NOTICE__: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md) that describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
## Table of Contents @@ -38,13 +38,13 @@ __NOTICE__: This document is a work-in-progress for researchers and implementers - [Attestations](#attestations-1) - [Attestation data](#attestation-data) - [Slot](#slot-1) - - [Shard](#shard) - [Beacon block root](#beacon-block-root) - - [Target root](#target-root) - - [Crosslink data root](#crosslink-data-root) - - [Latest crosslink](#latest-crosslink) - [Source epoch](#source-epoch) - [Source root](#source-root) + - [Target root](#target-root) + - [Shard](#shard) + - [Previous crosslink root](#previous-crosslink-root) + - [Crosslink data root](#crosslink-data-root) - [Construct attestation](#construct-attestation) - [Data](#data) - [Aggregation bitfield](#aggregation-bitfield) @@ -66,7 +66,7 @@ A validator is an entity that participates in the consensus of the Ethereum 2.0 ## Prerequisites -All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout. +All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout. ## Constants @@ -84,7 +84,7 @@ A validator must initialize many parameters locally before submitting a deposit #### BLS public key -Validator public keys are [G1 points](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md#g1-points) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator. +Validator public keys are [G1 points](../bls_signature.md#g1-points) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator. #### BLS withdrawal key @@ -96,15 +96,16 @@ The validator constructs their `withdrawal_credentials` via the following: ### Submit deposit -In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. Deposits are made to the [deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) located at `DEPOSIT_CONTRACT_ADDRESS`. +In phase 0, all incoming validator deposits originate from the Ethereum 1.0 PoW chain. Deposits are made to the [deposit contract](../core/0_beacon-chain.md#ethereum-10-deposit-contract) located at `DEPOSIT_CONTRACT_ADDRESS`. To submit a deposit: -* Pack the validator's [initialization parameters](#initialization) into `deposit_input`, a [`DepositInput`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#depositinput) SSZ object. -* Let `proof_of_possession` be the result of `bls_sign` of the `signed_root(deposit_input)` with `domain=DOMAIN_DEPOSIT`. -* Set `deposit_input.proof_of_possession = proof_of_possession`. +* Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object. 
+* Let `proof_of_possession` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`.
+* Set `deposit_data.proof_of_possession = proof_of_possession`.
* Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_DEPOSIT_AMOUNT`.
-* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit` along with `serialize(deposit_input)` as the singular `bytes` input along with a deposit `amount` in Gwei.
+* Set `deposit_data.amount = amount`.
+* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `deposit(deposit_input: bytes[512])`, passing `serialize(deposit_data)` as the singular `bytes` input, along with a deposit of `amount` Gwei.

_Note_: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_DEPOSIT_AMOUNT`.

@@ -114,13 +115,13 @@ Deposits cannot be processed into the beacon chain until the eth1.0 block in whi

### Validator index

-Once a validator has been processed and added to the beacon state's `validator_registry`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally.
+Once a validator has been processed and added to the beacon state's `validator_registry`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](../core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally.

### Activation

In normal operation, the validator is quickly activated, at which point the validator is added to the shuffling and begins validation after an additional `ACTIVATION_EXIT_DELAY` epochs (25.6 minutes).

-The function [`is_active_validator`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken. Usage is as follows:
+The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given shuffling epoch. Note that the `BeaconState` contains a field `current_shuffling_epoch` which dictates from which epoch the current active validators are taken.
Usage is as follows:

```python
shuffling_epoch = state.current_shuffling_epoch
@@ -138,7 +139,7 @@ A validator has two primary responsibilities to the beacon chain -- [proposing b

### Block proposal

-A validator is expected to propose a [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function).
+A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state, slot)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).

There is one proposer per slot, so if there are N active validators, any individual validator will on average be assigned to propose once per N slots (eg. at 312500 validators = 10 million ETH, that's once per ~3 weeks).

@@ -152,7 +153,7 @@ _Note:_ there might be "skipped" slots between the `parent` and `block`. These s

##### Parent root

-Set `block.previous_block_root = signed_root(parent)`.
+Set `block.previous_block_root = signing_root(parent)`.

##### State root

@@ -199,7 +200,7 @@ Set `block.signature = block_signature` where `block_signature` is defined as:

```python
block_signature = bls_sign(
    privkey=validator.privkey,  # privkey stored locally, not in state
-    message_hash=signed_root(block),
+    message_hash=signing_root(block),
    domain=get_domain(
        fork=fork,  # `fork` is the fork object at the slot `block.slot`
        epoch=slot_to_epoch(block.slot),
@@ -212,25 +213,25 @@ block_signature = bls_sign(

##### Proposer slashings

-Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included.
+Up to `MAX_PROPOSER_SLASHINGS` [`ProposerSlashing`](../core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](../core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included.

##### Attester slashings

-Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`.
The attester slashings must satisfy the verification conditions found in [Attester slashings processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included. +Up to `MAX_ATTESTER_SLASHINGS` [`AttesterSlashing`](../core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [Attester slashings processing](../core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included. ##### Attestations -Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. +Up to `MAX_ATTESTATIONS` aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](../core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain. ##### Deposits -If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#deposits). +If there are any unprocessed deposits for the existing `state.latest_eth1_data` (i.e. `state.latest_eth1_data.deposit_count > state.deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, latest_eth1_data.deposit_count - state.deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1.0 deposit contract](../core/0_beacon-chain.md#ethereum-10-deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits). The `proof` for each deposit must be constructed against the deposit root contained in `state.latest_eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. 
This entails storing a full deposit merkle tree locally and computing updated proofs against the `latest_eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation. ##### Voluntary exits -Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#voluntary-exits). +Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](../core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](../core/0_beacon-chain.md#voluntary-exits). ### Attestations @@ -240,7 +241,7 @@ A validator should create and broadcast the attestation halfway through the `slo #### Attestation data -First the validator should construct `attestation_data`, an [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot. +First the validator should construct `attestation_data`, an [`AttestationData`](../core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot. * Let `head_block` be the result of running the fork choice during the assigned slot. * Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot. @@ -249,31 +250,9 @@ First the validator should construct `attestation_data`, an [`AttestationData`]( Set `attestation_data.slot = head_state.slot`. -##### Shard - -Set `attestation_data.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`. - ##### Beacon block root -Set `attestation_data.beacon_block_root = signed_root(head_block)`. - -##### Target root - -Set `attestation_data.target_root = signed_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. - -_Note:_ This can be looked up in the state using: -* Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`. -* Set `epoch_boundary = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`. - -##### Crosslink data root - -Set `attestation_data.crosslink_data_root = ZERO_HASH`. - -_Note:_ This is a stub for phase 0. - -##### Latest crosslink - -Set `attestation_data.previous_crosslink = head_state.latest_crosslinks[shard]`. +Set `attestation_data.beacon_block_root = signing_root(head_block)`. ##### Source epoch @@ -283,9 +262,31 @@ Set `attestation_data.source_epoch = head_state.justified_epoch`. Set `attestation_data.source_root = head_state.current_justified_root`. +##### Target root + +Set `attestation_data.target_root = signing_root(epoch_boundary)` where `epoch_boundary` is the block at the most recent epoch boundary. + +_Note:_ This can be looked up in the state using: +* Let `epoch_start_slot = get_epoch_start_slot(get_current_epoch(head_state))`. +* Set `epoch_boundary = head if epoch_start_slot == head_state.slot else get_block_root(state, epoch_start_slot)`. + +##### Shard + +Set `attestation_data.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`. 
+
+##### Previous crosslink root
+
+Set `attestation_data.previous_crosslink_root = hash_tree_root(head_state.current_crosslinks[shard])`.
+
+##### Crosslink data root
+
+Set `attestation_data.crosslink_data_root = ZERO_HASH`.
+
+_Note:_ This is a stub for phase 0.
+
#### Construct attestation

-Next the validator creates `attestation`, an [`Attestation`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestation) object.
+Next the validator creates `attestation`, an [`Attestation`](../core/0_beacon-chain.md#attestation) object.

##### Data

@@ -298,7 +299,7 @@ Set `attestation.data = attestation_data` where `attestation_data` is the `Attes
* Set `aggregation_bitfield[index_into_committee // 8] |= 2 ** (index_into_committee % 8)`.
* Set `attestation.aggregation_bitfield = aggregation_bitfield`.

-_Note_: Calling `get_attestation_participants(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`.
+_Note_: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)` should return a list of length equal to 1, containing `validator_index`.

##### Custody bitfield

@@ -368,24 +369,23 @@ def get_committee_assignment(
    return assignment
```

-A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the epoch of the slot in question and can not reliably be used to predict an epoch in advance.
+A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the slot in question and cannot reliably be used to predict assignments in advance.

```python
def is_proposer_at_slot(state: BeaconState,
                        slot: Slot,
                        validator_index: ValidatorIndex) -> bool:
-    current_epoch = get_current_epoch(state)
-    assert slot_to_epoch(slot) == current_epoch
+    assert state.slot == slot

-    return get_beacon_proposer_index(state, slot) == validator_index
+    return get_beacon_proposer_index(state) == validator_index
```

-_Note_: If a validator is assigned to the 0th slot of an epoch, the validator must run an empty slot transition from the previous epoch into the 0th slot of the epoch to be able to check if they are a proposer at that slot.
+_Note_: To see if a validator is assigned to propose during a slot, the validator must run an empty slot transition from the previous state to the current slot.

### Lookahead

-The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing which must checked during the epoch in question.
+The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the slot in question.

`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments which involves noting at which future slot one will have to attest and also which shard one should begin syncing (in phase 1+).

@@ -399,7 +399,7 @@ _Note_: Signed data must be within a sequential `Fork` context to conflict.
Mess

### Proposer slashing

-To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#proposalsigneddata) where conflicting is defined as two distinct blocks within the same epoch.
+To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same epoch.

_In phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings._

@@ -411,7 +411,7 @@ If the software crashes at some point within this routine, then when the validat

### Attester slashing

-To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#attestationdata) objects where conflicting is defined as a set of two attestations that satisfy either [`is_double_vote`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_double_vote) or [`is_surround_vote`](https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#is_surround_vote).
+To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](../core/0_beacon-chain.md#attestationdata) objects where conflicting is defined as a set of two attestations that satisfy either [`is_double_vote`](../core/0_beacon-chain.md#is_double_vote) or [`is_surround_vote`](../core/0_beacon-chain.md#is_surround_vote).

Specifically, when signing an `Attestation`, a validator should perform the following steps in the following order:
1. Save a record to hard disk that an attestation has been signed for source -- `attestation_data.source_epoch` -- and target -- `slot_to_epoch(attestation_data.slot)`.
diff --git a/test_generators/README.md b/test_generators/README.md
new file mode 100644
index 000000000..743157aae
--- /dev/null
+++ b/test_generators/README.md
@@ -0,0 +1,170 @@
+# Eth2.0 Test Generators
+
+This directory contains all the generators for YAML tests, consumed by Eth 2.0 client implementations.
+
+Any issues with the generators and/or generated tests should be filed
+ in the repository that hosts the generator outputs, here: [ethereum/eth2.0-tests](https://github.com/ethereum/eth2.0-tests/).
+
+Whenever a release is made, the new tests are automatically built and
+[eth2TestGenBot](https://github.com/eth2TestGenBot) commits the changes to the test repository.
+
+## How to run generators
+
+Prerequisites:
+- Python 3 installed
+- PIP 3
+- GNU make
+
+### Cleaning
+
+This removes the existing virtual environments (`/test_generators/<generator>/venv`) and generated tests (`/yaml_tests/`).
+
+```bash
+make clean
+```
+
+### Running all test generators
+
+This runs all the generators.
+
+```bash
+make gen_yaml_tests
+```
+
+### Running a single generator
+
+The Makefile auto-detects generators in the `test_generators/` directory
+ and provides a tests-gen target for each generator, for example:
+
+```bash
+make ./yaml_tests/shuffling/
+```
+
+## Developing a generator
+
+Open up the generator of your choice (one at a time) in your favorite IDE/editor, and run:
+
+```bash
+# From the root of the generator directory:
+# Create a virtual environment (any venv/.venv/.venvs is git-ignored)
+python3 -m venv venv
+# Activate the venv, this is where dependencies are installed for the generator
+. 
venv/bin/activate
+```
+
+Now that you have a virtual environment, write your generator.
+It's recommended to extend the base-generator.
+
+Create a `requirements.txt` in the root of your generator directory:
+```
+eth-utils==1.4.1
+../../test_libs/gen_helpers
+../../test_libs/config_helpers
+../../test_libs/pyspec
+```
+The config helper and pyspec are optional, but preferred. We encourage generators to derive tests from the spec itself, to prevent code duplication and outdated tests.
+Applying configurations to the spec is simple, and enables you to create test suites with different contexts.
+
+Note: make sure to run `make pyspec` from the root of the specs repository, to build the pyspec requirement.
+
+Install all the necessary requirements (re-run when you add more):
+```bash
+pip3 install -r requirements.txt
+```
+
+Now write your initial test generator, extending the base generator. Write a `main.py` file; here's an example:
+
+```python
+from gen_base import gen_runner, gen_suite, gen_typing
+
+from eth_utils import (
+    to_dict, to_tuple
+)
+
+from preset_loader import loader
+from eth2spec.phase0 import spec
+
+@to_dict
+def example_test_case(v: int):
+    yield "spec_SHARD_COUNT", spec.SHARD_COUNT
+    yield "example", v
+
+
+@to_tuple
+def generate_example_test_cases():
+    for i in range(10):
+        yield example_test_case(i)
+
+
+def example_minimal_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    presets = loader.load_presets(configs_path, 'minimal')
+    spec.apply_constants_preset(presets)
+
+    return ("mini", "core", gen_suite.render_suite(
+        title="example_minimal",
+        summary="Minimal example suite, testing bar.",
+        forks_timeline="testing",
+        forks=["phase0"],
+        config="minimal",
+        runner="example",
+        handler="main",
+        test_cases=generate_example_test_cases()))
+
+
+def example_mainnet_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    presets = loader.load_presets(configs_path, 'mainnet')
+    spec.apply_constants_preset(presets)
+
+    return ("full", "core", gen_suite.render_suite(
+        title="example_main_net",
+        summary="Mainnet-based example suite.",
+        forks_timeline="mainnet",
+        forks=["phase0"],
+        config="mainnet",
+        runner="example",
+        handler="main",
+        test_cases=generate_example_test_cases()))
+
+
+if __name__ == "__main__":
+    gen_runner.run_generator("example", [example_minimal_suite, example_mainnet_suite])
+```
+
+Recommendations:
+- you can have more than just 1 suite creator, e.g. `gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])`
+- you can concatenate lists of test cases if you don't want to split them up into suites; however, make sure they can be run with one handler.
+- you can split your suite creators into different python files/packages, good for code organization.
+- use config "minimal" for performance. But also implement a suite with the default config where necessary.
+- you may be able to write your test suite creator in a way that does not make assumptions about constants.
+  If so, you can generate test suites with different configurations for the same scenario (see example).
+- the test generator accepts `--output` and `--force` (to overwrite existing output)

+## How to add a new test generator
+
+In order to add a new test generator that builds `New Tests`:
+
+1. Create a new directory `new_tests`, within the `test_generators` directory.
+   Note that `new_tests` is also the name of the directory in which the tests will appear in the tests repository later.
+2. Your generator is assumed to have a `requirements.txt` file,
+   with any dependencies it may need.
Leave it empty if your generator has none.
+3. Your generator is assumed to have a `main.py` file in its root.
+   By adding the base generator to your requirements, you can make a generator really easily. See the docs above.
+4. Your generator is called with `-o some/file/path/for_testing/can/be_anything -c some/other/path/to_configs/`.
+   The base generator helps you handle this; you only have to define suite headers,
+   and a list of tests for each suite you generate.
+5. Finally, add any linting or testing commands to the
+   [circleci config file](https://github.com/ethereum/eth2.0-test-generators/blob/master/.circleci/config.yml)
+   if desired to increase code quality.
+
+Note: you do not have to change the makefile.
+However, if necessary (e.g. not using python, or mixing in other languages), submit an issue, and it can be a special case.
+Do note that generators should be easy to maintain, lean, and based on the spec.
+
+
+## How to remove a test generator
+
+If a test generator is not needed anymore, undo the steps described above and make a new release:
+
+1. Remove the generator directory.
+2. Remove the generated tests in the `eth2.0-tests` repository by opening a PR there.
+3. Make a new release.
diff --git a/test_generators/bls/README.md b/test_generators/bls/README.md
new file mode 100644
index 000000000..a21ad16d9
--- /dev/null
+++ b/test_generators/bls/README.md
@@ -0,0 +1,21 @@
+# BLS Test Generator
+
+An explanation of the BLS12-381 type hierarchy: the base unit is `bytes48`, of which only 381 bits are used.
+
+- FQ: uint381 modulo field modulus
+- FQ2: (FQ, FQ)
+- G2: (FQ2, FQ2, FQ2)
+
+## Resources
+
+- [Eth2.0 spec](https://github.com/ethereum/eth2.0-specs/blob/master/specs/bls_signature.md)
+- [Finite Field Arithmetic](http://www.springeronline.com/sgw/cda/pageitems/document/cda_downloaddocument/0,11996,0-0-45-110359-0,00.pdf)
+- Chapter 2 of [Elliptic Curve Cryptography](http://cacr.uwaterloo.ca/ecc/). Darrel Hankerson, Alfred Menezes, and Scott Vanstone
+- [Zcash BLS parameters](https://github.com/zkcrypto/pairing/tree/master/src/bls12_381)
+- [Trinity implementation](https://github.com/ethereum/trinity/blob/master/eth2/_utils/bls.py)
+
+## Comments
+
+Compared to Zcash, the Ethereum specs always require the compressed form (c_flag / most significant bit always set).
+Also note that pubkeys and privkeys are reversed.
diff --git a/test_generators/bls/main.py b/test_generators/bls/main.py
new file mode 100644
index 000000000..ef80635de
--- /dev/null
+++ b/test_generators/bls/main.py
@@ -0,0 +1,243 @@
+"""
+BLS test vectors generator
+"""
+
+from typing import Tuple
+
+from eth_utils import (
+    to_tuple, int_to_big_endian
+)
+from gen_base import gen_runner, gen_suite, gen_typing
+
+from py_ecc import bls
+
+
+def int_to_hex(n: int) -> str:
+    return '0x' + int_to_big_endian(n).hex()
+
+
+def hex_to_int(x: str) -> int:
+    return int(x, 16)
+
+
+# Note: even though a domain is only a uint64, it is serialized as a hex string as well,
+# to avoid issues with YAML parsers that are limited to 53-bit integers (a JS language limit).
+DOMAINS = [
+    0,
+    1,
+    1234,
+    2**32-1,
+    2**64-1
+]
+
+MESSAGES = [
+    bytes(b'\x00' * 32),
+    bytes(b'\x56' * 32),
+    bytes(b'\xab' * 32),
+]
+
+PRIVKEYS = [
+    # The curve order is a 255-bit number, so private keys are 32 bytes at most.
+ # Also not all integers is a valid private key, so using pre-generated keys + hex_to_int('0x00000000000000000000000000000000263dbd792f5b1be47ed85f8938c0f29586af0d3ac7b977f21c278fe1462040e3'), + hex_to_int('0x0000000000000000000000000000000047b8192d77bf871b62e87859d653922725724a5c031afeabc60bcef5ff665138'), + hex_to_int('0x00000000000000000000000000000000328388aff0d4a5b7dc9205abd374e7e98f3cd9f3418edb4eafda5fb16473d216'), +] + + +def hash_message(msg: bytes, + domain: int) ->Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]]: + """ + Hash message + Input: + - Message as bytes + - domain as uint64 + Output: + - Message hash as a G2 point + """ + return [ + [ + int_to_hex(fq2.coeffs[0]), + int_to_hex(fq2.coeffs[1]), + ] + for fq2 in bls.utils.hash_to_G2(msg, domain) + ] + + +def hash_message_compressed(msg: bytes, domain: int) -> Tuple[str, str]: + """ + Hash message + Input: + - Message as bytes + - domain as uint64 + Output: + - Message hash as a compressed G2 point + """ + z1, z2 = bls.utils.compress_G2(bls.utils.hash_to_G2(msg, domain)) + return [int_to_hex(z1), int_to_hex(z2)] + + + +@to_tuple +def case01_message_hash_G2_uncompressed(): + for msg in MESSAGES: + for domain in DOMAINS: + yield { + 'input': { + 'message': '0x' + msg.hex(), + 'domain': int_to_hex(domain) + }, + 'output': hash_message(msg, domain) + } + +@to_tuple +def case02_message_hash_G2_compressed(): + for msg in MESSAGES: + for domain in DOMAINS: + yield { + 'input': { + 'message': '0x' + msg.hex(), + 'domain': int_to_hex(domain) + }, + 'output': hash_message_compressed(msg, domain) + } + +@to_tuple +def case03_private_to_public_key(): + pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS] + pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys] + for privkey, pubkey_serial in zip(PRIVKEYS, pubkeys_serial): + yield { + 'input': int_to_hex(privkey), + 'output': pubkey_serial, + } + +@to_tuple +def case04_sign_messages(): + for privkey in PRIVKEYS: + for message in MESSAGES: + for domain in DOMAINS: + sig = bls.sign(message, privkey, domain) + yield { + 'input': { + 'privkey': int_to_hex(privkey), + 'message': '0x' + message.hex(), + 'domain': int_to_hex(domain) + }, + 'output': '0x' + sig.hex() + } + +# TODO: case05_verify_messages: Verify messages signed in case04 +# It takes too long, empty for now + + +@to_tuple +def case06_aggregate_sigs(): + for domain in DOMAINS: + for message in MESSAGES: + sigs = [bls.sign(message, privkey, domain) for privkey in PRIVKEYS] + yield { + 'input': ['0x' + sig.hex() for sig in sigs], + 'output': '0x' + bls.aggregate_signatures(sigs).hex(), + } + +@to_tuple +def case07_aggregate_pubkeys(): + pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS] + pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys] + yield { + 'input': pubkeys_serial, + 'output': '0x' + bls.aggregate_pubkeys(pubkeys).hex(), + } + + +# TODO +# Aggregate verify + +# TODO +# Proof-of-possession + + +def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("g2_uncompressed", "msg_hash_g2_uncompressed", gen_suite.render_suite( + title="BLS G2 Uncompressed msg hash", + summary="BLS G2 Uncompressed msg hash", + forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + runner="bls", + handler="msg_hash_uncompressed", + test_cases=case01_message_hash_G2_uncompressed())) + + +def bls_msg_hash_compressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("g2_compressed", "msg_hash_g2_compressed", gen_suite.render_suite( + title="BLS G2 
Compressed msg hash",
+        summary="BLS G2 Compressed msg hash",
+        forks_timeline="mainnet",
+        forks=["phase0"],
+        config="mainnet",
+        runner="bls",
+        handler="msg_hash_compressed",
+        test_cases=case02_message_hash_G2_compressed()))
+
+
+
+def bls_priv_to_pub_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    return ("priv_to_pub", "priv_to_pub", gen_suite.render_suite(
+        title="BLS private key to pubkey",
+        summary="BLS Convert private key to public key",
+        forks_timeline="mainnet",
+        forks=["phase0"],
+        config="mainnet",
+        runner="bls",
+        handler="priv_to_pub",
+        test_cases=case03_private_to_public_key()))
+
+
+def bls_sign_msg_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    return ("sign_msg", "sign_msg", gen_suite.render_suite(
+        title="BLS sign msg",
+        summary="BLS Sign a message",
+        forks_timeline="mainnet",
+        forks=["phase0"],
+        config="mainnet",
+        runner="bls",
+        handler="sign_msg",
+        test_cases=case04_sign_messages()))
+
+
+def bls_aggregate_sigs_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    return ("aggregate_sigs", "aggregate_sigs", gen_suite.render_suite(
+        title="BLS aggregate sigs",
+        summary="BLS Aggregate signatures",
+        forks_timeline="mainnet",
+        forks=["phase0"],
+        config="mainnet",
+        runner="bls",
+        handler="aggregate_sigs",
+        test_cases=case06_aggregate_sigs()))
+
+
+def bls_aggregate_pubkeys_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    return ("aggregate_pubkeys", "aggregate_pubkeys", gen_suite.render_suite(
+        title="BLS aggregate pubkeys",
+        summary="BLS Aggregate public keys",
+        forks_timeline="mainnet",
+        forks=["phase0"],
+        config="mainnet",
+        runner="bls",
+        handler="aggregate_pubkeys",
+        test_cases=case07_aggregate_pubkeys()))
+
+
+if __name__ == "__main__":
+    gen_runner.run_generator("bls", [
+        bls_msg_hash_compressed_suite,
+        bls_msg_hash_uncompressed_suite,
+        bls_priv_to_pub_suite,
+        bls_sign_msg_suite,
+        bls_aggregate_sigs_suite,
+        bls_aggregate_pubkeys_suite
+    ])
diff --git a/test_generators/bls/requirements.txt b/test_generators/bls/requirements.txt
new file mode 100644
index 000000000..8a933d41c
--- /dev/null
+++ b/test_generators/bls/requirements.txt
@@ -0,0 +1,3 @@
+py-ecc==1.6.0
+eth-utils==1.4.1
+../../test_libs/gen_helpers
diff --git a/test_generators/operations/README.md b/test_generators/operations/README.md
new file mode 100644
index 000000000..e0b9d0e18
--- /dev/null
+++ b/test_generators/operations/README.md
@@ -0,0 +1,13 @@
+# Operations
+
+Operations (or "transactions" in previous spec iterations)
+ are atomic changes to the state, introduced by embedding in blocks.
+
+This generator provides a series of test suites, divided into handlers, for each operation type.
+An operation test-runner can consume these operation test-suites,
+ and handle different kinds of operations by processing the cases using the specified test handler.
+
+Information on the format of the tests can be found in the [operations test formats documentation](../../specs/test_formats/operations/README.md).
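+
+For illustration, a consumer of these suites might look roughly like the sketch below.
+The suite layout follows the fields rendered by `gen_suite.render_suite`; the output file path,
+ the YAML-loading details, and the equality check via re-encoding are assumptions of this sketch,
+ not guarantees of the generator.
+
+```python
+from ruamel.yaml import YAML
+
+from preset_loader import loader
+from eth2spec.phase0 import spec
+from eth2spec.debug.decode import decode
+from eth2spec.debug.encode import encode
+
+# Apply the same constants preset the suite was generated with.
+presets = loader.load_presets('configs/', 'minimal')
+spec.apply_constants_preset(presets)
+
+# Hypothetical output location; the real path depends on the -o argument used.
+suite = YAML(typ='base').load(open('yaml_tests/operations/deposits/deposit_minimal.yaml'))
+
+for case in suite['test_cases']:
+    state = decode(case['pre'], spec.BeaconState)
+    deposit = decode(case['deposit'], spec.Deposit)
+    if case['post'] is None:
+        # Cases without a post state are expected to be rejected.
+        try:
+            spec.process_deposit(state, deposit)
+            raise Exception('operation should have been rejected')
+        except AssertionError:
+            pass  # rejected, as expected
+    else:
+        spec.process_deposit(state, deposit)
+        # Compare by re-encoding, since the suite stores encoded states.
+        assert encode(state, spec.BeaconState) == case['post']
+```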
+ + + diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py new file mode 100644 index 000000000..85c93f86b --- /dev/null +++ b/test_generators/operations/deposits.py @@ -0,0 +1,181 @@ +from eth2spec.phase0 import spec +from eth_utils import ( + to_dict, to_tuple +) +from gen_base import gen_suite, gen_typing +from preset_loader import loader +from eth2spec.debug.encode import encode +from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof + +from typing import List, Tuple + +import genesis +import keys +from py_ecc import bls + + +def build_deposit_data(state, + pubkey: spec.BLSPubkey, + withdrawal_cred: spec.Bytes32, + privkey: int, + amount: int): + deposit_data = spec.DepositData( + pubkey=pubkey, + withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[1:], + amount=amount, + proof_of_possession=spec.EMPTY_SIGNATURE, + ) + deposit_data.proof_of_possession = bls.sign( + message_hash=signing_root(deposit_data), + privkey=privkey, + domain=spec.get_domain( + state.fork, + spec.get_current_epoch(state), + spec.DOMAIN_DEPOSIT, + ) + ) + return deposit_data + + +def build_deposit(state, + deposit_data_leaves: List[spec.Bytes32], + pubkey: spec.BLSPubkey, + withdrawal_cred: spec.Bytes32, + privkey: int, + amount: int) -> spec.Deposit: + + deposit_data = build_deposit_data(state, pubkey, withdrawal_cred, privkey, amount) + + item = spec.hash(deposit_data.serialize()) + index = len(deposit_data_leaves) + deposit_data_leaves.append(item) + tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves)) + proof = list(get_merkle_proof(tree, item_index=index)) + + deposit = spec.Deposit( + proof=list(proof), + index=index, + data=deposit_data, + ) + assert spec.verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, get_merkle_root(tuple(deposit_data_leaves))) + + return deposit + + +def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[spec.Deposit, spec.BeaconState]: + genesis_deposits = genesis.create_deposits( + keys.pubkeys[:initial_validator_count], + keys.withdrawal_creds[:initial_validator_count] + ) + state = genesis.create_genesis_state(genesis_deposits) + + deposit_data_leaves = [spec.hash(dep.data.serialize()) for dep in genesis_deposits] + + deposit = build_deposit( + state, + deposit_data_leaves, + keys.pubkeys[index], + keys.withdrawal_creds[index], + keys.privkeys[index], + spec.MAX_DEPOSIT_AMOUNT, + ) + + state.latest_eth1_data.deposit_root = get_merkle_root(tuple(deposit_data_leaves)) + state.latest_eth1_data.deposit_count = len(deposit_data_leaves) + + return deposit, state + + +@to_dict +def valid_deposit(): + new_dep, state = build_deposit_for_index(10, 10) + yield 'description', 'valid deposit to add new validator' + yield 'pre', encode(state, spec.BeaconState) + yield 'deposit', encode(new_dep, spec.Deposit) + spec.process_deposit(state, new_dep) + yield 'post', encode(state, spec.BeaconState) + + +@to_dict +def valid_topup(): + new_dep, state = build_deposit_for_index(10, 3) + yield 'description', 'valid deposit to top-up existing validator' + yield 'pre', encode(state, spec.BeaconState) + yield 'deposit', encode(new_dep, spec.Deposit) + spec.process_deposit(state, new_dep) + yield 'post', encode(state, spec.BeaconState) + + +@to_dict +def invalid_deposit_index(): + new_dep, state = build_deposit_for_index(10, 10) + # Mess up deposit index, 1 too small + 
state.deposit_index = 9
+
+    yield 'description', 'invalid deposit index'
+    yield 'pre', encode(state, spec.BeaconState)
+    yield 'deposit', encode(new_dep, spec.Deposit)
+    try:
+        spec.process_deposit(state, new_dep)
+    except AssertionError:
+        # expected
+        yield 'post', None
+        return
+    raise Exception('invalid_deposit_index has unexpectedly allowed deposit')
+
+
+@to_dict
+def invalid_deposit_proof():
+    new_dep, state = build_deposit_for_index(10, 10)
+    # Make deposit proof invalid (at bottom of proof)
+    new_dep.proof[-1] = spec.ZERO_HASH
+
+    yield 'description', 'invalid deposit proof'
+    yield 'pre', encode(state, spec.BeaconState)
+    yield 'deposit', encode(new_dep, spec.Deposit)
+    try:
+        spec.process_deposit(state, new_dep)
+    except AssertionError:
+        # expected
+        yield 'post', None
+        return
+    raise Exception('invalid_deposit_proof has unexpectedly allowed deposit')
+
+
+@to_tuple
+def deposit_cases():
+    yield valid_deposit()
+    yield valid_topup()
+    yield invalid_deposit_index()
+    yield invalid_deposit_proof()
+
+
+def mini_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    presets = loader.load_presets(configs_path, 'minimal')
+    spec.apply_constants_preset(presets)
+
+    return ("deposit_minimal", "deposits", gen_suite.render_suite(
+        title="deposit operation",
+        summary="Test suite for deposit type operation processing",
+        forks_timeline="testing",
+        forks=["phase0"],
+        config="minimal",
+        runner="operations",
+        handler="deposits",
+        test_cases=deposit_cases()))
+
+
+def full_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    presets = loader.load_presets(configs_path, 'mainnet')
+    spec.apply_constants_preset(presets)
+
+    return ("deposit_full", "deposits", gen_suite.render_suite(
+        title="deposit operation",
+        summary="Test suite for deposit type operation processing",
+        forks_timeline="mainnet",
+        forks=["phase0"],
+        config="mainnet",
+        runner="operations",
+        handler="deposits",
+        test_cases=deposit_cases()))
diff --git a/test_generators/operations/genesis.py b/test_generators/operations/genesis.py
new file mode 100644
index 000000000..7e0146f67
--- /dev/null
+++ b/test_generators/operations/genesis.py
@@ -0,0 +1,44 @@
+from eth2spec.phase0 import spec
+from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof
+from typing import List
+
+
+def create_genesis_state(deposits: List[spec.Deposit]) -> spec.BeaconState:
+    deposit_root = get_merkle_root(tuple([spec.hash(dep.data.serialize()) for dep in deposits]))
+
+    return spec.get_genesis_beacon_state(
+        deposits,
+        genesis_time=0,
+        genesis_eth1_data=spec.Eth1Data(
+            deposit_root=deposit_root,
+            deposit_count=len(deposits),
+            block_hash=spec.ZERO_HASH,
+        ),
+    )
+
+
+def create_deposits(pubkeys: List[spec.BLSPubkey], withdrawal_cred: List[spec.Bytes32]) -> List[spec.Deposit]:
+
+    # Mock proof of possession
+    proof_of_possession = b'\x33' * 96
+
+    deposit_data = [
+        spec.DepositData(
+            pubkey=pubkeys[i],
+            withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[i][1:],
+            amount=spec.MAX_DEPOSIT_AMOUNT,
+            proof_of_possession=proof_of_possession,
+        ) for i in range(len(pubkeys))
+    ]
+
+    # Fill tree with existing deposits
+    deposit_data_leaves = [spec.hash(data.serialize()) for data in deposit_data]
+    tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
+
+    return [
+        spec.Deposit(
+            proof=list(get_merkle_proof(tree, item_index=i)),
+            index=i,
+            data=deposit_data[i]
+        ) for i in range(len(deposit_data))
+    ]
diff --git
a/test_generators/operations/keys.py b/test_generators/operations/keys.py
new file mode 100644
index 000000000..db4f59e0e
--- /dev/null
+++ b/test_generators/operations/keys.py
@@ -0,0 +1,7 @@
+from py_ecc import bls
+from eth2spec.phase0.spec import hash
+
+privkeys = list(range(1, 101))
+pubkeys = [bls.privtopub(k) for k in privkeys]
+# Insecure, but easier to follow
+withdrawal_creds = [hash(bls.privtopub(k)) for k in privkeys]
diff --git a/test_generators/operations/main.py b/test_generators/operations/main.py
new file mode 100644
index 000000000..8b0a2a6d8
--- /dev/null
+++ b/test_generators/operations/main.py
@@ -0,0 +1,9 @@
+from gen_base import gen_runner
+
+from deposits import mini_deposits_suite, full_deposits_suite
+
+if __name__ == "__main__":
+    gen_runner.run_generator("operations", [
+        mini_deposits_suite,
+        full_deposits_suite
+    ])
diff --git a/test_generators/operations/requirements.txt b/test_generators/operations/requirements.txt
new file mode 100644
index 000000000..dfe853536
--- /dev/null
+++ b/test_generators/operations/requirements.txt
@@ -0,0 +1,5 @@
+eth-utils==1.4.1
+../../test_libs/gen_helpers
+../../test_libs/config_helpers
+../../test_libs/pyspec
+py_ecc
\ No newline at end of file
diff --git a/test_generators/shuffling/README.md b/test_generators/shuffling/README.md
new file mode 100644
index 000000000..a8f0cbdb4
--- /dev/null
+++ b/test_generators/shuffling/README.md
@@ -0,0 +1,10 @@
+# Shuffling Tests
+
+Tests for the swap-or-not shuffling in ETH 2.0.
+
+Tips for writing an initial shuffling implementation:
+- run with `round_count = 1` first, and do the same with the pyspec.
+- start with the permute-index function (`get_permuted_index`)
+- optimized shuffling implementations:
+  - vitalik, Python: https://github.com/ethereum/eth2.0-specs/pull/576#issue-250741806
+  - protolambda, Go: https://github.com/protolambda/eth2-shuffle
diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py
new file mode 100644
index 000000000..2c4faeb8f
--- /dev/null
+++ b/test_generators/shuffling/main.py
@@ -0,0 +1,54 @@
+from eth2spec.phase0 import spec
+from eth_utils import (
+    to_dict, to_tuple
+)
+from gen_base import gen_runner, gen_suite, gen_typing
+from preset_loader import loader
+
+
+@to_dict
+def shuffling_case(seed: spec.Bytes32, count: int):
+    yield 'seed', '0x' + seed.hex()
+    yield 'count', count
+    yield 'shuffled', [spec.get_permuted_index(i, count, seed) for i in range(count)]
+
+
+@to_tuple
+def shuffling_test_cases():
+    for seed in [spec.hash(spec.int_to_bytes4(seed_init_value)) for seed_init_value in range(30)]:
+        for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000]:
+            yield shuffling_case(seed, count)
+
+
+def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    presets = loader.load_presets(configs_path, 'minimal')
+    spec.apply_constants_preset(presets)
+
+    return ("shuffling_minimal", "core", gen_suite.render_suite(
+        title="Swap-or-Not Shuffling tests with minimal config",
+        summary="Swap or not shuffling, with minimally configured testing round-count",
+        forks_timeline="testing",
+        forks=["phase0"],
+        config="minimal",
+        runner="shuffling",
+        handler="core",
+        test_cases=shuffling_test_cases()))
+
+
+def full_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
+    presets = loader.load_presets(configs_path, 'mainnet')
+    spec.apply_constants_preset(presets)
+
+    return ("shuffling_full", "core", gen_suite.render_suite(
+        title="Swap-or-Not Shuffling tests with mainnet config",
+        summary="Swap or not shuffling, with the normal (secure) mainnet round-count",
+        
forks_timeline="mainnet", + forks=["phase0"], + config="mainnet", + runner="shuffling", + handler="core", + test_cases=shuffling_test_cases())) + + +if __name__ == "__main__": + gen_runner.run_generator("shuffling", [mini_shuffling_suite, full_shuffling_suite]) diff --git a/test_generators/shuffling/requirements.txt b/test_generators/shuffling/requirements.txt new file mode 100644 index 000000000..8f9bede8f --- /dev/null +++ b/test_generators/shuffling/requirements.txt @@ -0,0 +1,4 @@ +eth-utils==1.4.1 +../../test_libs/gen_helpers +../../test_libs/config_helpers +../../test_libs/pyspec \ No newline at end of file diff --git a/scripts/__init__.py b/test_generators/ssz_generic/__init__.py similarity index 100% rename from scripts/__init__.py rename to test_generators/ssz_generic/__init__.py diff --git a/test_generators/ssz_generic/main.py b/test_generators/ssz_generic/main.py new file mode 100644 index 000000000..fe01a68d7 --- /dev/null +++ b/test_generators/ssz_generic/main.py @@ -0,0 +1,47 @@ +from uint_test_cases import ( + generate_random_uint_test_cases, + generate_uint_wrong_length_test_cases, + generate_uint_bounds_test_cases, + generate_uint_out_of_bounds_test_cases +) + +from gen_base import gen_runner, gen_suite, gen_typing + +def ssz_random_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("uint_random", "uint", gen_suite.render_suite( + title="UInt Random", + summary="Random integers chosen uniformly over the allowed value range", + forks_timeline= "mainnet", + forks=["phase0"], + config="mainnet", + runner="ssz", + handler="uint", + test_cases=generate_random_uint_test_cases())) + + +def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("uint_wrong_length", "uint", gen_suite.render_suite( + title="UInt Wrong Length", + summary="Serialized integers that are too short or too long", + forks_timeline= "mainnet", + forks=["phase0"], + config="mainnet", + runner="ssz", + handler="uint", + test_cases=generate_uint_wrong_length_test_cases())) + + +def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + return ("uint_bounds", "uint", gen_suite.render_suite( + title="UInt Bounds", + summary="Integers right at or beyond the bounds of the allowed value range", + forks_timeline= "mainnet", + forks=["phase0"], + config="mainnet", + runner="ssz", + handler="uint", + test_cases=generate_uint_bounds_test_cases() + generate_uint_out_of_bounds_test_cases())) + + +if __name__ == "__main__": + gen_runner.run_generator("ssz_generic", [ssz_random_uint_suite, ssz_wrong_uint_suite, ssz_uint_bounds_suite]) diff --git a/test_generators/ssz_generic/renderers.py b/test_generators/ssz_generic/renderers.py new file mode 100644 index 000000000..28571cdda --- /dev/null +++ b/test_generators/ssz_generic/renderers.py @@ -0,0 +1,93 @@ +from collections.abc import ( + Mapping, + Sequence, +) + +from eth_utils import ( + encode_hex, + to_dict, +) + +from ssz.sedes import ( + BaseSedes, + Boolean, + Bytes, + BytesN, + Container, + List, + UInt, +) + + +def render_value(value): + if isinstance(value, bool): + return value + elif isinstance(value, int): + return str(value) + elif isinstance(value, bytes): + return encode_hex(value) + elif isinstance(value, Sequence): + return tuple(render_value(element) for element in value) + elif isinstance(value, Mapping): + return render_dict_value(value) + else: + raise ValueError(f"Cannot render value {value}") + + +@to_dict +def render_dict_value(value): + for key, value in value.items(): + yield key, 
render_value(value)
+
+
+def render_type_definition(sedes):
+    if isinstance(sedes, Boolean):
+        return "bool"
+
+    elif isinstance(sedes, UInt):
+        return f"uint{sedes.length * 8}"
+
+    elif isinstance(sedes, BytesN):
+        return f"bytes{sedes.length}"
+
+    elif isinstance(sedes, Bytes):
+        return "bytes"
+
+    elif isinstance(sedes, List):
+        return [render_type_definition(sedes.element_sedes)]
+
+    elif isinstance(sedes, Container):
+        return {
+            field_name: render_type_definition(field_sedes)
+            for field_name, field_sedes in sedes.fields
+        }
+
+    elif isinstance(sedes, BaseSedes):
+        raise Exception("Unreachable: All sedes types have been checked")
+
+    else:
+        raise TypeError("Expected BaseSedes")
+
+
+@to_dict
+def render_test_case(*, sedes, valid, value=None, serial=None, description=None, tags=None):
+    value_and_serial_given = value is not None and serial is not None
+    if valid:
+        if not value_and_serial_given:
+            raise ValueError("For valid test cases, both value and ssz must be present")
+    else:
+        if value_and_serial_given:
+            raise ValueError("For invalid test cases, value and ssz must not both be present")
+
+    if tags is None:
+        tags = []
+
+    yield "type", render_type_definition(sedes)
+    yield "valid", valid
+    if value is not None:
+        yield "value", render_value(value)
+    if serial is not None:
+        yield "ssz", encode_hex(serial)
+    if description is not None:
+        yield "description", description
+    yield "tags", tags
diff --git a/test_generators/ssz_generic/requirements.txt b/test_generators/ssz_generic/requirements.txt
new file mode 100644
index 000000000..94afc9d91
--- /dev/null
+++ b/test_generators/ssz_generic/requirements.txt
@@ -0,0 +1,4 @@
+eth-utils==1.4.1
+../../test_libs/gen_helpers
+../../test_libs/config_helpers
+ssz==0.1.0a2
diff --git a/test_generators/ssz_generic/uint_test_cases.py b/test_generators/ssz_generic/uint_test_cases.py
new file mode 100644
index 000000000..6d6492c9e
--- /dev/null
+++ b/test_generators/ssz_generic/uint_test_cases.py
@@ -0,0 +1,98 @@
+import random
+
+from eth_utils import (
+    to_tuple,
+)
+
+import ssz
+from ssz.sedes import (
+    UInt,
+)
+from renderers import (
+    render_test_case,
+)
+
+random.seed(0)
+
+
+BIT_SIZES = [8, 16, 32, 64, 128, 256]
+RANDOM_TEST_CASES_PER_BIT_SIZE = 10
+RANDOM_TEST_CASES_PER_LENGTH = 3
+
+
+def get_random_bytes(length):
+    return bytes(random.randint(0, 255) for _ in range(length))
+
+
+@to_tuple
+def generate_random_uint_test_cases():
+    for bit_size in BIT_SIZES:
+        sedes = UInt(bit_size)
+
+        for _ in range(RANDOM_TEST_CASES_PER_BIT_SIZE):
+            value = random.randrange(0, 2**bit_size)
+            serial = ssz.encode(value, sedes)
+            # note that we need to create the tags in each loop cycle, otherwise ruamel will use
+            # YAML references which makes the resulting file harder to read
+            tags = tuple(["atomic", "uint", "random"])
+            yield render_test_case(
+                sedes=sedes,
+                valid=True,
+                value=value,
+                serial=serial,
+                tags=tags,
+            )
+
+
+@to_tuple
+def generate_uint_wrong_length_test_cases():
+    for bit_size in BIT_SIZES:
+        sedes = UInt(bit_size)
+        lengths = sorted({
+            0,
+            sedes.length // 2,
+            sedes.length - 1,
+            sedes.length + 1,
+            sedes.length * 2,
+        })
+        for length in lengths:
+            for _ in range(RANDOM_TEST_CASES_PER_LENGTH):
+                tags = tuple(["atomic", "uint", "wrong_length"])
+                yield render_test_case(
+                    sedes=sedes,
+                    valid=False,
+                    serial=get_random_bytes(length),
+                    tags=tags,
+                )
+
+
+@to_tuple
+def generate_uint_bounds_test_cases():
+    common_tags = ("atomic", "uint")
+    for bit_size in BIT_SIZES:
+        sedes = UInt(bit_size)
+
+        for value, tag in ((0, 
"uint_lower_bound"), (2 ** bit_size - 1, "uint_upper_bound")): + serial = ssz.encode(value, sedes) + yield render_test_case( + sedes=sedes, + valid=True, + value=value, + serial=serial, + tags=common_tags + (tag,), + ) + + +@to_tuple +def generate_uint_out_of_bounds_test_cases(): + common_tags = ("atomic", "uint") + for bit_size in BIT_SIZES: + sedes = UInt(bit_size) + + for value, tag in ((-1, "uint_underflow"), (2 ** bit_size, "uint_overflow")): + yield render_test_case( + sedes=sedes, + valid=False, + value=value, + tags=common_tags + (tag,), + ) diff --git a/test_generators/ssz_static/README.md b/test_generators/ssz_static/README.md new file mode 100644 index 000000000..014c71517 --- /dev/null +++ b/test_generators/ssz_static/README.md @@ -0,0 +1,4 @@ +# SSZ-static + +The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ: + the serialization and hashing of ETH 2.0 data types diff --git a/tests/__init__.py b/test_generators/ssz_static/__init__.py similarity index 100% rename from tests/__init__.py rename to test_generators/ssz_static/__init__.py diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py new file mode 100644 index 000000000..010ca2735 --- /dev/null +++ b/test_generators/ssz_static/main.py @@ -0,0 +1,79 @@ +from random import Random + +from eth2spec.debug import random_value, encode +from eth2spec.phase0 import spec +from eth2spec.utils.minimal_ssz import hash_tree_root, serialize +from eth_utils import ( + to_tuple, to_dict +) +from gen_base import gen_runner, gen_suite, gen_typing +from preset_loader import loader + +MAX_BYTES_LENGTH = 100 +MAX_LIST_LENGTH = 10 + + +@to_dict +def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMode, chaos: bool): + typ = spec.get_ssz_type_by_name(name) + value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos) + yield "type_name", name + yield "value", encode.encode(value, typ) + yield "serialized", '0x' + serialize(value).hex() + yield "root", '0x' + hash_tree_root(value).hex() + + +@to_tuple +def ssz_static_cases(rng: Random, mode: random_value.RandomizationMode, chaos: bool, count: int): + for type_name in spec.ssz_types: + for i in range(count): + yield create_test_case(rng, type_name, mode, chaos) + + +def get_ssz_suite(seed: int, config_name: str, mode: random_value.RandomizationMode, chaos: bool, cases_if_random: int): + def ssz_suite(configs_path: str) -> gen_typing.TestSuiteOutput: + # Apply changes to presets, this affects some of the vector types. 
+ presets = loader.load_presets(configs_path, config_name) + spec.apply_constants_preset(presets) + + # Reproducible RNG + rng = Random(seed) + + random_mode_name = mode.to_name() + + suite_name = f"ssz_{config_name}_{random_mode_name}{'_chaos' if chaos else ''}" + + count = cases_if_random if chaos or mode.is_changing() else 1 + print(f"generating SSZ-static suite ({count} cases per ssz type): {suite_name}") + + return (suite_name, "core", gen_suite.render_suite( + title=f"ssz testing, with {config_name} config, randomized with mode {random_mode_name}{' and with chaos applied' if chaos else ''}", + summary="Test suite for ssz serialization and hash-tree-root", + forks_timeline="testing", + forks=["phase0"], + config=config_name, + runner="ssz", + handler="static", + test_cases=ssz_static_cases(rng, mode, chaos, count))) + + return ssz_suite + + +if __name__ == "__main__": + # [(seed, config name, randomization mode, chaos on/off, cases_if_random)] + settings = [] + seed = 1 + for mode in random_value.RandomizationMode: + settings.append((seed, "minimal", mode, False, 30)) + seed += 1 + settings.append((seed, "minimal", random_value.RandomizationMode.mode_random, True, 30)) + seed += 1 + settings.append((seed, "mainnet", random_value.RandomizationMode.mode_random, False, 5)) + seed += 1 + + print("Settings: %d, SSZ-types: %d" % (len(settings), len(spec.ssz_types))) + + gen_runner.run_generator("ssz_static", [ + get_ssz_suite(seed, config_name, mode, chaos, cases_if_random) + for (seed, config_name, mode, chaos, cases_if_random) in settings + ]) diff --git a/test_generators/ssz_static/requirements.txt b/test_generators/ssz_static/requirements.txt new file mode 100644 index 000000000..8f9bede8f --- /dev/null +++ b/test_generators/ssz_static/requirements.txt @@ -0,0 +1,4 @@ +eth-utils==1.4.1 +../../test_libs/gen_helpers +../../test_libs/config_helpers +../../test_libs/pyspec \ No newline at end of file diff --git a/test_libs/config_helpers/README.md b/test_libs/config_helpers/README.md new file mode 100644 index 000000000..eaa3f3b40 --- /dev/null +++ b/test_libs/config_helpers/README.md @@ -0,0 +1,19 @@ +# ETH 2.0 config helpers + +`preset_loader`: A util to load constants-presets with. +See [Constants-presets documentation](../../configs/constants_presets/README.md). + +Usage: + +```python +configs_path = 'configs/' + +... + +import preset_loader +from eth2spec.phase0 import spec +my_presets = preset_loader.load_presets(configs_path, 'mainnet') +spec.apply_constants_preset(my_presets) +``` + +WARNING: this overwrites globals, make sure to prevent accidental collisions with other usage of the same imported specs package. diff --git a/tests/phase0/__init__.py b/test_libs/config_helpers/preset_loader/__init__.py similarity index 100% rename from tests/phase0/__init__.py rename to test_libs/config_helpers/preset_loader/__init__.py diff --git a/test_libs/config_helpers/preset_loader/loader.py b/test_libs/config_helpers/preset_loader/loader.py new file mode 100644 index 000000000..f37aca393 --- /dev/null +++ b/test_libs/config_helpers/preset_loader/loader.py @@ -0,0 +1,25 @@ +from typing import Dict, Any + +from ruamel.yaml import ( + YAML, +) +from pathlib import Path +from os.path import join + + +def load_presets(configs_dir, presets_name) -> Dict[str, Any]: + """ + Loads the given preset + :param presets_name: The name of the generator. 
(lowercase snake_case) + :return: Dictionary, mapping of constant-name -> constant-value + """ + path = Path(join(configs_dir, 'constant_presets', presets_name+'.yaml')) + yaml = YAML(typ='base') + loaded = yaml.load(path) + out = dict() + for k, v in loaded.items(): + if v.startswith("0x"): + out[k] = bytes.fromhex(v[2:]) + else: + out[k] = int(v) + return out diff --git a/test_libs/config_helpers/requirements.txt b/test_libs/config_helpers/requirements.txt new file mode 100644 index 000000000..e441a474b --- /dev/null +++ b/test_libs/config_helpers/requirements.txt @@ -0,0 +1 @@ +ruamel.yaml==0.15.87 diff --git a/test_libs/config_helpers/setup.py b/test_libs/config_helpers/setup.py new file mode 100644 index 000000000..90ad94ee4 --- /dev/null +++ b/test_libs/config_helpers/setup.py @@ -0,0 +1,9 @@ +from distutils.core import setup + +setup( + name='config_helpers', + packages=['preset_loader'], + install_requires=[ + "ruamel.yaml==0.15.87" + ] +) diff --git a/test_libs/gen_helpers/README.md b/test_libs/gen_helpers/README.md new file mode 100644 index 000000000..4dcfacef7 --- /dev/null +++ b/test_libs/gen_helpers/README.md @@ -0,0 +1,5 @@ +# ETH 2.0 test generator helpers + +`gen_base`: A util to quickly write new test suite generators with. +See [Generators documentation](../../test_generators/README.md). + diff --git a/utils/__init__.py b/test_libs/gen_helpers/gen_base/__init__.py similarity index 100% rename from utils/__init__.py rename to test_libs/gen_helpers/gen_base/__init__.py diff --git a/test_libs/gen_helpers/gen_base/gen_runner.py b/test_libs/gen_helpers/gen_base/gen_runner.py new file mode 100644 index 000000000..e36d48b8b --- /dev/null +++ b/test_libs/gen_helpers/gen_base/gen_runner.py @@ -0,0 +1,115 @@ +import argparse +from pathlib import Path +import sys +from typing import List + +from ruamel.yaml import ( + YAML, +) + +from gen_base.gen_typing import TestSuiteCreator + + +def validate_output_dir(path_str): + path = Path(path_str) + + if not path.exists(): + raise argparse.ArgumentTypeError("Output directory must exist") + + if not path.is_dir(): + raise argparse.ArgumentTypeError("Output path must lead to a directory") + + return path + + +def validate_configs_dir(path_str): + path = Path(path_str) + + if not path.exists(): + raise argparse.ArgumentTypeError("Configs directory must exist") + + if not path.is_dir(): + raise argparse.ArgumentTypeError("Config path must lead to a directory") + + if not Path(path, "constant_presets").exists(): + raise argparse.ArgumentTypeError("Constant Presets directory must exist") + + if not Path(path, "constant_presets").is_dir(): + raise argparse.ArgumentTypeError("Constant Presets path must lead to a directory") + + if not Path(path, "fork_timelines").exists(): + raise argparse.ArgumentTypeError("Fork Timelines directory must exist") + + if not Path(path, "fork_timelines").is_dir(): + raise argparse.ArgumentTypeError("Fork Timelines path must lead to a directory") + + return path + + +def run_generator(generator_name, suite_creators: List[TestSuiteCreator]): + """ + Implementation for a general test generator. + :param generator_name: The name of the generator. (lowercase snake_case) + :param suite_creators: A list of suite creators, each of these builds a list of test cases. 
+    :return:
+    """
+
+    parser = argparse.ArgumentParser(
+        prog="gen-" + generator_name,
+        description=f"Generate YAML test suite files for {generator_name}",
+    )
+    parser.add_argument(
+        "-o",
+        "--output-dir",
+        dest="output_dir",
+        required=True,
+        type=validate_output_dir,
+        help="directory into which the generated YAML files will be dumped"
+    )
+    parser.add_argument(
+        "-f",
+        "--force",
+        action="store_true",
+        default=False,
+        help="if set, overwrite test files if they exist",
+    )
+    parser.add_argument(
+        "-c",
+        "--configs-path",
+        dest="configs_path",
+        required=True,
+        type=validate_configs_dir,
+        help="specify the path of the configs directory (containing constant_presets and fork_timelines)",
+    )
+
+    args = parser.parse_args()
+    output_dir = args.output_dir
+    if not args.force:
+        file_mode = "x"
+    else:
+        file_mode = "w"
+
+    yaml = YAML(pure=True)
+    yaml.default_flow_style = None
+
+    print(f"Generating tests for {generator_name}, creating {len(suite_creators)} test suite files...")
+    print(f"Reading config presets and fork timelines from {args.configs_path}")
+    for suite_creator in suite_creators:
+        (output_name, handler, suite) = suite_creator(args.configs_path)
+
+        handler_output_dir = Path(output_dir) / Path(handler)
+        try:
+            if not handler_output_dir.exists():
+                handler_output_dir.mkdir()
+        except FileNotFoundError as e:
+            sys.exit(f'Error when creating handler dir {handler} for test "{suite["title"]}" ({e})')
+
+        out_path = handler_output_dir / Path(output_name + '.yaml')
+
+        try:
+            with out_path.open(file_mode) as f:
+                yaml.dump(suite, f)
+        except IOError as e:
+            sys.exit(f'Error when dumping test "{suite["title"]}" ({e})')
+
+    print("done.")
diff --git a/test_libs/gen_helpers/gen_base/gen_suite.py b/test_libs/gen_helpers/gen_base/gen_suite.py
new file mode 100644
index 000000000..a3f88791f
--- /dev/null
+++ b/test_libs/gen_helpers/gen_base/gen_suite.py
@@ -0,0 +1,22 @@
+from typing import Iterable
+
+from eth_utils import to_dict
+from gen_base.gen_typing import TestCase
+
+
+@to_dict
+def render_suite(*,
+                 title: str, summary: str,
+                 forks_timeline: str, forks: Iterable[str],
+                 config: str,
+                 runner: str,
+                 handler: str,
+                 test_cases: Iterable[TestCase]):
+    yield "title", title
+    yield "summary", summary
+    yield "forks_timeline", forks_timeline
+    yield "forks", forks
+    yield "config", config
+    yield "runner", runner
+    yield "handler", handler
+    yield "test_cases", test_cases
diff --git a/test_libs/gen_helpers/gen_base/gen_typing.py b/test_libs/gen_helpers/gen_base/gen_typing.py
new file mode 100644
index 000000000..1cb315315
--- /dev/null
+++ b/test_libs/gen_helpers/gen_base/gen_typing.py
@@ -0,0 +1,8 @@
+from typing import Callable, Dict, Tuple, Any
+
+TestCase = Dict[str, Any]
+TestSuite = Dict[str, Any]
+# Tuple: (output name, handler name, suite) -- output name excl. ".yaml"
+TestSuiteOutput = Tuple[str, str, TestSuite]
+# Args: configs path
+TestSuiteCreator = Callable[[str], TestSuiteOutput]
diff --git a/test_libs/gen_helpers/requirements.txt b/test_libs/gen_helpers/requirements.txt
new file mode 100644
index 000000000..3d6a39458
--- /dev/null
+++ b/test_libs/gen_helpers/requirements.txt
@@ -0,0 +1,2 @@
+ruamel.yaml==0.15.87
+eth-utils==1.4.1
diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py
new file mode 100644
index 000000000..5de27a6db
--- /dev/null
+++ b/test_libs/gen_helpers/setup.py
@@ -0,0 +1,10 @@
+from distutils.core import setup
+
+setup(
+    name='gen_helpers',
+    packages=['gen_base'],
+    install_requires=[
+        "ruamel.yaml==0.15.87",
+        "eth-utils==1.4.1"
+    ]
+)
diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md
new file mode 100644
index 000000000..b3cab11d2
--- /dev/null
+++ b/test_libs/pyspec/README.md
@@ -0,0 +1,58 @@
+# ETH 2.0 PySpec
+
+The Python executable spec is built from the ETH 2.0 specification,
+ complemented with the necessary helper functions for hashing, BLS, and more.
+
+With this executable spec,
+ test-generators can easily create test-vectors for client implementations,
+ and the spec itself can be verified to be consistent and coherent through sanity tests implemented with pytest.
+
+
+## Building
+
+All the dynamic parts of the spec can be built at once with `make pyspec`.
+
+Alternatively, you can build a sub-set of the pyspec: `make phase0`.
+
+Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2spec/phase0/spec.py`.
+
+
+## Py-tests
+
+These tests are not intended for client consumption.
+They are sanity tests, meant to verify that the spec itself is consistent.
+
+### How to run tests
+
+#### Automated
+
+Run `make test` from the root of the spec repository.
+
+#### Manual
+
+From within the `pyspec` folder:
+
+Install dependencies:
+```bash
+python3 -m venv venv
+. venv/bin/activate
+pip3 install -r requirements.txt
+```
+Note: make sure to run `make pyspec` from the root of the specs repository,
+ to build the parts of the pyspec module derived from the markdown specs.
+
+Run the tests:
+```
+pytest -m minimal_config .
+```
+
+
+## Contributing
+
+Contributions are welcome, but consider implementing your idea as part of the spec itself first.
+The pyspec is not a replacement for the spec.
+
+
+## License
+
+Same as the spec itself, see LICENSE file in spec repository root.
diff --git a/utils/phase0/__init__.py b/test_libs/pyspec/eth2spec/__init__.py similarity index 100% rename from utils/phase0/__init__.py rename to test_libs/pyspec/eth2spec/__init__.py diff --git a/tests/conftest.py b/test_libs/pyspec/eth2spec/debug/__init__.py similarity index 100% rename from tests/conftest.py rename to test_libs/pyspec/eth2spec/debug/__init__.py diff --git a/test_libs/pyspec/eth2spec/debug/decode.py b/test_libs/pyspec/eth2spec/debug/decode.py new file mode 100644 index 000000000..aeac3924d --- /dev/null +++ b/test_libs/pyspec/eth2spec/debug/decode.py @@ -0,0 +1,28 @@ +from eth2spec.utils.minimal_ssz import hash_tree_root + + +def decode(json, typ): + if isinstance(typ, str) and typ[:4] == 'uint': + return json + elif typ == 'bool': + assert json in (True, False) + return json + elif isinstance(typ, list): + return [decode(element, typ[0]) for element in json] + elif isinstance(typ, str) and typ[:4] == 'byte': + return bytes.fromhex(json[2:]) + elif hasattr(typ, 'fields'): + temp = {} + for field, subtype in typ.fields.items(): + temp[field] = decode(json[field], subtype) + if field + "_hash_tree_root" in json: + assert(json[field + "_hash_tree_root"][2:] == + hash_tree_root(temp[field], subtype).hex()) + ret = typ(**temp) + if "hash_tree_root" in json: + assert(json["hash_tree_root"][2:] == + hash_tree_root(ret, typ).hex()) + return ret + else: + print(json, typ) + raise Exception("Type not recognized") diff --git a/test_libs/pyspec/eth2spec/debug/encode.py b/test_libs/pyspec/eth2spec/debug/encode.py new file mode 100644 index 000000000..d3513e638 --- /dev/null +++ b/test_libs/pyspec/eth2spec/debug/encode.py @@ -0,0 +1,28 @@ +from eth2spec.utils.minimal_ssz import hash_tree_root + + +def encode(value, typ, include_hash_tree_roots=False): + if isinstance(typ, str) and typ[:4] == 'uint': + if typ[4:] == '128' or typ[4:] == '256': + return str(value) + return value + elif typ == 'bool': + assert value in (True, False) + return value + elif isinstance(typ, list): + return [encode(element, typ[0], include_hash_tree_roots) for element in value] + elif isinstance(typ, str) and typ[:4] == 'byte': + return '0x' + value.hex() + elif hasattr(typ, 'fields'): + ret = {} + for field, subtype in typ.fields.items(): + ret[field] = encode(getattr(value, field), subtype, include_hash_tree_roots) + if include_hash_tree_roots: + ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex() + if include_hash_tree_roots: + ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex() + return ret + else: + print(value, typ) + raise Exception("Type not recognized") + diff --git a/test_libs/pyspec/eth2spec/debug/random_value.py b/test_libs/pyspec/eth2spec/debug/random_value.py new file mode 100644 index 000000000..a853d2328 --- /dev/null +++ b/test_libs/pyspec/eth2spec/debug/random_value.py @@ -0,0 +1,137 @@ +from random import Random +from typing import Any +from enum import Enum + + +UINT_SIZES = [8, 16, 32, 64, 128, 256] + +basic_types = ["uint%d" % v for v in UINT_SIZES] + ['bool', 'byte'] + +random_mode_names = ["random", "zero", "max", "nil", "one", "lengthy"] + + +class RandomizationMode(Enum): + # random content / length + mode_random = 0 + # Zero-value + mode_zero = 1 + # Maximum value, limited to count 1 however + mode_max = 2 + # Return 0 values, i.e. 
empty
+    mode_nil_count = 3
+    # Return 1 value, random content
+    mode_one_count = 4
+    # Return max amount of values, random content
+    mode_max_count = 5
+
+    def to_name(self):
+        return random_mode_names[self.value]
+
+    def is_changing(self):
+        return self.value in [0, 4, 5]
+
+
+def get_random_ssz_object(rng: Random, typ: Any, max_bytes_length: int, max_list_length: int, mode: RandomizationMode, chaos: bool) -> Any:
+    """
+    Create an object for a given type, filled with random data.
+    :param rng: The random number generator to use.
+    :param typ: The type to instantiate
+    :param max_bytes_length: the max. length for a random bytes array
+    :param max_list_length: the max. length for a random list
+    :param mode: how to randomize
+    :param chaos: if true, the randomization-mode will be randomly changed
+    :return: the random object instance, of the given type.
+    """
+    if chaos:
+        mode = rng.choice(list(RandomizationMode))
+    if isinstance(typ, str):
+        # Bytes array
+        if typ == 'bytes':
+            if mode == RandomizationMode.mode_nil_count:
+                return b''
+            if mode == RandomizationMode.mode_max_count:
+                return get_random_bytes_list(rng, max_bytes_length)
+            if mode == RandomizationMode.mode_one_count:
+                return get_random_bytes_list(rng, 1)
+            if mode == RandomizationMode.mode_zero:
+                return b'\x00'
+            if mode == RandomizationMode.mode_max:
+                return b'\xff'
+            return get_random_bytes_list(rng, rng.randint(0, max_bytes_length))
+        elif typ[:5] == 'bytes' and len(typ) > 5:
+            length = int(typ[5:])
+            # Sanity, don't generate absurdly big random values
+            # If a client is aiming to performance-test, they should create a benchmark suite.
+            assert length <= max_bytes_length
+            if mode == RandomizationMode.mode_zero:
+                return b'\x00' * length
+            if mode == RandomizationMode.mode_max:
+                return b'\xff' * length
+            return get_random_bytes_list(rng, length)
+        # Basic types
+        else:
+            if mode == RandomizationMode.mode_zero:
+                return get_min_basic_value(typ)
+            if mode == RandomizationMode.mode_max:
+                return get_max_basic_value(typ)
+            return get_random_basic_value(rng, typ)
+    # Vector:
+    elif isinstance(typ, list) and len(typ) == 2:
+        return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) for _ in range(typ[1])]
+    # List:
+    elif isinstance(typ, list) and len(typ) == 1:
+        length = rng.randint(0, max_list_length)
+        if mode == RandomizationMode.mode_one_count:
+            length = 1
+        if mode == RandomizationMode.mode_max_count:
+            length = max_list_length
+        return [get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos) for _ in range(length)]
+    # Container:
+    elif hasattr(typ, 'fields'):
+        return typ(**{field: get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode, chaos) for field, subtype in typ.fields.items()})
+    else:
+        print(typ)
+        raise Exception("Type not recognized")
+
+
+def get_random_bytes_list(rng: Random, length: int) -> bytes:
+    return bytes(rng.getrandbits(8) for _ in range(length))
+
+
+def get_random_basic_value(rng: Random, typ: str) -> Any:
+    if typ == 'bool':
+        return rng.choice((True, False))
+    if typ[:4] == 'uint':
+        size = int(typ[4:])
+        assert size in UINT_SIZES
+        return rng.randint(0, 2**size - 1)
+    if typ == 'byte':
+        return rng.randint(0, 255)  # 'byte' is an alias for uint8, i.e. the full 0..255 range
+    else:
+        raise ValueError("Not a basic type")
+
+
+def get_min_basic_value(typ: str) -> Any:
+    if typ == 'bool':
+        return False
+    if typ[:4] == 'uint':
+        size = int(typ[4:])
+        assert size in UINT_SIZES
+        return 0
+    if typ == 'byte':
+        return 0x00
+    else:
+        raise ValueError("Not a basic type")
+
+
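For a sense of what these helpers return, a minimal usage sketch (assuming the pyspec package is installed so that `eth2spec.debug.random_value` is importable; the seed and size bounds are arbitrary):

```python
from random import Random

from eth2spec.debug.random_value import (
    RandomizationMode,
    get_random_ssz_object,
)

rng = Random(1234)  # seeded, so runs are reproducible
# zero-mode basic value: the minimum, i.e. 0
assert get_random_ssz_object(rng, 'uint64', 100, 10, RandomizationMode.mode_zero, False) == 0
# max-mode fixed-size bytes: 32 bytes of 0xff
assert get_random_ssz_object(rng, 'bytes32', 100, 10, RandomizationMode.mode_max, False) == b'\xff' * 32
# max-count list: max_list_length elements of random uint16 content
assert len(get_random_ssz_object(rng, ['uint16'], 100, 10, RandomizationMode.mode_max_count, False)) == 10
```

With `chaos=True` the mode argument is only a starting point: a random mode is drawn for each (sub)object, which suits fuzzing-style suites.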
+def get_max_basic_value(typ: str) -> Any: + if typ == 'bool': + return True + if typ[:4] == 'uint': + size = int(typ[4:]) + assert size in UINT_SIZES + return 2**size - 1 + if typ == 'byte': + return 0xff + else: + raise ValueError("Not a basic type") diff --git a/test_libs/pyspec/eth2spec/phase0/__init__.py b/test_libs/pyspec/eth2spec/phase0/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/phase0/state_transition.py b/test_libs/pyspec/eth2spec/phase0/state_transition.py similarity index 83% rename from utils/phase0/state_transition.py rename to test_libs/pyspec/eth2spec/phase0/state_transition.py index 2c420014f..38ecd2a02 100644 --- a/utils/phase0/state_transition.py +++ b/test_libs/pyspec/eth2spec/phase0/state_transition.py @@ -1,17 +1,17 @@ from . import spec -from typing import ( # noqa: F401 +from typing import ( Any, Callable, - List, - NewType, - Tuple, + List ) from .spec import ( BeaconState, BeaconBlock, + Slot, + process_proposer_attestation_rewards, ) @@ -52,6 +52,7 @@ def process_operations(state: BeaconState, block: BeaconBlock) -> None: spec.MAX_ATTESTATIONS, spec.process_attestation, ) + process_proposer_attestation_rewards(state) assert len(block.body.deposits) == expected_deposit_count(state) process_operation_type( @@ -90,24 +91,24 @@ def process_block(state: BeaconState, def process_epoch_transition(state: BeaconState) -> None: - spec.update_justification_and_finalization(state) + spec.process_justification_and_finalization(state) spec.process_crosslinks(state) - spec.maybe_reset_eth1_period(state) - spec.apply_rewards(state) - spec.process_ejections(state) - spec.update_registry(state) + spec.process_rewards_and_penalties(state) + spec.process_registry_updates(state) spec.process_slashings(state) - spec.process_exit_queue(state) - spec.finish_epoch_update(state) + spec.process_final_updates(state) + + +def state_transition_to(state: BeaconState, up_to: Slot) -> BeaconState: + while state.slot < up_to: + spec.cache_state(state) + if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0: + process_epoch_transition(state) + spec.advance_slot(state) def state_transition(state: BeaconState, block: BeaconBlock, verify_state_root: bool=False) -> BeaconState: - while state.slot < block.slot: - spec.cache_state(state) - if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0: - process_epoch_transition(state) - spec.advance_slot(state) - if block.slot == state.slot: - process_block(state, block, verify_state_root) + state_transition_to(state, block.slot) + process_block(state, block, verify_state_root) diff --git a/test_libs/pyspec/eth2spec/utils/__init__.py b/test_libs/pyspec/eth2spec/utils/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/utils/phase0/bls_stub.py b/test_libs/pyspec/eth2spec/utils/bls_stub.py similarity index 100% rename from utils/phase0/bls_stub.py rename to test_libs/pyspec/eth2spec/utils/bls_stub.py diff --git a/utils/phase0/hash_function.py b/test_libs/pyspec/eth2spec/utils/hash_function.py similarity index 100% rename from utils/phase0/hash_function.py rename to test_libs/pyspec/eth2spec/utils/hash_function.py diff --git a/utils/phase0/merkle_minimal.py b/test_libs/pyspec/eth2spec/utils/merkle_minimal.py similarity index 100% rename from utils/phase0/merkle_minimal.py rename to test_libs/pyspec/eth2spec/utils/merkle_minimal.py diff --git a/utils/phase0/minimal_ssz.py b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py similarity index 50% rename from utils/phase0/minimal_ssz.py rename to 
test_libs/pyspec/eth2spec/utils/minimal_ssz.py index c4828d08f..dbe9d1359 100644 --- a/utils/phase0/minimal_ssz.py +++ b/test_libs/pyspec/eth2spec/utils/minimal_ssz.py @@ -1,5 +1,6 @@ -from .hash_function import hash +from typing import Any +from .hash_function import hash BYTES_PER_CHUNK = 32 BYTES_PER_LENGTH_PREFIX = 4 @@ -9,16 +10,14 @@ ZERO_CHUNK = b'\x00' * BYTES_PER_CHUNK def SSZType(fields): class SSZObject(): def __init__(self, **kwargs): - for f in fields: + for f, t in fields.items(): if f not in kwargs: - raise Exception("Missing constructor argument: %s" % f) - setattr(self, f, kwargs[f]) + setattr(self, f, get_zero_value(t)) + else: + setattr(self, f, kwargs[f]) def __eq__(self, other): - return ( - self.fields == other.fields and - self.serialize() == other.serialize() - ) + return self.fields == other.fields and self.serialize() == other.serialize() def __hash__(self): return int.from_bytes(self.hash_tree_root(), byteorder="little") @@ -58,18 +57,40 @@ class Vector(): def is_basic(typ): - return isinstance(typ, str) and (typ[:4] in ('uint', 'bool') or typ == 'byte') + # if not a string, it is a complex, and cannot be basic + if not isinstance(typ, str): + return False + # "uintN": N-bit unsigned integer (where N in [8, 16, 32, 64, 128, 256]) + elif typ[:4] == 'uint' and typ[4:] in ['8', '16', '32', '64', '128', '256']: + return True + # "bool": True or False + elif typ == 'bool': + return True + # alias: "byte" -> "uint8" + elif typ == 'byte': + return True + # default + else: + return False def is_constant_sized(typ): + # basic objects are fixed size by definition if is_basic(typ): return True + # dynamic size array type, "list": [elem_type]. + # Not constant size by definition. elif isinstance(typ, list) and len(typ) == 1: - return is_constant_sized(typ[0]) - elif isinstance(typ, list) and len(typ) == 2: return False + # fixed size array type, "vector": [elem_type, length] + # Constant size, but only if the elements are. + elif isinstance(typ, list) and len(typ) == 2: + return is_constant_sized(typ[0]) + # bytes array (fixed or dynamic size) elif isinstance(typ, str) and typ[:5] == 'bytes': - return len(typ) > 5 + # if no length suffix, it has a dynamic size + return typ != 'bytes' + # containers are only constant-size if all of the fields are constant size. 
elif hasattr(typ, 'fields'): for subtype in typ.fields.values(): if not is_constant_sized(subtype): @@ -90,40 +111,98 @@ def coerce_to_bytes(x): raise Exception("Expecting bytes") +def encode_bytes(value): + serialized_bytes = coerce_to_bytes(value) + assert len(serialized_bytes) < 2 ** (8 * BYTES_PER_LENGTH_PREFIX) + serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') + return serialized_length + serialized_bytes + + +def encode_variable_size_container(values, types): + return encode_bytes(encode_fixed_size_container(values, types)) + + +def encode_fixed_size_container(values, types): + return b''.join([serialize_value(v, typ) for (v, typ) in zip(values, types)]) + + def serialize_value(value, typ=None): if typ is None: typ = infer_type(value) + # "uintN" if isinstance(typ, str) and typ[:4] == 'uint': length = int(typ[4:]) assert length in (8, 16, 32, 64, 128, 256) return value.to_bytes(length // 8, 'little') - elif typ == 'bool': + # "bool" + elif isinstance(typ, str) and typ == 'bool': assert value in (True, False) return b'\x01' if value is True else b'\x00' - elif (isinstance(typ, list) and len(typ) == 1) or typ == 'bytes': - serialized_bytes = coerce_to_bytes(value) if typ == 'bytes' else b''.join([serialize_value(element, typ[0]) for element in value]) - assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) - serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') - return serialized_length + serialized_bytes + # Vector elif isinstance(typ, list) and len(typ) == 2: + # (regardless of element type, sanity-check if the length reported in the vector type matches the value length) assert len(value) == typ[1] - return b''.join([serialize_value(element, typ[0]) for element in value]) + # If value is fixed-size (i.e. element type is fixed-size): + if is_constant_sized(typ): + return encode_fixed_size_container(value, [typ[0]] * len(value)) + # If value is variable-size (i.e. 
element type is variable-size) + else: + return encode_variable_size_container(value, [typ[0]] * len(value)) + # "bytes" (variable size) + elif isinstance(typ, str) and typ == 'bytes': + return encode_bytes(value) + # List + elif isinstance(typ, list) and len(typ) == 1: + return encode_variable_size_container(value, [typ[0]] * len(value)) + # "bytesN" (fixed size) elif isinstance(typ, str) and len(typ) > 5 and typ[:5] == 'bytes': assert len(value) == int(typ[5:]), (value, int(typ[5:])) return coerce_to_bytes(value) + # containers elif hasattr(typ, 'fields'): - serialized_bytes = b''.join([serialize_value(getattr(value, field), subtype) for field, subtype in typ.fields.items()]) + values = [getattr(value, field) for field in typ.fields.keys()] + types = list(typ.fields.values()) if is_constant_sized(typ): - return serialized_bytes + return encode_fixed_size_container(values, types) else: - assert len(serialized_bytes) < 2**(8 * BYTES_PER_LENGTH_PREFIX) - serialized_length = len(serialized_bytes).to_bytes(BYTES_PER_LENGTH_PREFIX, 'little') - return serialized_length + serialized_bytes + return encode_variable_size_container(values, types) else: print(value, typ) raise Exception("Type not recognized") +def get_zero_value(typ: Any) -> Any: + if isinstance(typ, str): + # Bytes array + if typ == 'bytes': + return b'' + # bytesN + elif typ[:5] == 'bytes' and len(typ) > 5: + length = int(typ[5:]) + return b'\x00' * length + # Basic types + elif typ == 'bool': + return False + elif typ[:4] == 'uint': + return 0 + elif typ == 'byte': + return 0x00 + else: + raise ValueError("Type not recognized") + # Vector: + elif isinstance(typ, list) and len(typ) == 2: + return [get_zero_value(typ[0]) for _ in range(typ[1])] + # List: + elif isinstance(typ, list) and len(typ) == 1: + return [] + # Container: + elif hasattr(typ, 'fields'): + return typ(**{field: get_zero_value(subtype) for field, subtype in typ.fields.items()}) + else: + print(typ) + raise Exception("Type not recognized") + + def chunkify(bytez): bytez += b'\x00' * (-len(bytez) % BYTES_PER_CHUNK) return [bytez[i:i + 32] for i in range(0, len(bytez), 32)] @@ -152,12 +231,27 @@ def mix_in_length(root, length): def infer_type(value): + """ + Note: defaults to uint64 for integer type inference due to lack of information. + Other integer sizes are still supported, see spec. + :param value: The value to infer a SSZ type for. + :return: The SSZ type. + """ if hasattr(value.__class__, 'fields'): return value.__class__ elif isinstance(value, Vector): - return [infer_type(value[0]) if len(value) > 0 else 'uint64', len(value)] + if len(value) > 0: + return [infer_type(value[0]), len(value)] + else: + # Element type does not matter too much, + # assumed to be a basic type for size-encoding purposes, vector is empty. + return ['uint64'] elif isinstance(value, list): - return [infer_type(value[0])] if len(value) > 0 else ['uint64'] + if len(value) > 0: + return [infer_type(value[0])] + else: + # Element type does not matter, list-content size will be encoded regardless, list is empty. 
+ return ['uint64'] elif isinstance(value, (bytes, str)): return 'bytes' elif isinstance(value, int): @@ -169,24 +263,41 @@ def infer_type(value): def hash_tree_root(value, typ=None): if typ is None: typ = infer_type(value) + # ------------------------------------- + # merkleize(pack(value)) + # basic object: merkleize packed version (merkleization pads it to 32 bytes if it is not already) if is_basic(typ): return merkleize(pack([value], typ)) - elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]): - return mix_in_length(merkleize(pack(value, typ[0])), len(value)) - elif isinstance(typ, list) and len(typ) == 1 and not is_basic(typ[0]): - return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value)) + # or a vector of basic objects elif isinstance(typ, list) and len(typ) == 2 and is_basic(typ[0]): assert len(value) == typ[1] return merkleize(pack(value, typ[0])) + # ------------------------------------- + # mix_in_length(merkleize(pack(value)), len(value)) + # if value is a list of basic objects + elif isinstance(typ, list) and len(typ) == 1 and is_basic(typ[0]): + return mix_in_length(merkleize(pack(value, typ[0])), len(value)) + # (needs some extra work for non-fixed-sized bytes array) elif typ == 'bytes': return mix_in_length(merkleize(chunkify(coerce_to_bytes(value))), len(value)) + # ------------------------------------- + # merkleize([hash_tree_root(element) for element in value]) + # if value is a vector of composite objects + elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]): + return merkleize([hash_tree_root(element, typ[0]) for element in value]) + # (needs some extra work for fixed-sized bytes array) elif isinstance(typ, str) and typ[:5] == 'bytes' and len(typ) > 5: assert len(value) == int(typ[5:]) return merkleize(chunkify(coerce_to_bytes(value))) - elif isinstance(typ, list) and len(typ) == 2 and not is_basic(typ[0]): - return merkleize([hash_tree_root(element, typ[0]) for element in value]) + # or a container elif hasattr(typ, 'fields'): return merkleize([hash_tree_root(getattr(value, field), subtype) for field, subtype in typ.fields.items()]) + # ------------------------------------- + # mix_in_length(merkleize([hash_tree_root(element) for element in value]), len(value)) + # if value is a list of composite objects + elif isinstance(typ, list) and len(typ) == 1 and not is_basic(typ[0]): + return mix_in_length(merkleize([hash_tree_root(element, typ[0]) for element in value]), len(value)) + # ------------------------------------- else: raise Exception("Type not recognized") @@ -205,7 +316,7 @@ def truncate(container): return truncated_class(**kwargs) -def signed_root(container): +def signing_root(container): return hash_tree_root(truncate(container)) diff --git a/requirements.txt b/test_libs/pyspec/requirements.txt similarity index 89% rename from requirements.txt rename to test_libs/pyspec/requirements.txt index 9145e951e..3296ef807 100644 --- a/requirements.txt +++ b/test_libs/pyspec/requirements.txt @@ -1,6 +1,5 @@ eth-utils>=1.3.0,<2 eth-typing>=2.1.0,<3.0.0 -oyaml==0.7 pycryptodome==3.7.3 py_ecc>=1.6.0 pytest>=3.6,<3.7 diff --git a/test_libs/pyspec/setup.py b/test_libs/pyspec/setup.py new file mode 100644 index 000000000..1a131a417 --- /dev/null +++ b/test_libs/pyspec/setup.py @@ -0,0 +1,13 @@ +from setuptools import setup, find_packages + +setup( + name='pyspec', + packages=find_packages(), + tests_require=["pytest"], + install_requires=[ + "eth-utils>=1.3.0,<2", + "eth-typing>=2.1.0,<3.0.0", + 
"pycryptodome==3.7.3", + "py_ecc>=1.6.0", + ] +) diff --git a/test_libs/pyspec/tests/README.md b/test_libs/pyspec/tests/README.md new file mode 100644 index 000000000..e69de29bb diff --git a/test_libs/pyspec/tests/__init__.py b/test_libs/pyspec/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/tests/phase0/block_processing/test_process_attestation.py b/test_libs/pyspec/tests/block_processing/test_process_attestation.py similarity index 90% rename from tests/phase0/block_processing/test_process_attestation.py rename to test_libs/pyspec/tests/block_processing/test_process_attestation.py index ca6933ce7..1be60c860 100644 --- a/tests/phase0/block_processing/test_process_attestation.py +++ b/test_libs/pyspec/tests/block_processing/test_process_attestation.py @@ -1,20 +1,21 @@ from copy import deepcopy import pytest -import build.phase0.spec as spec +import eth2spec.phase0.spec as spec -from build.phase0.state_transition import ( +from eth2spec.phase0.state_transition import ( state_transition, ) -from build.phase0.spec import ( - ZERO_HASH, +from eth2spec.phase0.spec import ( get_current_epoch, process_attestation, slot_to_epoch, ) -from tests.phase0.helpers import ( +from tests.helpers import ( build_empty_block_for_next_slot, get_valid_attestation, + next_epoch, + next_slot, ) @@ -102,7 +103,7 @@ def test_bad_source_root(state): attestation = get_valid_attestation(state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - attestation.data.source_root = b'\x42'*32 + attestation.data.source_root = b'\x42' * 32 pre_state, post_state = run_attestation_processing(state, attestation, False) @@ -113,7 +114,7 @@ def test_non_zero_crosslink_data_root(state): attestation = get_valid_attestation(state) state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY - attestation.data.crosslink_data_root = b'\x42'*32 + attestation.data.crosslink_data_root = b'\x42' * 32 pre_state, post_state = run_attestation_processing(state, attestation, False) @@ -121,10 +122,12 @@ def test_non_zero_crosslink_data_root(state): def test_bad_previous_crosslink(state): + next_epoch(state) attestation = get_valid_attestation(state) - state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY + for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY): + next_slot(state) - state.latest_crosslinks[attestation.data.shard].epoch += 10 + state.current_crosslinks[attestation.data.shard].epoch += 10 pre_state, post_state = run_attestation_processing(state, attestation, False) diff --git a/tests/phase0/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py similarity index 94% rename from tests/phase0/block_processing/test_process_attester_slashing.py rename to test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py index 06f214c4b..84c19145a 100644 --- a/tests/phase0/block_processing/test_process_attester_slashing.py +++ b/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py @@ -1,14 +1,15 @@ from copy import deepcopy import pytest -import build.phase0.spec as spec -from build.phase0.spec import ( +import eth2spec.phase0.spec as spec +from eth2spec.phase0.spec import ( get_balance, get_beacon_proposer_index, process_attester_slashing, ) -from tests.phase0.helpers import ( +from tests.helpers import ( get_valid_attester_slashing, + next_epoch, ) # mark entire file as 'attester_slashing' @@ -31,7 +32,6 @@ def run_attester_slashing_processing(state, attester_slashing, valid=True): slashed_index = 
attester_slashing.attestation_1.custody_bit_0_indices[0] slashed_validator = post_state.validator_registry[slashed_index] - assert not slashed_validator.initiated_exit assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH @@ -40,7 +40,7 @@ def run_attester_slashing_processing(state, attester_slashing, valid=True): get_balance(post_state, slashed_index) < get_balance(state, slashed_index) ) - proposer_index = get_beacon_proposer_index(state, state.slot) + proposer_index = get_beacon_proposer_index(state) # gained whistleblower reward assert ( get_balance(post_state, proposer_index) > @@ -59,6 +59,8 @@ def test_success_double(state): def test_success_surround(state): + next_epoch(state) + state.current_justified_epoch += 1 attester_slashing = get_valid_attester_slashing(state) # set attestion1 to surround attestation 2 diff --git a/tests/phase0/block_processing/test_process_block_header.py b/test_libs/pyspec/tests/block_processing/test_process_block_header.py similarity index 84% rename from tests/phase0/block_processing/test_process_block_header.py rename to test_libs/pyspec/tests/block_processing/test_process_block_header.py index 4981b656c..b35b0a9c1 100644 --- a/tests/phase0/block_processing/test_process_block_header.py +++ b/test_libs/pyspec/tests/block_processing/test_process_block_header.py @@ -2,14 +2,15 @@ from copy import deepcopy import pytest -from build.phase0.spec import ( +from eth2spec.phase0.spec import ( get_beacon_proposer_index, cache_state, advance_slot, process_block_header, ) -from tests.phase0.helpers import ( +from tests.helpers import ( build_empty_block_for_next_slot, + next_slot, ) # mark entire file as 'header' @@ -54,15 +55,19 @@ def test_invalid_slot(state): def test_invalid_previous_block_root(state): block = build_empty_block_for_next_slot(state) - block.previous_block_root = b'\12'*32 # invalid prev root + block.previous_block_root = b'\12' * 32 # invalid prev root pre_state, post_state = run_block_header_processing(state, block, valid=False) return pre_state, block, None def test_proposer_slashed(state): + # use stub state to get proposer index of next slot + stub_state = deepcopy(state) + next_slot(stub_state) + proposer_index = get_beacon_proposer_index(stub_state) + # set proposer to slashed - proposer_index = get_beacon_proposer_index(state, state.slot + 1) state.validator_registry[proposer_index].slashed = True block = build_empty_block_for_next_slot(state) diff --git a/tests/phase0/block_processing/test_process_deposit.py b/test_libs/pyspec/tests/block_processing/test_process_deposit.py similarity index 95% rename from tests/phase0/block_processing/test_process_deposit.py rename to test_libs/pyspec/tests/block_processing/test_process_deposit.py index 0726dddef..4031e650d 100644 --- a/tests/phase0/block_processing/test_process_deposit.py +++ b/test_libs/pyspec/tests/block_processing/test_process_deposit.py @@ -1,22 +1,22 @@ from copy import deepcopy import pytest -import build.phase0.spec as spec +import eth2spec.phase0.spec as spec -from build.phase0.spec import ( +from eth2spec.phase0.spec import ( get_balance, ZERO_HASH, process_deposit, ) -from tests.phase0.helpers import ( +from tests.helpers import ( build_deposit, privkeys, pubkeys, ) -# mark entire file as 'voluntary_exits' -pytestmark = pytest.mark.voluntary_exits +# mark entire file as 'deposits' +pytestmark = pytest.mark.deposits def test_success(state): diff --git 
a/tests/phase0/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py similarity index 94% rename from tests/phase0/block_processing/test_process_proposer_slashing.py rename to test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py index 467d2164b..6d5f3045d 100644 --- a/tests/phase0/block_processing/test_process_proposer_slashing.py +++ b/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py @@ -1,17 +1,17 @@ from copy import deepcopy import pytest -import build.phase0.spec as spec -from build.phase0.spec import ( +import eth2spec.phase0.spec as spec +from eth2spec.phase0.spec import ( get_balance, get_current_epoch, process_proposer_slashing, ) -from tests.phase0.helpers import ( +from tests.helpers import ( get_valid_proposer_slashing, ) -# mark entire file as 'header' +# mark entire file as 'proposer_slashings' pytestmark = pytest.mark.proposer_slashings @@ -30,7 +30,6 @@ def run_proposer_slashing_processing(state, proposer_slashing, valid=True): process_proposer_slashing(post_state, proposer_slashing) slashed_validator = post_state.validator_registry[proposer_slashing.proposer_index] - assert not slashed_validator.initiated_exit assert slashed_validator.slashed assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH diff --git a/test_libs/pyspec/tests/block_processing/test_voluntary_exit.py b/test_libs/pyspec/tests/block_processing/test_voluntary_exit.py new file mode 100644 index 000000000..c58c5238a --- /dev/null +++ b/test_libs/pyspec/tests/block_processing/test_voluntary_exit.py @@ -0,0 +1,163 @@ +from copy import deepcopy +import pytest + +import eth2spec.phase0.spec as spec + +from eth2spec.phase0.spec import ( + get_active_validator_indices, + get_churn_limit, + get_current_epoch, + process_voluntary_exit, +) +from tests.helpers import ( + build_voluntary_exit, + pubkey_to_privkey, +) + + +# mark entire file as 'voluntary_exits' +pytestmark = pytest.mark.voluntary_exits + + +def run_voluntary_exit_processing(state, voluntary_exit, valid=True): + """ + Run ``process_voluntary_exit`` returning the pre and post state. 
+    If ``valid == False``, run expecting ``AssertionError``
+    """
+    post_state = deepcopy(state)
+
+    if not valid:
+        with pytest.raises(AssertionError):
+            process_voluntary_exit(post_state, voluntary_exit)
+        return state, None
+
+    process_voluntary_exit(post_state, voluntary_exit)
+
+    validator_index = voluntary_exit.validator_index
+    assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
+    assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+
+    return state, post_state
+
+
+def test_success(state):
+    # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
+    state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+    current_epoch = get_current_epoch(state)
+    validator_index = get_active_validator_indices(state, current_epoch)[0]
+    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+    voluntary_exit = build_voluntary_exit(
+        state,
+        current_epoch,
+        validator_index,
+        privkey,
+    )
+
+    pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit)
+    return pre_state, voluntary_exit, post_state
+
+
+def test_success_exit_queue(state):
+    # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
+    state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+    current_epoch = get_current_epoch(state)
+
+    # exit as many validators as the churn limit allows within one epoch
+    initial_indices = get_active_validator_indices(state, current_epoch)[:get_churn_limit(state)]
+    post_state = state
+    for index in initial_indices:
+        privkey = pubkey_to_privkey[state.validator_registry[index].pubkey]
+        voluntary_exit = build_voluntary_exit(
+            state,
+            current_epoch,
+            index,
+            privkey,
+        )
+
+        pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit)
+
+    # exit an additional validator
+    validator_index = get_active_validator_indices(state, current_epoch)[-1]
+    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+    voluntary_exit = build_voluntary_exit(
+        state,
+        current_epoch,
+        validator_index,
+        privkey,
+    )
+
+    pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit)
+
+    assert (
+        post_state.validator_registry[validator_index].exit_epoch ==
+        post_state.validator_registry[initial_indices[0]].exit_epoch + 1
+    )
+
+    return pre_state, voluntary_exit, post_state
+
+
+def test_validator_not_active(state):
+    current_epoch = get_current_epoch(state)
+    validator_index = get_active_validator_indices(state, current_epoch)[0]
+    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+    state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+    #
+    # build and test voluntary exit
+    #
+    voluntary_exit = build_voluntary_exit(
+        state,
+        current_epoch,
+        validator_index,
+        privkey,
+    )
+
+    pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False)
+    return pre_state, voluntary_exit, post_state
+
+
+def test_validator_already_exited(state):
+    # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow the validator to exit
+    state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+    current_epoch = get_current_epoch(state)
+    validator_index = get_active_validator_indices(state, current_epoch)[0]
+    privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+    # but the validator has already exited
+    state.validator_registry[validator_index].exit_epoch = current_epoch + 2
+
+    voluntary_exit =
build_voluntary_exit( + state, + current_epoch, + validator_index, + privkey, + ) + + pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False) + return pre_state, voluntary_exit, post_state + + +def test_validator_not_active_long_enough(state): + current_epoch = get_current_epoch(state) + validator_index = get_active_validator_indices(state, current_epoch)[0] + privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] + + voluntary_exit = build_voluntary_exit( + state, + current_epoch, + validator_index, + privkey, + ) + + assert ( + current_epoch - state.validator_registry[validator_index].activation_epoch < + spec.PERSISTENT_COMMITTEE_PERIOD + ) + + pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False) + return pre_state, voluntary_exit, post_state diff --git a/tests/phase0/conftest.py b/test_libs/pyspec/tests/conftest.py similarity index 89% rename from tests/phase0/conftest.py rename to test_libs/pyspec/tests/conftest.py index 36a087941..bf9b1009b 100644 --- a/tests/phase0/conftest.py +++ b/test_libs/pyspec/tests/conftest.py @@ -1,8 +1,8 @@ import pytest -from build.phase0 import spec +from eth2spec.phase0 import spec -from tests.phase0.helpers import ( +from .helpers import ( create_genesis_state, ) @@ -14,7 +14,6 @@ MINIMAL_CONFIG = { "MIN_ATTESTATION_INCLUSION_DELAY": 2, "TARGET_COMMITTEE_SIZE": 4, "SLOTS_PER_EPOCH": 8, - "GENESIS_EPOCH": spec.GENESIS_SLOT // 8, "SLOTS_PER_HISTORICAL_ROOT": 64, "LATEST_RANDAO_MIXES_LENGTH": 64, "LATEST_ACTIVE_INDEX_ROOTS_LENGTH": 64, @@ -28,7 +27,8 @@ def overwrite_spec_config(config): if field == "LATEST_RANDAO_MIXES_LENGTH": spec.BeaconState.fields['latest_randao_mixes'][1] = config[field] elif field == "SHARD_COUNT": - spec.BeaconState.fields['latest_crosslinks'][1] = config[field] + spec.BeaconState.fields['current_crosslinks'][1] = config[field] + spec.BeaconState.fields['previous_crosslinks'][1] = config[field] elif field == "SLOTS_PER_HISTORICAL_ROOT": spec.BeaconState.fields['latest_block_roots'][1] = config[field] spec.BeaconState.fields['latest_state_roots'][1] = config[field] diff --git a/test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py new file mode 100644 index 000000000..fe694724a --- /dev/null +++ b/test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py @@ -0,0 +1,136 @@ +from copy import deepcopy +import pytest + +import eth2spec.phase0.spec as spec + +from eth2spec.phase0.state_transition import ( + state_transition, +) +from eth2spec.phase0.spec import ( + cache_state, + get_crosslink_deltas, + process_crosslinks, +) +from tests.helpers import ( + add_attestation_to_state, + build_empty_block_for_next_slot, + fill_aggregate_attestation, + get_crosslink_committee_for_attestation, + get_valid_attestation, + next_epoch, + next_slot, + set_bitfield_bit, +) + + +# mark entire file as 'crosslinks' +pytestmark = pytest.mark.crosslinks + + +def run_process_crosslinks(state, valid=True): + # transition state to slot before state transition + slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1 + block = build_empty_block_for_next_slot(state) + block.slot = slot + state_transition(state, block) + + # cache state before epoch transition + cache_state(state) + + post_state = deepcopy(state) + process_crosslinks(post_state) + + return state, post_state + + +def test_no_attestations(state): + pre_state, post_state = 
run_process_crosslinks(state) + + for shard in range(spec.SHARD_COUNT): + assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard] + + return pre_state, post_state + + +def test_single_crosslink_update_from_current_epoch(state): + next_epoch(state) + + attestation = get_valid_attestation(state) + + fill_aggregate_attestation(state, attestation) + add_attestation_to_state(state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) + + assert len(state.current_epoch_attestations) == 1 + + pre_state, post_state = run_process_crosslinks(state) + + shard = attestation.data.shard + assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard] + assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard] + + return pre_state, post_state + + +def test_single_crosslink_update_from_previous_epoch(state): + next_epoch(state) + + attestation = get_valid_attestation(state) + + fill_aggregate_attestation(state, attestation) + add_attestation_to_state(state, attestation, state.slot + spec.SLOTS_PER_EPOCH) + + assert len(state.previous_epoch_attestations) == 1 + + pre_state, post_state = run_process_crosslinks(state) + crosslink_deltas = get_crosslink_deltas(state) + + shard = attestation.data.shard + assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard] + assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard] + # ensure rewarded + for index in get_crosslink_committee_for_attestation(state, attestation.data): + assert crosslink_deltas[0][index] > 0 + assert crosslink_deltas[1][index] == 0 + + return pre_state, post_state + + +def test_double_late_crosslink(state): + next_epoch(state) + state.slot += 4 + + attestation_1 = get_valid_attestation(state) + fill_aggregate_attestation(state, attestation_1) + + # add attestation_1 in the next epoch + next_epoch(state) + add_attestation_to_state(state, attestation_1, state.slot + 1) + + for slot in range(spec.SLOTS_PER_EPOCH): + attestation_2 = get_valid_attestation(state) + if attestation_2.data.shard == attestation_1.data.shard: + break + next_slot(state) + fill_aggregate_attestation(state, attestation_2) + + # add attestation_2 in the next epoch after attestation_1 has + # already updated the relevant crosslink + next_epoch(state) + add_attestation_to_state(state, attestation_2, state.slot + 1) + + assert len(state.previous_epoch_attestations) == 1 + assert len(state.current_epoch_attestations) == 0 + + pre_state, post_state = run_process_crosslinks(state) + crosslink_deltas = get_crosslink_deltas(state) + + shard = attestation_2.data.shard + + # ensure that the current crosslinks were not updated by the second attestation + assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard] + # ensure no reward, only penalties for the failed crosslink + for index in get_crosslink_committee_for_attestation(state, attestation_2.data): + assert crosslink_deltas[0][index] == 0 + assert crosslink_deltas[1][index] > 0 + + return pre_state, post_state diff --git a/tests/phase0/helpers.py b/test_libs/pyspec/tests/helpers.py similarity index 70% rename from tests/phase0/helpers.py rename to test_libs/pyspec/tests/helpers.py index 6a7ffd5dd..e04409792 100644 --- a/tests/phase0/helpers.py +++ b/test_libs/pyspec/tests/helpers.py @@ -2,9 +2,12 @@ from copy import deepcopy from py_ecc import bls -import build.phase0.spec as spec -from build.phase0.utils.minimal_ssz import signed_root -from build.phase0.spec import ( 
+from eth2spec.phase0.state_transition import (
+    state_transition,
+)
+import eth2spec.phase0.spec as spec
+from eth2spec.utils.minimal_ssz import signing_root
+from eth2spec.phase0.spec import (
     # constants
     EMPTY_SIGNATURE,
     ZERO_HASH,
@@ -22,20 +25,22 @@ from build.phase0.spec import (
     # functions
     convert_to_indexed,
     get_active_validator_indices,
-    get_attestation_participants,
+    get_attesting_indices,
     get_block_root,
-    get_crosslink_committee_for_attestation,
     get_crosslink_committees_at_slot,
     get_current_epoch,
     get_domain,
     get_empty_block,
     get_epoch_start_slot,
     get_genesis_beacon_state,
+    get_previous_epoch,
+    get_shard_delta,
+    hash_tree_root,
     slot_to_epoch,
     verify_merkle_branch,
     hash,
 )
-from build.phase0.utils.merkle_minimal import (
+from eth2spec.utils.merkle_minimal import (
     calc_merkle_tree_from_leaves,
     get_merkle_proof,
     get_merkle_root,
@@ -47,6 +52,19 @@ pubkeys = [bls.privtopub(privkey) for privkey in privkeys]
 pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
 
 
+def set_bitfield_bit(bitfield, i):
+    """
+    Set the bit in ``bitfield`` at position ``i`` to ``1``.
+    """
+    byte_index = i // 8
+    bit_index = i % 8
+    return (
+        bitfield[:byte_index] +
+        bytes([bitfield[byte_index] | (1 << bit_index)]) +
+        bitfield[byte_index+1:]
+    )
+
+
 def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None):
     if not deposit_data_leaves:
         deposit_data_leaves = []
@@ -96,21 +114,13 @@ def create_genesis_state(num_validators, deposit_data_leaves=None):
     )
 
 
-def force_registry_change_at_next_epoch(state):
-    # artificially trigger registry update at next epoch transition
-    state.finalized_epoch = get_current_epoch(state) - 1
-    for crosslink in state.latest_crosslinks:
-        crosslink.epoch = state.finalized_epoch
-    state.validator_registry_update_epoch = state.finalized_epoch - 1
-
-
 def build_empty_block_for_next_slot(state):
     empty_block = get_empty_block()
     empty_block.slot = state.slot + 1
     previous_block_header = deepcopy(state.latest_block_header)
     if previous_block_header.state_root == spec.ZERO_HASH:
         previous_block_header.state_root = state.hash_tree_root()
-    empty_block.previous_block_root = signed_root(previous_block_header)
+    empty_block.previous_block_root = signing_root(previous_block_header)
     return empty_block
 
 
@@ -123,7 +133,7 @@ def build_deposit_data(state, pubkey, privkey, amount):
         signature=EMPTY_SIGNATURE,
     )
     signature = bls.sign(
-        message_hash=signed_root(deposit_data),
+        message_hash=signing_root(deposit_data),
        privkey=privkey,
         domain=get_domain(
             state,
@@ -137,28 +147,37 @@ def build_attestation_data(state, slot, shard):
     assert state.slot >= slot
 
-    block_root = build_empty_block_for_next_slot(state).previous_block_root
+    if slot == state.slot:
+        block_root = build_empty_block_for_next_slot(state).previous_block_root
+    else:
+        block_root = get_block_root(state, slot)
 
-    epoch_start_slot = get_epoch_start_slot(get_current_epoch(state))
-    if epoch_start_slot == slot:
+    current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state))
+    if slot < current_epoch_start_slot:
+        epoch_boundary_root = get_block_root(state, get_epoch_start_slot(get_previous_epoch(state)))
+    elif slot == current_epoch_start_slot:
         epoch_boundary_root = block_root
     else:
-        get_block_root(state, epoch_start_slot)
+        epoch_boundary_root = get_block_root(state, current_epoch_start_slot)
 
-    if slot < epoch_start_slot:
+    if slot < current_epoch_start_slot:
+        justified_epoch =
state.previous_justified_epoch justified_block_root = state.previous_justified_root else: + justified_epoch = state.current_justified_epoch justified_block_root = state.current_justified_root + crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks return AttestationData( slot=slot, shard=shard, beacon_block_root=block_root, - source_epoch=state.current_justified_epoch, + source_epoch=justified_epoch, source_root=justified_block_root, target_root=epoch_boundary_root, crosslink_data_root=spec.ZERO_HASH, - previous_crosslink=deepcopy(state.latest_crosslinks[shard]), + previous_crosslink_root=hash_tree_root(crosslinks[shard]), ) @@ -169,7 +188,7 @@ def build_voluntary_exit(state, epoch, validator_index, privkey): signature=EMPTY_SIGNATURE, ) voluntary_exit.signature = bls.sign( - message_hash=signed_root(voluntary_exit), + message_hash=signing_root(voluntary_exit), privkey=privkey, domain=get_domain( state=state, @@ -207,7 +226,7 @@ def build_deposit(state, def get_valid_proposer_slashing(state): current_epoch = get_current_epoch(state) - validator_index = get_active_validator_indices(state.validator_registry, current_epoch)[-1] + validator_index = get_active_validator_indices(state, current_epoch)[-1] privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey] slot = state.slot @@ -227,12 +246,12 @@ def get_valid_proposer_slashing(state): domain_type=spec.DOMAIN_BEACON_PROPOSER, ) header_1.signature = bls.sign( - message_hash=signed_root(header_1), + message_hash=signing_root(header_1), privkey=privkey, domain=domain, ) header_2.signature = bls.sign( - message_hash=signed_root(header_2), + message_hash=signing_root(header_2), privkey=privkey, domain=domain, ) @@ -247,7 +266,7 @@ def get_valid_proposer_slashing(state): def get_valid_attester_slashing(state): attestation_1 = get_valid_attestation(state) attestation_2 = deepcopy(attestation_1) - attestation_2.data.target_root = b'\x01'*32 + attestation_2.data.target_root = b'\x01' * 32 return AttesterSlashing( attestation_1=convert_to_indexed(state, attestation_1), @@ -255,10 +274,24 @@ def get_valid_attester_slashing(state): ) +def get_crosslink_committee_for_attestation(state, attestation_data): + """ + Return the crosslink committee corresponding to ``attestation_data``. 
+ """ + crosslink_committees = get_crosslink_committees_at_slot(state, attestation_data.slot) + return [committee for committee, shard in crosslink_committees if shard == attestation_data.shard][0] + + def get_valid_attestation(state, slot=None): if slot is None: slot = state.slot - shard = state.latest_start_shard + + if slot_to_epoch(slot) == get_current_epoch(state): + shard = (state.latest_start_shard + slot) % spec.SLOTS_PER_EPOCH + else: + previous_shard_delta = get_shard_delta(state, get_previous_epoch(state)) + shard = (state.latest_start_shard - previous_shard_delta + slot) % spec.SHARD_COUNT + attestation_data = build_attestation_data(state, slot, shard) crosslink_committee = get_crosslink_committee_for_attestation(state, attestation_data) @@ -273,7 +306,7 @@ def get_valid_attestation(state, slot=None): custody_bitfield=custody_bitfield, aggregate_signature=EMPTY_SIGNATURE, ) - participants = get_attestation_participants( + participants = get_attesting_indices( state, attestation.data, attestation.aggregation_bitfield, @@ -310,3 +343,27 @@ def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0) message_epoch=slot_to_epoch(attestation_data.slot), ) ) + + +def fill_aggregate_attestation(state, attestation): + crosslink_committee = get_crosslink_committee_for_attestation(state, attestation.data) + for i in range(len(crosslink_committee)): + attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i) + + +def add_attestation_to_state(state, attestation, slot): + block = build_empty_block_for_next_slot(state) + block.slot = slot + block.body.attestations.append(attestation) + state_transition(state, block) + + +def next_slot(state): + block = build_empty_block_for_next_slot(state) + state_transition(state, block) + + +def next_epoch(state): + block = build_empty_block_for_next_slot(state) + block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + state_transition(state, block) diff --git a/tests/phase0/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py similarity index 79% rename from tests/phase0/test_sanity.py rename to test_libs/pyspec/tests/test_sanity.py index b86187ec8..7ddd4d386 100644 --- a/tests/phase0/test_sanity.py +++ b/test_libs/pyspec/tests/test_sanity.py @@ -3,10 +3,10 @@ from copy import deepcopy import pytest from py_ecc import bls -import build.phase0.spec as spec +import eth2spec.phase0.spec as spec -from build.phase0.utils.minimal_ssz import signed_root -from build.phase0.spec import ( +from eth2spec.utils.minimal_ssz import signing_root +from eth2spec.phase0.spec import ( # constants EMPTY_SIGNATURE, ZERO_HASH, @@ -25,24 +25,26 @@ from build.phase0.spec import ( advance_slot, cache_state, set_balance, + slot_to_epoch, verify_merkle_branch, hash, ) -from build.phase0.state_transition import ( +from eth2spec.phase0.state_transition import ( state_transition, ) -from build.phase0.utils.merkle_minimal import ( +from eth2spec.utils.merkle_minimal import ( calc_merkle_tree_from_leaves, get_merkle_proof, get_merkle_root, ) -from tests.phase0.helpers import ( +from .helpers import ( build_deposit_data, build_empty_block_for_next_slot, - force_registry_change_at_next_epoch, + fill_aggregate_attestation, get_valid_attestation, get_valid_attester_slashing, get_valid_proposer_slashing, + next_slot, privkeys, pubkeys, ) @@ -52,6 +54,33 @@ from tests.phase0.helpers import ( pytestmark = pytest.mark.sanity +def check_finality(state, + prev_state, + current_justified_changed, + previous_justified_changed, 
+                    finalized_changed):
+    if current_justified_changed:
+        assert state.current_justified_epoch > prev_state.current_justified_epoch
+        assert state.current_justified_root != prev_state.current_justified_root
+    else:
+        assert state.current_justified_epoch == prev_state.current_justified_epoch
+        assert state.current_justified_root == prev_state.current_justified_root
+
+    if previous_justified_changed:
+        assert state.previous_justified_epoch > prev_state.previous_justified_epoch
+        assert state.previous_justified_root != prev_state.previous_justified_root
+    else:
+        assert state.previous_justified_epoch == prev_state.previous_justified_epoch
+        assert state.previous_justified_root == prev_state.previous_justified_root
+
+    if finalized_changed:
+        assert state.finalized_epoch > prev_state.finalized_epoch
+        assert state.finalized_root != prev_state.finalized_root
+    else:
+        assert state.finalized_epoch == prev_state.finalized_epoch
+        assert state.finalized_root == prev_state.finalized_root
+
+
 def test_slot_transition(state):
     test_state = deepcopy(state)
     cache_state(test_state)
@@ -116,6 +145,33 @@ def test_empty_epoch_transition_not_finalizing(state):
     return state, [block], test_state
 
 
+def test_full_attestations_finalizing(state):
+    test_state = deepcopy(state)
+
+    for slot in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
+        next_slot(test_state)
+
+    for epoch in range(5):
+        for slot in range(spec.SLOTS_PER_EPOCH):
+            attestation = get_valid_attestation(test_state, test_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY)
+            fill_aggregate_attestation(test_state, attestation)
+            block = build_empty_block_for_next_slot(test_state)
+            block.body.attestations.append(attestation)
+            state_transition(test_state, block)
+
+        if epoch == 0:
+            check_finality(test_state, state, False, False, False)
+        elif epoch == 1:
+            check_finality(test_state, state, False, False, False)
+        elif epoch == 2:
+            check_finality(test_state, state, True, False, False)
+        elif epoch == 3:
+            check_finality(test_state, state, True, True, False)
+        elif epoch == 4:
+            check_finality(test_state, state, True, True, True)
+
+
 def test_proposer_slashing(state):
     test_state = deepcopy(state)
     proposer_slashing = get_valid_proposer_slashing(state)
@@ -128,11 +184,9 @@ def test_proposer_slashing(state):
     block.body.proposer_slashings.append(proposer_slashing)
     state_transition(test_state, block)
 
-    assert not state.validator_registry[validator_index].initiated_exit
     assert not state.validator_registry[validator_index].slashed
 
     slashed_validator = test_state.validator_registry[validator_index]
-    assert not slashed_validator.initiated_exit
     assert slashed_validator.slashed
     assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
     assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
@@ -154,18 +208,16 @@ def test_attester_slashing(state):
     block.body.attester_slashings.append(attester_slashing)
     state_transition(test_state, block)
 
-    assert not state.validator_registry[validator_index].initiated_exit
     assert not state.validator_registry[validator_index].slashed
 
     slashed_validator = test_state.validator_registry[validator_index]
-    assert not slashed_validator.initiated_exit
     assert slashed_validator.slashed
     assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
     assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
 
     # lost whistleblower reward
     assert get_balance(test_state, validator_index) < get_balance(state, validator_index)
 
-    proposer_index = get_beacon_proposer_index(test_state, test_state.slot)
+
proposer_index = get_beacon_proposer_index(test_state) # gained whistleblower reward assert ( get_balance(test_state, proposer_index) > @@ -265,6 +317,9 @@ def test_attestation(state): assert len(test_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1 + proposer_index = get_beacon_proposer_index(test_state) + assert test_state.balances[proposer_index] > state.balances[proposer_index] + # # Epoch transition should move to previous_epoch_attestations # @@ -283,14 +338,12 @@ def test_attestation(state): def test_voluntary_exit(state): pre_state = deepcopy(state) validator_index = get_active_validator_indices( - pre_state.validator_registry, + pre_state, get_current_epoch(pre_state) )[-1] # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - # artificially trigger registry update at next epoch transition - force_registry_change_at_next_epoch(pre_state) post_state = deepcopy(pre_state) @@ -300,7 +353,7 @@ def test_voluntary_exit(state): signature=EMPTY_SIGNATURE, ) voluntary_exit.signature = bls.sign( - message_hash=signed_root(voluntary_exit), + message_hash=signing_root(voluntary_exit), privkey=privkeys[validator_index], domain=get_domain( state=pre_state, @@ -315,9 +368,7 @@ def test_voluntary_exit(state): initiate_exit_block.body.voluntary_exits.append(voluntary_exit) state_transition(post_state, initiate_exit_block) - assert not pre_state.validator_registry[validator_index].initiated_exit - assert post_state.validator_registry[validator_index].initiated_exit - assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH + assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH # # Process within epoch transition @@ -331,47 +382,11 @@ def test_voluntary_exit(state): return pre_state, [initiate_exit_block, exit_block], post_state -def test_no_exit_churn_too_long_since_change(state): - pre_state = deepcopy(state) - validator_index = get_active_validator_indices( - pre_state.validator_registry, - get_current_epoch(pre_state) - )[-1] - - # - # setup pre_state - # - # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit - pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - # artificially trigger registry update at next epoch transition - force_registry_change_at_next_epoch(pre_state) - # make epochs since registry update greater than LATEST_SLASHED_EXIT_LENGTH - pre_state.validator_registry_update_epoch = ( - get_current_epoch(pre_state) - spec.LATEST_SLASHED_EXIT_LENGTH - ) - # set validator to have previously initiated exit - pre_state.validator_registry[validator_index].initiated_exit = True - - post_state = deepcopy(pre_state) - - # - # Process registry change but ensure no exit - # - block = build_empty_block_for_next_slot(post_state) - block.slot += spec.SLOTS_PER_EPOCH - state_transition(post_state, block) - - assert post_state.validator_registry_update_epoch == get_current_epoch(post_state) - 1 - assert post_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH - - return pre_state, [block], post_state - - def test_transfer(state): pre_state = deepcopy(state) current_epoch = get_current_epoch(pre_state) - sender_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1] - recipient_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0] + sender_index = 
 def test_transfer(state):
     pre_state = deepcopy(state)
     current_epoch = get_current_epoch(pre_state)
-    sender_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1]
-    recipient_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0]
+    sender_index = get_active_validator_indices(pre_state, current_epoch)[-1]
+    recipient_index = get_active_validator_indices(pre_state, current_epoch)[0]
     transfer_pubkey = pubkeys[-1]
     transfer_privkey = privkeys[-1]
     amount = get_balance(pre_state, sender_index)
@@ -386,7 +401,7 @@ def test_transfer(state):
         signature=EMPTY_SIGNATURE,
     )
     transfer.signature = bls.sign(
-        message_hash=signed_root(transfer),
+        message_hash=signing_root(transfer),
         privkey=transfer_privkey,
         domain=get_domain(
             state=pre_state,
@@ -417,11 +432,11 @@ def test_transfer(state):
     return pre_state, [block], post_state
 
 
-def test_ejection(state):
+def test_balance_driven_status_transitions(state):
     pre_state = deepcopy(state)
     current_epoch = get_current_epoch(pre_state)
-    validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[-1]
+    validator_index = get_active_validator_indices(pre_state, current_epoch)[-1]
 
     assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
 
@@ -436,7 +451,7 @@ def test_ejection(state):
     block.slot += spec.SLOTS_PER_EPOCH
     state_transition(post_state, block)
 
-    assert post_state.validator_registry[validator_index].initiated_exit == True
+    assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
 
     return pre_state, [block], post_state
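The signed_root -> signing_root rename here and in test_voluntary_exit above tracks the SSZ terminology change: the value is the hash tree root of the container with its trailing signature field omitted, i.e. the message a signer commits to, not the root of something already signed. A minimal sketch of the idea (illustrative only; hash_tree_root_of_fields is a made-up stand-in, the real computation lives in the SSZ utilities):

    def signing_root(obj):
        # Commit to every field except the signature itself, so the
        # signature can be filled in afterwards without changing the root.
        unsigned_fields = [f for f in obj.fields if f != 'signature']
        return hash_tree_root_of_fields(obj, unsigned_fields)  # stand-in for the real SSZ routine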
diff --git a/tests/phase0/block_processing/test_voluntary_exit.py b/tests/phase0/block_processing/test_voluntary_exit.py
deleted file mode 100644
index 6adc81464..000000000
--- a/tests/phase0/block_processing/test_voluntary_exit.py
+++ /dev/null
@@ -1,175 +0,0 @@
-from copy import deepcopy
-import pytest
-
-import build.phase0.spec as spec
-
-from build.phase0.spec import (
-    get_active_validator_indices,
-    get_current_epoch,
-    process_voluntary_exit,
-)
-from tests.phase0.helpers import (
-    build_voluntary_exit,
-    pubkey_to_privkey,
-)
-
-
-# mark entire file as 'voluntary_exits'
-pytestmark = pytest.mark.voluntary_exits
-
-
-def test_success(state):
-    pre_state = deepcopy(state)
-    #
-    # setup pre_state
-    #
-    # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
-    pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
-
-    #
-    # build voluntary exit
-    #
-    current_epoch = get_current_epoch(pre_state)
-    validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0]
-    privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey]
-
-    voluntary_exit = build_voluntary_exit(
-        pre_state,
-        current_epoch,
-        validator_index,
-        privkey,
-    )
-
-    post_state = deepcopy(pre_state)
-
-    #
-    # test valid exit
-    #
-    process_voluntary_exit(post_state, voluntary_exit)
-
-    assert not pre_state.validator_registry[validator_index].initiated_exit
-    assert post_state.validator_registry[validator_index].initiated_exit
-
-    return pre_state, voluntary_exit, post_state
-
-
-def test_validator_not_active(state):
-    pre_state = deepcopy(state)
-    current_epoch = get_current_epoch(pre_state)
-    validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0]
-    privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey]
-
-    #
-    # setup pre_state
-    #
-    pre_state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
-
-    #
-    # build and test voluntary exit
-    #
-    voluntary_exit = build_voluntary_exit(
-        pre_state,
-        current_epoch,
-        validator_index,
-        privkey,
-    )
-
-    with pytest.raises(AssertionError):
-        process_voluntary_exit(pre_state, voluntary_exit)
-
-    return pre_state, voluntary_exit, None
-
-
-def test_validator_already_exited(state):
-    pre_state = deepcopy(state)
-    #
-    # setup pre_state
-    #
-    # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit
-    pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
-
-    current_epoch = get_current_epoch(pre_state)
-    validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0]
-    privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey]
-
-    # but validator already has exited
-    pre_state.validator_registry[validator_index].exit_epoch = current_epoch + 2
-
-    #
-    # build voluntary exit
-    #
-    voluntary_exit = build_voluntary_exit(
-        pre_state,
-        current_epoch,
-        validator_index,
-        privkey,
-    )
-
-    with pytest.raises(AssertionError):
-        process_voluntary_exit(pre_state, voluntary_exit)
-
-    return pre_state, voluntary_exit, None
-
-
-def test_validator_already_initiated_exit(state):
-    pre_state = deepcopy(state)
-    #
-    # setup pre_state
-    #
-    # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit
-    pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
-
-    current_epoch = get_current_epoch(pre_state)
-    validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0]
-    privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey]
-
-    # but validator already has initiated exit
-    pre_state.validator_registry[validator_index].initiated_exit = True
-
-    #
-    # build voluntary exit
-    #
-    voluntary_exit = build_voluntary_exit(
-        pre_state,
-        current_epoch,
-        validator_index,
-        privkey,
-    )
-
-    with pytest.raises(AssertionError):
-        process_voluntary_exit(pre_state, voluntary_exit)
-
-    return pre_state, voluntary_exit, None
-
-
-def test_validator_not_active_long_enough(state):
-    pre_state = deepcopy(state)
-    #
-    # setup pre_state
-    #
-    current_epoch = get_current_epoch(pre_state)
-    validator_index = get_active_validator_indices(pre_state.validator_registry, current_epoch)[0]
-    privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey]
-
-    # but validator already has initiated exit
-    pre_state.validator_registry[validator_index].initiated_exit = True
-
-    #
-    # build voluntary exit
-    #
-    voluntary_exit = build_voluntary_exit(
-        pre_state,
-        current_epoch,
-        validator_index,
-        privkey,
-    )
-
-    assert (
-        current_epoch - pre_state.validator_registry[validator_index].activation_epoch <
-        spec.PERSISTENT_COMMITTEE_PERIOD
-    )
-
-    with pytest.raises(AssertionError):
-        process_voluntary_exit(pre_state, voluntary_exit)
-
-    return pre_state, voluntary_exit, None
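This file is deleted wholesale rather than updated: every test in it asserts on the removed initiated_exit flag, and several exercise churn rules that no longer exist. Equivalent coverage for the success path under the new semantics would assert on exit_epoch instead; a sketch of a hypothetical replacement, mirroring the deleted test_success with the new get_active_validator_indices(state, epoch) signature (not part of this diff):

    def test_success(state):
        pre_state = deepcopy(state)
        # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
        pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

        current_epoch = get_current_epoch(pre_state)
        validator_index = get_active_validator_indices(pre_state, current_epoch)[0]
        privkey = pubkey_to_privkey[pre_state.validator_registry[validator_index].pubkey]
        voluntary_exit = build_voluntary_exit(pre_state, current_epoch, validator_index, privkey)

        post_state = deepcopy(pre_state)
        process_voluntary_exit(post_state, voluntary_exit)

        # exit_epoch replaces the removed initiated_exit flag as the signal
        assert pre_state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
        assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
        return pre_state, voluntary_exit, post_state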
diff --git a/utils/phase0/jsonize.py b/utils/phase0/jsonize.py
deleted file mode 100644
index 816192ec6..000000000
--- a/utils/phase0/jsonize.py
+++ /dev/null
@@ -1,52 +0,0 @@
-from .minimal_ssz import hash_tree_root
-
-
-def jsonize(value, typ, include_hash_tree_roots=False):
-    if isinstance(typ, str) and typ[:4] == 'uint':
-        return value
-    elif typ == 'bool':
-        assert value in (True, False)
-        return value
-    elif isinstance(typ, list):
-        return [jsonize(element, typ[0], include_hash_tree_roots) for element in value]
-    elif isinstance(typ, str) and typ[:4] == 'byte':
-        return '0x' + value.hex()
-    elif hasattr(typ, 'fields'):
-        ret = {}
-        for field, subtype in typ.fields.items():
-            ret[field] = jsonize(getattr(value, field), subtype, include_hash_tree_roots)
-            if include_hash_tree_roots:
-                ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex()
-        if include_hash_tree_roots:
-            ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex()
-        return ret
-    else:
-        print(value, typ)
-        raise Exception("Type not recognized")
-
-
-def dejsonize(json, typ):
-    if isinstance(typ, str) and typ[:4] == 'uint':
-        return json
-    elif typ == 'bool':
-        assert json in (True, False)
-        return json
-    elif isinstance(typ, list):
-        return [dejsonize(element, typ[0]) for element in json]
-    elif isinstance(typ, str) and typ[:4] == 'byte':
-        return bytes.fromhex(json[2:])
-    elif hasattr(typ, 'fields'):
-        temp = {}
-        for field, subtype in typ.fields.items():
-            temp[field] = dejsonize(json[field], subtype)
-            if field + "_hash_tree_root" in json:
-                assert(json[field + "_hash_tree_root"][2:] ==
-                       hash_tree_root(temp[field], subtype).hex())
-        ret = typ(**temp)
-        if "hash_tree_root" in json:
-            assert(json["hash_tree_root"][2:] ==
-                   hash_tree_root(ret, typ).hex())
-        return ret
-    else:
-        print(json, typ)
-        raise Exception("Type not recognized")
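The deleted jsonize/dejsonize pair were mutual inverses, with optional *_hash_tree_root annotations that dejsonize re-verifies on the way back in. If these helpers resurface under the new test_libs layout, the property worth pinning down is the round trip; a sketch of a hypothetical check, using the names exactly as they appear in the deleted file:

    def check_json_roundtrip(value, typ):
        # Encode with hash-tree-root annotations, decode, and confirm the
        # decoded object hashes identically to the original. dejsonize
        # already asserts on the embedded per-field roots while decoding.
        encoded = jsonize(value, typ, include_hash_tree_roots=True)
        decoded = dejsonize(encoded, typ)
        assert hash_tree_root(decoded, typ) == hash_tree_root(value, typ)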