Mirror of https://github.com/status-im/eth2.0-specs.git (synced 2025-02-27 01:30:36 +00:00)

Commit f8d9b0d283: Merge branch 'dev' into roots-removal
.circleci/config.yml

@@ -35,13 +35,13 @@ commands:
     description: "Restore the cache with pyspec keys"
     steps:
       - restore_cached_venv:
-          venv_name: v3-pyspec-bump2
+          venv_name: v4-pyspec
           reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}
   save_pyspec_cached_venv:
     description: "Save a venv into a cache with pyspec keys"
     steps:
       - save_cached_venv:
-          venv_name: v3-pyspec-bump2
+          venv_name: v4-pyspec
           reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}
           venv_path: ./test_libs/pyspec/venv
   restore_deposit_contract_cached_venv:
.codespell-whitelist (new file)

@@ -0,0 +1,2 @@
+uint
+byteorder
Makefile

@@ -2,17 +2,20 @@ SPEC_DIR = ./specs
 SCRIPT_DIR = ./scripts
 TEST_LIBS_DIR = ./test_libs
 PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
-YAML_TEST_DIR = ./eth2.0-spec-tests/tests
+TEST_VECTOR_DIR = ./eth2.0-spec-tests/tests
 GENERATOR_DIR = ./test_generators
 DEPOSIT_CONTRACT_DIR = ./deposit_contract
 CONFIGS_DIR = ./configs

 # Collect a list of generator names
-GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/)))
-# Map this list of generator paths to a list of test output paths
-YAML_TEST_TARGETS = $(patsubst $(GENERATOR_DIR)/%, $(YAML_TEST_DIR)/%, $(GENERATORS))
+GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/.)))
+# Map this list of generator paths to "gen_{generator name}" entries
+GENERATOR_TARGETS = $(patsubst $(GENERATOR_DIR)/%/, gen_%, $(GENERATORS))
+GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS))

 # To check generator matching:
 #$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])

 PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py
 PY_SPEC_PHASE_0_DEPS = $(SPEC_DIR)/core/0_*.md

@@ -24,14 +27,14 @@ PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) $(PY_SPEC_PHASE_1_TARGETS)
 COV_HTML_OUT=.htmlcov
 COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html

-.PHONY: clean all test citest lint gen_yaml_tests pyspec phase0 phase1 install_test open_cov \
+.PHONY: clean partial_clean all test citest lint generate_tests pyspec phase0 phase1 install_test open_cov \
        install_deposit_contract_test test_deposit_contract compile_deposit_contract

-all: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR) $(YAML_TEST_TARGETS)
+all: $(PY_SPEC_ALL_TARGETS)

 # deletes everything except the venvs
 partial_clean:
-	rm -rf $(YAML_TEST_DIR)
+	rm -rf $(TEST_VECTOR_DIR)
+	rm -rf $(GENERATOR_VENVS)
 	rm -rf $(PY_SPEC_DIR)/.pytest_cache
 	rm -rf $(PY_SPEC_ALL_TARGETS)

@@ -44,8 +47,8 @@ clean: partial_clean
 	rm -rf $(PY_SPEC_DIR)/venv
 	rm -rf $(DEPOSIT_CONTRACT_DIR)/venv

-# "make gen_yaml_tests" to run generators
-gen_yaml_tests: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_TARGETS)
+# "make generate_tests" to run all generators
+generate_tests: $(PY_SPEC_ALL_TARGETS) $(GENERATOR_TARGETS)

 # installs the packages to run pyspec tests
 install_test:

@@ -86,12 +89,12 @@ $(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS)
 	python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/validator/0_beacon-chain-validator.md $@

 $(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS)
-	python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/core/0_fork-choice.md $@
+	python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_shard-data-chains.md $(SPEC_DIR)/light_client/merkle_proofs.md $@

 CURRENT_DIR = ${CURDIR}

-# The function that builds a set of suite files, by calling a generator for the given type (param 1)
-define build_yaml_tests
+# Runs a generator, identified by param 1
+define run_generator
 	# Started!
 	# Create output directory
 	# Navigate to the generator
@@ -100,24 +103,25 @@ define run_generator
 	# Install all the necessary requirements
 	# Run the generator. The generator is assumed to have a "main.py" file.
 	# We output to the tests dir (generator program should accept a "-o <filepath>" argument).
+	# `-l minimal general` can be added to the generator call to filter to smaller configs, when testing.
 	echo "generator $(1) started"; \
-	mkdir -p $(YAML_TEST_DIR)$(1); \
-	cd $(GENERATOR_DIR)$(1); \
+	mkdir -p $(TEST_VECTOR_DIR); \
+	cd $(GENERATOR_DIR)/$(1); \
 	if ! test -d venv; then python3 -m venv venv; fi; \
 	. venv/bin/activate; \
 	pip3 install -r requirements.txt; \
-	python3 main.py -o $(CURRENT_DIR)/$(YAML_TEST_DIR)$(1) -c $(CURRENT_DIR)/$(CONFIGS_DIR); \
+	python3 main.py -o $(CURRENT_DIR)/$(TEST_VECTOR_DIR) -c $(CURRENT_DIR)/$(CONFIGS_DIR); \
 	echo "generator $(1) finished"
 endef

 # The tests dir itself is simply built by creating the directory (recursively creating deeper directories if necessary)
-$(YAML_TEST_DIR):
-	$(info creating directory, to output yaml targets to: ${YAML_TEST_TARGETS})
+$(TEST_VECTOR_DIR):
+	$(info creating test output directory, for generators: ${GENERATOR_TARGETS})
 	mkdir -p $@
-$(YAML_TEST_DIR)/:
-	$(info ignoring duplicate yaml tests dir)
+$(TEST_VECTOR_DIR)/:
+	$(info ignoring duplicate tests dir)

-# For any target within the tests dir, build it using the build_yaml_tests function.
+# For any generator, build it using the run_generator function.
 # (creation of output dir is a dependency)
-$(YAML_TEST_DIR)%: $(PY_SPEC_ALL_TARGETS) $(YAML_TEST_DIR)
-	$(call build_yaml_tests,$*)
+gen_%: $(PY_SPEC_ALL_TARGETS) $(TEST_VECTOR_DIR)
+	$(call run_generator,$*)
README.md

@@ -1,6 +1,6 @@
 # Ethereum 2.0 Specifications

-[](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[](https://discord.gg/hpFs23p) [](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

 To learn more about sharding and Ethereum 2.0 (Serenity), see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).

@@ -47,8 +47,15 @@ The following are the broad design goals for Ethereum 2.0:
 * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain)

+## Useful external resources
+
+* [Design Rationale](https://notes.ethereum.org/s/rkhCgQteN#)
+* [Phase 0 Onboarding Document](https://notes.ethereum.org/s/Bkn3zpwxB)
+
 ## For spec contributors

 Documentation on the different components used during spec writing can be found here:
 * [YAML Test Generators](test_generators/README.md)
 * [Executable Python Spec, with Py-tests](test_libs/pyspec/README.md)
configs/README.md (new file)

# Configs

This directory contains a set of constants presets used for testing, testnets, and mainnet.

A preset file contains all the constants known for its target.
Constants for later forks can be ignored; e.g. a client that currently supports only Phase 0 can ignore the Phase 1 constants.

## Forking

Configs are not replaced, but extended with forks. This supports syncing from one state to the other over a fork boundary, without hot-swapping a config.
Instead, for forks that change a constant, the constant name is prefixed with a short abbreviation of the fork.

Over time, the need to sync an older state may be deprecated.
In that case, the prefix on the new constant may be removed, and the old constant is given a special name before being removed completely.

A previous iteration of forking made use of "timelines", but this collides with the definitions used in the spec (constants for special forking slots, etc.)
and was not integrated sufficiently into any of the spec tools or implementations.
Instead, the config now essentially doubles as a fork definition: changing the value of e.g. `PHASE_1_GENESIS_SLOT` changes the fork.

Another reason to prefer forking through constants is the ability to program a forking moment based on context, instead of being limited to a static slot number.

## Format

Each preset is a key-value mapping.

**Key**: the name of the constant, an `UPPER_SNAKE_CASE` (a.k.a. "macro case") formatted string.

**Value** can be either:
- an unsigned integer of up to 64 bits (inclusive)
- a hexadecimal string, prefixed with `0x`

Presets may contain comments to describe the values.

See [`mainnet.yaml`](./mainnet.yaml) for a complete example.
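
To make the format concrete, here is an editor's sketch of loading and typing a preset in Python. The PyYAML dependency and the exact file path are assumptions, not part of this diff:

```python
# Editor's sketch: load a constants preset and apply the typing rules above.
# Assumes PyYAML is installed and a preset file exists at the given path.
import yaml

def load_preset(path: str) -> dict:
    with open(path) as f:
        raw = yaml.safe_load(f)
    preset = {}
    for name, value in raw.items():
        if isinstance(value, str) and value.startswith('0x'):
            preset[name] = bytes.fromhex(value[2:])  # hexadecimal string value
        else:
            preset[name] = int(value)  # unsigned integer, up to 64 bits inclusive
    return preset

# Hypothetical usage:
# constants = load_preset('configs/mainnet.yaml')
# constants['SHARD_COUNT']  # -> an int
```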
configs/constant_presets/README.md (deleted)

# Constant Presets

This directory contains a set of constants presets used for testing, testnets, and mainnet.

A preset file contains all the constants known for its target.
Later-fork constants can be ignored, e.g. ignore phase1 constants as a client that only supports phase 0 currently.

## Format

Each preset is a key-value mapping.

**Key**: an `UPPER_SNAKE_CASE` (a.k.a. "macro case") formatted string, name of the constant.

**Value** can be either:
- an unsigned integer number, can be up to 64 bits (incl.)
- a hexadecimal string, prefixed with `0x`

Presets may contain comments to describe the values.

See [`mainnet.yaml`](./mainnet.yaml) for a complete example.
Fork timelines README (deleted)

# Fork timelines

This directory contains a set of fork timelines used for testing, testnets, and mainnet.

A timeline file contains all the forks known for its target.
Later forks can be ignored, e.g. ignore fork `phase1` as a client that only supports Phase 0 currently.

## Format

Each preset is a key-value mapping.

**Key**: a `lower_snake_case` (a.k.a. "python case") formatted string, name of the fork.

**Value**: an unsigned integer number, epoch number of activation of the fork.

Timelines may contain comments to describe the values.

See [`mainnet.yaml`](./mainnet.yaml) for a complete example.
Mainnet fork timeline (deleted)

# Mainnet fork timeline

# Equal to GENESIS_EPOCH
phase0: 67108864

# Example 1:
# phase0_funny_fork_name: 67116000

# Example 2:
# Should be equal to PHASE_1_FORK_EPOCH
# (placeholder in example value here)
# phase1: 67163000
Testing fork timeline (deleted)

# Testing fork timeline

# Equal to GENESIS_EPOCH
phase0: 536870912

# No other forks considered in testing yet (to be implemented)
@@ -74,6 +74,10 @@ MAX_EPOCHS_PER_CROSSLINK: 4
 MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
+# [customized] 2**12 (= 4,096) epochs
+EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 4096
+# 2**2 (= 4) epochs
+EPOCHS_PER_CUSTODY_PERIOD: 4
+# 2**2 (= 4) epochs
+CUSTODY_PERIOD_TO_RANDAO_PADDING: 4


 # State vector lengths
@@ -129,3 +133,13 @@ DOMAIN_TRANSFER: 0x05000000
 DOMAIN_CUSTODY_BIT_CHALLENGE: 0x06000000
 DOMAIN_SHARD_PROPOSER: 0x80000000
 DOMAIN_SHARD_ATTESTER: 0x81000000
+
+
+# Phase 1
+# ---------------------------------------------------------------
+SHARD_SLOTS_PER_BEACON_SLOT: 2
+EPOCHS_PER_SHARD_PERIOD: 4
+# PHASE_1_FORK_EPOCH >= EPOCHS_PER_SHARD_PERIOD * 2
+PHASE_1_FORK_EPOCH: 8
+# PHASE_1_FORK_SLOT = PHASE_1_FORK_EPOCH * SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH
+PHASE_1_FORK_SLOT: 128
scripts/build_spec.py

@@ -37,7 +37,10 @@ from eth2spec.utils.bls import (
 from eth2spec.utils.hash_function import hash
 '''
 PHASE1_IMPORTS = '''from typing import (
-    Any, Dict, Optional, Set, Sequence, MutableSequence, Tuple,
+    Any, Dict, Optional, Set, Sequence, MutableSequence, NewType, Tuple, Union,
 )
+from math import (
+    log2,
+)

 from dataclasses import (
@@ -48,19 +51,30 @@ from dataclasses import (
 from eth2spec.utils.ssz.ssz_impl import (
     hash_tree_root,
     signing_root,
-    is_empty,
+    is_zero,
 )
 from eth2spec.utils.ssz.ssz_typing import (
-    uint64, bit, boolean, Container, List, Vector, Bytes, BytesN,
-    Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
+    BasicValue, Elements, BaseBytes, BaseList, SSZType,
+    Container, List, Vector, Bytes, BytesN, Bitlist, Bitvector, Bits,
+    Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96,
+    uint64, bit, boolean,
 )
 from eth2spec.utils.bls import (
     bls_aggregate_pubkeys,
     bls_verify,
     bls_verify_multiple,
+    bls_signature_to_G2,
 )

 from eth2spec.utils.hash_function import hash


+SSZVariableName = str
+GeneralizedIndex = NewType('GeneralizedIndex', int)
 '''
+SUNDRY_CONSTANTS_FUNCTIONS = '''
+def ceillog2(x: uint64) -> int:
+    return (x - 1).bit_length()
+'''
 SUNDRY_FUNCTIONS = '''
 # Monkey patch hash cache
@@ -111,6 +125,13 @@ def apply_constants_preset(preset: Dict[str, Any]) -> None:
 '''


+def remove_for_phase1(functions: Dict[str, str]):
+    for key, value in functions.items():
+        lines = value.split("\n")
+        lines = filter(lambda s: "[to be removed in phase 1]" not in s, lines)
+        functions[key] = "\n".join(lines)
+
+
 def strip_comments(raw: str) -> str:
     comment_line_regex = re.compile(r'^\s+# ')
     lines = raw.split('\n')
@@ -141,10 +162,15 @@ def objects_to_spec(functions: Dict[str, str],
             ]
         )
    )
+    for k in list(functions):
+        if "ceillog2" in k:
+            del functions[k]
     functions_spec = '\n\n'.join(functions.values())
     for k in list(constants.keys()):
         if k.startswith('DOMAIN_'):
             constants[k] = f"DomainType(({constants[k]}).to_bytes(length=4, byteorder='little'))"
+        if k == "BLS12_381_Q":
+            constants[k] += " # noqa: E501"
     constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, constants[x]), constants))
     ssz_objects_instantiation_spec = '\n\n'.join(ssz_objects.values())
     ssz_objects_reinitialization_spec = (
@@ -157,6 +183,7 @@ def objects_to_spec(functions: Dict[str, str],
     spec = (
         imports
         + '\n\n' + new_type_definitions
+        + '\n' + SUNDRY_CONSTANTS_FUNCTIONS
         + '\n\n' + constants_spec
         + '\n\n\n' + ssz_objects_instantiation_spec
         + '\n\n' + functions_spec
@@ -186,13 +213,13 @@ ignored_dependencies = [
     'bit', 'boolean', 'Vector', 'List', 'Container', 'Hash', 'BLSPubkey', 'BLSSignature', 'Bytes', 'BytesN'
     'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
     'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
-    'bytes'  # to be removed after updating spec doc
+    'bytes', 'byte', 'BytesN'  # to be removed after updating spec doc
 ]


 def dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:
     """
-    Determines which SSZ Object is depenedent on which other and orders them appropriately
+    Determines which SSZ Object is dependent on which other and orders them appropriately
     """
     items = list(objects.items())
     for key, value in items:
@@ -263,16 +290,23 @@ def build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str,


 def build_phase1_spec(phase0_sourcefile: str,
+                      fork_choice_sourcefile: str,
                       phase1_custody_sourcefile: str,
                       phase1_shard_sourcefile: str,
-                      fork_choice_sourcefile: str,
+                      merkle_proofs_sourcefile: str,
                       outfile: str=None) -> Optional[str]:
-    phase0_spec = get_spec(phase0_sourcefile)
-    phase1_custody = get_spec(phase1_custody_sourcefile)
-    phase1_shard_data = get_spec(phase1_shard_sourcefile)
-    fork_choice_spec = get_spec(fork_choice_sourcefile)
-    spec_objects = phase0_spec
-    for value in [phase1_custody, phase1_shard_data, fork_choice_spec]:
+    all_sourcefiles = (
+        phase0_sourcefile,
+        fork_choice_sourcefile,
+        phase1_custody_sourcefile,
+        phase1_shard_sourcefile,
+        merkle_proofs_sourcefile,
+    )
+    all_spescs = [get_spec(spec) for spec in all_sourcefiles]
+    for spec in all_spescs:
+        remove_for_phase1(spec[0])
+    spec_objects = all_spescs[0]
+    for value in all_spescs[1:]:
         spec_objects = combine_spec_objects(spec_objects, value)
     spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS)
     if outfile is not None:
@@ -285,17 +319,18 @@ if __name__ == '__main__':
     description = '''
 Build the specs from the md docs.
 If building phase 0:
-    1st argument is input spec.md
-    2nd argument is input fork_choice.md
-    3rd argument is input validator_guide.md
+    1st argument is input /core/0_beacon-chain.md
+    2nd argument is input /core/0_fork-choice.md
+    3rd argument is input /core/0_beacon-chain-validator.md
     4th argument is output spec.py

 If building phase 1:
-    1st argument is input spec_phase0.md
-    2nd argument is input spec_phase1_custody.md
-    3rd argument is input spec_phase1_shard_data.md
-    4th argument is input fork_choice.md
-    5th argument is output spec.py
+    1st argument is input /core/0_beacon-chain.md
+    2nd argument is input /core/0_fork-choice.md
+    3rd argument is input /core/1_custody-game.md
+    4th argument is input /core/1_shard-data-chains.md
+    5th argument is input /light_client/merkle_proofs.md
+    6th argument is output spec.py
 '''
     parser = ArgumentParser(description=description)
     parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #")
@@ -308,10 +343,15 @@ If building phase 1:
         else:
             print(" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.")
     elif args.phase == 1:
-        if len(args.files) == 5:
+        if len(args.files) == 6:
             build_phase1_spec(*args.files)
         else:
-            print(" Phase 1 requires 4 input files as well as an output file: "
-                  + "(phase0.md and phase1.md, phase1.md, fork_choice.md, output.py)")
+            print(
+                " Phase 1 requires input files as well as an output file:\n"
+                "\t core/phase_0: (0_beacon-chain.md, 0_fork-choice.md)\n"
+                "\t core/phase_1: (1_custody-game.md, 1_shard-data-chains.md)\n"
+                "\t light_client: (merkle_proofs.md)\n"
+                "\t and output.py"
+            )
     else:
         print("Invalid phase: {0}".format(args.phase))
specs/core/0_beacon-chain.md

@@ -167,7 +167,7 @@ The following values are (non-configurable) constants used throughout the specification.

 ## Configuration

-*Note*: The default mainnet configuration values are included here for spec-design purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs/constant_presets) directory. These configurations are updated for releases and may be out of sync during `dev` changes.
+*Note*: The default mainnet configuration values are included here for spec-design purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory. These configurations are updated for releases and may be out of sync during `dev` changes.

 ### Misc

@@ -648,8 +648,8 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
     bit_1_indices = indexed_attestation.custody_bit_1_indices

     # Verify no index has custody bit equal to 1 [to be removed in phase 1]
-    if not len(bit_1_indices) == 0:
-        return False
+    if not len(bit_1_indices) == 0:  # [to be removed in phase 1]
+        return False  # [to be removed in phase 1]
     # Verify max number of indices
     if not len(bit_0_indices) + len(bit_1_indices) <= MAX_VALIDATORS_PER_COMMITTEE:
         return False

@@ -1463,8 +1463,6 @@ def process_final_updates(state: BeaconState) -> None:
     HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2
     if balance < validator.effective_balance or validator.effective_balance + 3 * HALF_INCREMENT < balance:
         validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
-    # Update start shard
-    state.start_shard = Shard((state.start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT)
     # Reset slashings
     state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
     # Set randao mix
@@ -1473,6 +1471,8 @@ def process_final_updates(state: BeaconState) -> None:
     if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
         historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
         state.historical_roots.append(hash_tree_root(historical_batch))
+    # Update start shard
+    state.start_shard = Shard((state.start_shard + get_shard_delta(state, current_epoch)) % SHARD_COUNT)
     # Rotate current/previous epoch attestations
     state.previous_epoch_attestations = state.current_epoch_attestations
     state.current_epoch_attestations = []

@@ -1605,6 +1605,9 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
     attestation_slot = get_attestation_data_slot(state, data)
     assert attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= attestation_slot + SLOTS_PER_EPOCH

+    committee = get_crosslink_committee(state, data.target.epoch, data.crosslink.shard)
+    assert len(attestation.aggregation_bits) == len(attestation.custody_bits) == len(committee)
+
     pending_attestation = PendingAttestation(
         data=data,
         aggregation_bits=attestation.aggregation_bits,
@@ -1612,6 +1615,11 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
         proposer_index=get_beacon_proposer_index(state),
     )

+    # Check bitlist lengths
+    committee_size = get_committee_count(state, attestation.data.target.epoch)
+    assert len(attestation.aggregation_bits) == committee_size
+    assert len(attestation.custody_bits) == committee_size
+
     if data.target.epoch == get_current_epoch(state):
         assert data.source == state.current_justified_checkpoint
         parent_crosslink = state.current_crosslinks[data.crosslink.shard]
specs/core/0_deposit-contract.md

@@ -38,7 +38,7 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus changes.

 ### `deposit` function

-The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to a `DepositData` object.
+The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96]` corresponding to a [`DepositData`](./0_beacon-chain.md#depositdata) object.
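
For illustration only (an editor's sketch, not part of the spec): invoking this function from Python with web3.py might look as follows. The provider URL, the contract address and ABI placeholders, the argument bytes, and the 32 ETH amount are all assumptions:

```python
# Editor's sketch of calling the deposit function via web3.py.
# DEPOSIT_CONTRACT_ADDRESS and DEPOSIT_CONTRACT_ABI are hypothetical
# placeholders; fill them in for a real deployment.
from web3 import Web3

w3 = Web3(Web3.HTTPProvider('http://localhost:8545'))
deposit_contract = w3.eth.contract(address=DEPOSIT_CONTRACT_ADDRESS, abi=DEPOSIT_CONTRACT_ABI)

pubkey = b'\x11' * 48                  # placeholder BLS pubkey (bytes[48])
withdrawal_credentials = b'\x22' * 32  # placeholder credentials (bytes[32])
signature = b'\x33' * 96               # placeholder BLS signature (bytes[96])

tx_hash = deposit_contract.functions.deposit(
    pubkey,
    withdrawal_credentials,
    signature,
).transact({'from': w3.eth.accounts[0], 'value': w3.toWei(32, 'ether')})  # assumed full deposit
```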

 #### Deposit amount
specs/core/0_fork-choice.md

@@ -91,8 +91,12 @@ def get_genesis_store(genesis_state: BeaconState) -> Store:

 ```python
 def get_ancestor(store: Store, root: Hash, slot: Slot) -> Hash:
     block = store.blocks[root]
-    assert block.slot >= slot
-    return root if block.slot == slot else get_ancestor(store, block.parent_root, slot)
+    if block.slot > slot:
+        return get_ancestor(store, block.parent_root, slot)
+    elif block.slot == slot:
+        return root
+    else:
+        return Bytes32()  # root is older than queried slot: no results.
 ```
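
An editor's sketch of the new behaviour, using plain-Python stand-ins for the spec's `Store`, block, and `Bytes32` types: the sentinel is returned instead of raising when the given root is already older than the queried slot:

```python
# Editor's sketch: the revised get_ancestor on a toy chain.
from dataclasses import dataclass, field
from typing import Dict

Hash = bytes

def Bytes32() -> Hash:
    return b'\x00' * 32  # stand-in for the SSZ default value

@dataclass
class Block:
    slot: int
    parent_root: Hash

@dataclass
class Store:
    blocks: Dict[Hash, Block] = field(default_factory=dict)

def get_ancestor(store: Store, root: Hash, slot: int) -> Hash:
    block = store.blocks[root]
    if block.slot > slot:
        return get_ancestor(store, block.parent_root, slot)
    elif block.slot == slot:
        return root
    else:
        return Bytes32()  # root is older than queried slot: no results.

# Chain: a (slot 1) <- b (slot 2) <- c (slot 4)
store = Store({b'a': Block(1, Bytes32()), b'b': Block(2, b'a'), b'c': Block(4, b'b')})
assert get_ancestor(store, b'c', 2) == b'b'      # walks back to the slot-2 ancestor
assert get_ancestor(store, b'a', 5) == Bytes32() # older than the queried slot: sentinel, no AssertionError
```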

 #### `get_latest_attesting_balance`
specs/core/1_custody-game.md

@@ -12,6 +12,7 @@
 - [Terminology](#terminology)
 - [Constants](#constants)
+  - [Misc](#misc)
   - [Custody game parameters](#custody-game-parameters)
   - [Time parameters](#time-parameters)
   - [Max operations per block](#max-operations-per-block)
   - [Reward and penalty quotients](#reward-and-penalty-quotients)
@@ -33,12 +34,14 @@
   - [`BeaconBlockBody`](#beaconblockbody)
 - [Helpers](#helpers)
   - [`ceillog2`](#ceillog2)
+  - [`is_valid_merkle_branch_with_mixin`](#is_valid_merkle_branch_with_mixin)
   - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count)
-  - [`get_bit`](#get_bit)
+  - [`legendre_bit`](#legendre_bit)
+  - [`custody_subchunkify`](#custody_subchunkify)
   - [`get_custody_chunk_bit`](#get_custody_chunk_bit)
   - [`get_chunk_bits_root`](#get_chunk_bits_root)
   - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period)
-  - [`get_reveal_period`](#get_reveal_period)
+  - [`get_custody_period_for_validator`](#get_custody_period_for_validator)
   - [`replace_empty_or_append`](#replace_empty_or_append)
 - [Per-block processing](#per-block-processing)
   - [Operations](#operations)
@@ -75,11 +78,20 @@ This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0.
 ### Misc

 | `BLS12_381_Q` | `4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` |
+| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) |

 ### Custody game parameters

 | Name | Value |
 | - | - |
 | `BYTES_PER_SHARD_BLOCK` | `2**14` (= 16,384) |
 | `BYTES_PER_CUSTODY_CHUNK` | `2**9` (= 512) |
-| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) |
+| `BYTES_PER_CUSTODY_SUBCHUNK` | `48` |
+| `CHUNKS_PER_EPOCH` | `2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK` |
+| `MAX_CUSTODY_CHUNKS` | `MAX_EPOCHS_PER_CROSSLINK * CHUNKS_PER_EPOCH` |
+| `CUSTODY_DATA_DEPTH` | `ceillog2(MAX_CUSTODY_CHUNKS) + 1` |
+| `CUSTODY_CHUNK_BIT_DEPTH` | `ceillog2(MAX_EPOCHS_PER_CROSSLINK * CHUNKS_PER_EPOCH // 256) + 2` |

 ### Time parameters

@@ -144,7 +156,7 @@ class CustodyBitChallenge(Container):
     attestation: Attestation
     challenger_index: ValidatorIndex
     responder_key: BLSSignature
-    chunk_bits: Bytes[PLACEHOLDER]
+    chunk_bits: Bitlist[MAX_CUSTODY_CHUNKS]
     signature: BLSSignature
 ```

@@ -181,10 +193,10 @@ class CustodyBitChallengeRecord(Container):
 class CustodyResponse(Container):
     challenge_index: uint64
     chunk_index: uint64
-    chunk: Vector[Bytes[PLACEHOLDER], BYTES_PER_CUSTODY_CHUNK]
-    data_branch: List[Hash, PLACEHOLDER]
-    chunk_bits_branch: List[Hash, PLACEHOLDER]
-    chunk_bits_leaf: Hash
+    chunk: BytesN[BYTES_PER_CUSTODY_CHUNK]
+    data_branch: List[Hash, CUSTODY_DATA_DEPTH]
+    chunk_bits_branch: List[Hash, CUSTODY_CHUNK_BIT_DEPTH]
+    chunk_bits_leaf: Bitvector[256]
 ```

 ### New beacon operations

@@ -225,11 +237,11 @@ Add the following fields to the end of the specified container objects.
 ```python
 class Validator(Container):
-    # next_custody_reveal_period is initialised to the custody period
+    # next_custody_secret_to_reveal is initialised to the custody period
     # (of the particular validator) in which the validator is activated
-    # = get_reveal_period(...)
-    next_custody_reveal_period: uint64
-    max_reveal_lateness: uint64
+    # = get_custody_period_for_validator(...)
+    next_custody_secret_to_reveal: uint64
+    max_reveal_lateness: Epoch
 ```

 #### `BeaconState`
@@ -263,7 +275,26 @@ class BeaconBlockBody(Container):

 ```python
 def ceillog2(x: uint64) -> int:
-    return x.bit_length()
+    return (x - 1).bit_length()
 ```

+### `is_valid_merkle_branch_with_mixin`
+
+```python
+def is_valid_merkle_branch_with_mixin(leaf: Hash,
+                                      branch: Sequence[Hash],
+                                      depth: uint64,
+                                      index: uint64,
+                                      root: Hash,
+                                      mixin: uint64) -> bool:
+    value = leaf
+    for i in range(depth):
+        if index // (2**i) % 2:
+            value = hash(branch[i] + value)
+        else:
+            value = hash(value + branch[i])
+    value = hash(value + mixin.to_bytes(32, "little"))
+    return value == root
+```
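
A quick editor's check of the two helpers in this hunk, assuming `hash` is SHA-256 as in the pyspec utilities:

```python
# Editor's sketch: exercise ceillog2 and is_valid_merkle_branch_with_mixin
# on a depth-1 tree. `hash` is assumed to be SHA-256, shadowing the builtin
# to mirror the pyspec convention.
from hashlib import sha256

def hash(data: bytes) -> bytes:
    return sha256(data).digest()

def ceillog2(x: int) -> int:
    return (x - 1).bit_length()

assert ceillog2(8) == 3 and ceillog2(9) == 4  # ceiling, not floor, of log2

def is_valid_merkle_branch_with_mixin(leaf, branch, depth, index, root, mixin):
    value = leaf
    for i in range(depth):
        if index // (2**i) % 2:
            value = hash(branch[i] + value)
        else:
            value = hash(value + branch[i])
    value = hash(value + mixin.to_bytes(32, "little"))
    return value == root

leaf, sibling, mixin = b'\x01' * 32, b'\x02' * 32, 5
root = hash(hash(leaf + sibling) + mixin.to_bytes(32, "little"))  # leaf sits at index 0
assert is_valid_merkle_branch_with_mixin(leaf, [sibling], 1, 0, root, mixin)
assert not is_valid_merkle_branch_with_mixin(leaf, [sibling], 1, 1, root, mixin)  # wrong index fails
```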
### `get_crosslink_chunk_count`

@@ -271,37 +302,69 @@ def ceillog2(x: uint64) -> int:
 ```python
 def get_custody_chunk_count(crosslink: Crosslink) -> int:
     crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, crosslink.end_epoch - crosslink.start_epoch)
-    chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK
-    return crosslink_length * chunks_per_epoch
+    return crosslink_length * CHUNKS_PER_EPOCH
 ```

-### `get_bit`
+### `legendre_bit`

+Returns the Legendre symbol `(a/q)` normalized as a bit (i.e. `((a/q) + 1) // 2`). In a production implementation, a well-optimized library (e.g. GMP) should be used for this.
+
 ```python
-def get_bit(serialization: bytes, i: uint64) -> int:
-    """
-    Extract the bit in ``serialization`` at position ``i``.
-    """
-    return (serialization[i // 8] >> (i % 8)) % 2
+def legendre_bit(a: int, q: int) -> int:
+    if a >= q:
+        return legendre_bit(a % q, q)
+    if a == 0:
+        return 0
+    assert(q > a > 0 and q % 2 == 1)
+    t = 1
+    n = q
+    while a != 0:
+        while a % 2 == 0:
+            a //= 2
+            r = n % 8
+            if r == 3 or r == 5:
+                t = -t
+        a, n = n, a
+        if a % 4 == n % 4 == 3:
+            t = -t
+        a %= n
+    if n == 1:
+        return (t + 1) // 2
+    else:
+        return 0
 ```
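
The bit can be cross-checked against Euler's criterion (`a**((q - 1) // 2) mod q` is 1 exactly for nonzero quadratic residues). An editor's sketch, restating `legendre_bit` so it runs stand-alone and substituting a small odd prime for `BLS12_381_Q`:

```python
# Editor's sketch: check legendre_bit (as defined above) against Euler's criterion.
def legendre_bit(a: int, q: int) -> int:
    if a >= q:
        return legendre_bit(a % q, q)
    if a == 0:
        return 0
    assert q > a > 0 and q % 2 == 1
    t, n = 1, q
    while a != 0:
        while a % 2 == 0:
            a //= 2
            if n % 8 in (3, 5):
                t = -t
        a, n = n, a
        if a % 4 == n % 4 == 3:
            t = -t
        a %= n
    return (t + 1) // 2 if n == 1 else 0

q = 23  # small odd prime stand-in for BLS12_381_Q
for a in range(2 * q):
    # Euler's criterion: 1 for nonzero quadratic residues, else 0
    expected = 0 if a % q == 0 else (1 if pow(a, (q - 1) // 2, q) == 1 else 0)
    assert legendre_bit(a, q) == expected
```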
+### `custody_subchunkify`

+Given one proof of custody chunk, returns the proof of custody subchunks of the correct sizes.
+
+```python
+def custody_subchunkify(bytez: bytes) -> list:
+    bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_SUBCHUNK)
+    return [bytez[i:i + BYTES_PER_CUSTODY_SUBCHUNK]
+            for i in range(0, len(bytez), BYTES_PER_CUSTODY_SUBCHUNK)]
+```
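
An editor's sketch of the padding behaviour, using the `BYTES_PER_CUSTODY_SUBCHUNK = 48` constant defined above:

```python
# Editor's sketch: subchunk sizes for a 512-byte custody chunk.
BYTES_PER_CUSTODY_SUBCHUNK = 48

def custody_subchunkify(bytez: bytes) -> list:
    bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_SUBCHUNK)
    return [bytez[i:i + BYTES_PER_CUSTODY_SUBCHUNK]
            for i in range(0, len(bytez), BYTES_PER_CUSTODY_SUBCHUNK)]

subchunks = custody_subchunkify(b'\xaa' * 512)
assert len(subchunks) == 11                  # 512 bytes -> ceil(512 / 48) subchunks
assert all(len(s) == 48 for s in subchunks)  # the last one is zero-padded from 32 data bytes to 48
```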
### `get_custody_chunk_bit`

 ```python
 def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool:
-    # TODO: Replace with something MPC-friendly, e.g. the Legendre symbol
-    return bool(get_bit(hash(key + chunk), 0))
+    full_G2_element = bls_signature_to_G2(key)
+    s = full_G2_element[0].coeffs
+    bits = [legendre_bit((i + 1) * s[i % 2] + int.from_bytes(subchunk, "little"), BLS12_381_Q)
+            for i, subchunk in enumerate(custody_subchunkify(chunk))]
+
+    return bool(sum(bits) % 2)
 ```

### `get_chunk_bits_root`

 ```python
-def get_chunk_bits_root(chunk_bits: bytes) -> Hash:
-    aggregated_bits = bytearray([0] * 32)
-    for i in range(0, len(chunk_bits), 32):
-        for j in range(32):
-            aggregated_bits[j] ^= chunk_bits[i + j]
-    return hash(aggregated_bits)
+def get_chunk_bits_root(chunk_bits: Bitlist[MAX_CUSTODY_CHUNKS]) -> bit:
+    aggregated_bits = 0
+    for i, b in enumerate(chunk_bits):
+        aggregated_bits += 2**i * b
+    return legendre_bit(aggregated_bits, BLS12_381_Q)
 ```

### `get_randao_epoch_for_custody_period`

@@ -312,10 +375,10 @@ def get_randao_epoch_for_custody_period(period: uint64, validator_index: ValidatorIndex) -> Epoch:
     return Epoch(next_period_start + CUSTODY_PERIOD_TO_RANDAO_PADDING)
 ```

-### `get_reveal_period`
+### `get_custody_period_for_validator`

 ```python
-def get_reveal_period(state: BeaconState, validator_index: ValidatorIndex, epoch: Epoch=None) -> int:
+def get_custody_period_for_validator(state: BeaconState, validator_index: ValidatorIndex, epoch: Epoch=None) -> int:
     '''
     Return the reveal period for a given validator.
     '''

@@ -328,7 +391,7 @@ def get_custody_period_for_validator(state: BeaconState, validator_index: ValidatorIndex, epoch: Epoch=None) -> int:
 ```python
 def replace_empty_or_append(list: MutableSequence[Any], new_element: Any) -> int:
     for i in range(len(list)):
-        if is_empty(list[i]):
+        if is_zero(list[i]):
             list[i] = new_element
             return i
     list.append(new_element)

@@ -354,9 +417,9 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) -> None:
     Note that this function mutates ``state``.
     """
     revealer = state.validators[reveal.revealer_index]
-    epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_reveal_period, reveal.revealed_index)
+    epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_secret_to_reveal, reveal.revealer_index)

-    assert revealer.next_custody_reveal_period < get_reveal_period(state, reveal.revealed_index)
+    assert revealer.next_custody_secret_to_reveal < get_custody_period_for_validator(state, reveal.revealer_index)

     # Revealed validator is active or exited, but not withdrawn
     assert is_slashable_validator(revealer, get_current_epoch(state))

@@ -374,15 +437,19 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) -> None:
     )

     # Decrement max reveal lateness if response is timely
-    if revealer.next_custody_reveal_period == get_reveal_period(state, reveal.revealer_index) - 2:
-        revealer.max_reveal_lateness -= MAX_REVEAL_LATENESS_DECREMENT
-    revealer.max_reveal_lateness = max(
-        revealer.max_reveal_lateness,
-        get_reveal_period(state, reveal.revealed_index) - revealer.next_custody_reveal_period
-    )
+    if epoch_to_sign + EPOCHS_PER_CUSTODY_PERIOD >= get_current_epoch(state):
+        if revealer.max_reveal_lateness >= MAX_REVEAL_LATENESS_DECREMENT:
+            revealer.max_reveal_lateness -= MAX_REVEAL_LATENESS_DECREMENT
+        else:
+            revealer.max_reveal_lateness = 0
+    else:
+        revealer.max_reveal_lateness = max(
+            revealer.max_reveal_lateness,
+            get_current_epoch(state) - epoch_to_sign - EPOCHS_PER_CUSTODY_PERIOD
+        )

     # Process reveal
-    revealer.next_custody_reveal_period += 1
+    revealer.next_custody_secret_to_reveal += 1

     # Reward Block Proposer
     proposer_index = get_beacon_proposer_index(state)

@@ -520,7 +587,7 @@ For each `challenge` in `block.body.custody_bit_challenges`, run the following function:
 ```python
 def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> None:
     attestation = challenge.attestation
-    epoch = compute_epoch_of_slot(attestation.data.slot)
+    epoch = attestation.data.target.epoch
     shard = attestation.data.crosslink.shard

     # Verify challenge signature
@@ -533,7 +600,10 @@ def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> None:
     assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
     # Verify attestation is eligible for challenging
     responder = state.validators[challenge.responder_index]
-    assert epoch + responder.max_reveal_lateness <= get_reveal_period(state, challenge.responder_index)
+    assert get_current_epoch(state) <= get_randao_epoch_for_custody_period(
+        get_custody_period_for_validator(state, challenge.responder_index, epoch),
+        challenge.responder_index
+    ) + 2 * EPOCHS_PER_CUSTODY_PERIOD + responder.max_reveal_lateness

     # Verify the responder participated in the attestation
     attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
@@ -543,17 +613,18 @@ def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> None:
     assert record.challenger_index != challenge.challenger_index
     # Verify the responder custody key
     epoch_to_sign = get_randao_epoch_for_custody_period(
-        get_reveal_period(state, challenge.responder_index, epoch),
+        get_custody_period_for_validator(state, challenge.responder_index, epoch),
         challenge.responder_index,
     )
     domain = get_domain(state, DOMAIN_RANDAO, epoch_to_sign)
     assert bls_verify(responder.pubkey, hash_tree_root(epoch_to_sign), challenge.responder_key, domain)
     # Verify the chunk count
     chunk_count = get_custody_chunk_count(attestation.data.crosslink)
-    # Verify the first bit of the hash of the chunk bits does not equal the custody bit
+    assert chunk_count == len(challenge.chunk_bits)
+    # Verify custody bit is incorrect
     committee = get_crosslink_committee(state, epoch, shard)
     custody_bit = attestation.custody_bits[committee.index(challenge.responder_index)]
-    assert custody_bit != get_bit(get_chunk_bits_root(challenge.chunk_bits), 0)
+    assert custody_bit != get_chunk_bits_root(challenge.chunk_bits)
     # Add new bit challenge record
     new_record = CustodyBitChallengeRecord(
         challenge_index=state.custody_challenge_index,

@@ -636,16 +707,17 @@ def process_bit_challenge_response(state: BeaconState,
         root=challenge.data_root,
     )
     # Verify the chunk bit leaf matches the challenge data
-    assert is_valid_merkle_branch(
-        leaf=response.chunk_bits_leaf,
+    assert is_valid_merkle_branch_with_mixin(
+        leaf=hash_tree_root(response.chunk_bits_leaf),
         branch=response.chunk_bits_branch,
-        depth=ceillog2(challenge.chunk_count) >> 8,
+        depth=ceillog2(MAX_CUSTODY_CHUNKS // 256),
         index=response.chunk_index // 256,
-        root=challenge.chunk_bits_merkle_root
+        root=challenge.chunk_bits_merkle_root,
+        mixin=challenge.chunk_count,
     )
     # Verify the chunk bit does not match the challenge chunk bit
     assert (get_custody_chunk_bit(challenge.responder_key, response.chunk)
-            != get_bit(challenge.chunk_bits_leaf, response.chunk_index % 256))
+            != response.chunk_bits_leaf[response.chunk_index % 256])
     # Clear the challenge
     records = state.custody_bit_challenge_records
     records[records.index(challenge)] = CustodyBitChallengeRecord()

@@ -665,8 +737,8 @@ Run `process_reveal_deadlines(state)` immediately after `process_registry_updates(state)`:
 # end insert @process_reveal_deadlines
 def process_reveal_deadlines(state: BeaconState) -> None:
     for index, validator in enumerate(state.validators):
-        deadline = validator.next_custody_reveal_period + (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD)
-        if get_reveal_period(state, ValidatorIndex(index)) > deadline:
+        deadline = validator.next_custody_secret_to_reveal + (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD)
+        if get_custody_period_for_validator(state, ValidatorIndex(index)) > deadline:
             slash_validator(state, ValidatorIndex(index))
 ```
specs/core/1_shard-data-chains.md

@@ -14,6 +14,8 @@
   - [Misc](#misc)
   - [Initial values](#initial-values)
   - [Time parameters](#time-parameters)
+  - [State list lengths](#state-list-lengths)
+  - [Rewards and penalties](#rewards-and-penalties)
   - [Signature domain types](#signature-domain-types)
   - [TODO PLACEHOLDER](#todo-placeholder)
 - [Data structures](#data-structures)
@@ -22,20 +24,24 @@
   - [`ShardBlockSignatures`](#shardblocksignatures)
   - [`ShardBlockCore`](#shardblockcore)
   - [`ExtendedShardBlockCore`](#extendedshardblockcore)
+  - [`ShardState`](#shardstate)
+  - [`ShardReceiptDelta`](#shardreceiptdelta)
 - [Helper functions](#helper-functions)
-  - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot)
+  - [`compute_slot_of_shard_slot`](#compute_slot_of_shard_slot)
+  - [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot)
   - [`get_shard_period_start_epoch`](#get_shard_period_start_epoch)
   - [`get_period_committee`](#get_period_committee)
   - [`get_persistent_committee`](#get_persistent_committee)
   - [`get_shard_block_proposer_index`](#get_shard_block_proposer_index)
-  - [`get_shard_block_attester_committee`](#get_shard_block_attester_committee)
   - [`get_shard_header`](#get_shard_header)
   - [`pad`](#pad)
   - [`flatten_shard_header`](#flatten_shard_header)
   - [`compute_crosslink_data_root`](#compute_crosslink_data_root)
+  - [`get_default_shard_state`](#get_default_shard_state)
 - [Object validity](#object-validity)
-  - [Shard blocks](#shard-blocks)
+  - [Shard block validation: preliminary](#shard-block-validation-preliminary)
+  - [Shard state transition function helpers](#shard-state-transition-function-helpers)
+  - [Shard state transition function](#shard-state-transition-function)
   - [Beacon attestations](#beacon-attestations)
 - [Shard fork choice rule](#shard-fork-choice-rule)

@@ -59,10 +65,11 @@ We define the following Python custom types for type hinting and readability:
 | Name | Value |
 | - | - |
-| `SHARD_HEADER_SIZE` | `2**9` (= 512) |
-| `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) |
 | `SHARD_SLOTS_PER_BEACON_SLOT` | `2**1` (= 2) |
-| `MAX_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) |
+| `TARGET_PERSISTENT_COMMITTEE_SIZE` | `2**7` (= 128) |
+| `SHARD_HEADER_SIZE` | `2**9` (= 512) |
+| `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) |
+| `SHARD_BLOCK_SIZE_LIMIT` | `2**16` (= 65,536) |

 ### Initial values

@@ -70,7 +77,6 @@ We define the following Python custom types for type hinting and readability:
 | - | - |
 | `PHASE_1_FORK_EPOCH` | **TBD** |
 | `PHASE_1_FORK_SLOT` | **TBD** |
-| `GENESIS_SHARD_SLOT` | 0 |

 ### Time parameters

@@ -79,6 +85,19 @@ We define the following Python custom types for type hinting and readability:
 | `CROSSLINK_LOOKBACK` | `2**0` (= 1) | epochs | 6.4 minutes |
 | `EPOCHS_PER_SHARD_PERIOD` | `2**8` (= 256) | epochs | ~27 hours |

+### State list lengths
+
+| Name | Value | Unit |
+| - | - | :-: |
+| `HISTORY_ACCUMULATOR_VECTOR` | `2**6` (= 64) | state tree maximum depth |
+
+### Rewards and penalties
+
+| Name | Value |
+| - | - |
+| `BASEFEE_ADJUSTMENT_FACTOR` | `2**3` (= 8) |
+| `REWARD_COEFFICIENT_BASE` | `2**20` ( = 1,048,576) |
+
 ### Signature domain types

 The following types are defined, mapping into `DomainType` (little endian):

@@ -132,7 +151,7 @@ class ShardBlockCore(Container):
     data_root: Hash
     state_root: Hash
     total_bytes: uint64
-    attester_bitfield: Bitvector[MAX_PERSISTENT_COMMITTEE_SIZE * 2]
+    attester_bitfield: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2]
 ```

 ### `ExtendedShardBlockCore`

@@ -145,7 +164,33 @@ class ExtendedShardBlockCore(Container):
     data: Bytes[SHARD_BLOCK_SIZE_LIMIT - SHARD_HEADER_SIZE]
     state_root: Hash
     total_bytes: uint64
-    attester_bitfield: Bitvector[MAX_PERSISTENT_COMMITTEE_SIZE * 2]
+    attester_bitfield: Bitvector[TARGET_PERSISTENT_COMMITTEE_SIZE * 2]
 ```

+### `ShardState`
+
+```python
+class ShardState(Container):
+    history_accumulator: Vector[Hash, HISTORY_ACCUMULATOR_VECTOR]
+    earlier_committee_rewards: List[uint64, TARGET_PERSISTENT_COMMITTEE_SIZE]
+    later_committee_rewards: List[uint64, TARGET_PERSISTENT_COMMITTEE_SIZE]
+    earlier_committee_fees: List[Gwei, TARGET_PERSISTENT_COMMITTEE_SIZE]
+    later_committee_fees: List[Gwei, TARGET_PERSISTENT_COMMITTEE_SIZE]
+    basefee: Gwei
+    slot: ShardSlot
+    shard: Shard
+    most_recent_block_core: ShardBlockCore
+    receipt_root: Hash
+    total_bytes: uint64
+```
+
+### `ShardReceiptDelta`
+
+```python
+class ShardReceiptDelta(Container):
+    index: ValidatorIndex
+    reward_coefficient: uint64
+    block_fee: Gwei
+```
+
 ## Helper functions

@@ -167,16 +212,14 @@ def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch:
 ### `get_shard_period_start_epoch`

 ```python
-def get_shard_period_start_epoch(epoch: Epoch, lookback: Epoch=Epoch(0)) -> Epoch:
+def get_shard_period_start_epoch(epoch: Epoch, lookback: int=0) -> Epoch:
     return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD)
 ```
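
An editor's worked example of the new `lookback` semantics, using plain ints and the `EPOCHS_PER_SHARD_PERIOD = 2**8` (= 256) value from this document:

```python
# Editor's sketch: get_shard_period_start_epoch with plain ints.
EPOCHS_PER_SHARD_PERIOD = 256

def get_shard_period_start_epoch(epoch: int, lookback: int = 0) -> int:
    return epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD

assert get_shard_period_start_epoch(1000) == 768              # start of the current period
assert get_shard_period_start_epoch(1000, lookback=1) == 512  # one period earlier
assert get_shard_period_start_epoch(1000, lookback=2) == 256  # two periods earlier
```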
### `get_period_committee`

 ```python
-def get_period_committee(state: BeaconState,
-                         epoch: Epoch,
-                         shard: Shard) -> List[ValidatorIndex, MAX_PERSISTENT_COMMITTEE_SIZE]:
+def get_period_committee(state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
     """
     Return committee for a period. Used to construct persistent committees.
     """
@@ -187,7 +230,7 @@ def get_period_committee(state: BeaconState,
         count=SHARD_COUNT,
     )

-    return full_committee[:MAX_PERSISTENT_COMMITTEE_SIZE]
+    return full_committee[:TARGET_PERSISTENT_COMMITTEE_SIZE]
 ```

 ### `get_persistent_committee`

@@ -201,8 +244,8 @@ def get_persistent_committee(state: BeaconState,
     """
     epoch = compute_epoch_of_shard_slot(slot)

-    earlier_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=Epoch(2)), shard)
-    later_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=Epoch(1)), shard)
+    earlier_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=2), shard)
+    later_committee = get_period_committee(state, get_shard_period_start_epoch(epoch, lookback=1), shard)

     # Take not-yet-cycled-out validators from earlier committee and already-cycled-in validators from
     # later committee; return a sorted list of the union of the two, deduplicated

@@ -250,9 +293,9 @@ def get_shard_header(block: ShardBlock) -> ShardBlockHeader:
             data_root=hash_tree_root(block.core.data),
             state_root=block.core.state_root,
             total_bytes=block.core.total_bytes,
-            attester_bitfield=block.core.attester_bitfield
+            attester_bitfield=block.core.attester_bitfield,
         ),
-        signatures=block.signatures
+        signatures=block.signatures,
     )
 ```

@@ -299,93 +342,228 @@ def compute_crosslink_data_root(blocks: Sequence[ShardBlock]) -> Hash:
     return hash_tree_root(BytesN[MAX_SIZE](pad(header + footer, MAX_SIZE)))
 ```

-## Object validity
-
-### Shard blocks
-
-Let:
-
-- `beacon_blocks` be the `BeaconBlock` list such that `beacon_blocks[slot]` is the canonical `BeaconBlock` at slot `slot`
-- `beacon_state` be the canonical `BeaconState` after processing `beacon_blocks[-1]`
-- `shard` is the shard ID
-- `valid_shard_blocks` be the list of valid `ShardBlock`, recursively defined
-- `candidate` be a candidate `ShardBlock` for which validity is to be determined by running `is_valid_shard_block`
+### `get_default_shard_state`

 ```python
-def is_valid_shard_block(beacon_state: BeaconState,
-                         beacon_blocks: Sequence[BeaconBlock],
-                         shard: Shard,
-                         valid_shard_blocks: Sequence[ShardBlock],
-                         candidate: ShardBlock) -> bool:
-    # Check if block is already determined valid
-    for _, block in enumerate(valid_shard_blocks):
-        if candidate == block:
-            return True
+def get_default_shard_state(beacon_state: BeaconState, shard: Shard) -> ShardState:
+    earlier_committee = get_period_committee(
+        beacon_state,
+        Epoch(PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD * 2),
+        shard,
+    )
+    later_committee = get_period_committee(
+        beacon_state,
+        Epoch(PHASE_1_FORK_EPOCH - EPOCHS_PER_SHARD_PERIOD),
+        shard,
+    )
+    return ShardState(
+        basefee=1,
+        shard=shard,
+        slot=PHASE_1_FORK_SLOT,
+        earlier_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(earlier_committee))],
+        later_committee_rewards=[REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))],
+        earlier_committee_fees=[Gwei(0) for _ in range(len(earlier_committee))],
+        later_committee_fees=[Gwei(0) for _ in range(len(later_committee))],
+    )
 ```

+## Object validity
+
+### Shard block validation: preliminary
+
+Accept a shard block `block` only if all of the following are correct:
+
+* Either `block.core.parent_root == Hash()` or a block `parent` such that `hash_tree_root(parent.core) == block.core.parent_root` has already been accepted.
+* `block.core.beacon_chain_root == get_block_root(head_beacon_state, compute_epoch_of_shard_slot(parent.core.slot))` where `head_beacon_state` is the current beacon chain head state. Alternatively phrased, a beacon chain block `beacon_ref` such that `signing_root(beacon_ref) == block.core.beacon_chain_root` has already been accepted and is part of the canonical chain, and no block with slot `beacon_ref.slot < slot <= compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent.core.slot))` is part of the canonical chain.
+* Let `beacon_state` be the state where `beacon_ref.state_root == hash_tree_root(beacon_state)`. Let `prev_state` be the post-state of the `parent` if the `parent` exists, otherwise let it be `get_default_shard_state(beacon_state, shard)` (defined below). `block.core.state_root` must equal the `hash_tree_root` of the state after applying `shard_state_transition(prev_state, beacon_state, block)`.
+
+Note that these acceptance conditions depend on the canonical beacon chain; when the canonical beacon chain reorganizes, the eligibility of shard blocks should be re-evaluated.
+
+### Shard state transition function helpers
+
+```python
+def add_reward(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: int) -> None:
+    epoch = compute_epoch_of_shard_slot(state.slot)
+    earlier_committee = get_period_committee(
+        beacon_state,
+        get_shard_period_start_epoch(epoch, lookback=2),
+        state.shard,
+    )
+    later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard)
+    if index in earlier_committee:
+        state.earlier_committee_rewards[earlier_committee.index(index)] += delta
+    elif index in later_committee:
+        state.later_committee_rewards[later_committee.index(index)] += delta
+    else:
+        raise Exception("Should never be here")
+```
+
+```python
+def add_fee(state: ShardState, beacon_state: BeaconState, index: ValidatorIndex, delta: int) -> None:
+    epoch = compute_epoch_of_shard_slot(state.slot)
+    earlier_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=2), state.shard)
+    later_committee = get_period_committee(beacon_state, get_shard_period_start_epoch(epoch, lookback=1), state.shard)
+    if index in earlier_committee:
+        state.earlier_committee_fees[earlier_committee.index(index)] += delta
+    elif index in later_committee:
+        state.later_committee_fees[later_committee.index(index)] += delta
+    else:
+        raise Exception("Should never be here")
+```
+
+### Shard state transition function
+
+```python
+def shard_state_transition(state: ShardState,
+                           beacon_state: BeaconState,
+                           block: ShardBlock,
+                           validate_state_root: bool=False) -> None:
+    assert block.core.slot > state.slot
+    for slot in range(state.slot, block.core.slot):
+        shard_slot_transition(state, beacon_state)
+    shard_block_transition(state, beacon_state, block, validate_state_root=validate_state_root)
+```
+
+```python
+def shard_slot_transition(state: ShardState, beacon_state: BeaconState) -> None:
+    # Correct saved state root
+    if state.most_recent_block_core.state_root == Hash():
+        state.most_recent_block_core.state_root = hash_tree_root(state)
+
+    # Save states in history accumulator
+    depth = 0
+    h = hash_tree_root(state)
+    while state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_VECTOR:
+        state.history_accumulator[depth] = h
+        depth += 1
+
+    # Period transitions
+    if (state.slot + 1) % (SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0:
+        epoch = compute_epoch_of_shard_slot(state.slot)
+        earlier_committee = get_period_committee(
+            beacon_state,
+            get_shard_period_start_epoch(epoch, lookback=2),
+            state.shard,
+        )
+        later_committee = get_period_committee(
+            beacon_state,
+            get_shard_period_start_epoch(epoch, lookback=1),
+            state.shard,
+        )
+        state.receipt_root = hash_tree_root(List[ShardReceiptDelta, PLACEHOLDER]([
+            ShardReceiptDelta(
+                index=validator_index,
+                reward_coefficient=state.earlier_committee_rewards[i],
+                block_fee=state.earlier_committee_fees[i],
+            )
+            for i, validator_index in enumerate(earlier_committee)
+        ]))
+        state.earlier_committee_rewards = state.later_committee_rewards
+        state.earlier_committee_fees = state.later_committee_fees
+        state.later_committee_rewards = [REWARD_COEFFICIENT_BASE for _ in range(len(later_committee))]
+        state.later_committee_fees = [Gwei(0) for _ in range(len(later_committee))]
+    else:
+        state.receipt_root = Hash()
+    state.slot += ShardSlot(1)
+```
+
+```python
+def shard_block_transition(state: ShardState,
+                           beacon_state: BeaconState,
+                           block: ShardBlock,
+                           validate_state_root: bool) -> None:
+    # Check slot number
-    assert compute_slot_of_shard_slot(candidate.core.slot) >= PHASE_1_FORK_SLOT
-
-    # Check beacon block
-    beacon_block_slot = compute_start_slot_of_epoch(compute_epoch_of_shard_slot(candidate.core.slot))
-    beacon_block = beacon_blocks[beacon_block_slot]
-    assert candidate.core.beacon_block_root == signing_root(beacon_block)
-    assert beacon_block.slot <= candidate.core.slot
-
-    # Check state root
-    assert candidate.core.state_root == Hash()  # [to be removed in phase 2]
+    assert block.core.slot == state.slot

     # Check parent block
-    if candidate.core.parent_root != Hash():
-        parent_block = next(
-            (block for block in valid_shard_blocks if hash_tree_root(block.core) == candidate.core.parent_root),
-            None
-        )
-        assert parent_block is not None
-        assert parent_block.core.slot < candidate.core.slot
-        parent_beacon_block_slot = compute_start_slot_of_epoch(compute_epoch_of_shard_slot(parent_block.core.slot))
-        assert signing_root(beacon_blocks[parent_beacon_block_slot]) == parent_block.core.beacon_chain_root
+    if block.core.parent_root != Hash():
+        assert block.core.parent_root == hash_tree_root(state.most_recent_block_core)

+    # Calculate base reward
+    total_balance = get_total_active_balance(beacon_state)
+    base_reward = (
+        REWARD_COEFFICIENT_BASE * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH
+    )
     # Check attestations
-    attester_committee = get_persistent_committee(beacon_state, shard, block.core.slot)
+    attester_committee = get_persistent_committee(beacon_state, state.shard, block.core.slot)
     pubkeys = []
+    attestations = 0
+
-    for i, index in enumerate(attester_committee):
+    for i, validator_index in enumerate(attester_committee):
         if block.core.attester_bitfield[i]:
-            pubkeys.append(beacon_state.validators[index].pubkey)
-    for i in range(len(attester_committee), MAX_PERSISTENT_COMMITTEE_SIZE * 2):
-        assert block.attester_bitfield[i] is False
+            pubkeys.append(beacon_state.validators[validator_index].pubkey)
+            add_reward(state, beacon_state, validator_index, base_reward)
+            attestations += 1
+
+    for i in range(len(attester_committee), TARGET_PERSISTENT_COMMITTEE_SIZE):
+        assert block.core.attester_bitfield[i] is False or block.core.attester_bitfield[i] == 0  # TODO: FIX Bitvector

     assert bls_verify(
         pubkey=bls_aggregate_pubkeys(pubkeys),
-        message_hash=candidate.core.parent_root,
-        signature=candidate.signatures.attestation_signature,
-        domain=get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(candidate.core.slot))
+        message_hash=block.core.parent_root,
+        signature=block.signatures.attestation_signature,
+        domain=get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.core.slot))
     )

     # Check proposer
-    proposer_index = get_shard_block_proposer_index(beacon_state, shard, candidate.core.slot)
+    proposer_index = get_shard_block_proposer_index(beacon_state, state.shard, block.core.slot)
     assert proposer_index is not None
+    add_reward(state, beacon_state, proposer_index, attestations * base_reward // PROPOSER_REWARD_QUOTIENT)
     assert bls_verify(
         pubkey=beacon_state.validators[proposer_index].pubkey,
-        message_hash=hash_tree_root(candidate.core),
-        signature=candidate.signatures.proposer_signature,
-        domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(candidate.core.slot)),
+        message_hash=hash_tree_root(block.core),
+        signature=block.signatures.proposer_signature,
+        domain=get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.core.slot)),
     )

-    return True
+    # Process and update block data fees
+    add_fee(state, beacon_state, proposer_index, state.basefee * len(block.core.data) // SHARD_BLOCK_SIZE_LIMIT)
+    QUOTIENT = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR
+    if len(block.core.data) > SHARD_BLOCK_SIZE_TARGET:
+        state.basefee += Gwei(max(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT))
+    elif len(block.core.data) < SHARD_BLOCK_SIZE_TARGET:
+        state.basefee -= Gwei(max(1, state.basefee * (len(block.core.data) - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT))
+    state.basefee = Gwei(max(
+        1,
||||
min(
|
||||
EFFECTIVE_BALANCE_INCREMENT // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_BEACON_SLOT * SLOTS_PER_EPOCH,
|
||||
state.basefee,
|
||||
)
|
||||
))
|
||||
|
||||
# Check total bytes
|
||||
state.total_bytes += len(block.core.data)
|
||||
assert block.core.total_bytes == state.total_bytes
|
||||
|
||||
# Update in-state block header
|
||||
state.most_recent_block_core = ShardBlockCore(
|
||||
slot=block.core.slot,
|
||||
beacon_chain_root=block.core.beacon_chain_root,
|
||||
parent_root=block.core.parent_root,
|
||||
data_root=hash_tree_root(block.core.data),
|
||||
state_root=Hash(),
|
||||
total_bytes=block.core.total_bytes,
|
||||
attester_bitfield=block.core.attester_bitfield,
|
||||
)
|
||||
|
||||
# Check state root
|
||||
if validate_state_root:
|
||||
assert block.core.state_root == hash_tree_root(state)
|
||||
```
|
||||
|
||||
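The basefee update above is an EIP-1559-style rule: usage above the block size target raises the fee, usage below lowers it, and the result is clamped to a fixed range. Below is a standalone sketch of the rule with illustrative constants (the real values come from the phase 1 configuration; the cap constant is a stand-in for the `EFFECTIVE_BALANCE_INCREMENT`-derived bound):

```python
# Illustrative constants; not the spec's configured values.
SHARD_BLOCK_SIZE_LIMIT = 65536
SHARD_BLOCK_SIZE_TARGET = 16384
BASEFEE_ADJUSTMENT_FACTOR = 8
BASEFEE_CAP = 10**9  # stand-in for the spec's effective-balance-derived bound

def adjust_basefee(basefee: int, data_length: int) -> int:
    quotient = SHARD_BLOCK_SIZE_LIMIT * BASEFEE_ADJUSTMENT_FACTOR
    if data_length > SHARD_BLOCK_SIZE_TARGET:
        basefee += max(1, basefee * (data_length - SHARD_BLOCK_SIZE_TARGET) // quotient)
    elif data_length < SHARD_BLOCK_SIZE_TARGET:
        basefee -= max(1, basefee * (SHARD_BLOCK_SIZE_TARGET - data_length) // quotient)
    return max(1, min(BASEFEE_CAP, basefee))

# A full block pushes the fee up; an empty block pushes it down.
assert adjust_basefee(1000, SHARD_BLOCK_SIZE_LIMIT) > 1000
assert adjust_basefee(1000, 0) < 1000
```
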
### Beacon attestations

Let:

- `pre_state` be the `ShardState` before processing any blocks
- `shard_blocks_or_state_roots` be the `Union[ShardBlock, Hash]` list such that `shard_blocks[slot]` is the canonical `ShardBlock` for shard `pre_state.shard` at slot `slot` if a block exists, or the post-state-root of processing state up to and including that slot if a block does not exist
- `beacon_state` be the canonical `BeaconState`
- `valid_attestations` be the set of valid `Attestation` objects, recursively defined
- `candidate` be a candidate `Attestation` which is valid under Phase 0 rules, and for which validity is to be determined under Phase 1 rules by running `is_valid_beacon_attestation`

```python
def is_valid_beacon_attestation(pre_state: ShardState,
                                shard_blocks_or_state_roots: Sequence[Union[ShardBlock, Hash]],
                                beacon_state: BeaconState,
                                valid_attestations: Set[Attestation],
                                candidate: Attestation) -> bool:
    # ... (earlier validity checks elided in this excerpt) ...
    assert candidate.data.previous_attestation.epoch < compute_epoch_of_slot(candidate.data.slot)

    # Check crosslink data root
    start_epoch = beacon_state.crosslinks[pre_state.shard].epoch
    end_epoch = min(compute_epoch_of_slot(candidate.data.slot) - CROSSLINK_LOOKBACK,
                    start_epoch + MAX_EPOCHS_PER_CROSSLINK)
    blocks = []
    for slot in range(start_epoch * SLOTS_PER_EPOCH, end_epoch * SLOTS_PER_EPOCH):
        if isinstance(shard_blocks_or_state_roots[slot], ShardBlock):
            blocks.append(shard_blocks_or_state_roots[slot])
        else:
            blocks.append(ShardBlock(
                core=ExtendedShardBlockCore(
                    slot=slot,
                    state_root=shard_blocks_or_state_roots[slot],
                    total_bytes=pre_state.total_bytes,
                ),
                signatures=ShardBlockSignatures(),
            ))
    assert candidate.data.crosslink.data_root == compute_crosslink_data_root(blocks)

    return True
```

@ -6,22 +6,44 @@
<!-- TOC -->

- [Merkle proof formats](#merkle-proof-formats)
    - [Table of contents](#table-of-contents)
    - [Helper functions](#helper-functions)
    - [Generalized Merkle tree index](#generalized-merkle-tree-index)
    - [SSZ object to index](#ssz-object-to-index)
        - [Helpers for generalized indices](#helpers-for-generalized-indices)
            - [`concat_generalized_indices`](#concat_generalized_indices)
            - [`get_generalized_index_length`](#get_generalized_index_length)
            - [`get_generalized_index_bit`](#get_generalized_index_bit)
            - [`generalized_index_sibling`](#generalized_index_sibling)
            - [`generalized_index_child`](#generalized_index_child)
            - [`generalized_index_parent`](#generalized_index_parent)
    - [Merkle multiproofs](#merkle-multiproofs)

<!-- /TOC -->

## Helper functions

```python
def get_next_power_of_two(x: int) -> int:
    """
    Get next power of 2 >= the input.
    """
    if x <= 2:
        return x
    else:
        return 2 * get_next_power_of_two((x + 1) // 2)
```

```python
def get_previous_power_of_two(x: int) -> int:
    """
    Get the previous power of 2 <= the input.
    """
    if x <= 2:
        return x
    else:
        return 2 * get_previous_power_of_two(x // 2)
```

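A few example values as a quick sanity check (assuming the two definitions above):

```python
assert [get_next_power_of_two(x) for x in [1, 2, 3, 5, 8, 9]] == [1, 2, 4, 8, 8, 16]
assert [get_previous_power_of_two(x) for x in [1, 2, 3, 5, 8, 9]] == [1, 2, 2, 4, 8, 8]
```
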
## Generalized Merkle tree index

@ -37,13 +59,16 @@ In a binary Merkle tree, we define a "generalized index" of a node as `2**depth
Note that the generalized index has the convenient property that the two children of node `k` are `2k` and `2k+1`, and also that it equals the position of a node in the linear representation of the Merkle tree that's computed by this function:

```python
def merkle_tree(leaves: Sequence[Hash]) -> Sequence[Hash]:
    padded_length = get_next_power_of_two(len(leaves))
    o = [Hash()] * padded_length + list(leaves) + [Hash()] * (padded_length - len(leaves))
    for i in range(padded_length - 1, 0, -1):  # fill every internal node of the padded tree
        o[i] = hash(o[i * 2] + o[i * 2 + 1])
    return o
```

We define a custom type `GeneralizedIndex` as a Python integer type in this document. It can be represented as a Bitvector/Bitlist object as well.

We will define Merkle proofs in terms of generalized indices.

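As a quick illustration of the linear-representation property (a standalone sketch, using SHA-256 in place of the spec's `hash` and 32-byte strings in place of `Hash`):

```python
from hashlib import sha256

def hash_fn(data: bytes) -> bytes:
    return sha256(data).digest()

leaves = [bytes([i]) * 32 for i in range(4)]  # 4 leaves -> generalized indices 4..7
tree = [b'\x00' * 32] * 4 + leaves
for i in range(3, 0, -1):
    tree[i] = hash_fn(tree[2 * i] + tree[2 * i + 1])

# Node k's children sit at 2k and 2k+1; the root is at generalized index 1.
assert tree[1] == hash_fn(tree[2] + tree[3])
assert tree[2] == hash_fn(tree[4] + tree[5])
```
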
## SSZ object to index

@ -61,46 +86,151 @@ y_data_root len(y)
.......
```

We can now define a concept of a "path", a way of describing a function that takes as input an SSZ object and outputs some specific (possibly deeply nested) member. For example, `foo -> foo.x` is a path, as are `foo -> len(foo.y)` and `foo -> foo.y[5].w`. We'll describe paths as lists, which can have two representations. In "human-readable form", they are `["x"]`, `["y", "__len__"]` and `["y", 5, "w"]` respectively. In "encoded form", they are lists of `uint64` values, in these cases (assuming the fields of `foo` in order are `x` then `y`, and `w` is the first field of `y[i]`) `[0]`, `[1, 2**64-1]`, `[1, 5, 0]`. We define `SSZVariableName` as the member variable name string, i.e., a path is presented as a sequence of integers and `SSZVariableName`.

```python
def item_length(typ: SSZType) -> int:
    """
    Return the number of bytes in a basic type, or 32 (a full hash) for compound types.
    """
    if issubclass(typ, BasicValue):
        return typ.byte_len
    else:
        return 32
```

```python
def get_elem_type(typ: Union[BaseBytes, BaseList, Container],
                  index_or_variable_name: Union[int, SSZVariableName]) -> SSZType:
    """
    Return the type of the element of an object of the given type with the given index
    or member variable name (eg. `7` for `x[7]`, `"foo"` for `x.foo`)
    """
    return typ.get_fields()[index_or_variable_name] if issubclass(typ, Container) else typ.elem_type
```

```python
def chunk_count(typ: SSZType) -> int:
    """
    Return the number of hashes needed to represent the top-level elements in the given type
    (eg. `x.foo` or `x[7]` but not `x[7].bar` or `x.foo.baz`). In all cases except lists/vectors
    of basic types, this is simply the number of top-level elements, as each element gets one
    hash. For lists/vectors of basic types, it is often fewer because multiple basic elements
    can be packed into one 32-byte chunk.
    """
    # typ.length describes the limit for list types, or the length for vector types.
    if issubclass(typ, BasicValue):
        return 1
    elif issubclass(typ, Bits):
        return (typ.length + 255) // 256
    elif issubclass(typ, Elements):
        return (typ.length * item_length(typ.elem_type) + 31) // 32
    elif issubclass(typ, Container):
        return len(typ.get_fields())
    else:
        raise Exception(f"Type not supported: {typ}")
```

```python
def get_item_position(typ: SSZType, index_or_variable_name: Union[int, SSZVariableName]) -> Tuple[int, int, int]:
    """
    Return three variables:
    (i) the index of the chunk in which the given element of the item is represented;
    (ii) the starting byte position within the chunk;
    (iii) the ending byte position within the chunk.
    For example: for a 6-item list of uint64 values, index=2 will return (0, 16, 24), index=5 will return (1, 8, 16)
    """
    if issubclass(typ, Elements):
        index = int(index_or_variable_name)
        start = index * item_length(typ.elem_type)
        return start // 32, start % 32, start % 32 + item_length(typ.elem_type)
    elif issubclass(typ, Container):
        variable_name = index_or_variable_name
        return typ.get_field_names().index(variable_name), 0, item_length(get_elem_type(typ, variable_name))
    else:
        raise Exception("Only lists/vectors/containers supported")
```

```python
def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableName]]) -> Optional[GeneralizedIndex]:
    """
    Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for
    `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree.
    """
    root = GeneralizedIndex(1)
    for p in path:
        assert not issubclass(typ, BasicValue)  # If we descend to a basic type, the path cannot continue further
        if p == '__len__':
            if issubclass(typ, (List, Bytes)):  # only lists/bytes mix in a length; check before retyping
                root = GeneralizedIndex(root * 2 + 1)
            else:
                return None
            typ = uint64
        else:
            pos, _, _ = get_item_position(typ, p)
            base_index = (GeneralizedIndex(2) if issubclass(typ, (List, Bytes)) else GeneralizedIndex(1))
            root = GeneralizedIndex(root * base_index * get_next_power_of_two(chunk_count(typ)) + pos)
            typ = get_elem_type(typ, p)
    return root
```

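As a worked example of the arithmetic (plain integers; the container and vector shapes here are hypothetical, chosen only to mirror `get_generalized_index` step by step):

```python
def get_next_power_of_two(x: int) -> int:
    return x if x <= 2 else 2 * get_next_power_of_two((x + 1) // 2)

# Step 1: field 2 of a hypothetical 5-field container -> 5 chunks, padded to 8.
root = 1 * 1 * get_next_power_of_two(5) + 2        # = 10
# Step 2: element 9 of a Vector[uint64, 16] -> 16 * 8 // 32 = 4 chunks;
# element 9 lives in chunk 9 * 8 // 32 = 2.
root = root * 1 * get_next_power_of_two(4) + 2     # = 42
assert root == 42
```
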
### Helpers for generalized indices

_Usage note: functions outside this section should manipulate generalized indices using only functions inside this section. This is to make it easier for developers to implement generalized indices with underlying representations other than bigints._

#### `concat_generalized_indices`

```python
def concat_generalized_indices(indices: Sequence[GeneralizedIndex]) -> GeneralizedIndex:
    """
    Given generalized indices i1 for A -> B, i2 for B -> C .... i_n for Y -> Z, returns
    the generalized index for A -> Z.
    """
    o = GeneralizedIndex(1)
    for i in indices:
        o = GeneralizedIndex(o * get_previous_power_of_two(i) + (i - get_previous_power_of_two(i)))
    return o
```

#### `get_generalized_index_length`

```python
def get_generalized_index_length(index: GeneralizedIndex) -> int:
    """
    Return the length of a path represented by a generalized index.
    """
    return int(log2(index))
```

#### `get_generalized_index_bit`

```python
def get_generalized_index_bit(index: GeneralizedIndex, position: int) -> bool:
    """
    Return the given bit of a generalized index.
    """
    return (index & (1 << position)) > 0
```

#### `generalized_index_sibling`

```python
def generalized_index_sibling(index: GeneralizedIndex) -> GeneralizedIndex:
    return GeneralizedIndex(index ^ 1)
```

#### `generalized_index_child`

```python
def generalized_index_child(index: GeneralizedIndex, right_side: bool) -> GeneralizedIndex:
    return GeneralizedIndex(index * 2 + right_side)
```

#### `generalized_index_parent`

```python
def generalized_index_parent(index: GeneralizedIndex) -> GeneralizedIndex:
    return GeneralizedIndex(index // 2)
```

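A few concrete values tying these helpers together (assuming the definitions above; binary shown for intuition):

```python
# 9 = 0b1001: depth 3 below the root.
index = GeneralizedIndex(9)
assert get_generalized_index_length(index) == 3
assert generalized_index_parent(index) == 4
assert generalized_index_sibling(index) == 8
assert generalized_index_child(index, right_side=True) == 19
# Concatenating "left child" (2) then "right child" (3) yields 0b101 = 5.
assert concat_generalized_indices([GeneralizedIndex(2), GeneralizedIndex(3)]) == 5
```
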
## Merkle multiproofs

@ -116,72 +246,80 @@ x x . . . . x *

`.` are unused nodes, `*` are used nodes, and `x` are the values we are trying to prove. Notice how despite being a multiproof for 3 values, it requires only 3 auxiliary nodes, only one node more than would be required to prove a single value. Normally the efficiency gains are not quite that extreme, but the savings relative to individual Merkle proofs are still significant. As a rule of thumb, a multiproof for k nodes at the same level of an n-node tree has size `k * (n/k + log(n/k))`.

First, we provide a method for computing the generalized indices of the auxiliary tree nodes that a proof of a given set of generalized indices will require:

```python
def get_branch_indices(tree_index: GeneralizedIndex) -> Sequence[GeneralizedIndex]:
    """
    Get the generalized indices of the sister chunks along the path from the chunk with the
    given tree index to the root.
    """
    o = [generalized_index_sibling(tree_index)]
    while o[-1] > 1:
        o.append(generalized_index_sibling(generalized_index_parent(o[-1])))
    return o[:-1]
```

```python
def get_helper_indices(indices: Sequence[GeneralizedIndex]) -> Sequence[GeneralizedIndex]:
    """
    Get the generalized indices of all "extra" chunks in the tree needed to prove the chunks with the given
    generalized indices. Note that the decreasing order is chosen deliberately to ensure equivalence to the
    order of hashes in a regular single-item Merkle proof in the single-item case.
    """
    all_indices: Set[GeneralizedIndex] = set()
    for index in indices:
        all_indices = all_indices.union(set(list(get_branch_indices(index)) + [index]))

    return sorted([
        x for x in all_indices if (
            not (
                generalized_index_child(x, False) in all_indices and
                generalized_index_child(x, True) in all_indices
            ) and not (x in indices)
        )
    ], reverse=True)
```

Now we provide the Merkle proof verification functions. First, for single-item proofs:

```python
def verify_merkle_proof(leaf: Hash, proof: Sequence[Hash], index: GeneralizedIndex, root: Hash) -> bool:
    assert len(proof) == get_generalized_index_length(index)
    for i, h in enumerate(proof):
        if get_generalized_index_bit(index, i):
            leaf = hash(h + leaf)
        else:
            leaf = hash(leaf + h)
    return leaf == root
```

Now for multi-item proofs:

```python
def verify_merkle_multiproof(leaves: Sequence[Hash],
                             proof: Sequence[Hash],
                             indices: Sequence[GeneralizedIndex],
                             root: Hash) -> bool:
    assert len(leaves) == len(indices)
    helper_indices = get_helper_indices(indices)
    assert len(proof) == len(helper_indices)
    objects = {
        **{index: node for index, node in zip(indices, leaves)},
        **{index: node for index, node in zip(helper_indices, proof)}
    }
    keys = sorted(objects.keys(), reverse=True)
    pos = 0
    while pos < len(keys):
        k = keys[pos]
        if k in objects and k ^ 1 in objects and k // 2 not in objects:
            objects[GeneralizedIndex(k // 2)] = hash(
                objects[GeneralizedIndex((k | 1) ^ 1)] +
                objects[GeneralizedIndex(k | 1)]
            )
            keys.append(GeneralizedIndex(k // 2))
        pos += 1
    return objects[GeneralizedIndex(1)] == root
```

Note that the single-item proof is a special case of a multi-item proof; a valid single-item proof verifies correctly when put into the multi-item verification function (making the natural trivial changes to input arguments, `index -> [index]` and `leaf -> [leaf]`).

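To tie the pieces together, here is a self-contained sketch (SHA-256 standing in for the spec's `hash`, plain integers for `GeneralizedIndex`) that builds a 4-leaf tree, derives the helper indices for two leaves exactly as `get_helper_indices` would, and replays the multiproof verification loop:

```python
from hashlib import sha256

def hash_fn(data: bytes) -> bytes:
    return sha256(data).digest()

# Full 4-leaf tree: root at generalized index 1, leaves at 4..7.
tree = {i: bytes([i]) * 32 for i in range(4, 8)}
for i in (3, 2, 1):
    tree[i] = hash_fn(tree[2 * i] + tree[2 * i + 1])

def branch_indices(tree_index: int) -> list:
    # Sister chunks along the path from the given chunk to the root.
    o = [tree_index ^ 1]
    while o[-1] > 1:
        o.append((o[-1] // 2) ^ 1)
    return o[:-1]

def helper_indices(indices: list) -> list:
    all_indices = set()
    for index in indices:
        all_indices |= set(branch_indices(index) + [index])
    return sorted([
        x for x in all_indices
        if not (2 * x in all_indices and 2 * x + 1 in all_indices) and x not in indices
    ], reverse=True)

indices = [4, 6]                        # prove the 1st and 3rd leaves
helpers = helper_indices(indices)       # -> [7, 5]
objects = {**{i: tree[i] for i in indices}, **{i: tree[i] for i in helpers}}
keys = sorted(objects.keys(), reverse=True)
pos = 0
while pos < len(keys):
    k = keys[pos]
    if k in objects and k ^ 1 in objects and k // 2 not in objects:
        objects[k // 2] = hash_fn(objects[(k | 1) ^ 1] + objects[k | 1])
        keys.append(k // 2)
    pos += 1
assert objects[1] == tree[1]            # the proof reconstructs the root
```
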
@ -1,199 +1,187 @@
# Minimal Light Client Design

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->

- [Minimal Light Client Design](#minimal-light-client-design)
    - [Table of contents](#table-of-contents)
    - [Introduction](#introduction)
    - [Custom types](#custom-types)
    - [Constants](#constants)
    - [Containers](#containers)
        - [`LightClientUpdate`](#lightclientupdate)
    - [Helpers](#helpers)
        - [`LightClientMemory`](#lightclientmemory)
        - [`unpack_compact_validator`](#unpack_compact_validator)
        - [`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances)
    - [Light client state updates](#light-client-state-updates)
    - [Data overhead](#data-overhead)

<!-- /TOC -->

## Introduction

Ethereum 2.0 is designed to be light client friendly. This allows low-resource clients such as mobile phones to access Ethereum 2.0 with reasonable safety and liveness. It also facilitates the development of "bridges" to external blockchains. This document suggests a minimal light client design for the beacon chain.

## Custom types

We define the following Python custom types for type hinting and readability:

| Name | SSZ equivalent | Description |
| - | - | - |
| `CompactValidator` | `uint64` | compact representation of a validator for light clients |

## Constants

| Name | Value |
| - | - |
| `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH` | `4` |
| `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX` | **TBD** |
| `PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH` | `5` |
| `PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX` | **TBD** |

## Containers

### `LightClientUpdate`

```python
class LightClientUpdate(container):
    # Shard block root (and authenticating signature data)
    shard_block_root: Hash
    fork_version: Version
    aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
    signature: BLSSignature
    # Updated beacon header (and authenticating branch)
    header: BeaconBlockHeader
    header_branch: Vector[Hash, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH]
    # Updated persistent committee (and authenticating branch)
    committee: CompactCommittee
    committee_branch: Vector[Hash, PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)]
```

## Helpers

### `LightClientMemory`

```python
@dataclass
class LightClientMemory(object):
    shard: Shard  # Randomly initialized and retained forever
    header: BeaconBlockHeader  # Beacon header which is not expected to revert
    # Persistent committees corresponding to the beacon header
    previous_committee: CompactCommittee
    current_committee: CompactCommittee
    next_committee: CompactCommittee
```

### `unpack_compact_validator`

```python
def unpack_compact_validator(compact_validator: CompactValidator) -> Tuple[ValidatorIndex, bool, uint64]:
    """
    Return the index, slashed flag, and effective_balance // EFFECTIVE_BALANCE_INCREMENT of ``compact_validator``.
    """
    return (
        ValidatorIndex(compact_validator >> 16),
        bool((compact_validator >> 15) % 2),
        uint64(compact_validator & (2**15 - 1)),
    )
```

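The packing layout is the index shifted left by 16 bits, one slashed bit, and 15 bits of balance in increments. A quick round-trip sketch with plain integers (the `pack_compact_validator` helper here is illustrative, not part of the spec):

```python
def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int:
    # Illustrative inverse of unpack_compact_validator.
    assert balance_in_increments < 2**15
    return (index << 16) | (int(slashed) << 15) | balance_in_increments

compact = pack_compact_validator(12345, True, 32)
assert compact >> 16 == 12345            # index
assert (compact >> 15) % 2 == 1          # slashed bit
assert compact & (2**15 - 1) == 32       # balance in increments
```
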
### `get_persistent_committee_pubkeys_and_balances`

```python
def get_persistent_committee_pubkeys_and_balances(memory: LightClientMemory,
                                                  epoch: Epoch) -> Tuple[Sequence[BLSPubkey], Sequence[uint64]]:
    """
    Return pubkeys and balances for the persistent committee at ``epoch``.
    """
    current_period = compute_epoch_of_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD
    next_period = epoch // EPOCHS_PER_SHARD_PERIOD
    assert next_period in (current_period, current_period + 1)
    if next_period == current_period:
        earlier_committee, later_committee = memory.previous_committee, memory.current_committee
    else:
        earlier_committee, later_committee = memory.current_committee, memory.next_committee

    pubkeys = []
    balances = []
    for pubkey, compact_validator in zip(earlier_committee.pubkeys, earlier_committee.compact_validators):
        index, slashed, balance = unpack_compact_validator(compact_validator)
        if epoch % EPOCHS_PER_SHARD_PERIOD < index % EPOCHS_PER_SHARD_PERIOD:
            pubkeys.append(pubkey)
            balances.append(balance)
    for pubkey, compact_validator in zip(later_committee.pubkeys, later_committee.compact_validators):
        index, slashed, balance = unpack_compact_validator(compact_validator)
        if epoch % EPOCHS_PER_SHARD_PERIOD >= index % EPOCHS_PER_SHARD_PERIOD:
            pubkeys.append(pubkey)
            balances.append(balance)
    return pubkeys, balances
```

## Light client state updates

The state of a light client is stored in a `memory` object of type `LightClientMemory`. To advance its state, a light client requests an `update` object of type `LightClientUpdate` from the network by sending a request containing `(memory.shard, memory.header.slot, slot_range_end)` and calls `update_memory(memory, update)`.

```python
def update_memory(memory: LightClientMemory, update: LightClientUpdate) -> None:
    # Verify the update does not skip a period
    current_period = compute_epoch_of_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD
    next_epoch = compute_epoch_of_shard_slot(update.header.slot)
    next_period = next_epoch // EPOCHS_PER_SHARD_PERIOD
    assert next_period in (current_period, current_period + 1)

    # Verify update header against shard block root and header branch
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(update.header),
        branch=update.header_branch,
        depth=BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH,
        index=BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX,
        root=update.shard_block_root,
    )

    # Verify persistent committee votes pass 2/3 threshold
    pubkeys, balances = get_persistent_committee_pubkeys_and_balances(memory, next_epoch)
    assert 3 * sum(balance for i, balance in enumerate(balances) if update.aggregation_bits[i]) > 2 * sum(balances)

    # Verify shard attestations
    pubkey = bls_aggregate_pubkeys([pubkey for i, pubkey in enumerate(pubkeys) if update.aggregation_bits[i]])
    domain = compute_domain(DOMAIN_SHARD_ATTESTER, update.fork_version)
    assert bls_verify(pubkey, update.shard_block_root, update.signature, domain)

    # Update persistent committees if entering a new period
    if next_period == current_period + 1:
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.committee),
            branch=update.committee_branch,
            depth=PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT),
            index=(PERSISTENT_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX << log_2(SHARD_COUNT)) + memory.shard,
            root=hash_tree_root(update.header),
        )
        memory.previous_committee = memory.current_committee
        memory.current_committee = memory.next_committee
        memory.next_committee = update.committee

    # Update header
    memory.header = update.header
```

## Data overhead

Once every `EPOCHS_PER_SHARD_PERIOD` epochs (~27 hours) a light client downloads a `LightClientUpdate` object:

* `shard_block_root`: 32 bytes
* `fork_version`: 4 bytes
* `aggregation_bits`: 16 bytes
* `signature`: 96 bytes
* `header`: 8 + 32 + 32 + 32 + 96 = 200 bytes
* `header_branch`: 4 * 32 = 128 bytes
* `committee`: 128 * (48 + 8) = 7,168 bytes
* `committee_branch`: (5 + 10) * 32 = 480 bytes

The total overhead is 8,124 bytes, or ~0.083 bytes per second. The Bitcoin SPV equivalent is 80 bytes per ~560 seconds, or ~0.143 bytes per second. Various compression optimisations (similar to [these](https://github.com/RCasatta/compressedheaders)) are possible.

A light client can choose to update the header (without updating the committee) more frequently than once every `EPOCHS_PER_SHARD_PERIOD` epochs at a cost of 32 + 4 + 16 + 96 + 200 + 128 = 476 bytes per update.

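A quick check of the totals above (sizes in bytes; pure arithmetic, no spec dependencies):

```python
update_fields = {
    "shard_block_root": 32,
    "fork_version": 4,
    "aggregation_bits": 16,
    "signature": 96,
    "header": 8 + 32 + 32 + 32 + 96,   # 200
    "header_branch": 4 * 32,           # 128
    "committee": 128 * (48 + 8),       # 7168
    "committee_branch": (5 + 10) * 32, # 480
}
assert sum(update_fields.values()) == 8124
# Header-only updates skip the committee and its branch:
assert 8124 - update_fields["committee"] - update_fields["committee_branch"] == 476
```
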
@ -1,158 +0,0 @@
ETH 2.0 Networking Spec - Libp2p standard protocols
===

# Abstract

Ethereum 2.0 clients plan to use the libp2p protocol networking stack for mainnet release. This document aims to standardize the libp2p client protocols, configuration and messaging formats.

# Libp2p Components

## Transport

This section details the libp2p transport layer that underlies the [protocols](#protocols) that are listed in this document.

Libp2p allows composition of multiple transports. Eth2.0 clients should support TCP/IP and optionally websockets. Websockets are useful for implementations running in the browser, so native clients would ideally support browser-based implementations by supporting websockets.

An ideal libp2p transport would therefore support both TCP/IP and websockets.

*Note: There is active development in libp2p to facilitate the [QUIC](https://github.com/libp2p/go-libp2p-quic-transport) transport, which may be adopted in the future.*

### Encryption

Libp2p currently offers [Secio](https://github.com/libp2p/specs/pull/106), which can upgrade a transport and then encrypt all future communication. Secio generates a symmetric ephemeral key which peers use to encrypt their communication. It can support a range of ciphers and currently supports key derivation for elliptic curve-based public keys.

Current defaults are:

- Key agreement: `ECDH-P256` (also supports `ECDH-P384`)
- Cipher: `AES-128` (also supports `AES-256`, `TwofishCTR`)
- Digests: `SHA256` (also supports `SHA512`)

*Note: Secio is being deprecated in favour of [TLS 1.3](https://github.com/libp2p/specs/blob/master/tls/tls.md). It is our intention to transition to use TLS 1.3 for encryption between nodes, rather than Secio.*

## Protocols

This section lists the necessary libp2p protocols required by Ethereum 2.0 running a libp2p network stack.

## Multistream-select

#### Protocol id: `/multistream/1.0.0`

Clients running libp2p should support the [multistream-select](https://github.com/multiformats/multistream-select/) protocol, which allows clients to negotiate libp2p protocols and establish streams per protocol.

## Multiplexing

Libp2p allows clients to compose multiple multiplexing methods. Clients should support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and optionally [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) (these can be composed).

**Mplex protocol id: `/mplex/6.7.0`**

**Yamux protocol id: `/yamux/1.0.0`**

## Gossipsub

#### Protocol id: `/eth/serenity/gossipsub/1.0.0`

*Note: Parameters listed here are subject to a large-scale network feasibility study.*

The [Gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) protocol is used for block and attestation propagation across the network.

### Configuration Parameters

Gossipsub has a number of internal configuration parameters which directly affect the network performance. Clients can implement these independently; however, we aim to standardize them across clients to optimize the gossip network for propagation times and message duplication. Current network-related defaults are:

```
(
    // The target number of peers in the overlay mesh network (D in the libp2p specs).
    mesh_size: 6
    // The minimum number of peers in the mesh network before adding more (D_lo in the libp2p specs).
    mesh_lo: 4
    // The maximum number of peers in the mesh network before removing some (D_high in the libp2p specs).
    mesh_high: 12
    // The number of peers to gossip to during a heartbeat (D_lazy in the libp2p specs).
    gossip_lazy: 6 // defaults to `mesh_size`
    // Time to live for fanout peers (seconds).
    fanout_ttl: 60
    // The number of heartbeats to gossip about.
    gossip_history: 3
    // Time between each heartbeat (seconds).
    heartbeat_interval: 1
)
```

### Topics

*The Go and Js implementations use string topics. This is likely to be updated to topic hashes in later versions: https://github.com/libp2p/rust-libp2p/issues/473*

For Eth2.0 clients, topics are sent as `SHA2-256` hashes of the topic string.

There are two main topics used to propagate attestations and beacon blocks to all nodes on the network:

- The `beacon_block` topic - This topic is used solely for propagating new beacon blocks to all nodes on the networks.
- The `beacon_attestation` topic - This topic is used to propagate aggregated attestations to subscribing nodes (typically block proposers) to be included in future blocks. Attestations are aggregated in their respective subnets before publishing on this topic.

Shards are grouped into their own subnets (defined by a shard topic). The number of shard subnets is defined via `SHARD_SUBNET_COUNT`, and the shard `shard_number % SHARD_SUBNET_COUNT` is assigned to the topic `shard{shard_number % SHARD_SUBNET_COUNT}_attestation`.

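For example, a shard's attestation topic and its hashed form could be derived as follows (a sketch; the `SHARD_SUBNET_COUNT` value here is illustrative):

```python
from hashlib import sha256

SHARD_SUBNET_COUNT = 16  # illustrative value, not the configured constant

def shard_attestation_topic(shard_number: int) -> str:
    return f"shard{shard_number % SHARD_SUBNET_COUNT}_attestation"

topic = shard_attestation_topic(shard_number=37)  # -> "shard5_attestation"
topic_hash = sha256(topic.encode()).digest()      # topics are sent as SHA2-256 hashes
```
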
### Messages

*Note: The message format here is Eth2.0-specific.*

Each Gossipsub [Message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of 512KB (estimated from the expected largest uncompressed block size).

The `data` field of a Gossipsub `Message` is an SSZ-encoded object. For the `beacon_block` topic, this is a `beacon_block`. For the `beacon_attestation` topic, this is an `attestation`.

## Eth-2 RPC

#### Protocol Id: `/eth/serenity/beacon/rpc/1`

The [RPC Interface](./rpc-interface.md) is specified in this repository.

## Discovery

Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) will be used for discovery. This protocol uses a UDP transport and specifies its own encryption, ip-discovery and topic advertisement. Therefore, it has no need to establish streams through `multistream-select`; rather, it acts as a standalone implementation that feeds discovered peers/topics (ENR-records) as `multiaddrs` into the libp2p service.

@ -1,45 +0,0 @@
# Eth 2.0 Networking Spec - Messaging

## Abstract

This specification describes how individual Ethereum 2.0 messages are represented on the wire.

The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119).

## Motivation

This specification seeks to define a messaging protocol that is flexible enough to be changed easily as the Eth 2.0 specification evolves.

Note that while `libp2p` is the chosen networking stack for Ethereum 2.0, as of this writing some clients do not have workable `libp2p` implementations. To allow those clients to communicate, we define a message envelope that includes the body's compression, encoding, and body length. Once `libp2p` is available across all implementations, this message envelope will be removed because `libp2p` will negotiate the values defined in the envelope upfront.

## Specification

### Message structure

An Eth 2.0 message consists of an envelope that defines the message's compression, encoding, and length, followed by the body itself.

Visually, a message looks like this:

```
+--------------------------+
|    compression nibble    |
+--------------------------+
|     encoding nibble      |
+--------------------------+
|   body length (uint64)   |
+--------------------------+
|                          |
|           body           |
|                          |
+--------------------------+
```

Clients MUST ignore messages with malformed bodies. The compression/encoding nibbles MUST be one of the following values:

### Compression nibble values

- `0x0`: no compression

### Encoding nibble values

- `0x1`: SSZ

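For illustration, a minimal encoder/decoder for this envelope, assuming the two nibbles are packed into a single leading byte (compression in the high nibble, encoding in the low nibble) and a big-endian `uint64` length; the spec text above does not pin these details down, so treat both as assumptions:

```python
import struct

def encode_message(body: bytes, compression: int = 0x0, encoding: int = 0x1) -> bytes:
    # One byte holding both nibbles (assumed layout), then an 8-byte length, then the body.
    assert compression < 16 and encoding < 16
    return bytes([(compression << 4) | encoding]) + struct.pack(">Q", len(body)) + body

def decode_message(data: bytes):
    compression, encoding = data[0] >> 4, data[0] & 0x0F
    (length,) = struct.unpack(">Q", data[1:9])
    body = data[9:9 + length]
    assert len(body) == length  # malformed bodies MUST be ignored
    return compression, encoding, body

assert decode_message(encode_message(b"ssz-bytes")) == (0x0, 0x1, b"ssz-bytes")
```
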
@ -1,31 +0,0 @@
# Eth 2.0 Networking Spec - Node Identification

## Abstract

This specification describes how Ethereum 2.0 nodes identify and address each other on the network.

The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119).

## Specification

Clients use Ethereum Node Records (as described in [EIP-778](http://eips.ethereum.org/EIPS/eip-778)) to discover one another. Each ENR includes, among other things, the following keys:

- The node's IP.
- The node's TCP port.
- The node's public key.

For clients to be addressable, their ENR responses MUST contain all of the above keys. Clients MUST verify the signature of any received ENRs, and disconnect from peers whose ENR signatures are invalid. Each node's public key MUST be unique.

The keys above are enough to construct a [multiaddr](https://github.com/multiformats/multiaddr) for use with the rest of the `libp2p` stack.

It is RECOMMENDED that clients set their TCP port to the default of `9000`.

### Peer ID generation

The `libp2p` networking stack identifies peers via a "peer ID." Simply put, a node's Peer ID is the SHA2-256 `multihash` of the node's public key struct (serialized in protobuf; refer to the [Peer ID spec](https://github.com/libp2p/specs/pull/100)). `go-libp2p-crypto` contains the canonical implementation of how to hash `secp256k1` keys for use as a peer ID.

## See also

- [multiaddr](https://github.com/multiformats/multiaddr)
- [multihash](https://multiformats.io/multihash/)
- [go-libp2p-crypto](https://github.com/libp2p/go-libp2p-crypto)

specs/networking/p2p-interface.md (new file, 772 lines)
@ -0,0 +1,772 @@

# Ethereum 2.0 networking specification

This document contains the networking specification for Ethereum 2.0 clients.

It consists of four main sections:

1. A specification of the network fundamentals detailing the two network configurations: interoperability test network and mainnet launch.
2. A specification of the three network interaction *domains* of Eth 2.0: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain.
3. The rationale and further explanation for the design choices made in the previous two sections.
4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth 2.0 clients are being developed.

## Table of contents

<!-- cmd: doctoc --maxlevel=2 p2p-interface.md -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Network fundamentals](#network-fundamentals)
|
||||
- [Transport](#transport)
|
||||
- [Encryption and identification](#encryption-and-identification)
|
||||
- [Protocol negotiation](#protocol-negotiation)
|
||||
- [Multiplexing](#multiplexing)
|
||||
- [Eth 2.0 network interaction domains](#eth-20-network-interaction-domains)
|
||||
- [Configuration](#configuration)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [The discovery domain: discv5](#the-discovery-domain-discv5)
|
||||
- [Design decision rationale](#design-decision-rationale)
|
||||
- [Transport](#transport-1)
|
||||
- [Multiplexing](#multiplexing-1)
|
||||
- [Protocol negotiation](#protocol-negotiation-1)
|
||||
- [Encryption](#encryption)
|
||||
- [Gossipsub](#gossipsub)
|
||||
- [Req/Resp](#reqresp)
|
||||
- [Discovery](#discovery)
|
||||
- [Compression/Encoding](#compressionencoding)
|
||||
- [libp2p implementations matrix](#libp2p-implementations-matrix)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
# Network fundamentals
|
||||
|
||||
This section outlines the specification for the networking stack in Ethereum 2.0 clients.
|
||||
|
||||
Sections that have differing parameters for mainnet launch and interoperability testing are split into subsections. Sections that are not split have the same parameters for interoperability testing as mainnet launch.
|
||||
|
||||
## Transport
|
||||
|
||||
Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability.
|
||||
|
||||
#### Interop
|
||||
|
||||
All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously).
|
||||
|
||||
To facilitate connectivity and avert possible IPv6 routability/support issues, clients participating in the interoperability testnet MUST expose at least ONE IPv4 endpoint.
|
||||
|
||||
All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities.
|
||||
|
||||
Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint.
|
||||
|
||||
#### Mainnet
|
||||
|
||||
All requirements from the interoperability testnet apply, except for the IPv4 addressing scheme requirement.
|
||||
|
||||
At this stage, clients are licensed to drop IPv4 support if they wish to do so, cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST retain capability to dial both IPv4 and IPv6 addresses.
|
||||
|
||||
Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined closer to the time.
|
||||
|
||||
## Encryption and identification
|
||||
|
||||
#### Interop
|
||||
|
||||
[SecIO](https://github.com/libp2p/specs/tree/master/secio) with `secp256k1` identities will be used for initial interoperability testing.
|
||||
|
||||
The following SecIO parameters MUST be supported by all stacks:
|
||||
|
||||
- Key agreement: ECDH-P256.
|
||||
- Cipher: AES-128.
|
||||
- Digest: SHA-256.
|
||||
|
||||
#### Mainnet
|
||||
|
||||
[Noise Framework](http://www.noiseprotocol.org/) handshakes will be used for mainnet. libp2p Noise support [is in the process of being standardized](https://github.com/libp2p/specs/issues/195) in the libp2p project.
|
||||
|
||||
Noise support will presumably include IX, IK, and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA-256 as a hash function. These aspects are being actively debated in the referenced issue (Eth 2.0 implementers are welcome to comment and contribute to the discussion).
|
||||
|
||||
## Protocol Negotiation
|
||||
|
||||
Clients MUST use exact equality when negotiating protocol versions, and MAY use the version number to give priority to higher versions.
|
||||
|
||||
#### Interop
|
||||
|
||||
Connection-level and stream-level (see the [Rationale](#design-decision-rationale) section below for explanations) protocol negotiation MUST be conducted using [multistream-select v1.0](https://github.com/multiformats/multistream-select/). Its protocol ID is: `/multistream/1.0.0`.
|
||||
|
||||
#### Mainnet
|
||||
|
||||
Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95). Depending on the number of clients that have implementations for multiselect 2.0 by mainnet, [multistream-select 1.0](https://github.com/multiformats/multistream-select/) may be phased out.
|
||||
|
||||
## Multiplexing
|
||||
|
||||
During connection bootstrapping, libp2p dynamically negotiates a mutually supported multiplexing method to conduct parallel conversations. This applies to transports that are natively incapable of multiplexing (e.g. TCP, WebSockets, WebRTC), and is omitted for capable transports (e.g. QUIC).
|
||||
|
||||
Two multiplexers are commonplace in libp2p implementations: [mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`.
|
||||
|
||||
Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux must take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs.
|
||||
|
||||
# Eth 2.0 network interaction domains
|
||||
|
||||
## Configuration
|
||||
|
||||
This section outlines constants that are used in this spec.
|
||||
|
||||
| Name | Value | Description |
|
||||
|---|---|---|
|
||||
| `REQ_RESP_MAX_SIZE` | `TODO` | The maximum size of uncompressed req/resp messages that clients will allow. |
|
||||
| `SSZ_MAX_LIST_SIZE` | `TODO` | The maximum size of SSZ-encoded variable lists. |
|
||||
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum size of uncompressed gossip messages. |
|
||||
| `SHARD_SUBNET_COUNT` | `TODO` | The number of shard subnets used in the gossipsub protocol. |
|
||||
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
|
||||
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
|
||||
|
||||
## The gossip domain: gossipsub
|
||||
|
||||
Clients MUST support the [gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub) libp2p protocol.
|
||||
|
||||
**Protocol ID:** `/meshsub/1.0.0`
|
||||
|
||||
**Gossipsub Parameters**
|
||||
|
||||
*Note*: Parameters listed here are subject to a large-scale network feasibility study.
|
||||
|
||||
The following gossipsub [parameters](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub#meshsub-an-overlay-mesh-router) will be used:
|
||||
|
||||
- `D` (topic stable mesh target count): 6
|
||||
- `D_low` (topic stable mesh low watermark): 4
|
||||
- `D_high` (topic stable mesh high watermark): 12
|
||||
- `D_lazy` (gossip target): 6
|
||||
- `fanout_ttl` (ttl for fanout maps for topics we are not subscribed to but have published to, seconds): 60
|
||||
- `gossip_advertise` (number of windows to gossip about): 3
|
||||
- `gossip_history` (number of heartbeat intervals to retain message IDs): 5
|
||||
- `heartbeat_interval` (frequency of heartbeat, seconds): 1
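For illustration, the parameter set above expressed as a plain Python mapping, as it might be handed to a gossipsub implementation. This is only a sketch: key names are assumptions here, and each gossipsub library spells them differently.

```python
# Hypothetical sketch: the gossipsub parameters above as a Python mapping.
GOSSIPSUB_PARAMS = {
    "D": 6,                   # topic stable mesh target count
    "D_low": 4,               # topic stable mesh low watermark
    "D_high": 12,             # topic stable mesh high watermark
    "D_lazy": 6,              # gossip target
    "fanout_ttl": 60,         # seconds to keep fanout maps alive
    "gossip_advertise": 3,    # number of windows to gossip about
    "gossip_history": 5,      # heartbeat intervals to retain message IDs
    "heartbeat_interval": 1,  # seconds between heartbeats
}
```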
|
||||
|
||||
### Topics
|
||||
|
||||
Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages).
|
||||
|
||||
Topic strings have the form: `/eth2/TopicName/TopicEncoding`. This defines both the type of data being sent on the topic and how the data field of the message is encoded. (Further details can be found in [Messages](#Messages).)
|
||||
|
||||
There are two main topics used to propagate attestations and beacon blocks to all nodes on the network. Their `TopicName`s are:
|
||||
|
||||
- `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the network. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network.
|
||||
- `beacon_attestation` - This topic is used to propagate aggregated attestations (in their entirety) to subscribing nodes (typically block proposers) to be included in future blocks. Clients MUST validate that the block being voted for passes validation before forwarding the attestation on the network (TODO: [additional validations](https://github.com/ethereum/eth2.0-specs/issues/1332)).
|
||||
|
||||
Additional topics are used to propagate lower frequency validator messages. Their `TopicName`s are:
|
||||
|
||||
- `voluntary_exit` - This topic is used solely for propagating voluntary validator exits to proposers on the network. Voluntary exits are sent in their entirety. Clients who receive a voluntary exit on this topic MUST validate the conditions within `process_voluntary_exit` before forwarding it across the network.
|
||||
- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. Clients who receive a proposer slashing on this topic MUST validate the conditions within `process_proposer_slashing` before forwarding it across the network.
|
||||
- `attester_slashing` - This topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network.
|
||||
|
||||
#### Interop
|
||||
|
||||
Unaggregated and aggregated attestations from all shards are sent to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them.
|
||||
|
||||
#### Mainnet
|
||||
|
||||
Shards are grouped into their own subnets (each defined by a shard topic). The number of shard subnets is defined via `SHARD_SUBNET_COUNT`, and shard `shard_number` is assigned to the subnet topic `shard{shard_number % SHARD_SUBNET_COUNT}_beacon_attestation`. Unaggregated attestations are sent to the subnet topic. Aggregated attestations are sent to the `beacon_attestation` topic.
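A minimal sketch of this shard-to-topic mapping. Note that `SHARD_SUBNET_COUNT` is still `TODO` in the configuration table, so the value below is a placeholder:

```python
SHARD_SUBNET_COUNT = 8  # placeholder: the real value is TODO in the config table

def shard_attestation_topic(shard_number: int, encoding: str = "ssz") -> str:
    """Full gossip topic string for a shard's unaggregated attestations."""
    subnet = shard_number % SHARD_SUBNET_COUNT
    return f"/eth2/shard{subnet}_beacon_attestation/{encoding}"

# With the placeholder count of 8, shard 13 maps to subnet 5:
assert shard_attestation_topic(13) == "/eth2/shard5_beacon_attestation/ssz"
```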
|
||||
|
||||
TODO: [aggregation strategy](https://github.com/ethereum/eth2.0-specs/issues/1331)
|
||||
|
||||
### Messages
|
||||
|
||||
Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`.
|
||||
|
||||
Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit.
|
||||
|
||||
The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic:
|
||||
|
||||
|
||||
| Topic | Message Type |
|
||||
|------------------------------|-------------------|
|
||||
| beacon_block | BeaconBlock |
|
||||
| beacon_attestation | Attestation |
|
||||
| shard{N}\_beacon_attestation | Attestation |
|
||||
| voluntary_exit | VoluntaryExit |
|
||||
| proposer_slashing | ProposerSlashing |
|
||||
| attester_slashing | AttesterSlashing |
|
||||
|
||||
Clients MUST reject (fail validation) messages containing an incorrect type or an invalid payload.
|
||||
|
||||
When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints.
|
||||
|
||||
### Encodings
|
||||
|
||||
Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded.
|
||||
|
||||
#### Interop
|
||||
|
||||
- `ssz` - All objects are [SSZ-encoded](#ssz-encoding). Example: The beacon block topic string is `/eth2/beacon_block/ssz`, and the data field of a gossipsub message is an ssz-encoded `BeaconBlock`.
|
||||
|
||||
#### Mainnet
|
||||
|
||||
- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). Example: The beacon attestation topic string is `/eth2/beacon_attestation/ssz_snappy`, and the data field of a gossipsub message is an `Attestation` that has been SSZ-encoded and then compressed with Snappy.
|
||||
|
||||
Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations.
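As a sketch, applying the encoding token to an already SSZ-serialized payload might look as follows. This assumes the `python-snappy` package for the mainnet variant; it is illustrative, not a mandated API.

```python
import snappy  # assumption: the python-snappy package provides this module

def encode_gossip_data(ssz_bytes: bytes, encoding: str) -> bytes:
    """Apply a topic's encoding token to an already SSZ-encoded payload."""
    if encoding == "ssz":
        return ssz_bytes
    if encoding == "ssz_snappy":
        return snappy.compress(ssz_bytes)
    raise ValueError(f"unsupported encoding: {encoding}")
```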
|
||||
|
||||
## The Req/Resp domain
|
||||
|
||||
### Protocol identification
|
||||
|
||||
Each message type is segregated into its own libp2p protocol ID, which is a case-sensitive UTF-8 string of the form:
|
||||
|
||||
```
|
||||
/ProtocolPrefix/MessageName/SchemaVersion/Encoding
|
||||
```
|
||||
|
||||
With:
|
||||
|
||||
- `ProtocolPrefix` - messages are grouped into families identified by a shared libp2p protocol name prefix. In this case, we use `/eth2/beacon_chain/req`.
|
||||
- `MessageName` - each request is identified by a name consisting of English letters, digits, and underscores (`_`).
|
||||
- `SchemaVersion` - an ordinal version number (e.g. 1, 2, 3…). Each schema is versioned to facilitate backward and forward-compatibility when possible.
|
||||
- `Encoding` - while the schema defines the data types in more abstract terms, the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encoding-strategies) section for further details.
|
||||
|
||||
This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0` to handle the request type, version, and encoding negotiation before establishing the underlying streams.
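A sketch of how the four components assemble into a protocol ID (the `hello` example matches the Messages section below):

```python
def protocol_id(message_name: str, schema_version: int, encoding: str) -> str:
    """Assemble a Req/Resp protocol ID from its components."""
    prefix = "/eth2/beacon_chain/req"
    return f"{prefix}/{message_name}/{schema_version}/{encoding}"

assert protocol_id("hello", 1, "ssz") == "/eth2/beacon_chain/req/hello/1/ssz"
```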
|
||||
|
||||
### Req/Resp interaction
|
||||
|
||||
We use ONE stream PER request/response interaction. Streams are closed when the interaction finishes, whether in success or in error.
|
||||
|
||||
Request/response messages MUST adhere to the encoding specified in the protocol name and follow this structure (relaxed BNF grammar):
|
||||
|
||||
```
|
||||
request ::= <encoding-dependent-header> | <encoded-payload>
|
||||
response ::= <result> | <encoding-dependent-header> | <encoded-payload>
|
||||
result ::= "0" | "1" | "2" | ["128" ... "255"]
|
||||
```
|
||||
|
||||
The encoding-dependent header may carry metadata or assertions such as the encoded payload length, for integrity and attack proofing purposes. Because req/resp streams are single-use and stream closures implicitly delimit the boundaries, it is not strictly necessary to length-prefix payloads; however, certain encodings like SSZ do, for added security.
|
||||
|
||||
`encoded-payload` has a maximum byte size of `REQ_RESP_MAX_SIZE`.
|
||||
|
||||
Clients MUST ensure the payload size is less than or equal to `REQ_RESP_MAX_SIZE`; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance.
|
||||
|
||||
#### Requesting side
|
||||
|
||||
Once a new stream with the protocol ID for the request type has been negotiated, the full request message should be sent immediately. It should be encoded according to the encoding strategy.
|
||||
|
||||
The requester MUST close the write side of the stream once it finishes writing the request message—at this point, the stream will be half-closed.
|
||||
|
||||
The requester MUST wait a maximum of `TTFB_TIMEOUT` for the first response byte to arrive (the time-to-first-byte, or TTFB, timeout). Once it arrives, the requester allows a further `RESP_TIMEOUT` to receive the full response.
|
||||
|
||||
If any of these timeouts fire, the requester SHOULD reset the stream and deem the req/resp operation to have failed.
|
||||
|
||||
#### Responding side
|
||||
|
||||
Once a new stream with the protocol ID for the request type has been negotiated, the responder must process the incoming request message according to the encoding strategy, until EOF (denoting stream half-closure by the requester).
|
||||
|
||||
The responder MUST:
|
||||
|
||||
1. Use the encoding strategy to read the optional header.
|
||||
2. If there are any length assertions for length `N`, it should read exactly `N` bytes from the stream, at which point an EOF should arise (no more bytes). Should this not be the case, it should be treated as a failure.
|
||||
3. Deserialize the expected type, and process the request.
|
||||
4. Write the response (result, optional header, payload).
|
||||
5. Close their write side of the stream. At this point, the stream will be fully closed.
|
||||
|
||||
If steps (1), (2), or (3) fail due to invalid, malformed, or inconsistent data, the responder MUST respond in error. Clients tracking peer reputation MAY record such failures, as well as unexpected events, e.g. early stream resets.
|
||||
|
||||
The entire request should be read in no more than `RESP_TIMEOUT`. Upon a timeout, the responder SHOULD reset the stream.
|
||||
|
||||
The responder SHOULD send a response promptly, starting with a **single-byte** response code which determines the contents of the response (`result` particle in the BNF grammar above).
|
||||
|
||||
It can have one of the following values, encoded as a single unsigned byte:
|
||||
|
||||
- 0: **Success** -- a normal response follows, with contents matching the expected message schema and encoding specified in the request.
|
||||
- 1: **InvalidRequest** -- the contents of the request are semantically invalid, or the payload is malformed, or could not be understood. The response payload adheres to the `ErrorMessage` schema (described below).
|
||||
- 2: **ServerError** -- the responder encountered an error while processing the request. The response payload adheres to the `ErrorMessage` schema (described below).
|
||||
|
||||
Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses.
|
||||
|
||||
The range `[3, 127]` is RESERVED for future usage, and should be treated as an error if not expressly recognized.
|
||||
|
||||
The `ErrorMessage` schema is:
|
||||
|
||||
```
|
||||
(
|
||||
error_message: String
|
||||
)
|
||||
```
|
||||
|
||||
*Note*: The String type is encoded as UTF-8 bytes without a NULL terminator when SSZ-encoded. As `ErrorMessage` is not an SSZ container, only the UTF-8 bytes will be sent when SSZ-encoded.
|
||||
|
||||
A response therefore has the form:
|
||||
```
|
||||
+--------+--------+--------+--------+--------+--------+
|
||||
| result | header (opt) | encoded_response |
|
||||
+--------+--------+--------+--------+--------+--------+
|
||||
```
|
||||
Here, `result` represents the 1-byte response code.
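A minimal sketch of the sending side of this structure, assuming the header is the varint length prefix defined under [Encoding strategies](#encoding-strategies) below:

```python
def encode_varint(value: int) -> bytes:
    """Unsigned protobuf varint: 7 bits per byte, MSB set on continuation."""
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

SUCCESS, INVALID_REQUEST, SERVER_ERROR = 0, 1, 2

def encode_response(result: int, ssz_payload: bytes) -> bytes:
    """result byte, then the varint length header, then the encoded payload."""
    assert 0 <= result <= 255
    return bytes([result]) + encode_varint(len(ssz_payload)) + ssz_payload
```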
|
||||
|
||||
### Encoding strategies
|
||||
|
||||
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time:
|
||||
|
||||
- `ssz`: The contents are [SSZ-encoded](../simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded, not a container with a single field. For example, the `BeaconBlocks` response would be an SSZ-encoded list of `BeaconBlock`s. All SSZ-Lists in the Req/Resp domain will have a maximum list size of `SSZ_MAX_LIST_SIZE`.
|
||||
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet.
|
||||
|
||||
#### SSZ-encoding strategy (with or without Snappy)
|
||||
|
||||
The [SimpleSerialize (SSZ) specification](../simple-serialize.md) outlines how objects are SSZ-encoded. If the Snappy variant is selected, we feed the serialized form to the Snappy compressor on encoding. The inverse happens on decoding.
|
||||
|
||||
**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST prefix all encoded and compressed (if applicable) payloads with an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
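On the reading side, a sketch of consuming that header and enforcing the length assertion. The `REQ_RESP_MAX_SIZE` value below is a placeholder, since the configuration table above leaves it `TODO`:

```python
import io

REQ_RESP_MAX_SIZE = 2**22  # placeholder: the real value is TODO in the config table

def decode_varint(stream) -> int:
    """Read an unsigned protobuf varint from a binary stream."""
    result, shift = 0, 0
    while True:
        chunk = stream.read(1)
        if not chunk:
            raise EOFError("stream ended inside the varint header")
        (b,) = chunk
        result |= (b & 0x7F) << shift
        if not b & 0x80:
            return result
        shift += 7

def read_payload(stream) -> bytes:
    """Read the length header, check the size limit, then read exactly N bytes."""
    length = decode_varint(stream)
    if length > REQ_RESP_MAX_SIZE:
        raise ValueError("payload exceeds REQ_RESP_MAX_SIZE; reset the stream")
    payload = stream.read(length)
    if len(payload) != length or stream.read(1):
        raise ValueError("length assertion failed: byte count does not match")
    return payload

# Round-trip example with an in-memory stream (varint 3, then 3 payload bytes):
assert read_payload(io.BytesIO(b"\x03abc")) == b"abc"
```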
|
||||
|
||||
*Note*: Parameters defined as `[]VariableName` are SSZ-encoded containerless vectors.
|
||||
|
||||
### Messages
|
||||
|
||||
#### Hello
|
||||
|
||||
**Protocol ID:** ``/eth2/beacon_chain/req/hello/1/``
|
||||
|
||||
**Content**:
|
||||
```
|
||||
(
|
||||
fork_version: bytes4
|
||||
finalized_root: bytes32
|
||||
finalized_epoch: uint64
|
||||
head_root: bytes32
|
||||
head_slot: uint64
|
||||
)
|
||||
```
|
||||
The fields are:
|
||||
|
||||
- `fork_version`: The beacon_state `Fork` version.
|
||||
- `finalized_root`: The latest finalized root the node knows about.
|
||||
- `finalized_epoch`: The latest finalized epoch the node knows about.
|
||||
- `head_root`: The block hash tree root corresponding to the head of the chain as seen by the sending node.
|
||||
- `head_slot`: The slot corresponding to the `head_root`.
|
||||
|
||||
Clients exchange hello messages upon connection, forming a two-phase handshake. The first message the initiating client sends MUST be the hello message. In response, the receiving client MUST respond with its own hello message.
|
||||
|
||||
Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions:
|
||||
|
||||
1. If `fork_version` doesn’t match the local fork version, since the client’s chain is on another fork. `fork_version` can also be used to segregate testnets.
|
||||
2. If the (`finalized_root`, `finalized_epoch`) shared by the peer is not in the client's chain at the expected epoch. For example, if Peer 1 sends (root, epoch) of (A, 5) and Peer 2 sends (B, 3) but Peer 1 has root C at epoch 3, then Peer 1 would disconnect because it knows that their chains are irreparably disjoint.
|
||||
|
||||
Once the handshake completes, the client with the lower `finalized_epoch` or `head_slot` (if the clients have equal `finalized_epoch`s) SHOULD request beacon blocks from its counterparty via the `BeaconBlocks` request.
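A sketch of these post-handshake decisions. `Hello` mirrors the schema above; `root_at_epoch` is a hypothetical lookup into the local chain, returning `None` when the epoch is beyond what we can check:

```python
from dataclasses import dataclass
from typing import Callable, Optional

@dataclass
class Hello:
    fork_version: bytes
    finalized_root: bytes
    finalized_epoch: int
    head_root: bytes
    head_slot: int

def should_disconnect(local: Hello, remote: Hello,
                      root_at_epoch: Callable[[int], Optional[bytes]]) -> bool:
    if remote.fork_version != local.fork_version:
        return True  # condition 1: different fork (or different testnet)
    known = root_at_epoch(remote.finalized_epoch)
    if known is not None and known != remote.finalized_root:
        return True  # condition 2: chains are irreparably disjoint
    return False

def should_request_blocks(local: Hello, remote: Hello) -> bool:
    """The less advanced side of the handshake initiates a BeaconBlocks sync."""
    if local.finalized_epoch != remote.finalized_epoch:
        return local.finalized_epoch < remote.finalized_epoch
    return local.head_slot < remote.head_slot
```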
|
||||
|
||||
#### Goodbye
|
||||
|
||||
**Protocol ID:** ``/eth2/beacon_chain/req/goodbye/1/``
|
||||
|
||||
**Content:**
|
||||
```
|
||||
(
|
||||
reason: uint64
|
||||
)
|
||||
```
|
||||
Clients MAY send goodbye messages upon disconnection. The reason field MAY be one of the following values:
|
||||
|
||||
- 1: Client shut down.
|
||||
- 2: Irrelevant network.
|
||||
- 3: Fault/error.
|
||||
|
||||
Clients MAY use reason codes above `128` to indicate alternative, erroneous request-specific responses.
|
||||
|
||||
The range `[4, 127]` is RESERVED for future usage.
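The same reason codes expressed as an enum, for illustration only:

```python
from enum import IntEnum

class GoodbyeReason(IntEnum):
    CLIENT_SHUTDOWN = 1
    IRRELEVANT_NETWORK = 2
    FAULT_OR_ERROR = 3
    # Values in [4, 127] are reserved; values above 128 are client-specific.
```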
|
||||
|
||||
#### BeaconBlocks
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks/1/`
|
||||
|
||||
Request Content:
|
||||
```
|
||||
(
|
||||
head_block_root: HashTreeRoot
|
||||
start_slot: uint64
|
||||
count: uint64
|
||||
step: uint64
|
||||
)
|
||||
```
|
||||
|
||||
Response Content:
|
||||
```
|
||||
(
|
||||
blocks: []BeaconBlock
|
||||
)
|
||||
```
|
||||
|
||||
Requests `count` beacon blocks from the peer, starting from `start_slot` on the chain defined by `head_block_root`. The response MUST contain no more than `count` blocks. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at slots [2, 4, 6, …]. If a given slot is empty, no block is returned for it; if slot 4 were empty in the previous example, the returned array would contain the blocks at [2, 6, …]. A step value of 1 returns all blocks in the range `[start_slot, start_slot + count)`.
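A sketch of the slot arithmetic described above. Empty slots simply produce no block in the response, so the returned list may be shorter than `count`:

```python
def requested_slots(start_slot: int, count: int, step: int) -> list:
    """Slots covered by a BeaconBlocks request, before skipping empty slots."""
    return [start_slot + i * step for i in range(count)]

# start_slot=2, step=2 requests the blocks at slots [2, 4, 6, ...]:
assert requested_slots(2, 3, 2) == [2, 4, 6]
```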
|
||||
|
||||
`BeaconBlocks` is primarily used to sync historical blocks.
|
||||
|
||||
Clients MUST support requesting blocks since the start of the weak subjectivity period and up to the given `head_block_root`.
|
||||
|
||||
Clients MUST support `head_block_root` values since the latest finalized epoch.
|
||||
|
||||
#### RecentBeaconBlocks
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/recent_beacon_blocks/1/`
|
||||
|
||||
Request Content:
|
||||
|
||||
```
|
||||
(
|
||||
block_roots: []HashTreeRoot
|
||||
)
|
||||
```
|
||||
|
||||
Response Content:
|
||||
|
||||
```
|
||||
(
|
||||
blocks: []BeaconBlock
|
||||
)
|
||||
```
|
||||
|
||||
Requests blocks by their block roots. The response is a list of `BeaconBlock` with the same length as the request. Blocks are returned in order of the request and any missing/unknown blocks are left empty (SSZ null `BeaconBlock`).
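A sketch of the responder's mapping, assuming `block_store` is a hypothetical dict from root to `BeaconBlock` and `NULL_BLOCK` stands in for the SSZ null block mentioned above:

```python
NULL_BLOCK = None  # stand-in for the SSZ null BeaconBlock

def recent_beacon_blocks_response(block_roots: list, block_store: dict) -> list:
    """Return blocks in request order; unknown roots stay empty (null)."""
    return [block_store.get(root, NULL_BLOCK) for root in block_roots]
```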
|
||||
|
||||
`RecentBeaconBlocks` is primarily used to recover recent blocks (e.g. when receiving a block or attestation whose parent is unknown).
|
||||
|
||||
Clients MUST support requesting blocks since the latest finalized epoch.
|
||||
|
||||
## The discovery domain: discv5
|
||||
|
||||
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery, both in the interoperability testnet and mainnet.
|
||||
|
||||
`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.
|
||||
|
||||
:warning: Under construction. :warning:
|
||||
|
||||
### Integration into libp2p stacks
|
||||
|
||||
`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go) and [peer routing](https://github.com/libp2p/go-libp2p-core/blob/master/routing/routing.go#L36-L44) abstractions and interfaces (go-libp2p links provided).
|
||||
|
||||
Inputs to operations include peer IDs (when locating a specific peer), or capabilities (when searching for peers with a specific capability), and the outputs will be multiaddrs converted from the ENR records returned by the discv5 backend.
|
||||
|
||||
This integration enables the libp2p stack to subsequently form connections and streams with discovered peers.
|
||||
|
||||
### ENR structure
|
||||
|
||||
The Ethereum Node Record (ENR) for an Ethereum 2.0 client MUST contain the following entries (exclusive of the sequence number and signature, which MUST be present in an ENR):
|
||||
|
||||
- The compressed secp256k1 public key, 33 bytes (`secp256k1` field).
|
||||
- An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field).
|
||||
- A TCP port (`tcp` field) representing the local libp2p listening port.
|
||||
- A UDP port (`udp` field) representing the local discv5 listening port.
|
||||
|
||||
Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778).
|
||||
|
||||
#### Interop
|
||||
|
||||
In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry Eth 2.0 capability information, as it would be superfluous.
|
||||
|
||||
Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an Eth 2.0 peer, in order to eschew connecting to Eth 1.0 peers.
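For illustration, the key/value entries an interop client's ENR would carry (alongside the mandatory sequence number and signature). The concrete values below are placeholders, not real keys or addresses:

```python
# Hypothetical interop ENR contents; all values are placeholders.
enr_entries = {
    "secp256k1": bytes(33),  # compressed public key, 33 bytes
    "ip": "192.0.2.1",       # example IPv4 address (documentation range)
    "tcp": 9000,             # libp2p listening port
    "udp": 9000,             # discv5 listening port
    "eth2": b"",             # nil value marking an Eth 2.0 peer
}
```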
|
||||
|
||||
#### Mainnet
|
||||
|
||||
On mainnet, ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability.
|
||||
|
||||
### Topic advertisement
|
||||
|
||||
#### Interop
|
||||
|
||||
This feature will not be used in the interoperability testnet.
|
||||
|
||||
#### Mainnet
|
||||
|
||||
In mainnet, we plan to use discv5’s topic advertisement feature as a rendezvous facility for peers on shards (thus subscribing to the relevant gossipsub topics).
|
||||
|
||||
# Design decision rationale
|
||||
|
||||
## Transport
|
||||
|
||||
### Why are we defining specific transports?
|
||||
|
||||
libp2p peers can listen on multiple transports concurrently, and these can change over time. Multiaddrs encode not only the address but also the transport to be used to dial.
|
||||
|
||||
Due to this dynamic nature, agreeing on specific transports like TCP, QUIC, or WebSockets on paper becomes irrelevant.
|
||||
|
||||
However, it is useful to define a minimum baseline for interoperability purposes.
|
||||
|
||||
### Can clients support other transports/handshakes than the ones mandated by the spec?
|
||||
|
||||
Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC transports, if available in the language of choice. While interoperability shall not be harmed by lack of such support, the advantages are desirable:
|
||||
|
||||
- Better latency, performance, and other QoS characteristics (QUIC).
|
||||
- Paving the way for interfacing with future light clients (WebSockets, WebRTC).
|
||||
|
||||
The libp2p QUIC transport inherently relies on TLS 1.3 per requirement in section 7 of the [QUIC protocol specification](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7) and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-quic-tls-22).
|
||||
|
||||
The usage of one handshake procedure or the other shall be transparent to the Eth 2.0 application layer, once the libp2p Host/Node object has been configured appropriately.
|
||||
|
||||
### What are the advantages of using TCP/QUIC/Websockets?
|
||||
|
||||
TCP is a reliable, ordered, full-duplex, congestion-controlled network protocol that powers much of the Internet as we know it today. HTTP/1.1 and HTTP/2 run atop TCP.
|
||||
|
||||
QUIC is a new protocol that’s in the final stages of specification by the IETF QUIC WG. It emerged from Google’s SPDY experiment. The QUIC transport is undoubtedly promising. It’s UDP-based yet reliable, ordered, multiplexed, natively secure (TLS 1.3), reduces latency vs. TCP, and offers stream-level and connection-level congestion control (thus removing head-of-line blocking), 0-RTT connection establishment, and endpoint migration, amongst other features. UDP also has better NAT traversal properties than TCP—something we desperately pursue in peer-to-peer networks.
|
||||
|
||||
QUIC is being adopted as the underlying protocol for HTTP/3. This has the potential to give us resistance to deep-packet-inspection-based censorship for free. Provided that we use the same port numbers and encryption mechanisms as HTTP/3, our traffic may be indistinguishable from standard web traffic, and we may only become subject to standard IP-based firewall filtering—something we can counteract via other mechanisms.
|
||||
|
||||
WebSockets and/or WebRTC transports are necessary for interaction with browsers, and will become increasingly important as we incorporate browser-based light clients to the Eth 2.0 network.
|
||||
|
||||
### Why do we not just support a single transport?
|
||||
|
||||
Networks evolve. Hardcoding design decisions leads to ossification, preventing the evolution of networks alongside the state of the art. Introducing changes on an ossified protocol is very costly, and sometimes, downright impracticable without causing undesirable breakage.
|
||||
|
||||
Modeling for upgradeability and dynamic transport selection from the get-go lays the foundation for a future-proof stack.
|
||||
|
||||
Clients can adopt new transports without breaking old ones, and the multi-transport ability enables constrained and sandboxed environments (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers.
|
||||
|
||||
### Why are we not using QUIC for mainnet from the start?
|
||||
|
||||
The QUIC standard is still not finalized (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic).
|
||||
|
||||
## Multiplexing
|
||||
|
||||
### Why are we using mplex/yamux?
|
||||
|
||||
[Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer invented by Hashicorp that supports stream-level congestion control. Implementations exist in a limited set of languages, and it’s not a trivial piece to develop.
|
||||
|
||||
Conscious of that, the libp2p community conceptualized [mplex](https://github.com/libp2p/specs/blob/master/mplex/README.md) as a simple, minimal multiplexer for usage with libp2p. It does not support stream-level congestion control and is subject to head-of-line blocking.
|
||||
|
||||
Overlay multiplexers are not necessary with QUIC since the protocol provides native multiplexing, but they need to be layered atop TCP, WebSockets, and other transports that lack such support.
|
||||
|
||||
## Protocol negotiation
|
||||
|
||||
### When is multiselect 2.0 due and why are we using it for mainnet?
|
||||
|
||||
multiselect 2.0 is currently being conceptualized. The debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded—as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system.
|
||||
|
||||
In the following weeks (August 2019), there will be a renewed initiative to first define the requirements, constraints, assumptions, and features, in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation.
|
||||
|
||||
We plan to use multiselect 2.0 for mainnet because it will:
|
||||
|
||||
1. Reduce round trips during connection bootstrapping and stream protocol negotiation.
|
||||
2. Enable efficient one-stream-per-request interaction patterns.
|
||||
3. Leverage *push data* mechanisms of underlying protocols to expedite negotiation.
|
||||
4. Provide the building blocks for enhanced censorship resistance.
|
||||
|
||||
### What is the difference between connection-level and stream-level protocol negotiation?
|
||||
|
||||
All libp2p connections must be authenticated, encrypted, and multiplexed. Connections using network transports unsupportive of native authentication/encryption and multiplexing (e.g. TCP) need to undergo protocol negotiation to agree on a mutually supported:
|
||||
|
||||
1. authentication/encryption mechanism (such as SecIO, TLS 1.3, Noise).
|
||||
2. overlay multiplexer (such as mplex, Yamux, spdystream).
|
||||
|
||||
In this specification, we refer to these two as *connection-level negotiations*. Transports supporting those features natively (such as QUIC) omit those negotiations.
|
||||
|
||||
After successfully selecting a multiplexer, all subsequent I/O happens over *streams*. When opening streams, peers pin a protocol to that stream, by conducting *stream-level protocol negotiation*.
|
||||
|
||||
At present, multistream-select 1.0 is used for both types of negotiation, but multiselect 2.0 will use dedicated mechanisms for the connection bootstrapping process and for stream protocol negotiation.
|
||||
|
||||
## Encryption
|
||||
|
||||
### Why are we using SecIO for interop? Why not for mainnet?
|
||||
|
||||
SecIO has been the default encryption layer for libp2p for years. It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale.
|
||||
|
||||
SecIO is the common denominator across the various language libraries at this stage. It is widely implemented. That’s why we have chosen to use it for initial interop to minimize overhead in getting to a basic interoperability testnet.
|
||||
|
||||
We won’t be using it for mainnet because, amongst other things, it requires several round trips to be sound, and doesn’t support early data (0-RTT data), a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping.
|
||||
|
||||
SecIO is not considered secure for the purposes of this spec.
|
||||
|
||||
### Why are we using Noise/TLS 1.3 for mainnet?
|
||||
|
||||
Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org):
|
||||
|
||||
> Noise is a framework for building crypto protocols. Noise protocols support mutual and optional authentication, identity hiding, forward secrecy, zero round-trip encryption, and other advanced features.
|
||||
|
||||
Noise in itself does not specify a single handshake procedure, but provides a framework to build secure handshakes based on Diffie-Hellman key agreement with a variety of tradeoffs and guarantees.
|
||||
|
||||
Noise handshakes are lightweight and simple to understand, and are used in major cryptographic-centric projects like WireGuard, I2P, and Lightning. [Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) [studies](https://eprint.iacr.org/2019/436.pdf) have assessed the stated security goals of several Noise handshakes with positive results.
|
||||
|
||||
On the other hand, TLS 1.3 is the newest, simplified iteration of TLS. Old, insecure, and obsolete ciphers and algorithms have been removed, and X25519 has been adopted as the preferred ECDH key agreement function. Handshakes complete in a single round trip, 0-RTT data is supported via session resumption, amongst other features.
|
||||
|
||||
*Note*: [TLS 1.3 is a prerequisite of the QUIC transport](https://tools.ietf.org/html/draft-ietf-quic-transport-22#section-7), although an experiment exists to integrate Noise as the QUIC crypto layer: [nQUIC](https://eprint.iacr.org/2019/028).
|
||||
|
||||
### Why are we using encryption at all?
|
||||
|
||||
Transport level encryption secures message exchange and provides properties that are useful for privacy, safety, and censorship resistance. These properties are derived from the following security guarantees that apply to the entire communication between two peers:
|
||||
|
||||
- Peer authentication: the peer I’m talking to is really who they claim to be and who I expect them to be.
|
||||
- Confidentiality: no observer can eavesdrop on the content of our messages.
|
||||
- Integrity: the data has not been tampered with by a third-party while in transit.
|
||||
- Non-repudiation: the originating peer cannot dispute that they sent the message.
|
||||
- Depending on the chosen algorithms and mechanisms, we may obtain additional guarantees, such as non-replayability (this byte could’ve only been sent *now*, e.g. by using continuous HMACs) or perfect forward secrecy (if a peer key is compromised, the content of past conversations will not be compromised).
|
||||
|
||||
Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.).
|
||||
|
||||
### Will mainnet networking be untested when it launches?
|
||||
|
||||
Before launching mainnet, the testnet will be switched over to mainnet networking parameters, including Noise handshakes and other new protocols. This gives us an opportunity to drill coordinated network upgrades and to verify that there are no significant upgradeability gaps.
|
||||
|
||||
## Gossipsub
|
||||
|
||||
### Why are we using a pub/sub algorithm for block and attestation propagation?
|
||||
|
||||
Pubsub is a technique to broadcast/disseminate data across a network rapidly. Such data is packaged in fire-and-forget messages that do not require a response from every recipient. Peers subscribed to a topic participate in the propagation of messages in that topic.
|
||||
|
||||
The alternative is to maintain a fully connected mesh (all peers connected to each other 1:1), which scales poorly (O(n^2)).
|
||||
|
||||
### Why are we using topics to segregate encodings, yet only support one encoding?
|
||||
|
||||
For future extensibility with almost zero overhead now (besides the extra bytes in the topic name).
|
||||
|
||||
### How do we upgrade gossip channels (e.g. changes in encoding, compression)?
|
||||
|
||||
Changing gossipsub/broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, for example during a hard fork.
|
||||
|
||||
One can envision a two-phase deployment as well, where clients start listening to the new topic in the first phase, then start publishing some time later, letting the traffic naturally move over to the new topic.
|
||||
|
||||
### Why must all clients use the same gossip topic instead of one negotiated between each peer pair?
|
||||
|
||||
Supporting multiple topics/encodings would require the presence of relayers to translate between encodings and topics so as to avoid network fragmentation where participants have diverging views on the gossiped state, making the protocol more complicated and fragile.
|
||||
|
||||
Gossip protocols typically remember which messages they have seen for a finite period of time, based on message identity; if you publish the same message again after that time has passed, it will be re-broadcast. Adding a relay delay also makes this scenario more likely.
|
||||
|
||||
One can imagine that in a complicated upgrade scenario, we might have peers publishing the same message on two topics/encodings, but the price here is pretty high in terms of overhead—both computational and networking—so we'd rather avoid that.
|
||||
|
||||
It is permitted for clients to publish data on alternative topics as long as they also publish on the network-wide mandatory topic.
|
||||
|
||||
### Why are the topics strings and not hashes?
|
||||
|
||||
Topic names have a hierarchical structure. In the future, gossipsub may support wildcard subscriptions (e.g. subscribe to all children topics under a root prefix) by way of prefix matching. Enforcing hashes for topic names would preclude us from leveraging such features going forward.
|
||||
|
||||
No security or privacy guarantees are lost as a result of choosing plaintext topic names, since the domain is finite anyway, and calculating a digest's preimage would be trivial.
|
||||
|
||||
Furthermore, the Eth 2.0 topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily.
|
||||
|
||||
### Why are there `SHARD_SUBNET_COUNT` subnets, and why is this not defined?
|
||||
|
||||
Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. The exact grouping will be dependent on more involved network tests. This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
|
||||
|
||||
### Why are we sending entire objects in the pubsub and not just hashes?
|
||||
|
||||
Entire objects should be sent to get the greatest propagation speeds. If only hashes are sent, then block and attestation propagation is dependent on recursive requests from each peer. In a hash-only scenario, peers could receive hashes without knowing who to download the actual contents from. Sending entire objects ensures that they get propagated through the entire network.
|
||||
|
||||
### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc?
|
||||
|
||||
The prohibition of unverified-block-gossiping extends to nodes that cannot verify a signature due to not being fully synced to ensure that such (amplified) DOS attacks are not possible.
|
||||
|
||||
### How are we going to discover peers in a gossipsub topic?
|
||||
|
||||
Via discv5 topics. ENRs should not be used for this purpose, as they store identity, location, and capability information, not volatile [advertisements](#topic-advertisement).
|
||||
|
||||
In the interoperability testnet, all peers will be subscribed to all global beacon chain topics, so discovering peers in specific shard topics will be unnecessary.
|
||||
|
||||
## Req/Resp
|
||||
|
||||
### Why segregate requests into dedicated protocol IDs?
|
||||
|
||||
Requests are segregated by protocol ID to:
|
||||
|
||||
1. Leverage protocol routing in libp2p, such that the libp2p stack will route the incoming stream to the appropriate handler. This allows the handler function for each request type to be self-contained. For an analogy, think about how you attach HTTP handlers to a REST API server.
|
||||
2. Version requests independently. In a coarser-grained umbrella protocol, the entire protocol would have to be versioned even if just one field in a single message changed.
|
||||
3. Enable clients to select the individual requests/versions they support. It would no longer be a strict requirement to support all requests, and clients, in principle, could support a subset of requests and variety of versions.
|
||||
4. Enable flexibility and agility for clients adopting spec changes that impact the request, by signalling to peers exactly which subset of new/old requests they support.
|
||||
5. Enable clients to explicitly choose backwards compatibility at the request granularity. Without this, clients would be forced to support entire versions of the coarser request protocol.
|
||||
6. Parallelise RFCs (or Eth 2.0 EIPs). By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently without relying on a synchronization point to version the general top-level protocol.
|
||||
1. This has the benefit that clients can explicitly choose which RFCs to deploy without buying into all other RFCs that may be included in that top-level version.
|
||||
2. Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the Cartesian product of RFCs in flight, O(n^2).
|
||||
7. Allow us to simplify the payload of requests. Request IDs and method IDs no longer need to be sent. The encoding/request type and version can all be handled by the framework.
|
||||
|
||||
**Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. Multiselect 2.0 will remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to hinder interop testing. More info is to be released from the libp2p community in the coming weeks.
|
||||
|
||||
### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?
|
||||
|
||||
We are using single-use streams where each stream is closed at the end of the message. Thus, libp2p transparently handles message delimiting in the underlying stream. libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP). We can therefore use stream closure to mark the end of the request and response independently.
|
||||
|
||||
Nevertheless, messages are still length-prefixed—this is now being considered for removal.
|
||||
|
||||
Advantages of length-prefixing include:
|
||||
|
||||
* Reader can prepare a correctly sized buffer before reading message
|
||||
* Alignment with protocols like gRPC over HTTP/2 that prefix with length
|
||||
* Sanity checking of stream closure / message length
|
||||
|
||||
Disadvantages include:
|
||||
|
||||
* Redundant methods of message delimiting—both stream end marker and length prefix
|
||||
* Harder to stream as length must be known up-front
|
||||
* Additional code path required to verify length
|
||||
|
||||
In some protocols, adding a length prefix serves as a form of DoS protection against very long messages, allowing the client to abort if an overlong message is about to be sent. In this protocol, we are globally limiting message sizes using `REQ_RESP_MAX_SIZE`, thus the length prefix does not afford any additional protection.
|
||||
|
||||
[Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte.
|
||||
|
||||
### Why do we version protocol strings with ordinals instead of semver?
|
||||
|
||||
Using semver for network protocols is confusing. It is never clear what a change in a field, even if backwards compatible on deserialization, actually implies. Network protocol agreement should be explicit. Imagine two peers:
|
||||
|
||||
- Peer A supporting v1.1.1 of protocol X.
|
||||
- Peer B supporting v1.1.2 of protocol X.
|
||||
|
||||
These two peers should never speak to each other because the results can be unpredictable. This is an oversimplification: imagine the same problem with a set of 10 possible versions. We now have 10^2 (100) possible outcomes that peers need to model for. The resulting complexity is unwieldy.
|
||||
|
||||
For this reason, we rely on negotiation of explicit, verbatim protocols. In the above case, peer B would provide backwards compatibility by supporting and advertising both v1.1.1 and v1.1.2 of the protocol.
|
||||
|
||||
Therefore, semver would be relegated to conveying expectations at the human level, and it wouldn't do a good job there either, because it's unclear whether "backwards compatibility" and "breaking change" apply only to the wire schema, to behavior, etc.
|
||||
|
||||
For this reason, we remove and replace semver with ordinals that require explicit agreement and do not mandate a specific policy for changes.
|
||||
|
||||
### Why is it called Req/Resp and not RPC?
|
||||
|
||||
Req/Resp is used to avoid confusion with JSON-RPC and similar user-client interaction mechanisms.
|
||||
|
||||
## Discovery
|
||||
|
||||
### Why are we using discv5 and not libp2p Kademlia DHT?
|
||||
|
||||
discv5 is a standalone protocol, running on UDP on a dedicated port, meant for peer and service discovery only. discv5 supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are, or will be, requirements in this context.
|
||||
|
||||
On the other hand, libp2p Kademlia DHT is a fully-fledged DHT protocol/implementation with content routing and storage capabilities, both of which are irrelevant in this context.
|
||||
|
||||
We assume that Eth 1.0 nodes will evolve to support discv5. By sharing the discovery network between Eth 1.0 and 2.0, we benefit from the additive effect on network size that enhances resilience and resistance against certain attacks, to which smaller networks are more vulnerable. It should also help light clients of both networks find nodes with specific capabilities.
|
||||
|
||||
discv5 is in the process of being audited.
|
||||
|
||||
### What is the difference between an ENR and a multiaddr, and why are we using ENRs?
|
||||
|
||||
Ethereum Node Records are self-certified node records. Nodes craft and disseminate ENRs for themselves, proving authorship via a cryptographic signature. ENRs are sequentially indexed, enabling conflicts to be resolved.
|
||||
|
||||
ENRs are key-value records with string-indexed ASCII keys. They can store arbitrary information, but EIP-778 specifies a pre-defined dictionary, including IPv4 and IPv6 addresses, secp256k1 public keys, etc.
|
||||
|
||||
Comparing ENRs and multiaddrs is like comparing apples and oranges. ENRs are self-certified containers of identity, addresses, and metadata about a node. Multiaddrs are address strings with the peculiarity that they’re self-describing, composable and future-proof. An ENR can contain multiaddrs, and multiaddrs can be derived securely from the fields of an authenticated ENR.
|
||||
|
||||
discv5 uses ENRs and we will presumably need to:
|
||||
|
||||
1. Add `multiaddr` to the dictionary, so that nodes can advertise their multiaddr under a reserved namespace in ENRs. – and/or –
|
||||
2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Eth 1.0 nodes).
|
||||
|
||||
## Compression/Encoding
|
||||
|
||||
### Why are we using SSZ for encoding?
|
||||
|
||||
SSZ is used at the consensus layer, and all implementations should have support for SSZ-encoding/decoding, requiring no further dependencies to be added to client implementations. This is a natural choice for serializing objects to be sent across the wire. The actual data in most protocols will be further compressed for efficiency.
|
||||
|
||||
SSZ has well-defined schemas for consensus objects (typically sent across the wire), reducing any serialization schema data that needs to be sent. It also defines all the types required by this network specification.
|
||||
|
||||
### Why are we compressing, and at which layers?
|
||||
|
||||
We compress on the wire to achieve smaller payloads per-message, which, in aggregate, result in higher efficiency, better utilization of available bandwidth, and overall reduction in network-wide traffic overhead.
|
||||
|
||||
At this time, libp2p does not have an out-of-the-box compression feature that can be dynamically negotiated and layered atop connections and streams, but it is [being considered](https://github.com/libp2p/libp2p/issues/81).
|
||||
|
||||
This is a non-trivial feature because the behavior of network IO loops, kernel buffers, chunking, and packet fragmentation, amongst others, need to be taken into account. libp2p streams are unbounded streams, whereas compression algorithms work best on bounded byte streams of which we have some prior knowledge.
|
||||
|
||||
Compression tends not to be a one-size-fits-all problem. A lot of variables need careful evaluation, and generic approaches/choices lead to poor size shavings, which may even be counterproductive when factoring in the CPU and memory tradeoff.
|
||||
|
||||
For all these reasons, generically negotiating compression algorithms may be treated as a research problem within the libp2p community, one we’re happy to tackle in the medium-term.
|
||||
|
||||
At this stage, the wisest choice is to consider libp2p a messenger of bytes, and to make the application layer participate in compressing those bytes. This looks different depending on the interaction layer:
|
||||
|
||||
- Gossip domain: since gossipsub has a framing protocol and exposes an API, we compress the payload (when dictated by the encoding token in the topic name) prior to publishing the message via the API. No length prefixing is necessary because protobuf takes care of bounding the field in the serialized form.
|
||||
- Req/Resp domain: since we define custom protocols that operate on byte streams, implementers are encouraged to encapsulate the encoding and compression logic behind MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
||||
|
||||
### Why are we using Snappy for compression?
|
||||
|
||||
Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks, and can calculate the size of the uncompressed object without inflating it in memory. This prevents DOS vectors where large uncompressed data is sent.
|
||||
|
||||
### Can I get access to unencrypted bytes on the wire for debugging purposes?
|
||||
|
||||
Yes, you can add loggers in your libp2p protocol handlers to log incoming and outgoing messages. It is recommended to use programming design patterns to encapsulate the logging logic cleanly.
|
||||
|
||||
If your libp2p library relies on frameworks/runtimes such as Netty (jvm) or Node.js (javascript), you can use logging facilities in those frameworks/runtimes to enable message tracing.
|
||||
|
||||
For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md) (which applies no encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire.
|
||||
|
||||
# libp2p implementations matrix
|
||||
|
||||
This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which Eth 2.0 clients are being developed.
|
@ -1,283 +0,0 @@
|
||||
# Eth 2.0 Networking Spec - RPC Interface
|
||||
|
||||
## Abstract
|
||||
|
||||
The Ethereum 2.0 networking stack uses two modes of communication: a broadcast protocol that gossips information to interested parties via GossipSub, and an RPC protocol that retrieves information from specific clients. This specification defines the RPC protocol.
|
||||
|
||||
The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in [RFC 2119](https://tools.ietf.org/html/rfc2119).
|
||||
|
||||
## Dependencies
|
||||
|
||||
This specification assumes familiarity with the [Messaging](./messaging.md), [Node Identification](./node-identification.md), and [Beacon Chain](../core/0_beacon-chain.md) specifications.
|
||||
|
||||
# Specification
|
||||
|
||||
## Message schemas
|
||||
|
||||
Message body schemas are notated like this:
|
||||
|
||||
```
|
||||
(
|
||||
field_name_1: type
|
||||
field_name_2: type
|
||||
)
|
||||
```
|
||||
|
||||
Embedded types are serialized as SSZ Containers unless otherwise noted.
|
||||
|
||||
All referenced data structures can be found in the [Beacon Chain](../core/0_beacon-chain.md#data-structures) specification.
|
||||
|
||||
## `libp2p` protocol names
|
||||
|
||||
A "Protocol ID" in `libp2p` parlance refers to a human-readable identifier `libp2p` uses in order to identify sub-protocols and stream messages of different types over the same connection. Peers exchange supported protocol IDs via the `Identify` protocol upon connection. When opening a new stream, peers pin a particular protocol ID to it, and the stream remains contextualized thereafter. Since messages are sent inside a stream, they do not need to bear the protocol ID.
|
||||
|
||||
## RPC-over-`libp2p`
|
||||
|
||||
To facilitate RPC-over-`libp2p`, a single protocol name is used: `/eth/serenity/beacon/rpc/1`. The version number in the protocol name is neither backwards- nor forwards-compatible, and will be incremented whenever changes to the below structures are required.
|
||||
|
||||
Remote method calls are wrapped in a "request" structure:
|
||||
|
||||
```
|
||||
(
|
||||
id: uint64
|
||||
method_id: uint16
|
||||
body: (message_body...)
|
||||
)
|
||||
```
|
||||
|
||||
and their corresponding responses are wrapped in a "response" structure:
|
||||
|
||||
```
|
||||
(
|
||||
id: uint64
|
||||
response_code: uint16
|
||||
result: bytes
|
||||
)
|
||||
```
|
||||
|
||||
A union type is used to determine the contents of the `body` field in the request structure. Each "body" entry in the RPC calls below corresponds to one subtype in the `body` type union.
|
||||
|
||||
The details of the RPC-over-`libp2p` protocol are similar to [JSON-RPC 2.0](https://www.jsonrpc.org/specification). Specifically:
|
||||
|
||||
1. The `id` member is REQUIRED.
|
||||
2. The `id` member in the response MUST be the same as the value of the `id` in the request.
|
||||
3. The `id` member MUST be unique within the context of a single connection. Monotonically increasing `id`s are RECOMMENDED.
|
||||
4. The `method_id` member is REQUIRED.
|
||||
5. The `result` member is REQUIRED on success.
|
||||
6. The `result` member is OPTIONAL on errors, and MAY contain additional information about the error.
|
||||
7. `response_code` MUST be `0` on success.
|
||||
|
||||
Structuring RPC requests in this manner allows multiple calls and responses to be multiplexed over the same stream without switching. Note that this implies that responses MAY arrive in a different order than requests.
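
A minimal bookkeeping sketch for this multiplexing, assuming the wrapper structures above; the class and method names are illustrative, and serialization/stream I/O are elided:

```python
import itertools
from typing import Callable, Dict

class RpcSession:
    """Match out-of-order responses to requests by their `id`."""

    def __init__(self) -> None:
        self._ids = itertools.count()  # monotonically increasing ids (RECOMMENDED)
        self._pending: Dict[int, Callable[[int, bytes], None]] = {}

    def send_request(self, method_id: int, body: bytes,
                     on_response: Callable[[int, bytes], None]) -> int:
        request_id = next(self._ids)
        self._pending[request_id] = on_response
        # ... serialize (id, method_id, body) and write it to the stream ...
        return request_id

    def handle_response(self, response_id: int, response_code: int, result: bytes) -> None:
        # Responses MAY arrive in any order; the `id` pairs them with requests.
        self._pending.pop(response_id)(response_code, result)
```
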
|
||||
|
||||
The "method ID" fields in the below messages refer to the `method` field in the request structure above.
|
||||
|
||||
The first 1,000 values in `response_code` are reserved for system use. The following response codes are predefined:
|
||||
|
||||
1. `0`: No error.
|
||||
2. `10`: Parse error.
|
||||
3. `20`: Invalid request.
|
||||
4. `30`: Method not found.
|
||||
5. `40`: Server error.
|
||||
|
||||
### Alternative for non-`libp2p` clients
|
||||
|
||||
Some clients are waiting for `libp2p` implementations in their respective languages. As such, they MAY listen for raw TCP messages on port `9000`. To distinguish RPC messages from other messages on that port, a byte prefix of `ETH` (`0x455448`) MUST be prepended to all messages. This option will be removed once `libp2p` is ready in all supported languages.
|
||||
|
||||
## Messages
|
||||
|
||||
### Hello
|
||||
|
||||
**Method ID:** `0`
|
||||
|
||||
**Body**:
|
||||
|
||||
```
|
||||
(
|
||||
network_id: uint8
|
||||
chain_id: uint64
|
||||
finalized_root: bytes32
|
||||
finalized_epoch: uint64
|
||||
best_root: bytes32
|
||||
best_slot: uint64
|
||||
)
|
||||
```
|
||||
|
||||
Clients exchange `hello` messages upon connection, forming a two-phase handshake. The first message the initiating client sends MUST be the `hello` message. In response, the receiving client MUST respond with its own `hello` message.
|
||||
|
||||
Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions:
|
||||
|
||||
1. If `network_id` belongs to a different chain, since the client definitionally cannot sync with this client.
|
||||
2. If the `finalized_root` shared by the peer is not in the client's chain at the expected epoch. For example, if Peer 1 in the diagram below has `(root, epoch)` of `(A, 5)` and Peer 2 has `(B, 3)`, Peer 1 would disconnect because it knows that `B` is not the root in their chain at epoch 3:
|
||||
|
||||
```
|
||||
Root A
|
||||
|
||||
+---+
|
||||
|xxx| +----+ Epoch 5
|
||||
+-+-+
|
||||
^
|
||||
|
|
||||
+-+-+
|
||||
| | +----+ Epoch 4
|
||||
+-+-+
|
||||
Root B ^
|
||||
|
|
||||
+---+ +-+-+
|
||||
|xxx+<---+--->+ | +----+ Epoch 3
|
||||
+---+ | +---+
|
||||
|
|
||||
+-+-+
|
||||
| | +-----------+ Epoch 2
|
||||
+-+-+
|
||||
^
|
||||
|
|
||||
+-+-+
|
||||
| | +-----------+ Epoch 1
|
||||
+---+
|
||||
```
|
||||
|
||||
Once the handshake completes, the client with the higher `finalized_epoch` or `best_slot` (if the clients have equal `finalized_epoch`s) SHOULD request beacon block roots from its counterparty via `beacon_block_roots` (i.e. RPC method `10`).
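
A sketch of the two disconnect checks above; `local` and `remote` carry the `hello` fields, and `root_at_epoch` is a hypothetical lookup into the client's own chain:

```python
def should_disconnect(local, remote, root_at_epoch) -> bool:
    """Return True if the peer should be dropped after the `hello` exchange."""
    if remote.network_id != local.network_id:
        return True  # different chain: we definitionally cannot sync with it
    our_root = root_at_epoch(remote.finalized_epoch)  # None if epoch not yet reached
    if our_root is not None and our_root != remote.finalized_root:
        return True  # the peer's finalized root is not in our chain at that epoch
    return False
```
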
|
||||
|
||||
### Goodbye
|
||||
|
||||
**Method ID:** `1`
|
||||
|
||||
**Body:**
|
||||
|
||||
```
|
||||
(
|
||||
reason: uint64
|
||||
)
|
||||
```
|
||||
|
||||
Clients MAY send `goodbye` messages upon disconnection. The `reason` field MAY be one of the following values:
|
||||
|
||||
- `1`: Client shut down.
|
||||
- `2`: Irrelevant network.
|
||||
- `3`: Fault/error.
|
||||
|
||||
Clients MAY define custom goodbye reasons as long as the value is larger than `1000`.
|
||||
|
||||
### Get status
|
||||
|
||||
**Method ID:** `2`
|
||||
|
||||
**Request body:**
|
||||
|
||||
```
|
||||
(
|
||||
sha: bytes32
|
||||
user_agent: bytes
|
||||
timestamp: uint64
|
||||
)
|
||||
```
|
||||
|
||||
**Response body:**
|
||||
|
||||
```
|
||||
(
|
||||
sha: bytes32
|
||||
user_agent: bytes
|
||||
timestamp: uint64
|
||||
)
|
||||
```
|
||||
|
||||
Returns metadata about the remote node.
|
||||
|
||||
### Request beacon block roots
|
||||
|
||||
**Method ID:** `10`
|
||||
|
||||
**Request body**
|
||||
|
||||
```
|
||||
(
|
||||
start_slot: uint64
|
||||
count: uint64
|
||||
)
|
||||
```
|
||||
|
||||
**Response body:**
|
||||
|
||||
```
|
||||
# BlockRootSlot
|
||||
(
|
||||
block_root: bytes32
|
||||
slot: uint64
|
||||
)
|
||||
|
||||
(
|
||||
roots: []BlockRootSlot
|
||||
)
|
||||
```
|
||||
|
||||
Requests a list of block roots and slots from the peer. The `count` parameter MUST be less than or equal to `32768`. The slots MUST be returned in ascending slot order.
|
||||
|
||||
### Beacon block headers
|
||||
|
||||
**Method ID:** `11`
|
||||
|
||||
**Request body**
|
||||
|
||||
```
|
||||
(
|
||||
start_root: HashTreeRoot
|
||||
start_slot: uint64
|
||||
max_headers: uint64
|
||||
skip_slots: uint64
|
||||
)
|
||||
```
|
||||
|
||||
**Response body:**
|
||||
|
||||
```
|
||||
(
|
||||
headers: []BeaconBlockHeader
|
||||
)
|
||||
```
|
||||
|
||||
Requests beacon block headers from the peer starting from `(start_root, start_slot)`. The response MUST contain no more than `max_headers` headers. `skip_slots` defines the maximum number of slots to skip between blocks. For example, requesting blocks starting at slot `2` with a `skip_slots` value of `1` would return the blocks at `[2, 4, 6, 8, 10]`. Where a slot is empty for a given slot number, the closest previous block MUST be returned. For example, if slot `4` were empty in the previous example, the returned array would contain `[2, 3, 6, 8, 10]`. If slot `3` were also empty, the array would contain `[2, 6, 8, 10]` (i.e. duplicate blocks MUST be collapsed). A `skip_slots` value of `0` returns all blocks.
|
||||
|
||||
The `skip_slots` parameter helps facilitate light client sync - for example, in [#459](https://github.com/ethereum/eth2.0-specs/issues/459) - and allows clients to balance the peers from whom they request headers. Clients could, for instance, request every 10th block from a set of peers where each peer has a different starting block in order to populate block data.
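
The slot-selection rules can be captured in a few lines; this helper is illustrative (not part of the spec) and reproduces the worked examples above:

```python
def expected_header_slots(start_slot, max_headers, skip_slots, is_empty):
    """Slots whose blocks a peer would return under the rules above."""
    step = skip_slots + 1  # a skip_slots of 0 means every slot
    slots = []
    for i in range(max_headers):
        slot = start_slot + i * step
        while is_empty(slot) and slot > 0:
            slot -= 1  # empty slot: fall back to the closest previous block
        if slot not in slots:  # duplicate blocks MUST be collapsed
            slots.append(slot)
    return slots

# The examples above: first slot 4 empty, then slots 3 and 4 both empty.
assert expected_header_slots(2, 5, 1, lambda s: s == 4) == [2, 3, 6, 8, 10]
assert expected_header_slots(2, 5, 1, lambda s: s in (3, 4)) == [2, 6, 8, 10]
```
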
|
||||
|
||||
### Beacon block bodies
|
||||
|
||||
**Method ID:** `12`
|
||||
|
||||
**Request body:**
|
||||
|
||||
```
|
||||
(
|
||||
block_roots: []HashTreeRoot
|
||||
)
|
||||
```
|
||||
|
||||
**Response body:**
|
||||
|
||||
```
|
||||
(
|
||||
block_bodies: []BeaconBlockBody
|
||||
)
|
||||
```
|
||||
|
||||
Requests the `block_bodies` associated with the provided `block_roots` from the peer. Responses MUST return `block_roots` in the order provided in the request. If the receiver does not have a particular `block_root`, it MUST return a zero-value `block_body` (i.e. a `block_body` container with all zero fields).
|
||||
|
||||
### Beacon chain state
|
||||
|
||||
*Note*: This section is preliminary, pending the definition of the data structures to be transferred over the wire during fast sync operations.
|
||||
|
||||
**Method ID:** `13`
|
||||
|
||||
**Request body:**
|
||||
|
||||
```
|
||||
(
|
||||
hashes: []HashTreeRoot
|
||||
)
|
||||
```
|
||||
|
||||
**Response body:** TBD
|
||||
|
||||
Requests contain the hashes of Merkle tree nodes that, when merkleized, yield the block's `state_root`.
|
||||
|
||||
The response will contain the values that, when hashed, yield the hashes inside the request body.
|
@ -14,7 +14,7 @@
|
||||
- [Variable-size and fixed-size](#variable-size-and-fixed-size)
|
||||
- [Aliases](#aliases)
|
||||
- [Default values](#default-values)
|
||||
- [`is_empty`](#is_empty)
|
||||
- [`is_zero`](#is_zero)
|
||||
- [Illegal types](#illegal-types)
|
||||
- [Serialization](#serialization)
|
||||
- [`uintN`](#uintn)
|
||||
@ -26,6 +26,7 @@
|
||||
- [Deserialization](#deserialization)
|
||||
- [Merkleization](#merkleization)
|
||||
- [Self-signed containers](#self-signed-containers)
|
||||
- [Summaries and expansions](#summaries-and-expansions)
|
||||
- [Implementations](#implementations)
|
||||
|
||||
<!-- /TOC -->
|
||||
@ -75,19 +76,21 @@ For convenience we alias:
|
||||
* `bit` to `boolean`
|
||||
* `byte` to `uint8` (this is a basic type)
|
||||
* `BytesN` to `Vector[byte, N]` (this is *not* a basic type)
|
||||
* `null`: `{}`, i.e. the empty container
|
||||
* `null`: `{}`
|
||||
|
||||
### Default values
|
||||
|
||||
The default value of a type upon initialization is recursively defined using `0` for `uintN`, `False` for `boolean` and the elements of `Bitvector`, and `[]` for lists and `Bitlist`. Unions default to the first type in the union (with type index zero), which is `null` if present in the union.
|
||||
|
||||
#### `is_empty`
|
||||
#### `is_zero`
|
||||
|
||||
An SSZ object is called empty (and thus, `is_empty(object)` returns true) if it is equal to the default value for that type.
|
||||
An SSZ object is called zeroed (and thus, `is_zero(object)` returns true) if it is equal to the default value for that type.
|
||||
|
||||
### Illegal types
|
||||
|
||||
Empty vector types (i.e. `[subtype, 0]` for some `subtype`) are not legal. The `null` type is only legal as the first type in a union subtype (i.e. with type index zero).
|
||||
- Empty vector types (`Vector[type, 0]`, `Bitvector[0]`) are illegal.
|
||||
- Containers with no fields are illegal.
|
||||
- The `null` type is only legal as the first type in a union subtype (i.e. with type index zero).
|
||||
|
||||
## Serialization
|
||||
|
||||
@ -215,6 +218,12 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi
|
||||
|
||||
Let `value` be a self-signed container object. The convention is that the signature (e.g. a `"bytes96"` BLS12-381 signature) be the last field of `value`. Further, the signed message for `value` is `signing_root(value) = hash_tree_root(truncate_last(value))` where `truncate_last` truncates the last element of `value`.
|
||||
|
||||
## Summaries and expansions
|
||||
|
||||
Let `A` be an object derived from another object `B` by replacing some of the (possibly nested) values of `B` by their `hash_tree_root`. We say `A` is a "summary" of `B`, and that `B` is an "expansion" of `A`. Notice `hash_tree_root(A) == hash_tree_root(B)`.
|
||||
|
||||
We similarly define "summary types" and "expansion types". For example, [`BeaconBlock`](./core/0_beacon-chain.md#beaconblock) is an expansion type of [`BeaconBlockHeader`](./core/0_beacon-chain.md#beaconblockheader). Notice that objects expand to at most one object of a given expansion type. For example, `BeaconBlockHeader` objects uniquely expand to `BeaconBlock` objects.
|
||||
|
||||
## Implementations
|
||||
|
||||
| Language | Project | Maintainer | Implementation |
|
||||
@ -222,7 +231,7 @@ Let `value` be a self-signed container object. The convention is that the signat
|
||||
| Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) |
|
||||
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) |
|
||||
| Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
|
||||
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/util/ssz) |
|
||||
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/utils/ssz) |
|
||||
| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) |
|
||||
| Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) |
|
||||
| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) |
|
||||
|
@ -5,21 +5,25 @@ This document defines the YAML format and structure used for Eth 2.0 testing.
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
|
||||
- [General test format](#general-test-format)
|
||||
- [Table of contents](#table-of-contents)
|
||||
- [About](#about)
|
||||
- [Test-case formats](#test-case-formats)
|
||||
- [Glossary](#glossary)
|
||||
- [Test format philosophy](#test-format-philosophy)
|
||||
- [Config design](#config-design)
|
||||
- [Fork config design](#fork-config-design)
|
||||
- [Test completeness](#test-completeness)
|
||||
- [Test suite](#test-suite)
|
||||
- [Config](#config)
|
||||
- [Fork-timeline](#fork-timeline)
|
||||
- [Config sourcing](#config-sourcing)
|
||||
- [Test structure](#test-structure)
|
||||
- [Note for implementers](#note-for-implementers)
|
||||
* [About](#about)
|
||||
+ [Test-case formats](#test-case-formats)
|
||||
* [Glossary](#glossary)
|
||||
* [Test format philosophy](#test-format-philosophy)
|
||||
+ [Config design](#config-design)
|
||||
+ [Test completeness](#test-completeness)
|
||||
* [Test structure](#test-structure)
|
||||
+ [`<config name>/`](#--config-name---)
|
||||
+ [`<fork or phase name>/`](#--fork-or-phase-name---)
|
||||
+ [`<test runner name>/`](#--test-runner-name---)
|
||||
+ [`<test handler name>/`](#--test-handler-name---)
|
||||
+ [`<test suite name>/`](#--test-suite-name---)
|
||||
+ [`<test case>/`](#--test-case---)
|
||||
+ [`<output part>`](#--output-part--)
|
||||
- [Special output parts](#special-output-parts)
|
||||
* [`meta.yaml`](#-metayaml-)
|
||||
* [Config](#config)
|
||||
* [Config sourcing](#config-sourcing)
|
||||
* [Note for implementers](#note-for-implementers)
|
||||
|
||||
<!-- /TOC -->
|
||||
|
||||
@ -42,30 +46,29 @@ Test formats:
|
||||
- [`ssz_static`](./ssz_static/README.md)
|
||||
- More formats are planned, see tracking issues for CI/testing
|
||||
|
||||
|
||||
## Glossary
|
||||
|
||||
- `generator`: a program that outputs one or more `suite` files.
|
||||
- A generator should only output one `type` of test.
|
||||
- A generator is free to output multiple `suite` files, optionally with different `handler`s.
|
||||
- `type`: the specialization of one single `generator`.
|
||||
- `suite`: a YAML file with:
|
||||
- a header: describes the `suite`, and defines what the `suite` is for
|
||||
- a list of test cases
|
||||
- `generator`: a program that outputs one or more test-cases, each organized into a `config > runner > handler > suite` hierarchy.
|
||||
- `config`: tests are grouped by configuration used for spec presets. In addition to the standard configurations,
|
||||
`general` may be used as a catch-all for tests not restricted to one configuration. (E.g. BLS).
|
||||
- `type`: the specialization of one single `generator`. E.g. epoch processing.
|
||||
- `runner`: where a generator is a *"producer"*, this is the *"consumer"*.
|
||||
- A `runner` focuses on *only one* `type`, and each type has *only one* `runner`.
|
||||
- `handler`: a `runner` may be too limited sometimes, you may have a `suite` with a specific focus that requires a different format.
|
||||
- `handler`: a `runner` may be too limited sometimes, you may have a set of tests with a specific focus that requires a different format.
|
||||
To facilitate this, you specify a `handler`: the runner can deal with the format by using the specified handler.
|
||||
Using a `handler` in a `runner` is optional.
|
||||
- `case`: a test case, an entry in the `test_cases` list of a `suite`. A case can be anything in general,
|
||||
but its format should be well-defined in the documentation corresponding to the `type` (and `handler`).\
|
||||
A test has the same exact configuration and fork context as the other entries in the `case` list of its `suite`.
|
||||
- `forks_timeline`: a fork timeline definition, a YAML file containing a key for each fork-name, and an epoch number as value.
|
||||
- `suite`: a directory containing test cases that are coherent. Each `suite` under the same `handler` shares the same format.
|
||||
This is an organizational/cosmetic hierarchy layer.
|
||||
- `case`: a test case, a directory in a `suite`. A case can be anything in general,
|
||||
but its format should be well-defined in the documentation corresponding to the `type` (and `handler`).
|
||||
- `case part`: a test case consists of different files, possibly in different formats, to facilitate the specific test case format better.
|
||||
Optionally, a `meta.yaml` is included to declare meta-data for the test, e.g. BLS requirements.
|
||||
|
||||
## Test format philosophy
|
||||
|
||||
### Config design
|
||||
|
||||
After long discussion, the following types of configured constants were identified:
|
||||
The configuration constant types are:
|
||||
- Never changing: genesis data.
|
||||
- Changing, but reliant on old value: e.g. an epoch time may change, but if you want to do the conversion
|
||||
`(genesis data, timestamp) -> epoch number`, you end up needing both constants.
|
||||
@ -75,26 +78,12 @@ After long discussion, the following types of configured constants were identifi
|
||||
- Changing: there is a very small chance some constant may really be *replaced*.
|
||||
In this off-chance, it is likely better to include it as an additional variable,
|
||||
and some clients may simply stop supporting the old one if they do not want to sync from genesis.
|
||||
The change of functionality goes through a phase of deprecation of the old constant, and eventually only the new constant is kept around in the config (when old state is not supported anymore).
|
||||
|
||||
Based on these types of changes, we model the config as a list of key-value pairs,
|
||||
that only grows with every fork (they may change in development versions of forks, however; git manages this).
|
||||
With this approach, configurations are backwards compatible (older clients ignore unknown variables) and easy to maintain.
|
||||
|
||||
### Fork config design
|
||||
|
||||
There are two types of fork-data:
|
||||
1) Timeline: When does a fork take place?
|
||||
2) Coverage: What forks are covered by a test?
|
||||
|
||||
The first is neat to have as a separate form: we prevent duplication, and can run with different presets
|
||||
(e.g. fork timeline for a minimal local test, for a public testnet, or for mainnet).
|
||||
|
||||
The second does not affect the result of the tests; it just states what is covered by the tests,
|
||||
so that the right suites can be executed to see coverage for a certain fork.
|
||||
For some types of tests, it may be beneficial to ensure it runs exactly the same, with any given fork "active".
|
||||
Test-formats can be explicit on the need to repeat a test with different forks being "active",
|
||||
but generally tests run only once.
|
||||
|
||||
### Test completeness
|
||||
|
||||
Tests should be independent of any sync-data. If one wants to run a test, the input data should be available from the YAML.
|
||||
@ -104,93 +93,68 @@ The aim is to provide clients with a well-defined scope of work to run a particu
|
||||
- Clients that are not complete in functionality can choose to ignore suites that use certain test-runners, or specific handlers of these test-runners.
|
||||
- Clients that are on older versions can test their work based on older releases of the generated tests, and catch up with newer releases when possible.
|
||||
|
||||
## Test suite
|
||||
|
||||
```
|
||||
title: <string, short, one line> -- Display name for the test suite
|
||||
summary: <string, average, 1-3 lines> -- Summarizes the test suite
|
||||
forks_timeline: <string, reference to a fork definition file, without extension> -- Used to determine the forking timeline
|
||||
forks: <list of strings> -- Defines the coverage. Test-runner code may decide to re-run with the different forks "activated", when applicable.
|
||||
config: <string, reference to a config file, without extension> -- Used to determine which set of constants to run (possibly compile time) with
|
||||
runner: <string, no spaces, python-like naming format> *MUST be consistent with folder structure*
|
||||
handler: <string, no spaces, python-like naming format> *MUST be consistent with folder structure*
|
||||
|
||||
test_cases: <list, values being maps defining a test case each>
|
||||
...
|
||||
|
||||
```
|
||||
|
||||
## Config
|
||||
|
||||
A configuration is a separate YAML file.
|
||||
Separation of configuration and tests aims to:
|
||||
- Prevent duplication of configuration
|
||||
- Make all tests easy to upgrade (e.g. when a new config constant is introduced)
|
||||
- Clearly define which constants to use
|
||||
- Shareable between clients, for cross-client short- or long-lived testnets
|
||||
- Minimize the amounts of different constants permutations to compile as a client.
|
||||
*Note*: Some clients prefer compile-time constants and optimizations.
|
||||
They should compile for each configuration once, and run the corresponding tests per build target.
|
||||
|
||||
The format is described in [`configs/constant_presets`](../../configs/constant_presets/README.md#format).
|
||||
|
||||
|
||||
## Fork-timeline
|
||||
|
||||
A fork timeline is (preferably) loaded into a client as a configuration object, separate from the constants configuration:
|
||||
- We do not allocate or optimize any code based on epoch numbers.
|
||||
- When we transition from one fork to the other, it is preferred to stay online.
|
||||
- We may decide on an epoch number for a fork based on external events (e.g. Eth1 log event);
|
||||
a client should be able to activate a fork dynamically.
|
||||
|
||||
The format is described in [`configs/fork_timelines`](../../configs/fork_timelines/README.md#format).
|
||||
|
||||
## Config sourcing
|
||||
|
||||
The constants configurations are located in:
|
||||
|
||||
```
|
||||
<specs repo root>/configs/constant_presets/<config name>.yaml
|
||||
```
|
||||
|
||||
And copied by CI for testing purposes to:
|
||||
|
||||
```
|
||||
<tests repo root>/configs/constant_presets/<config name>.yaml
|
||||
```
|
||||
|
||||
|
||||
The fork timelines are located in:
|
||||
|
||||
```
|
||||
<specs repo root>/configs/fork_timelines/<timeline name>.yaml
|
||||
```
|
||||
|
||||
And copied by CI for testing purposes to:
|
||||
|
||||
```
|
||||
<tests repo root>/configs/fork_timelines/<timeline name>.yaml
|
||||
```
|
||||
|
||||
## Test structure
|
||||
|
||||
To prevent parsing of hundreds of different YAML files to test a specific test type,
|
||||
or, even more specifically, just a handler, tests should be structured in the following nested form:
|
||||
|
||||
```
|
||||
. <--- root of eth2.0 tests repository
|
||||
├── bls <--- collection of handler for a specific test-runner, example runner: "bls"
|
||||
│ ├── verify_msg <--- collection of test suites for a specific handler, example handler: "verify_msg". If no multiple handlers, use a dummy folder (e.g. "core"), and specify that in the yaml.
|
||||
│ │ ├── verify_valid.yml .
|
||||
│ │ ├── special_cases.yml . a list of test suites
|
||||
│ │ ├── domains.yml .
|
||||
│ │ ├── invalid.yml .
|
||||
│ │ ... <--- more suite files (optional)
|
||||
│ ... <--- more handlers
|
||||
... <--- more test types
|
||||
File path structure:
|
||||
tests/<config name>/<fork or phase name>/<test runner name>/<test handler name>/<test suite name>/<test case>/<output part>
|
||||
```
|
||||
|
||||
## Common test-case properties
|
||||
### `<config name>/`
|
||||
|
||||
Configs are at the top level: some clients want to run the minimal config first, and it is useful for sanity checks during development too.
|
||||
As a top level dir, it is not duplicated, and the used config can be copied right into this directory as reference.
|
||||
|
||||
### `<fork or phase name>/`
|
||||
|
||||
This would be: "phase0", "transferparty", "phase1", etc. Each introduces new tests, but does not copy tests that do not change.
|
||||
If you want to test phase 1, you run phase 0 tests with the configuration that includes phase 1 changes. This is out of scope for now, however.
|
||||
|
||||
### `<test runner name>/`
|
||||
|
||||
The well-known `bls`/`shuffling`/`ssz_static`/`operations`/`epoch_processing`/etc. Handlers can change the format, but there is a general target to test.
|
||||
|
||||
|
||||
### `<test handler name>/`
|
||||
|
||||
Specialization within category. All suites in here will have the same test case format.
|
||||
Using a `handler` in a `runner` is optional. A `core` (or other generic) handler may be used if the `runner` does not have different formats.
|
||||
|
||||
### `<test suite name>/`
|
||||
|
||||
Suites are split up. Suite size (i.e. the number of tests) does not change the maximum memory requirement, as test cases can be loaded one by one.
|
||||
This also makes filtered sets of tests fast and easy to load.
|
||||
|
||||
### `<test case>/`
|
||||
|
||||
Cases are split up too. This enables diffing of parts of the test case, tracking changes per part, while still using LFS. It also enables different formats for some parts.
|
||||
|
||||
### `<output part>`
|
||||
|
||||
E.g. `pre.yaml`, `deposit.yaml`, `post.yaml`.
|
||||
|
||||
Diffing a `pre.yaml` and `post.yaml` provides all the information for testing, and is good for readability of the change.
|
||||
The difference between pre and post can then be compared to anything that changes the pre state, e.g. `deposit.yaml`.
|
||||
|
||||
These files allow for custom formats for some parts of the test. E.g. something encoded in SSZ.
|
||||
|
||||
Some yaml files have copies, but formatted as raw SSZ bytes: `pre.ssz`, `deposit.ssz`, `post.ssz`.
|
||||
The yaml files are intended to be deprecated, and clients should shift to ssz inputs for efficiency.
|
||||
Deprecation will start once a viewer of SSZ test-cases is in place, to maintain a standard of readable test cases.
|
||||
This also means that some clients can drop legacy YAML -> JSON/other -> SSZ work-arounds.
|
||||
(These were implemented to support the uint64 YAML, hex strings, etc. Things that were not idiomatic to their language.)
|
||||
|
||||
YAML will not be deprecated for tests that do not use SSZ, e.g. shuffling and BLS tests.
|
||||
In this case, no loading work-around is needed anyway, and the size and efficiency of YAML are acceptable.
|
||||
|
||||
#### Special output parts
|
||||
|
||||
##### `meta.yaml`
|
||||
|
||||
If present (it is optional), the test is enhanced with extra data to describe usage. Specialized data is described in the documentation of the specific test format.
|
||||
|
||||
Common data is documented here:
|
||||
|
||||
Some test-case formats share some common key-value pair patterns, and these are documented here:
|
||||
|
||||
@ -203,22 +167,52 @@ bls_setting: int -- optional, can have 3 different values:
|
||||
2: known as "BLS ignored" - if the test validity is strictly dependent on BLS being OFF
|
||||
```
|
||||
|
||||
|
||||
## Config
|
||||
|
||||
A configuration is a separate YAML file.
|
||||
Separation of configuration and tests aims to:
|
||||
- Prevent duplication of configuration
|
||||
- Make all tests easy to upgrade (e.g. when a new config constant is introduced)
|
||||
- Clearly define which constants to use
|
||||
- Be easily shareable between clients, for cross-client short- or long-lived testnets
|
||||
- Minimize the amount of different constants permutations to compile as a client.
|
||||
*Note*: Some clients prefer compile-time constants and optimizations.
|
||||
They should compile for each configuration once, and run the corresponding tests per build target.
|
||||
- Include constants to coordinate forking with
|
||||
|
||||
The format is described in [`/configs`](../../configs/README.md#format).
|
||||
|
||||
|
||||
## Config sourcing
|
||||
|
||||
The constants configurations are located in:
|
||||
|
||||
```
|
||||
<specs repo root>/configs/<config name>.yaml
|
||||
```
|
||||
|
||||
And copied by CI for testing purposes to:
|
||||
|
||||
```
|
||||
<tests repo root>/tests/<config name>/<config name>.yaml
|
||||
```
|
||||
|
||||
The first `<config name>` is a directory, which contains all tests that make use of the given config.
|
||||
|
||||
|
||||
## Note for implementers
|
||||
|
||||
The basic pattern for test-suite loading and running is:
|
||||
|
||||
Iterate suites for given test-type, or sub-type (e.g. `operations > deposits`):
|
||||
1. Filter test-suite, options:
|
||||
- Config: Load first few lines, load into YAML, and check `config`, either:
|
||||
- Pass the suite to the correct compiled target
|
||||
- Ignore the suite if running tests as part of a compiled target with different configuration
|
||||
- Load the correct configuration for the suite dynamically before running the suite
|
||||
- Select by file name
|
||||
- Filter for specific suites (e.g. for a specific fork)
|
||||
2. Load the YAML
|
||||
- Optionally translate the data into applicable naming, e.g. `snake_case` to `PascalCase`
|
||||
3. Iterate through the `test_cases`
|
||||
4. Ask test-runner to allocate a new test-case (i.e. objectify the test-case, generalize it with a `TestCase` interface)
|
||||
Optionally pass raw test-case data to enable dynamic test-case allocation.
|
||||
1. Load test-case data into it.
|
||||
2. Make the test-case run.
|
||||
1. For a specific config, load it first (and only need to do so once),
|
||||
then continue with the tests defined in the config folder.
|
||||
2. Select a fork. Repeat for each fork if running tests for multiple forks.
|
||||
3. Select the category and specialization of interest (e.g. `operations > deposits`). Again, repeat for each if running all.
|
||||
4. Select a test suite. Or repeat for each.
|
||||
5. Select a test case. Or repeat for each.
|
||||
6. Load the parts of the case. And `meta.yaml` if present.
|
||||
7. Run the test, as defined by the test format.
|
||||
|
||||
Step 1 may be a step with compile time selection of a configuration, if desired for optimization.
|
||||
The base requirement is just to use the same set of constants, independent of the loading process.
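
As an illustration of steps 1-6, a runner can walk the documented directory hierarchy and yield case directories. This sketch assumes the path structure described under "Test structure" and leaves config/fork filtering to the caller:

```python
from pathlib import Path

def iter_test_cases(tests_root: str, runner: str, handler: str = "*"):
    """Yield tests/<config>/<fork>/<runner>/<handler>/<suite>/<case>/ directories."""
    for case_dir in sorted(Path(tests_root).glob(f"*/*/{runner}/{handler}/*/*")):
        if case_dir.is_dir():
            # Output parts live inside, e.g. case_dir / "pre.ssz", case_dir / "meta.yaml".
            yield case_dir
```
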
|
||||
|
@ -4,6 +4,8 @@ A BLS pubkey aggregation combines a series of pubkeys into a single pubkey.
|
||||
|
||||
## Test case format
|
||||
|
||||
The test data is declared in a `data.yaml` file:
|
||||
|
||||
```yaml
|
||||
input: List[BLS Pubkey] -- list of input BLS pubkeys
|
||||
output: BLS Pubkey -- expected output, single BLS pubkey
|
||||
|
@ -4,6 +4,8 @@ A BLS signature aggregation combines a series of signatures into a single signat
|
||||
|
||||
## Test case format
|
||||
|
||||
The test data is declared in a `data.yaml` file:
|
||||
|
||||
```yaml
|
||||
input: List[BLS Signature] -- list of input BLS signatures
|
||||
output: BLS Signature -- expected output, single BLS signature
|
||||
|
@ -4,6 +4,8 @@ A BLS compressed-hash to G2.
|
||||
|
||||
## Test case format
|
||||
|
||||
The test data is declared in a `data.yaml` file:
|
||||
|
||||
```yaml
|
||||
input:
|
||||
message: bytes32
|
||||
|
@ -4,6 +4,8 @@ A BLS uncompressed-hash to G2.
|
||||
|
||||
## Test case format
|
||||
|
||||
The test data is declared in a `data.yaml` file:
|
||||
|
||||
```yaml
|
||||
input:
|
||||
message: bytes32
|
||||
|
@ -4,6 +4,8 @@ A BLS private key to public key conversion.
|
||||
|
||||
## Test case format
|
||||
|
||||
The test data is declared in a `data.yaml` file:
|
||||
|
||||
```yaml
|
||||
input: bytes32 -- the private key
|
||||
output: bytes48 -- the public key
|
||||
|
@ -4,6 +4,8 @@ Message signing with BLS should produce a signature.
|
||||
|
||||
## Test case format
|
||||
|
||||
The test data is declared in a `data.yaml` file:
|
||||
|
||||
```yaml
|
||||
input:
|
||||
privkey: bytes32 -- the private key used for signing
|
||||
|
@ -7,13 +7,27 @@ Hence, the format is shared between each test-handler. (See test condition docum
|
||||
|
||||
## Test case format
|
||||
|
||||
### `meta.yaml`
|
||||
|
||||
```yaml
|
||||
description: string -- description of test case, purely for debugging purposes
|
||||
description: string -- Optional description of test case, purely for debugging purposes.
|
||||
Tests should use the directory name of the test case as identifier, not the description.
|
||||
bls_setting: int -- see general test-format spec.
|
||||
pre: BeaconState -- state before running the sub-transition
|
||||
post: BeaconState -- state after applying the epoch sub-transition.
|
||||
```
|
||||
|
||||
### `pre.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state before running the epoch sub-transition.
|
||||
|
||||
Also available as `pre.ssz`.
|
||||
|
||||
|
||||
### `post.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state after applying the epoch sub-transition.
|
||||
|
||||
Also available as `post.ssz`.
|
||||
|
||||
## Condition
|
||||
|
||||
A handler of the `epoch_processing` test-runner should process these cases,
|
||||
|
@ -4,15 +4,39 @@ Tests the initialization of a genesis state based on Eth1 data.
|
||||
|
||||
## Test case format
|
||||
|
||||
### `eth1_block_hash.yaml`
|
||||
|
||||
A `Bytes32` hex encoded, with prefix 0x. The root of the Eth-1 block.
|
||||
|
||||
Also available as `eth1_block_hash.ssz`.
|
||||
|
||||
### `eth1_timestamp.yaml`
|
||||
|
||||
An integer. The timestamp of the block, in seconds.
|
||||
|
||||
### `meta.yaml`
|
||||
|
||||
A YAML file to help read the deposit count:
|
||||
|
||||
```yaml
|
||||
description: string -- description of test case, purely for debugging purposes
|
||||
bls_setting: int -- see general test-format spec.
|
||||
eth1_block_hash: Bytes32 -- the root of the Eth-1 block, hex encoded, with prefix 0x
|
||||
eth1_timestamp: int -- the timestamp of the block, in seconds.
|
||||
deposits: [Deposit] -- list of deposits to build the genesis state with
|
||||
state: BeaconState -- the expected genesis state.
|
||||
deposits_count: int -- Number of deposits.
|
||||
```
|
||||
|
||||
### `deposits_<index>.yaml`
|
||||
|
||||
A series of files, with `<index>` in range `[0, deposits_count)`. Deposits need to be processed in order.
|
||||
Each file is a YAML-encoded `Deposit` object.
|
||||
|
||||
Each deposit is also available as `deposits_<index>.ssz`.
|
||||
|
||||
### `state.yaml`
|
||||
|
||||
The expected genesis state. A YAML-encoded `BeaconState` object.
|
||||
|
||||
Also available as `state.ssz`.
|
||||
|
||||
## Processing
|
||||
|
||||
To process this test, build a genesis state with the provided `eth1_block_hash`, `eth1_timestamp` and `deposits`:
|
||||
`initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)`,
|
||||
as described in the Beacon Chain specification.
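
A sketch of a runner for this format; `load_yaml`, the `spec` module, and `case_dir` (a `pathlib.Path`) are stand-ins for client-specific machinery:

```python
def run_genesis_init_case(case_dir, spec, load_yaml):
    eth1_block_hash = load_yaml(case_dir / "eth1_block_hash.yaml")
    eth1_timestamp = load_yaml(case_dir / "eth1_timestamp.yaml")
    meta = load_yaml(case_dir / "meta.yaml")
    deposits = [load_yaml(case_dir / f"deposits_{i}.yaml")  # processed in order
                for i in range(meta["deposits_count"])]
    state = spec.initialize_beacon_state_from_eth1(
        eth1_block_hash, eth1_timestamp, deposits)
    assert state == load_yaml(case_dir / "state.yaml")  # the expected genesis state
```
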
|
||||
|
@ -4,12 +4,18 @@ Tests if a genesis state is valid, i.e. if it counts as trigger to launch.
|
||||
|
||||
## Test case format
|
||||
|
||||
```yaml
|
||||
description: string -- description of test case, purely for debugging purposes
|
||||
bls_setting: int -- see general test-format spec.
|
||||
genesis: BeaconState -- state to validate.
|
||||
is_valid: bool -- true if the genesis state is deemed valid as to launch with, false otherwise.
|
||||
```
|
||||
### `genesis.yaml`
|
||||
|
||||
A `BeaconState`, the state to validate as genesis candidate.
|
||||
|
||||
Also available as `genesis.ssz`.
|
||||
|
||||
### `is_valid.yaml`
|
||||
|
||||
A boolean, true if the genesis state is deemed valid as to launch with, false otherwise.
|
||||
|
||||
|
||||
## Processing
|
||||
|
||||
To process the data, call `is_valid_genesis_state(genesis)`.
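
Correspondingly, a minimal runner sketch for this format (helper names are stand-ins):

```python
def run_genesis_validity_case(case_dir, spec, load_yaml):
    genesis = load_yaml(case_dir / "genesis.yaml")    # candidate state
    is_valid = load_yaml(case_dir / "is_valid.yaml")  # expected boolean
    assert spec.is_valid_genesis_state(genesis) == is_valid
```
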
|
||||
|
||||
|
@ -4,14 +4,33 @@ The different kinds of operations ("transactions") are tested individually with
|
||||
|
||||
## Test case format
|
||||
|
||||
### `meta.yaml`
|
||||
|
||||
```yaml
|
||||
description: string -- description of test case, purely for debugging purposes
|
||||
bls_setting: int -- see general test-format spec.
|
||||
pre: BeaconState -- state before applying the operation
|
||||
<operation-name>: <operation-object> -- the YAML encoded operation, e.g. a "ProposerSlashing", or "Deposit".
|
||||
post: BeaconState -- state after applying the operation. No value if operation processing is aborted.
|
||||
description: string -- Optional description of test case, purely for debugging purposes.
|
||||
Tests should use the directory name of the test case as identifier, not the description.
|
||||
bls_setting: int -- see general test-format spec.
|
||||
```
|
||||
|
||||
### `pre.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state before applying the operation.
|
||||
|
||||
Also available as `pre.ssz`.
|
||||
|
||||
### `<operation-name>.yaml`
|
||||
|
||||
A YAML-encoded operation object, e.g. a `ProposerSlashing`, or `Deposit`.
|
||||
|
||||
Also available as `<operation-name>.ssz`.
|
||||
|
||||
### `post.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state after applying the operation. No value if operation processing is aborted.
|
||||
|
||||
Also available as `post.ssz`.
|
||||
|
||||
|
||||
## Condition
|
||||
|
||||
A handler of the `operations` test-runner should process these cases,
|
||||
@ -24,7 +43,7 @@ Operations:
|
||||
|-------------------------|----------------------|----------------------|--------------------------------------------------------|
|
||||
| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` |
|
||||
| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` |
|
||||
| `block` | `Block` | `block` | `process_block_header(state, block)` |
|
||||
| `block_header` | `Block` | **`block`** | `process_block_header(state, block)` |
|
||||
| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` |
|
||||
| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
|
||||
| `transfer` | `Transfer` | `transfer` | `process_transfer(state, transfer)` |
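
As an illustration, an `operations` handler can apply the processing call from the table above and compare states; `case_dir` and `load_yaml` are stand-ins for client I/O, and the state is assumed to be mutated in place as in the pyspec:

```python
def run_operation_case(case_dir, operation_name, process_fn, load_yaml):
    state = load_yaml(case_dir / "pre.yaml")
    operation = load_yaml(case_dir / f"{operation_name}.yaml")
    post_path = case_dir / "post.yaml"
    if post_path.exists():
        process_fn(state, operation)   # processing must succeed
        assert state == load_yaml(post_path)
    else:
        try:
            process_fn(state, operation)
        except Exception:
            return                     # processing is expected to abort
        raise AssertionError("expected operation processing to abort")
```
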
|
||||
|
@ -4,14 +4,38 @@ Sanity tests to cover a series of one or more blocks being processed, aiming to
|
||||
|
||||
## Test case format
|
||||
|
||||
### `meta.yaml`
|
||||
|
||||
```yaml
|
||||
description: string -- description of test case, purely for debugging purposes
|
||||
description: string -- Optional. Description of test case, purely for debugging purposes.
|
||||
bls_setting: int -- see general test-format spec.
|
||||
pre: BeaconState -- state before running through the transitions triggered by the blocks.
|
||||
blocks: [BeaconBlock] -- blocks to process, in given order, following the main transition function (i.e. process slot and epoch transitions in between blocks as normal)
|
||||
post: BeaconState -- state after applying all the transitions triggered by the blocks.
|
||||
blocks_count: int -- the number of blocks processed in this test.
|
||||
```
|
||||
|
||||
|
||||
### `pre.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state before running the block transitions.
|
||||
|
||||
Also available as `pre.ssz`.
|
||||
|
||||
|
||||
### `blocks_<index>.yaml`
|
||||
|
||||
A series of files, with `<index>` in range `[0, blocks_count)`. Blocks need to be processed in order,
|
||||
following the main transition function (i.e. process slot and epoch transitions in between blocks as normal).
|
||||
|
||||
Each file is a YAML-encoded `BeaconBlock`.
|
||||
|
||||
Each block is also available as `blocks_<index>.ssz`.
|
||||
|
||||
### `post.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state after applying the block transitions.
|
||||
|
||||
Also available as `post.ssz`.
|
||||
|
||||
|
||||
## Condition
|
||||
|
||||
The resulting state should match the expected `post` state, or if the `post` state is left blank,
|
||||
|
@ -4,14 +4,34 @@ Sanity tests to cover a series of one or more empty-slot transitions being proce
|
||||
|
||||
## Test case format
|
||||
|
||||
### `meta.yaml`
|
||||
|
||||
```yaml
|
||||
description: string -- description of test case, purely for debugging purposes
|
||||
description: string -- Optional. Description of test case, purely for debugging purposes.
|
||||
bls_setting: int -- see general test-format spec.
|
||||
pre: BeaconState -- state before running through the transitions.
|
||||
slots: N -- amount of slots to process, N being a positive number.
|
||||
post: BeaconState -- state after applying all the transitions.
|
||||
```
|
||||
|
||||
|
||||
### `pre.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state before running the transitions.
|
||||
|
||||
Also available as `pre.ssz`.
|
||||
|
||||
|
||||
### `slots.yaml`
|
||||
|
||||
An integer. The number of slots to process (i.e. the difference in slots between pre and post), always a positive number.
|
||||
|
||||
### `post.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state after applying the transitions.
|
||||
|
||||
Also available as `post.ssz`.
|
||||
|
||||
|
||||
### Processing
|
||||
|
||||
The transition with pure time, no blocks, is known as `process_slots(state, slot)` in the spec.
|
||||
This runs state-caching (pure slot transition) and epoch processing (every E slots).
|
||||
|
||||
|
@ -7,26 +7,32 @@ Clients may take different approaches to shuffling, for optimizing,
|
||||
and supporting advanced lookup behavior back in older history.
|
||||
|
||||
For implementers, possible test runners implementing testing can include:
|
||||
1) Just test permute-index, run it for each index `i` in `range(count)`, and check against expected `output[i]` (default spec implementation).
|
||||
1) Just test permute-index, run it for each index `i` in `range(count)`, and check against expected `mapping[i]` (default spec implementation).
|
||||
2) Test un-permute-index (the reverse lookup; implemented by running the shuffling rounds in reverse, from `round_count-1` to `0`).
|
||||
3) Test the optimized complete shuffle, where all indices are shuffled at once; test output in one go.
|
||||
4) Test complete shuffle in reverse (reverse rounds, same as #2).
|
||||
|
||||
## Test case format
|
||||
|
||||
### `mapping.yaml`
|
||||
|
||||
```yaml
|
||||
seed: bytes32
|
||||
count: int
|
||||
shuffled: List[int]
|
||||
mapping: List[int]
|
||||
```
|
||||
|
||||
- The `bytes32` is encoded a string, hexadecimal encoding, prefixed with `0x`.
|
||||
- The `bytes32` is encoded as a string, hexadecimal encoding, prefixed with `0x`.
|
||||
- Integers are validator indices. These are `uint64`, but realistically they are not as big.
|
||||
|
||||
The `count` specifies the validator registry size. One should compute the shuffling for indices `0, 1, 2, 3, ..., count (exclusive)`.
|
||||
Seed is the raw shuffling seed, passed to permute-index (or optimized shuffling approach).
|
||||
|
||||
The `seed` is the raw shuffling seed, passed to permute-index (or optimized shuffling approach).
|
||||
|
||||
The `mapping` is a look-up array, constructed as `[spec.compute_shuffled_index(i, count, seed) for i in range(count)]`.
|
||||
I.e. `mapping[i]` is the shuffled location of `i`.
|
||||
|
||||
## Condition
|
||||
|
||||
The resulting list should match the expected output `shuffled` after shuffling the implied input, using the given `seed`.
|
||||
|
||||
The resulting list should match the expected output after shuffling the implied input, using the given `seed`.
|
||||
The output is checked using the `mapping`, based on the shuffling test type (e.g. can be backwards shuffling).
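
For the default strategy (option 1 in the list above), the check amounts to:

```python
def check_shuffling_case(spec, seed: bytes, count: int, mapping: list) -> None:
    assert len(mapping) == count
    for i in range(count):
        # mapping[i] is the shuffled location of index i
        assert spec.compute_shuffled_index(i, count, seed) == mapping[i]
```
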
|
||||
|
@ -1,20 +1,197 @@
|
||||
# SSZ, generic tests
|
||||
|
||||
This set of test-suites provides general testing for SSZ:
|
||||
to instantiate any container/list/vector/other type from binary data.
|
||||
to decode any container/list/vector/other type from binary data, encode it back, and compute the hash-tree-root.
|
||||
|
||||
Since SSZ is in a development-phase, the full suite of features is not covered yet.
|
||||
Note that these tests are based on the older SSZ package.
|
||||
The tests are still relevant, but limited in scope:
|
||||
more complex object encodings have changed since the original SSZ testing.
|
||||
This test collection for general-purpose SSZ is experimental.
|
||||
The `ssz_static` suite is the required minimal support for SSZ, and should be prioritized.
|
||||
|
||||
A minimal but useful series of tests covering `uint` encoding and decoding is provided.
|
||||
This is a direct port of the older SSZ `uint` tests (minus outdated test cases).
|
||||
The `ssz_generic` tests are split up into different handler, each specialized into a SSZ type:
|
||||
|
||||
Test format documentation can be found here: [uint test format](./uint.md).
|
||||
- Vectors
|
||||
- `basic_vector`
|
||||
- `complex_vector` *not supported yet*
|
||||
- List
|
||||
- `basic_list` *not supported yet*
|
||||
- `complex_list` *not supported yet*
|
||||
- Bitfields
|
||||
- `bitvector`
|
||||
- `bitlist`
|
||||
- Basic types
|
||||
- `boolean`
|
||||
- `uints`
|
||||
- Containers
|
||||
- `containers`
|
||||
|
||||
*Note*: The current Phase 0 spec does not use larger uints, and uses byte vectors (fixed length) instead to represent roots etc.
|
||||
The exact uint lengths to support may be redefined in the future.
|
||||
|
||||
Extension of the SSZ tests collection is planned, with an update to the new spec-maintained `minimal_ssz.py`;
|
||||
see CI/testing issues for progress tracking.
|
||||
## Format
|
||||
|
||||
For each type, a `valid` and an `invalid` suite is implemented.
|
||||
The cases have the same format, but those in the `invalid` suite only declare a subset of the data a test in the `valid` declares.
|
||||
|
||||
Each of the handlers encodes the SSZ type declaration in the file-name. See [Type Declarations](#type-declarations).
|
||||
|
||||
### `valid`
|
||||
|
||||
Valid has 3 parts: `meta.yaml`, `serialized.ssz`, `value.yaml`
|
||||
|
||||
#### `meta.yaml`
|
||||
|
||||
Valid SSZ objects can have a hash-tree-root, and for some types also a signing-root.
|
||||
The expected roots are encoded into the metadata yaml:
|
||||
|
||||
```yaml
|
||||
root: Bytes32 -- Hash-tree-root of the object
|
||||
signing_root: Bytes32 -- Signing-root of the object
|
||||
```
|
||||
|
||||
The `Bytes32` is encoded as a string, hexadecimal encoding, prefixed with `0x`.
|
||||
|
||||
#### `serialized.ssz`
|
||||
|
||||
The serialized form of the object, as raw SSZ bytes.
|
||||
|
||||
#### `value.yaml`
|
||||
|
||||
The object, encoded as a YAML structure. Using the same familiar encoding as YAML data in the other test suites.
|
||||
|
||||
#### Conditions
|
||||
|
||||
The conditions are the same for each type:
|
||||
|
||||
- Encoding: After encoding the given `value` object, the output should match `serialized`.
|
||||
- Decoding: After decoding the given `serialized` bytes, it should match the `value` object.
|
||||
- Hash-tree-root: the root should match the root declared in the metadata.
|
||||
- Signing-root: if present in metadata, the signing root of the object should match the container.
|
||||
|
||||
### `invalid`
|
||||
|
||||
Test cases in the `invalid` suite only include the `serialized.ssz` part.
|
||||
|
||||
#### Condition
|
||||
|
||||
Unlike the `valid` suite, invalid encodings do not have any `value` or hash tree root.
|
||||
The `serialized` data should simply not be decoded without raising an error.
|
||||
|
||||
Note that for some type declarations in the invalid suite, the type itself may technically be invalid.
|
||||
This is a valid way of detecting `invalid` data too. E.g. a 0-length basic vector.
|
||||
|
||||
|
||||
## Type declarations
|
||||
|
||||
Most types are not as static as the `ssz_static` ones; they can reasonably be constructed at test runtime from the test case name.
|
||||
Formats are listed below.
|
||||
|
||||
For each test case, an additional `_{extra...}` may be appended to the name,
|
||||
where `{extra...}` contains a human readable indication of the test case contents for debugging purposes.
|
||||
|
||||
### `basic_vector`
|
||||
|
||||
```
|
||||
Template:
|
||||
|
||||
vec_{element type}_{length}
|
||||
|
||||
Data:
|
||||
|
||||
{element type}: bool, uint8, uint16, uint32, uint64, uint128, uint256
|
||||
|
||||
{length}: an unsigned integer
|
||||
```
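
For example, the type can be recovered from the case name with a small parser; this helper is illustrative and also tolerates the optional `_{extra...}` suffix described above:

```python
import re

def parse_basic_vector_name(name: str):
    """E.g. 'vec_uint16_4_random' -> ('uint16', 4)."""
    match = re.match(r"vec_(bool|uint(?:8|16|32|64|128|256))_(\d+)", name)
    if match is None:
        raise ValueError(f"not a basic_vector case name: {name}")
    return match.group(1), int(match.group(2))
```
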
|
||||
|
||||
|
||||
### `bitlist`
|
||||
|
||||
```
|
||||
Template:
|
||||
|
||||
bitlist_{limit}
|
||||
|
||||
Data:
|
||||
|
||||
{limit}: the list limit, in bits, of the bitlist. Does not include the length-delimiting bit in the serialized form.
|
||||
```
|
||||
|
||||
|
||||
### `bitvector`
|
||||
|
||||
```
|
||||
Template:
|
||||
|
||||
bitvec_{length}
|
||||
|
||||
Data:
|
||||
|
||||
{length}: the length, in bits, of the bitvector.
|
||||
```
|
||||
|
||||
### `boolean`
|
||||
|
||||
A boolean has no type variations. Instead, file names just plainly describe the contents for debugging.
|
||||
|
||||
### `uints`
|
||||
|
||||
```
|
||||
Template:
|
||||
|
||||
uint_{size}
|
||||
|
||||
Data:
|
||||
|
||||
{size}: the uint size: 8, 16, 32, 64, 128 or 256.
|
||||
```
|
||||
|
||||
### `containers`
|
||||
|
||||
Containers are more complicated than the other types. Instead, a set of pre-defined container structures is referenced:
|
||||
|
||||
```
|
||||
Template:
|
||||
|
||||
{container name}
|
||||
|
||||
Data:
|
||||
|
||||
{container name}: Any of the container names listed below (excluding the `(Container)` python super type)
|
||||
```
|
||||
|
||||
```python
|
||||
|
||||
class SingleFieldTestStruct(Container):
|
||||
A: byte
|
||||
|
||||
|
||||
class SmallTestStruct(Container):
|
||||
A: uint16
|
||||
B: uint16
|
||||
|
||||
|
||||
class FixedTestStruct(Container):
|
||||
A: uint8
|
||||
B: uint64
|
||||
C: uint32
|
||||
|
||||
|
||||
class VarTestStruct(Container):
|
||||
A: uint16
|
||||
B: List[uint16, 1024]
|
||||
C: uint8
|
||||
|
||||
|
||||
class ComplexTestStruct(Container):
|
||||
A: uint16
|
||||
B: List[uint16, 128]
|
||||
C: uint8
|
||||
D: Bytes[256]
|
||||
E: VarTestStruct
|
||||
F: Vector[FixedTestStruct, 4]
|
||||
G: Vector[VarTestStruct, 2]
|
||||
|
||||
|
||||
class BitsStruct(Container):
|
||||
A: Bitlist[5]
|
||||
B: Bitvector[2]
|
||||
C: Bitvector[1]
|
||||
D: Bitlist[6]
|
||||
E: Bitvector[8]
|
||||
```
|
||||
|
@ -1,19 +0,0 @@
|
||||
# Test format: SSZ uints
|
||||
|
||||
SSZ supports encoding of uints up to 32 bytes. These are considered to be basic types.
|
||||
|
||||
## Test case format
|
||||
|
||||
```yaml
|
||||
type: "uintN" -- string, where N is one of [8, 16, 32, 64, 128, 256]
|
||||
valid: bool -- expected validity of the input data
|
||||
value: string -- string, decimal encoding, to support up to 256 bit integers
|
||||
ssz: bytes -- string, input data, hex encoded, with prefix 0x
|
||||
tags: List[string] -- description of test case, in the form of a list of labels
|
||||
```
|
||||
|
||||
## Condition
|
||||
|
||||
Two-way testing can be implemented in the test-runner:
|
||||
- Encoding: After encoding the given input number `value`, the output should match `ssz`
|
||||
- Decoding: After decoding the given `ssz` bytes, it should match the input number `value`
|
@ -3,6 +3,6 @@
|
||||
This set of test-suites provides static testing for SSZ:
|
||||
to instantiate just the known Eth 2.0 SSZ types from binary data.
|
||||
|
||||
This series of tests is based on the spec-maintained `minimal_ssz.py`, i.e. fully consistent with the SSZ spec.
|
||||
This series of tests is based on the spec-maintained `eth2spec/utils/ssz/ssz_impl.py`, i.e. fully consistent with the SSZ spec.
|
||||
|
||||
Test format documentation can be found here: [core test format](./core.md).
|
||||
|
@ -4,29 +4,54 @@ The goal of this type is to provide clients with a solid reference for how the k
|
||||
Each object described in the Phase 0 spec is covered.
|
||||
This is important, as many of the clients aiming to serialize/deserialize objects directly into structs/classes
|
||||
do not support (or have alternatives for) generic SSZ encoding/decoding.
|
||||
|
||||
This test-format ensures these direct serializations are covered.
|
||||
|
||||
Note that this test suite does not cover the invalid-encoding case:
|
||||
SSZ implementations should be hardened against invalid inputs with the other SSZ tests as a guide, along with fuzzing.
|
||||
|
||||
## Test case format
|
||||
|
||||
Each SSZ type is a `handler`, since the format is semantically different: the type of the data is different.
|
||||
|
||||
One can iterate over the handlers, and select the type based on the handler name.
|
||||
Suites are then the same format, but each specialized in one randomization mode.
|
||||
Some randomization modes may only produce a single test case (e.g. the all-zeroes case).
|
||||
|
||||
The output parts are: `roots.yaml`, `serialized.ssz`, `value.yaml`
|
||||
|
||||
### `roots.yaml`
|
||||
|
||||
```yaml
|
||||
SomeObjectName: -- key, object name, formatted as in spec. E.g. "BeaconBlock".
|
||||
value: dynamic -- the YAML-encoded value, of the type specified by type_name.
|
||||
serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x
|
||||
root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x
|
||||
signing_root: bytes32 -- string, signing-root of the value, hex encoded, with prefix 0x. Optional, present if type contains ``signature`` field
|
||||
root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x
|
||||
signing_root: bytes32 -- string, signing-root of the value, hex encoded, with prefix 0x.
|
||||
*Optional*, present if type is a container and ends with a ``signature`` field.
|
||||
```
|
||||
|
||||
### `serialized.ssz`
|
||||
|
||||
The raw encoded bytes.
|
||||
|
||||
### `value.yaml`
|
||||
|
||||
The same value as `serialized.ssz`, represented as YAML.
|
||||
|
||||
|
||||
## Condition
|
||||
|
||||
A test-runner can implement the following assertions:
|
||||
- Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized`
|
||||
- Hash-tree-root: After parsing the `value`, Hash-tree-root it: the output should match `root`
|
||||
- Optionally also check signing-root, if present.
|
||||
- Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value`
|
||||
- If YAML decoding of SSZ objects is supported by the implementation:
|
||||
- Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized`
|
||||
- Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value`
|
||||
- If YAML decoding of SSZ objects is not supported by the implementation:
|
||||
- Serialization in 2 steps: deserialize `serialized`, then serialize the result,
|
||||
and verify if the bytes match the original `serialized`.
|
||||
- Hash-tree-root: After parsing the `value` (or deserializing `serialized`), Hash-tree-root it: the output should match `root`
|
||||
- Optionally also check `signing_root`, if present.
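
For concreteness, here is a minimal sketch of the serialization and root checks, assuming the pyspec's `serialize` and `hash_tree_root` functions; the file-reading helpers (`load_yaml`, `load_bytes`, `yaml_to_value`) are hypothetical names introduced only for illustration:

```python
# Sketch only: load_yaml, load_bytes and yaml_to_value are assumed helpers
# for reading the suite files; serialize and hash_tree_root are the pyspec
# functions this document already refers to.
from eth2spec.utils.ssz.ssz_impl import serialize, hash_tree_root

def check_case(typ, case_dir):
    value = yaml_to_value(typ, load_yaml(case_dir + '/value.yaml'))
    serialized = load_bytes(case_dir + '/serialized.ssz')
    roots = load_yaml(case_dir + '/roots.yaml')

    # Serialization: re-encoding the parsed value must reproduce the bytes.
    assert serialize(value) == serialized
    # Hash-tree-root: must match the 0x-prefixed hex root.
    assert '0x' + hash_tree_root(value).hex() == roots['root']
```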

## References

**`serialized`**—[SSZ serialization](../../simple-serialize.md#serialization)
**`root`**—[hash_tree_root](../../simple-serialize.md#merkleization) function
**`signing_root`**—[signing_root](../../simple-serialize.md#self-signed-containers) function

@ -266,7 +266,7 @@ Up to `MAX_ATTESTATIONS`, aggregate attestations can be included in the `block`.

##### Deposits

If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth 1.0 deposit contract](../core/0_deposit-contract) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits).
If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth 1.0 deposit contract](../core/0_deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits).

The `proof` for each deposit must be constructed against the deposit root contained in `state.eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation.
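
As an illustration, a client might keep such a tree and derive the pending proofs roughly as follows; the helper names mirror the `merkle_minimal.py` style referenced above and are assumptions, not a pinned API:

```python
# Sketch only: helper names follow the merkle_minimal.py style linked above
# and are assumptions, not a fixed interface.
from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_proof

def pending_deposit_proofs(deposit_data_leaves, state):
    count = state.eth1_data.deposit_count
    # Build the tree over all deposits covered by the current eth1_data snapshot.
    tree = calc_merkle_tree_from_leaves(deposit_data_leaves[:count])
    # Each pending deposit gets a proof against the same (current) deposit root.
    return [
        get_merkle_proof(tree, item_index=index)
        for index in range(state.eth1_deposit_index, count)
    ]
```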
@ -322,13 +322,13 @@ Set `attestation.data = attestation_data` where `attestation_data` is the `Attes

##### Aggregation bits

- Let `attestation.aggregation_bits` be a `Bitlist[MAX_INDICES_PER_ATTESTATION]` where the bits at the index in the aggregated validator's `committee` is set to `0b1`.
- Let `attestation.aggregation_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` where the bit at the index of the validator in the `committee` is set to `0b1`.

*Note*: Calling `get_attesting_indices(state, attestation.data, attestation.aggregation_bits)` should return a list of length 1, containing `validator_index`.

##### Custody bits

- Let `attestation.custody_bits` be a `Bitlist[MAX_INDICES_PER_ATTESTATION]` filled with zeros of length `len(committee)`.
- Let `attestation.custody_bits` be a `Bitlist[MAX_VALIDATORS_PER_COMMITTEE]` filled with zeros of length `len(committee)`.

*Note*: This is a stub for Phase 0. A sketch of constructing both bitlists follows below.
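
A minimal sketch of filling in both fields with the pyspec SSZ types; the `committee_index` variable, the validator's position within the committee, is introduced here purely for illustration:

```python
# Sketch, using the pyspec Bitlist type; committee_index is the validator's
# position within `committee`, introduced here for illustration.
from eth2spec.utils.ssz.ssz_typing import Bitlist

MAX_VALIDATORS_PER_COMMITTEE = 2048  # mainnet preset value

def build_attestation_bits(committee, committee_index):
    # One bit per committee member; only our own position is set.
    aggregation_bits = Bitlist[MAX_VALIDATORS_PER_COMMITTEE](*([0] * len(committee)))
    aggregation_bits[committee_index] = 0b1
    # Custody bits: a same-length, all-zero stub in Phase 0.
    custody_bits = Bitlist[MAX_VALIDATORS_PER_COMMITTEE](*([0] * len(committee)))
    return aggregation_bits, custody_bits
```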
@ -1,11 +1,31 @@
# Eth 2.0 Test Generators

This directory contains all the generators for YAML tests, consumed by Eth 2.0 client implementations.
This directory contains all the generators for tests, consumed by Eth 2.0 client implementations.

Any issues with the generators and/or generated tests should be filed in the repository that hosts the generator outputs,
here: [ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests).

On releases, test generators are run by the release manager. Test-generation of mainnet tests can take a significant amount of time, and is better left out of a CI setup.

An automated nightly test-release system, with a config filter applied, is being considered as implementation needs mature.

## Table of contents

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [How to run generators](#how-to-run-generators)
  - [Cleaning](#cleaning)
  - [Running all test generators](#running-all-test-generators)
  - [Running a single generator](#running-a-single-generator)
- [Developing a generator](#developing-a-generator)
- [How to add a new test generator](#how-to-add-a-new-test-generator)
- [How to remove a test generator](#how-to-remove-a-test-generator)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

Any issues with the generators and/or generated tests should be filed in the repository that hosts the generator outputs, here: [ethereum/eth2.0-spec-tests](https://github.com/ethereum/eth2.0-spec-tests).

Whenever a release is made, the new tests are automatically built, and
[eth2TestGenBot](https://github.com/eth2TestGenBot) commits the changes to the test repository.

## How to run generators

@ -58,11 +78,11 @@ It's recommended to extend the base-generator.

Create a `requirements.txt` in the root of your generator directory:
```
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
```

The config helper and pyspec are optional, but preferred. We encourage generators to derive tests from the spec itself in order to prevent code duplication and outdated tests.
Applying configurations to the spec is simple and enables you to create test suites with different contexts.
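
For example, applying a preset to the spec module takes just two calls, as used in the generator code shown later in this document (the configs path below is illustrative):

```python
# Minimal sketch: apply the "minimal" configuration to the pyspec module
# before deriving test cases from it.
from preset_loader import loader
from eth2spec.phase0 import spec

presets = loader.load_presets('../../configs/', 'minimal')  # path is illustrative
spec.apply_constants_preset(presets)
```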
@ -73,72 +93,115 @@ Install all the necessary requirements (re-run when you add more):

pip3 install -r requirements.txt
```

Note that, as with running normal tests, you may need `PYTHONPATH` to include the pyspec directory
in order to run test generators manually. The makefile already handles this for you.

And write your initial test generator, extending the base generator:

Write a `main.py` file. See example:
Write a `main.py` file. The shuffling test generator is a good minimal starting point:

```python
from gen_base import gen_runner, gen_suite, gen_typing

from eth_utils import (
    to_dict, to_tuple
)

from eth2spec.phase0 import spec as spec
from eth_utils import to_tuple
from gen_base import gen_runner, gen_typing
from preset_loader import loader
from eth2spec.phase0 import spec
from typing import Iterable

@to_dict
def example_test_case(v: int):
    yield "spec_SHARD_COUNT", spec.SHARD_COUNT
    yield "example", v

def shuffling_case_fn(seed, count):
    yield 'mapping', 'data', {
        'seed': '0x' + seed.hex(),
        'count': count,
        'mapping': [int(spec.compute_shuffled_index(i, count, seed)) for i in range(count)]
    }


def shuffling_case(seed, count):
    return f'shuffle_0x{seed.hex()}_{count}', lambda: shuffling_case_fn(seed, count)


@to_tuple
def generate_example_test_cases():
    for i in range(10):
        yield example_test_case(i)
def shuffling_test_cases():
    for seed in [spec.hash(seed_init_value.to_bytes(length=4, byteorder='little')) for seed_init_value in range(30)]:
        for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000, 9999]:
            yield shuffling_case(seed, count)


def example_minimal_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    presets = loader.load_presets(configs_path, 'minimal')
    spec.apply_constants_preset(presets)
def create_provider(config_name: str) -> gen_typing.TestProvider:

    return ("mini", "core", gen_suite.render_suite(
        title="example_minimal",
        summary="Minimal example suite, testing bar.",
        forks_timeline="testing",
        forks=["phase0"],
        config="minimal",
        handler="main",
        test_cases=generate_example_test_cases()))
    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec.apply_constants_preset(presets)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        for (case_name, case_fn) in shuffling_test_cases():
            yield gen_typing.TestCase(
                fork_name='phase0',
                runner_name='shuffling',
                handler_name='core',
                suite_name='shuffle',
                case_name=case_name,
                case_fn=case_fn
            )

def example_mainnet_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    presets = loader.load_presets(configs_path, 'mainnet')
    spec.apply_constants_preset(presets)

    return ("full", "core", gen_suite.render_suite(
        title="example_main_net",
        summary="Main net based example suite.",
        forks_timeline= "mainnet",
        forks=["phase0"],
        config="testing",
        handler="main",
        test_cases=generate_example_test_cases()))
    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("example", [example_minimal_suite, example_mainnet_suite])
    gen_runner.run_generator("shuffling", [create_provider("minimal"), create_provider("mainnet")])
```

This generator:
- builds off of `gen_runner.run_generator` to handle configuration / filter / output logic.
- parametrizes the creation of a test provider to support multiple configs.
- iterates through test cases.
- has each test case provide a `case_fn`, which `gen_runner.run_generator` executes only if the case needs to be generated, and skips otherwise.

To extend this, one could parametrize the `shuffling_test_cases` function and create a test provider for any test-yielding function, as sketched below.
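
A sketch of that generalization; `create_provider` mirrors the code above, and the `case_maker` parameter is introduced here for illustration:

```python
# Sketch: a provider factory for any (name, case_fn)-yielding function.
from typing import Callable, Iterable
from gen_base import gen_typing

def create_provider(config_name: str,
                    case_maker: Callable[[], Iterable]) -> gen_typing.TestProvider:
    def prepare_fn(configs_path: str) -> str:
        # Same preset handling as in the example above; omitted for brevity.
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        for (case_name, case_fn) in case_maker():
            yield gen_typing.TestCase(
                fork_name='phase0',
                runner_name='shuffling',
                handler_name='core',
                suite_name='shuffle',
                case_name=case_name,
                case_fn=case_fn
            )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
```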
Another example, to generate tests from pytests:

```python
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec_phase0.apply_constants_preset(presets)
        spec_phase1.apply_constants_preset(presets)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='epoch_processing',
            handler_name=handler_name,
            src=tests_src,
            fork_name='phase0'
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("epoch_processing", [
        create_provider('crosslinks', test_process_crosslinks, 'minimal'),
        ...
    ])

```

Here, both spec phases load the configuration, and the stream of test cases is derived from a pytest file using the `generate_from_tests` utility.

Recommendations:
- You can have more than just one suite creator, e.g. `gen_runner.run_generator("foo", [bar_test_suite, abc_test_suite, example_test_suite])`.
- You can concatenate lists of test cases if you don't want to split it up into suites; however, make sure they can be run with one handler.
- You can split your suite creators into different Python files/packages; this is good for code organization.
- Use config "minimal" for performance, but also implement a suite with the default config where necessary.
- You may be able to write your test suite creator in a way where it does not make assumptions on constants.
  If so, you can generate test suites with different configurations for the same scenario (see example).
- The test-generator accepts `--output` and `--force` (overwrite output).
- You can have more than just one test provider.
- Your test provider is free to output any configuration and combination of runner/handler/fork/case name.
- You can split your test case generators into different Python files/packages; this is good for code organization.
- Use config `minimal` for performance and simplicity, but also implement a suite with the `mainnet` config where necessary.
- You may be able to write your test case provider in a way where it does not make assumptions on constants.
  If so, you can generate test cases with different configurations for the same scenario (see example).
- See [`test_libs/gen_helpers/README.md`](../test_libs/gen_helpers/README.md) for command line options for generators.

## How to add a new test generator

@ -151,11 +214,10 @@ To add a new test generator that builds `New Tests`:
3. Your generator is assumed to have a `main.py` file in its root.
   By adding the base generator to your requirements, you can make a generator really easily. See docs below.
4. Your generator is called with `-o some/file/path/for_testing/can/be_anything -c some/other/path/to_configs/`.
   The base generator helps you handle this; you only have to define suite headers
   and a list of tests for each suite you generate.
   The base generator helps you handle this; you only have to define test case providers.
5. Finally, add any linting or testing commands to the
   [circleci config file](https://github.com/ethereum/eth2.0-test-generators/blob/master/.circleci/config.yml)
   if desired to increase code quality.
   [circleci config file](../.circleci/config.yml) if desired to increase code quality.
   Or add it to the [`Makefile`](../Makefile), if it can be run locally.

*Note*: You do not have to change the makefile.
However, if necessary (e.g. not using Python, or mixing in other languages), submit an issue, and it can be a special case.

@ -2,23 +2,21 @@
BLS test vectors generator
"""

from typing import Tuple
from typing import Tuple, Iterable, Any, Callable, Dict

from eth_utils import (
    encode_hex,
    int_to_big_endian,
    to_tuple,
)
from gen_base import gen_runner, gen_suite, gen_typing
from gen_base import gen_runner, gen_typing

from py_ecc import bls


F2Q_COEFF_LEN = 48
G2_COMPRESSED_Z_LEN = 48


def int_to_hex(n: int, byte_length: int=None) -> str:
def int_to_hex(n: int, byte_length: int = None) -> str:
    byte_value = int_to_big_endian(n)
    if byte_length:
        byte_value = byte_value.rjust(byte_length, b'\x00')
@ -32,6 +30,9 @@ def hex_to_int(x: str) -> int:
DOMAINS = [
    b'\x00\x00\x00\x00\x00\x00\x00\x00',
    b'\x00\x00\x00\x00\x00\x00\x00\x01',
    b'\x01\x00\x00\x00\x00\x00\x00\x00',
    b'\x80\x00\x00\x00\x00\x00\x00\x00',
    b'\x01\x23\x45\x67\x89\xab\xcd\xef',
    b'\xff\xff\xff\xff\xff\xff\xff\xff'
]

@ -51,7 +52,7 @@ PRIVKEYS = [


def hash_message(msg: bytes,
                 domain: bytes) ->Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]]:
                 domain: bytes) -> Tuple[Tuple[str, str], Tuple[str, str], Tuple[str, str]]:
    """
    Hash message
    Input:
@ -82,11 +83,10 @@ def hash_message_compressed(msg: bytes, domain: bytes) -> Tuple[str, str]:
    return [int_to_hex(z1, G2_COMPRESSED_Z_LEN), int_to_hex(z2, G2_COMPRESSED_Z_LEN)]


@to_tuple
def case01_message_hash_G2_uncompressed():
    for msg in MESSAGES:
        for domain in DOMAINS:
            yield {
            yield f'uncom_g2_hash_{encode_hex(msg)}_{encode_hex(domain)}', {
                'input': {
                    'message': encode_hex(msg),
                    'domain': encode_hex(domain),
@ -94,11 +94,11 @@ def case01_message_hash_G2_uncompressed():
                'output': hash_message(msg, domain)
            }

@to_tuple

def case02_message_hash_G2_compressed():
    for msg in MESSAGES:
        for domain in DOMAINS:
            yield {
            yield f'com_g2_hash_{encode_hex(msg)}_{encode_hex(domain)}', {
                'input': {
                    'message': encode_hex(msg),
                    'domain': encode_hex(domain),
@ -106,23 +106,23 @@ def case02_message_hash_G2_compressed():
                'output': hash_message_compressed(msg, domain)
            }

@to_tuple

def case03_private_to_public_key():
    pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS]
    pubkeys_serial = ['0x' + pubkey.hex() for pubkey in pubkeys]
    for privkey, pubkey_serial in zip(PRIVKEYS, pubkeys_serial):
        yield {
        yield f'priv_to_pub_{int_to_hex(privkey)}', {
            'input': int_to_hex(privkey),
            'output': pubkey_serial,
        }

@to_tuple

def case04_sign_messages():
    for privkey in PRIVKEYS:
        for message in MESSAGES:
            for domain in DOMAINS:
                sig = bls.sign(message, privkey, domain)
                yield {
                yield f'sign_msg_{int_to_hex(privkey)}_{encode_hex(message)}_{encode_hex(domain)}', {
                    'input': {
                        'privkey': int_to_hex(privkey),
                        'message': encode_hex(message),
@ -131,25 +131,25 @@ def case04_sign_messages():
                    'output': encode_hex(sig)
                }


# TODO: case05_verify_messages: Verify messages signed in case04
# It takes too long, empty for now


@to_tuple
def case06_aggregate_sigs():
    for domain in DOMAINS:
        for message in MESSAGES:
            sigs = [bls.sign(message, privkey, domain) for privkey in PRIVKEYS]
            yield {
            yield f'agg_sigs_{encode_hex(message)}_{encode_hex(domain)}', {
                'input': [encode_hex(sig) for sig in sigs],
                'output': encode_hex(bls.aggregate_signatures(sigs)),
            }

@to_tuple

def case07_aggregate_pubkeys():
    pubkeys = [bls.privtopub(privkey) for privkey in PRIVKEYS]
    pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys]
    yield {
    yield 'agg_pub_keys', {
        'input': pubkeys_serial,
        'output': encode_hex(bls.aggregate_pubkeys(pubkeys)),
    }
@ -162,85 +162,36 @@ def case07_aggregate_pubkeys():
# Proof-of-possession


def bls_msg_hash_uncompressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("g2_uncompressed", "msg_hash_g2_uncompressed", gen_suite.render_suite(
        title="BLS G2 Uncompressed msg hash",
        summary="BLS G2 Uncompressed msg hash",
        forks_timeline="mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="bls",
        handler="msg_hash_uncompressed",
        test_cases=case01_message_hash_G2_uncompressed()))
def create_provider(handler_name: str,
                    test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        # Nothing to load / change in spec. Maybe in future forks.
        # Put the tests into the general config category, to not require any particular configuration.
        return 'general'

def bls_msg_hash_compressed_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("g2_compressed", "msg_hash_g2_compressed", gen_suite.render_suite(
        title="BLS G2 Compressed msg hash",
        summary="BLS G2 Compressed msg hash",
        forks_timeline="mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="bls",
        handler="msg_hash_compressed",
        test_cases=case02_message_hash_G2_compressed()))
    def cases_fn() -> Iterable[gen_typing.TestCase]:
        for data in test_case_fn():
            (case_name, case_content) = data
            yield gen_typing.TestCase(
                fork_name='phase0',
                runner_name='bls',
                handler_name=handler_name,
                suite_name='small',
                case_name=case_name,
                case_fn=lambda: [('data', 'data', case_content)]
            )



def bls_priv_to_pub_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("priv_to_pub", "priv_to_pub", gen_suite.render_suite(
        title="BLS private key to pubkey",
        summary="BLS Convert private key to public key",
        forks_timeline="mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="bls",
        handler="priv_to_pub",
        test_cases=case03_private_to_public_key()))


def bls_sign_msg_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("sign_msg", "sign_msg", gen_suite.render_suite(
        title="BLS sign msg",
        summary="BLS Sign a message",
        forks_timeline="mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="bls",
        handler="sign_msg",
        test_cases=case04_sign_messages()))


def bls_aggregate_sigs_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("aggregate_sigs", "aggregate_sigs", gen_suite.render_suite(
        title="BLS aggregate sigs",
        summary="BLS Aggregate signatures",
        forks_timeline="mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="bls",
        handler="aggregate_sigs",
        test_cases=case06_aggregate_sigs()))


def bls_aggregate_pubkeys_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("aggregate_pubkeys", "aggregate_pubkeys", gen_suite.render_suite(
        title="BLS aggregate pubkeys",
        summary="BLS Aggregate public keys",
        forks_timeline="mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="bls",
        handler="aggregate_pubkeys",
        test_cases=case07_aggregate_pubkeys()))
    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("bls", [
        bls_msg_hash_compressed_suite,
        bls_msg_hash_uncompressed_suite,
        bls_priv_to_pub_suite,
        bls_sign_msg_suite,
        bls_aggregate_sigs_suite,
        bls_aggregate_pubkeys_suite
        create_provider('msg_hash_uncompressed', case01_message_hash_G2_uncompressed),
        create_provider('msg_hash_compressed', case02_message_hash_G2_compressed),
        create_provider('priv_to_pub', case03_private_to_public_key),
        create_provider('sign_msg', case04_sign_messages),
        create_provider('aggregate_sigs', case06_aggregate_sigs),
        create_provider('aggregate_pubkeys', case07_aggregate_pubkeys),
    ])

@ -1,4 +1,4 @@
from typing import Callable, Iterable
from typing import Iterable

from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
@ -9,45 +9,40 @@ from eth2spec.test.phase_0.epoch_processing import (
    test_process_registry_updates,
    test_process_slashings
)
from gen_base import gen_runner, gen_suite, gen_typing
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from preset_loader import loader


def create_suite(transition_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \
        -> Callable[[str], gen_typing.TestSuiteOutput]:
    def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec_phase0.apply_constants_preset(presets)
        spec_phase1.apply_constants_preset(presets)
        return config_name

        return ("%s_%s" % (transition_name, config_name), transition_name, gen_suite.render_suite(
            title="%s epoch processing" % transition_name,
            summary="Test suite for %s type epoch processing" % transition_name,
            forks_timeline="testing",
            forks=["phase0"],
            config=config_name,
            runner="epoch_processing",
            handler=transition_name,
            test_cases=get_cases()))
    def cases_fn() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='epoch_processing',
            handler_name=handler_name,
            src=tests_src,
            fork_name='phase0'
        )

    return suite_definition
    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("epoch_processing", [
        create_suite('crosslinks', 'minimal', lambda: generate_from_tests(test_process_crosslinks, 'phase0')),
        create_suite('crosslinks', 'mainnet', lambda: generate_from_tests(test_process_crosslinks, 'phase0')),
        create_suite('final_updates', 'minimal', lambda: generate_from_tests(test_process_final_updates, 'phase0')),
        create_suite('final_updates', 'mainnet', lambda: generate_from_tests(test_process_final_updates, 'phase0')),
        create_suite('justification_and_finalization', 'minimal',
                     lambda: generate_from_tests(test_process_justification_and_finalization, 'phase0')),
        create_suite('justification_and_finalization', 'mainnet',
                     lambda: generate_from_tests(test_process_justification_and_finalization, 'phase0')),
        create_suite('registry_updates', 'minimal',
                     lambda: generate_from_tests(test_process_registry_updates, 'phase0')),
        create_suite('registry_updates', 'mainnet',
                     lambda: generate_from_tests(test_process_registry_updates, 'phase0')),
        create_suite('slashings', 'minimal', lambda: generate_from_tests(test_process_slashings, 'phase0')),
        create_suite('slashings', 'mainnet', lambda: generate_from_tests(test_process_slashings, 'phase0')),
        create_provider('crosslinks', test_process_crosslinks, 'minimal'),
        create_provider('crosslinks', test_process_crosslinks, 'mainnet'),
        create_provider('final_updates', test_process_final_updates, 'minimal'),
        create_provider('final_updates', test_process_final_updates, 'mainnet'),
        create_provider('justification_and_finalization', test_process_justification_and_finalization, 'minimal'),
        create_provider('justification_and_finalization', test_process_justification_and_finalization, 'mainnet'),
        create_provider('registry_updates', test_process_registry_updates, 'minimal'),
        create_provider('registry_updates', test_process_registry_updates, 'mainnet'),
        create_provider('slashings', test_process_slashings, 'minimal'),
        create_provider('slashings', test_process_slashings, 'mainnet'),
    ])

@ -1,4 +1,3 @@
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
@ -1,33 +1,33 @@
from typing import Callable, Iterable
from typing import Iterable

from eth2spec.test.genesis import test_initialization, test_validity

from gen_base import gen_runner, gen_suite, gen_typing
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from preset_loader import loader
from eth2spec.phase0 import spec as spec


def create_suite(handler_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \
        -> Callable[[str], gen_typing.TestSuiteOutput]:
    def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec.apply_constants_preset(presets)
        return config_name

        return ("genesis_%s_%s" % (handler_name, config_name), handler_name, gen_suite.render_suite(
            title="genesis testing",
            summary="Genesis test suite, %s type, generated from pytests" % handler_name,
            forks_timeline="testing",
            forks=["phase0"],
            config=config_name,
            runner="genesis",
            handler=handler_name,
            test_cases=get_cases()))
    return suite_definition
    def cases_fn() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='genesis',
            handler_name=handler_name,
            src=tests_src,
            fork_name='phase0'
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("genesis", [
        create_suite('initialization', 'minimal', lambda: generate_from_tests(test_initialization, 'phase0')),
        create_suite('validity', 'minimal', lambda: generate_from_tests(test_validity, 'phase0')),
        create_provider('initialization', test_initialization, 'minimal'),
        create_provider('validity', test_validity, 'minimal'),
    ])

@ -1,4 +1,3 @@
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
@ -1,4 +1,4 @@
from typing import Callable, Iterable
from typing import Iterable

from eth2spec.test.phase_0.block_processing import (
    test_process_attestation,
@ -10,48 +10,48 @@ from eth2spec.test.phase_0.block_processing import (
    test_process_voluntary_exit,
)

from gen_base import gen_runner, gen_suite, gen_typing
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from preset_loader import loader
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1


def create_suite(operation_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \
        -> Callable[[str], gen_typing.TestSuiteOutput]:
    def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec_phase0.apply_constants_preset(presets)
        spec_phase1.apply_constants_preset(presets)
        return config_name

        return ("%s_%s" % (operation_name, config_name), operation_name, gen_suite.render_suite(
            title="%s operation" % operation_name,
            summary="Test suite for %s type operation processing" % operation_name,
            forks_timeline="testing",
            forks=["phase0"],
            config=config_name,
            runner="operations",
            handler=operation_name,
            test_cases=get_cases()))
    return suite_definition
    def cases_fn() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='operations',
            handler_name=handler_name,
            src=tests_src,
            fork_name='phase0'
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("operations", [
        create_suite('attestation', 'minimal', lambda: generate_from_tests(test_process_attestation, 'phase0')),
        create_suite('attestation', 'mainnet', lambda: generate_from_tests(test_process_attestation, 'phase0')),
        create_suite('attester_slashing', 'minimal', lambda: generate_from_tests(test_process_attester_slashing, 'phase0')),
        create_suite('attester_slashing', 'mainnet', lambda: generate_from_tests(test_process_attester_slashing, 'phase0')),
        create_suite('block_header', 'minimal', lambda: generate_from_tests(test_process_block_header, 'phase0')),
        create_suite('block_header', 'mainnet', lambda: generate_from_tests(test_process_block_header, 'phase0')),
        create_suite('deposit', 'minimal', lambda: generate_from_tests(test_process_deposit, 'phase0')),
        create_suite('deposit', 'mainnet', lambda: generate_from_tests(test_process_deposit, 'phase0')),
        create_suite('proposer_slashing', 'minimal', lambda: generate_from_tests(test_process_proposer_slashing, 'phase0')),
        create_suite('proposer_slashing', 'mainnet', lambda: generate_from_tests(test_process_proposer_slashing, 'phase0')),
        create_suite('transfer', 'minimal', lambda: generate_from_tests(test_process_transfer, 'phase0')),
        create_provider('attestation', test_process_attestation, 'minimal'),
        create_provider('attestation', test_process_attestation, 'mainnet'),
        create_provider('attester_slashing', test_process_attester_slashing, 'minimal'),
        create_provider('attester_slashing', test_process_attester_slashing, 'mainnet'),
        create_provider('block_header', test_process_block_header, 'minimal'),
        create_provider('block_header', test_process_block_header, 'mainnet'),
        create_provider('deposit', test_process_deposit, 'minimal'),
        create_provider('deposit', test_process_deposit, 'mainnet'),
        create_provider('proposer_slashing', test_process_proposer_slashing, 'minimal'),
        create_provider('proposer_slashing', test_process_proposer_slashing, 'mainnet'),
        create_provider('transfer', test_process_transfer, 'minimal'),
        # Disabled: the large number of different transfer tests produces an excessive volume of test output.
        # And unnecessarily so, as transfers are currently disabled, so this is not a priority.
        # create_suite('transfer', 'mainnet', lambda: generate_from_tests(test_process_transfer, 'phase0')),
        create_suite('voluntary_exit', 'minimal', lambda: generate_from_tests(test_process_voluntary_exit, 'phase0')),
        create_suite('voluntary_exit', 'mainnet', lambda: generate_from_tests(test_process_voluntary_exit, 'phase0')),
        # create_provider('transfer', test_process_transfer, 'mainnet'),
        create_provider('voluntary_exit', test_process_voluntary_exit, 'minimal'),
        create_provider('voluntary_exit', test_process_voluntary_exit, 'mainnet'),
    ])

@ -1,37 +1,37 @@
from typing import Callable, Iterable
from typing import Iterable

from eth2spec.test.sanity import test_blocks, test_slots

from gen_base import gen_runner, gen_suite, gen_typing
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from preset_loader import loader
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1


def create_suite(handler_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \
        -> Callable[[str], gen_typing.TestSuiteOutput]:
    def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec_phase0.apply_constants_preset(presets)
        spec_phase1.apply_constants_preset(presets)
        return config_name

        return ("sanity_%s_%s" % (handler_name, config_name), handler_name, gen_suite.render_suite(
            title="sanity testing",
            summary="Sanity test suite, %s type, generated from pytests" % handler_name,
            forks_timeline="testing",
            forks=["phase0"],
            config=config_name,
            runner="sanity",
            handler=handler_name,
            test_cases=get_cases()))
    return suite_definition
    def cases_fn() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='sanity',
            handler_name=handler_name,
            src=tests_src,
            fork_name='phase0'
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("sanity", [
        create_suite('blocks', 'minimal', lambda: generate_from_tests(test_blocks, 'phase0')),
        create_suite('blocks', 'mainnet', lambda: generate_from_tests(test_blocks, 'phase0')),
        create_suite('slots', 'minimal', lambda: generate_from_tests(test_slots, 'phase0')),
        create_suite('slots', 'mainnet', lambda: generate_from_tests(test_slots, 'phase0')),
        create_provider('blocks', test_blocks, 'minimal'),
        create_provider('blocks', test_blocks, 'mainnet'),
        create_provider('slots', test_slots, 'minimal'),
        create_provider('slots', test_slots, 'mainnet'),
    ])

@ -1,4 +1,3 @@
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
@ -1,54 +1,49 @@
from eth2spec.phase0 import spec as spec
from eth_utils import (
    to_dict, to_tuple
)
from gen_base import gen_runner, gen_suite, gen_typing
from eth_utils import to_tuple
from gen_base import gen_runner, gen_typing
from preset_loader import loader
from typing import Iterable


def shuffling_case_fn(seed, count):
    yield 'mapping', 'data', {
        'seed': '0x' + seed.hex(),
        'count': count,
        'mapping': [int(spec.compute_shuffled_index(i, count, seed)) for i in range(count)]
    }


@to_dict
def shuffling_case(seed, count):
    yield 'seed', '0x' + seed.hex()
    yield 'count', count
    yield 'shuffled', [int(spec.compute_shuffled_index(i, count, seed)) for i in range(count)]
    return f'shuffle_0x{seed.hex()}_{count}', lambda: shuffling_case_fn(seed, count)


@to_tuple
def shuffling_test_cases():
    for seed in [spec.hash(spec.int_to_bytes(seed_init_value, length=4)) for seed_init_value in range(30)]:
        for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000]:
    for seed in [spec.hash(seed_init_value.to_bytes(length=4, byteorder='little')) for seed_init_value in range(30)]:
        for count in [0, 1, 2, 3, 5, 10, 33, 100, 1000, 9999]:
            yield shuffling_case(seed, count)


def mini_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    presets = loader.load_presets(configs_path, 'minimal')
    spec.apply_constants_preset(presets)
def create_provider(config_name: str) -> gen_typing.TestProvider:

    return ("shuffling_minimal", "core", gen_suite.render_suite(
        title="Swap-or-Not Shuffling tests with minimal config",
        summary="Swap or not shuffling, with minimally configured testing round-count",
        forks_timeline="testing",
        forks=["phase0"],
        config="minimal",
        runner="shuffling",
        handler="core",
        test_cases=shuffling_test_cases()))
    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec.apply_constants_preset(presets)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        for (case_name, case_fn) in shuffling_test_cases():
            yield gen_typing.TestCase(
                fork_name='phase0',
                runner_name='shuffling',
                handler_name='core',
                suite_name='shuffle',
                case_name=case_name,
                case_fn=case_fn
            )

def full_shuffling_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    presets = loader.load_presets(configs_path, 'mainnet')
    spec.apply_constants_preset(presets)

    return ("shuffling_full", "core", gen_suite.render_suite(
        title="Swap-or-Not Shuffling tests with mainnet config",
        summary="Swap or not shuffling, with normal configured (secure) mainnet round-count",
        forks_timeline="mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="shuffling",
        handler="core",
        test_cases=shuffling_test_cases()))
    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("shuffling", [mini_shuffling_suite, full_shuffling_suite])
    gen_runner.run_generator("shuffling", [create_provider("minimal"), create_provider("mainnet")])

@ -1,47 +1,44 @@
from uint_test_cases import (
    generate_random_uint_test_cases,
    generate_uint_wrong_length_test_cases,
    generate_uint_bounds_test_cases,
    generate_uint_out_of_bounds_test_cases
)

from gen_base import gen_runner, gen_suite, gen_typing

def ssz_random_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("uint_random", "uint", gen_suite.render_suite(
        title="UInt Random",
        summary="Random integers chosen uniformly over the allowed value range",
        forks_timeline= "mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="ssz",
        handler="uint",
        test_cases=generate_random_uint_test_cases()))
from typing import Iterable
from gen_base import gen_runner, gen_typing
import ssz_basic_vector
import ssz_bitlist
import ssz_bitvector
import ssz_boolean
import ssz_uints
import ssz_container


def ssz_wrong_uint_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("uint_wrong_length", "uint", gen_suite.render_suite(
        title="UInt Wrong Length",
        summary="Serialized integers that are too short or too long",
        forks_timeline= "mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="ssz",
        handler="uint",
        test_cases=generate_uint_wrong_length_test_cases()))
def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        return "general"

def ssz_uint_bounds_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
    return ("uint_bounds", "uint", gen_suite.render_suite(
        title="UInt Bounds",
        summary="Integers right at or beyond the bounds of the allowed value range",
        forks_timeline= "mainnet",
        forks=["phase0"],
        config="mainnet",
        runner="ssz",
        handler="uint",
        test_cases=generate_uint_bounds_test_cases() + generate_uint_out_of_bounds_test_cases()))
    def cases_fn() -> Iterable[gen_typing.TestCase]:
        for (case_name, case_fn) in case_maker():
            yield gen_typing.TestCase(
                fork_name='phase0',
                runner_name='ssz_generic',
                handler_name=handler_name,
                suite_name=suite_name,
                case_name=case_name,
                case_fn=case_fn
            )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
    gen_runner.run_generator("ssz_generic", [ssz_random_uint_suite, ssz_wrong_uint_suite, ssz_uint_bounds_suite])
    gen_runner.run_generator("ssz_generic", [
        create_provider("basic_vector", "valid", ssz_basic_vector.valid_cases),
        create_provider("basic_vector", "invalid", ssz_basic_vector.invalid_cases),
        create_provider("bitlist", "valid", ssz_bitlist.valid_cases),
        create_provider("bitlist", "invalid", ssz_bitlist.invalid_cases),
        create_provider("bitvector", "valid", ssz_bitvector.valid_cases),
        create_provider("bitvector", "invalid", ssz_bitvector.invalid_cases),
        create_provider("boolean", "valid", ssz_boolean.valid_cases),
        create_provider("boolean", "invalid", ssz_boolean.invalid_cases),
        create_provider("uints", "valid", ssz_uints.valid_cases),
        create_provider("uints", "invalid", ssz_uints.invalid_cases),
        create_provider("containers", "valid", ssz_container.valid_cases),
        create_provider("containers", "invalid", ssz_container.invalid_cases),
    ])

@ -1,93 +0,0 @@
from collections.abc import (
    Mapping,
    Sequence,
)

from eth_utils import (
    encode_hex,
    to_dict,
)

from ssz.sedes import (
    BaseSedes,
    Boolean,
    Bytes,
    BytesN,
    Container,
    List,
    UInt,
)


def render_value(value):
    if isinstance(value, bool):
        return value
    elif isinstance(value, int):
        return str(value)
    elif isinstance(value, bytes):
        return encode_hex(value)
    elif isinstance(value, Sequence):
        return tuple(render_value(element) for element in value)
    elif isinstance(value, Mapping):
        return render_dict_value(value)
    else:
        raise ValueError(f"Cannot render value {value}")


@to_dict
def render_dict_value(value):
    for key, value in value.items():
        yield key, render_value(value)


def render_type_definition(sedes):
    if isinstance(sedes, Boolean):
        return "bool"

    elif isinstance(sedes, UInt):
        return f"uint{sedes.length * 8}"

    elif isinstance(sedes, BytesN):
        return f"bytes{sedes.length}"

    elif isinstance(sedes, Bytes):
        return f"bytes"

    elif isinstance(sedes, List):
        return [render_type_definition(sedes.element_sedes)]

    elif isinstance(sedes, Container):
        return {
            field_name: render_type_definition(field_sedes)
            for field_name, field_sedes in sedes.fields
        }

    elif isinstance(sedes, BaseSedes):
        raise Exception("Unreachable: All sedes types have been checked")

    else:
        raise TypeError("Expected BaseSedes")


@to_dict
def render_test_case(*, sedes, valid, value=None, serial=None, description=None, tags=None):
    value_and_serial_given = value is not None and serial is not None
    if valid:
        if not value_and_serial_given:
            raise ValueError("For valid test cases, both value and ssz must be present")
    else:
        if value_and_serial_given:
            raise ValueError("For invalid test cases, one of either value or ssz must not be present")

    if tags is None:
        tags = []

    yield "type", render_type_definition(sedes)
    yield "valid", valid
    if value is not None:
        yield "value", render_value(value)
    if serial is not None:
        yield "ssz", encode_hex(serial)
    if description is not None:
        yield description
    yield "tags", tags
@ -1,4 +1,4 @@
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
ssz==0.1.0a2
../../test_libs/pyspec

60
test_generators/ssz_generic/ssz_basic_vector.py
Normal file
@ -0,0 +1,60 @@
from ssz_test_case import invalid_test_case, valid_test_case
from eth2spec.utils.ssz.ssz_typing import boolean, uint8, uint16, uint32, uint64, uint128, uint256, Vector, BasicType
from eth2spec.utils.ssz.ssz_impl import serialize
from random import Random
from typing import Dict
from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object


def basic_vector_case_fn(rng: Random, mode: RandomizationMode, elem_type: BasicType, length: int):
    return get_random_ssz_object(rng, Vector[elem_type, length],
                                 max_bytes_length=length * 8,
                                 max_list_length=length,
                                 mode=mode, chaos=False)


BASIC_TYPES: Dict[str, BasicType] = {
    'bool': boolean,
    'uint8': uint8,
    'uint16': uint16,
    'uint32': uint32,
    'uint64': uint64,
    'uint128': uint128,
    'uint256': uint256
}


def valid_cases():
    rng = Random(1234)
    for (name, typ) in BASIC_TYPES.items():
        random_modes = [RandomizationMode.mode_zero, RandomizationMode.mode_max]
        if name != 'bool':
            random_modes.append(RandomizationMode.mode_random)
        for length in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]:
            for mode in random_modes:
                yield f'vec_{name}_{length}_{mode.to_name()}', \
                    valid_test_case(lambda: basic_vector_case_fn(rng, mode, typ, length))


def invalid_cases():
    # zero length vectors are illegal
    for (name, typ) in BASIC_TYPES.items():
        yield f'vec_{name}_0', invalid_test_case(lambda: b'')

    rng = Random(1234)
    for (name, typ) in BASIC_TYPES.items():
        random_modes = [RandomizationMode.mode_zero, RandomizationMode.mode_max]
        if name != 'bool':
            random_modes.append(RandomizationMode.mode_random)
        for length in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]:
            yield f'vec_{name}_{length}_nil', invalid_test_case(lambda: b'')
            for mode in random_modes:
                yield f'vec_{name}_{length}_{mode.to_name()}_one_less', \
                    invalid_test_case(lambda: serialize(basic_vector_case_fn(rng, mode, typ, length - 1)))
                yield f'vec_{name}_{length}_{mode.to_name()}_one_more', \
                    invalid_test_case(lambda: serialize(basic_vector_case_fn(rng, mode, typ, length + 1)))
                yield f'vec_{name}_{length}_{mode.to_name()}_one_byte_less', \
                    invalid_test_case(lambda: serialize(basic_vector_case_fn(rng, mode, typ, length))[:-1])
                yield f'vec_{name}_{length}_{mode.to_name()}_one_byte_more', \
                    invalid_test_case(lambda: serialize(basic_vector_case_fn(rng, mode, typ, length))
                                      + serialize(basic_vector_case_fn(rng, mode, uint8, 1)))
37
test_generators/ssz_generic/ssz_bitlist.py
Normal file
@ -0,0 +1,37 @@
from ssz_test_case import invalid_test_case, valid_test_case
from eth2spec.utils.ssz.ssz_typing import Bitlist
from eth2spec.utils.ssz.ssz_impl import serialize
from random import Random
from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object


def bitlist_case_fn(rng: Random, mode: RandomizationMode, limit: int):
    return get_random_ssz_object(rng, Bitlist[limit],
                                 max_bytes_length=(limit // 8) + 1,
                                 max_list_length=limit,
                                 mode=mode, chaos=False)


def valid_cases():
    rng = Random(1234)
    for size in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]:
        for variation in range(5):
            for mode in [RandomizationMode.mode_nil_count,
                         RandomizationMode.mode_max_count,
                         RandomizationMode.mode_random,
                         RandomizationMode.mode_zero,
                         RandomizationMode.mode_max]:
                yield f'bitlist_{size}_{mode.to_name()}_{variation}', \
                    valid_test_case(lambda: bitlist_case_fn(rng, mode, size))


def invalid_cases():
    yield 'bitlist_no_delimiter_empty', invalid_test_case(lambda: b'')
    yield 'bitlist_no_delimiter_zero_byte', invalid_test_case(lambda: b'\x00')
    yield 'bitlist_no_delimiter_zeroes', invalid_test_case(lambda: b'\x00\x00\x00')
    rng = Random(1234)
    for (typ_limit, test_limit) in [(1, 2), (1, 8), (1, 9), (2, 3), (3, 4), (4, 5),
                                    (5, 6), (8, 9), (32, 64), (32, 33), (512, 513)]:
        yield f'bitlist_{typ_limit}_but_{test_limit}', \
            invalid_test_case(lambda: serialize(
                bitlist_case_fn(rng, RandomizationMode.mode_max_count, test_limit)))
30
test_generators/ssz_generic/ssz_bitvector.py
Normal file
@ -0,0 +1,30 @@
from ssz_test_case import invalid_test_case, valid_test_case
from eth2spec.utils.ssz.ssz_typing import Bitvector
from eth2spec.utils.ssz.ssz_impl import serialize
from random import Random
from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object


def bitvector_case_fn(rng: Random, mode: RandomizationMode, size: int):
    return get_random_ssz_object(rng, Bitvector[size],
                                 max_bytes_length=(size + 7) // 8,
                                 max_list_length=size,
                                 mode=mode, chaos=False)


def valid_cases():
    rng = Random(1234)
    for size in [1, 2, 3, 4, 5, 8, 16, 31, 512, 513]:
        for mode in [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]:
            yield f'bitvec_{size}_{mode.to_name()}', valid_test_case(lambda: bitvector_case_fn(rng, mode, size))


def invalid_cases():
    # zero length bitvectors are illegal
    yield 'bitvec_0', invalid_test_case(lambda: b'')
    rng = Random(1234)
    for (typ_size, test_size) in [(1, 2), (2, 3), (3, 4), (4, 5),
                                  (5, 6), (8, 9), (9, 8), (16, 8), (32, 33), (512, 513)]:
        for mode in [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]:
            yield f'bitvec_{typ_size}_{mode.to_name()}_{test_size}', \
                invalid_test_case(lambda: serialize(bitvector_case_fn(rng, mode, test_size)))
15
test_generators/ssz_generic/ssz_boolean.py
Normal file
@ -0,0 +1,15 @@
from ssz_test_case import valid_test_case, invalid_test_case
from eth2spec.utils.ssz.ssz_typing import boolean


def valid_cases():
    yield "true", valid_test_case(lambda: boolean(True))
    yield "false", valid_test_case(lambda: boolean(False))


def invalid_cases():
    yield "byte_2", invalid_test_case(lambda: b'\x02')
    yield "byte_rev_nibble", invalid_test_case(lambda: b'\x10')
    yield "byte_0x80", invalid_test_case(lambda: b'\x80')
    yield "byte_full", invalid_test_case(lambda: b'\xff')
120
test_generators/ssz_generic/ssz_container.py
Normal file
@ -0,0 +1,120 @@
from ssz_test_case import invalid_test_case, valid_test_case
from eth2spec.utils.ssz.ssz_typing import SSZType, Container, byte, uint8, uint16, \
    uint32, uint64, List, Bytes, Vector, Bitvector, Bitlist
from eth2spec.utils.ssz.ssz_impl import serialize
from random import Random
from typing import Dict, Tuple, Sequence, Callable
from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object


class SingleFieldTestStruct(Container):
    A: byte


class SmallTestStruct(Container):
    A: uint16
    B: uint16


class FixedTestStruct(Container):
    A: uint8
    B: uint64
    C: uint32


class VarTestStruct(Container):
    A: uint16
    B: List[uint16, 1024]
    C: uint8


class ComplexTestStruct(Container):
    A: uint16
    B: List[uint16, 128]
    C: uint8
    D: Bytes[256]
    E: VarTestStruct
    F: Vector[FixedTestStruct, 4]
    G: Vector[VarTestStruct, 2]


class BitsStruct(Container):
    A: Bitlist[5]
    B: Bitvector[2]
    C: Bitvector[1]
    D: Bitlist[6]
    E: Bitvector[8]


def container_case_fn(rng: Random, mode: RandomizationMode, typ: SSZType):
    return get_random_ssz_object(rng, typ,
                                 max_bytes_length=2000,
                                 max_list_length=2000,
                                 mode=mode, chaos=False)


PRESET_CONTAINERS: Dict[str, Tuple[SSZType, Sequence[int]]] = {
    'SingleFieldTestStruct': (SingleFieldTestStruct, []),
    'SmallTestStruct': (SmallTestStruct, []),
    'FixedTestStruct': (FixedTestStruct, []),
    'VarTestStruct': (VarTestStruct, [2]),
    'ComplexTestStruct': (ComplexTestStruct, [2, 2 + 4 + 1, 2 + 4 + 1 + 4]),
    'BitsStruct': (BitsStruct, [0, 4 + 1 + 1, 4 + 1 + 1 + 4]),
}


def valid_cases():
    rng = Random(1234)
    for (name, (typ, offsets)) in PRESET_CONTAINERS.items():
        for mode in [RandomizationMode.mode_zero, RandomizationMode.mode_max]:
            yield f'{name}_{mode.to_name()}', valid_test_case(lambda: container_case_fn(rng, mode, typ))
        random_modes = [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]
        if len(offsets) != 0:
            random_modes.extend([RandomizationMode.mode_nil_count,
                                 RandomizationMode.mode_one_count,
                                 RandomizationMode.mode_max_count])
        for mode in random_modes:
            for variation in range(10):
                yield f'{name}_{mode.to_name()}_{variation}', \
                    valid_test_case(lambda: container_case_fn(rng, mode, typ))
            for variation in range(3):
                yield f'{name}_{mode.to_name()}_chaos_{variation}', \
                    valid_test_case(lambda: container_case_fn(rng, mode, typ))


def mod_offset(b: bytes, offset_index: int, change: Callable[[int], int]):
    return b[:offset_index] + \
        (change(int.from_bytes(b[offset_index:offset_index + 4], byteorder='little')) & 0xffffffff) \
        .to_bytes(length=4, byteorder='little') + \
        b[offset_index + 4:]


def invalid_cases():
    rng = Random(1234)
    for (name, (typ, offsets)) in PRESET_CONTAINERS.items():
        # using mode_max_count, so that the extra byte cannot be picked up as normal list content
        yield f'{name}_extra_byte', \
            invalid_test_case(lambda: serialize(
                container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff')

        if len(offsets) != 0:
            # Note: there are many more ways to have invalid offsets,
            # these are just examples to get clients started looking into hardening ssz.
            for mode in [RandomizationMode.mode_random,
                         RandomizationMode.mode_nil_count,
                         RandomizationMode.mode_one_count,
                         RandomizationMode.mode_max_count]:
                if len(offsets) != 0:
                    for offset_index in offsets:
                        yield f'{name}_offset_{offset_index}_plus_one', \
                            invalid_test_case(lambda: mod_offset(
                                b=serialize(container_case_fn(rng, mode, typ)),
                                offset_index=offset_index,
                                change=lambda x: x + 1
                            ))
                        yield f'{name}_offset_{offset_index}_zeroed', \
                            invalid_test_case(lambda: mod_offset(
                                b=serialize(container_case_fn(rng, mode, typ)),
                                offset_index=offset_index,
                                change=lambda x: 0
                            ))
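For clarity, here is a small hand-worked illustration (not part of the commit) of what `mod_offset` does: it rewrites the 4-byte little-endian offset found at a given byte index. The byte values below are hypothetical and not a real SSZ serialization.

```python
# Suppose a serialized blob carries an offset of 4 at byte index 2.
blob = b'\x01\x02' + (4).to_bytes(4, 'little') + b'\xaa\xbb'
# Bumping the offset by one produces an out-of-place offset, hence an invalid test case.
mutated = mod_offset(blob, offset_index=2, change=lambda x: x + 1)
assert mutated == b'\x01\x02' + (5).to_bytes(4, 'little') + b'\xaa\xbb'
```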
21
test_generators/ssz_generic/ssz_test_case.py
Normal file
@ -0,0 +1,21 @@
from eth2spec.utils.ssz.ssz_impl import serialize, hash_tree_root, signing_root
from eth2spec.debug.encode import encode
from eth2spec.utils.ssz.ssz_typing import SSZValue, Container
from typing import Callable


def valid_test_case(value_fn: Callable[[], SSZValue]):
    def case_fn():
        value = value_fn()
        yield "value", "data", encode(value)
        yield "serialized", "ssz", serialize(value)
        yield "root", "meta", '0x' + hash_tree_root(value).hex()
        if isinstance(value, Container):
            yield "signing_root", "meta", '0x' + signing_root(value).hex()
    return case_fn


def invalid_test_case(bytez_fn: Callable[[], bytes]):
    def case_fn():
        yield "serialized", "ssz", bytez_fn()
    return case_fn
37
test_generators/ssz_generic/ssz_uints.py
Normal file
@ -0,0 +1,37 @@
from ssz_test_case import invalid_test_case, valid_test_case
from eth2spec.utils.ssz.ssz_typing import BasicType, uint8, uint16, uint32, uint64, uint128, uint256
from random import Random
from eth2spec.debug.random_value import RandomizationMode, get_random_ssz_object


def uint_case_fn(rng: Random, mode: RandomizationMode, typ: BasicType):
    return get_random_ssz_object(rng, typ,
                                 max_bytes_length=typ.byte_len,
                                 max_list_length=1,
                                 mode=mode, chaos=False)


UINT_TYPES = [uint8, uint16, uint32, uint64, uint128, uint256]


def valid_cases():
    rng = Random(1234)
    for uint_type in UINT_TYPES:
        yield f'uint_{uint_type.byte_len * 8}_last_byte_empty', \
            valid_test_case(lambda: uint_type((2 ** ((uint_type.byte_len - 1) * 8)) - 1))
        for variation in range(5):
            for mode in [RandomizationMode.mode_random, RandomizationMode.mode_zero, RandomizationMode.mode_max]:
                yield f'uint_{uint_type.byte_len * 8}_{mode.to_name()}_{variation}', \
                    valid_test_case(lambda: uint_case_fn(rng, mode, uint_type))


def invalid_cases():
    for uint_type in UINT_TYPES:
        yield f'uint_{uint_type.byte_len * 8}_one_too_high', \
            invalid_test_case(lambda: (2 ** (uint_type.byte_len * 8)).to_bytes(uint_type.byte_len + 1, 'little'))
    for uint_type in [uint8, uint16, uint32, uint64, uint128, uint256]:
        yield f'uint_{uint_type.byte_len * 8}_one_byte_longer', \
            invalid_test_case(lambda: (2 ** (uint_type.byte_len * 8) - 1).to_bytes(uint_type.byte_len + 1, 'little'))
    for uint_type in [uint8, uint16, uint32, uint64, uint128, uint256]:
        yield f'uint_{uint_type.byte_len * 8}_one_byte_shorter', \
            invalid_test_case(lambda: (2 ** ((uint_type.byte_len - 1) * 8) - 1).to_bytes(uint_type.byte_len - 1, 'little'))
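As a quick illustration (hypothetical values, plain Python int serialization) of the byte-length boundaries these invalid cases exercise:

```python
# A uint16 must serialize to exactly 2 little-endian bytes.
valid = (2 ** 16 - 1).to_bytes(2, 'little')      # b'\xff\xff' -- maximum uint16, accepted
too_long = (2 ** 16 - 1).to_bytes(3, 'little')   # b'\xff\xff\x00' -- one byte longer, rejected
too_short = (2 ** 8 - 1).to_bytes(1, 'little')   # b'\xff' -- one byte shorter, rejected
```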
@ -1,5 +1,5 @@
from random import Random

from typing import Iterable
from inspect import getmembers, isclass

from eth2spec.debug import random_value, encode
@ -10,29 +10,23 @@ from eth2spec.utils.ssz.ssz_impl import (
    signing_root,
    serialize,
)
from eth_utils import (
    to_tuple, to_dict
)
from gen_base import gen_runner, gen_suite, gen_typing
from gen_base import gen_runner, gen_typing
from preset_loader import loader

MAX_BYTES_LENGTH = 100
MAX_LIST_LENGTH = 10


@to_dict
def create_test_case_contents(value):
    yield "value", encode.encode(value)
    yield "serialized", '0x' + serialize(value).hex()
    yield "root", '0x' + hash_tree_root(value).hex()
    if hasattr(value, "signature"):
        yield "signing_root", '0x' + signing_root(value).hex()


@to_dict
def create_test_case(rng: Random, name: str, typ, mode: random_value.RandomizationMode, chaos: bool):
def create_test_case(rng: Random, typ, mode: random_value.RandomizationMode, chaos: bool) -> Iterable[gen_typing.TestCasePart]:
    value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos)
    yield name, create_test_case_contents(value)
    yield "value", "data", encode.encode(value)
    yield "serialized", "ssz", serialize(value)
    roots_data = {
        "root": '0x' + hash_tree_root(value).hex()
    }
    if isinstance(value, Container) and hasattr(value, "signature"):
        roots_data["signing_root"] = '0x' + signing_root(value).hex()
    yield "roots", "data", roots_data


def get_spec_ssz_types():
@ -42,40 +36,38 @@ def get_spec_ssz_types():
    ]


@to_tuple
def ssz_static_cases(rng: Random, mode: random_value.RandomizationMode, chaos: bool, count: int):
    for (name, ssz_type) in get_spec_ssz_types():
        for i in range(count):
            yield create_test_case(rng, name, ssz_type, mode, chaos)
def ssz_static_cases(seed: int, name, ssz_type, mode: random_value.RandomizationMode, chaos: bool, count: int):
    random_mode_name = mode.to_name()

    # Reproducible RNG
    rng = Random(seed)

    for i in range(count):
        yield gen_typing.TestCase(
            fork_name='phase0',
            runner_name='ssz_static',
            handler_name=name,
            suite_name=f"ssz_{random_mode_name}{'_chaos' if chaos else ''}",
            case_name=f"case_{i}",
            case_fn=lambda: create_test_case(rng, ssz_type, mode, chaos)
        )


def get_ssz_suite(seed: int, config_name: str, mode: random_value.RandomizationMode, chaos: bool, cases_if_random: int):
    def ssz_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
def create_provider(config_name: str, seed: int, mode: random_value.RandomizationMode, chaos: bool,
                    cases_if_random: int) -> gen_typing.TestProvider:
    def prepare_fn(configs_path: str) -> str:
        # Apply changes to presets, this affects some of the vector types.
        presets = loader.load_presets(configs_path, config_name)
        spec.apply_constants_preset(presets)
        return config_name

        # Reproducible RNG
        rng = Random(seed)

        random_mode_name = mode.to_name()

        suite_name = f"ssz_{config_name}_{random_mode_name}{'_chaos' if chaos else ''}"

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        count = cases_if_random if chaos or mode.is_changing() else 1
        print(f"generating SSZ-static suite ({count} cases per ssz type): {suite_name}")

        return (suite_name, "core", gen_suite.render_suite(
            title=f"ssz testing, with {config_name} config, randomized with mode {random_mode_name}{' and with chaos applied' if chaos else ''}",
            summary="Test suite for ssz serialization and hash-tree-root",
            forks_timeline="testing",
            forks=["phase0"],
            config=config_name,
            runner="ssz",
            handler="static",
            test_cases=ssz_static_cases(rng, mode, chaos, count)))
        for (i, (name, ssz_type)) in enumerate(get_spec_ssz_types()):
            yield from ssz_static_cases(seed * 1000 + i, name, ssz_type, mode, chaos, count)

    return ssz_suite
    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
@ -91,6 +83,6 @@ if __name__ == "__main__":
    seed += 1

    gen_runner.run_generator("ssz_static", [
        get_ssz_suite(seed, config_name, mode, chaos, cases_if_random)
        for (seed, config_name, mode, chaos, cases_if_random) in settings
        create_provider(config_name, seed, mode, chaos, cases_if_random)
        for (seed, config_name, mode, chaos, cases_if_random) in settings
    ])
@ -1,4 +1,3 @@
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
@ -10,10 +10,10 @@ from os.path import join
def load_presets(configs_dir, presets_name) -> Dict[str, Any]:
    """
    Loads the given preset
    :param presets_name: The name of the generator. (lowercase snake_case)
    :param presets_name: The name of the presets. (lowercase snake_case)
    :return: Dictionary, mapping of constant-name -> constant-value
    """
    path = Path(join(configs_dir, 'constant_presets', presets_name+'.yaml'))
    path = Path(join(configs_dir, presets_name+'.yaml'))
    yaml = YAML(typ='base')
    loaded = yaml.load(path)
    out = dict()
@ -1,5 +1,54 @@
# ETH 2.0 test generator helpers

`gen_base`: A util to quickly write new test suite generators with.
See [Generators documentation](../../test_generators/README.md).
## `gen_base`

A util to quickly write new test suite generators with.

See [Generators documentation](../../test_generators/README.md) for integration details.

Options:

```
-o OUTPUT_DIR -- Output directory to write tests to. The directory must exist.
   This directory will hold the top-level test directories (per-config directories).

[-f] -- Optional. Force-run the generator: if false, existing test case folders will be detected,
   and the test generator will not run the function to generate the test case with.
   If true, all cases will run regardless, and files will be overwritten.
   Other existing files are not deleted.

-c CONFIGS_PATH -- The directory to load configs for pyspec from. A config is a simple key-value yaml file.
   Use `../../configs/` when running from the root dir of a generator, and requiring the standard spec configs.

[-l [CONFIG_LIST [CONFIG_LIST ...]]] -- Optional. Define which configs to run.
   Test providers loading other configs will be ignored. If none are specified, no config will be ignored.
```

## `gen_from_tests`

This is a util to derive tests from a tests source file.

This requires the tests to yield test-case-part outputs. These outputs are then written to the test case directory.
Yielding data is illegal in normal pytests, so it is only done when in "generator mode".
This functionality can be attached to any function by using the `vector_test()` decorator found in `ethspec/tests/utils.py`.

## Test-case parts

Test cases consist of parts, which are yielded to the base generator one by one.

The yielding pattern is (a minimal sketch follows below):

2-value style: `yield <key name>, <value>`. The kind of output will be inferred from the value by the `vector_test()` decorator.

3-value style: `yield <key name>, <kind name>, <value>`.

Test part output kinds:
- `ssz`: value is expected to be a `bytes`, and the raw data is written to a `<key name>.ssz` file.
- `data`: value is expected to be any python object that can be dumped as YAML. Output is written to `<key name>.yaml`.
- `meta`: these key-value pairs are collected into a dict, and then collectively written to a metadata
  file named `meta.yaml`, if anything is yielded with the `meta` kind.

The `vector_test()` decorator can detect pyspec SSZ types, and output them both as `data` and `ssz`, for the test consumer to choose.

Note that the yielded outputs are processed before the test continues. It is safe to yield information that later mutates,
as the output will already be encoded to yaml or ssz bytes. This avoids the need to deep-copy the whole object.
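As an illustration of the 3-value yielding pattern described above, here is a minimal sketch (not part of this commit); `MyContainer` and the yielded values are hypothetical, while `serialize` and `hash_tree_root` are the pyspec helpers used throughout this change:

```python
from eth2spec.utils.ssz.ssz_impl import serialize, hash_tree_root
from eth2spec.utils.ssz.ssz_typing import Container, uint64


class MyContainer(Container):  # hypothetical example type
    a: uint64


def example_case_fn():
    value = MyContainer(a=1)
    yield "value", "data", {"a": 1}                            # -> value.yaml
    yield "serialized", "ssz", serialize(value)                # -> serialized.ssz
    yield "root", "meta", '0x' + hash_tree_root(value).hex()   # -> collected into meta.yaml
```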
@ -1,13 +1,13 @@
import argparse
from pathlib import Path
import sys
from typing import List
from typing import Iterable, AnyStr, Any, Callable

from ruamel.yaml import (
    YAML,
)

from gen_base.gen_typing import TestSuiteCreator
from gen_base.gen_typing import TestProvider


def validate_output_dir(path_str):
@ -31,26 +31,17 @@ def validate_configs_dir(path_str):
    if not path.is_dir():
        raise argparse.ArgumentTypeError("Config path must lead to a directory")

    if not Path(path, "constant_presets").exists():
        raise argparse.ArgumentTypeError("Constant Presets directory must exist")

    if not Path(path, "constant_presets").is_dir():
        raise argparse.ArgumentTypeError("Constant Presets path must lead to a directory")

    if not Path(path, "fork_timelines").exists():
        raise argparse.ArgumentTypeError("Fork Timelines directory must exist")

    if not Path(path, "fork_timelines").is_dir():
        raise argparse.ArgumentTypeError("Fork Timelines path must lead to a directory")

    return path


def run_generator(generator_name, suite_creators: List[TestSuiteCreator]):
def run_generator(generator_name, test_providers: Iterable[TestProvider]):
    """
    Implementation for a general test generator.
    :param generator_name: The name of the generator. (lowercase snake_case)
    :param suite_creators: A list of suite creators, each of these builds a list of test cases.
    :param test_providers: A list of test providers,
        each of these returns a callable that returns an iterable of test cases.
        The call to get the iterable may set global configuration,
        and the iterable should not be resumed after a pause with a change of that configuration.
    :return:
    """

@ -71,7 +62,7 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]):
        "--force",
        action="store_true",
        default=False,
        help="if set overwrite test files if they exist",
        help="if set re-generate and overwrite test files if they already exist",
    )
    parser.add_argument(
        "-c",
@ -79,7 +70,16 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]):
        dest="configs_path",
        required=True,
        type=validate_configs_dir,
        help="specify the path of the configs directory (containing constants_presets and fork_timelines)",
        help="specify the path of the configs directory",
    )
    parser.add_argument(
        "-l",
        "--config-list",
        dest="config_list",
        nargs='*',
        type=str,
        required=False,
        help="specify configs to run with. Allows all if no config names are specified.",
    )

    args = parser.parse_args()
@ -92,24 +92,80 @@ def run_generator(generator_name, suite_creators: List[TestSuiteCreator]):
    yaml = YAML(pure=True)
    yaml.default_flow_style = None

    print(f"Generating tests for {generator_name}, creating {len(suite_creators)} test suite files...")
    print(f"Reading config presets and fork timelines from {args.configs_path}")
    for suite_creator in suite_creators:
        (output_name, handler, suite) = suite_creator(args.configs_path)
    print(f"Generating tests into {output_dir}")
    print(f"Reading configs from {args.configs_path}")

        handler_output_dir = Path(output_dir) / Path(handler)
        try:
            if not handler_output_dir.exists():
                handler_output_dir.mkdir()
        except FileNotFoundError as e:
            sys.exit(f'Error when creating handler dir {handler} for test "{suite["title"]}" ({e})')
    configs = args.config_list
    if configs is None:
        configs = []

        out_path = handler_output_dir / Path(output_name + '.yaml')
    if len(configs) != 0:
        print(f"Filtering test-generator runs to only include configs: {', '.join(configs)}")

        try:
            with out_path.open(file_mode) as f:
                yaml.dump(suite, f)
        except IOError as e:
            sys.exit(f'Error when dumping test "{suite["title"]}" ({e})')
    for tprov in test_providers:
        # loads configuration etc.
        config_name = tprov.prepare(args.configs_path)
        if len(configs) != 0 and config_name not in configs:
            print(f"skipping tests with config '{config_name}' since it is filtered out")
            continue

    print("done.")
        print(f"generating tests with config '{config_name}' ...")
        for test_case in tprov.make_cases():
            case_dir = Path(output_dir) / Path(config_name) / Path(test_case.fork_name) \
                / Path(test_case.runner_name) / Path(test_case.handler_name) \
                / Path(test_case.suite_name) / Path(test_case.case_name)

            if case_dir.exists():
                if not args.force:
                    print(f'Skipping already existing test: {case_dir}')
                    continue
                print(f'Warning, output directory {case_dir} already exists,'
                      f' old files are not deleted but will be overwritten when a new version is produced')

            print(f'Generating test: {case_dir}')
            try:
                def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
                    # make sure the test case directory is created before any test part is written.
                    case_dir.mkdir(parents=True, exist_ok=True)
                    try:
                        fn(case_dir)
                    except IOError as e:
                        sys.exit(f'Error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}')

                written_part = False
                meta = dict()
                for (name, out_kind, data) in test_case.case_fn():
                    written_part = True
                    if out_kind == "meta":
                        meta[name] = data
                    if out_kind == "data":
                        output_part("data", name, dump_yaml_fn(data, name, file_mode, yaml))
                    if out_kind == "ssz":
                        output_part("ssz", name, dump_ssz_fn(data, name, file_mode))
                # Once all meta data is collected (if any), write it to a meta data file.
                if len(meta) != 0:
                    written_part = True
                    output_part("data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))

                if not written_part:
                    print(f"test case {case_dir} did not produce any test case parts")

            except Exception as e:
                print(f"ERROR: failed to generate vector(s) for test {case_dir}: {e}")
    print(f"completed {generator_name}")


def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
    def dump(case_path: Path):
        out_path = case_path / Path(name + '.yaml')
        with out_path.open(file_mode) as f:
            yaml_encoder.dump(data, f)
    return dump


def dump_ssz_fn(data: AnyStr, name: str, file_mode: str):
    def dump(case_path: Path):
        out_path = case_path / Path(name + '.ssz')
        with out_path.open(file_mode + 'b') as f:  # write in raw binary mode
            f.write(data)
    return dump
@ -1,22 +0,0 @@
from typing import Iterable

from eth_utils import to_dict
from gen_base.gen_typing import TestCase


@to_dict
def render_suite(*,
                 title: str, summary: str,
                 forks_timeline: str, forks: Iterable[str],
                 config: str,
                 runner: str,
                 handler: str,
                 test_cases: Iterable[TestCase]):
    yield "title", title
    yield "summary", summary
    yield "forks_timeline", forks_timeline,
    yield "forks", forks
    yield "config", config
    yield "runner", runner
    yield "handler", handler
    yield "test_cases", test_cases
@ -1,14 +1,35 @@
from typing import (
    Any,
    Callable,
    Dict,
    Iterable,
    NewType,
    Tuple,
)
from dataclasses import dataclass

# Elements: name, out_kind, data
#
# out_kind is the type of data:
#  - "data" for generic
#  - "ssz" for SSZ encoded bytes
#  - "meta" for generic data to collect into a meta data dict.
TestCasePart = NewType("TestCasePart", Tuple[str, str, Any])


TestCase = Dict[str, Any]
TestSuite = Dict[str, Any]
# Tuple: (output name, handler name, suite) -- output name excl. ".yaml"
TestSuiteOutput = Tuple[str, str, TestSuite]
# Args: <presets path>
TestSuiteCreator = Callable[[str], TestSuiteOutput]
@dataclass
class TestCase(object):
    fork_name: str
    runner_name: str
    handler_name: str
    suite_name: str
    case_name: str
    case_fn: Callable[[], Iterable[TestCasePart]]


@dataclass
class TestProvider(object):
    # Prepares the context with a configuration, loaded from the given config path.
    # fn(config path) => chosen config name
    prepare: Callable[[str], str]
    # Retrieves an iterable of cases, called after prepare()
    make_cases: Callable[[], Iterable[TestCase]]
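For orientation, here is a minimal sketch (hypothetical names; the real providers in this commit live in the generator `main.py` files) of wiring `TestCase` and `TestProvider` together with `run_generator`:

```python
from gen_base import gen_runner, gen_typing


def prepare_fn(configs_path: str) -> str:
    # A real provider would load a preset from configs_path and apply it to the spec here.
    return 'minimal'


def cases_fn():
    def case_fn():
        yield "value", "data", {"example": 1}  # one test-case part, written as value.yaml
    yield gen_typing.TestCase(
        fork_name='phase0', runner_name='example', handler_name='core',
        suite_name='example_suite', case_name='case_0', case_fn=case_fn)


if __name__ == "__main__":
    gen_runner.run_generator("example", [
        gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn),
    ])
```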
@ -1,26 +1,40 @@
from inspect import getmembers, isfunction
from typing import Any, Iterable

def generate_from_tests(src, phase, bls_active=True):
from gen_base.gen_typing import TestCase


def generate_from_tests(runner_name: str, handler_name: str, src: Any,
                        fork_name: str, bls_active: bool = True) -> Iterable[TestCase]:
    """
    Generate a list of test cases by running tests from the given src in generator-mode.
    :param runner_name: to categorize the test in general as.
    :param handler_name: to categorize the test specialization as.
    :param src: to retrieve tests from (discovered using inspect.getmembers).
    :param phase: to run tests against particular phase.
    :param fork_name: to run tests against particular phase and/or fork.
        (if multiple forks are applicable, indicate the last fork)
    :param bls_active: optional, to override BLS switch preference. Defaults to True.
    :return: the list of test cases.
    :return: an iterable of test cases.
    """
    fn_names = [
        name for (name, _) in getmembers(src, isfunction)
        if name.startswith('test_')
    ]
    out = []
    print("generating test vectors from tests source: %s" % src.__name__)
    for name in fn_names:
        tfn = getattr(src, name)
        try:
            test_case = tfn(generator_mode=True, phase=phase, bls_active=bls_active)
            # If no test case data is returned, the test is ignored.
            if test_case is not None:
                out.append(test_case)
        except AssertionError:
            print("ERROR: failed to generate vector from test: %s (src: %s)" % (name, src.__name__))
    return out

        # strip off the `test_` prefix
        case_name = name
        if case_name.startswith('test_'):
            case_name = case_name[5:]

        yield TestCase(
            fork_name=fork_name,
            runner_name=runner_name,
            handler_name=handler_name,
            suite_name='pyspec_tests',
            case_name=case_name,
            # TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
            case_fn=lambda: tfn(generator_mode=True, phase=fork_name, bls_active=bls_active)
        )
@ -29,4 +29,4 @@ def encode(value, include_hash_tree_roots=False):
        ret["hash_tree_root"] = '0x' + hash_tree_root(value).hex()
        return ret
    else:
        raise Exception(f"Type not recognized: value={value}, typ={value.type()}")
        raise Exception(f"Type not recognized: value={value}, typ={type(value)}")
@ -56,15 +56,15 @@ def get_random_ssz_object(rng: Random,
        if mode == RandomizationMode.mode_nil_count:
            return typ(b'')
        elif mode == RandomizationMode.mode_max_count:
            return typ(get_random_bytes_list(rng, max_bytes_length))
            return typ(get_random_bytes_list(rng, min(max_bytes_length, typ.length)))
        elif mode == RandomizationMode.mode_one_count:
            return typ(get_random_bytes_list(rng, 1))
            return typ(get_random_bytes_list(rng, min(1, typ.length)))
        elif mode == RandomizationMode.mode_zero:
            return typ(b'\x00')
            return typ(b'\x00' * min(1, typ.length))
        elif mode == RandomizationMode.mode_max:
            return typ(b'\xff')
            return typ(b'\xff' * min(1, typ.length))
        else:
            return typ(get_random_bytes_list(rng, rng.randint(0, max_bytes_length)))
            return typ(get_random_bytes_list(rng, rng.randint(0, min(max_bytes_length, typ.length))))
    elif issubclass(typ, BytesN):
        # Sanity, don't generate absurdly big random values
        # If a client is aiming to performance-test, they should create a benchmark suite.
@ -18,14 +18,11 @@ def translate_typ(typ) -> ssz.BaseSedes:
    elif issubclass(typ, spec_ssz.Vector):
        return ssz.Vector(translate_typ(typ.elem_type), typ.length)
    elif issubclass(typ, spec_ssz.List):
        # TODO: Make py-ssz List support the new fixed length list
        return ssz.List(translate_typ(typ.elem_type))
        return ssz.List(translate_typ(typ.elem_type), typ.length)
    elif issubclass(typ, spec_ssz.Bitlist):
        # TODO: Once Bitlist implemented in py-ssz, use appropriate type
        return ssz.List(translate_typ(typ.elem_type))
        return ssz.Bitlist(typ.length)
    elif issubclass(typ, spec_ssz.Bitvector):
        # TODO: Once Bitvector implemented in py-ssz, use appropriate type
        return ssz.Vector(translate_typ(typ.elem_type), typ.length)
        return ssz.Bitvector(typ.length)
    elif issubclass(typ, spec_ssz.boolean):
        return ssz.boolean
    elif issubclass(typ, spec_ssz.uint):
@ -9,9 +9,7 @@ def test_decoder():
    rng = Random(123)

    # check these types only, Block covers a lot of operation types already.
    # TODO: Once has Bitlists and Bitvectors, add back
    # spec.BeaconState and spec.BeaconBlock
    for typ in [spec.IndexedAttestation, spec.AttestationDataAndCustodyBit]:
    for typ in [spec.AttestationDataAndCustodyBit, spec.BeaconState, spec.BeaconBlock]:
        # create a random pyspec value
        original = random_value.get_random_ssz_object(rng, typ, 100, 10,
                                                      mode=random_value.RandomizationMode.mode_random,
@ -32,4 +30,6 @@ def test_decoder():
    block = translate_value(raw_value, typ)

    # and see if the hash-tree-root of the original matches the hash-tree-root of the decoded & translated value.
    assert spec_ssz_impl.hash_tree_root(original) == spec_ssz_impl.hash_tree_root(block)
    original_hash_tree_root = spec_ssz_impl.hash_tree_root(original)
    assert original_hash_tree_root == spec_ssz_impl.hash_tree_root(block)
    assert original_hash_tree_root == block_sedes.get_hash_tree_root(raw_value)
@ -4,7 +4,7 @@ from eth2spec.utils import bls

from .helpers.genesis import create_genesis_state

from .utils import spectest, with_tags
from .utils import vector_test, with_meta_tags


def with_state(fn):
@ -12,7 +12,7 @@ def with_state(fn):
        try:
            kw['state'] = create_genesis_state(spec=kw['spec'], num_validators=spec_phase0.SLOTS_PER_EPOCH * 8)
        except KeyError:
            raise TypeError('Spec decorator must come before state decorator to inject spec into state.')
            raise TypeError('Spec decorator must come within state decorator to inject spec into state.')
        return fn(*args, **kw)
    return entry

@ -27,13 +27,18 @@ def with_state(fn):
DEFAULT_BLS_ACTIVE = False


def spectest_with_bls_switch(fn):
    return bls_switch(spectest()(fn))
def spec_test(fn):
    # Bls switch must be wrapped by vector_test,
    # to fully go through the yielded bls switch data, before setting back the BLS setting.
    # A test may apply BLS overrides such as @always_bls,
    # but if it yields data (n.b. @always_bls yields the bls setting), it should be wrapped by this decorator.
    # This is why @always_bls has its own bls switch, since the override is beyond the reach of the outer switch.
    return vector_test()(bls_switch(fn))


# shorthand for decorating @with_state @spectest()
# shorthand for decorating @spectest() @with_state
def spec_state_test(fn):
    return with_state(spectest_with_bls_switch(fn))
    return spec_test(with_state(fn))


def expect_assertion_error(fn):
@ -50,47 +55,44 @@ def expect_assertion_error(fn):
    raise AssertionError('expected an assertion error, but got none.')


# Tags a test to be ignoring BLS for it to pass.
bls_ignored = with_tags({'bls_setting': 2})


def never_bls(fn):
    """
    Decorator to apply on ``bls_switch`` decorator to force BLS de-activation. Useful to mark tests as BLS-ignorant.
    This decorator may only be applied to yielding spec test functions, and should be wrapped by vector_test,
    as the yielding needs to complete before setting back the BLS setting.
    """
    def entry(*args, **kw):
        # override bls setting
        kw['bls_active'] = False
        return fn(*args, **kw)
    return bls_ignored(entry)


# Tags a test to be requiring BLS for it to pass.
bls_required = with_tags({'bls_setting': 1})
        return bls_switch(fn)(*args, **kw)
    return with_meta_tags({'bls_setting': 2})(entry)


def always_bls(fn):
    """
    Decorator to apply on ``bls_switch`` decorator to force BLS activation. Useful to mark tests as BLS-dependent.
    This decorator may only be applied to yielding spec test functions, and should be wrapped by vector_test,
    as the yielding needs to complete before setting back the BLS setting.
    """
    def entry(*args, **kw):
        # override bls setting
        kw['bls_active'] = True
        return fn(*args, **kw)
    return bls_required(entry)
        return bls_switch(fn)(*args, **kw)
    return with_meta_tags({'bls_setting': 1})(entry)


def bls_switch(fn):
    """
    Decorator to make a function execute with BLS ON, or BLS off.
    Based on an optional bool argument ``bls_active``, passed to the function at runtime.
    This decorator may only be applied to yielding spec test functions, and should be wrapped by vector_test,
    as the yielding needs to complete before setting back the BLS setting.
    """
    def entry(*args, **kw):
        old_state = bls.bls_active
        bls.bls_active = kw.pop('bls_active', DEFAULT_BLS_ACTIVE)
        out = fn(*args, **kw)
        yield from fn(*args, **kw)
        bls.bls_active = old_state
        return out
    return entry


@ -99,7 +101,7 @@ all_phases = ['phase0', 'phase1']

def with_all_phases(fn):
    """
    A decorator for running a test wil every phase
    A decorator for running a test with every phase
    """
    return with_phases(all_phases)(fn)

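A hedged usage sketch of the decorators above (the test body and the `pre`/`post` part names are hypothetical, following the decorator order this commit settles on):

```python
from eth2spec.test.context import with_all_phases, spec_state_test, always_bls


@with_all_phases
@spec_state_test
@always_bls
def test_example(spec, state):
    # In pytest mode the yields are consumed silently; in generator mode
    # vector_test() writes them out as test-vector files.
    yield 'pre', state
    state.slot += 1  # hypothetical state mutation under test
    yield 'post', state
```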
@ -1,4 +1,4 @@
from eth2spec.test.context import with_all_phases, with_state, bls_switch
from eth2spec.test.context import with_all_phases, with_state, bls_switch, with_phases

from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.attestations import get_valid_attestation
@ -103,7 +103,7 @@ def test_on_attestation_same_slot(spec, state):
    run_on_attestation(spec, state, store, attestation, False)


@with_all_phases
@with_phases(['phase0'])
@with_state
@bls_switch
def test_on_attestation_invalid_attestation(spec, state):
@ -1,11 +1,11 @@
from eth2spec.test.context import spectest_with_bls_switch, with_phases
from eth2spec.test.context import spec_test, with_phases
from eth2spec.test.helpers.deposits import (
    prepare_genesis_deposits,
)


@with_phases(['phase0'])
@spectest_with_bls_switch
@spec_test
def test_initialize_beacon_state_from_eth1(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
    deposits, deposit_root = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
@ -1,4 +1,4 @@
from eth2spec.test.context import spectest_with_bls_switch, with_phases
from eth2spec.test.context import spec_test, with_phases
from eth2spec.test.helpers.deposits import (
    prepare_genesis_deposits,
)
@ -26,7 +26,7 @@ def run_is_valid_genesis_state(spec, state, valid=True):


@with_phases(['phase0'])
@spectest_with_bls_switch
@spec_test
def test_is_valid_genesis_state_true(spec):
    state = create_valid_beacon_state(spec)

@ -34,7 +34,7 @@ def test_is_valid_genesis_state_true(spec):


@with_phases(['phase0'])
@spectest_with_bls_switch
@spec_test
def test_is_valid_genesis_state_false_invalid_timestamp(spec):
    state = create_valid_beacon_state(spec)
    state.genesis_time = spec.MIN_GENESIS_TIME - 1
@ -43,7 +43,7 @@ def test_is_valid_genesis_state_false_invalid_timestamp(spec):


@with_phases(['phase0'])
@spectest_with_bls_switch
@spec_test
def test_is_valid_genesis_state_true_more_balance(spec):
    state = create_valid_beacon_state(spec)
    state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE + 1
@ -53,7 +53,7 @@ def test_is_valid_genesis_state_true_more_balance(spec):


# TODO: not part of the genesis function yet. Erroneously merged.
# @with_phases(['phase0'])
# @spectest_with_bls_switch
# @spec_test
# def test_is_valid_genesis_state_false_not_enough_balance(spec):
#     state = create_valid_beacon_state(spec)
#     state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE - 1
@ -62,7 +62,7 @@ def test_is_valid_genesis_state_true_more_balance(spec):


@with_phases(['phase0'])
@spectest_with_bls_switch
@spec_test
def test_is_valid_genesis_state_true_one_more_validator(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 1
    deposits, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
@ -75,7 +75,7 @@ def test_is_valid_genesis_state_true_one_more_validator(spec):


@with_phases(['phase0'])
@spectest_with_bls_switch
@spec_test
def test_is_valid_genesis_state_false_not_enough_validator(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
    deposits, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)
@ -1,6 +1,11 @@
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures
from eth2spec.utils.hash_function import hash
from eth2spec.utils.ssz.ssz_typing import Bitlist, BytesN, Bitvector
from eth2spec.utils.ssz.ssz_impl import chunkify, pack, hash_tree_root
from eth2spec.utils.merkle_minimal import get_merkle_tree, get_merkle_proof

BYTES_PER_CHUNK = 32


def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
@ -13,7 +18,7 @@ def get_valid_early_derived_secret_reveal(spec, state, epoch=None):

    # Generate the secret that is being revealed
    reveal = bls_sign(
        message_hash=spec.hash_tree_root(spec.Epoch(epoch)),
        message_hash=hash_tree_root(spec.Epoch(epoch)),
        privkey=privkeys[revealed_index],
        domain=spec.get_domain(
            state=state,
@ -42,3 +47,128 @@ def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
        masker_index=masker_index,
        mask=mask,
    )


def get_valid_custody_key_reveal(spec, state, period=None):
    current_epoch = spec.get_current_epoch(state)
    revealer_index = spec.get_active_validator_indices(state, current_epoch)[0]
    revealer = state.validators[revealer_index]

    if period is None:
        period = revealer.next_custody_secret_to_reveal

    epoch_to_sign = spec.get_randao_epoch_for_custody_period(period, revealer_index)

    # Generate the secret that is being revealed
    reveal = bls_sign(
        message_hash=hash_tree_root(spec.Epoch(epoch_to_sign)),
        privkey=privkeys[revealer_index],
        domain=spec.get_domain(
            state=state,
            domain_type=spec.DOMAIN_RANDAO,
            message_epoch=epoch_to_sign,
        ),
    )
    return spec.CustodyKeyReveal(
        revealer_index=revealer_index,
        reveal=reveal,
    )


def bitlist_from_int(max_len, num_bits, n):
    return Bitlist[max_len](*[(n >> i) & 0b1 for i in range(num_bits)])


def get_valid_bit_challenge(spec, state, attestation, invalid_custody_bit=False):
    crosslink_committee = spec.get_crosslink_committee(
        state,
        attestation.data.target.epoch,
        attestation.data.crosslink.shard,
    )
    responder_index = crosslink_committee[0]
    challenger_index = crosslink_committee[-1]

    epoch = spec.get_randao_epoch_for_custody_period(attestation.data.target.epoch,
                                                     responder_index)

    # Generate the responder key
    responder_key = bls_sign(
        message_hash=hash_tree_root(spec.Epoch(epoch)),
        privkey=privkeys[responder_index],
        domain=spec.get_domain(
            state=state,
            domain_type=spec.DOMAIN_RANDAO,
            message_epoch=epoch,
        ),
    )

    chunk_count = spec.get_custody_chunk_count(attestation.data.crosslink)

    chunk_bits = bitlist_from_int(spec.MAX_CUSTODY_CHUNKS, chunk_count, 0)

    n = 0
    while spec.get_chunk_bits_root(chunk_bits) == attestation.custody_bits[0] ^ invalid_custody_bit:
        chunk_bits = bitlist_from_int(spec.MAX_CUSTODY_CHUNKS, chunk_count, n)
        n += 1

    return spec.CustodyBitChallenge(
        responder_index=responder_index,
        attestation=attestation,
        challenger_index=challenger_index,
        responder_key=responder_key,
        chunk_bits=chunk_bits,
    )


def custody_chunkify(spec, x):
    chunks = [bytes(x[i:i + spec.BYTES_PER_CUSTODY_CHUNK]) for i in range(0, len(x), spec.BYTES_PER_CUSTODY_CHUNK)]
    chunks[-1] = chunks[-1].ljust(spec.BYTES_PER_CUSTODY_CHUNK, b"\0")
    return chunks


def get_valid_custody_response(spec, state, bit_challenge, custody_data, challenge_index, invalid_chunk_bit=False):
    chunks = custody_chunkify(spec, custody_data)

    chunk_index = len(chunks) - 1
    chunk_bit = spec.get_custody_chunk_bit(bit_challenge.responder_key, chunks[chunk_index])

    while chunk_bit == bit_challenge.chunk_bits[chunk_index] ^ invalid_chunk_bit:
        chunk_index -= 1
        chunk_bit = spec.get_custody_chunk_bit(bit_challenge.responder_key, chunks[chunk_index])

    chunks_hash_tree_roots = [hash_tree_root(BytesN[spec.BYTES_PER_CUSTODY_CHUNK](chunk)) for chunk in chunks]
    chunks_hash_tree_roots += [
        hash_tree_root(BytesN[spec.BYTES_PER_CUSTODY_CHUNK](b"\0" * spec.BYTES_PER_CUSTODY_CHUNK))
        for i in range(2 ** spec.ceillog2(len(chunks)) - len(chunks))]
    data_tree = get_merkle_tree(chunks_hash_tree_roots)

    data_branch = get_merkle_proof(data_tree, chunk_index)

    bitlist_chunk_index = chunk_index // BYTES_PER_CHUNK
    bitlist_chunks = chunkify(pack(bit_challenge.chunk_bits))
    bitlist_tree = get_merkle_tree(bitlist_chunks, pad_to=spec.MAX_CUSTODY_CHUNKS // 256)
    bitlist_chunk_branch = get_merkle_proof(bitlist_tree, chunk_index // 256) + \
        [len(bit_challenge.chunk_bits).to_bytes(32, "little")]

    bitlist_chunk_index = chunk_index // 256

    chunk_bits_leaf = Bitvector[256](bit_challenge.chunk_bits[bitlist_chunk_index * 256:
                                                              (bitlist_chunk_index + 1) * 256])

    return spec.CustodyResponse(
        challenge_index=challenge_index,
        chunk_index=chunk_index,
        chunk=BytesN[spec.BYTES_PER_CUSTODY_CHUNK](chunks[chunk_index]),
        data_branch=data_branch,
        chunk_bits_branch=bitlist_chunk_branch,
        chunk_bits_leaf=chunk_bits_leaf,
    )


def get_custody_test_vector(bytelength):
    ints = bytelength // 4
    return b"".join(i.to_bytes(4, "little") for i in range(ints))


def get_custody_merkle_root(data):
    return get_merkle_tree(chunkify(data))[-1][0]
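A small hand-worked illustration (not part of the commit) of `custody_chunkify`'s right-padding; the chunk size of 4 is hypothetical, standing in for the spec constant `BYTES_PER_CUSTODY_CHUNK`:

```python
class FakeSpec:  # hypothetical stand-in for the pyspec module
    BYTES_PER_CUSTODY_CHUNK = 4

chunks = custody_chunkify(FakeSpec, b'\x01\x02\x03\x04\x05')
# The last, partial chunk is zero-padded on the right to the full chunk size.
assert chunks == [b'\x01\x02\x03\x04', b'\x05\x00\x00\x00']
```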
@ -47,7 +47,7 @@ def build_deposit(spec,
    deposit_data_list.append(deposit_data)
    root = hash_tree_root(List[spec.DepositData, 2**spec.DEPOSIT_CONTRACT_TREE_DEPTH](*deposit_data_list))
    tree = calc_merkle_tree_from_leaves(tuple([d.hash_tree_root() for d in deposit_data_list]))
    proof = list(get_merkle_proof(tree, item_index=index)) + [(index + 1).to_bytes(32, 'little')]
    proof = list(get_merkle_proof(tree, item_index=index, tree_len=32)) + [(index + 1).to_bytes(32, 'little')]
    leaf = deposit_data.hash_tree_root()
    assert spec.is_valid_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root)
    deposit = spec.Deposit(proof=proof, data=deposit_data)
@ -0,0 +1,37 @@
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils.bls import (
    bls_aggregate_signatures,
    bls_sign,
)


def sign_shard_attestation(spec, shard_state, beacon_state, block, participants):
    signatures = []
    message_hash = block.core.parent_root
    block_epoch = spec.compute_epoch_of_shard_slot(block.core.slot)
    for validator_index in participants:
        privkey = privkeys[validator_index]
        signatures.append(
            get_attestation_signature(
                spec,
                shard_state,
                beacon_state,
                message_hash,
                block_epoch,
                privkey,
            )
        )

    return bls_aggregate_signatures(signatures)


def get_attestation_signature(spec, shard_state, beacon_state, message_hash, block_epoch, privkey):
    return bls_sign(
        message_hash=message_hash,
        privkey=privkey,
        domain=spec.get_domain(
            state=beacon_state,
            domain_type=spec.DOMAIN_SHARD_ATTESTER,
            message_epoch=block_epoch,
        )
    )
@ -7,6 +7,10 @@ from eth2spec.utils.ssz.ssz_impl import (
    signing_root,
)

from .attestations import (
    sign_shard_attestation,
)


@only_with_bls()
def sign_shard_block(spec, state, block, shard, proposer_index=None):
@ -26,22 +30,52 @@ def sign_shard_block(spec, state, block, shard, proposer_index=None):
    )


def build_empty_shard_block(spec, state, slot, shard, parent_root, signed=False):
def build_empty_shard_block(spec,
                            shard_state,
                            beacon_state,
                            slot,
                            parent_root,
                            signed=False,
                            full_attestation=False):
    if slot is None:
        slot = state.slot
        slot = shard_state.slot

    block = spec.ShardBlock(
        core=spec.ExtendedShardBlockCore(
            slot=slot,
            beacon_chain_root=state.block_roots[state.slot % spec.SLOTS_PER_HISTORICAL_ROOT],
            beacon_chain_root=beacon_state.block_roots[beacon_state.slot % spec.SLOTS_PER_HISTORICAL_ROOT],
            parent_root=parent_root,
        ),
        signatures=spec.ShardBlockSignatures(
            attestation_signature=b'\x12' * 96,
            attestation_signature=b'\x00' * 96,
            proposer_signature=b'\x25' * 96,
        )
    )

    # attestation
    if full_attestation:
        attester_committee = spec.get_persistent_committee(beacon_state, shard_state.shard, block.core.slot)
        block.core.attester_bitfield = list(
            (True,) * len(attester_committee) +
            (False,) * (spec.TARGET_PERSISTENT_COMMITTEE_SIZE * 2 - len(attester_committee))
        )
        block.signatures.attestation_signature = sign_shard_attestation(
            spec,
            shard_state,
            beacon_state,
            block,
            participants=attester_committee,
        )
    else:
        block.signatures.attestation_signature = sign_shard_attestation(
            spec,
            shard_state,
            beacon_state,
            block,
            participants=(),
        )

    if signed:
        sign_shard_block(spec, state, block, shard)
        sign_shard_block(spec, beacon_state, block, shard_state.shard)

    return block
@ -0,0 +1,148 @@

import re
from eth_utils import (
    to_tuple,
)

from eth2spec.test.context import (
    spec_state_test,
    with_all_phases_except,
)
from eth2spec.utils.ssz.ssz_typing import (
    Bytes32,
    Container,
    List,
    uint64,
)


class Foo(Container):
    x: uint64
    y: List[Bytes32, 2]

# Tree
#              root
#             /    \
#            x    y_root
#                 /    \
#       y_data_root   len(y)
#           /  \
#          /\  /\
#
# Generalized indices
#              1
#             / \
#        2 (x)   3 (y_root)
#               /  \
#              6    7
#             / \
#           12   13


@to_tuple
def ssz_object_to_path(start, end):
    is_len = False
    len_findall = re.findall(r"(?<=len\().*(?=\))", end)
    if len_findall:
        is_len = True
        end = len_findall[0]

    route = ''
    if end.startswith(start):
        route = end[len(start):]

    segments = route.split('.')
    for word in segments:
        index_match = re.match(r"(\w+)\[(\d+)]", word)
        if index_match:
            yield from index_match.groups()
        elif len(word):
            yield word
    if is_len:
        yield '__len__'


to_path_test_cases = [
    ('foo', 'foo.x', ('x',)),
    ('foo', 'foo.x[100].y', ('x', '100', 'y')),
    ('foo', 'foo.x[100].y[1].z[2]', ('x', '100', 'y', '1', 'z', '2')),
    ('foo', 'len(foo.x[100].y[1].z[2])', ('x', '100', 'y', '1', 'z', '2', '__len__')),
]


def test_to_path():
    for test_case in to_path_test_cases:
        start, end, expected = test_case
        assert ssz_object_to_path(start, end) == expected


generalized_index_cases = [
    (Foo, ('x',), 2),
    (Foo, ('y',), 3),
    (Foo, ('y', 0), 12),
    (Foo, ('y', 1), 13),
    (Foo, ('y', '__len__'), None),
]


@with_all_phases_except(['phase0'])
@spec_state_test
def test_get_generalized_index(spec, state):
    for typ, path, generalized_index in generalized_index_cases:
        assert spec.get_generalized_index(
            typ=typ,
            path=path,
        ) == generalized_index
        yield 'typ', typ
        yield 'path', path
        yield 'generalized_index', generalized_index


@with_all_phases_except(['phase0'])
@spec_state_test
def test_verify_merkle_proof(spec, state):
    h = spec.hash
    a = b'\x11' * 32
    b = b'\x22' * 32
    c = b'\x33' * 32
    d = b'\x44' * 32
    root = h(h(a + b) + h(c + d))
    leaf = a
    generalized_index = 4
    proof = [b, h(c + d)]

    is_valid = spec.verify_merkle_proof(
        leaf=leaf,
        proof=proof,
        index=generalized_index,
        root=root,
    )
    assert is_valid

    yield 'proof', proof
    yield 'is_valid', is_valid


@with_all_phases_except(['phase0'])
@spec_state_test
def test_verify_merkle_multiproof(spec, state):
    h = spec.hash
    a = b'\x11' * 32
    b = b'\x22' * 32
    c = b'\x33' * 32
    d = b'\x44' * 32
    root = h(h(a + b) + h(c + d))
    leaves = [a, d]
    generalized_indices = [4, 7]
    proof = [c, b]  # helper_indices = [6, 5]

    is_valid = spec.verify_merkle_multiproof(
        leaves=leaves,
        proof=proof,
        indices=generalized_indices,
        root=root,
    )
    assert is_valid

    yield 'proof', proof
    yield 'is_valid', is_valid
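For intuition, the single-leaf proof above can be checked by hand: walk the leaf's generalized index up to the root, hashing with each proof sibling. A minimal stand-alone sketch, assuming `spec.hash` is sha256 (true for the pyspec, but treated as an assumption here):

```python
from hashlib import sha256


def h(x: bytes) -> bytes:
    return sha256(x).digest()


a, b, c, d = b'\x11' * 32, b'\x22' * 32, b'\x33' * 32, b'\x44' * 32
root = h(h(a + b) + h(c + d))

# Leaf a sits at generalized index 4; its proof is [b, h(c + d)].
node = a
for sibling, index in zip([b, h(c + d)], [4, 2]):
    # An even generalized index means the node is a left child.
    node = h(node + sibling) if index % 2 == 0 else h(sibling + node)
assert node == root
```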
@ -116,8 +116,8 @@ def test_wrong_end_epoch_with_max_epochs_per_crosslink(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_attestation_signature(spec, state):
    attestation = get_valid_attestation(spec, state)
    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
@ -363,7 +363,7 @@ def test_inconsistent_bits(spec, state):
    attestation = get_valid_attestation(spec, state)
    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    custody_bits = attestation.aggregation_bits[:]
    custody_bits = attestation.custody_bits[:]
    custody_bits.append(False)

    attestation.custody_bits = custody_bits
@ -398,3 +398,61 @@ def test_empty_aggregation_bits(spec, state):
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_too_many_aggregation_bits(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True)
    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    # one too many bits
    attestation.aggregation_bits.append(0b0)

    yield from run_attestation_processing(spec, state, attestation, False)


@with_all_phases
@spec_state_test
def test_too_few_aggregation_bits(spec, state):
    attestation = get_valid_attestation(spec, state)
    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    attestation.aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](
        *([0b1] + [0b0] * (len(attestation.aggregation_bits) - 1)))

    sign_attestation(spec, state, attestation)

    # one too few bits
    attestation.aggregation_bits = attestation.aggregation_bits[:-1]

    yield from run_attestation_processing(spec, state, attestation, False)


@with_all_phases
@spec_state_test
def test_too_many_custody_bits(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True)
    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    # one too many bits
    attestation.custody_bits.append(0b0)

    yield from run_attestation_processing(spec, state, attestation, False)


@with_all_phases
@spec_state_test
def test_too_few_custody_bits(spec, state):
    attestation = get_valid_attestation(spec, state)
    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    attestation.custody_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](
        *([0b1] + [0b0] * (len(attestation.custody_bits) - 1)))

    sign_attestation(spec, state, attestation)

    # one too few bits
    attestation.custody_bits = attestation.custody_bits[:-1]

    yield from run_attestation_processing(spec, state, attestation, False)
@ -108,8 +108,8 @@ def test_success_surround(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_success_already_exited_recent(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
    slashed_indices = (
@ -123,8 +123,8 @@ def test_success_already_exited_recent(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_success_already_exited_long_ago(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
    slashed_indices = (
@ -139,24 +139,24 @@ def test_success_already_exited_long_ago(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_1(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_2(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
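The hunks above and below all make the same mechanical change: the BLS on/off decorator (@always_bls / @never_bls) moves relative to @spec_state_test. Order matters because Python applies stacked decorators bottom-up, so the lowest decorator wraps the bare test first. A self-contained illustration with stand-in decorators (not the eth2spec ones):

def tag(label):
    def deco(fn):
        def wrapped(*args, **kwargs):
            print('entering', label)
            return fn(*args, **kwargs)
        return wrapped
    return deco

@tag('outer')  # applied last, runs first
@tag('inner')  # applied first, wraps the bare function
def f():
    pass

f()  # prints 'entering outer', then 'entering inner'
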
@ -212,9 +212,9 @@ def test_custody_bit_0_and_1_intersect(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@always_bls
@with_all_phases
@spec_state_test
@always_bls
def test_att1_bad_extra_index(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)

@ -228,9 +228,9 @@ def test_att1_bad_extra_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@always_bls
@with_all_phases
@spec_state_test
@always_bls
def test_att1_bad_replaced_index(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)

@ -244,9 +244,9 @@ def test_att1_bad_replaced_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@always_bls
@with_all_phases
@spec_state_test
@always_bls
def test_att2_bad_extra_index(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)

@ -260,9 +260,9 @@ def test_att2_bad_extra_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@always_bls
@with_all_phases
@spec_state_test
@always_bls
def test_att2_bad_replaced_index(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)

@ -42,8 +42,8 @@ def test_success_block_header(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_block_header(spec, state):
    block = build_empty_block_for_next_slot(spec, state)
    yield from run_block_header_processing(spec, state, block, valid=False)

@ -94,8 +94,8 @@ def test_new_deposit_over_max(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_new_deposit(spec, state):
    # fresh deposit = next validator index = validator appended to registry
    validator_index = len(state.validators)
@ -115,8 +115,8 @@ def test_success_top_up(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_top_up(spec, state):
    validator_index = 0
    amount = spec.MAX_EFFECTIVE_BALANCE // 4

@ -49,24 +49,24 @@ def test_success(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_1(spec, state):
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=True)
    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_2(spec, state):
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2(spec, state):
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False)
    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)

@ -81,8 +81,8 @@ def test_success_active_above_max_effective_fee(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_signature(spec, state):
    transfer = get_valid_transfer(spec, state)
    # un-activate so validator can transfer

@ -47,8 +47,8 @@ def test_success(spec, state):


@with_all_phases
@always_bls
@spec_state_test
@always_bls
def test_invalid_signature(spec, state):
    # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
    state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@ -89,3 +89,20 @@ def test_historical_root_accumulator(spec, state):
    yield from run_process_final_updates(spec, state)

    assert len(state.historical_roots) == history_len + 1


@with_all_phases
@spec_state_test
def test_compact_committees_root(spec, state):
    assert spec.SLOTS_PER_ETH1_VOTING_PERIOD > spec.SLOTS_PER_EPOCH
    # skip ahead to the end of the epoch
    state.slot = spec.SLOTS_PER_EPOCH - 1

    next_epoch = spec.get_current_epoch(state) + 1

    # ensure that the order in which items are processed in final_updates
    # does not alter the expected_root
    expected_root = spec.get_compact_committees_root(state, next_epoch)
    yield from run_process_final_updates(spec, state)

    assert state.compact_committees_roots[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] == expected_root
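The final assertion indexes a fixed-length vector, so the epoch is reduced modulo the vector length. A tiny worked example, assuming the mainnet constant EPOCHS_PER_HISTORICAL_VECTOR = 2**16 (the value in force comes from the active config):

EPOCHS_PER_HISTORICAL_VECTOR = 2**16  # assumed mainnet value

def vector_index(epoch):
    # epoch 2**16 + 1 and epoch 1 land in the same slot of the circular buffer
    return epoch % EPOCHS_PER_HISTORICAL_VECTOR

assert vector_index(2**16 + 1) == vector_index(1) == 1
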
@ -0,0 +1,347 @@
from eth2spec.test.helpers.custody import (
    get_valid_bit_challenge,
    get_valid_custody_response,
    get_custody_test_vector,
    get_custody_merkle_root
)
from eth2spec.test.helpers.attestations import (
    get_valid_attestation,
)
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.test.helpers.state import next_epoch, get_balance
from eth2spec.test.helpers.block import apply_empty_block
from eth2spec.test.context import (
    with_all_phases_except,
    spec_state_test,
    expect_assertion_error,
)
from eth2spec.test.phase_0.block_processing.test_process_attestation import run_attestation_processing


def run_bit_challenge_processing(spec, state, custody_bit_challenge, valid=True):
    """
    Run ``process_bit_challenge``, yielding:
    - pre-state ('pre')
    - CustodyBitChallenge ('custody_bit_challenge')
    - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    yield 'pre', state
    yield 'custody_bit_challenge', custody_bit_challenge

    if not valid:
        expect_assertion_error(lambda: spec.process_bit_challenge(state, custody_bit_challenge))
        yield 'post', None
        return

    spec.process_bit_challenge(state, custody_bit_challenge)

    assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].chunk_bits_merkle_root == \
        hash_tree_root(custody_bit_challenge.chunk_bits)
    assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].challenger_index == \
        custody_bit_challenge.challenger_index
    assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].responder_index == \
        custody_bit_challenge.responder_index

    yield 'post', state
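These runners follow the same convention as the phase 0 block-processing tests: each yield emits one named part of the output test vector, and an invalid input yields a 'post' of None instead of a state. A sketch of how a consumer might collect the parts (the collect helper is illustrative, not part of the test machinery):

def collect(runner):
    # e.g. {'pre': ..., 'custody_bit_challenge': ..., 'post': ...}
    return dict(runner)

# Tests that only need the side effects use plain unpacking instead, as below:
#     _, _, _ = run_bit_challenge_processing(spec, state, challenge)
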
def run_custody_response_processing(spec, state, custody_response, valid=True):
    """
    Run ``process_custody_response``, yielding:
    - pre-state ('pre')
    - CustodyResponse ('custody_response')
    - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    yield 'pre', state
    yield 'custody_response', custody_response

    if not valid:
        expect_assertion_error(lambda: spec.process_custody_response(state, custody_response))
        yield 'post', None
        return

    # TODO: Add capability to also process chunk challenges, not only bit challenges
    challenge = state.custody_bit_challenge_records[custody_response.challenge_index]
    pre_slashed_balance = get_balance(state, challenge.challenger_index)

    spec.process_custody_response(state, custody_response)

    slashed_validator = state.validators[challenge.challenger_index]

    assert slashed_validator.slashed
    assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
    assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH

    assert get_balance(state, challenge.challenger_index) < pre_slashed_balance
    yield 'post', state


@with_all_phases_except(['phase0'])
@spec_state_test
def test_challenge_appended(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_multiple_epochs_custody(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH * 3
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)

    challenge = get_valid_bit_challenge(spec, state, attestation)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_many_epochs_custody(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH * 100
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)

    challenge = get_valid_bit_challenge(spec, state, attestation)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_off_chain_attestation(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_invalid_custody_bit_challenge(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation, invalid_custody_bit=True)

    yield from run_bit_challenge_processing(spec, state, challenge, valid=False)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_max_reveal_lateness_1(spec, state):
    next_epoch(spec, state)
    apply_empty_block(spec, state)

    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    next_epoch(spec, state)
    apply_empty_block(spec, state)

    _, _, _ = run_attestation_processing(spec, state, attestation)

    challenge = get_valid_bit_challenge(spec, state, attestation)

    responder_index = challenge.responder_index

    state.validators[responder_index].max_reveal_lateness = 3

    for i in range(spec.get_randao_epoch_for_custody_period(
        spec.get_custody_period_for_validator(state, responder_index),
        responder_index
    ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness - 2):
        next_epoch(spec, state)
        apply_empty_block(spec, state)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_max_reveal_lateness_2(spec, state):
    next_epoch(spec, state)
    apply_empty_block(spec, state)

    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    next_epoch(spec, state)
    apply_empty_block(spec, state)

    _, _, _ = run_attestation_processing(spec, state, attestation)

    challenge = get_valid_bit_challenge(spec, state, attestation)

    responder_index = challenge.responder_index

    state.validators[responder_index].max_reveal_lateness = 3

    for i in range(spec.get_randao_epoch_for_custody_period(
        spec.get_custody_period_for_validator(state, responder_index),
        responder_index
    ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness - 1):
        next_epoch(spec, state)
        apply_empty_block(spec, state)

    yield from run_bit_challenge_processing(spec, state, challenge, False)
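The two lateness tests bracket the challenge deadline: with max_reveal_lateness set to 3, test_max_reveal_lateness_1 advances one epoch fewer than test_max_reveal_lateness_2 and expects the challenge to still be accepted, while the second crosses the deadline and expects rejection. The loop bound, restated as a sketch (a reading of the arithmetic above, not a spec definition):

def challenge_deadline_epochs(randao_epoch, epochs_per_custody_period, max_reveal_lateness):
    # Advancing (deadline - 2) epochs keeps the bit challenge valid;
    # advancing (deadline - 1) epochs makes it too late.
    return randao_epoch + 2 * epochs_per_custody_period + max_reveal_lateness
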


@with_all_phases_except(['phase0'])
@spec_state_test
def test_custody_response(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    _, _, _ = run_bit_challenge_processing(spec, state, challenge)

    bit_challenge_index = state.custody_challenge_index - 1

    custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index)

    yield from run_custody_response_processing(spec, state, custody_response)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_custody_response_multiple_epochs(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH * 3
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    _, _, _ = run_bit_challenge_processing(spec, state, challenge)

    bit_challenge_index = state.custody_challenge_index - 1

    custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index)

    yield from run_custody_response_processing(spec, state, custody_response)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_custody_response_many_epochs(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH * 100
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    _, _, _ = run_bit_challenge_processing(spec, state, challenge)

    bit_challenge_index = state.custody_challenge_index - 1

    custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index)

    yield from run_custody_response_processing(spec, state, custody_response)
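Taken together, these response tests walk the full custody-game round trip: an attestation commits to a data root, a bit challenge is recorded against the attester, and a valid response resolves the challenge in the responder's favour, slashing the challenger. In call order (all names are the helpers used above):

# 1. run_attestation_processing(...)       - attestation with custody bits on chain
# 2. run_bit_challenge_processing(...)     - CustodyBitChallenge recorded in state
# 3. run_custody_response_processing(...)  - response verifies; the *challenger*
#                                            is slashed and loses balance
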
@ -0,0 +1,118 @@
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
from eth2spec.test.context import (
    with_all_phases_except,
    spec_state_test,
    expect_assertion_error,
    always_bls,
)


def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=True):
    """
    Run ``process_custody_key_reveal``, yielding:
    - pre-state ('pre')
    - custody_key_reveal ('custody_key_reveal')
    - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    yield 'pre', state
    yield 'custody_key_reveal', custody_key_reveal

    if not valid:
        expect_assertion_error(lambda: spec.process_custody_key_reveal(state, custody_key_reveal))
        yield 'post', None
        return

    revealer_index = custody_key_reveal.revealer_index

    pre_next_custody_secret_to_reveal = \
        state.validators[revealer_index].next_custody_secret_to_reveal
    pre_reveal_lateness = state.validators[revealer_index].max_reveal_lateness

    spec.process_custody_key_reveal(state, custody_key_reveal)

    post_next_custody_secret_to_reveal = \
        state.validators[revealer_index].next_custody_secret_to_reveal
    post_reveal_lateness = state.validators[revealer_index].max_reveal_lateness

    assert post_next_custody_secret_to_reveal == pre_next_custody_secret_to_reveal + 1

    if spec.get_current_epoch(state) > spec.get_randao_epoch_for_custody_period(
        pre_next_custody_secret_to_reveal,
        revealer_index
    ) + spec.EPOCHS_PER_CUSTODY_PERIOD:
        assert post_reveal_lateness > 0
        if pre_reveal_lateness == 0:
            assert post_reveal_lateness == spec.get_current_epoch(state) - spec.get_randao_epoch_for_custody_period(
                pre_next_custody_secret_to_reveal,
                revealer_index
            ) - spec.EPOCHS_PER_CUSTODY_PERIOD
    else:
        if pre_reveal_lateness > 0:
            assert post_reveal_lateness < pre_reveal_lateness

    yield 'post', state
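The branching assertions above amount to a small contract on how max_reveal_lateness may change. Restated as a standalone checker (same logic, with the randao-epoch-plus-period deadline collapsed into one argument):

def check_lateness_update(current_epoch, deadline_epoch, pre_lateness, post_lateness):
    if current_epoch > deadline_epoch:
        # Late reveal: some lateness must be recorded...
        assert post_lateness > 0
        if pre_lateness == 0:
            # ...and, starting from zero, it is exactly the overshoot.
            assert post_lateness == current_epoch - deadline_epoch
    elif pre_lateness > 0:
        # On-time reveal: any existing lateness strictly decreases.
        assert post_lateness < pre_lateness
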


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_success(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_reveal_too_early(spec, state):
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_wrong_period(spec, state):
    custody_key_reveal = get_valid_custody_key_reveal(spec, state, period=5)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_late_reveal(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_double_reveal(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 2
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_max_decrement(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

    _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)

    custody_key_reveal2 = get_valid_custody_key_reveal(spec, state)

    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal2)
@ -42,8 +42,8 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_success(spec, state):
    randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state)

@ -51,8 +51,8 @@ def test_success(spec, state):


@with_all_phases_except(['phase0'])
@never_bls
@spec_state_test
@never_bls
def test_reveal_from_current_epoch(spec, state):
    randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state))

@ -60,8 +60,8 @@ def test_reveal_from_current_epoch(spec, state):


@with_all_phases_except(['phase0'])
@never_bls
@spec_state_test
@never_bls
def test_reveal_from_past_epoch(spec, state):
    next_epoch(spec, state)
    apply_empty_block(spec, state)
@ -71,8 +71,8 @@ def test_reveal_from_past_epoch(spec, state):


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_reveal_with_custody_padding(spec, state):
    randao_key_reveal = get_valid_early_derived_secret_reveal(
        spec,
@ -83,8 +83,8 @@ def test_reveal_with_custody_padding(spec, state):


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_reveal_with_custody_padding_minus_one(spec, state):
    randao_key_reveal = get_valid_early_derived_secret_reveal(
        spec,
@ -95,33 +95,29 @@ def test_reveal_with_custody_padding_minus_one(spec, state):


@with_all_phases_except(['phase0'])
@never_bls
@spec_state_test
@never_bls
def test_double_reveal(spec, state):
    epoch = spec.get_current_epoch(state) + spec.RANDAO_PENALTY_EPOCHS
    randao_key_reveal1 = get_valid_early_derived_secret_reveal(
        spec,
        state,
        spec.get_current_epoch(state) + spec.RANDAO_PENALTY_EPOCHS + 1,
        epoch,
    )
    res = dict(run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal1))
    pre_state = res['pre']
    yield 'pre', pre_state
    intermediate_state = res['post']
    _, _, _ = dict(run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal1))

    randao_key_reveal2 = get_valid_early_derived_secret_reveal(
        spec,
        intermediate_state,
        spec.get_current_epoch(pre_state) + spec.RANDAO_PENALTY_EPOCHS + 1,
        state,
        epoch,
    )
    res = dict(run_early_derived_secret_reveal_processing(spec, intermediate_state, randao_key_reveal2, False))
    post_state = res['post']
    yield 'randao_key_reveal', [randao_key_reveal1, randao_key_reveal2]
    yield 'post', post_state

    yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False)


@with_all_phases_except(['phase0'])
@never_bls
@spec_state_test
@never_bls
def test_revealer_is_slashed(spec, state):
    randao_key_reveal = get_valid_early_derived_secret_reveal(spec, state, spec.get_current_epoch(state))
    state.validators[randao_key_reveal.revealed_index].slashed = True
@ -130,8 +126,8 @@ def test_revealer_is_slashed(spec, state):


@with_all_phases_except(['phase0'])
@never_bls
@spec_state_test
@never_bls
def test_far_future_epoch(spec, state):
    randao_key_reveal = get_valid_early_derived_secret_reveal(
        spec,
@ -0,0 +1,48 @@
from eth2spec.test.context import (
    with_all_phases_except,
    spec_state_test,
    always_bls,
)
from eth2spec.test.helpers.phase1.shard_block import (
    build_empty_shard_block,
)
from eth2spec.test.helpers.attestations import get_valid_attestation


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_process_empty_shard_block(spec, state):
    beacon_state = state

    shard_slot = spec.PHASE_1_FORK_SLOT
    beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH)
    shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0))
    shard_state.slot = shard_slot

    block = build_empty_shard_block(
        spec,
        shard_state,
        beacon_state,
        slot=shard_slot + 1,
        parent_root=spec.Hash(),
        signed=True,
        full_attestation=True,
    )

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state
    yield 'block', block

    beacon_attestation = get_valid_attestation(spec, beacon_state, signed=True)
    yield 'beacon_attestation', beacon_attestation

    is_valid_beacon_attestation = spec.is_valid_beacon_attestation(
        pre_state=shard_state,
        shard_blocks_or_state_roots=(block,),
        beacon_state=beacon_state,
        valid_attestations=set([beacon_attestation]),
        candidate=beacon_attestation,
    )
    assert is_valid_beacon_attestation
    yield 'is_valid_beacon_attestation', is_valid_beacon_attestation
@ -11,16 +11,58 @@ from eth2spec.test.context import (
@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_is_valid_shard_block(spec, state):
def test_process_empty_shard_block(spec, state):
    beacon_state = state

    shard_slot = spec.PHASE_1_FORK_SLOT
    beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH)
    shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0))
    shard_state.slot = shard_slot

    block = build_empty_shard_block(
        spec,
        state,
        slot=spec.Slot(spec.PERSISTENT_COMMITTEE_PERIOD * 100),
        shard=spec.Shard(1),
        shard_state,
        beacon_state,
        slot=shard_slot + 1,
        parent_root=spec.Hash(),
        signed=True,
        full_attestation=False,
    )

    # TODO: test `is_valid_shard_block`
    yield 'pre', shard_state
    yield 'beacon_state', beacon_state
    yield 'block', block

    yield 'blocks', (block,)
    spec.shard_state_transition(shard_state, beacon_state, block)

    yield 'post', shard_state


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
def test_process_full_attestation_shard_block(spec, state):
    beacon_state = state

    shard_slot = spec.PHASE_1_FORK_SLOT
    beacon_state.slot = spec.Slot(spec.PHASE_1_FORK_EPOCH * spec.SLOTS_PER_EPOCH)
    shard_state = spec.get_default_shard_state(beacon_state, shard=spec.Shard(0))
    shard_state.slot = shard_slot

    block = build_empty_shard_block(
        spec,
        shard_state,
        beacon_state,
        slot=shard_slot + 1,
        parent_root=spec.Hash(),
        signed=True,
        full_attestation=True,
    )

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state
    yield 'block', block

    spec.shard_state_transition(shard_state, beacon_state, block)

    yield 'post', shard_state
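One subtlety shared by both shard-block tests: 'pre' and 'post' are yielded from the same mutable shard_state object, with shard_state_transition mutating it in between. That only works if the consumer snapshots or encodes each part at yield time, which is assumed here of the test-vector writer (not shown in this diff):

def snapshot_parts(runner, encode):
    # Encode eagerly, so 'pre' is captured before the transition mutates state.
    return {name: encode(value) for name, value in runner}
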
@ -29,8 +29,8 @@ def check_finality(spec,


@with_all_phases
@never_bls
@spec_state_test
@never_bls
def test_finality_no_updates_at_genesis(spec, state):
    assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH

@ -53,8 +53,8 @@ def test_finality_no_updates_at_genesis(spec, state):


@with_all_phases
@never_bls
@spec_state_test
@never_bls
def test_finality_rule_4(spec, state):
    # get past first two epochs that finality does not run on
    next_epoch(spec, state)
@ -81,8 +81,8 @@ def test_finality_rule_4(spec, state):


@with_all_phases
@never_bls
@spec_state_test
@never_bls
def test_finality_rule_1(spec, state):
    # get past first two epochs that finality does not run on
    next_epoch(spec, state)
@ -111,8 +111,8 @@ def test_finality_rule_1(spec, state):


@with_all_phases
@never_bls
@spec_state_test
@never_bls
def test_finality_rule_2(spec, state):
    # get past first two epochs that finality does not run on
    next_epoch(spec, state)
@ -143,8 +143,8 @@ def test_finality_rule_2(spec, state):


@with_all_phases
@never_bls
@spec_state_test
@never_bls
def test_finality_rule_3(spec, state):
    """
    Test scenario described here
Some files were not shown because too many files have changed in this diff