Merge branch 'dev'

Danny Ryan 2021-05-14 11:36:38 -06:00
commit cfba8f5dac
No known key found for this signature in database
GPG Key ID: 2765A792E42CE07A
107 changed files with 3214 additions and 1028 deletions

.gitignore (1 change)

@ -17,6 +17,7 @@ eth2.0-spec-tests/
# Dynamically built from Markdown spec
tests/core/pyspec/eth2spec/phase0/
tests/core/pyspec/eth2spec/altair/
tests/core/pyspec/eth2spec/merge/
# coverage reports
.htmlcov


@ -2,7 +2,10 @@ SPEC_DIR = ./specs
SSZ_DIR = ./ssz
TEST_LIBS_DIR = ./tests/core
TEST_GENERATORS_DIR = ./tests/generators
# The working dir during testing
PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
ETH2SPEC_MODULE_DIR = $(PY_SPEC_DIR)/eth2spec
TEST_REPORT_DIR = $(PY_SPEC_DIR)/test-reports
TEST_VECTOR_DIR = ../eth2.0-spec-tests/tests
GENERATOR_DIR = ./tests/generators
SOLIDITY_DEPOSIT_CONTRACT_DIR = ./solidity_deposit_contract
@ -27,7 +30,8 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/alta
$(wildcard $(SPEC_DIR)/sharding/*.md)
COV_HTML_OUT=.htmlcov
COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html
COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT)
COV_INDEX_FILE=$(COV_HTML_OUT_DIR)/index.html
CURRENT_DIR = ${CURDIR}
LINTER_CONFIG_FILE = $(CURRENT_DIR)/linter.ini
@ -53,16 +57,17 @@ partial_clean:
rm -f .coverage
rm -rf $(PY_SPEC_DIR)/.pytest_cache
rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache
rm -rf $(PY_SPEC_DIR)/phase0
rm -rf $(PY_SPEC_DIR)/altair
rm -rf $(PY_SPEC_DIR)/$(COV_HTML_OUT)
rm -rf $(PY_SPEC_DIR)/.coverage
rm -rf $(PY_SPEC_DIR)/test-reports
rm -rf $(ETH2SPEC_MODULE_DIR)/phase0
rm -rf $(ETH2SPEC_MODULE_DIR)/altair
rm -rf $(ETH2SPEC_MODULE_DIR)/merge
rm -rf $(COV_HTML_OUT_DIR)
rm -rf $(TEST_REPORT_DIR)
rm -rf eth2spec.egg-info dist build
rm -rf build
clean: partial_clean
rm -rf venv
# legacy cleanup. The pyspec venv should be located at the repository root
rm -rf $(PY_SPEC_DIR)/venv
rm -rf $(DEPOSIT_CONTRACT_COMPILER_DIR)/venv
rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/venv
@ -87,7 +92,7 @@ pyspec:
# installs the packages to run pyspec tests
install_test:
python3 -m venv venv; . venv/bin/activate; python3 -m pip install .[lint]; python3 -m pip install -e .[test]
python3 -m venv venv; . venv/bin/activate; python3 -m pip install -e .[lint]; python3 -m pip install -e .[test]
test: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
@ -119,7 +124,7 @@ codespell:
lint: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.merge
lint_generators: pyspec
. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \


@ -23,6 +23,7 @@ The current features are:
* [Deposit Contract](specs/phase0/deposit-contract.md)
* [Honest Validator](specs/phase0/validator.md)
* [P2P Networking](specs/phase0/p2p-interface.md)
* [Weak Subjectivity](specs/phase0/weak-subjectivity.md)
### Altair
@ -30,6 +31,7 @@ The current features are:
* [Altair fork](specs/altair/fork.md)
* [Light client sync protocol](specs/altair/sync-protocol.md)
* [Honest Validator guide changes](specs/altair/validator.md)
* [P2P Networking](specs/altair/p2p-interface.md)
### Merge


@ -15,7 +15,7 @@ Over time, the need to sync an older state may be deprecated.
In this case, the prefix on the new constant may be removed, and the old constant will keep a special name before being completely removed.
A previous iteration of forking made use of "timelines", but this collides with the definitions used in the spec (constants for special forking slots, etc.), and was not integrated sufficiently in any of the spec tools or implementations.
Instead, the config essentially doubles as fork definition now, e.g. changing the value for `ALTAIR_FORK_SLOT` changes the fork.
Instead, the config essentially doubles as fork definition now, e.g. changing the value for `ALTAIR_FORK_EPOCH` changes the fork.
Another reason to prefer forking through constants is the ability to program a forking moment based on context, instead of being limited to a static slot number.
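To make this concrete, here is a minimal illustrative sketch (not part of the spec or its tooling) of how a client might select the active spec purely from the config constant:

```python
# Illustrative only: fork selection driven entirely by a config constant.
# ALTAIR_FORK_EPOCH is loaded from the config; max uint64 means "not scheduled".
from eth2spec.phase0 import spec as phase0
from eth2spec.altair import spec as altair


def spec_for_epoch(epoch: int):
    if epoch >= altair.ALTAIR_FORK_EPOCH:
        return altair
    return phase0
```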


@ -1,7 +1,5 @@
# Mainnet preset - Altair
CONFIG_NAME: "mainnet"
# Updated penalty values
# ---------------------------------------------------------------
# 3 * 2**24 (= 50,331,648)
@ -12,20 +10,20 @@ MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
# Sync committee
# ---------------------------------------------------------------
# 2**9 (= 512)
SYNC_COMMITTEE_SIZE: 512
# 2**9 (= 512)
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 512
# Misc
# ---------------------------------------------------------------
# 2**10 (= 1,024)
SYNC_COMMITTEE_SIZE: 1024
# 2**6 (= 64)
SYNC_PUBKEYS_PER_AGGREGATE: 64
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# Time parameters
# ---------------------------------------------------------------
# 2**8 (= 256)
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# Signature domains
@ -40,14 +38,16 @@ DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000
# 0x01000000
ALTAIR_FORK_VERSION: 0x01000000
# TBD
ALTAIR_FORK_SLOT: 18446744073709551615
ALTAIR_FORK_EPOCH: 18446744073709551615
# Sync protocol
# ---------------------------------------------------------------
# 1
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
# 2**13
MAX_VALID_LIGHT_CLIENT_UPDATES: 8192
# 2**13 (=8192)
LIGHT_CLIENT_UPDATE_TIMEOUT: 8192
# Validator
# ---------------------------------------------------------------
# 2**2 (= 4)
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE: 4


@ -1,7 +1,5 @@
# Mainnet preset - Custody Game
CONFIG_NAME: "mainnet"
# Time parameters
# ---------------------------------------------------------------
# 2**1 (= 2) epochs, 12.8 minutes


@ -1,9 +1,7 @@
# Mainnet preset - The Merge
CONFIG_NAME: "mainnet"
# Fork
# ---------------------------------------------------------------
MERGE_FORK_VERSION: 0x02000000
# TBD, temporarily max uint64 value: 2**64 - 1
MERGE_FORK_SLOT: 18446744073709551615
MERGE_FORK_EPOCH: 18446744073709551615


@ -1,7 +1,5 @@
# Mainnet preset
CONFIG_NAME: "mainnet"
# Misc
# ---------------------------------------------------------------
# 2**6 (= 64)


@ -1,12 +1,10 @@
# Mainnet preset - Sharding
CONFIG_NAME: "mainnet"
# Fork
# ---------------------------------------------------------------
SHARDING_FORK_VERSION: 0x03000000
# TBD, temporarily max uint64 value: 2**64 - 1
SHARDING_FORK_SLOT: 18446744073709551615
SHARDING_FORK_EPOCH: 18446744073709551615
# Beacon-chain


@ -1,7 +1,5 @@
# Minimal preset - Altair
CONFIG_NAME: "minimal"
# Updated penalty values
# ---------------------------------------------------------------
# 3 * 2**24 (= 50,331,648)
@ -12,20 +10,20 @@ MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
# Misc
# Sync committee
# ---------------------------------------------------------------
# [customized]
SYNC_COMMITTEE_SIZE: 32
# [customized]
SYNC_PUBKEYS_PER_AGGREGATE: 16
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
# Misc
# ---------------------------------------------------------------
# 2**2 (= 4)
INACTIVITY_SCORE_BIAS: 4
# Time parameters
# ---------------------------------------------------------------
# [customized]
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
# 2**4 (= 16)
INACTIVITY_SCORE_RECOVERY_RATE: 16
# Signature domains
@ -40,14 +38,17 @@ DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000
# [customized] Highest byte set to 0x01 to avoid collisions with mainnet versioning
ALTAIR_FORK_VERSION: 0x01000001
# [customized]
ALTAIR_FORK_SLOT: 18446744073709551615
ALTAIR_FORK_EPOCH: 18446744073709551615
# Sync protocol
# ---------------------------------------------------------------
# 1
MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
# [customized]
MAX_VALID_LIGHT_CLIENT_UPDATES: 32
# [customized]
LIGHT_CLIENT_UPDATE_TIMEOUT: 32
# Validator
# ---------------------------------------------------------------
# 2**2 (= 4)
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE: 4


@ -1,7 +1,5 @@
# Minimal preset - Custody Game
CONFIG_NAME: "minimal"
# Time parameters
# ---------------------------------------------------------------
# 2**1 (= 2) epochs, 12.8 minutes


@ -1,9 +1,7 @@
# Minimal preset - The Merge
CONFIG_NAME: "minimal"
# Fork
# ---------------------------------------------------------------
MERGE_FORK_VERSION: 0x02000001
# TBD, temporarily max uint64 value: 2**64 - 1
MERGE_FORK_SLOT: 18446744073709551615
MERGE_FORK_EPOCH: 18446744073709551615


@ -1,7 +1,5 @@
# Minimal preset
CONFIG_NAME: "minimal"
# Misc
# ---------------------------------------------------------------


@ -1,12 +1,10 @@
# Minimal preset - Sharding
CONFIG_NAME: "minimal"
# Fork
# ---------------------------------------------------------------
SHARDING_FORK_VERSION: 0x03000001
# TBD, temporarily max uint64 value: 2**64 - 1
SHARDING_FORK_SLOT: 18446744073709551615
SHARDING_FORK_EPOCH: 18446744073709551615
# Beacon-chain

setup.py (692 changes)

@ -1,182 +1,41 @@
from enum import Enum, auto
from setuptools import setup, find_packages, Command
from setuptools.command.build_py import build_py
from distutils import dir_util
from distutils.util import convert_path
import os
import re
from typing import Dict, NamedTuple, List
import string
import textwrap
from typing import Dict, NamedTuple, List, Sequence, Optional
from abc import ABC, abstractmethod
import ast
FUNCTION_REGEX = r'^def [\w_]*'
# NOTE: have to programmatically include third-party dependencies in `setup.py`.
MARKO_VERSION = "marko==1.0.2"
try:
import marko
except ImportError:
import pip
pip.main(["install", MARKO_VERSION])
from marko.block import Heading, FencedCode, LinkRefDef, BlankLine
from marko.inline import CodeSpan
from marko.ext.gfm import gfm
from marko.ext.gfm.elements import Table, Paragraph
# Definitions in context.py
PHASE0 = 'phase0'
ALTAIR = 'altair'
class SpecObject(NamedTuple):
functions: Dict[str, str]
custom_types: Dict[str, str]
constants: Dict[str, str]
ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects
ssz_objects: Dict[str, str]
dataclasses: Dict[str, str]
class CodeBlockType(Enum):
SSZ = auto()
DATACLASS = auto()
FUNCTION = auto()
def get_spec(file_name: str) -> SpecObject:
"""
Takes in the file name of a spec.md file, opens it and returns a parsed spec object.
Note: This function makes heavy use of the inherent ordering of dicts,
if this is not supported by your python version, it will not work.
"""
pulling_from = None # line number of start of latest object
current_name = None # most recent section title
functions: Dict[str, str] = {}
constants: Dict[str, str] = {}
ssz_dep_constants: Dict[str, str] = {}
ssz_objects: Dict[str, str] = {}
dataclasses: Dict[str, str] = {}
function_matcher = re.compile(FUNCTION_REGEX)
block_type = CodeBlockType.FUNCTION
custom_types: Dict[str, str] = {}
for linenum, line in enumerate(open(file_name).readlines()):
line = line.rstrip()
if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
current_name = line[line[:-1].rfind('`') + 1: -1]
if line[:9] == '```python':
assert pulling_from is None
pulling_from = linenum + 1
elif line[:3] == '```':
pulling_from = None
else:
# Handle function definitions & ssz_objects
if pulling_from is not None:
if len(line) > 18 and line[:6] == 'class ' and (line[-12:] == '(Container):' or '(phase' in line):
end = -12 if line[-12:] == '(Container):' else line.find('(')
name = line[6:end]
# Check consistency with markdown header
assert name == current_name
block_type = CodeBlockType.SSZ
elif line[:10] == '@dataclass':
block_type = CodeBlockType.DATACLASS
elif function_matcher.match(line) is not None:
current_name = function_matcher.match(line).group(0)
block_type = CodeBlockType.FUNCTION
if block_type == CodeBlockType.SSZ:
ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n'
elif block_type == CodeBlockType.DATACLASS:
dataclasses[current_name] = dataclasses.get(current_name, '') + line + '\n'
elif block_type == CodeBlockType.FUNCTION:
functions[current_name] = functions.get(current_name, '') + line + '\n'
else:
pass
# Handle constant and custom types table entries
elif pulling_from is None and len(line) > 0 and line[0] == '|':
row = line[1:].split('|')
if len(row) >= 2:
for i in range(2):
row[i] = row[i].strip().strip('`')
if '`' in row[i]:
row[i] = row[i][:row[i].find('`')]
is_constant_def = True
if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
is_constant_def = False
for c in row[0]:
if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
is_constant_def = False
if is_constant_def:
if row[1].startswith('get_generalized_index'):
ssz_dep_constants[row[0]] = row[1]
else:
constants[row[0]] = row[1].replace('**TBD**', '2**32')
elif row[1].startswith('uint') or row[1].startswith('Bytes'):
custom_types[row[0]] = row[1]
return SpecObject(
functions=functions,
custom_types=custom_types,
constants=constants,
ssz_dep_constants=ssz_dep_constants,
ssz_objects=ssz_objects,
dataclasses=dataclasses,
)
MERGE = 'merge'
CONFIG_LOADER = '''
apply_constants_config(globals())
'''
PHASE0_IMPORTS = '''from eth2spec.config.config_util import apply_constants_config
from typing import (
Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar
)
from dataclasses import (
dataclass,
field,
)
from lru import LRU
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
from eth2spec.utils.ssz.ssz_typing import (
View, boolean, Container, List, Vector, uint8, uint32, uint64,
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
)
from eth2spec.utils import bls
from eth2spec.utils.hash_function import hash
SSZObject = TypeVar('SSZObject', bound=View)
CONFIG_NAME = 'mainnet'
'''
ALTAIR_IMPORTS = '''from eth2spec.phase0 import spec as phase0
from eth2spec.config.config_util import apply_constants_config
from typing import (
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional, Union
)
from dataclasses import (
dataclass,
field,
)
from lru import LRU
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
from eth2spec.utils.ssz.ssz_typing import (
View, boolean, Container, List, Vector, uint8, uint32, uint64,
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
Path,
)
from eth2spec.utils import bls
from eth2spec.utils.hash_function import hash
# Whenever altair is loaded, make sure we have the latest phase0
from importlib import reload
reload(phase0)
SSZVariableName = str
GeneralizedIndex = NewType('GeneralizedIndex', int)
SSZObject = TypeVar('SSZObject', bound=View)
CONFIG_NAME = 'mainnet'
'''
SUNDRY_CONSTANTS_FUNCTIONS = '''
# The helper functions that are used when defining constants
CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''
def ceillog2(x: int) -> uint64:
if x < 1:
raise ValueError(f"ceillog2 accepts only positive values, x={x}")
@ -188,7 +47,262 @@ def floorlog2(x: int) -> uint64:
raise ValueError(f"floorlog2 accepts only positive values, x={x}")
return uint64(x.bit_length() - 1)
'''
PHASE0_SUNDRY_FUNCTIONS = '''
class ProtocolDefinition(NamedTuple):
# just function definitions currently. May expand with configuration vars in future.
functions: Dict[str, str]
class SpecObject(NamedTuple):
functions: Dict[str, str]
protocols: Dict[str, ProtocolDefinition]
custom_types: Dict[str, str]
constants: Dict[str, str]
ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects
ssz_objects: Dict[str, str]
dataclasses: Dict[str, str]
def _get_name_from_heading(heading: Heading) -> Optional[str]:
last_child = heading.children[-1]
if isinstance(last_child, CodeSpan):
return last_child.children
return None
def _get_source_from_code_block(block: FencedCode) -> str:
return block.children[0].children.strip()
def _get_function_name_from_source(source: str) -> str:
fn = ast.parse(source).body[0]
return fn.name
def _get_self_type_from_source(source: str) -> Optional[str]:
fn = ast.parse(source).body[0]
args = fn.args.args
if len(args) == 0:
return None
if args[0].arg != 'self':
return None
if args[0].annotation is None:
return None
return args[0].annotation.id
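# e.g. a spec function written as "def new_block(self: ExecutionEngine, ...)" yields
# "ExecutionEngine"; functions without an annotated `self` return None and stay top-level.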
def _get_class_info_from_source(source: str) -> (str, Optional[str]):
class_def = ast.parse(source).body[0]
base = class_def.bases[0]
if isinstance(base, ast.Name):
parent_class = base.id
else:
# NOTE: SSZ definition derives from earlier phase...
# e.g. `phase0.SignedBeaconBlock`
# TODO: check for consistency with other phases
parent_class = None
return class_def.name, parent_class
def _is_constant_id(name: str) -> bool:
if name[0] not in string.ascii_uppercase + '_':
return False
return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))
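# e.g. _is_constant_id("MAX_COMMITTEES_PER_SLOT") -> True, _is_constant_id("Bytes32") -> False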
ETH2_SPEC_COMMENT_PREFIX = "eth2spec:"
def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:
_, _, title = child._parse_info
if not (title[0] == "(" and title[len(title)-1] == ")"):
return None
title = title[1:len(title)-1]
if not title.startswith(ETH2_SPEC_COMMENT_PREFIX):
return None
return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()
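# e.g. a link reference definition titled "(eth2spec:skip)" yields "skip", which
# get_spec uses below to skip the element that follows it in the document.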
def get_spec(file_name: str) -> SpecObject:
functions: Dict[str, str] = {}
protocols: Dict[str, ProtocolDefinition] = {}
constants: Dict[str, str] = {}
ssz_dep_constants: Dict[str, str] = {}
ssz_objects: Dict[str, str] = {}
dataclasses: Dict[str, str] = {}
custom_types: Dict[str, str] = {}
with open(file_name) as source_file:
document = gfm.parse(source_file.read())
current_name = None
should_skip = False
for child in document.children:
if isinstance(child, BlankLine):
continue
if should_skip:
should_skip = False
continue
if isinstance(child, Heading):
current_name = _get_name_from_heading(child)
elif isinstance(child, FencedCode):
if child.lang != "python":
continue
source = _get_source_from_code_block(child)
if source.startswith("def"):
current_name = _get_function_name_from_source(source)
self_type_name = _get_self_type_from_source(source)
function_def = "\n".join(line.rstrip() for line in source.splitlines())
if self_type_name is None:
functions[current_name] = function_def
else:
if self_type_name not in protocols:
protocols[self_type_name] = ProtocolDefinition(functions={})
protocols[self_type_name].functions[current_name] = function_def
elif source.startswith("@dataclass"):
dataclasses[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
elif source.startswith("class"):
class_name, parent_class = _get_class_info_from_source(source)
# check consistency with spec
assert class_name == current_name
if parent_class:
assert parent_class == "Container"
# NOTE: trim whitespace from spec
ssz_objects[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
else:
raise Exception("unrecognized python code element")
elif isinstance(child, Table):
for row in child.children:
cells = row.children
if len(cells) >= 2:
name_cell = cells[0]
name = name_cell.children[0].children
value_cell = cells[1]
value = value_cell.children[0].children
if isinstance(value, list):
# marko parses `**X**` as a list containing a X
value = value[0].children
if _is_constant_id(name):
if value.startswith("get_generalized_index"):
ssz_dep_constants[name] = value
else:
constants[name] = value.replace("TBD", "2**32")
elif value.startswith("uint") or value.startswith("Bytes") or value.startswith("ByteList"):
custom_types[name] = value
elif isinstance(child, LinkRefDef):
comment = _get_eth2_spec_comment(child)
if comment == "skip":
should_skip = True
return SpecObject(
functions=functions,
protocols=protocols,
custom_types=custom_types,
constants=constants,
ssz_dep_constants=ssz_dep_constants,
ssz_objects=ssz_objects,
dataclasses=dataclasses,
)
class SpecBuilder(ABC):
@property
@abstractmethod
def fork(self) -> str:
raise NotImplementedError()
@classmethod
@abstractmethod
def imports(cls) -> str:
"""
Import objects from other libraries.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def preparations(cls) -> str:
"""
Define special types/constants for building the pyspec, or make preparatory calls.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def sundry_functions(cls) -> str:
"""
The functions that are (1) defined abstractly in specs or (2) adjusted for better performance.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
"""
The constants that are required for SSZ objects.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:
"""
The constants that are required for custom types.
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def invariant_checks(cls) -> str:
"""
The invariant checks
"""
raise NotImplementedError()
@classmethod
@abstractmethod
def build_spec(cls, source_files: List[str]) -> str:
raise NotImplementedError()
#
# Phase0SpecBuilder
#
class Phase0SpecBuilder(SpecBuilder):
fork: str = PHASE0
@classmethod
def imports(cls) -> str:
return '''from lru import LRU
from dataclasses import (
dataclass,
field,
)
from typing import (
Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar
)
from eth2spec.config.config_util import apply_constants_config
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
from eth2spec.utils.ssz.ssz_typing import (
View, boolean, Container, List, Vector, uint8, uint32, uint64,
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)
from eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401
from eth2spec.utils import bls
from eth2spec.utils.hash_function import hash
'''
@classmethod
def preparations(cls) -> str:
return '''
SSZObject = TypeVar('SSZObject', bound=View)
CONFIG_NAME = 'mainnet'
'''
@classmethod
def sundry_functions(cls) -> str:
return '''
def get_eth1_data(block: Eth1Block) -> Eth1Data:
"""
A stub function returning mock Eth1Data.
@ -259,9 +373,52 @@ get_attesting_indices = cache_this(
),
_get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''
@classmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
return {}
ALTAIR_SUNDRY_FUNCTIONS = '''
@classmethod
def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:
return {}
@classmethod
def invariant_checks(cls) -> str:
return ''
@classmethod
def build_spec(cls, source_files: Sequence[str]) -> str:
return _build_spec(cls.fork, source_files)
#
# AltairSpecBuilder
#
class AltairSpecBuilder(Phase0SpecBuilder):
fork: str = ALTAIR
@classmethod
def imports(cls) -> str:
return super().imports() + '\n' + '''
from typing import NewType, Union
from importlib import reload
from eth2spec.phase0 import spec as phase0
from eth2spec.utils.ssz.ssz_typing import Path
'''
@classmethod
def preparations(cls):
return super().preparations() + '\n' + '''
# Whenever this spec version is loaded, make sure we have the latest phase0
reload(phase0)
SSZVariableName = str
GeneralizedIndex = NewType('GeneralizedIndex', int)
'''
@classmethod
def sundry_functions(cls) -> str:
return super().sundry_functions() + '\n\n' + '''
def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex:
ssz_path = Path(ssz_class)
for item in path:
@ -269,29 +426,94 @@ def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariable
return GeneralizedIndex(ssz_path.gindex())'''
# The constants that depend on SSZ objects
# Will verify the value at the end of the spec
ALTAIR_HARDCODED_SSZ_DEP_CONSTANTS = {
'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',
'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',
}
@classmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
constants = {
'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',
'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',
}
return {**super().hardcoded_ssz_dep_constants(), **constants}
ALTAIR_INVAIANT_CHECKS = '''
@classmethod
def invariant_checks(cls) -> str:
return '''
assert (
TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + SYNC_REWARD_WEIGHT + PROPOSER_WEIGHT
) == WEIGHT_DENOMINATOR'''
def is_phase0(fork):
return fork == PHASE0
#
# MergeSpecBuilder
#
class MergeSpecBuilder(Phase0SpecBuilder):
fork: str = MERGE
@classmethod
def imports(cls):
return super().imports() + '''
from typing import Protocol
from eth2spec.phase0 import spec as phase0
from eth2spec.utils.ssz.ssz_typing import Bytes20, ByteList, ByteVector, uint256
from importlib import reload
'''
@classmethod
def preparations(cls):
return super().preparations() + '\n' + '''
reload(phase0)
'''
@classmethod
def sundry_functions(cls) -> str:
return super().sundry_functions() + '\n\n' + """
ExecutionState = Any
def is_altair(fork):
return fork == ALTAIR
def get_pow_block(hash: Bytes32) -> PowBlock:
return PowBlock(block_hash=hash, is_valid=True, is_processed=True, total_difficulty=TRANSITION_TOTAL_DIFFICULTY)
def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_class_objects: Dict[str, str]) -> str:
def get_execution_state(execution_state_root: Bytes32) -> ExecutionState:
pass
def get_pow_chain_head() -> PowBlock:
pass
class NoopExecutionEngine(ExecutionEngine):
def new_block(self, execution_payload: ExecutionPayload) -> bool:
return True
def set_head(self, block_hash: Hash32) -> bool:
return True
def finalize_block(self, block_hash: Hash32) -> bool:
return True
def assemble_block(self, block_hash: Hash32, timestamp: uint64) -> ExecutionPayload:
raise NotImplementedError("no default block production")
EXECUTION_ENGINE = NoopExecutionEngine()"""
@classmethod
def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:
constants = {
'MAX_BYTES_PER_OPAQUE_TRANSACTION': 'uint64(2**20)',
}
return {**super().hardcoded_custom_type_dep_constants(), **constants}
spec_builders = {
builder.fork: builder
for builder in (Phase0SpecBuilder, AltairSpecBuilder, MergeSpecBuilder)
}
def objects_to_spec(spec_object: SpecObject, builder: SpecBuilder, ordered_class_objects: Dict[str, str]) -> str:
"""
Given all the objects that constitute a spec, combine them into a single pyfile.
"""
@ -300,49 +522,75 @@ def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_cl
[
f"class {key}({value}):\n pass\n"
for key, value in spec_object.custom_types.items()
if not value.startswith('ByteList')
]
)
+ ('\n\n' if len([key for key, value in spec_object.custom_types.items() if value.startswith('ByteList')]) > 0 else '')
+ '\n\n'.join(
[
f"{key} = {value}\n"
for key, value in spec_object.custom_types.items()
if value.startswith('ByteList')
]
)
)
def format_protocol(protocol_name: str, protocol_def: ProtocolDefinition) -> str:
protocol = f"class {protocol_name}(Protocol):"
for fn_source in protocol_def.functions.values():
fn_source = fn_source.replace("self: "+protocol_name, "self")
protocol += "\n\n" + textwrap.indent(fn_source, " ")
return protocol
protocols_spec = '\n\n\n'.join(format_protocol(k, v) for k, v in spec_object.protocols.items())
for k in list(spec_object.functions):
if "ceillog2" in k or "floorlog2" in k:
del spec_object.functions[k]
functions_spec = '\n\n'.join(spec_object.functions.values())
functions_spec = '\n\n\n'.join(spec_object.functions.values())
for k in list(spec_object.constants.keys()):
if k == "BLS12_381_Q":
spec_object.constants[k] += " # noqa: E501"
constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, spec_object.constants[x]), spec_object.constants))
ordered_class_objects_spec = '\n\n'.join(ordered_class_objects.values())
if is_altair(fork):
altair_ssz_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, ALTAIR_HARDCODED_SSZ_DEP_CONSTANTS[x]), ALTAIR_HARDCODED_SSZ_DEP_CONSTANTS))
ordered_class_objects_spec = '\n\n\n'.join(ordered_class_objects.values())
ssz_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_ssz_dep_constants()[x]), builder.hardcoded_ssz_dep_constants()))
ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), builder.hardcoded_ssz_dep_constants()))
custom_type_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants()[x]), builder.hardcoded_custom_type_dep_constants()))
spec = (
imports
+ '\n\n' + f"fork = \'{fork}\'\n"
builder.imports()
+ builder.preparations()
+ '\n\n' + f"fork = \'{builder.fork}\'\n"
# The constants that some SSZ containers require. Need to be defined before `new_type_definitions`
+ ('\n\n' + custom_type_dep_constants + '\n' if custom_type_dep_constants != '' else '')
+ '\n\n' + new_type_definitions
+ '\n' + SUNDRY_CONSTANTS_FUNCTIONS
+ '\n' + CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS
# The constants that some SSZ containers require. Need to be defined before `constants_spec`
+ ('\n\n' + altair_ssz_dep_constants if is_altair(fork) else '')
+ ('\n\n' + ssz_dep_constants if ssz_dep_constants != '' else '')
+ '\n\n' + constants_spec
+ '\n\n' + CONFIG_LOADER
+ '\n\n' + ordered_class_objects_spec
+ '\n\n' + functions_spec
# Functions to make pyspec work
+ '\n' + PHASE0_SUNDRY_FUNCTIONS
+ ('\n' + ALTAIR_SUNDRY_FUNCTIONS if is_altair(fork) else '')
+ ('\n\n\n' + protocols_spec if protocols_spec != '' else '')
+ '\n\n\n' + functions_spec
+ '\n\n' + builder.sundry_functions()
# Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are
# the same as the spec definition.
+ ('\n\n\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '')
+ ('\n' + builder.invariant_checks() if builder.invariant_checks() != '' else '')
+ '\n'
)
# Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are
as same as the spec definition (sic; same check as above).
if is_altair(fork):
altair_ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), ALTAIR_HARDCODED_SSZ_DEP_CONSTANTS))
spec += '\n\n\n' + altair_ssz_dep_constants_verification
spec += '\n' + ALTAIR_INVAIANT_CHECKS
spec += '\n'
return spec
def combine_protocols(old_protocols: Dict[str, ProtocolDefinition],
new_protocols: Dict[str, ProtocolDefinition]) -> Dict[str, ProtocolDefinition]:
for key, value in new_protocols.items():
if key not in old_protocols:
old_protocols[key] = value
else:
functions = combine_functions(old_protocols[key].functions, value.functions)
old_protocols[key] = ProtocolDefinition(functions=functions)
return old_protocols
def combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]:
for key, value in new_functions.items():
old_functions[key] = value
@ -357,10 +605,10 @@ def combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, st
ignored_dependencies = [
'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',
'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
'Bytes1', 'Bytes4', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
'bytes', 'byte', 'ByteList', 'ByteVector',
'Dict', 'dict', 'field', 'ceillog2', 'floorlog2',
'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',
]
@ -401,8 +649,9 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
"""
Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
"""
functions0, custom_types0, constants0, ssz_dep_constants0, ssz_objects0, dataclasses0 = spec0
functions1, custom_types1, constants1, ssz_dep_constants1, ssz_objects1, dataclasses1 = spec1
functions0, protocols0, custom_types0, constants0, ssz_dep_constants0, ssz_objects0, dataclasses0 = spec0
functions1, protocols1, custom_types1, constants1, ssz_dep_constants1, ssz_objects1, dataclasses1 = spec1
protocols = combine_protocols(protocols0, protocols1)
functions = combine_functions(functions0, functions1)
custom_types = combine_constants(custom_types0, custom_types1)
constants = combine_constants(constants0, constants1)
@ -411,6 +660,7 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
dataclasses = combine_functions(dataclasses0, dataclasses1)
return SpecObject(
functions=functions,
protocols=protocols,
custom_types=custom_types,
constants=constants,
ssz_dep_constants=ssz_dep_constants,
@ -419,13 +669,7 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
)
fork_imports = {
'phase0': PHASE0_IMPORTS,
'altair': ALTAIR_IMPORTS,
}
def build_spec(fork: str, source_files: List[str]) -> str:
def _build_spec(fork: str, source_files: Sequence[str]) -> str:
all_specs = [get_spec(spec) for spec in source_files]
spec_object = all_specs[0]
@ -435,7 +679,7 @@ def build_spec(fork: str, source_files: List[str]) -> str:
class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}
dependency_order_class_objects(class_objects, spec_object.custom_types)
return objects_to_spec(spec_object, fork_imports[fork], fork, class_objects)
return objects_to_spec(spec_object, spec_builders[fork], class_objects)
class PySpecCommand(Command):
@ -467,14 +711,14 @@ class PySpecCommand(Command):
if len(self.md_doc_paths) == 0:
print("no paths were specified, using default markdown file paths for pyspec"
" build (spec fork: %s)" % self.spec_fork)
if is_phase0(self.spec_fork):
if self.spec_fork == PHASE0:
self.md_doc_paths = """
specs/phase0/beacon-chain.md
specs/phase0/fork-choice.md
specs/phase0/validator.md
specs/phase0/weak-subjectivity.md
"""
elif is_altair(self.spec_fork):
elif self.spec_fork == ALTAIR:
self.md_doc_paths = """
specs/phase0/beacon-chain.md
specs/phase0/fork-choice.md
@ -483,8 +727,19 @@ class PySpecCommand(Command):
specs/altair/beacon-chain.md
specs/altair/fork.md
specs/altair/validator.md
specs/altair/p2p-interface.md
specs/altair/sync-protocol.md
"""
elif self.spec_fork == MERGE:
self.md_doc_paths = """
specs/phase0/beacon-chain.md
specs/phase0/fork-choice.md
specs/phase0/validator.md
specs/phase0/weak-subjectivity.md
specs/merge/beacon-chain.md
specs/merge/fork-choice.md
specs/merge/validator.md
"""
else:
raise Exception('no markdown files specified, and spec fork "%s" is unknown' % self.spec_fork)
@ -495,7 +750,7 @@ class PySpecCommand(Command):
raise Exception('Pyspec markdown input file "%s" does not exist.' % filename)
def run(self):
spec_str = build_spec(self.spec_fork, self.parsed_md_doc_paths)
spec_str = spec_builders[self.spec_fork].build_spec(self.parsed_md_doc_paths)
if self.dry_run:
self.announce('dry run successfully prepared contents for spec.'
f' out dir: "{self.out_dir}", spec fork: "{self.spec_fork}"')
@ -523,7 +778,7 @@ class BuildPyCommand(build_py):
self.run_command('pyspec')
def run(self):
for spec_fork in fork_imports:
for spec_fork in spec_builders:
self.run_pyspec_cmd(spec_fork=spec_fork)
super(BuildPyCommand, self).run()
@ -551,7 +806,7 @@ class PyspecDevCommand(Command):
def run(self):
print("running build_py command")
for spec_fork in fork_imports:
for spec_fork in spec_builders:
self.run_pyspec_cmd(spec_fork=spec_fork)
commands = {
@ -607,8 +862,9 @@ setup(
"py_ecc==5.2.0",
"milagro_bls_binding==1.6.3",
"dataclasses==0.6",
"remerkleable==0.1.18",
"remerkleable==0.1.19",
"ruamel.yaml==0.16.5",
"lru-dict==1.1.6",
"marko==1.0.2",
]
)


@ -14,8 +14,8 @@
- [Misc](#misc)
- [Configuration](#configuration)
- [Updated penalty values](#updated-penalty-values)
- [Sync committee](#sync-committee)
- [Misc](#misc-1)
- [Time parameters](#time-parameters)
- [Domain types](#domain-types)
- [Containers](#containers)
- [Modified containers](#modified-containers)
@ -28,15 +28,15 @@
- [`Predicates`](#predicates)
- [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
- [Misc](#misc-2)
- [`get_flag_indices_and_weights`](#get_flag_indices_and_weights)
- [`add_flag`](#add_flag)
- [`has_flag`](#has_flag)
- [Beacon state accessors](#beacon-state-accessors)
- [`get_sync_committee_indices`](#get_sync_committee_indices)
- [`get_sync_committee`](#get_sync_committee)
- [`get_next_sync_committee_indices`](#get_next_sync_committee_indices)
- [`get_next_sync_committee`](#get_next_sync_committee)
- [`get_base_reward_per_increment`](#get_base_reward_per_increment)
- [`get_base_reward`](#get_base_reward)
- [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)
- [`get_attestation_participation_flag_indices`](#get_attestation_participation_flag_indices)
- [`get_flag_index_deltas`](#get_flag_index_deltas)
- [Modified `get_inactivity_penalty_deltas`](#modified-get_inactivity_penalty_deltas)
- [Beacon state mutators](#beacon-state-mutators)
@ -52,6 +52,7 @@
- [Slashings](#slashings)
- [Participation flags updates](#participation-flags-updates)
- [Sync committee updates](#sync-committee-updates)
- [Initialize state for pure Altair testnets and test vectors](#initialize-state-for-pure-altair-testnets-and-test-vectors)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -76,17 +77,17 @@ Altair is the first beacon chain hard fork. Its main features are:
| Name | Value |
| - | - |
| `TIMELY_HEAD_FLAG_INDEX` | `0` |
| `TIMELY_SOURCE_FLAG_INDEX` | `1` |
| `TIMELY_TARGET_FLAG_INDEX` | `2` |
| `TIMELY_SOURCE_FLAG_INDEX` | `0` |
| `TIMELY_TARGET_FLAG_INDEX` | `1` |
| `TIMELY_HEAD_FLAG_INDEX` | `2` |
### Incentivization weights
| Name | Value |
| - | - |
| `TIMELY_HEAD_WEIGHT` | `uint64(12)` |
| `TIMELY_SOURCE_WEIGHT` | `uint64(12)` |
| `TIMELY_TARGET_WEIGHT` | `uint64(24)` |
| `TIMELY_HEAD_WEIGHT` | `uint64(12)` |
| `SYNC_REWARD_WEIGHT` | `uint64(8)` |
| `PROPOSER_WEIGHT` | `uint64(8)` |
| `WEIGHT_DENOMINATOR` | `uint64(64)` |
@ -98,12 +99,13 @@ Altair is the first beacon chain hard fork. Its main features are:
| Name | Value |
| - | - |
| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |
| `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]` |
## Configuration
### Updated penalty values
This patch updates a few configuration values to move penalty parameters toward their final, maximum security values.
This patch updates a few configuration values to move penalty parameters closer to their final, maximum security values.
*Note*: The spec does *not* override previous configuration values but instead creates new values and replaces usage throughout.
@ -113,19 +115,19 @@ This patch updates a few configuration values to move penalty parameters toward
| `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR` | `uint64(2**6)` (= 64) |
| `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR` | `uint64(2)` |
### Sync committee
| Name | Value | Unit | Duration |
| - | - | - | - |
| `SYNC_COMMITTEE_SIZE` | `uint64(2**9)` (= 512) | Validators | |
| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**9)` (= 512) | epochs | ~54 hours |
### Misc
| Name | Value |
| - | - |
| `SYNC_COMMITTEE_SIZE` | `uint64(2**10)` (= 1,024) |
| `SYNC_PUBKEYS_PER_AGGREGATE` | `uint64(2**6)` (= 64) |
| `INACTIVITY_SCORE_BIAS` | `uint64(4)` |
### Time parameters
| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
| `INACTIVITY_SCORE_RECOVERY_RATE` | `uint64(16)` |
### Domain types
@ -211,7 +213,7 @@ class SyncAggregate(Container):
```python
class SyncCommittee(Container):
pubkeys: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE]
pubkey_aggregates: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE // SYNC_PUBKEYS_PER_AGGREGATE]
aggregate_pubkey: BLSPubkey
```
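As a rough illustration of the intended optimization (a sketch, not spec code): when every sync committee bit is set, the stored `aggregate_pubkey` equals the aggregate of all member pubkeys, so a resource-constrained client can verify with a single key instead of aggregating `SYNC_COMMITTEE_SIZE` keys:

```python
# Sketch only: fast path for a fully-participating sync committee.
def verify_full_participation(committee: SyncCommittee,
                              signing_root: Bytes32,
                              signature: BLSSignature) -> bool:
    # Equivalent to aggregating all committee.pubkeys first, but O(1) in committee size
    return bls.FastAggregateVerify([committee.aggregate_pubkey], signing_root, signature)
```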
## Helper functions
@ -232,21 +234,13 @@ def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, s
### Misc
#### `get_flag_indices_and_weights`
```python
def get_flag_indices_and_weights() -> Sequence[Tuple[int, uint64]]:
return (
(TIMELY_HEAD_FLAG_INDEX, TIMELY_HEAD_WEIGHT),
(TIMELY_SOURCE_FLAG_INDEX, TIMELY_SOURCE_WEIGHT),
(TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_WEIGHT),
)
```
#### `add_flag`
```python
def add_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags:
"""
Return a new ``ParticipationFlags`` adding ``flag_index`` to ``flags``.
"""
flag = ParticipationFlags(2**flag_index)
return flags | flag
```
@ -255,24 +249,31 @@ def add_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags:
```python
def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
"""
Return whether ``flags`` has ``flag_index`` set.
"""
flag = ParticipationFlags(2**flag_index)
return flags & flag == flag
```
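For intuition, a quick sketch of the bit arithmetic (toy values; `ParticipationFlags` is a `uint8` bitfield):

```python
flags = ParticipationFlags(0)
flags = add_flag(flags, TIMELY_SOURCE_FLAG_INDEX)     # sets bit 0 -> 0b001
assert has_flag(flags, TIMELY_SOURCE_FLAG_INDEX)
assert not has_flag(flags, TIMELY_TARGET_FLAG_INDEX)  # bit 1 still unset
```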
### Beacon state accessors
#### `get_sync_committee_indices`
#### `get_next_sync_committee_indices`
```python
def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
"""
Return the sequence of sync committee indices (which may include duplicate indices) for a given state and epoch.
Return the sequence of sync committee indices (which may include duplicate indices)
for the next sync committee, given a ``state`` at a sync committee period boundary.
Note: Committee can contain duplicate indices for small validator sets (< SYNC_COMMITTEE_SIZE + 128)
"""
epoch = Epoch(get_current_epoch(state) + 1)
MAX_RANDOM_BYTE = 2**8 - 1
base_epoch = Epoch((max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
active_validator_indices = get_active_validator_indices(state, base_epoch)
active_validator_indices = get_active_validator_indices(state, epoch)
active_validator_count = uint64(len(active_validator_indices))
seed = get_seed(state, base_epoch, DOMAIN_SYNC_COMMITTEE)
seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
i = 0
sync_committee_indices: List[ValidatorIndex] = []
while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
@ -280,24 +281,34 @@ def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[Val
candidate_index = active_validator_indices[shuffled_index]
random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
effective_balance = state.validators[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: # Sample with replacement
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
sync_committee_indices.append(candidate_index)
i += 1
return sync_committee_indices
```
#### `get_sync_committee`
#### `get_next_sync_committee`
```python
def get_sync_committee(state: BeaconState, epoch: Epoch) -> SyncCommittee:
def get_next_sync_committee(state: BeaconState) -> SyncCommittee:
"""
Return the sync committee for a given state and epoch.
Return the *next* sync committee for a given ``state``.
``SyncCommittee`` contains an aggregate pubkey that enables
resource-constrained clients to save some computation when verifying
the sync committee's signature.
``SyncCommittee`` can also contain duplicate pubkeys, when ``get_next_sync_committee_indices``
returns duplicate indices. Implementations must take care when handling
optimizations relating to aggregation and verification in the presence of duplicates.
Note: This function should only be called at sync committee period boundaries by ``process_sync_committee_updates``
as ``get_next_sync_committee_indices`` is not stable within a given period.
"""
indices = get_sync_committee_indices(state, epoch)
indices = get_next_sync_committee_indices(state)
pubkeys = [state.validators[index].pubkey for index in indices]
partition = [pubkeys[i:i + SYNC_PUBKEYS_PER_AGGREGATE] for i in range(0, len(pubkeys), SYNC_PUBKEYS_PER_AGGREGATE)]
pubkey_aggregates = [bls.AggregatePKs(preaggregate) for preaggregate in partition]
return SyncCommittee(pubkeys=pubkeys, pubkey_aggregates=pubkey_aggregates)
aggregate_pubkey = bls.AggregatePKs(pubkeys)
return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
```
#### `get_base_reward_per_increment`
@ -309,10 +320,17 @@ def get_base_reward_per_increment(state: BeaconState) -> Gwei:
#### `get_base_reward`
*Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH`.
*Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH` and the use of increment-based accounting.
```python
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
"""
Return the base reward for the validator defined by ``index`` with respect to the current ``state``.
Note: An optimally performing validator can earn one base reward per epoch over a long time horizon.
This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal
and sync committees).
"""
increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT
return Gwei(increments * get_base_reward_per_increment(state))
```
@ -322,7 +340,7 @@ def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
```python
def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epoch: Epoch) -> Set[ValidatorIndex]:
"""
Return the active and unslashed validator indices for the given epoch and flag index.
Return the set of validator indices that are both active and unslashed for the given ``flag_index`` and ``epoch``.
"""
assert epoch in (get_previous_epoch(state), get_current_epoch(state))
if epoch == get_current_epoch(state):
@ -334,38 +352,65 @@ def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epo
return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
```
#### `get_attestation_participation_flag_indices`
```python
def get_attestation_participation_flag_indices(state: BeaconState,
data: AttestationData,
inclusion_delay: uint64) -> Sequence[int]:
"""
Return the flag indices that are satisfied by an attestation.
"""
if data.target.epoch == get_current_epoch(state):
justified_checkpoint = state.current_justified_checkpoint
else:
justified_checkpoint = state.previous_justified_checkpoint
# Matching roots
is_matching_source = data.source == justified_checkpoint
is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
assert is_matching_source
participation_flag_indices = []
if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH):
participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH:
participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:
participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
return participation_flag_indices
```
#### `get_flag_index_deltas`
```python
def get_flag_index_deltas(state: BeaconState, flag_index: int, weight: uint64) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
def get_flag_index_deltas(state: BeaconState, flag_index: int) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
Return the deltas for a given flag index by scanning through the participation flags.
Return the deltas for a given ``flag_index`` by scanning through the participation flags.
"""
rewards = [Gwei(0)] * len(state.validators)
penalties = [Gwei(0)] * len(state.validators)
unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, get_previous_epoch(state))
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from balances to avoid uint64 overflow
unslashed_participating_increments = get_total_balance(state, unslashed_participating_indices) // increment
active_increments = get_total_active_balance(state) // increment
previous_epoch = get_previous_epoch(state)
unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, previous_epoch)
weight = PARTICIPATION_FLAG_WEIGHTS[flag_index]
unslashed_participating_balance = get_total_balance(state, unslashed_participating_indices)
unslashed_participating_increments = unslashed_participating_balance // EFFECTIVE_BALANCE_INCREMENT
active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
for index in get_eligible_validator_indices(state):
base_reward = get_base_reward(state, index)
if index in unslashed_participating_indices:
if is_in_inactivity_leak(state):
# This flag reward cancels the inactivity penalty corresponding to the flag index
rewards[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
else:
if not is_in_inactivity_leak(state):
reward_numerator = base_reward * weight * unslashed_participating_increments
rewards[index] += Gwei(reward_numerator // (active_increments * WEIGHT_DENOMINATOR))
else:
elif flag_index != TIMELY_HEAD_FLAG_INDEX:
penalties[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
return rewards, penalties
```
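A worked toy example of the non-leak branch (numbers assumed for illustration only): with `base_reward = 64_000` Gwei, the target flag (weight 24), and 90 of 100 active increments participating:

```python
reward = 64_000 * 24 * 90 // (100 * 64)   # = 21_600 Gwei for a participant
penalty = 64_000 * 24 // 64               # = 24_000 Gwei for a non-participant
```

The head flag is the one exception: per the `elif` above, missing it incurs no penalty.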
#### Modified `get_inactivity_penalty_deltas`
*Note*: The function `get_inactivity_penalty_deltas` is modified in the selection of matching target indices
and the removal of `BASE_REWARDS_PER_EPOCH`.
```python
def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
"""
@ -373,17 +418,13 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
"""
rewards = [Gwei(0) for _ in range(len(state.validators))]
penalties = [Gwei(0) for _ in range(len(state.validators))]
if is_in_inactivity_leak(state):
previous_epoch = get_previous_epoch(state)
matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
for index in get_eligible_validator_indices(state):
for (_, weight) in get_flag_indices_and_weights():
# This inactivity penalty cancels the flag reward corresponding to the flag index
penalties[index] += Gwei(get_base_reward(state, index) * weight // WEIGHT_DENOMINATOR)
if index not in matching_target_indices:
penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR
penalties[index] += Gwei(penalty_numerator // penalty_denominator)
previous_epoch = get_previous_epoch(state)
matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
for index in get_eligible_validator_indices(state):
if index not in matching_target_indices:
penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR
penalties[index] += Gwei(penalty_numerator // penalty_denominator)
return rewards, penalties
```
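For scale (toy numbers, not normative): a 32 ETH validator that has accumulated `inactivity_scores[index] = 16` while missing target votes:

```python
effective_balance = 32_000_000_000                  # Gwei
penalty_numerator = effective_balance * 16          # balance times inactivity score
penalty_denominator = 4 * 50_331_648                # INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR
penalty = penalty_numerator // penalty_denominator  # ~2_543 Gwei per epoch, growing with the score
```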
@ -391,7 +432,8 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
#### Modified `slash_validator`
*Note*: The function `slash_validator` is modified to use `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR`.
*Note*: The function `slash_validator` is modified to use `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR`
and use `PROPOSER_WEIGHT` when calculating the proposer reward.
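For reference, the changed lines amount to the following sketch (the full function body is elided in this diff; names per the Altair constants above):

```python
# Sketch of the Altair-modified lines only:
decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR)
# ...
whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR)
```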
```python
def slash_validator(state: BeaconState,
@ -444,35 +486,21 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
committee = get_beacon_committee(state, data.slot, data.index)
assert len(attestation.aggregation_bits) == len(committee)
if data.target.epoch == get_current_epoch(state):
epoch_participation = state.current_epoch_participation
justified_checkpoint = state.current_justified_checkpoint
else:
epoch_participation = state.previous_epoch_participation
justified_checkpoint = state.previous_justified_checkpoint
# Matching roots
is_matching_head = data.beacon_block_root == get_block_root_at_slot(state, data.slot)
is_matching_source = data.source == justified_checkpoint
is_matching_target = data.target.root == get_block_root(state, data.target.epoch)
assert is_matching_source
# Participation flag indices
participation_flag_indices = get_attestation_participation_flag_indices(state, data, state.slot - data.slot)
# Verify signature
assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
# Participation flag indices
participation_flag_indices = []
if is_matching_head and is_matching_target and state.slot == data.slot + MIN_ATTESTATION_INCLUSION_DELAY:
participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
if is_matching_source and state.slot <= data.slot + integer_squareroot(SLOTS_PER_EPOCH):
participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
if is_matching_target and state.slot <= data.slot + SLOTS_PER_EPOCH:
participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
# Update epoch participation flags
if data.target.epoch == get_current_epoch(state):
epoch_participation = state.current_epoch_participation
else:
epoch_participation = state.previous_epoch_participation
proposer_reward_numerator = 0
for index in get_attesting_indices(state, data, attestation.aggregation_bits):
for flag_index, weight in get_flag_indices_and_weights():
for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
proposer_reward_numerator += get_base_reward(state, index) * weight
@ -485,7 +513,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
#### Modified `process_deposit`
*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, `current_epoch_participation`.
*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`.
```python
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
@ -546,7 +574,8 @@ def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None
proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))
# Apply participant and proposer rewards
committee_indices = get_sync_committee_indices(state, get_current_epoch(state))
all_pubkeys = [v.pubkey for v in state.validators]
committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
participant_indices = [index for index, bit in zip(committee_indices, aggregate.sync_committee_bits) if bit]
for participant_index in participant_indices:
increase_balance(state, participant_index, participant_reward)
@ -595,12 +624,19 @@ def process_justification_and_finalization(state: BeaconState) -> None:
```python
def process_inactivity_updates(state: BeaconState) -> None:
# Score updates based on previous epoch participation, skip genesis epoch
if get_current_epoch(state) == GENESIS_EPOCH:
return
for index in get_eligible_validator_indices(state):
# Increase inactivity score of inactive validators
if index in get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)):
if state.inactivity_scores[index] > 0:
state.inactivity_scores[index] -= 1
elif is_in_inactivity_leak(state):
state.inactivity_scores[index] -= min(1, state.inactivity_scores[index])
else:
state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS
# Decrease the score of all validators for forgiveness when not during a leak
if not is_in_inactivity_leak(state):
state.inactivity_scores[index] -= min(INACTIVITY_SCORE_RECOVERY_RATE, state.inactivity_scores[index])
```
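A toy trace of the resulting score dynamics (assumed inputs, illustration only):

```python
# 10 missed epochs during a leak, then 2 participating epochs with the leak over
epochs = [(False, True)] * 10 + [(True, False)] * 2
score = 0
for participated, leaking in epochs:
    if participated:
        score -= min(1, score)      # slow decay while participating
    else:
        score += 4                  # INACTIVITY_SCORE_BIAS
    if not leaking:
        score -= min(16, score)     # INACTIVITY_SCORE_RECOVERY_RATE
# score reaches 40 by the end of the leak, then drops to 23, then 6:
# penalties shrink quickly once the chain finalizes again
```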
#### Rewards and penalties
@ -613,8 +649,7 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
if get_current_epoch(state) == GENESIS_EPOCH:
return
flag_indices_and_numerators = get_flag_indices_and_weights()
flag_deltas = [get_flag_index_deltas(state, index, numerator) for (index, numerator) in flag_indices_and_numerators]
flag_deltas = [get_flag_index_deltas(state, flag_index) for flag_index in range(len(PARTICIPATION_FLAG_WEIGHTS))]
deltas = flag_deltas + [get_inactivity_penalty_deltas(state)]
for (rewards, penalties) in deltas:
for index in range(len(state.validators)):
@ -658,5 +693,54 @@ def process_sync_committee_updates(state: BeaconState) -> None:
next_epoch = get_current_epoch(state) + Epoch(1)
if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
state.current_sync_committee = state.next_sync_committee
state.next_sync_committee = get_sync_committee(state, next_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
state.next_sync_committee = get_next_sync_committee(state)
```
## Initialize state for pure Altair testnets and test vectors
This helper function is only for initializing the state for pure Altair testnets and tests.
*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `ALTAIR_FORK_VERSION` as the current fork version, (2) utilizing the Altair `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) adding initial sync committees.
```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit]) -> BeaconState:
    fork = Fork(
        previous_version=GENESIS_FORK_VERSION,
        current_version=ALTAIR_FORK_VERSION,  # [Modified in Altair]
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    # [New in Altair] Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at genesis
    state.current_sync_committee = get_next_sync_committee(state)
    state.next_sync_committee = get_next_sync_committee(state)

    return state
```
View File
@ -26,21 +26,41 @@ Warning: this configuration is not definitive.
| Name | Value |
| - | - |
| `ALTAIR_FORK_VERSION` | `Version('0x01000000')` |
| `ALTAIR_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
## Fork to Altair
### Fork trigger
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at epoch `ALTAIR_FORK_EPOCH`.
Note that for pure Altair networks, we don't apply `upgrade_to_altair` since they start with Altair version logic.
### Upgrading the state
If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state change is made to upgrade to Altair.
The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `ALTAIR_FORK_EPOCH * SLOTS_PER_EPOCH`.
Care must be taken when transitioning through the fork boundary as implementations will need a modified state transition function that deviates from the Phase 0 spec.
In particular, the outer `state_transition` function defined in the Phase 0 spec will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`.
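As a non-normative illustration of where this hook could live, the sketch below assumes a hypothetical `maybe_upgrade_to_altair` helper invoked inside `process_slots` immediately after each per-slot increment; it is not part of the specification.

```python
def maybe_upgrade_to_altair(state: phase0.BeaconState) -> BeaconState:
    # Hypothetical hook (not spec code): called inside `process_slots` right after
    # each slot increment, so the upgrade fires even across skipped slots.
    if state.slot == ALTAIR_FORK_EPOCH * SLOTS_PER_EPOCH:
        return upgrade_to_altair(state)
    return state
```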
```python
def translate_participation(state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation]) -> None:
    for attestation in pending_attestations:
        data = attestation.data
        inclusion_delay = attestation.inclusion_delay
        # Translate attestation inclusion info to flag indices
        participation_flag_indices = get_attestation_participation_flag_indices(state, data, inclusion_delay)

        # Apply flags to all attesting validators
        epoch_participation = state.previous_epoch_participation
        for index in get_attesting_indices(state, data, attestation.aggregation_bits):
            for flag_index in participation_flag_indices:
                epoch_participation[index] = add_flag(epoch_participation[index], flag_index)


def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
    epoch = phase0.get_current_epoch(pre)
    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
```
@ -78,8 +98,12 @@ def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
```python
        # Inactivity
        inactivity_scores=[uint64(0) for _ in range(len(pre.validators))],
    )
    # Fill in previous epoch participation from the pre state's pending attestations
    translate_participation(post, pre.previous_epoch_attestations)

    # Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at the fork boundary
    post.current_sync_committee = get_next_sync_committee(post)
    post.next_sync_committee = get_next_sync_committee(post)
    return post
```
View File
@ -15,6 +15,7 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery
- [Warning](#warning)
- [Modifications in Altair](#modifications-in-altair)
- [MetaData](#metadata)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
@ -30,6 +31,7 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery
- [Messages](#messages)
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
- [GetMetaData v2](#getmetadata-v2)
- [Transitioning from v1 to v2](#transitioning-from-v1-to-v2)
- [The discovery domain: discv5](#the-discovery-domain-discv5)
@ -38,19 +40,36 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery
## Warning
This document is currently illustrative for early Altair testnets and some parts are subject to change.
Refer to the note in the [validator guide](./validator.md) for further details.
# Modifications in Altair
## MetaData
The `MetaData` stored locally by clients is updated with an additional field to communicate the sync committee subnet subscriptions:
```
(
seq_number: uint64
attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]
)
```
Where
- `seq_number` and `attnets` have the same meaning defined in the Phase 0 document.
- `syncnets` is a `Bitvector` representing the node's sync committee subnet subscriptions. This field should mirror the data in the node's ENR as outlined in the [validator guide](./validator.md#sync-committee-subnet-stability).
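In pyspec terms, the wire tuple above corresponds to an SSZ container of roughly the following shape (a sketch for orientation only; the normative definition is the tuple above):

```python
class MetaData(Container):
    seq_number: uint64
    attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
    syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]
```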
## The gossip domain: gossipsub
Gossip meshes are added in Altair to support the consensus activities of the sync committees.
Validators use an aggregation scheme to balance the processing and networking load across all of the relevant actors.
### Topics and messages
Topics follow the same specification as in the Phase 0 document.
New topics are added in Altair to support the sync committees and the beacon block topic is updated with the modified type.
The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 document.
@ -74,27 +93,43 @@ Altair changes the type of the global beacon block topic and adds one global top
##### `beacon_block`
The existing specification for this topic does not change from the Phase 0 document,
but the type of the payload does change to the (modified) `SignedBeaconBlock`.
This type changes due to the inclusion of the inner `BeaconBlockBody` that is modified in Altair.
See the [state transition document](./beacon-chain.md#beaconblockbody) for Altair for further details.
##### `sync_committee_contribution_and_proof`
This topic is used to propagate partially aggregated sync committee signatures to be included in future blocks.
The following validations MUST pass before forwarding the `signed_contribution_and_proof` on the network; define `contribution_and_proof = signed_contribution_and_proof.message`, `contribution = contribution_and_proof.contribution`, and the following function `get_sync_subcommittee_pubkeys` for convenience:
```python
def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]:
    # Committees assigned to `slot` sign for `slot - 1`
    # This creates the exceptional logic below when transitioning between sync committee periods
    next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
    if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
        sync_committee = state.current_sync_committee
    else:
        sync_committee = state.next_sync_committee

    # Return pubkeys for the subcommittee index
    sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
    i = subcommittee_index * sync_subcommittee_size
    return sync_committee.pubkeys[i:i + sync_subcommittee_size]
```
- _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`.
- _[IGNORE]_ The block being signed over (`contribution.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
- _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
- _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index`.
- _[REJECT]_ `contribution_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_sync_committee_aggregator(contribution_and_proof.selection_proof)` returns `True`.
- _[REJECT]_ The aggregator's validator index is in the declared subcommittee of the current sync committee --
i.e. `state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)`.
- _[REJECT]_ The `contribution_and_proof.selection_proof` is a valid signature of the `SyncAggregatorSelectionData` derived from the `contribution` by the validator with index `contribution_and_proof.aggregator_index`.
- _[REJECT]_ The aggregator signature, `signed_contribution_and_proof.signature`, is valid.
- _[REJECT]_ The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey derived from the participation info in `aggregation_bits` for the subcommittee specified by the `contribution.subcommittee_index`.
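As a non-normative sketch, the final aggregate-signature check might be implemented along these lines, assuming the hypothetical helper name `is_valid_contribution_signature` and the BLS and domain helpers from the Phase 0 and Altair specs:

```python
def is_valid_contribution_signature(state: BeaconState, contribution: SyncCommitteeContribution) -> bool:
    # Hypothetical helper (not spec code) illustrating the last [REJECT] condition above
    subcommittee_pubkeys = get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)
    participant_pubkeys = [
        pubkey for pubkey, bit in zip(subcommittee_pubkeys, contribution.aggregation_bits) if bit
    ]
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(contribution.slot))
    signing_root = compute_signing_root(contribution.beacon_block_root, domain)
    return bls.FastAggregateVerify(participant_pubkeys, signing_root, contribution.signature)
```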
#### Sync committee subnets
@ -109,16 +144,16 @@ The following validations MUST pass before forwarding the `sync_committee_signat
- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_signature.slot == current_slot`.
- _[IGNORE]_ The block being signed over (`sync_committee_signature.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_signature.validator_index`.
- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_signature.validator_index)`.
Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee.
- _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`.
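A non-normative sketch of this final signature check, assuming the hypothetical helper name `is_valid_sync_committee_signature` and the Phase 0 BLS helpers:

```python
def is_valid_sync_committee_signature(state: BeaconState,
                                      sync_committee_signature: SyncCommitteeSignature) -> bool:
    # Hypothetical helper (not spec code) illustrating the last [REJECT] condition above
    pubkey = state.validators[sync_committee_signature.validator_index].pubkey
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(sync_committee_signature.slot))
    signing_root = compute_signing_root(sync_committee_signature.beacon_block_root, domain)
    return bls.Verify(pubkey, signing_root, sync_committee_signature.signature)
```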
#### Sync committees and aggregation
The aggregation scheme closely follows the design of the attestation aggregation scheme.
Sync committee signatures are broadcast into "subnets" defined by a topic.
The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the [Altair validator guide](./validator.md#constants).
Sync committee members are divided into "subcommittees" which are then assigned to a subnet for the duration of tenure in the sync committee.
Individual validators can be duplicated in the broader sync committee such that they are included multiple times in a given subcommittee or across multiple subcommittees.
Unaggregated signatures (along with metadata) are sent as `SyncCommitteeSignature`s on the `sync_committee_{subnet_id}` topics.
@ -182,6 +217,8 @@ Request and Response remain unchanged. A `ForkDigest`-context is used to select
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[0]: # (eth2spec: skip)
| `fork_version` | Chunk SSZ type |
| ------------------------ | -------------------------- |
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
@ -195,17 +232,37 @@ Request and Response remain unchanged. A `ForkDigest`-context is used to select
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[1]: # (eth2spec: skip)
| `fork_version` | Chunk SSZ type |
| ------------------------ | -------------------------- |
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
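A client might dispatch on the context as in the following non-normative sketch (the helper name is hypothetical):

```python
def chunk_ssz_type_for_fork_version(fork_version: Version):
    # Hypothetical dispatch (not spec code) mirroring the table above
    if fork_version == GENESIS_FORK_VERSION:
        return phase0.SignedBeaconBlock
    if fork_version == ALTAIR_FORK_VERSION:
        return altair.SignedBeaconBlock
    raise ValueError("unrecognized fork version")
```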
#### GetMetaData v2
**Protocol ID:** `/eth2/beacon_chain/req/metadata/2/`
No Request Content.
Response Content:
```
(
MetaData
)
```
Requests the MetaData of a peer, using the new `MetaData` definition given above
that is extended from phase 0 in Altair. Other conditions for the `GetMetaData`
protocol are unchanged from the phase 0 p2p networking document.
### Transitioning from v1 to v2
In advance of the fork, implementations can opt in to running both v1 and v2 for a smooth transition.
This is non-breaking, and is recommended as soon as the fork specification is stable.

The v1 variants will be deprecated, and implementations should use v2 when available
(as negotiated with peers via LibP2P multistream-select).
The v1 method MAY be unregistered at the fork boundary.
@ -217,7 +274,7 @@ the responder MUST return the **InvalidRequest** response code.
The `attnets` key of the ENR is used as defined in the Phase 0 document.
An additional bitfield is added to the ENR under the key `syncnets` to facilitate sync committee subnet discovery.
The length of this bitfield is `SYNC_COMMITTEE_SUBNET_COUNT` where each bit corresponds to a distinct `subnet_id` for a specific sync committee subnet.
The `i`th bit is set in this bitfield if the validator is currently subscribed to the `sync_committee_{i}` topic.
See the [validator document](./validator.md#sync-committee-subnet-stability) for further details on how the new bits are used.
View File
@ -12,7 +12,6 @@
- [Constants](#constants)
- [Configuration](#configuration)
- [Misc](#misc)
- [Containers](#containers)
- [`LightClientSnapshot`](#lightclientsnapshot)
- [`LightClientUpdate`](#lightclientupdate)
@ -51,13 +50,6 @@ uses sync committees introduced in [this beacon chain extension](./beacon-chain.
| Name | Value |
| - | - |
| `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` |
## Containers
@ -94,9 +86,10 @@ class LightClientUpdate(Container):
### `LightClientStore`
```python
@dataclass
class LightClientStore(object):
    snapshot: LightClientSnapshot
    valid_updates: Set[LightClientUpdate]
```
## Helper functions
@ -115,7 +108,8 @@ A light client maintains its state in a `store` object of type `LightClientStore
#### `validate_light_client_update`
```python
def validate_light_client_update(snapshot: LightClientSnapshot,
                                 update: LightClientUpdate,
                                 genesis_validators_root: Root) -> None:
    # Verify update slot is larger than snapshot slot
    assert update.header.slot > snapshot.header.slot
```
@ -181,20 +175,21 @@ def apply_light_client_update(snapshot: LightClientSnapshot, update: LightClient
```python
def process_light_client_update(store: LightClientStore, update: LightClientUpdate, current_slot: Slot,
                                genesis_validators_root: Root) -> None:
    validate_light_client_update(store.snapshot, update, genesis_validators_root)
    store.valid_updates.add(update)

    update_timeout = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    if (
        sum(update.sync_committee_bits) * 3 >= len(update.sync_committee_bits) * 2
        and update.finality_header != BeaconBlockHeader()
    ):
        # Apply update if (1) 2/3 quorum is reached and (2) we have a finality proof.
        # Note that (2) means that the current light client design needs finality.
        # It may be changed to re-organizable light client design. See the on-going issue eth2.0-specs#2182.
        apply_light_client_update(store.snapshot, update)
        store.valid_updates = set()
    elif current_slot > store.snapshot.header.slot + update_timeout:
        # Forced best update when the update timeout has elapsed
        apply_light_client_update(store.snapshot,
                                  max(store.valid_updates, key=lambda update: sum(update.sync_committee_bits)))
        store.valid_updates = set()
```
View File
@ -18,7 +18,7 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./
- [`SyncCommitteeContribution`](#synccommitteecontribution)
- [`ContributionAndProof`](#contributionandproof)
- [`SignedContributionAndProof`](#signedcontributionandproof)
- [`SyncAggregatorSelectionData`](#syncaggregatorselectiondata)
- [Validator assignments](#validator-assignments)
- [Sync Committee](#sync-committee)
- [Lookahead](#lookahead)
@ -49,18 +49,18 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./
## Introduction
This document represents the expected behavior of an "honest validator" with respect to the Altair upgrade of the Ethereum 2.0 protocol.
It builds on the [previous document for the behavior of an "honest validator" from Phase 0](../phase0/validator.md) of the Ethereum 2.0 protocol.
This previous document is referred to below as the "Phase 0 document".
Altair introduces a new type of committee: the sync committee. Sync committees are responsible for signing each block of the canonical chain and there exists an efficient algorithm for light clients to sync the chain using the output of the sync committees.
See the [sync protocol](./sync-protocol.md) for further details on the light client sync.
Under this network upgrade, validators track their participation in this new committee type and produce the relevant signatures as required.
Block proposers incorporate the (aggregated) sync committee signatures into each block they produce.
## Prerequisites
All terminology, constants, functions, and protocol mechanics defined in the [Altair -- The Beacon Chain](./beacon-chain.md) doc are requisite for this document and used throughout.
Please see this document before continuing and use as a reference throughout.
## Warning
@ -74,7 +74,7 @@ This document is currently illustrative for early Altair testnets and some parts
| Name | Value | Unit |
| - | - | :-: |
| `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE` | `2**2` (= 4) | validators |
| `SYNC_COMMITTEE_SUBNET_COUNT` | `4` | The number of sync committee subnets used in the gossipsub aggregation protocol. |
## Containers
@ -126,10 +126,10 @@ class SignedContributionAndProof(Container):
```python
    signature: BLSSignature
```
### `SyncAggregatorSelectionData`
```python
class SyncAggregatorSelectionData(Container):
    slot: Slot
    subcommittee_index: uint64
```
@ -143,6 +143,11 @@ A validator determines beacon committee assignments and beacon block proposal du
To determine sync committee assignments, a validator can run the following function: `is_assigned_to_sync_committee(state, epoch, validator_index)` where `epoch` is an epoch number within the current or next sync committee period.
This function is a predicate indicating the presence or absence of the validator in the corresponding sync committee for the queried sync committee period.
*Note*: Being assigned to a sync committee for a given `slot` means that the validator produces and broadcasts signatures for `slot - 1` for inclusion in `slot`.
This means that when assigned to an `epoch`, sync committee signatures must be produced and broadcast for slots in the range `[compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)`
rather than for the range `[compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)`.
To reduce complexity during the Altair fork, sync committees are not expected to produce signatures for `compute_epoch_at_slot(ALTAIR_FORK_EPOCH) - 1`.
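A non-normative sketch of the resulting signing slots for an assigned `epoch` (the helper name is hypothetical):

```python
def get_sync_signature_slots(epoch: Epoch) -> Sequence[Slot]:
    # Hypothetical helper (not spec code): slots for which an `epoch`-assigned member
    # produces signatures, shifted back by one since the committee at `slot` signs for `slot - 1`
    start_slot = compute_start_slot_at_epoch(epoch)
    return [Slot(slot) for slot in range(start_slot - 1, start_slot + SLOTS_PER_EPOCH - 1)]
```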
```python
def compute_sync_committee_period(epoch: Epoch) -> uint64:
    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
```
@ -168,17 +173,16 @@ def is_assigned_to_sync_committee(state: BeaconState,
### Lookahead
The sync committee shufflings give validators 1 sync committee period of lookahead which amounts to `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs.
At any given `epoch`, the `BeaconState` contains the current `SyncCommittee` and the next `SyncCommittee`.
Once every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs, the next `SyncCommittee` becomes the current `SyncCommittee` and the next committee is computed and stored.
*Note*: The data required to compute a given committee is not cached in the `BeaconState` after committees are calculated at the period boundaries.
For this reason, *always* get committee assignments via the fields of the `BeaconState` (`current_sync_committee` and `next_sync_committee`) or use the above reference code.
A validator should plan for future sync committee assignments by noting the sync committee periods in which they are selected to participate.
Specifically, a validator should:
* Upon (re)syncing the chain and upon sync committee period boundaries, check for assignments in the current and next sync committee periods.
* If the validator is in the current sync committee period, then they perform the responsibilities below for sync committee rewards.
* If the validator is in the next sync committee period, they should wait until the next `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` boundary and then perform the responsibilities throughout that period (a minimal membership check is sketched below).
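A non-normative sketch of such a membership check, assuming the hypothetical helper name `get_sync_committee_duty_flags` and `Tuple` from Python's `typing` module:

```python
def get_sync_committee_duty_flags(state: BeaconState, validator_index: ValidatorIndex) -> Tuple[bool, bool]:
    # Hypothetical helper (not spec code): (assigned in current period, assigned in next period)
    current_epoch = get_current_epoch(state)
    next_period_epoch = Epoch(current_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
    return (
        is_assigned_to_sync_committee(state, current_epoch, validator_index),
        is_assigned_to_sync_committee(state, next_period_epoch, validator_index),
    )
```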
## Beacon chain responsibilities
@ -187,8 +191,8 @@ A validator maintains the responsibilities given in the Phase 0 document.
Block proposals are modified to incorporate the sync committee signatures as detailed below.
When assigned to a sync committee, validators have a new responsibility to sign and broadcast beacon block roots during each slot of the sync committee period.
These signatures are aggregated and routed to the proposer over gossip for inclusion into a beacon block.
Assignments to a particular sync committee are infrequent at normal validator counts; however, an action every slot is required when in the current active sync committee.
### Block proposal
@ -202,34 +206,35 @@ No change to [Preparing for a `BeaconBlock`](../phase0/validator.md#preparing-fo
#### Constructing the `BeaconBlockBody`
Each section of [Constructing the `BeaconBlockBody`](../phase0/validator.md#constructing-the-beaconblockbody) should be followed.
After constructing the `BeaconBlockBody` as per that section, the proposer has an additional task to include the sync committee signatures:
##### Sync committee
The proposer receives a number of `SyncCommitteeContribution`s (wrapped in `SignedContributionAndProof`s on the wire) from validators in the sync committee who are selected to partially aggregate signatures from independent subcommittees formed by breaking the full sync committee into `SYNC_COMMITTEE_SUBNET_COUNT` pieces (see below for details).
The proposer collects the contributions that match their local view of the chain (i.e. `contribution.beacon_block_root == block.parent_root`) for further aggregation when preparing a block.
Of these contributions, proposers should select the best contribution seen across all aggregators for each subnet/subcommittee.
A contribution with more valid signatures is better than a contribution with fewer signatures.
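A non-normative sketch of this selection, assuming the hypothetical helper name `select_best_contributions` and `Dict`/`Sequence`/`Set` from Python's `typing` module:

```python
def select_best_contributions(block: BeaconBlock,
                              contributions: Sequence[SyncCommitteeContribution]) -> Set[SyncCommitteeContribution]:
    # Hypothetical helper (not spec code): keep, per subcommittee, the matching
    # contribution with the most participation bits set
    best: Dict[uint64, SyncCommitteeContribution] = {}
    for contribution in contributions:
        if contribution.beacon_block_root != block.parent_root:
            continue  # Only contributions matching the proposer's local view
        current_best = best.get(contribution.subcommittee_index)
        if current_best is None or sum(contribution.aggregation_bits) > sum(current_best.aggregation_bits):
            best[contribution.subcommittee_index] = contribution
    return set(best.values())
```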
Recall `block.body.sync_aggregate.sync_committee_bits` is a `Bitvector` where the `i`th bit is `True` if the corresponding validator in the sync committee has produced a valid signature,
and that `block.body.sync_aggregate.sync_committee_signature` is the aggregate BLS signature combining all of the valid signatures.
Given a collection of the best seen `contributions` (with no repeating `subcommittee_index` values) and the `BeaconBlock` under construction,
the proposer processes them as follows:
```python
def process_sync_committee_contributions(block: BeaconBlock,
                                         contributions: Set[SyncCommitteeContribution]) -> None:
    sync_aggregate = SyncAggregate()
    signatures = []
    sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT

    for contribution in contributions:
        subcommittee_index = contribution.subcommittee_index
        for index, participated in enumerate(contribution.aggregation_bits):
            if participated:
                participant_index = sync_subcommittee_size * subcommittee_index + index
                sync_aggregate.sync_committee_bits[participant_index] = True
        signatures.append(contribution.signature)
```
@ -247,30 +252,30 @@ No change to [Packaging into a `SignedBeaconBlock`](../phase0/validator.md#packa
### Attesting and attestation aggregation
Refer to the phase 0 document for the [attesting](../phase0/validator.md#attesting) and [attestation aggregation](../phase0/validator.md#attestation-aggregation) responsibilities.
There is no change compared to the phase 0 document.
### Sync committees
Sync committee members employ an aggregation scheme to reduce load on the global proposer channel that is monitored by all potential proposers to be able to include the full output of the sync committee every slot.
Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeSignature`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers.
This process occurs each slot.
#### Sync committee signatures
##### Prepare sync committee signature
If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of `slot - 1`.
This logic is triggered upon the same conditions as when producing an attestation.
That is, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first.
`get_sync_committee_signature(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.
```python
def get_sync_committee_signature(state: BeaconState,
                                 block_root: Root,
                                 validator_index: ValidatorIndex,
                                 privkey: int) -> SyncCommitteeSignature:
    epoch = get_current_epoch(state)
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch)
```
@ -285,18 +290,24 @@ def get_sync_committee_signature(state: BeaconState,
The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic.
The `subnet_id` is derived from the position in the sync committee such that the sync committee is divided into "subcommittees".
`subnet_id` can be computed via `compute_subnets_for_sync_committee(state, validator_index)` where `state` is a `BeaconState` during the matching sync committee period.
*Note*: This function returns multiple deduplicated subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees.
```python
def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Set[uint64]:
    next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
    if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
        sync_committee = state.current_sync_committee
    else:
        sync_committee = state.next_sync_committee

    target_pubkey = state.validators[validator_index].pubkey
    sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey]
    return set([
        uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
        for index in sync_committee_indices
    ])
```
*Note*: Subnet assignment does not change for the duration of a validator's assignment to a given sync committee.
@ -305,18 +316,20 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali
#### Sync committee contributions
Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeSignature`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block.
##### Aggregation selection
A validator is selected to aggregate based on the value returned by `is_sync_committee_aggregator()` where `signature` is the BLS signature returned by `get_sync_committee_selection_proof()`.
The signature function takes a `BeaconState` with the relevant sync committees for the queried `slot` (i.e. `state.slot` is within the span covered by the current or next sync committee period), the `subcommittee_index` equal to the `subnet_id`, and the `privkey` is the BLS private key associated with the validator.
```python
def get_sync_committee_selection_proof(state: BeaconState,
                                       slot: Slot,
                                       subcommittee_index: uint64,
                                       privkey: int) -> BLSSignature:
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF, compute_epoch_at_slot(slot))
    signing_data = SyncAggregatorSelectionData(
        slot=slot,
        subcommittee_index=subcommittee_index,
    )
```
@ -330,11 +343,11 @@ def is_sync_committee_aggregator(signature: BLSSignature) -> bool:
```python
    return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
```
*NOTE*: The set of aggregators generally changes every slot; however, the assignments can be computed ahead of time as soon as the committee is known.
##### Construct sync committee contribution
If a validator is selected to aggregate the `SyncCommitteeSignature`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`.
Given all of the (valid) collected `sync_committee_signatures: Set[SyncCommitteeSignature]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields:
@ -353,18 +366,21 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co
###### Aggregation bits
Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee.
An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits` (a sketch of this mapping follows below).

For example, if a validator with index `2044` is pseudo-randomly sampled to sync committee index `135`, then this sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution.

*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeSignature`.
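A non-normative sketch of this mapping, accounting for duplicate membership (the helper name is hypothetical):

```python
def get_subcommittee_positions(state: BeaconState,
                               subcommittee_index: uint64,
                               validator_index: ValidatorIndex) -> Sequence[uint64]:
    # Hypothetical helper (not spec code): all positions of the validator within the
    # given subcommittee; a duplicated member maps to several bits
    target_pubkey = state.validators[validator_index].pubkey
    sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
    start = subcommittee_index * sync_subcommittee_size
    subcommittee_pubkeys = state.current_sync_committee.pubkeys[start:start + sync_subcommittee_size]
    return [uint64(position) for position, pubkey in enumerate(subcommittee_pubkeys) if pubkey == target_pubkey]
```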
###### Signature
Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_signatures` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`.
The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee.
##### Broadcast sync committee contribution
If the validator is selected to aggregate (`is_sync_committee_aggregator()`), then they broadcast their best aggregate as a `SignedContributionAndProof` to the global aggregate channel (`sync_committee_contribution_and_proof` topic) two-thirds of the way through the `slot`, that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`.
Selection proofs are provided in `ContributionAndProof` to prove to the gossip channel that the validator has been selected as an aggregator.
@ -393,8 +409,8 @@ def get_contribution_and_proof(state: BeaconState,
Then `signed_contribution_and_proof = SignedContributionAndProof(message=contribution_and_proof, signature=signature)` is constructed and broadcast. Where `signature` is obtained from:
```python
def get_contribution_and_proof_signature(state: BeaconState,
                                         contribution_and_proof: ContributionAndProof,
                                         privkey: int) -> BLSSignature:
    contribution = contribution_and_proof.contribution
    domain = get_domain(state, DOMAIN_CONTRIBUTION_AND_PROOF, compute_epoch_at_slot(contribution.slot))
```
@ -404,19 +420,19 @@ def get_contribution_and_proof_signature(state: BeaconState,
## Sync committee subnet stability
The sync committee subnets need special care to ensure stability given the relatively low number of validators involved in the sync committee at any particular time.
To provide this stability, a validator must do the following:
* Maintain advertisement of the subnet the validator in the sync committee is assigned to in their node's ENR as soon as they have joined the subnet.
Subnet assignments are known `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs in advance and can be computed with `compute_subnets_for_sync_committee` defined above.
ENR advertisement is indicated by setting the appropriate bit(s) of the bitfield found under the `syncnets` key in the ENR corresponding to the derived `subnet_id`(s).
Any bits modified for the sync committee responsibilities are unset in the ENR once the node no longer has any validators in the subcommittee.
*Note*: The first sync committee from phase 0 to the Altair fork will not be known until the fork happens, which implies subnet assignments are not known until then.
Early sync committee members should listen for topic subscriptions from peers and employ discovery via the ENR advertisements near the fork boundary to form initial subnets.
Some early sync committee rewards may be missed while the initial subnets form.
* To join a sync committee subnet, select a random number of epochs before the end of the current sync committee period between 1 and `SYNC_COMMITTEE_SUBNET_COUNT`, inclusive.
Validators should join their member subnet at the beginning of the epoch they have randomly selected.
For example, if the next sync committee period starts at epoch `853,248` and the validator randomly selects an offset of `3`, they should join the subnet at the beginning of epoch `853,245`.
Validators should leverage the lookahead period on sync committee assignments so that they can join the appropriate subnets ahead of their assigned sync committee period.
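A non-normative sketch of the randomized join epoch (the helper name is hypothetical; `secrets` is Python's standard CSPRNG module):

```python
import secrets

def compute_subnet_join_epoch(next_period_start_epoch: Epoch) -> Epoch:
    # Hypothetical helper (not spec code): join 1..SYNC_COMMITTEE_SUBNET_COUNT epochs
    # (uniformly at random) before the next sync committee period starts
    offset = secrets.randbelow(int(SYNC_COMMITTEE_SUBNET_COUNT)) + 1
    return Epoch(next_period_start_epoch - offset)
```

For the example above, `next_period_start_epoch = 853248` with a sampled offset of `3` yields a join epoch of `853245`.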
View File
@ -37,7 +37,7 @@ def get_new_dependencies(state: BeaconState) -> Set[DataCommitment]:
```python
def get_all_dependencies(store: Store, block: BeaconBlock) -> Set[DataCommitment]:
    if compute_epoch_at_slot(block.slot) < SHARDING_FORK_EPOCH:
        return set()
    else:
        latest = get_new_dependencies(store.block_states[hash_tree_root(block)])
```
View File
@ -13,24 +13,27 @@
- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
- [Execution](#execution)
- [Configuration](#configuration)
- [Containers](#containers)
- [Extended containers](#extended-containers)
- [`BeaconBlockBody`](#beaconblockbody)
- [`BeaconState`](#beaconstate)
- [New containers](#new-containers)
- [`ExecutionPayload`](#executionpayload)
- [`ExecutionPayloadHeader`](#executionpayloadheader)
- [Protocols](#protocols)
- [`ExecutionEngine`](#executionengine)
- [`new_block`](#new_block)
- [Helper functions](#helper-functions)
- [Misc](#misc)
- [`is_execution_enabled`](#is_execution_enabled)
- [`is_transition_completed`](#is_transition_completed)
- [`is_transition_block`](#is_transition_block)
- [`compute_time_at_slot`](#compute_time_at_slot)
- [Block processing](#block-processing)
- [Execution payload processing](#execution-payload-processing)
- [`process_execution_payload`](#process_execution_payload)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -38,7 +41,7 @@
## Introduction
This is a patch implementing the executable beacon chain proposal.
It enshrines transaction execution and validity as a first class citizen at the core of the beacon chain.
## Custom types
@ -50,20 +53,24 @@ We define the following Python custom types for type hinting and readability:
## Constants
### Execution
| Name | Value |
| - | - |
| `MAX_BYTES_PER_OPAQUE_TRANSACTION` | `uint64(2**20)` (= 1,048,576) |
| `MAX_EXECUTION_TRANSACTIONS` | `uint64(2**14)` (= 16,384) |
| `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) |
## Configuration
Warning: this configuration is not definitive.
| Name | Value |
| - | - |
| `MERGE_FORK_VERSION` | `Version('0x02000000')` |
| `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
| `TRANSITION_TOTAL_DIFFICULTY` | **TBD** |
## Containers
### Extended containers
@ -73,79 +80,122 @@ order and append any additional fields to the end.
#### `BeaconBlockBody`
*Note*: `BeaconBlockBody` fields remain unchanged other than the addition of `execution_payload`.
```python
class BeaconBlockBody(phase0.BeaconBlockBody):
    execution_payload: ExecutionPayload  # [New in Merge]
```
#### `BeaconState`
*Note*: `BeaconState` fields remain unchanged other than addition of `latest_execution_payload_header`.
```python
class BeaconState(phase0.BeaconState):
    # Execution-layer
    latest_execution_payload_header: ExecutionPayloadHeader  # [New in Merge]
```
### New containers
#### `ExecutionPayload`
The execution payload included in a `BeaconBlockBody`.
```python
class ExecutionPayload(Container):
    block_hash: Hash32  # Hash of execution block
    parent_hash: Hash32
    coinbase: Bytes20
    state_root: Bytes32
    number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    receipt_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    transactions: List[OpaqueTransaction, MAX_EXECUTION_TRANSACTIONS]
```
#### `ExecutionPayloadHeader`
The execution payload header included in a `BeaconState`.
*Note:* Holds execution payload data without transaction bodies.
```python
class ExecutionPayloadHeader(Container):
    block_hash: Hash32  # Hash of execution block
    parent_hash: Hash32
    coinbase: Bytes20
    state_root: Bytes32
    number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    receipt_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    transactions_root: Root
```
## Protocols
### `ExecutionEngine`
The `ExecutionEngine` protocol separates the consensus and execution sub-systems.
The consensus implementation references an instance of this sub-system with `EXECUTION_ENGINE`.
The following methods are added to the `ExecutionEngine` protocol for use in the state transition:
#### `new_block`
Verifies the given `execution_payload` with respect to execution state transition, and persists changes if valid.
The body of this function is implementation dependent.
The Consensus API may be used to implement this with an external execution engine.
```python
def new_block(self: ExecutionEngine, execution_payload: ExecutionPayload) -> bool:
    """
    Returns True if the ``execution_payload`` was verified and processed successfully, False otherwise.
    """
    ...
```
## Helper functions
### Misc
#### `is_execution_enabled`
```python
def is_execution_enabled(state: BeaconState, block: BeaconBlock) -> bool:
    return is_transition_completed(state) or is_transition_block(state, block)
```
#### `is_transition_completed`
```python
def is_transition_completed(state: BeaconState) -> bool:
    return state.latest_execution_payload_header != ExecutionPayloadHeader()
```
#### `is_transition_block`
```python
def is_transition_block(state: BeaconState, block: BeaconBlock) -> bool:
    return not is_transition_completed(state) and block.body.execution_payload != ExecutionPayload()
```
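The two predicates partition the chain into three phases, as in this non-normative sketch (the helper name is hypothetical):

```python
def get_transition_phase(state: BeaconState, block: BeaconBlock) -> str:
    # Hypothetical helper (not spec code) naming the phases of the merge transition
    if is_transition_completed(state):
        return "post-transition"   # every block carries an execution payload
    if is_transition_block(state, block):
        return "transition-block"  # first block with a non-empty ExecutionPayload
    return "pre-transition"        # execution payload processing is skipped
```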
#### `compute_time_at_slot`
*Note*: This function is unsafe with respect to overflows and underflows.
```python
def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64:
    slots_since_genesis = slot - GENESIS_SLOT
    return uint64(state.genesis_time + slots_since_genesis * SECONDS_PER_SLOT)
```
### Block processing
@ -156,54 +206,41 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
```python
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)
    # Pre-merge, skip execution payload processing
    if is_execution_enabled(state, block):
        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)  # [New in Merge]
```
#### Execution payload processing
##### `process_execution_payload`
```python
def process_application_payload(state: BeaconState, body: BeaconBlockBody) -> None:
def process_execution_payload(state: BeaconState,
                              execution_payload: ExecutionPayload,
                              execution_engine: ExecutionEngine) -> None:
    """
    Note: This function is designed to be able to be run in parallel with the other `process_block` sub-functions
    """
    if is_transition_completed(state):
        assert execution_payload.parent_hash == state.latest_execution_payload_header.block_hash
        assert execution_payload.number == state.latest_execution_payload_header.number + 1
    if not is_transition_completed(state):
        assert body.application_payload == ApplicationPayload()
        return
    assert execution_payload.timestamp == compute_time_at_slot(state, state.slot)
    if not is_transition_block(state, body):
        assert body.application_payload.parent_hash == state.latest_application_block_header.block_hash
        assert body.application_payload.number == state.latest_application_block_header.number + 1
    assert execution_engine.new_block(execution_payload)
    application_state = get_application_state(state.latest_application_block_header.state_root)
    application_state_transition(application_state, body.application_payload)
    state.latest_application_block_header = ApplicationBlockHeader(
        block_hash=application_payload.block_hash,
        parent_hash=application_payload.parent_hash,
        coinbase=application_payload.coinbase,
        state_root=application_payload.state_root,
        number=application_payload.number,
        gas_limit=application_payload.gas_limit,
        gas_used=application_payload.gas_used,
        receipt_root=application_payload.receipt_root,
        logs_bloom=application_payload.logs_bloom,
        transactions_root=hash_tree_root(application_payload.transactions),
    state.latest_execution_payload_header = ExecutionPayloadHeader(
        block_hash=execution_payload.block_hash,
        parent_hash=execution_payload.parent_hash,
        coinbase=execution_payload.coinbase,
        state_root=execution_payload.state_root,
        number=execution_payload.number,
        gas_limit=execution_payload.gas_limit,
        gas_used=execution_payload.gas_used,
        timestamp=execution_payload.timestamp,
        receipt_root=execution_payload.receipt_root,
        logs_bloom=execution_payload.logs_bloom,
        transactions_root=hash_tree_root(execution_payload.transactions),
    )
```

View File

@ -8,11 +8,16 @@
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Helpers](#helpers)
- [Protocols](#protocols)
- [`ExecutionEngine`](#executionengine)
- [`set_head`](#set_head)
- [`finalize_block`](#finalize_block)
- [Containers](#containers)
- [`PowBlock`](#powblock)
- [Helper functions](#helper-functions)
- [`get_pow_block`](#get_pow_block)
- [`is_valid_transition_block`](#is_valid_transition_block)
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
- [`on_block`](#on_block)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@ -24,21 +29,60 @@ This is the modification of the fork choice according to the executable beacon c
*Note*: It introduces the process of transition from the last PoW block to the first PoS block.
### Helpers
## Protocols
### `ExecutionEngine`
The following methods are added to the `ExecutionEngine` protocol for use in the fork choice:
#### `set_head`
Re-organizes the execution payload chain and corresponding state to make `block_hash` the head.
The body of this function is implementation dependent.
The Consensus API may be used to implement this with an external execution engine.
```python
def set_head(self: ExecutionEngine, block_hash: Hash32) -> bool:
    """
    Returns True if the ``block_hash`` was successfully set as head of the execution payload chain.
    """
    ...
```
#### `finalize_block`
Applies finality to the execution state: it irreversibly persists the chain of all execution payloads
and corresponding state, up to and including `block_hash`.
The body of this function is implementation dependent.
The Consensus API may be used to implement this with an external execution engine.
```python
def finalize_block(self: ExecutionEngine, block_hash: Hash32) -> bool:
    """
    Returns True if the data up to and including ``block_hash`` was successfully finalized.
    """
    ...
```
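How a client wires these calls into its fork choice is implementation specific. A hedged sketch of the intended call pattern follows; the function and its name are illustrative, not normative:
```python
def notify_execution_engine(store: Store, execution_engine: ExecutionEngine) -> None:
    # On a head change: re-point the execution chain at the payload of the new head.
    head_state = store.block_states[get_head(store)]
    if is_transition_completed(head_state):
        assert execution_engine.set_head(head_state.latest_execution_payload_header.block_hash)

    # On finality advance: irreversibly persist the finalized payload chain.
    finalized_state = store.block_states[store.finalized_checkpoint.root]
    if is_transition_completed(finalized_state):
        assert execution_engine.finalize_block(finalized_state.latest_execution_payload_header.block_hash)
```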
## Containers
#### `PowBlock`
```python
class PowBlock(Container):
    block_hash: Bytes32
    block_hash: Hash32
    is_processed: boolean
    is_valid: boolean
    total_difficulty: uint256
```
## Helper functions
#### `get_pow_block`
Let `get_pow_block(hash: Bytes32) -> PowBlock` be the function that given the hash of the PoW block returns its data.
Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that given the hash of the PoW block returns its data.
*Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks that haven't been processed yet. Either extending this existing method or implementing a new one is required.
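As an illustration of the extension problem, a client could back `get_pow_block` with plain JSON-RPC against a local PoW node. The endpoint URL and the `is_processed`/`is_valid` mapping below are assumptions, precisely because `eth_getBlockByHash` alone cannot distinguish the two cases:
```python
import requests

def get_pow_block(block_hash: Hash32) -> PowBlock:
    response = requests.post(
        "http://localhost:8545",  # assumed PoW node JSON-RPC endpoint
        json={
            "jsonrpc": "2.0",
            "method": "eth_getBlockByHash",
            "params": ["0x" + block_hash.hex(), False],
            "id": 1,
        },
    ).json()
    block = response.get("result")
    if block is None:
        # Unknown block: could be invalid *or* simply not processed yet.
        return PowBlock(block_hash=block_hash, is_processed=False, is_valid=False, total_difficulty=uint256(0))
    return PowBlock(
        block_hash=block_hash,
        is_processed=True,
        is_valid=True,  # assumption: a block the node returns has passed its validations
        total_difficulty=uint256(int(block["totalDifficulty"], 16)),
    )
```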
@ -47,12 +91,12 @@ Let `get_pow_block(hash: Bytes32) -> PowBlock` be the function that given the ha
Used by fork-choice handler, `on_block`.
```python
def is_valid_transition_block(block: PowBlock) -> boolean:
def is_valid_transition_block(block: PowBlock) -> bool:
    is_total_difficulty_reached = block.total_difficulty >= TRANSITION_TOTAL_DIFFICULTY
    return block.is_valid and is_total_difficulty_reached
```
### Updated fork-choice handlers
## Updated fork-choice handlers
#### `on_block`
@ -75,9 +119,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
    # [New in Merge]
    if is_transition_block(pre_state, block.body):
    if is_transition_block(pre_state, block):
        # Delay consideration of block until PoW block is processed by the PoW node
        pow_block = get_pow_block(block.body.application_payload.parent_hash)
        pow_block = get_pow_block(block.body.execution_payload.parent_hash)
        assert pow_block.is_processed
        assert is_valid_transition_block(pow_block)
@ -113,4 +157,3 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
        store.justified_checkpoint = state.current_justified_checkpoint
```

View File

@ -12,12 +12,15 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Protocols](#protocols)
- [`ExecutionEngine`](#executionengine)
- [`assemble_block`](#assemble_block)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
- [Block proposal](#block-proposal)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
- [Application Payload](#application-payload)
- [Execution Payload](#execution-payload)
- [`get_pow_chain_head`](#get_pow_chain_head)
- [`produce_application_payload`](#produce_application_payload)
- [`produce_execution_payload`](#produce_execution_payload)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -32,39 +35,60 @@ This document is an extension of the [Phase 0 -- Validator](../phase0/validator.
All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [The Merge](./beacon-chain.md) are requisite for this document and used throughout. Please see related Beacon Chain doc before continuing and use them as a reference throughout.
## Protocols
### `ExecutionEngine`
The following methods are added to the `ExecutionEngine` protocol for use as a validator:
#### `assemble_block`
Produces a new instance of an execution payload, with the specified `timestamp`,
on top of the execution payload chain tip identified by `block_hash`.
The body of this function is implementation dependent.
The Consensus API may be used to implement this with an external execution engine.
```python
def assemble_block(self: ExecutionEngine, block_hash: Hash32, timestamp: uint64) -> ExecutionPayload:
    ...
```
## Beacon chain responsibilities
All validator responsibilities remain unchanged other than those noted below. Namely, the transition block handling and the addition of `ApplicationPayload`.
All validator responsibilities remain unchanged other than those noted below. Namely, the transition block handling and the addition of `ExecutionPayload`.
### Block proposal
#### Constructing the `BeaconBlockBody`
##### Application Payload
##### Execution Payload
###### `get_pow_chain_head`
Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of the PoW chain. The body of the function is implementation specific.
###### `produce_application_payload`
###### `produce_execution_payload`
Let `produce_application_payload(parent_hash: Bytes32) -> ApplicationPayload` be the function that produces a new instance of application payload.
The body of this function is implementation dependent.
Let `produce_execution_payload(parent_hash: Hash32, timestamp: uint64) -> ExecutionPayload` be the function that produces a new instance of execution payload.
The `ExecutionEngine` protocol is used for the implementation specific part of execution payload proposals.
* Set `block.body.application_payload = get_application_payload(state)` where:
* Set `block.body.execution_payload = get_execution_payload(state)` where:
```python
def get_application_payload(state: BeaconState) -> ApplicationPayload:
def get_execution_payload(state: BeaconState, execution_engine: ExecutionEngine) -> ExecutionPayload:
    if not is_transition_completed(state):
        pow_block = get_pow_chain_head()
        if not is_valid_transition_block(pow_block):
            # Pre-merge, empty payload
            return ApplicationPayload()
            return ExecutionPayload()
        else:
            # Signify merge via producing on top of the last PoW block
            return produce_application_payload(pow_block.block_hash)
            timestamp = compute_time_at_slot(state, state.slot)
            return execution_engine.assemble_block(pow_block.block_hash, timestamp)
    # Post-merge, normal payload
    application_parent_hash = state.latest_application_block_header.block_hash
    return produce_application_payload(application_parent_hash)
    execution_parent_hash = state.latest_execution_payload_header.block_hash
    timestamp = compute_time_at_slot(state, state.slot)
    return execution_engine.assemble_block(execution_parent_hash, timestamp)
```

View File

@ -157,6 +157,7 @@ We define the following Python custom types for type hinting and readability:
| `ValidatorIndex` | `uint64` | a validator registry index |
| `Gwei` | `uint64` | an amount in Gwei |
| `Root` | `Bytes32` | a Merkle root |
| `Hash32` | `Bytes32` | a 256-bit hash |
| `Version` | `Bytes4` | a fork version number |
| `DomainType` | `Bytes4` | a domain type |
| `ForkDigest` | `Bytes4` | a digest of the current fork data |
@ -164,6 +165,7 @@ We define the following Python custom types for type hinting and readability:
| `BLSPubkey` | `Bytes48` | a BLS12-381 public key |
| `BLSSignature` | `Bytes96` | a BLS12-381 signature |
## Constants
The following values are (non-configurable) constants used throughout the specification.
@ -374,7 +376,7 @@ class PendingAttestation(Container):
class Eth1Data(Container):
    deposit_root: Root
    deposit_count: uint64
    block_hash: Bytes32
    block_hash: Hash32
```
#### `HistoricalBatch`

View File

@ -93,6 +93,8 @@ It consists of four main sections:
- [Why is it called Req/Resp and not RPC?](#why-is-it-called-reqresp-and-not-rpc)
- [Why do we allow empty responses in block requests?](#why-do-we-allow-empty-responses-in-block-requests)
- [Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?](#why-does-beaconblocksbyrange-let-the-server-choose-which-branch-to-send-blocks-from)
- [Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?](#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs)
- [Why must the proposer signature be checked when backfilling blocks in the database?](#why-must-the-proposer-signature-be-checked-when-backfilling-blocks-in-the-database)
- [What's the effect of empty slots on the sync algorithm?](#whats-the-effect-of-empty-slots-on-the-sync-algorithm)
- [Discovery](#discovery)
- [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht)
@ -171,6 +173,7 @@ This section outlines constants that are used in this spec.
|---|---|---|
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
| `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
| `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks |
| `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
@ -179,7 +182,6 @@ This section outlines constants that are used in this spec.
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
## MetaData
Clients MUST locally store the following `MetaData`:
@ -745,10 +747,27 @@ The request MUST be encoded as an SSZ-container.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlock` payload.
Clients MUST keep a record of signed blocks seen since the start of the weak subjectivity period
and MUST support serving requests of blocks up to their own `head_block_root`.
Clients MUST keep a record of signed blocks seen on the epoch range
`[max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]`
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of blocks on this range.
Clients MUST respond with at least the first block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOCKS` blocks.
Peers that are unable to reply to block requests within the
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epoch range MAY get descored or disconnected at any time.
*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
MUST backfill the local block database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS`
to be fully compliant with `BlocksByRange` requests. To safely perform such a
backfill of blocks to the recent state, the node MUST validate both (1) the
proposer signatures and (2) that the blocks form a valid chain up to the most
recent block referenced in the weak subjectivity state.
*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
participating in the networking immediately, other peers MAY
disconnect and/or temporarily ban such an un-synced or semi-synced client.
Clients MUST respond with at least the first block that exists in the range, if they have it,
and no more than `MAX_REQUEST_BLOCKS` blocks.
The following blocks, where they exist, MUST be sent in consecutive order.
@ -1393,6 +1412,45 @@ To avoid this race condition, we allow the responding side to choose which branc
The requesting client then goes on to validate the blocks and incorporate them in their own database
-- because they follow the same rules, they should at this point arrive at the same canonical chain.
### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?
Due to economic finality and weak subjectivity requirements of a proof-of-stake blockchain, for a new node to safely join the network,
the node must provide a recent checkpoint found out-of-band. This checkpoint can be in the form of a `root` & `epoch` or it can be the entire
beacon state and then a simple block sync from there to the head. We expect the latter to be the dominant UX strategy.
These checkpoints *in the worst case* (i.e. very large validator set and maximal allowed safety decay) must be from the
most recent `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, and thus a user must be able to block sync to the head from this starting point.
Thus, this defines the epoch range outside which nodes may prune blocks, and
the epoch range that a new node syncing from a checkpoint must backfill.
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` is calculated using the arithmetic from `compute_weak_subjectivity_period` found in the
[weak subjectivity guide](./weak-subjectivity.md). Specifically to find this max epoch range, we use the worst case event of a very large validator size
(`>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT`).
```python
MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
)
```
Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` (~5 months).
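A quick sanity check of the quoted number, using the mainnet phase 0 values `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256` and `CHURN_LIMIT_QUOTIENT = 65536` (= 2**16):
```python
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256
CHURN_LIMIT_QUOTIENT = 2**16
MAX_SAFETY_DECAY = 100

min_epochs = MIN_VALIDATOR_WITHDRAWABILITY_DELAY + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
assert min_epochs == 256 + 32768 == 33024

# 32 slots per epoch, 12 seconds per slot: ~146.8 days, i.e. roughly 5 months
assert round(min_epochs * 32 * 12 / 86400) == 147
```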
### Why must the proposer signature be checked when backfilling blocks in the database?
When backfilling blocks in a database from a known safe block/state (e.g. when starting from a weak subjectivity state),
the node not only must ensure the `BeaconBlock`s form a chain to the known safe block,
but also must check that the proposer signature is valid in the `SignedBeaconBlock` wrapper.
This is because the signature is not part of the `BeaconBlock` hash chain, and
thus could be corrupted by an attacker serving valid `BeaconBlock`s but invalid
signatures contained in `SignedBeaconBlock`.
Although in this particular use case this does not represent a decay in safety
(due to the assumptions of starting at a weak subjectivity checkpoint), it
would represent invalid historic data and could be unwittingly transmitted to
additional nodes.
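A minimal backfill-validation sketch follows; `anchor_parent_root` is the `parent_root` of the most recent block referenced in the weak subjectivity state, `blocks` are the candidates in descending-slot order, and `verify_block_signature` is an illustrative stand-in for the phase 0 proposer-signature check:
```python
def validate_backfill_chain(anchor_parent_root: Root, blocks: Sequence[SignedBeaconBlock]) -> None:
    expected_root = anchor_parent_root
    for signed_block in blocks:
        block = signed_block.message
        # (2) the blocks must form a valid hash chain up to the known safe block...
        assert hash_tree_root(block) == expected_root
        # (1) ...and each proposer signature must be checked, since it is not
        # part of the hash chain and could otherwise be corrupted undetected.
        assert verify_block_signature(signed_block)
        expected_root = block.parent_root
```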
### What's the effect of empty slots on the sync algorithm?
When syncing one can only tell that a slot has been skipped on a particular branch

View File

@ -136,7 +136,9 @@ A brief reference for what these values look like in practice ([reference script
## Weak Subjectivity Sync
Clients should allow users to input a Weak Subjectivity Checkpoint at startup, and guarantee that any successful sync leads to the given Weak Subjectivity Checkpoint along the canonical chain. If such a sync is not possible, the client should treat this as a critical and irrecoverable failure.
Clients should allow users to input a Weak Subjectivity Checkpoint at startup,
and guarantee that any successful sync leads to the given Weak Subjectivity Checkpoint along the canonical chain.
If such a sync is not possible, the client should treat this as a critical and irrecoverable failure.
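Expressed as a check against the phase 0 fork choice store (a sketch; the helper name is illustrative):
```python
def sync_reached_ws_checkpoint(store: Store, ws_checkpoint: Checkpoint) -> bool:
    # The synced head must descend from the weak subjectivity checkpoint block.
    ws_slot = compute_start_slot_at_epoch(ws_checkpoint.epoch)
    return get_ancestor(store, get_head(store), ws_slot) == ws_checkpoint.root
```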
### Weak Subjectivity Sync Procedure

View File

@ -166,7 +166,7 @@ class BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body]
### `BeaconState`
```python
class BeaconState(merge.BeaconState):  # [extends The Merge block body]
class BeaconState(merge.BeaconState):  # [extends The Merge state]
    # [Updated fields]
    previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
    current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
@ -280,7 +280,7 @@ class ShardProposerSlashing(Container):
#### `next_power_of_two`
```python
def next_power_of_two(x):
def next_power_of_two(x: int) -> int:
    return 2 ** ((x - 1).bit_length())
```
@ -374,7 +374,7 @@ ensuring that the balance is always sufficient to cover gas costs.
def compute_proposer_index(beacon_state: BeaconState,
                           indices: Sequence[ValidatorIndex],
                           seed: Bytes32,
                           min_effective_balance: GWei = GWei(0)) -> ValidatorIndex:
                           min_effective_balance: Gwei = Gwei(0)) -> ValidatorIndex:
    """
    Return from ``indices`` a random index sampled by effective balance.
    """
@ -402,11 +402,11 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard
"""
epoch = compute_epoch_at_slot(slot)
committee = get_shard_committee(beacon_state, epoch, shard)
seed = hash(get_seed(beacon_state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(beacon_state.slot))
seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER) + uint_to_bytes(slot))
# Proposer must have sufficient balance to pay for worst case fee burn
EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = (
(EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT)
EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT
* HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT
)
min_effective_balance = (
@ -466,7 +466,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)  # [Modified in Sharding]
    process_application_payload(state, block.body)  # [New in Merge]
    process_execution_payload(state, block.body)  # [New in Merge]
```
#### Operations
@ -512,17 +512,22 @@ def update_pending_votes(state: BeaconState, attestation: Attestation) -> None:
        pending_headers = state.current_epoch_pending_shard_headers
    else:
        pending_headers = state.previous_epoch_pending_shard_headers
    pending_header = None
    for header in pending_headers:
        if header.root == attestation.data.shard_header_root:
            pending_header = header
    assert pending_header is not None
    assert pending_header.slot == attestation.data.slot
    assert pending_header.shard == compute_shard_from_committee_index(
    attestation_shard = compute_shard_from_committee_index(
        state,
        attestation.data.slot,
        attestation.data.index,
    )
    pending_header = None
    for header in pending_headers:
        if (
            header.root == attestation.data.shard_header_root
            and header.slot == attestation.data.slot
            and header.shard == attestation_shard
        ):
            pending_header = header
    assert pending_header is not None
    for i in range(len(pending_header.votes)):
        pending_header.votes[i] = pending_header.votes[i] or attestation.aggregation_bits[i]
@ -539,7 +544,7 @@ def update_pending_votes(state: BeaconState, attestation: Attestation) -> None:
    participants = get_attesting_indices(state, attestation.data, pending_header.votes)
    participants_balance = get_total_balance(state, participants)
    full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
    full_committee_balance = get_total_balance(state, full_committee)
    full_committee_balance = get_total_balance(state, set(full_committee))
    if participants_balance * 3 >= full_committee_balance * 2:
        pending_header.confirmed = True
```
@ -559,11 +564,11 @@ def process_shard_header(state: BeaconState,
    assert header.shard < get_active_shard_count(state, header_epoch)
    # Verify that the block root matches,
    # to ensure the header will only be included in this specific Beacon Chain sub-tree.
    assert header.beacon_block_root == get_block_root_at_slot(state, header.slot - 1)
    assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1)
    # Verify proposer
    assert header.proposer_index == get_shard_proposer_index(state, header.slot, header.shard)
    # Verify signature
    signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_HEADER))
    signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_PROPOSER))
    assert bls.Verify(state.validators[header.proposer_index].pubkey, signing_root, signed_header.signature)
    # Verify the length by verifying the degree.
@ -644,7 +649,7 @@ def process_epoch(state: BeaconState) -> None:
    # Sharding
    process_pending_headers(state)
    process_confirmed_header_fees(state)
    charge_confirmed_header_fees(state)
    reset_pending_headers(state)
# Final updates
@ -668,24 +673,26 @@ def process_pending_headers(state: BeaconState) -> None:
    if get_current_epoch(state) == GENESIS_EPOCH:
        return
    previous_epoch_start_slot = compute_start_slot_at_epoch(get_previous_epoch(state))
    previous_epoch = get_previous_epoch(state)
    previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
    for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
        for shard in range(get_active_shard_count(state)):
        for shard_index in range(get_active_shard_count(state, previous_epoch)):
            shard = Shard(shard_index)
            # Pending headers for this (slot, shard) combo
            candidates = [
                c for c in state.previous_epoch_pending_shard_headers
                if (c.slot, c.shard) == (slot, shard)
            ]
            # The entire committee (and its balance)
            full_committee = get_beacon_committee(state, slot, shard)
            full_committee_balance = get_total_balance(state, full_committee)
            # If any candidates already confirmed, skip
            if True in [c.confirmed for c in candidates]:
                continue
            # The entire committee (and its balance)
            index = compute_committee_index_from_shard(state, slot, shard)
            full_committee = get_beacon_committee(state, slot, index)
            # The set of voters who voted for each header (and their total balances)
            voting_sets = [
                [v for i, v in enumerate(full_committee) if c.votes[i]]
                set(v for i, v in enumerate(full_committee) if c.votes[i])
                for c in candidates
            ]
            voting_balances = [
@ -702,23 +709,25 @@ def process_pending_headers(state: BeaconState) -> None:
            winning_index = [c.root for c in candidates].index(Root())
            candidates[winning_index].confirmed = True
    for slot_index in range(SLOTS_PER_EPOCH):
        for shard in range(SHARD_COUNT):
        for shard in range(MAX_SHARDS):
            state.grandparent_epoch_confirmed_commitments[shard][slot_index] = DataCommitment()
    confirmed_headers = [candidate for candidate in state.previous_epoch_pending_shard_headers if candidate.confirmed]
    for header in confirmed_headers:
        state.grandparent_epoch_confirmed_commitments[c.shard][c.slot % SLOTS_PER_EPOCH] = c.commitment
        state.grandparent_epoch_confirmed_commitments[header.shard][header.slot % SLOTS_PER_EPOCH] = header.commitment
```
```python
def charge_confirmed_header_fees(state: BeaconState) -> None:
    new_gasprice = state.shard_gasprice
    previous_epoch = get_previous_epoch(state)
    adjustment_quotient = (
        get_active_shard_count(state, get_current_epoch(state))
        get_active_shard_count(state, previous_epoch)
        * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT
    )
    previous_epoch_start_slot = compute_start_slot_at_epoch(get_previous_epoch(state))
    previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
    for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
        for shard in range(SHARD_COUNT):
        for shard_index in range(get_active_shard_count(state, previous_epoch)):
            shard = Shard(shard_index)
            confirmed_candidates = [
                c for c in state.previous_epoch_pending_shard_headers
                if (c.slot, c.shard, c.confirmed) == (slot, shard, True)
@ -728,7 +737,7 @@ def charge_confirmed_header_fees(state: BeaconState) -> None:
            candidate = confirmed_candidates[0]
            # Charge EIP 1559 fee
            proposer = get_shard_proposer(state, slot, shard)
            proposer = get_shard_proposer_index(state, slot, shard)
            fee = (
                (state.shard_gasprice * candidate.commitment.length)
                // TARGET_SAMPLES_PER_BLOCK
@ -751,10 +760,11 @@ def reset_pending_headers(state: BeaconState) -> None:
    # Add dummy "empty" PendingShardHeader (default vote for if no shard header available)
    next_epoch = get_current_epoch(state) + 1
    next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch)
    for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_IN_EPOCH):
        for index in range(get_committee_count_per_slot(next_epoch)):
            shard = compute_shard_from_committee_index(state, slot, index)
            committee_length = len(get_beacon_committee(state, slot, shard))
    for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_PER_EPOCH):
        for index in range(get_committee_count_per_slot(state, next_epoch)):
            committee_index = CommitteeIndex(index)
            shard = compute_shard_from_committee_index(state, slot, committee_index)
            committee_length = len(get_beacon_committee(state, slot, committee_index))
            state.current_epoch_pending_shard_headers.append(PendingShardHeader(
                slot=slot,
                shard=shard,

View File

@ -9,13 +9,15 @@
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Constants](#constants)
- [Misc](#misc)
- [New containers](#new-containers)
- [ShardBlobBody](#shardblobbody)
- [ShardBlob](#shardblob)
- [SignedShardBlob](#signedshardblob)
- [Gossip domain](#gossip-domain)
- [Topics and messages](#topics-and-messages)
- [Shard blobs: `shard_blob_{shard}`](#shard-blobs-shard_blob_shard)
- [Shard blobs: `shard_blob_{subnet_id}`](#shard-blobs-shard_blob_subnet_id)
- [Shard header: `shard_header`](#shard-header-shard_header)
- [Shard proposer slashing: `shard_proposer_slashing`](#shard-proposer-slashing-shard_proposer_slashing)
@ -29,6 +31,14 @@ The specification of these changes continues in the same format as the [Phase0](
[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite.
The adjustments and additions for Shards are outlined in this document.
## Constants
### Misc
| Name | Value | Description |
| ---- | ----- | ----------- |
| `SHARD_BLOB_SUBNET_COUNT` | `64` | The number of `shard_blob_{subnet_id}` subnets used in the gossipsub protocol. |
## New containers
### ShardBlobBody
@ -77,23 +87,38 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.
| Name | Message Type |
|----------------------------------|---------------------------|
| `shard_blob_{shard}` | `SignedShardBlob` |
| `shard_blob_{subnet_id}` | `SignedShardBlob` |
| `shard_header` | `SignedShardHeader` |
| `shard_proposer_slashing` | `ShardProposerSlashing` |
The [DAS network specification](./das-p2p.md) defines additional topics.
#### Shard blobs: `shard_blob_{shard}`
#### Shard blobs: `shard_blob_{subnet_id}`
Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{shard}` subnets.
Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{subnet_id}` subnets.
```python
def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard) -> uint64:
"""
Compute the correct subnet for a shard blob publication.
Note, this mimics compute_subnet_for_attestation().
"""
committee_index = compute_committee_index_from_shard(state, slot, shard)
committees_per_slot = get_committee_count_per_slot(state, compute_epoch_at_slot(slot))
slots_since_epoch_start = Slot(slot % SLOTS_PER_EPOCH)
committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
return uint64((committees_since_epoch_start + committee_index) % SHARD_BLOB_SUBNET_COUNT)
```
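For illustration, a publisher then formats the computed subnet id into the topic name (a sketch, not normative; `state` and `blob` are assumed to be in scope):
```python
subnet_id = compute_subnet_for_shard_blob(state, blob.slot, blob.shard)
topic = f"shard_blob_{subnet_id}"  # the gossipsub topic for this SignedShardBlob
```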
The following validations MUST pass before forwarding the `signed_blob` (with inner `message` as `blob`) on the horizontal subnet or creating samples for it.
- _[REJECT]_ `blob.shard` MUST match the topic `{shard}` parameter. (And thus within valid shard index range)
- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
i.e. validate that `blob.slot <= current_slot`
(a client MAY queue future blobs for processing at the appropriate slot).
- _[IGNORE]_ The `blob` is new enough to still be processed --
i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)`
- _[REJECT]_ The shard blob is for the correct subnet --
i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id`
- _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination.
- _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large.
- _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid.

View File

@ -75,7 +75,8 @@ For convenience we alias:
* `bit` to `boolean`
* `byte` to `uint8` (this is a basic type)
* `BytesN` to `Vector[byte, N]` (this is *not* a basic type)
* `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type)
* `ByteList[N]` to `List[byte, N]`
* `null`: `{}`
### Default values

View File

@ -7,27 +7,30 @@ With this executable spec,
test-generators can easily create test-vectors for client implementations,
and the spec itself can be verified to be consistent and coherent through sanity tests implemented with pytest.
## Building
To build the pyspec: `python setup.py build`
(or `pip install .`, but beware that ignored files will still be copied over to a temporary dir, due to pip issue 2195).
This outputs the build files to the `./build/lib/eth2spec/...` dir, and can't be used for local test running. Instead, use the dev-install as described below.
## Dev Install
All the dynamic parts of the spec are automatically built with `python setup.py pyspecdev`.
Unlike the regular install, this outputs spec files to their original source location, instead of build output only.
Alternatively, you can build a sub-set of the pyspec with the distutil command:
```bash
python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-chain.md specs/phase0/fork-choice.md" --out-dir=my_spec_dir
```
First, create a `venv` and install the developer dependencies (`test` and `lint` extras):
```shell
make install_test
```
## Py-tests
After installing, you can install the optional dependencies for testing and linting.
With makefile: `make install_test`.
Or manually: run `pip install .[test]` and `pip install .[lint]`.
All the dynamic parts of the spec are built with:
```shell
(venv) python setup.py pyspecdev
```
Unlike the regular install, this outputs spec files to their intended source location,
to enable debuggers to navigate between packages and generated code, without fragile directory linking.
By default, when installing the `eth2spec` as package in non-develop mode,
the distutils implementation of the `setup` runs `build`, which is extended to run the same `pyspec` work,
but outputs into the standard `./build/lib` output.
This enables the `eth2.0-specs` repository to be installed like any other python package.
## Py-tests
These tests are not intended for client-consumption.
These tests are testing the spec itself, to verify consistency and provide feedback on modifications of the spec.
@ -39,20 +42,32 @@ However, most of the tests can be run in generator-mode, to output test vectors
Run `make test` from the root of the specs repository (after running `make install_test` if you have not done so before).
Note that the `make` commands run through the build steps: it runs the `build` output, not the local package source files.
#### Manual
From the repository root:
Install venv and install:
```bash
python3 -m venv venv
. venv/bin/activate
python setup.py pyspecdev
```
Run the test command from the `tests/core/pyspec` directory:
```shell
pytest --config=minimal eth2spec
```
See `Dev install` for test pre-requisites.
Tests are built for `pytest`.
Caveats:
- Working directory must be `./tests/core/pyspec`. The work-directory is important to locate eth2 configuration files.
- Run `pytest` as module. It avoids environment differences, and the behavior is different too:
  `pytest` as module adds the current directory to the `sys.path`
Full test usage, with explicit configuration for illustration of options usage:
```shell
(venv) python -m pytest --config=minimal eth2spec
```
Or, to run a specific test file, specify the full path:
```shell
(venv) python -m pytest --config=minimal ./eth2spec/test/phase0/block_processing/test_process_attestation.py
```
Or, to run a specific test function (specify the `eth2spec` module, or the script path if the keyword is ambiguous):
```shell
(venv) python -m pytest --config=minimal -k test_success_multi_proposer_index_iterations eth2spec
```
Options:
@ -64,6 +79,12 @@ Options:
Run `make open_cov` from the root of the specs repository after running `make test` to open the html code coverage report.
### Advanced
Building spec files from any markdown sources, to a custom location:
```bash
(venv) python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-chain.md specs/phase0/fork-choice.md" --out-dir=my_spec_dir
```
## Contributing

View File

@ -1 +1 @@
1.1.0-alpha.3
1.1.0-alpha.4

View File

@ -54,8 +54,7 @@ def load_config_file(configs_dir: str, presets_name: str) -> Dict[str, Any]:
            out[k] = [int(item) if item.isdigit() else item for item in v]
        elif isinstance(v, str) and v.startswith("0x"):
            out[k] = bytes.fromhex(v[2:])
        elif k == "CONFIG_NAME":
            out[k] = str(v)
        else:
            out[k] = int(v)
    out['CONFIG_NAME'] = presets_name
    return out

View File

@ -4,7 +4,8 @@ from typing import Any, Callable, Dict, Iterable, Optional
from eth2spec.config import config_util
from eth2spec.utils import bls
from eth2spec.test.context import ALL_CONFIGS, TESTGEN_FORKS, SpecForkName, ConfigName
from eth2spec.test.helpers.constants import ALL_CONFIGS, TESTGEN_FORKS
from eth2spec.test.helpers.typing import SpecForkName, ConfigName
from eth2spec.gen_helpers.gen_base import gen_runner
from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider

View File

@ -8,14 +8,15 @@ from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    transition_to,
)
from eth2spec.test.helpers.constants import (
    MAINNET, MINIMAL,
)
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)
from eth2spec.test.context import (
    PHASE0,
    MAINNET, MINIMAL,
    expect_assertion_error,
    with_all_phases_except,
    with_altair_and_later,
    with_configs,
    spec_state_test,
    always_bls,
@ -48,9 +49,9 @@ def get_committee_indices(spec, state, duplicates=False):
"""
state = state.copy()
current_epoch = spec.get_current_epoch(state)
randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
randao_index = (current_epoch + 1) % spec.EPOCHS_PER_HISTORICAL_VECTOR
while True:
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
committee = spec.get_next_sync_committee_indices(state)
if duplicates:
if len(committee) != len(set(committee)):
return committee
@ -60,57 +61,73 @@ def get_committee_indices(spec, state, duplicates=False):
        state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
@with_all_phases_except([PHASE0])
def compute_committee_indices(spec, state, committee):
"""
Given a ``committee``, calculate and return the related indices
"""
all_pubkeys = [v.pubkey for v in state.validators]
committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
return committee_indices
@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_missing_participant(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    rng = random.Random(2020)
    random_participant = rng.choice(committee)
    random_participant = rng.choice(committee_indices)
    block = build_empty_block_for_next_slot(spec, state)
    # Exclude one participant whose signature was included.
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[index != random_participant for index in committee],
        sync_committee_bits=[index != random_participant for index in committee_indices],
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee,  # full committee signs
            committee_indices,  # full committee signs
        )
    )
    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_extra_participant(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    rng = random.Random(3030)
    random_participant = rng.choice(committee)
    random_participant = rng.choice(committee_indices)
    block = build_empty_block_for_next_slot(spec, state)
    # Exclude one signature even though the block claims the entire committee participated.
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee),
        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            [index for index in committee if index != random_participant],
            [index for index in committee_indices if index != random_participant],
        )
    )
    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
def compute_sync_committee_inclusion_reward(spec, state, participant_index, committee, committee_bits):
def compute_sync_committee_inclusion_reward(spec,
                                            state,
                                            participant_index,
                                            committee_indices,
                                            committee_bits):
    total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
    total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments)
    max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR)
    included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
    max_slot_rewards = spec.Gwei(max_epoch_rewards * len(included_indices) // len(committee) // spec.SLOTS_PER_EPOCH)
    included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
    max_slot_rewards = spec.Gwei(
        max_epoch_rewards * len(included_indices)
        // len(committee_indices) // spec.SLOTS_PER_EPOCH
    )
    # Compute the participant and proposer sync rewards
    committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])
@ -119,23 +136,23 @@ def compute_sync_committee_inclusion_reward(spec, state, participant_index, comm
    return spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance)
def compute_sync_committee_participant_reward(spec, state, participant_index, committee, committee_bits):
    included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits):
    included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
    multiplicities = Counter(included_indices)
    inclusion_reward = compute_sync_committee_inclusion_reward(
        spec, state, participant_index, committee, committee_bits,
        spec, state, participant_index, committee_indices, committee_bits,
    )
    return spec.Gwei(inclusion_reward * multiplicities[participant_index])
def compute_sync_committee_proposer_reward(spec, state, committee, committee_bits):
def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits):
    proposer_reward = 0
    for index, bit in zip(committee, committee_bits):
    for index, bit in zip(committee_indices, committee_bits):
        if not bit:
            continue
        inclusion_reward = compute_sync_committee_inclusion_reward(
            spec, state, index, committee, committee_bits,
            spec, state, index, committee_indices, committee_bits,
        )
        proposer_reward_denominator = (
            (spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT)
@ -146,30 +163,30 @@ def compute_sync_committee_proposer_reward(spec, state, committee, committee_bit
    return proposer_reward
def validate_sync_committee_rewards(spec, pre_state, post_state, committee, committee_bits, proposer_index):
def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index):
    for index in range(len(post_state.validators)):
        reward = 0
        if index in committee:
        if index in committee_indices:
            reward += compute_sync_committee_participant_reward(
                spec,
                pre_state,
                index,
                committee,
                committee_indices,
                committee_bits,
            )
            if proposer_index == index:
                reward += compute_sync_committee_proposer_reward(
                    spec,
                    pre_state,
                    committee,
                    committee_bits,
                )
        if proposer_index == index:
            reward += compute_sync_committee_proposer_reward(
                spec,
                pre_state,
                committee_indices,
                committee_bits,
            )
        assert post_state.balances[index] == pre_state.balances[index] + reward
def run_successful_sync_committee_test(spec, state, committee, committee_bits):
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
    pre_state = state.copy()
    block = build_empty_block_for_next_slot(spec, state)
@ -179,7 +196,7 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
            spec,
            state,
            block.slot - 1,
            [index for index, bit in zip(committee, committee_bits) if bit],
            [index for index, bit in zip(committee_indices, committee_bits) if bit],
        )
    )
@ -189,60 +206,70 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
        spec,
        pre_state,
        state,
        committee,
        committee_indices,
        committee_bits,
        block.proposer_index,
    )
@with_all_phases_except([PHASE0])
@with_altair_and_later
@with_configs([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
    committee = get_committee_indices(spec, state, duplicates=False)
    committee_size = len(committee)
    committee_indices = get_committee_indices(spec, state, duplicates=False)
    committee_size = len(committee_indices)
    committee_bits = [True] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
    # Preconditions of this test case
    assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
    assert committee_size == len(set(committee))
    assert committee_size == len(set(committee_indices))
    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@with_configs([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee(spec, state):
    committee = get_committee_indices(spec, state, duplicates=True)
    committee_size = len(committee)
    committee_indices = get_committee_indices(spec, state, duplicates=True)
    committee_size = len(committee_indices)
    committee_bits = [True] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
    # Preconditions of this test case
    assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
    assert committee_size > len(set(committee))
    assert committee_size > len(set(committee_indices))
    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
@always_bls
def test_sync_committee_rewards_not_full_participants(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    rng = random.Random(1010)
    committee_bits = [rng.choice([True, False]) for _ in committee]
    committee_bits = [rng.choice([True, False]) for _ in committee_indices]
    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
@always_bls
def test_sync_committee_rewards_empty_participants(spec, state):
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    committee_bits = [False for _ in committee_indices]
    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_past_block(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    blocks = []
    for _ in range(2):
@ -250,12 +277,12 @@ def test_invalid_signature_past_block(spec, state):
        block = build_empty_block_for_next_slot(spec, state)
        # Valid sync committee signature here...
        block.body.sync_aggregate = spec.SyncAggregate(
            sync_committee_bits=[True] * len(committee),
            sync_committee_bits=[True] * len(committee_indices),
            sync_committee_signature=compute_aggregate_sync_committee_signature(
                spec,
                state,
                block.slot - 1,
                committee,
                committee_indices,
            )
        )
@ -265,19 +292,19 @@ def test_invalid_signature_past_block(spec, state):
    invalid_block = build_empty_block_for_next_slot(spec, state)
    # Invalid signature from a slot other than the previous
    invalid_block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee),
        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            invalid_block.slot - 2,
            committee,
            committee_indices,
        )
    )
    yield from run_sync_committee_processing(spec, state, invalid_block, expect_exception=True)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@with_configs([MINIMAL], reason="to produce different committee sets")
@spec_state_test
@always_bls
@ -295,26 +322,25 @@ def test_invalid_signature_previous_committee(spec, state):
    transition_to(spec, state, slot_in_future_sync_committee_period)
    # Use the previous sync committee to produce the signature.
    pubkeys = [validator.pubkey for validator in state.validators]
    # Ensure that the pubkey sets are different.
    assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
    committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]
    committee_indices = compute_committee_indices(spec, state, old_sync_committee)
    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee),
        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee,
            committee_indices,
        )
    )
    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
@always_bls
@with_configs([MINIMAL], reason="too slow")
@ -333,15 +359,13 @@ def test_valid_signature_future_committee(spec, state):
    transition_to(spec, state, slot_in_future_sync_committee_period)
    sync_committee = state.current_sync_committee
    next_sync_committee = state.next_sync_committee
    expected_sync_committee = spec.get_sync_committee(state, epoch_in_future_sync_committee_period)
    assert sync_committee == expected_sync_committee
    assert next_sync_committee != sync_committee
    assert sync_committee != old_current_sync_committee
    assert sync_committee != old_next_sync_committee
    pubkeys = [validator.pubkey for validator in state.validators]
    committee_indices = [pubkeys.index(pubkey) for pubkey in sync_committee.pubkeys]
    committee_indices = compute_committee_indices(spec, state, sync_committee)
    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(

View File

@ -0,0 +1,90 @@
from random import Random

from eth2spec.test.context import spec_state_test, with_altair_and_later
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores
from eth2spec.test.helpers.state import (
    next_epoch_via_block,
)
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with
)
from eth2spec.test.helpers.random import (
    randomize_attestation_participation,
)


def set_full_participation(spec, state):
    full_flags = spec.ParticipationFlags(0)
    for flag_index in range(len(spec.PARTICIPATION_FLAG_WEIGHTS)):
        full_flags = spec.add_flag(full_flags, flag_index)
    for index in range(len(state.validators)):
        state.current_epoch_participation[index] = full_flags
        state.previous_epoch_participation[index] = full_flags


def run_process_inactivity_updates(spec, state):
    yield from run_epoch_processing_with(spec, state, 'process_inactivity_updates')


@with_altair_and_later
@spec_state_test
def test_genesis(spec, state):
    yield from run_process_inactivity_updates(spec, state)


#
# Genesis epoch processing is skipped
# Thus all of the following tests go past the genesis epoch to test core functionality
#


@with_altair_and_later
@spec_state_test
def test_all_zero_inactivity_scores_empty_participation(spec, state):
    next_epoch_via_block(spec, state)
    state.inactivity_scores = [0] * len(state.validators)
    yield from run_process_inactivity_updates(spec, state)


@with_altair_and_later
@spec_state_test
def test_all_zero_inactivity_scores_random_participation(spec, state):
    next_epoch_via_block(spec, state)
    state.inactivity_scores = [0] * len(state.validators)
    randomize_attestation_participation(spec, state, rng=Random(5555))
    yield from run_process_inactivity_updates(spec, state)


@with_altair_and_later
@spec_state_test
def test_all_zero_inactivity_scores_full_participation(spec, state):
    next_epoch_via_block(spec, state)
    set_full_participation(spec, state)
    state.inactivity_scores = [0] * len(state.validators)
    yield from run_process_inactivity_updates(spec, state)


@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_empty_participation(spec, state):
    next_epoch_via_block(spec, state)
    randomize_inactivity_scores(spec, state, rng=Random(9999))
    yield from run_process_inactivity_updates(spec, state)


@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_random_participation(spec, state):
    next_epoch_via_block(spec, state)
    randomize_attestation_participation(spec, state, rng=Random(22222))
    randomize_inactivity_scores(spec, state, rng=Random(22222))
    yield from run_process_inactivity_updates(spec, state)


@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_full_participation(spec, state):
    next_epoch_via_block(spec, state)
    set_full_participation(spec, state)
    randomize_inactivity_scores(spec, state, rng=Random(33333))
    yield from run_process_inactivity_updates(spec, state)

View File

@ -1,30 +1,35 @@
from eth2spec.test.context import (
    PHASE0,
    MINIMAL,
    always_bls,
    spec_state_test,
    with_all_phases_except,
    spec_test,
    with_altair_and_later,
    with_configs,
    with_custom_state,
    single_phase,
    misc_balances,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with,
)
@with_all_phases_except([PHASE0])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_sync_committees_progress(spec, state):
    current_epoch = spec.get_current_epoch(state)
    # NOTE: if not in the genesis epoch, period math below needs to be
    # adjusted relative to the current epoch
    assert current_epoch == 0
#
# Note:
# Calculating sync committees requires pubkey aggregation, thus all tests are generated with `always_bls`
#
def run_sync_committees_progress_test(spec, state):
    first_sync_committee = state.current_sync_committee
    second_sync_committee = state.next_sync_committee
    slot_at_end_of_current_period = spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - 1
    transition_to(spec, state, slot_at_end_of_current_period)
    current_period = spec.get_current_epoch(state) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    next_period = current_period + 1
    next_period_start_epoch = next_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    next_period_start_slot = next_period_start_epoch * spec.SLOTS_PER_EPOCH
    end_slot_of_current_period = next_period_start_slot - 1
    transition_to(spec, state, end_slot_of_current_period)
    # Ensure assignments have not changed:
    assert state.current_sync_committee == first_sync_committee
@ -34,7 +39,41 @@ def test_sync_committees_progress(spec, state):
    # Can compute the third committee having computed final balances in the last epoch
    # of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
    third_sync_committee = spec.get_sync_committee(state, 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
    third_sync_committee = spec.get_next_sync_committee(state)
    assert state.current_sync_committee == second_sync_committee
    assert state.next_sync_committee == third_sync_committee
@with_altair_and_later
@spec_state_test
@always_bls
@with_configs([MINIMAL], reason="too slow")
def test_sync_committees_progress_genesis(spec, state):
# Genesis epoch period has an exceptional case
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
yield from run_sync_committees_progress_test(spec, state)
@with_altair_and_later
@spec_state_test
@always_bls
@with_configs([MINIMAL], reason="too slow")
def test_sync_committees_progress_not_genesis(spec, state):
# Transition out of the genesis epoch period to test non-exceptional case
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
slot_in_next_period = state.slot + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
transition_to(spec, state, slot_in_next_period)
yield from run_sync_committees_progress_test(spec, state)
@with_altair_and_later
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
@always_bls
@with_configs([MINIMAL], reason="too slow")
def test_sync_committees_progress_misc_balances(spec, state):
yield from run_sync_committees_progress_test(spec, state)
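# For reference, `transition_to` (imported above) is assumed to be a thin
# wrapper over `process_slots`; a minimal sketch:
def transition_to_sketch(spec, state, slot):
    assert state.slot < slot
    spec.process_slots(state, slot)  # advance through empty slots, no blocks
    assert state.slot == slot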

View File

@ -1,6 +1,4 @@
from eth2spec.test.context import (
PHASE0, ALTAIR,
MINIMAL,
with_phases,
with_custom_state,
with_configs,
@ -8,51 +6,18 @@ from eth2spec.test.context import (
low_balances, misc_balances, large_validator_set,
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.constants import (
PHASE0, ALTAIR,
MINIMAL,
)
from eth2spec.test.helpers.state import (
next_epoch,
next_epoch_via_block,
)
ALTAIR_FORK_TEST_META_TAGS = {
'fork': 'altair',
}
def run_fork_test(post_spec, pre_state):
yield 'pre', pre_state
post_state = post_spec.upgrade_to_altair(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
yield 'post', post_state
from eth2spec.test.helpers.altair.fork import (
ALTAIR_FORK_TEST_META_TAGS,
run_fork_test,
)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])

View File

@ -0,0 +1,120 @@
from random import Random
from eth2spec.test.context import (
with_phases,
with_custom_state,
with_configs,
spec_test, with_state,
low_balances, misc_balances, large_validator_set,
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.constants import (
PHASE0, ALTAIR,
MINIMAL,
)
from eth2spec.test.helpers.altair.fork import (
ALTAIR_FORK_TEST_META_TAGS,
run_fork_test,
)
from eth2spec.test.helpers.random import (
randomize_state,
randomize_attestation_participation,
)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_0(spec, phases, state):
randomize_state(spec, state, rng=Random(1010))
yield from run_fork_test(phases[ALTAIR], state)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_1(spec, phases, state):
randomize_state(spec, state, rng=Random(2020))
yield from run_fork_test(phases[ALTAIR], state)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_2(spec, phases, state):
randomize_state(spec, state, rng=Random(3030))
yield from run_fork_test(phases[ALTAIR], state)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_3(spec, phases, state):
randomize_state(spec, state, rng=Random(4040))
yield from run_fork_test(phases[ALTAIR], state)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_duplicate_attestations(spec, phases, state):
randomize_state(spec, state, rng=Random(1111))
# Note: `run_fork_test` empties `current_epoch_attestations`
state.previous_epoch_attestations = state.previous_epoch_attestations + state.previous_epoch_attestations
yield from run_fork_test(phases[ALTAIR], state)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_mismatched_attestations(spec, phases, state):
# Create a random state
randomize_state(spec, state, rng=Random(2222))
# Now make two copies
state_0 = state.copy()
state_1 = state.copy()
# Randomize attestation participation of both
randomize_attestation_participation(spec, state_0, rng=Random(3333))
randomize_attestation_participation(spec, state_1, rng=Random(4444))
# Note: `run_fork_test` empties `current_epoch_attestations`
# Use pending attestations from both random states in a single state for testing
state_0.previous_epoch_attestations = state_0.previous_epoch_attestations + state_1.previous_epoch_attestations
yield from run_fork_test(phases[ALTAIR], state_0)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_low_balances(spec, phases, state):
randomize_state(spec, state, rng=Random(5050))
yield from run_fork_test(phases[ALTAIR], state)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_misc_balances(spec, phases, state):
randomize_state(spec, state, rng=Random(6060))
yield from run_fork_test(phases[ALTAIR], state)
@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@with_configs([MINIMAL],
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
@spec_test
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_large_validator_set(spec, phases, state):
randomize_state(spec, state, rng=Random(7070))
yield from run_fork_test(phases[ALTAIR], state)

View File

@ -0,0 +1,118 @@
from random import Random
from eth2spec.test.context import (
with_altair_and_later,
spec_test,
spec_state_test,
with_custom_state,
single_phase,
low_balances, misc_balances,
)
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores
from eth2spec.test.helpers.rewards import leaking
import eth2spec.test.helpers.rewards as rewards_helpers
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_0(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(9999))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9999))
@with_altair_and_later
@spec_state_test
def test_random_inactivity_scores_1(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(10000))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10000))
@with_altair_and_later
@spec_state_test
def test_half_zero_half_random_inactivity_scores(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(10101))
half_val_point = len(state.validators) // 2
state.inactivity_scores = [0] * half_val_point + state.inactivity_scores[half_val_point:]
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10101))
@with_altair_and_later
@spec_state_test
def test_random_high_inactivity_scores(spec, state):
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
@with_altair_and_later
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_random_inactivity_scores_low_balances_0(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(11111))
yield from rewards_helpers.run_test_full_random(spec, state)
@with_altair_and_later
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_random_inactivity_scores_low_balances_1(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(22222))
yield from rewards_helpers.run_test_full_random(spec, state)
@with_altair_and_later
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_misc_balances(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(33333))
yield from rewards_helpers.run_test_full_random(spec, state)
#
# Leaking variants
#
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_leaking_0(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(9999))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9999))
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_inactivity_scores_leaking_1(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(10000))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10000))
@with_altair_and_later
@spec_state_test
@leaking()
def test_half_zero_half_random_inactivity_scores_leaking(spec, state):
randomize_inactivity_scores(spec, state, rng=Random(10101))
half_val_point = len(state.validators) // 2
state.inactivity_scores = [0] * half_val_point + state.inactivity_scores[half_val_point:]
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(10101))
@with_altair_and_later
@spec_state_test
@leaking()
def test_random_high_inactivity_scores_leaking(spec, state):
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
@with_altair_and_later
@spec_state_test
@leaking(epochs=5)
def test_random_high_inactivity_scores_leaking_5_epochs(spec, state):
randomize_inactivity_scores(spec, state, minimum=500000, maximum=5000000, rng=Random(9998))
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(9998))
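# Hedged sketch of the `leaking` decorator imported above: it is assumed to
# advance the state through enough empty epochs (stalled finality) that
# `spec.is_in_inactivity_leak(state)` holds before the wrapped test runs.
# The real helper also caches the leaked state in an LRU; that detail and the
# `next_epoch` import (eth2spec.test.helpers.state) are elided here.
def leaking_sketch(epochs=None):
    def deco(fn):
        def entry(*args, spec, state, **kw):
            for _ in range((epochs or spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY) + 2):
                next_epoch(spec, state)  # empty epochs, so finality stalls
            assert spec.is_in_inactivity_leak(state)
            return fn(*args, spec=spec, state=state, **kw)
        return entry
    return deco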

View File

@ -12,14 +12,14 @@ from eth2spec.test.helpers.sync_committee import (
compute_aggregate_sync_committee_signature,
)
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
with_altair_and_later,
spec_state_test,
)
def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
all_pubkeys = [v.pubkey for v in state.validators]
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
participants = random.sample(committee, int(len(committee) * fraction_full))
yield 'pre', state
@ -40,46 +40,46 @@ def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
yield 'post', state
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
def test_full_sync_committee_committee(spec, state):
next_epoch(spec, state)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
def test_half_sync_committee_committee(spec, state):
next_epoch(spec, state)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
def test_empty_sync_committee_committee(spec, state):
next_epoch(spec, state)
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
def test_full_sync_committee_committee_genesis(spec, state):
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
def test_half_sync_committee_committee_genesis(spec, state):
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
def test_empty_sync_committee_committee_genesis(spec, state):
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
@with_all_phases_except([PHASE0])
@with_altair_and_later
@spec_state_test
def test_inactivity_scores(spec, state):
for _ in range(spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2):

View File

@ -0,0 +1,244 @@
from eth2spec.test.context import fork_transition_test
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block
def _state_transition_and_sign_block_at_slot(spec, state):
"""
Cribbed from ``transition_unsigned_block`` helper
where the early parts of the state transition have already
been applied to ``state``.
Used to produce a block during an irregular state transition.
"""
block = build_empty_block(spec, state)
assert state.latest_block_header.slot < block.slot
assert state.slot == block.slot
spec.process_block(state, block)
block.state_root = state.hash_tree_root()
return sign_block(spec, state, block)
def _all_blocks(_):
return True
def _skip_slots(*slots):
"""
Skip making a block if its slot is
passed as an argument to this filter
"""
def f(state_at_prior_slot):
return state_at_prior_slot.slot + 1 not in slots
return f
def _no_blocks(_):
return False
def _only_at(slot):
"""
Only produce a block if its slot is ``slot``.
"""
def f(state_at_prior_slot):
return state_at_prior_slot.slot + 1 == slot
return f
def _state_transition_across_slots(spec, state, to_slot, block_filter=_all_blocks):
assert state.slot < to_slot
while state.slot < to_slot:
should_make_block = block_filter(state)
if should_make_block:
block = build_empty_block_for_next_slot(spec, state)
signed_block = state_transition_and_sign_block(spec, state, block)
yield signed_block
else:
next_slot(spec, state)
def _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=True):
spec.process_slots(state, state.slot + 1)
assert state.slot % spec.SLOTS_PER_EPOCH == 0
assert spec.compute_epoch_at_slot(state.slot) == fork_epoch
state = post_spec.upgrade_to_altair(state)
assert state.fork.epoch == fork_epoch
assert state.fork.previous_version == post_spec.GENESIS_FORK_VERSION
assert state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
if with_block:
return state, _state_transition_and_sign_block_at_slot(post_spec, state)
else:
return state, None
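# Worked boundary example for the helpers above, assuming fork_epoch = 2 and
# the minimal-config SLOTS_PER_EPOCH = 8:
#   last pre-fork slot = 2 * 8 - 1 = 15 (the `to_slot` used by the tests below)
#   first fork slot    = 16: `_do_altair_fork` processes it, upgrades the state
#   to Altair, and optionally signs the first post-fork block at that slot.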
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
"""
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
producing blocks for every slot along the way.
"""
yield "pre", state
assert spec.get_current_epoch(state) < fork_epoch
# regular state transition until fork:
to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
blocks = []
blocks.extend([
pre_tag(block) for block in
_state_transition_across_slots(spec, state, to_slot)
])
# irregular state transition to handle fork:
state, block = _do_altair_fork(state, spec, post_spec, fork_epoch)
blocks.append(post_tag(block))
# continue regular state transition with new spec into next epoch
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
blocks.extend([
post_tag(block) for block in
_state_transition_across_slots(post_spec, state, to_slot)
])
assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
slots_with_blocks = [block.message.slot for block in blocks]
assert len(set(slots_with_blocks)) == len(slots_with_blocks)
assert set(range(1, state.slot + 1)) == set(slots_with_blocks)
yield "blocks", blocks
yield "post", state
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_transition_missing_first_post_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
"""
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
producing blocks for every slot along the way except for the first block
of the new fork.
"""
yield "pre", state
assert spec.get_current_epoch(state) < fork_epoch
# regular state transition until fork:
to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
blocks = []
blocks.extend([
pre_tag(block) for block in
_state_transition_across_slots(spec, state, to_slot)
])
# irregular state transition to handle fork:
state, _ = _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
# continue regular state transition with new spec into next epoch
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
blocks.extend([
post_tag(block) for block in
_state_transition_across_slots(post_spec, state, to_slot)
])
assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
slots_with_blocks = [block.message.slot for block in blocks]
assert len(set(slots_with_blocks)) == len(slots_with_blocks)
expected_slots = set(range(1, state.slot + 1)).difference(set([fork_epoch * spec.SLOTS_PER_EPOCH]))
assert expected_slots == set(slots_with_blocks)
yield "blocks", blocks
yield "post", state
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_transition_missing_last_pre_fork_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
"""
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
producing blocks for every slot along the way except for the last block
of the old fork.
"""
yield "pre", state
assert spec.get_current_epoch(state) < fork_epoch
# regular state transition until fork:
last_slot_of_pre_fork = fork_epoch * spec.SLOTS_PER_EPOCH - 1
to_slot = last_slot_of_pre_fork
blocks = []
blocks.extend([
pre_tag(block) for block in
_state_transition_across_slots(spec, state, to_slot, block_filter=_skip_slots(last_slot_of_pre_fork))
])
# irregular state transition to handle fork:
state, block = _do_altair_fork(state, spec, post_spec, fork_epoch)
blocks.append(post_tag(block))
# continue regular state transition with new spec into next epoch
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
blocks.extend([
post_tag(block) for block in
_state_transition_across_slots(post_spec, state, to_slot)
])
assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
slots_with_blocks = [block.message.slot for block in blocks]
assert len(set(slots_with_blocks)) == len(slots_with_blocks)
expected_slots = set(range(1, state.slot + 1)).difference(set([last_slot_of_pre_fork]))
assert expected_slots == set(slots_with_blocks)
yield "blocks", blocks
yield "post", state
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_transition_only_blocks_post_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
"""
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
skipping blocks for every slot along the way except for the first block
in the ending epoch.
"""
yield "pre", state
assert spec.get_current_epoch(state) < fork_epoch
# regular state transition until fork:
last_slot_of_pre_fork = fork_epoch * spec.SLOTS_PER_EPOCH - 1
to_slot = last_slot_of_pre_fork
blocks = []
blocks.extend([
pre_tag(block) for block in
_state_transition_across_slots(spec, state, to_slot, block_filter=_no_blocks)
])
# irregular state transition to handle fork:
state, _ = _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
# continue regular state transition with new spec into next epoch
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
last_slot = (fork_epoch + 1) * post_spec.SLOTS_PER_EPOCH
blocks.extend([
post_tag(block) for block in
_state_transition_across_slots(post_spec, state, to_slot, block_filter=_only_at(last_slot))
])
assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
slots_with_blocks = [block.message.slot for block in blocks]
assert len(slots_with_blocks) == 1
assert slots_with_blocks[0] == last_slot
yield "blocks", blocks
yield "post", state

View File

@ -1,8 +1,8 @@
from eth2spec.test.context import (
spec_state_test,
with_phases,
ALTAIR,
)
from eth2spec.test.helpers.constants import ALTAIR
from eth2spec.test.helpers.merkle import build_proof

View File

@ -1,6 +1,4 @@
from eth2spec.test.context import (
ALTAIR,
MINIMAL,
spec_state_test,
with_configs,
with_phases,
@ -10,6 +8,10 @@ from eth2spec.test.helpers.block import (
build_empty_block,
build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.constants import (
ALTAIR,
MINIMAL,
)
from eth2spec.test.helpers.state import (
next_slots,
state_transition_and_sign_block,
@ -30,7 +32,7 @@ def test_process_light_client_update_not_updated(spec, state):
)
store = spec.LightClientStore(
snapshot=pre_snapshot,
valid_updates=[]
valid_updates=set(),
)
# Block at slot 1 doesn't increase sync committee period, so it won't update snapshot
@ -44,7 +46,8 @@ def test_process_light_client_update_not_updated(spec, state):
body_root=signed_block.message.body.hash_tree_root(),
)
# Sync committee signing the header
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
all_pubkeys = [v.pubkey for v in state.validators]
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
sync_committee_bits = [True] * len(committee)
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
@ -74,7 +77,7 @@ def test_process_light_client_update_not_updated(spec, state):
spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)
assert len(store.valid_updates) == 1
assert store.valid_updates[0] == update
assert store.valid_updates.pop() == update
assert store.snapshot == pre_snapshot
@ -89,7 +92,7 @@ def test_process_light_client_update_timeout(spec, state):
)
store = spec.LightClientStore(
snapshot=pre_snapshot,
valid_updates=[]
valid_updates=set(),
)
# Forward to next sync committee period
@ -109,7 +112,8 @@ def test_process_light_client_update_timeout(spec, state):
)
# Sync committee signing the finalized_block_header
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
all_pubkeys = [v.pubkey for v in state.validators]
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
sync_committee_bits = [True] * len(committee)
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,
@ -154,7 +158,7 @@ def test_process_light_client_update_finality_updated(spec, state):
)
store = spec.LightClientStore(
snapshot=pre_snapshot,
valid_updates=[]
valid_updates=set(),
)
# Change finality
@ -188,7 +192,8 @@ def test_process_light_client_update_finality_updated(spec, state):
)
# Sync committee signing the finalized_block_header
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
all_pubkeys = [v.pubkey for v in state.validators]
committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
sync_committee_bits = [True] * len(committee)
sync_committee_signature = compute_aggregate_sync_committee_signature(
spec,

View File

@ -0,0 +1,214 @@
import random
from collections import defaultdict
from eth2spec.utils.ssz.ssz_typing import Bitvector
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.state import transition_to
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
from eth2spec.test.context import (
with_altair_and_later,
with_configs,
with_state,
)
from eth2spec.test.helpers.constants import (
MINIMAL,
)
rng = random.Random(1337)
def ensure_assignments_in_sync_committee(
spec, state, epoch, sync_committee, active_pubkeys
):
assert len(sync_committee.pubkeys) >= 3
some_pubkeys = rng.sample(sync_committee.pubkeys, 3)
for pubkey in some_pubkeys:
validator_index = active_pubkeys.index(pubkey)
assert spec.is_assigned_to_sync_committee(state, epoch, validator_index)
@with_altair_and_later
@with_state
def test_is_assigned_to_sync_committee(phases, spec, state):
epoch = spec.get_current_epoch(state)
validator_indices = spec.get_active_validator_indices(state, epoch)
validator_count = len(validator_indices)
query_epoch = epoch + 1
next_query_epoch = query_epoch + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
active_pubkeys = [state.validators[index].pubkey for index in validator_indices]
ensure_assignments_in_sync_committee(
spec, state, query_epoch, state.current_sync_committee, active_pubkeys
)
ensure_assignments_in_sync_committee(
spec, state, next_query_epoch, state.next_sync_committee, active_pubkeys
)
sync_committee_pubkeys = set(
list(state.current_sync_committee.pubkeys)
+ list(state.next_sync_committee.pubkeys)
)
disqualified_pubkeys = set(
filter(lambda key: key not in sync_committee_pubkeys, active_pubkeys)
)
# NOTE: only check `disqualified_pubkeys` if SYNC_COMMITTEE_SIZE < validator count
if disqualified_pubkeys:
sample_size = 3
assert validator_count >= sample_size
some_pubkeys = rng.sample(disqualified_pubkeys, sample_size)
for pubkey in some_pubkeys:
validator_index = active_pubkeys.index(pubkey)
is_current = spec.is_assigned_to_sync_committee(
state, query_epoch, validator_index
)
is_next = spec.is_assigned_to_sync_committee(
state, next_query_epoch, validator_index
)
is_current_or_next = is_current or is_next
assert not is_current_or_next
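# For reference, a sketch of the spec function exercised above, along the
# lines of the Altair validator guide (type annotations elided; the canonical
# definition lives in specs/altair/validator.md):
def is_assigned_to_sync_committee_sketch(spec, state, epoch, validator_index):
    sync_committee_period = spec.compute_sync_committee_period(epoch)
    current_period = spec.compute_sync_committee_period(spec.get_current_epoch(state))
    assert sync_committee_period in (current_period, current_period + 1)
    pubkey = state.validators[validator_index].pubkey
    if sync_committee_period == current_period:
        return pubkey in state.current_sync_committee.pubkeys
    return pubkey in state.next_sync_committee.pubkeys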
def _get_sync_committee_signature(
spec,
state,
target_slot,
target_block_root,
subcommittee_index,
index_in_subcommittee,
):
subcommittee_size = spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
sync_committee_index = (
subcommittee_index * subcommittee_size + index_in_subcommittee
)
pubkey = state.current_sync_committee.pubkeys[sync_committee_index]
privkey = pubkey_to_privkey[pubkey]
domain = spec.get_domain(
state,
spec.DOMAIN_SYNC_COMMITTEE,
)
signing_data = spec.compute_signing_root(target_block_root, domain)
return bls.Sign(privkey, spec.hash_tree_root(signing_data))
@only_with_bls()
@with_altair_and_later
@with_configs([MINIMAL], reason="too slow")
@with_state
def test_process_sync_committee_contributions(phases, spec, state):
# skip over slots at genesis
transition_to(spec, state, state.slot + 3)
# build a block and attempt to assemble a sync aggregate
# from some sync committee contributions
block = build_empty_block(spec, state)
previous_slot = state.slot - 1
target_block_root = spec.get_block_root_at_slot(state, previous_slot)
aggregation_bits = Bitvector[
spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
]()
aggregation_index = 0
aggregation_bits[aggregation_index] = True
contributions = [
spec.SyncCommitteeContribution(
slot=block.slot,
beacon_block_root=target_block_root,
subcommittee_index=i,
aggregation_bits=aggregation_bits,
signature=_get_sync_committee_signature(
spec, state, previous_slot, target_block_root, i, aggregation_index
),
)
for i in range(spec.SYNC_COMMITTEE_SUBNET_COUNT)
]
# ensure the block has an empty sync aggregate...
empty_sync_aggregate = spec.SyncAggregate()
empty_sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
assert block.body.sync_aggregate == empty_sync_aggregate
spec.process_sync_committee_contributions(block, set(contributions))
# and that after processing, it is no longer empty
assert len(block.body.sync_aggregate.sync_committee_bits) != 0
assert (
block.body.sync_aggregate.sync_committee_signature != spec.G2_POINT_AT_INFINITY
)
# moreover, ensure the sync aggregate is valid if the block is accepted
spec.process_block(state, block)
def _validator_index_for_pubkey(state, pubkey):
return list(map(lambda v: v.pubkey, state.validators)).index(pubkey)
def _subnet_for_sync_committee_index(spec, i):
return i // (spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT)
def _get_expected_subnets_by_pubkey(sync_committee_members):
# Build deduplicated set for each pubkey
expected_subnets_by_pubkey = defaultdict(set)
for (subnet, pubkey) in sync_committee_members:
expected_subnets_by_pubkey[pubkey].add(subnet)
return expected_subnets_by_pubkey
@with_altair_and_later
@with_configs([MINIMAL], reason="too slow")
@with_state
def test_compute_subnets_for_sync_committee(state, spec, phases):
# Transition to the head of the next period
transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
assert (
spec.compute_sync_committee_period(spec.get_current_epoch(state))
== spec.compute_sync_committee_period(next_slot_epoch)
)
some_sync_committee_members = list(
(
_subnet_for_sync_committee_index(spec, i),
pubkey,
)
# use current_sync_committee
for i, pubkey in enumerate(state.current_sync_committee.pubkeys)
)
expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)
for _, pubkey in some_sync_committee_members:
validator_index = _validator_index_for_pubkey(state, pubkey)
subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
expected_subnets = expected_subnets_by_pubkey[pubkey]
assert subnets == expected_subnets
@with_altair_and_later
@with_configs([MINIMAL], reason="too slow")
@with_state
def test_compute_subnets_for_sync_committee_slot_period_boundary(state, spec, phases):
# Transition to the end of the period
transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)
next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
assert (
spec.compute_sync_committee_period(spec.get_current_epoch(state))
!= spec.compute_sync_committee_period(next_slot_epoch)
)
some_sync_committee_members = list(
(
_subnet_for_sync_committee_index(spec, i),
pubkey,
)
# use next_sync_committee
for i, pubkey in enumerate(state.next_sync_committee.pubkeys)
)
expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)
for _, pubkey in some_sync_committee_members:
validator_index = _validator_index_for_pubkey(state, pubkey)
subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
expected_subnets = expected_subnets_by_pubkey[pubkey]
assert subnets == expected_subnets
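# Sketch of the function under test, following the Altair validator guide
# (details may differ from the generated spec). The slot + 1 period check is
# exactly what the boundary test above exercises:
def compute_subnets_for_sync_committee_sketch(spec, state, validator_index):
    next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
    if (spec.compute_sync_committee_period(spec.get_current_epoch(state))
            == spec.compute_sync_committee_period(next_slot_epoch)):
        sync_committee = state.current_sync_committee  # mid-period
    else:
        sync_committee = state.next_sync_committee  # period boundary
    target_pubkey = state.validators[validator_index].pubkey
    subcommittee_size = spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
    return set(
        index // subcommittee_size
        for index, pubkey in enumerate(sync_committee.pubkeys)
        if pubkey == target_pubkey
    )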

View File

@ -2,14 +2,19 @@ import pytest
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.merge import spec as spec_merge
from eth2spec.utils import bls
from .exceptions import SkippedTest
from .helpers.constants import (
PHASE0, ALTAIR, MERGE,
ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_MERGE,
)
from .helpers.genesis import create_genesis_state
from .utils import vector_test, with_meta_tags
from .utils import vector_test, with_meta_tags, build_transition_test
from random import Random
from typing import Any, Callable, NewType, Sequence, TypedDict, Protocol
from typing import Any, Callable, Sequence, TypedDict, Protocol
from lru import LRU
@ -19,32 +24,9 @@ from importlib import reload
def reload_specs():
reload(spec_phase0)
reload(spec_altair)
reload(spec_merge)
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
SpecForkName = NewType("SpecForkName", str)
ConfigName = NewType("ConfigName", str)
PHASE0 = SpecForkName('phase0')
ALTAIR = SpecForkName('altair')
# Experimental phases (not included in default "ALL_PHASES"):
MERGE = SpecForkName('merge')
SHARDING = SpecForkName('sharding')
CUSTODY_GAME = SpecForkName('custody_game')
DAS = SpecForkName('das')
ALL_PHASES = (PHASE0, ALTAIR)
MAINNET = ConfigName('mainnet')
MINIMAL = ConfigName('minimal')
ALL_CONFIGS = (MINIMAL, MAINNET)
# The forks that output to the test vectors.
TESTGEN_FORKS = (PHASE0, ALTAIR)
# TODO: currently phases are defined as python modules.
# It would be better if they would be more well-defined interfaces for stronger typing.
@ -61,24 +43,23 @@ class SpecAltair(Spec):
...
class SpecMerge(Spec):
...
class SpecForks(TypedDict, total=False):
PHASE0: SpecPhase0
ALTAIR: SpecAltair
MERGE: SpecMerge
def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Callable[[Any], int],
spec: Spec, phases: SpecForks):
p0 = phases[PHASE0]
balances = balances_fn(p0)
activation_threshold = threshold_fn(p0)
state = create_genesis_state(spec=p0, validator_balances=balances,
phase = phases[spec.fork]
balances = balances_fn(phase)
activation_threshold = threshold_fn(phase)
state = create_genesis_state(spec=phase, validator_balances=balances,
activation_threshold=activation_threshold)
# TODO: upgrade to merge spec, and later sharding.
if spec.fork == ALTAIR:
state = phases[ALTAIR].upgrade_to_altair(state)
return state
@ -331,7 +312,7 @@ def with_phases(phases, other_phases=None):
return None
run_phases = [phase]
if PHASE0 not in run_phases and ALTAIR not in run_phases:
if PHASE0 not in run_phases and ALTAIR not in run_phases and MERGE not in run_phases:
dump_skipping_message("none of the recognized phases are executable, skipping test.")
return None
@ -349,6 +330,8 @@ def with_phases(phases, other_phases=None):
phase_dir[PHASE0] = spec_phase0
if ALTAIR in available_phases:
phase_dir[ALTAIR] = spec_altair
if MERGE in available_phases:
phase_dir[MERGE] = spec_merge
# return is ignored whenever multiple phases are ran.
# This return is for test generators to emit python generators (yielding test vector outputs)
@ -356,6 +339,8 @@ def with_phases(phases, other_phases=None):
ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
if ALTAIR in run_phases:
ret = fn(spec=spec_altair, phases=phase_dir, *args, **kw)
if MERGE in run_phases:
ret = fn(spec=spec_merge, phases=phase_dir, *args, **kw)
# TODO: merge, sharding, custody_game and das are not executable yet.
# Tests that specify these features will not run, and get ignored for these specific phases.
@ -381,8 +366,55 @@ def with_configs(configs, reason=None):
def is_post_altair(spec):
# TODO: everything runs in parallel to Altair.
# After features are rebased on the Altair fork, this can be reduced to just PHASE0.
if spec.fork in [PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS]:
if spec.fork == MERGE: # TODO: remove parallel Altair-Merge condition after rebase.
return False
if spec.fork in FORKS_BEFORE_ALTAIR:
return False
return True
def is_post_merge(spec):
if spec.fork == ALTAIR: # TODO: remove parallel Altair-Merge condition after rebase.
return False
if spec.fork in FORKS_BEFORE_MERGE:
return False
return True
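# Sanity sketch of the two helpers above (each spec module exposes `.fork`):
#   is_post_altair(spec_phase0) -> False  # PHASE0 is in FORKS_BEFORE_ALTAIR
#   is_post_altair(spec_merge)  -> False  # parallel Altair/Merge condition
#   is_post_merge(spec_altair)  -> False  # parallel Altair/Merge condition
#   is_post_merge(spec_merge)   -> True   # MERGE not in FORKS_BEFORE_MERGE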
with_altair_and_later = with_phases([ALTAIR]) # TODO: include Merge, but not until Merge work is rebased.
with_merge_and_later = with_phases([MERGE])
def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
"""
A decorator to construct a "transition" test from one fork of the eth2 spec
to another.
Decorator assumes a transition from the `pre_fork_name` fork to the
`post_fork_name` fork. The user can supply a `fork_epoch` at which the
fork occurs, or compute one during the test (yielding it to the generator)
if more custom behavior is desired.
A test using this decorator should expect to receive as parameters:
`state`: the default state constructed for the `pre_fork_name` fork
according to the `with_state` decorator.
`fork_epoch`: the `fork_epoch` provided to this decorator, if given.
`spec`: the version of the eth2 spec corresponding to `pre_fork_name`.
`post_spec`: the version of the eth2 spec corresponding to `post_fork_name`.
`pre_tag`: a function to tag data as belonging to `pre_fork_name` fork.
Used to discriminate data during consumption of the generated spec tests.
`post_tag`: a function to tag data as belonging to `post_fork_name` fork.
Used to discriminate data during consumption of the generated spec tests.
"""
def _wrapper(fn):
@with_phases([pre_fork_name], other_phases=[post_fork_name])
@spec_test
@with_state
def _adapter(*args, **kwargs):
wrapped = build_transition_test(fn,
pre_fork_name,
post_fork_name,
fork_epoch=fork_epoch)
return wrapped(*args, **kwargs)
return _adapter
return _wrapper
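# Hedged usage sketch (hypothetical test; mirrors the transition tests earlier
# in this commit). The decorated generator receives both spec versions plus
# the tagging helpers described in the docstring:
#
#   @fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
#   def test_example_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
#       yield "pre", state
#       ...  # blocks pre-fork with `spec`, fork, then continue with `post_spec`
#       yield "post", state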

View File

@ -1,9 +1,9 @@
from eth2spec.test.context import (
CUSTODY_GAME,
with_phases,
spec_state_test,
always_bls,
)
from eth2spec.test.helpers.constants import CUSTODY_GAME
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.attestations import (
run_attestation_processing,

View File

@ -6,10 +6,12 @@ from eth2spec.test.helpers.custody import (
from eth2spec.test.helpers.attestations import (
get_valid_on_time_attestation,
)
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
from eth2spec.test.context import (
from eth2spec.test.helpers.constants import (
CUSTODY_GAME,
MINIMAL,
)
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
from eth2spec.test.context import (
expect_assertion_error,
disable_process_reveal_deadlines,
spec_state_test,

View File

@ -1,6 +1,6 @@
from eth2spec.test.helpers.constants import CUSTODY_GAME
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
from eth2spec.test.context import (
CUSTODY_GAME,
with_phases,
spec_state_test,
expect_assertion_error,

View File

@ -5,12 +5,14 @@ from eth2spec.test.helpers.custody import (
from eth2spec.test.helpers.attestations import (
get_valid_on_time_attestation,
)
from eth2spec.test.helpers.constants import (
CUSTODY_GAME,
MINIMAL,
)
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils.ssz.ssz_typing import ByteList
from eth2spec.test.helpers.state import get_balance, transition_to
from eth2spec.test.context import (
MINIMAL,
CUSTODY_GAME,
with_phases,
spec_state_test,
expect_assertion_error,

View File

@ -1,7 +1,7 @@
from eth2spec.test.helpers.constants import CUSTODY_GAME
from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
from eth2spec.test.helpers.state import next_epoch_via_block, get_balance
from eth2spec.test.context import (
CUSTODY_GAME,
with_phases,
spec_state_test,
expect_assertion_error,

View File

@ -7,13 +7,15 @@ from eth2spec.test.helpers.attestations import (
)
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
from eth2spec.test.context import (
CUSTODY_GAME,
MINIMAL,
spec_state_test,
with_phases,
with_configs,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
from eth2spec.test.helpers.constants import (
CUSTODY_GAME,
MINIMAL,
)
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
from eth2spec.test.custody_game.block_processing.test_process_chunk_challenge import (

View File

@ -1,4 +1,4 @@
from eth2spec.test.context import (
from eth2spec.test.helpers.constants import (
CUSTODY_GAME,
)
from eth2spec.test.helpers.custody import (

View File

@ -3,12 +3,14 @@ from eth2spec.test.helpers.custody import (
)
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.context import (
CUSTODY_GAME,
MINIMAL,
with_phases,
with_configs,
spec_state_test,
)
from eth2spec.test.helpers.constants import (
CUSTODY_GAME,
MINIMAL,
)
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
from eth2spec.test.custody_game.block_processing.test_process_custody_key_reveal import (
run_custody_key_reveal_processing,

View File

@ -1,14 +1,16 @@
from typing import Dict, Sequence
from eth2spec.test.context import (
CUSTODY_GAME,
MINIMAL,
with_phases,
spec_state_test,
with_configs,
)
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.constants import (
CUSTODY_GAME,
MINIMAL,
)
from eth2spec.test.helpers.custody import (
get_custody_slashable_test_vector,
get_valid_chunk_challenge,

View File

@ -0,0 +1,42 @@
ALTAIR_FORK_TEST_META_TAGS = {
'fork': 'altair',
}
def run_fork_test(post_spec, pre_state):
# Clean up state to be more realistic
pre_state.current_epoch_attestations = []
yield 'pre', pre_state
post_state = post_spec.upgrade_to_altair(pre_state)
# Stable fields
stable_fields = [
'genesis_time', 'genesis_validators_root', 'slot',
# History
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
# Eth1
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
# Registry
'validators', 'balances',
# Randomness
'randao_mixes',
# Slashings
'slashings',
# Finality
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
assert pre_state.fork.current_version == post_state.fork.previous_version
assert post_state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
yield 'post', post_state

View File

@ -1,4 +1,5 @@
from eth2spec.test.context import is_post_altair
from eth2spec.test.context import is_post_altair, is_post_merge
from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
@ -94,6 +95,9 @@ def build_empty_block(spec, state, slot=None):
if is_post_altair(spec):
empty_block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
if is_post_merge(spec):
empty_block.body.execution_payload = build_empty_execution_payload(spec, state)
apply_randao_reveal(spec, state, empty_block)
return empty_block

View File

@ -0,0 +1,34 @@
from .typing import SpecForkName, ConfigName
#
# SpecForkName
#
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
PHASE0 = SpecForkName('phase0')
ALTAIR = SpecForkName('altair')
MERGE = SpecForkName('merge')
# Experimental phases (not included in default "ALL_PHASES"):
SHARDING = SpecForkName('sharding')
CUSTODY_GAME = SpecForkName('custody_game')
DAS = SpecForkName('das')
# The forks that pytest runs with.
ALL_PHASES = (PHASE0, ALTAIR, MERGE)
# The forks that output to the test vectors.
TESTGEN_FORKS = (PHASE0, ALTAIR, MERGE)
# TODO: everything runs in parallel to Altair.
# After features are rebased on the Altair fork, this can be reduced to just PHASE0.
FORKS_BEFORE_ALTAIR = (PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS)
# TODO: when rebasing Merge onto Altair, add ALTAIR to this tuple.
FORKS_BEFORE_MERGE = (PHASE0,)
#
# Config
#
MAINNET = ConfigName('mainnet')
MINIMAL = ConfigName('minimal')
ALL_CONFIGS = (MINIMAL, MAINNET)

View File

@ -9,6 +9,7 @@ def get_process_calls(spec):
# or the old function will stick around.
return [
'process_justification_and_finalization',
'process_inactivity_updates', # altair
'process_rewards_and_penalties',
'process_registry_updates',
'process_reveal_deadlines', # custody game
@ -26,7 +27,7 @@ def get_process_calls(spec):
'process_participation_flag_updates' if is_post_altair(spec) else (
'process_participation_record_updates'
),
'process_sync_committee_updates',
'process_sync_committee_updates', # altair
'process_shard_epoch_increment' # sharding
]

View File

@ -0,0 +1,59 @@
def build_empty_execution_payload(spec, state):
"""
Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
"""
latest = state.latest_execution_payload_header
timestamp = spec.compute_time_at_slot(state, state.slot)
empty_txs = spec.List[spec.OpaqueTransaction, spec.MAX_EXECUTION_TRANSACTIONS]()
payload = spec.ExecutionPayload(
block_hash=spec.Hash32(),
parent_hash=latest.block_hash,
coinbase=spec.Bytes20(),
state_root=latest.state_root, # no changes to the state
number=latest.number + 1,
gas_limit=latest.gas_limit, # retain same limit
gas_used=0, # empty block, 0 gas
timestamp=timestamp,
receipt_root=b"no receipts here" + b"\x00" * 16, # TODO: root of empty MPT may be better.
logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](), # TODO: zeroed logs bloom for empty logs ok?
transactions=empty_txs,
)
# TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))
return payload
def get_execution_payload_header(spec, execution_payload):
return spec.ExecutionPayloadHeader(
block_hash=execution_payload.block_hash,
parent_hash=execution_payload.parent_hash,
coinbase=execution_payload.coinbase,
state_root=execution_payload.state_root,
number=execution_payload.number,
gas_limit=execution_payload.gas_limit,
gas_used=execution_payload.gas_used,
timestamp=execution_payload.timestamp,
receipt_root=execution_payload.receipt_root,
logs_bloom=execution_payload.logs_bloom,
transactions_root=spec.hash_tree_root(execution_payload.transactions)
)
def build_state_with_incomplete_transition(spec, state):
return build_state_with_execution_payload_header(spec, state, spec.ExecutionPayloadHeader())
def build_state_with_complete_transition(spec, state):
pre_state_payload = build_empty_execution_payload(spec, state)
payload_header = get_execution_payload_header(spec, pre_state_payload)
return build_state_with_execution_payload_header(spec, state, payload_header)
def build_state_with_execution_payload_header(spec, state, execution_payload_header):
pre_state = state.copy()
pre_state.latest_execution_payload_header = execution_payload_header
return pre_state
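# Hedged usage sketch of the helpers above (assumes a Merge `spec`/`state`
# pair and the `next_slot` helper from eth2spec.test.helpers.state):
#
#   state = build_state_with_complete_transition(spec, state)
#   next_slot(spec, state)
#   payload = build_empty_execution_payload(spec, state)  # builds on state.latest_execution_payload_header
#   header = get_execution_payload_header(spec, payload)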

View File

@ -1,7 +1,5 @@
from eth_utils import encode_hex
from eth2spec.phase0 import spec as phase0_spec
def get_anchor_root(spec, state):
anchor_block_header = state.latest_block_header.copy()
@ -58,8 +56,7 @@ def get_genesis_forkchoice_store(spec, genesis_state):
def get_genesis_forkchoice_store_and_block(spec, genesis_state):
assert genesis_state.slot == spec.GENESIS_SLOT
# The genesis block must be a Phase 0 `BeaconBlock`
genesis_block = phase0_spec.BeaconBlock(state_root=genesis_state.hash_tree_root())
genesis_block = spec.BeaconBlock(state_root=genesis_state.hash_tree_root())
return spec.get_forkchoice_store(genesis_state, genesis_block), genesis_block

View File

@ -1,3 +1,8 @@
from eth2spec.test.helpers.constants import (
ALTAIR,
FORKS_BEFORE_ALTAIR,
MERGE,
)
from eth2spec.test.helpers.keys import pubkeys
@ -20,6 +25,13 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
deposit_root = b'\x42' * 32
eth1_block_hash = b'\xda' * 32
current_version = spec.GENESIS_FORK_VERSION
if spec.fork == ALTAIR:
current_version = spec.ALTAIR_FORK_VERSION
elif spec.fork == MERGE:
current_version = spec.MERGE_FORK_VERSION
state = spec.BeaconState(
genesis_time=0,
eth1_deposit_index=len(validator_balances),
@ -30,7 +42,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
),
fork=spec.Fork(
previous_version=spec.GENESIS_FORK_VERSION,
current_version=spec.GENESIS_FORK_VERSION,
current_version=current_version,
epoch=spec.GENESIS_EPOCH,
),
latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())),
@ -47,8 +59,18 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
if validator.effective_balance >= activation_threshold:
validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
validator.activation_epoch = spec.GENESIS_EPOCH
if spec.fork not in FORKS_BEFORE_ALTAIR:
state.previous_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
state.current_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
state.inactivity_scores.append(spec.uint64(0))
# Set genesis validators root for domain separation and chain versioning
state.genesis_validators_root = spec.hash_tree_root(state.validators)
if spec.fork not in FORKS_BEFORE_ALTAIR:
# Fill in sync committees
# Note: The same committee is assigned as both the current and the next committee at genesis
state.current_sync_committee = spec.get_next_sync_committee(state)
state.next_sync_committee = spec.get_next_sync_committee(state)
return state

View File

@ -0,0 +1,5 @@
from random import Random
def randomize_inactivity_scores(spec, state, minimum=0, maximum=50000, rng=Random(4242)):
state.inactivity_scores = [rng.randint(minimum, maximum) for _ in range(len(state.validators))]

View File

@ -0,0 +1,113 @@
from random import Random
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
from eth2spec.test.context import is_post_altair
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.state import next_epoch
def set_some_new_deposits(spec, state, rng):
num_validators = len(state.validators)
# Set ~1/10 to just recently deposited
for index in range(num_validators):
# If not already active, skip
if not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)):
continue
if rng.randrange(num_validators) < num_validators // 10:
mock_deposit(spec, state, index)
# Set ~half of selected to eligible for activation
if rng.choice([True, False]):
state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state)
def exit_random_validators(spec, state, rng):
if spec.get_current_epoch(state) < 5:
# Move epochs forward to allow for some validators already exited/withdrawable
for _ in range(5):
next_epoch(spec, state)
current_epoch = spec.get_current_epoch(state)
# Exit ~1/2 of validators
for index in spec.get_active_validator_indices(state, current_epoch):
if rng.choice([True, False]):
continue
validator = state.validators[index]
validator.exit_epoch = rng.choice([current_epoch - 1, current_epoch - 2, current_epoch - 3])
# ~1/2 are withdrawable
if rng.choice([True, False]):
validator.withdrawable_epoch = current_epoch
else:
validator.withdrawable_epoch = current_epoch + 1
def slash_random_validators(spec, state, rng):
# Slash ~1/2 of validators
for index in range(len(state.validators)):
# slash at least one validator
if index == 0 or rng.choice([True, False]):
spec.slash_validator(state, index)
def randomize_epoch_participation(spec, state, epoch, rng):
assert epoch in (spec.get_current_epoch(state), spec.get_previous_epoch(state))
if not is_post_altair(spec):
if epoch == spec.get_current_epoch(state):
pending_attestations = state.current_epoch_attestations
else:
pending_attestations = state.previous_epoch_attestations
for pending_attestation in pending_attestations:
# ~1/3 have bad target
if rng.randint(0, 2) == 0:
pending_attestation.data.target.root = b'\x55' * 32
# ~1/3 have bad head
if rng.randint(0, 2) == 0:
pending_attestation.data.beacon_block_root = b'\x66' * 32
# ~50% participation
pending_attestation.aggregation_bits = [rng.choice([True, False])
for _ in pending_attestation.aggregation_bits]
# Random inclusion delay
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
else:
if epoch == spec.get_current_epoch(state):
epoch_participation = state.current_epoch_participation
else:
epoch_participation = state.previous_epoch_participation
for index in range(len(state.validators)):
# ~1/3 have bad head or bad target or not timely enough
is_timely_correct_head = rng.randint(0, 2) != 0
flags = epoch_participation[index]
def set_flag(index, value):
nonlocal flags
flag = spec.ParticipationFlags(2**index)
if value:
flags |= flag
else:
flags &= 0xff ^ flag
set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
if is_timely_correct_head:
# If timely head, then must be timely target
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
# If timely head, then must be timely source
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
else:
# ~50% of remaining have bad target or not timely enough
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
# ~50% of remaining have bad source or not timely enough
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
epoch_participation[index] = flags
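# Illustration only (hypothetical helper, not used by the tests): a worked
# example of the flag arithmetic above, with the Altair flag indices
# TIMELY_SOURCE = 0, TIMELY_TARGET = 1, TIMELY_HEAD = 2.
def _participation_flag_arithmetic_example():
    flags = 0b000
    flags |= 2**2          # set head            -> 0b100
    flags |= 2**1          # head implies target -> 0b110
    flags |= 2**0          # head implies source -> 0b111
    flags &= 0xff ^ 2**2   # clear head          -> 0b011
    assert flags == 0b011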
def randomize_attestation_participation(spec, state, rng=Random(8020)):
cached_prepare_state_with_attestations(spec, state)
randomize_epoch_participation(spec, state, spec.get_previous_epoch(state), rng)
randomize_epoch_participation(spec, state, spec.get_current_epoch(state), rng)
def randomize_state(spec, state, rng=Random(8020)):
set_some_new_deposits(spec, state, rng)
exit_random_validators(spec, state, rng)
slash_random_validators(spec, state, rng)
randomize_attestation_participation(spec, state, rng)

View File

@ -3,9 +3,16 @@ from lru import LRU
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.test.context import is_post_altair
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.state import next_epoch
from eth2spec.test.helpers.state import (
next_epoch,
)
from eth2spec.test.helpers.random import (
set_some_new_deposits, exit_random_validators, slash_random_validators,
randomize_state,
)
from eth2spec.test.helpers.attestations import (
cached_prepare_state_with_attestations,
)
from eth2spec.utils.ssz.ssz_typing import Container, uint64, List
@ -62,13 +69,13 @@ def run_deltas(spec, state):
if is_post_altair(spec):
def get_source_deltas(state):
return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX, spec.TIMELY_SOURCE_WEIGHT)
return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX)
def get_head_deltas(state):
return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX, spec.TIMELY_HEAD_WEIGHT)
return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX)
def get_target_deltas(state):
return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX, spec.TIMELY_TARGET_WEIGHT)
return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX)
yield from run_attestation_component_deltas(
spec,
@ -133,14 +140,23 @@ def run_attestation_component_deltas(spec, state, component_delta_fn, matching_a
validator = state.validators[index]
enough_for_reward = has_enough_for_reward(spec, state, index)
if index in matching_indices and not validator.slashed:
if enough_for_reward:
assert rewards[index] > 0
if is_post_altair(spec):
if not spec.is_in_inactivity_leak(state) and enough_for_reward:
assert rewards[index] > 0
else:
assert rewards[index] == 0
else:
assert rewards[index] == 0
if enough_for_reward:
assert rewards[index] > 0
else:
assert rewards[index] == 0
assert penalties[index] == 0
else:
assert rewards[index] == 0
if enough_for_reward:
if is_post_altair(spec) and 'head' in deltas_name:
assert penalties[index] == 0
elif enough_for_reward:
assert penalties[index] > 0
else:
assert penalties[index] == 0
@ -225,18 +241,19 @@ def run_get_inactivity_penalty_deltas(spec, state):
if not is_post_altair(spec):
cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
else:
base_penalty = sum(
base_reward * numerator // spec.WEIGHT_DENOMINATOR
for (_, numerator) in spec.get_flag_indices_and_weights()
)
if not has_enough_for_reward(spec, state, index):
assert penalties[index] == 0
elif index in matching_attesting_indices or not has_enough_for_leak_penalty(spec, state, index):
assert penalties[index] == base_penalty
if is_post_altair(spec):
assert penalties[index] == 0
else:
assert penalties[index] == base_penalty
else:
assert penalties[index] > base_penalty
if is_post_altair(spec):
assert penalties[index] > 0
else:
assert penalties[index] > base_penalty
else:
assert penalties[index] == 0
@ -255,7 +272,6 @@ _cache_dict = LRU(size=10)
def leaking(epochs=None):
def deco(fn):
def entry(*args, spec, state, **kw):
# If the pre-state is not already known in the LRU, then take it,
@ -275,49 +291,6 @@ def leaking(epochs=None):
return deco
def set_some_new_deposits(spec, state, rng):
num_validators = len(state.validators)
# Set ~1/10 to just recently deposited
for index in range(num_validators):
# If not already active, skip
if not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)):
continue
if rng.randrange(num_validators) < num_validators // 10:
mock_deposit(spec, state, index)
# Set ~half of selected to eligible for activation
if rng.choice([True, False]):
state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state)
def exit_random_validators(spec, state, rng):
if spec.get_current_epoch(state) < 5:
# Move epochs forward to allow for some validators already exited/withdrawable
for _ in range(5):
next_epoch(spec, state)
current_epoch = spec.get_current_epoch(state)
# Exit ~1/2 of validators
for index in spec.get_active_validator_indices(state, current_epoch):
if rng.choice([True, False]):
continue
validator = state.validators[index]
validator.exit_epoch = rng.choice([current_epoch - 1, current_epoch - 2, current_epoch - 3])
# ~1/2 are withdrawable
if rng.choice([True, False]):
validator.withdrawable_epoch = current_epoch
else:
validator.withdrawable_epoch = current_epoch + 1
def slash_random_validators(spec, state, rng):
# Slash ~1/2 of validators
for index in range(len(state.validators)):
# slash at least one validator
if index == 0 or rng.choice([True, False]):
spec.slash_validator(state, index)
def run_test_empty(spec, state):
# Do not add any attestations to state
@ -521,49 +494,5 @@ def run_test_all_balances_too_low_for_reward(spec, state):
def run_test_full_random(spec, state, rng=Random(8020)):
set_some_new_deposits(spec, state, rng)
exit_random_validators(spec, state, rng)
slash_random_validators(spec, state, rng)
cached_prepare_state_with_attestations(spec, state)
if not is_post_altair(spec):
for pending_attestation in state.previous_epoch_attestations:
# ~1/3 have bad target
if rng.randint(0, 2) == 0:
pending_attestation.data.target.root = b'\x55' * 32
# ~1/3 have bad head
if rng.randint(0, 2) == 0:
pending_attestation.data.beacon_block_root = b'\x66' * 32
# ~50% participation
pending_attestation.aggregation_bits = [rng.choice([True, False])
for _ in pending_attestation.aggregation_bits]
# Random inclusion delay
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
else:
for index in range(len(state.validators)):
# ~1/3 have bad head or bad target or not timely enough
is_timely_correct_head = rng.randint(0, 2) != 0
flags = state.previous_epoch_participation[index]
def set_flag(index, value):
nonlocal flags
flag = spec.ParticipationFlags(2**index)
if value:
flags |= flag
else:
flags &= 0xff ^ flag
set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
if is_timely_correct_head:
# If timely head, then must be timely target
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
# If timely head, then must be timely source
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
else:
# ~50% of remaining have bad target or not timely enough
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
# ~50% of remaining have bad source or not timely enough
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
state.previous_epoch_participation[index] = flags
randomize_state(spec, state, rng)
yield from run_deltas(spec, state)

View File

@ -42,9 +42,10 @@ def transition_to_slot_via_block(spec, state, slot):
def transition_to_valid_shard_slot(spec, state):
"""
Transition to slot `spec.SHARDING_FORK_SLOT + 1` and fork at `spec.SHARDING_FORK_SLOT`.
Transition to slot `compute_start_slot_at_epoch(spec.SHARDING_FORK_EPOCH) + 1`
and fork at `compute_start_slot_at_epoch(spec.SHARDING_FORK_EPOCH)`.
"""
transition_to(spec, state, spec.SHARDING_FORK_SLOT)
transition_to(spec, state, spec.compute_start_slot_at_epoch(spec.SHARDING_FORK_EPOCH))
next_slot(spec, state)

View File

@ -0,0 +1,4 @@
from typing import NewType
SpecForkName = NewType("SpecForkName", str)
ConfigName = NewType("ConfigName", str)
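For context, a minimal sketch of how these `NewType` aliases are consumed by the constants helpers imported throughout this diff; the module path and exact assignments are assumptions, not part of this change:
```python
# Minimal sketch, assuming this new module lives at eth2spec.test.helpers.typing.
from eth2spec.test.helpers.typing import SpecForkName, ConfigName

# Fork and config names of the kind imported from eth2spec.test.helpers.constants
# throughout this diff (exact assignments are illustrative):
PHASE0 = SpecForkName('phase0')
ALTAIR = SpecForkName('altair')
MERGE = SpecForkName('merge')

MINIMAL = ConfigName('minimal')
MAINNET = ConfigName('mainnet')
```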

View File

@ -0,0 +1,201 @@
from eth2spec.test.helpers.execution_payload import (
build_empty_execution_payload,
get_execution_payload_header,
build_state_with_incomplete_transition,
build_state_with_complete_transition,
)
from eth2spec.test.context import spec_state_test, expect_assertion_error, with_merge_and_later
from eth2spec.test.helpers.state import next_slot
def run_execution_payload_processing(spec, state, execution_payload, valid=True, execution_valid=True):
"""
Run ``process_execution_payload``, yielding:
- pre-state ('pre')
- execution payload ('execution_payload')
- execution details, to mock EVM execution ('execution.yml', a dict with 'execution_valid' key and boolean value)
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
yield 'pre', state
yield 'execution', {'execution_valid': execution_valid}
yield 'execution_payload', execution_payload
called_new_block = False
class TestEngine(spec.NoopExecutionEngine):
def new_block(self, payload) -> bool:
nonlocal called_new_block, execution_valid
called_new_block = True
assert payload == execution_payload
return execution_valid
if not valid:
expect_assertion_error(lambda: spec.process_execution_payload(state, execution_payload, TestEngine()))
yield 'post', None
return
spec.process_execution_payload(state, execution_payload, TestEngine())
# Make sure we called the engine
assert called_new_block
yield 'post', state
assert state.latest_execution_payload_header == get_execution_payload_header(spec, execution_payload)
@with_merge_and_later
@spec_state_test
def test_success_first_payload(spec, state):
# pre-state
state = build_state_with_incomplete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@with_merge_and_later
@spec_state_test
def test_success_regular_payload(spec, state):
# pre-state
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@with_merge_and_later
@spec_state_test
def test_success_first_payload_with_gap_slot(spec, state):
# pre-state
state = build_state_with_incomplete_transition(spec, state)
next_slot(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@with_merge_and_later
@spec_state_test
def test_success_regular_payload_with_gap_slot(spec, state):
# pre-state
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload)
@with_merge_and_later
@spec_state_test
def test_bad_execution_first_payload(spec, state):
# completely valid payload, but execution itself fails (e.g. block exceeds gas limit)
# pre-state
state = build_state_with_incomplete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False, execution_valid=False)
@with_merge_and_later
@spec_state_test
def test_bad_execution_regular_payload(spec, state):
# completely valid payload, but execution itself fails (e.g. block exceeds gas limit)
# pre-state
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False, execution_valid=False)
@with_merge_and_later
@spec_state_test
def test_bad_parent_hash_regular_payload(spec, state):
# pre-state
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_merge_and_later
@spec_state_test
def test_bad_number_regular_payload(spec, state):
# pre-state
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.number = execution_payload.number + 1
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_merge_and_later
@spec_state_test
def test_bad_everything_regular_payload(spec, state):
# pre-state
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.parent_hash = spec.Hash32()
execution_payload.number = execution_payload.number + 1
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_merge_and_later
@spec_state_test
def test_bad_timestamp_first_payload(spec, state):
# pre-state
state = build_state_with_incomplete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.timestamp = execution_payload.timestamp + 1
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)
@with_merge_and_later
@spec_state_test
def test_bad_timestamp_regular_payload(spec, state):
# pre-state
state = build_state_with_complete_transition(spec, state)
next_slot(spec, state)
# execution payload
execution_payload = build_empty_execution_payload(spec, state)
execution_payload.timestamp = execution_payload.timestamp + 1
yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)

View File

@ -0,0 +1,25 @@
from eth2spec.test.helpers.state import (
state_transition_and_sign_block
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot
)
from eth2spec.test.context import (
with_merge_and_later, spec_state_test
)
@with_merge_and_later
@spec_state_test
def test_empty_block_transition(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
assert len(block.body.execution_payload.transactions) == 0
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
yield 'post', state
# TODO: tests with EVM, mock or replacement?

View File

@ -1,4 +1,5 @@
from eth2spec.test.context import PHASE0, spec_state_test, with_phases
from eth2spec.test.context import spec_state_test, with_phases
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.epoch_processing import (
run_epoch_processing_with
)

View File

@ -1,7 +1,6 @@
from eth_utils import encode_hex
from eth2spec.test.context import (
MINIMAL,
is_post_altair,
spec_state_test,
with_all_phases,
@ -9,6 +8,7 @@ from eth2spec.test.context import (
)
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.fork_choice import (
tick_and_run_on_attestation,
tick_and_run_on_block,

View File

@ -1,11 +1,11 @@
from eth2spec.test.context import (
MINIMAL,
is_post_altair,
single_phase,
spec_test,
with_configs,
with_all_phases,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.deposits import (
prepare_full_genesis_deposits,
prepare_random_genesis_deposits,

View File

@ -1,11 +1,11 @@
from eth2spec.test.context import (
MINIMAL,
is_post_altair,
spec_test,
single_phase,
with_configs,
with_all_phases,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.deposits import (
prepare_full_genesis_deposits,
)

View File

@ -1,4 +1,5 @@
from eth2spec.test.context import PHASE0, with_all_phases, with_phases, spec_state_test
from eth2spec.test.context import with_all_phases, with_phases, spec_state_test
from eth2spec.test.helpers.constants import PHASE0
import eth2spec.test.helpers.rewards as rewards_helpers

View File

@ -1,4 +1,5 @@
from eth2spec.test.context import PHASE0, with_all_phases, with_phases, spec_state_test
from eth2spec.test.context import with_all_phases, with_phases, spec_state_test
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.rewards import leaking
import eth2spec.test.helpers.rewards as rewards_helpers

View File

@ -39,8 +39,16 @@ def test_full_random_3(spec, state):
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_low_balances(spec, state):
yield from rewards_helpers.run_test_full_random(spec, state)
def test_full_random_low_balances_0(spec, state):
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(5050))
@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
def test_full_random_low_balances_1(spec, state):
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(6060))
@with_all_phases
@ -48,4 +56,4 @@ def test_full_random_low_balances(spec, state):
@spec_test
@single_phase
def test_full_random_misc_balances(spec, state):
yield from rewards_helpers.run_test_full_random(spec, state)
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(7070))

View File

@ -24,9 +24,8 @@ from eth2spec.test.helpers.multi_operations import (
run_slash_and_exit,
run_test_full_random_operations,
)
from eth2spec.test.helpers.constants import PHASE0, MINIMAL
from eth2spec.test.context import (
PHASE0, MINIMAL,
spec_test, spec_state_test, dump_skipping_message,
with_phases, with_all_phases, single_phase,
expect_assertion_error, always_bls,
@ -931,8 +930,11 @@ def test_balance_driven_status_transitions(spec, state):
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
# Requires always_bls because the historical root period and sync committee period are the same length,
# so this epoch transition also computes new sync committees, which requires aggregation
@with_all_phases
@spec_state_test
@always_bls
def test_historical_batch(spec, state):
state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1
pre_historical_roots_len = len(state.historical_roots)

View File

@ -1,6 +1,7 @@
from eth2spec.test.context import PHASE0, ALTAIR, with_all_phases, spec_state_test
from eth2spec.test.context import with_all_phases, spec_state_test
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot
from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store
@ -18,7 +19,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
spec.on_attestation(store, attestation)
sample_index = indexed_attestation.attesting_indices[0]
if spec.fork in (PHASE0, ALTAIR):
if spec.fork in (PHASE0, ALTAIR, MERGE):
latest_message = spec.LatestMessage(
epoch=attestation.data.target.epoch,
root=attestation.data.beacon_block_root,

View File

@ -224,14 +224,19 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
next_epoch(spec, state)
spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store)
next_epoch(spec, state)
spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
last_block_root = hash_tree_root(last_signed_block.message)
# Mock justified block in store
# Mock fictitious justified checkpoint in store
store.justified_checkpoint = spec.Checkpoint(
epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
)
next_epoch(spec, state)
spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
# Create new higher justified checkpoint not in branch of store's justified checkpoint
just_block = build_empty_block_for_next_slot(spec, state)
# Slot is same as justified checkpoint so does not trigger an override in the store
just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
store.blocks[just_block.hash_tree_root()] = just_block
# Step time past safe slots
@ -274,14 +279,19 @@ def test_on_block_outside_safe_slots_but_finality(spec, state):
next_epoch(spec, state)
spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store)
next_epoch(spec, state)
spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
last_block_root = hash_tree_root(last_signed_block.message)
# Mock justified block in store
# Mock fictitious justified checkpoint in store
store.justified_checkpoint = spec.Checkpoint(
epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
)
next_epoch(spec, state)
spec.on_tick(store, store.time + state.slot * spec.SECONDS_PER_SLOT)
# Create new higher justified checkpoint not in branch of store's justified checkpoint
just_block = build_empty_block_for_next_slot(spec, state)
# Slot is same as justified checkpoint so does not trigger an override in the store
just_block.slot = spec.compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
store.blocks[just_block.hash_tree_root()] = just_block
# Step time past safe slots
@ -291,13 +301,15 @@ def test_on_block_outside_safe_slots_but_finality(spec, state):
# Mock justified and finalized update in state
just_fin_state = store.block_states[last_block_root]
new_justified = spec.Checkpoint(
epoch=store.justified_checkpoint.epoch + 1,
epoch=spec.compute_epoch_at_slot(just_block.slot) + 1,
root=just_block.hash_tree_root(),
)
assert new_justified.epoch > store.justified_checkpoint.epoch
new_finalized = spec.Checkpoint(
epoch=store.finalized_checkpoint.epoch + 1,
epoch=spec.compute_epoch_at_slot(just_block.slot),
root=just_block.parent_root,
)
assert new_finalized.epoch > store.finalized_checkpoint.epoch
just_fin_state.current_justified_checkpoint = new_justified
just_fin_state.finalized_checkpoint = new_finalized

View File

@ -1,8 +1,8 @@
from eth2spec.test.context import (
spec_state_test,
always_bls, with_phases, with_all_phases,
PHASE0,
)
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.deposits import prepare_state_and_deposit

View File

@ -1,8 +1,8 @@
from eth2spec.test.context import (
SHARDING,
with_phases,
spec_state_test,
)
from eth2spec.test.helpers.constants import SHARDING
from eth2spec.test.helpers.state import next_epoch

View File

@ -1,3 +1,4 @@
import inspect
from typing import Dict, Any
from eth2spec.utils.ssz.ssz_typing import View
from eth2spec.utils.ssz.ssz_impl import serialize
@ -93,3 +94,50 @@ def with_meta_tags(tags: Dict[str, Any]):
yield k, 'meta', v
return entry
return runner
def build_transition_test(fn, pre_fork_name, post_fork_name, fork_epoch=None):
"""
Handles the inner plumbing to generate `transition_test`s.
See that decorator in `context.py` for more information.
"""
def _adapter(*args, **kwargs):
post_spec = kwargs["phases"][post_fork_name]
pre_fork_counter = 0
def pre_tag(obj):
nonlocal pre_fork_counter
pre_fork_counter += 1
return obj
def post_tag(obj):
return obj
yield "post_fork", "meta", post_fork_name
has_fork_epoch = False
if fork_epoch is not None:  # explicit None check so a fork at epoch 0 is still honored
kwargs["fork_epoch"] = fork_epoch
has_fork_epoch = True
yield "fork_epoch", "meta", fork_epoch
# massage args to handle an optional custom state using
# `with_custom_state` decorator
expected_args = inspect.getfullargspec(fn)
if "phases" not in expected_args.kwonlyargs:
kwargs.pop("phases", None)
for part in fn(*args,
post_spec=post_spec,
pre_tag=pre_tag,
post_tag=post_tag,
**kwargs):
if part[0] == "fork_epoch":
has_fork_epoch = True
yield part
assert has_fork_epoch
if pre_fork_counter > 0:
yield "fork_block", "meta", pre_fork_counter - 1
return _adapter
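For orientation, a hedged sketch of the decorator side that the docstring above refers to; the `fork_transition_test` name and its wiring are assumptions for illustration, not verbatim from `context.py`:
```python
# Hypothetical wrapper; `fork_transition_test` is an assumed name, not verbatim from this diff.
def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
    def decorator(fn):
        return build_transition_test(fn, pre_fork_name, post_fork_name, fork_epoch=fork_epoch)
    return decorator

# Usage sketch (argument names follow the keywords passed through by `_adapter` above):
# @fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
# def test_simple_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
#     ...
```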

View File

@ -6,3 +6,6 @@ from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32,
from remerkleable.bitfields import Bitvector, Bitlist
from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList
from remerkleable.core import BasicView, View, Path
Bytes20 = ByteVector[20] # type: ignore
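A quick illustrative use of the new alias; the module path is assumed from this file's imports:
```python
# Illustrative only; module path assumed from this file's imports.
from eth2spec.utils.ssz.ssz_typing import Bytes20

# Bytes20 behaves like the other fixed-size byte vectors, e.g. for a
# 20-byte execution-layer address value.
coinbase = Bytes20(b'\x00' * 20)
assert len(coinbase) == 20
```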

View File

@ -139,6 +139,8 @@ E.g. `pre.ssz_snappy`, `deposit.ssz_snappy`, `post.ssz_snappy`.
Diffing a `pre.ssz_snappy` and `post.ssz_snappy` provides all the information for testing, when decompressed and decoded.
Then the difference between pre and post can be compared to anything that changes the pre state, e.g. `deposit.ssz_snappy`.
Note that by default, the SSZ data is in the given test case's <fork or phase name> version, e.g., if it's an `altair` test case, use the `altair.BeaconState` container to deserialize the given state.
YAML is generally used for test metadata, and for tests that do not use SSZ, e.g. shuffling and BLS tests.
In these cases there is no point in adding special SSZ types, and the size and efficiency of YAML are acceptable.

View File

@ -32,9 +32,8 @@ The provided pre-state is already transitioned to just before the specific sub-t
Sub-transitions:
- `justification_and_finalization`
- `inactivity_penalty_updates`
- `rewards_and_penalties`
- `registry_updates`
- `slashings`
@ -44,5 +43,6 @@ Sub-transitions:
- `randao_mixes_reset`
- `historical_roots_update`
- `participation_record_updates`
- `sync_committee_updates`
The resulting state should match the expected `post` state.
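As a rough sketch of the consumer side, the single named sub-transition is run against the decoded pre-state and the result compared; the decoding and file plumbing below are assumptions, not part of the format:
```python
# Rough consumer sketch; `snappy` (python-snappy) and the file plumbing are assumed.
import snappy

def run_epoch_sub_transition(spec, sub_transition: str, pre_bytes: bytes, post_bytes: bytes):
    state = spec.BeaconState.decode_bytes(snappy.decompress(pre_bytes))
    # e.g. sub_transition == "justification_and_finalization"
    getattr(spec, "process_" + sub_transition)(state)
    expected = spec.BeaconState.decode_bytes(snappy.decompress(post_bytes))
    assert state == expected
```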

View File

@ -33,17 +33,23 @@ This excludes the other parts of the block-transition.
Operations:
| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* |
|-------------------------|-----------------------|----------------------|-----------------------------------------------------------------|
| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` |
| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` |
| `block_header` | `BeaconBlock` | **`block`** | `process_block_header(state, block)` |
| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` |
| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
| `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` |
| `sync_aggregate` | `SyncAggregate` | `sync_aggregate` | `process_sync_committee(state, sync_aggregate)` (new in Altair) |
| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* |
|-------------------------|-----------------------|----------------------|----------------------------------------------------------------------|
| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` |
| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` |
| `block_header` | `BeaconBlock` | **`block`** | `process_block_header(state, block)` |
| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` |
| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
| `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` |
| `sync_aggregate` | `SyncAggregate` | `sync_aggregate` | `process_sync_committee(state, sync_aggregate)` (new in Altair) |
| `execution_payload` | `ExecutionPayload` | `execution_payload` | `process_execution_payload(state, execution_payload)` (new in Merge) |
Note that `block_header` is not strictly an operation (and is a full `Block`), but it is processed in the same manner and hence included here.
The `execution_payload` processing normally requires a `verify_execution_state_transition(execution_payload)`,
a responsibility of an (external) execution engine.
During testing this execution is mocked, and an `execution.yml` is provided instead:
a dict containing an `execution_valid` boolean field with the verification result.
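A minimal sketch of consuming that mocked verdict, assuming standard YAML tooling:
```python
# Minimal sketch, assuming PyYAML and a local execution.yml as described above.
import yaml

with open("execution.yml") as f:
    execution_valid: bool = yaml.safe_load(f)["execution_valid"]

# A mocked engine can then return this verdict instead of running real execution.
```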
The resulting state should match the expected `post` state, or if the `post` state is left blank,
the handler should reject the input operation as invalid.

View File

@ -0,0 +1,72 @@
# Transition testing
Transition tests cover processing the chain across a fork boundary.
Each test case contains a `post_fork` key in the `meta.yaml` that indicates the target fork, which also fixes the fork the test begins in.
Clients should assume forks happen sequentially in the following manner:
0. `phase0`
1. `altair`
For example, if a test case has `post_fork` of `altair`, the test consumer should assume the test begins in `phase0` and use that specification to process the initial state and any blocks up until the fork epoch. After the fork happens, the test consumer should use the specification according to the `altair` fork to process the remaining data.
## Test case format
### `meta.yaml`
```yaml
post_fork: string -- String name of the spec after the fork.
fork_epoch: int -- The epoch at which the fork takes place.
fork_block: int -- Optional. The `<index>` of the last block on the initial fork.
blocks_count: int -- The number of blocks processed in this test.
```
*Note*: There may be a fork transition function to run at the `fork_epoch`.
Refer to the specs for the relevant fork for further details.
### `pre.ssz_snappy`
A SSZ-snappy encoded `BeaconState` according to the specification of
the initial fork, the state before running the block transitions.
### `blocks_<index>.ssz_snappy`
A series of files, with `<index>` in range `[0, blocks_count)`.
Blocks must be processed in order, following the main transition function
(i.e. process slot and epoch transitions in between blocks as normal).
Blocks are encoded as `SignedBeaconBlock`s from the relevant spec version
as indicated by the `post_fork` and `fork_block` data in the `meta.yaml`.
As blocks span fork boundaries, a `fork_block` number is given in
the `meta.yaml` to help resolve which blocks belong to which fork.
The `fork_block` is the index in the test data of the **last** block
of the **initial** fork.
To demonstrate, the following diagram shows slots with `_` and blocks
in those slots as `x`. The fork happens at the epoch delineated by the `|`.
```
x x x x
_ _ _ _ | _ _ _ _
```
The `blocks_count` value in the `meta.yaml` in this case is `4` where the
`fork_block` value in the `meta.yaml` is `1`. If this particular example were
testing the fork from Phase 0 to Altair, blocks with indices `0, 1` represent
`SignedBeaconBlock`s defined in the Phase 0 spec and blocks with indices `2, 3`
represent `SignedBeaconBlock`s defined in the Altair spec.
*Note*: If `fork_block` is missing, then all block data should be
interpreted as belonging to the post fork.
### `post.ssz_snappy`
A SSZ-snappy encoded `BeaconState` according to the specification of
the post fork, the state after running the block transitions.
## Condition
The resulting state should match the expected `post` state.
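A small sketch of resolving which spec a given block index belongs to, following the `fork_block` semantics above; the function and parameter names are illustrative:
```python
# Sketch only: maps a block index to its spec using the `fork_block` index
# described above; names are illustrative.
def spec_for_block_index(index, fork_block, pre_spec, post_spec):
    # `fork_block` is the index of the LAST block of the initial fork;
    # a missing (None) value means every block is post-fork.
    if fork_block is not None and index <= fork_block:
        return pre_spec
    return post_spec
```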

View File

@ -164,7 +164,7 @@ Another example, to generate tests from pytests:
```python
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators

View File

@ -12,7 +12,7 @@ from eth_utils import (
import milagro_bls_binding as milagro_bls
from eth2spec.utils import bls
from eth2spec.test.context import PHASE0
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing

View File

@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)
if __name__ == "__main__":
@ -27,6 +28,10 @@ if __name__ == "__main__":
**phase_0_mods,
} # also run the previous phase 0 tests
# No epoch-processing changes in the Merge, and the previous tests repeat with the new types, so no additional tests are required.
# TODO: rebase onto Altair testing later.
merge_mods = phase_0_mods
# TODO Custody Game testgen is disabled for now
# custody_game_mods = {**{key: 'eth2spec.test.custody_game.epoch_processing.test_process_' + key for key in [
# 'reveal_deadlines',
@ -37,6 +42,7 @@ if __name__ == "__main__":
all_mods = {
PHASE0: phase_0_mods,
ALTAIR: altair_mods,
MERGE: merge_mods,
}
run_state_test_generators(runner_name="epoch_processing", specs=specs, all_mods=all_mods)

View File

@ -1,19 +1,22 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)
if __name__ == "__main__":
phase_0_mods = {'finality': 'eth2spec.test.phase0.finality.test_finality'}
altair_mods = phase_0_mods # No additional altair specific finality tests
altair_mods = phase_0_mods # No additional Altair specific finality tests
merge_mods = phase_0_mods # No additional Merge specific finality tests
all_mods = {
PHASE0: phase_0_mods,
ALTAIR: altair_mods,
MERGE: merge_mods,
}
run_state_test_generators(runner_name="finality", specs=specs, all_mods=all_mods)

View File

@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)
if __name__ == "__main__":
@ -13,10 +14,13 @@ if __name__ == "__main__":
]}
# No additional Altair-specific fork choice tests, yet.
altair_mods = phase_0_mods
# No specific Merge tests yet. TODO: rebase onto Altair testing later.
merge_mods = phase_0_mods
all_mods = {
PHASE0: phase_0_mods,
ALTAIR: altair_mods,
MERGE: merge_mods,
}
run_state_test_generators(runner_name="fork_choice", specs=specs, all_mods=all_mods)

View File

@ -1,7 +1,7 @@
from importlib import reload
from typing import Iterable
from eth2spec.test.context import PHASE0, ALTAIR, MINIMAL, MAINNET
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MINIMAL, MAINNET
from eth2spec.config import config_util
from eth2spec.test.altair.fork import test_fork as test_altair_forks
from eth2spec.phase0 import spec as spec_phase0

View File

@ -1,7 +1,7 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
specs = (spec_phase0, spec_altair)

View File

@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.context import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)
if __name__ == "__main__":
@ -23,6 +24,13 @@ if __name__ == "__main__":
**phase_0_mods,
} # also run the previous phase 0 tests
merge_mods = {
**{key: 'eth2spec.test.merge.block_processing.test_process_' + key for key in [
'execution_payload',
]},
**phase_0_mods, # TODO: runs phase0 tests. Rebase to include `altair_mods` testing later.
}
# TODO Custody Game testgen is disabled for now
# custody_game_mods = {**{key: 'eth2spec.test.custody_game.block_processing.test_process_' + key for key in [
# 'attestation',
@ -35,6 +43,7 @@ if __name__ == "__main__":
all_mods = {
PHASE0: phase_0_mods,
ALTAIR: altair_mods,
MERGE: merge_mods,
}
run_state_test_generators(runner_name="operations", specs=specs, all_mods=all_mods)

Some files were not shown because too many files have changed in this diff.