Merge pull request #1955 from ethereum/dev

release v0.12.2
Danny Ryan 2020-07-24 13:08:05 -06:00 committed by GitHub
commit 447b74d6dc
100 changed files with 4702 additions and 1586 deletions


@@ -35,39 +35,39 @@ commands:
     description: "Restore the cache with pyspec keys"
     steps:
       - restore_cached_venv:
-          venv_name: v19-pyspec
+          venv_name: v22-pyspec
           reqs_checksum: cache-{{ checksum "setup.py" }}
   save_pyspec_cached_venv:
     description: Save a venv into a cache with pyspec keys"
     steps:
       - save_cached_venv:
-          venv_name: v19-pyspec
+          venv_name: v22-pyspec
           reqs_checksum: cache-{{ checksum "setup.py" }}
           venv_path: ./venv
   restore_deposit_contract_compiler_cached_venv:
     description: "Restore the venv from cache for the deposit contract compiler"
     steps:
       - restore_cached_venv:
-          venv_name: v18-deposit-contract-compiler
+          venv_name: v23-deposit-contract-compiler
           reqs_checksum: cache-{{ checksum "deposit_contract/compiler/requirements.txt" }}
   save_deposit_contract_compiler_cached_venv:
     description: "Save the venv to cache for later use of the deposit contract compiler"
     steps:
       - save_cached_venv:
-          venv_name: v18-deposit-contract-compiler
+          venv_name: v23-deposit-contract-compiler
           reqs_checksum: cache-{{ checksum "deposit_contract/compiler/requirements.txt" }}
           venv_path: ./deposit_contract/compiler/venv
   restore_deposit_contract_tester_cached_venv:
     description: "Restore the venv from cache for the deposit contract tester"
     steps:
       - restore_cached_venv:
-          venv_name: v19-deposit-contract-tester
+          venv_name: v22-deposit-contract-tester
           reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "deposit_contract/tester/requirements.txt" }}
   save_deposit_contract_tester_cached_venv:
     description: "Save the venv to cache for later use of the deposit contract tester"
     steps:
       - save_cached_venv:
-          venv_name: v19-deposit-contract-tester
+          venv_name: v22-deposit-contract-tester
           reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "deposit_contract/tester/requirements.txt" }}
           venv_path: ./deposit_contract/tester/venv
 jobs:


@@ -22,6 +22,9 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/phas
 COV_HTML_OUT=.htmlcov
 COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html
+CURRENT_DIR = ${CURDIR}
+LINTER_CONFIG_FILE = $(CURRENT_DIR)/linter.ini
 .PHONY: clean partial_clean all test citest lint generate_tests pyspec install_test open_cov \
         install_deposit_contract_tester test_deposit_contract install_deposit_contract_compiler \
         compile_deposit_contract test_compile_deposit_contract check_toc
@@ -71,7 +74,7 @@ pyspec:
 # installs the packages to run pyspec tests
 install_test:
-    python3 -m venv venv; . venv/bin/activate; pip3 install .[test] .[lint]
+    python3.8 -m venv venv; . venv/bin/activate; pip3 install .[lint]; pip3 install -e .[test]
 test: pyspec
     . venv/bin/activate; cd $(PY_SPEC_DIR); \
@@ -101,9 +104,8 @@ codespell:
 lint: pyspec
     . venv/bin/activate; cd $(PY_SPEC_DIR); \
-    flake8 --ignore=E252,W504,W503 --max-line-length=120 ./eth2spec \
-    && cd ./eth2spec && mypy --follow-imports=silent --warn-unused-ignores --ignore-missing-imports --check-untyped-defs --disallow-incomplete-defs --disallow-untyped-defs -p phase0 \
-    && mypy --follow-imports=silent --warn-unused-ignores --ignore-missing-imports --check-untyped-defs --disallow-incomplete-defs --disallow-untyped-defs -p phase1;
+    flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
+    && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.phase1
 install_deposit_contract_tester:
     cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt
@@ -123,8 +125,6 @@ test_compile_deposit_contract:
     cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \
     python3.7 -m pytest .
-CURRENT_DIR = ${CURDIR}
 # Runs a generator, identified by param 1
 define run_generator
     # Started!


@@ -2,7 +2,7 @@
 [![Join the chat at https://discord.gg/hpFs23p](https://img.shields.io/badge/chat-on%20discord-blue.svg)](https://discord.gg/hpFs23p) [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-To learn more about sharding and Ethereum 2.0 (Serenity), see the [sharding FAQ](https://github.com/ethereum/wiki/wiki/Sharding-FAQ) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
+To learn more about sharding and Ethereum 2.0 (Serenity), see the [sharding FAQ](https://eth.wiki/sharding/Sharding-FAQs) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
 This repository hosts the current Eth2 specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests.


@@ -32,4 +32,4 @@ Each preset is a key-value mapping.
 Presets may contain comments to describe the values.
-See [`mainnet.yaml`](./mainnet.yaml) for a complete example.
+See [`mainnet_phase0.yaml`](./mainnet_phase0.yaml) for a complete example.


@@ -51,6 +51,9 @@ SECONDS_PER_ETH1_BLOCK: 14
 # Deposit contract
 # ---------------------------------------------------------------
+# Ethereum PoW Mainnet
+DEPOSIT_CHAIN_ID: 1
+DEPOSIT_NETWORK_ID: 1
 # **TBD**
 DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
@@ -151,70 +154,3 @@ DOMAIN_DEPOSIT: 0x03000000
 DOMAIN_VOLUNTARY_EXIT: 0x04000000
 DOMAIN_SELECTION_PROOF: 0x05000000
 DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
-# Phase 1
-DOMAIN_SHARD_PROPOSAL: 0x80000000
-DOMAIN_SHARD_COMMITTEE: 0x81000000
-DOMAIN_LIGHT_CLIENT: 0x82000000
-DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
-# Phase 1: Upgrade from Phase 0
-# ---------------------------------------------------------------
-PHASE_1_FORK_VERSION: 0x01000000
-# [STUB]
-PHASE_1_GENESIS_SLOT: 32
-INITIAL_ACTIVE_SHARDS: 64
-# Phase 1: General
-# ---------------------------------------------------------------
-# 2**10` (= 1024)
-MAX_SHARDS: 1024
-# 2**3 (= 8) | online epochs | ~51 min
-ONLINE_PERIOD: 8
-# 2**7 (= 128)
-LIGHT_CLIENT_COMMITTEE_SIZE: 128
-# 2**8 (= 256) | epochs | ~27 hours
-LIGHT_CLIENT_COMMITTEE_PERIOD: 256
-# 2**18 (= 262,144)
-SHARD_BLOCK_CHUNK_SIZE: 262144
-# 2**2 (= 4)
-MAX_SHARD_BLOCK_CHUNKS: 4
-# 3 * 2**16` (= 196,608)
-TARGET_SHARD_BLOCK_SIZE: 196608
-# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length.
-SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
-# len(SHARD_BLOCK_OFFSETS)
-MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
-# 2**14 (= 16,384) Gwei
-MAX_GASPRICE: 16384
-# 2**3 (= 8) Gwei
-MIN_GASPRICE: 8
-# 2**3 (= 8)
-GASPRICE_ADJUSTMENT_COEFFICIENT: 8
-# Phase 1: Custody Game
-# ---------------------------------------------------------------
-# Time parameters
-# 2**1 (= 2) epochs, 12.8 minutes
-RANDAO_PENALTY_EPOCHS: 2
-# 2**14 (= 16,384) epochs ~73 days
-EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 16384
-# 2**11 (= 2,048) epochs, ~9 days
-EPOCHS_PER_CUSTODY_PERIOD: 2048
-# 2**11 (= 2,048) epochs, ~9 days
-CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048
-# 2**7 (= 128) epochs, ~14 hours
-MAX_REVEAL_LATENESS_DECREMENT: 128
-# Max operations
-# 2**8 (= 256)
-MAX_CUSTODY_KEY_REVEALS: 256
-MAX_EARLY_DERIVED_SECRET_REVEALS: 1
-MAX_CUSTODY_SLASHINGS: 1
-# Reward and penalty quotients
-EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
-# 2**8 (= 256)
-MINOR_REWARD_QUOTIENT: 256

configs/mainnet/phase1.yaml (new file, 102 lines)

@@ -0,0 +1,102 @@
# Mainnet preset - phase 1
# phase1-fork
# ---------------------------------------------------------------
PHASE_1_FORK_VERSION: 0x01000000
# [STUB]
PHASE_1_GENESIS_SLOT: 32
INITIAL_ACTIVE_SHARDS: 64
# beacon-chain
# ---------------------------------------------------------------
# Misc
# 2**10 (= 1,024)
MAX_SHARDS: 1024
# 2**7 (= 128)
LIGHT_CLIENT_COMMITTEE_SIZE: 128
# 2**3 (= 8)
GASPRICE_ADJUSTMENT_COEFFICIENT: 8
# Shard block configs
# 2**20 (= 1,048,576) bytes
MAX_SHARD_BLOCK_SIZE: 1048576
# 2**18 (= 262,144) bytes
TARGET_SHARD_BLOCK_SIZE: 262144
# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length.
SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
# len(SHARD_BLOCK_OFFSETS)
MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
# 2**12 (= 4,096)
BYTES_PER_CUSTODY_CHUNK: 4096
# ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK)
CUSTODY_RESPONSE_DEPTH: 8
# Gwei values
# 2**14 (= 16,384) Gwei
MAX_GASPRICE: 16384
# 2**3 (= 8) Gwei
MIN_GASPRICE: 8
# Time parameters
# 2**3 (= 8) | online epochs
ONLINE_PERIOD: 8
# 2**8 (= 256) | epochs
LIGHT_CLIENT_COMMITTEE_PERIOD: 256
# Max operations per block
# 2**20 (= 1,048,576)
MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS: 1048576
# Domain types
DOMAIN_SHARD_PROPOSAL: 0x80000000
DOMAIN_SHARD_COMMITTEE: 0x81000000
DOMAIN_LIGHT_CLIENT: 0x82000000
# custody-game spec
DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000
DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000
# custody-game
# ---------------------------------------------------------------
# Time parameters
# 2**1 (= 2) epochs, 12.8 minutes
RANDAO_PENALTY_EPOCHS: 2
# 2**15 (= 32,768) epochs, ~146 days
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 32768
# 2**14 (= 16,384) epochs ~73 days
EPOCHS_PER_CUSTODY_PERIOD: 16384
# 2**11 (= 2,048) epochs, ~9 days
CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048
# 2**14 (= 16,384) epochs
CUSTODY_RESPONSE_DEADLINE: 16384
# 2**15 (= 32,768) epochs, ~146 days
MAX_CHUNK_CHALLENGE_DELAY: 32768
# Misc parameters
# 2**256 - 189
CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747
# 3
CUSTODY_SECRETS: 3
# 2**5 (= 32) bytes
BYTES_PER_CUSTODY_ATOM: 32
# 1/1024 chance of custody bit 1
CUSTODY_PROBABILITY_EXPONENT: 10
# Max operations
# 2**8 (= 256)
MAX_CUSTODY_KEY_REVEALS: 256
# 2**0 (= 1)
MAX_EARLY_DERIVED_SECRET_REVEALS: 1
# 2**2 (= 4)
MAX_CUSTODY_CHUNK_CHALLENGES: 4
# 2**4 (= 16)
MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 16
# 2**0 (= 1)
MAX_CUSTODY_SLASHINGS: 1
# Reward and penalty quotients
EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
# 2**8 (= 256)
MINOR_REWARD_QUOTIENT: 256


@@ -51,6 +51,9 @@ SECONDS_PER_ETH1_BLOCK: 14
 # Deposit contract
 # ---------------------------------------------------------------
+# Ethereum Goerli testnet
+DEPOSIT_CHAIN_ID: 5
+DEPOSIT_NETWORK_ID: 5
 # **TBD**
 DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
@@ -151,73 +154,3 @@ DOMAIN_DEPOSIT: 0x03000000
 DOMAIN_VOLUNTARY_EXIT: 0x04000000
 DOMAIN_SELECTION_PROOF: 0x05000000
 DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
-# Phase 1
-DOMAIN_SHARD_PROPOSAL: 0x80000000
-DOMAIN_SHARD_COMMITTEE: 0x81000000
-DOMAIN_LIGHT_CLIENT: 0x82000000
-DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
-# Phase 1: Upgrade from Phase 0
-# ---------------------------------------------------------------
-# [customized] for testnet distinction
-PHASE_1_FORK_VERSION: 0x01000001
-# [customized] for testing
-PHASE_1_GENESIS_SLOT: 8
-# [customized] reduced for testing
-INITIAL_ACTIVE_SHARDS: 4
-# Phase 1: General
-# ---------------------------------------------------------------
-# [customized] reduced for testing
-MAX_SHARDS: 8
-# 2**3 (= 8) | online epochs
-ONLINE_PERIOD: 8
-# 2**7 (= 128)
-LIGHT_CLIENT_COMMITTEE_SIZE: 128
-# 2**8 (= 256) | epochs
-LIGHT_CLIENT_COMMITTEE_PERIOD: 256
-# 2**18 (= 262,144)
-SHARD_BLOCK_CHUNK_SIZE: 262144
-# 2**2 (= 4)
-MAX_SHARD_BLOCK_CHUNKS: 4
-# 3 * 2**16` (= 196,608)
-TARGET_SHARD_BLOCK_SIZE: 196608
-# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length.
-SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
-# len(SHARD_BLOCK_OFFSETS)
-MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
-# 2**14 (= 16,384) Gwei
-MAX_GASPRICE: 16384
-# 2**3 (= 8) Gwei
-MIN_GASPRICE: 8
-# 2**3 (= 8)
-GASPRICE_ADJUSTMENT_COEFFICIENT: 8
-# Phase 1: Custody Game
-# ---------------------------------------------------------------
-# Time parameters
-# 2**1 (= 2) epochs
-RANDAO_PENALTY_EPOCHS: 2
-# [customized] quicker for testing
-EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 4096
-# 2**11 (= 2,048) epochs
-EPOCHS_PER_CUSTODY_PERIOD: 2048
-# 2**11 (= 2,048) epochs
-CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048
-# 2**7 (= 128) epochs
-MAX_REVEAL_LATENESS_DECREMENT: 128
-# Max operations
-# 2**8 (= 256)
-MAX_CUSTODY_KEY_REVEALS: 256
-MAX_EARLY_DERIVED_SECRET_REVEALS: 1
-MAX_CUSTODY_SLASHINGS: 1
-# Reward and penalty quotients
-EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
-# 2**8 (= 256)
-MINOR_REWARD_QUOTIENT: 256

configs/minimal/phase1.yaml (new file, 106 lines)

@@ -0,0 +1,106 @@
# Minimal preset - phase 1
# phase1-fork
# ---------------------------------------------------------------
# [customized] for testnet distinction
PHASE_1_FORK_VERSION: 0x01000001
# [customized] for testing
PHASE_1_GENESIS_SLOT: 8
# [customized] reduced for testing
INITIAL_ACTIVE_SHARDS: 2
# beacon-chain
# ---------------------------------------------------------------
# Misc
# [customized] reduced for testing
MAX_SHARDS: 8
# 2**7 (= 128)
LIGHT_CLIENT_COMMITTEE_SIZE: 128
# 2**3 (= 8)
GASPRICE_ADJUSTMENT_COEFFICIENT: 8
# Shard block configs
# 2**20 (= 1,048,576) bytes
MAX_SHARD_BLOCK_SIZE: 1048576
# 2**18 (= 262,144) bytes
TARGET_SHARD_BLOCK_SIZE: 262144
# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length.
SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
# len(SHARD_BLOCK_OFFSETS)
MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
# 2**12 (= 4,096)
BYTES_PER_CUSTODY_CHUNK: 4096
# ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK)
CUSTODY_RESPONSE_DEPTH: 8
# Gwei values
# 2**14 (= 16,384) Gwei
MAX_GASPRICE: 16384
# 2**3 (= 8) Gwei
MIN_GASPRICE: 8
# Time parameters
# 2**3 (= 8) | online epochs
ONLINE_PERIOD: 8
# 2**8 (= 256) | epochs
LIGHT_CLIENT_COMMITTEE_PERIOD: 256
# Max operations per block
# 2**20 (= 1,048,576)
MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS: 1048576
# Domain types
DOMAIN_SHARD_PROPOSAL: 0x80000000
DOMAIN_SHARD_COMMITTEE: 0x81000000
DOMAIN_LIGHT_CLIENT: 0x82000000
# custody-game spec
DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000
DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000
# custody-game
# ---------------------------------------------------------------
# Time parameters
# 2**1 (= 2) epochs
RANDAO_PENALTY_EPOCHS: 2
# [customized] quicker for testing
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 128
# [customized] quicker for testing
EPOCHS_PER_CUSTODY_PERIOD: 64
# [customized] quicker for testing
CUSTODY_PERIOD_TO_RANDAO_PADDING: 8
# [customized] quicker for testing
CUSTODY_RESPONSE_DEADLINE: 128
# [customize for faster testing]
MAX_CHUNK_CHALLENGE_DELAY: 128
# Misc parameters
# 2**256 - 189
CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747
# 3
CUSTODY_SECRETS: 3
# 2**5 (= 32) bytes
BYTES_PER_CUSTODY_ATOM: 32
# 1/4 chance of custody bit 1 [customized for faster testing]
CUSTODY_PROBABILITY_EXPONENT: 2
# Max operations
# 2**8 (= 256)
MAX_CUSTODY_KEY_REVEALS: 256
# 2**0 (= 1)
MAX_EARLY_DERIVED_SECRET_REVEALS: 1
# [customized]
MAX_CUSTODY_CHUNK_CHALLENGES: 2
# [customized]
MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 8
# 2**0 (= 1)
MAX_CUSTODY_SLASHINGS: 1
# Reward and penalty quotients
EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
# 2**8 (= 256)
MINOR_REWARD_QUOTIENT: 256

linter.ini (new file, 13 lines)

@@ -0,0 +1,13 @@
[flake8]
ignore = E252,W504,W503
max-line-length = 120
[mypy]
disallow_incomplete_defs = True
disallow_untyped_defs = True
warn_unused_ignores = True
warn_unused_configs = True
warn_redundant_casts = True
ignore_missing_imports = True


@@ -94,9 +94,9 @@ from dataclasses import (
 from lru import LRU
-from eth2spec.utils.ssz.ssz_impl import hash_tree_root
+from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
 from eth2spec.utils.ssz.ssz_typing import (
-    View, boolean, Container, List, Vector, uint64,
+    View, boolean, Container, List, Vector, uint8, uint32, uint64,
     Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
 )
 from eth2spec.utils import bls
@@ -118,10 +118,10 @@ from dataclasses import (
 from lru import LRU
-from eth2spec.utils.ssz.ssz_impl import hash_tree_root
+from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
 from eth2spec.utils.ssz.ssz_typing import (
-    View, boolean, Container, List, Vector, uint64, uint8, bit,
-    ByteList, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
+    View, boolean, Container, List, Vector, uint8, uint32, uint64, bit,
+    ByteList, ByteVector, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
 )
 from eth2spec.utils import bls
@@ -140,12 +140,7 @@ SUNDRY_CONSTANTS_FUNCTIONS = '''
 def ceillog2(x: uint64) -> int:
     return (x - 1).bit_length()
 '''
-SUNDRY_FUNCTIONS = '''
-# Monkey patch hash cache
-_hash = hash
-hash_cache: Dict[bytes, Bytes32] = {}
+PHASE0_SUNDRY_FUNCTIONS = '''
 def get_eth1_data(block: Eth1Block) -> Eth1Data:
     """
     A stub function return mocking Eth1Data.
@@ -156,12 +151,6 @@ def get_eth1_data(block: Eth1Block) -> Eth1Data:
                     block_hash=hash_tree_root(block))
-def hash(x: bytes) -> Bytes32:  # type: ignore
-    if x not in hash_cache:
-        hash_cache[x] = Bytes32(_hash(x))
-    return hash_cache[x]
 def cache_this(key_fn, value_fn, lru_size):  # type: ignore
     cache_dict = LRU(size=lru_size)
@@ -189,10 +178,10 @@ get_base_reward = cache_this(
     lambda state, index: (state.validators.hash_tree_root(), state.slot, index),
     _get_base_reward, lru_size=2048)
-_get_committee_count_at_slot = get_committee_count_at_slot
-get_committee_count_at_slot = cache_this(
+_get_committee_count_per_slot = get_committee_count_per_slot
+get_committee_count_per_slot = cache_this(
     lambda state, epoch: (state.validators.hash_tree_root(), epoch),
-    _get_committee_count_at_slot, lru_size=SLOTS_PER_EPOCH * 3)
+    _get_committee_count_per_slot, lru_size=SLOTS_PER_EPOCH * 3)
 _get_active_validator_indices = get_active_validator_indices
 get_active_validator_indices = cache_this(
@@ -216,10 +205,27 @@ get_matching_head_attestations = cache_this(
 _get_attesting_indices = get_attesting_indices
 get_attesting_indices = cache_this(
-    lambda state, data, bits: (state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()),
+    lambda state, data, bits: (
+        state.randao_mixes.hash_tree_root(),
+        state.validators.hash_tree_root(), data.hash_tree_root(), bits.hash_tree_root()
+    ),
     _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''
+PHASE1_SUNDRY_FUNCTIONS = '''
+def get_block_data_merkle_root(data: ByteList) -> Root:
+    # To get the Merkle root of the block data, we need the Merkle root without the length mix-in
+    # The below implements this in the Remerkleable framework
+    return Root(data.get_backing().get_left().merkle_root())
+_get_start_shard = get_start_shard
+get_start_shard = cache_this(
+    lambda state, slot: (state.validators.hash_tree_root(), slot),
+    _get_start_shard, lru_size=SLOTS_PER_EPOCH * 3)'''
 def objects_to_spec(spec_object: SpecObject, imports: str, fork: str) -> str:
     """
     Given all the objects that constitute a spec, combine them into a single pyfile.
@@ -250,9 +256,11 @@ def objects_to_spec(spec_object: SpecObject, imports: str, fork: str) -> str:
         + '\n\n' + CONFIG_LOADER
         + '\n\n' + ssz_objects_instantiation_spec
         + '\n\n' + functions_spec
-        + '\n' + SUNDRY_FUNCTIONS
-        + '\n'
+        + '\n' + PHASE0_SUNDRY_FUNCTIONS
     )
+    if fork == 'phase1':
+        spec += '\n' + PHASE1_SUNDRY_FUNCTIONS
+    spec += '\n'
     return spec
@@ -385,6 +393,8 @@ class PySpecCommand(Command):
         specs/phase1/shard-transition.md
         specs/phase1/fork-choice.md
         specs/phase1/phase1-fork.md
+        specs/phase1/shard-fork-choice.md
+        specs/phase1/validator.md
     """
         else:
             raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
@@ -508,7 +518,7 @@ setup(
         "py_ecc==4.0.0",
         "milagro_bls_binding==1.3.0",
         "dataclasses==0.6",
-        "remerkleable==0.1.16",
+        "remerkleable==0.1.17",
         "ruamel.yaml==0.16.5",
         "lru-dict==1.1.6"
     ]


@@ -55,8 +55,8 @@
 - [Math](#math)
 - [`integer_squareroot`](#integer_squareroot)
 - [`xor`](#xor)
-- [`int_to_bytes`](#int_to_bytes)
-- [`bytes_to_int`](#bytes_to_int)
+- [`uint_to_bytes`](#uint_to_bytes)
+- [`bytes_to_uint64`](#bytes_to_uint64)
 - [Crypto](#crypto)
 - [`hash`](#hash)
 - [`hash_tree_root`](#hash_tree_root)
@@ -89,7 +89,7 @@
 - [`get_active_validator_indices`](#get_active_validator_indices)
 - [`get_validator_churn_limit`](#get_validator_churn_limit)
 - [`get_seed`](#get_seed)
-- [`get_committee_count_at_slot`](#get_committee_count_at_slot)
+- [`get_committee_count_per_slot`](#get_committee_count_per_slot)
 - [`get_beacon_committee`](#get_beacon_committee)
 - [`get_beacon_proposer_index`](#get_beacon_proposer_index)
 - [`get_total_balance`](#get_total_balance)
@@ -170,9 +170,9 @@ The following values are (non-configurable) constants used throughout the specif
 | `GENESIS_SLOT` | `Slot(0)` |
 | `GENESIS_EPOCH` | `Epoch(0)` |
 | `FAR_FUTURE_EPOCH` | `Epoch(2**64 - 1)` |
-| `BASE_REWARDS_PER_EPOCH` | `4` |
-| `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
-| `JUSTIFICATION_BITS_LENGTH` | `4` |
+| `BASE_REWARDS_PER_EPOCH` | `uint64(4)` |
+| `DEPOSIT_CONTRACT_TREE_DEPTH` | `uint64(2**5)` (= 32) |
+| `JUSTIFICATION_BITS_LENGTH` | `uint64(4)` |
 | `ENDIANNESS` | `'little'` |
 ## Configuration
@@ -183,18 +183,18 @@ The following values are (non-configurable) constants used throughout the specif
 | Name | Value |
 | - | - |
-| `ETH1_FOLLOW_DISTANCE` | `2**10` (= 1,024) |
-| `MAX_COMMITTEES_PER_SLOT` | `2**6` (= 64) |
-| `TARGET_COMMITTEE_SIZE` | `2**7` (= 128) |
-| `MAX_VALIDATORS_PER_COMMITTEE` | `2**11` (= 2,048) |
-| `MIN_PER_EPOCH_CHURN_LIMIT` | `2**2` (= 4) |
-| `CHURN_LIMIT_QUOTIENT` | `2**16` (= 65,536) |
-| `SHUFFLE_ROUND_COUNT` | `90` |
-| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**14` (= 16,384) |
-| `MIN_GENESIS_TIME` | `1578009600` (Jan 3, 2020) |
-| `HYSTERESIS_QUOTIENT` | `4` |
-| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `1` |
-| `HYSTERESIS_UPWARD_MULTIPLIER` | `5` |
+| `ETH1_FOLLOW_DISTANCE` | `uint64(2**10)` (= 1,024) |
+| `MAX_COMMITTEES_PER_SLOT` | `uint64(2**6)` (= 64) |
+| `TARGET_COMMITTEE_SIZE` | `uint64(2**7)` (= 128) |
+| `MAX_VALIDATORS_PER_COMMITTEE` | `uint64(2**11)` (= 2,048) |
+| `MIN_PER_EPOCH_CHURN_LIMIT` | `uint64(2**2)` (= 4) |
+| `CHURN_LIMIT_QUOTIENT` | `uint64(2**16)` (= 65,536) |
+| `SHUFFLE_ROUND_COUNT` | `uint64(90)` |
+| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `uint64(2**14)` (= 16,384) |
+| `MIN_GENESIS_TIME` | `uint64(1578009600)` (Jan 3, 2020) |
+| `HYSTERESIS_QUOTIENT` | `uint64(4)` |
+| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `uint64(1)` |
+| `HYSTERESIS_UPWARD_MULTIPLIER` | `uint64(5)` |
 - For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
@@ -218,37 +218,37 @@ The following values are (non-configurable) constants used throughout the specif
 | Name | Value | Unit | Duration |
 | - | - | :-: | :-: |
-| `GENESIS_DELAY` | `172800` | seconds | 2 days |
-| `SECONDS_PER_SLOT` | `12` | seconds | 12 seconds |
-| `SECONDS_PER_ETH1_BLOCK` | `14` | seconds | 14 seconds |
-| `MIN_ATTESTATION_INCLUSION_DELAY` | `2**0` (= 1) | slots | 12 seconds |
-| `SLOTS_PER_EPOCH` | `2**5` (= 32) | slots | 6.4 minutes |
-| `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes |
-| `MAX_SEED_LOOKAHEAD` | `2**2` (= 4) | epochs | 25.6 minutes |
-| `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `2**2` (= 4) | epochs | 25.6 minutes |
-| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `2**5` (= 32) | epochs | ~3.4 hours |
-| `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~27 hours |
-| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours |
-| `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
+| `GENESIS_DELAY` | `uint64(172800)` | seconds | 2 days |
+| `SECONDS_PER_SLOT` | `uint64(12)` | seconds | 12 seconds |
+| `SECONDS_PER_ETH1_BLOCK` | `uint64(14)` | seconds | 14 seconds |
+| `MIN_ATTESTATION_INCLUSION_DELAY` | `uint64(2**0)` (= 1) | slots | 12 seconds |
+| `SLOTS_PER_EPOCH` | `uint64(2**5)` (= 32) | slots | 6.4 minutes |
+| `MIN_SEED_LOOKAHEAD` | `uint64(2**0)` (= 1) | epochs | 6.4 minutes |
+| `MAX_SEED_LOOKAHEAD` | `uint64(2**2)` (= 4) | epochs | 25.6 minutes |
+| `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `uint64(2**2)` (= 4) | epochs | 25.6 minutes |
+| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `uint64(2**5)` (= 32) | epochs | ~3.4 hours |
+| `SLOTS_PER_HISTORICAL_ROOT` | `uint64(2**13)` (= 8,192) | slots | ~27 hours |
+| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `uint64(2**8)` (= 256) | epochs | ~27 hours |
+| `SHARD_COMMITTEE_PERIOD` | `uint64(2**8)` (= 256) | epochs | ~27 hours |
 ### State list lengths
 | Name | Value | Unit | Duration |
 | - | - | :-: | :-: |
-| `EPOCHS_PER_HISTORICAL_VECTOR` | `2**16` (= 65,536) | epochs | ~0.8 years |
-| `EPOCHS_PER_SLASHINGS_VECTOR` | `2**13` (= 8,192) | epochs | ~36 days |
-| `HISTORICAL_ROOTS_LIMIT` | `2**24` (= 16,777,216) | historical roots | ~52,262 years |
-| `VALIDATOR_REGISTRY_LIMIT` | `2**40` (= 1,099,511,627,776) | validators |
+| `EPOCHS_PER_HISTORICAL_VECTOR` | `uint64(2**16)` (= 65,536) | epochs | ~0.8 years |
+| `EPOCHS_PER_SLASHINGS_VECTOR` | `uint64(2**13)` (= 8,192) | epochs | ~36 days |
+| `HISTORICAL_ROOTS_LIMIT` | `uint64(2**24)` (= 16,777,216) | historical roots | ~52,262 years |
+| `VALIDATOR_REGISTRY_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | validators |
 ### Rewards and penalties
 | Name | Value |
 | - | - |
-| `BASE_REWARD_FACTOR` | `2**6` (= 64) |
-| `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) |
-| `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) |
-| `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) |
-| `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) |
+| `BASE_REWARD_FACTOR` | `uint64(2**6)` (= 64) |
+| `WHISTLEBLOWER_REWARD_QUOTIENT` | `uint64(2**9)` (= 512) |
+| `PROPOSER_REWARD_QUOTIENT` | `uint64(2**3)` (= 8) |
+| `INACTIVITY_PENALTY_QUOTIENT` | `uint64(2**24)` (= 16,777,216) |
+| `MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**5)` (= 32) |
 - The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12` epochs (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
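A quick numeric check of that claim, as a standalone sketch in plain Python (not part of the diff above):

```python
import math

INACTIVITY_PENALTY_QUOTIENT = 2**24
INVERSE_SQRT_E_DROP_TIME = 2**12  # epochs

# Fraction of balance an offline validator retains after n epochs: ~(1 - 1/Q)**(n**2 / 2)
n = INVERSE_SQRT_E_DROP_TIME
retained = (1 - 1 / INACTIVITY_PENALTY_QUOTIENT) ** (n ** 2 / 2)

print(round(retained, 4))               # 0.6065
print(round(1 / math.sqrt(math.e), 4))  # 0.6065
```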
@@ -576,24 +576,18 @@ def xor(bytes_1: Bytes32, bytes_2: Bytes32) -> Bytes32:
     return Bytes32(a ^ b for a, b in zip(bytes_1, bytes_2))
 ```
-#### `int_to_bytes`
+#### `uint_to_bytes`
+`def uint_to_bytes(n: uint) -> bytes` is a function for serializing the `uint` type object to bytes in ``ENDIANNESS``-endian. The expected length of the output is the byte-length of the `uint` type.
+#### `bytes_to_uint64`
 ```python
-def int_to_bytes(n: uint64, length: uint64) -> bytes:
-    """
-    Return the ``length``-byte serialization of ``n`` in ``ENDIANNESS``-endian.
-    """
-    return n.to_bytes(length, ENDIANNESS)
-```
-#### `bytes_to_int`
-```python
-def bytes_to_int(data: bytes) -> uint64:
+def bytes_to_uint64(data: bytes) -> uint64:
     """
     Return the integer deserialization of ``data`` interpreted as ``ENDIANNESS``-endian.
     """
-    return int.from_bytes(data, ENDIANNESS)
+    return uint64(int.from_bytes(data, ENDIANNESS))
 ```
 ### Crypto
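For reference, a minimal sketch of the two endianness helpers introduced above. This is illustrative only: in the pyspec, `uint_to_bytes` infers the output length from the SSZ `uint` type of its argument rather than taking it as a parameter, and `bytes_to_uint64` returns an SSZ `uint64` rather than a plain `int`.

```python
ENDIANNESS = 'little'

def uint_to_bytes_sketch(n: int, byte_length: int) -> bytes:
    # Stand-in for uint_to_bytes: byte_length is explicit here, whereas the spec
    # derives it from the uint type (uint8 -> 1, uint32 -> 4, uint64 -> 8, ...).
    return n.to_bytes(byte_length, ENDIANNESS)

def bytes_to_uint64_sketch(data: bytes) -> int:
    # Mirrors bytes_to_uint64 above, minus the uint64 wrapper.
    return int.from_bytes(data, ENDIANNESS)

assert uint_to_bytes_sketch(90, 8) == b'Z' + b'\x00' * 7
assert bytes_to_uint64_sketch(b'Z' + b'\x00' * 7) == 90
```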
@@ -733,11 +727,15 @@ def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) ->
     # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
     # See the 'generalized domain' algorithm on page 3
     for current_round in range(SHUFFLE_ROUND_COUNT):
-        pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count
+        pivot = bytes_to_uint64(hash(seed + uint_to_bytes(uint8(current_round)))[0:8]) % index_count
         flip = (pivot + index_count - index) % index_count
         position = max(index, flip)
-        source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4))
-        byte = source[(position % 256) // 8]
+        source = hash(
+            seed
+            + uint_to_bytes(uint8(current_round))
+            + uint_to_bytes(uint32(position // 256))
+        )
+        byte = uint8(source[(position % 256) // 8])
         bit = (byte >> (position % 8)) % 2
         index = flip if bit else index
@@ -753,10 +751,11 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
     """
     assert len(indices) > 0
     MAX_RANDOM_BYTE = 2**8 - 1
-    i = 0
+    i = uint64(0)
+    total = uint64(len(indices))
     while True:
-        candidate_index = indices[compute_shuffled_index(i % len(indices), len(indices), seed)]
-        random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
+        candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
+        random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
         effective_balance = state.validators[candidate_index].effective_balance
         if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
             return candidate_index
@@ -775,7 +774,7 @@ def compute_committee(indices: Sequence[ValidatorIndex],
     """
     start = (len(indices) * index) // count
     end = (len(indices) * (index + 1)) // count
-    return [indices[compute_shuffled_index(i, len(indices), seed)] for i in range(start, end)]
+    return [indices[compute_shuffled_index(uint64(i), uint64(len(indices)), seed)] for i in range(start, end)]
 ```
 #### `compute_epoch_at_slot`
@@ -934,7 +933,7 @@ def get_validator_churn_limit(state: BeaconState) -> uint64:
     Return the validator churn limit for the current epoch.
     """
     active_validator_indices = get_active_validator_indices(state, get_current_epoch(state))
-    return max(MIN_PER_EPOCH_CHURN_LIMIT, len(active_validator_indices) // CHURN_LIMIT_QUOTIENT)
+    return max(MIN_PER_EPOCH_CHURN_LIMIT, uint64(len(active_validator_indices)) // CHURN_LIMIT_QUOTIENT)
 ```
 #### `get_seed`
@@ -945,20 +944,19 @@ def get_seed(state: BeaconState, epoch: Epoch, domain_type: DomainType) -> Bytes
     Return the seed at ``epoch``.
     """
     mix = get_randao_mix(state, Epoch(epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1))  # Avoid underflow
-    return hash(domain_type + int_to_bytes(epoch, length=8) + mix)
+    return hash(domain_type + uint_to_bytes(epoch) + mix)
 ```
-#### `get_committee_count_at_slot`
+#### `get_committee_count_per_slot`
 ```python
-def get_committee_count_at_slot(state: BeaconState, slot: Slot) -> uint64:
+def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64:
     """
-    Return the number of committees at ``slot``.
+    Return the number of committees in each slot for the given ``epoch``.
     """
-    epoch = compute_epoch_at_slot(slot)
-    return max(1, min(
+    return max(uint64(1), min(
         MAX_COMMITTEES_PER_SLOT,
-        len(get_active_validator_indices(state, epoch)) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
+        uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
     ))
 ```
@@ -970,7 +968,7 @@ def get_beacon_committee(state: BeaconState, slot: Slot, index: CommitteeIndex)
     Return the beacon committee at ``slot`` for ``index``.
     """
     epoch = compute_epoch_at_slot(slot)
-    committees_per_slot = get_committee_count_at_slot(state, slot)
+    committees_per_slot = get_committee_count_per_slot(state, epoch)
     return compute_committee(
         indices=get_active_validator_indices(state, epoch),
         seed=get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
@@ -987,7 +985,7 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
     Return the beacon proposer index at the current slot.
     """
     epoch = get_current_epoch(state)
-    seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + int_to_bytes(state.slot, length=8))
+    seed = hash(get_seed(state, epoch, DOMAIN_BEACON_PROPOSER) + uint_to_bytes(state.slot))
     indices = get_active_validator_indices(state, epoch)
     return compute_proposer_index(state, indices, seed)
 ```
@@ -1317,7 +1315,7 @@ def process_justification_and_finalization(state: BeaconState) -> None:
     # Process justifications
     state.previous_justified_checkpoint = state.current_justified_checkpoint
-    state.justification_bits[1:] = state.justification_bits[:-1]
+    state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1]
     state.justification_bits[0] = 0b0
     matching_target_attestations = get_matching_target_attestations(state, previous_epoch)  # Previous epoch
     if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
@@ -1720,10 +1718,10 @@ def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSla
 ```python
 def process_attestation(state: BeaconState, attestation: Attestation) -> None:
     data = attestation.data
-    assert data.index < get_committee_count_at_slot(state, data.slot)
     assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
     assert data.target.epoch == compute_epoch_at_slot(data.slot)
     assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
+    assert data.index < get_committee_count_per_slot(state, data.target.epoch)
     committee = get_beacon_committee(state, data.slot, data.index)
     assert len(attestation.aggregation_bits) == len(committee)
@@ -1748,6 +1746,22 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
 ##### Deposits
+```python
+def get_validator_from_deposit(state: BeaconState, deposit: Deposit) -> Validator:
+    amount = deposit.data.amount
+    effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
+    return Validator(
+        pubkey=deposit.data.pubkey,
+        withdrawal_credentials=deposit.data.withdrawal_credentials,
+        activation_eligibility_epoch=FAR_FUTURE_EPOCH,
+        activation_epoch=FAR_FUTURE_EPOCH,
+        exit_epoch=FAR_FUTURE_EPOCH,
+        withdrawable_epoch=FAR_FUTURE_EPOCH,
+        effective_balance=effective_balance,
+    )
+```
 ```python
 def process_deposit(state: BeaconState, deposit: Deposit) -> None:
     # Verify the Merkle branch
@@ -1778,15 +1792,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
             return
         # Add validator and balance entries
-        state.validators.append(Validator(
-            pubkey=pubkey,
-            withdrawal_credentials=deposit.data.withdrawal_credentials,
-            activation_eligibility_epoch=FAR_FUTURE_EPOCH,
-            activation_epoch=FAR_FUTURE_EPOCH,
-            exit_epoch=FAR_FUTURE_EPOCH,
-            withdrawable_epoch=FAR_FUTURE_EPOCH,
-            effective_balance=min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE),
-        ))
+        state.validators.append(get_validator_from_deposit(state, deposit))
         state.balances.append(amount)
     else:
         # Increase balance by deposit amount


@@ -10,7 +10,7 @@
 - [Introduction](#introduction)
 - [Constants](#constants)
-  - [Contract](#contract)
+- [Configuration](#configuration)
 - [Ethereum 1.0 deposit contract](#ethereum-10-deposit-contract)
   - [`deposit` function](#deposit-function)
     - [Deposit amount](#deposit-amount)
@@ -27,16 +27,29 @@ This document represents the specification for the beacon chain deposit contract
 ## Constants
-### Contract
+The following values are (non-configurable) constants used throughout the specification.
 | Name | Value |
 | - | - |
-| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
 | `DEPOSIT_CONTRACT_TREE_DEPTH` | `2**5` (= 32) |
+## Configuration
+*Note*: The default mainnet configuration values are included here for spec-design purposes.
+The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory.
+These configurations are updated for releases and may be out of sync during `dev` changes.
+| Name | Value |
+| - | - |
+| `DEPOSIT_CHAIN_ID` | `1` |
+| `DEPOSIT_NETWORK_ID` | `1` |
+| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
 ## Ethereum 1.0 deposit contract
-The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to Ethereum 1.0 for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2.
+The initial deployment phases of Ethereum 2.0 are implemented without consensus changes to Ethereum 1.0. A deposit contract at address `DEPOSIT_CONTRACT_ADDRESS` is added to the Ethereum 1.0 chain defined by the [chain-id](https://eips.ethereum.org/EIPS/eip-155) -- `DEPOSIT_CHAIN_ID` -- and the network-id -- `DEPOSIT_NETWORK_ID` -- for deposits of ETH to the beacon chain. Validator balances will be withdrawable to the shards in Phase 2.
+_Note_: See [here](https://chainid.network/) for a comprehensive list of public Ethereum chain chain-id's and network-id's.
 ### `deposit` function
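As a usage aside (not part of the spec change): before submitting a deposit, tooling can sanity-check that it is talking to the intended Eth1 chain. A hedged sketch using web3.py, where the endpoint URL is an assumption:

```python
from web3 import Web3

# Values from the mainnet config above.
DEPOSIT_CHAIN_ID = 1
DEPOSIT_NETWORK_ID = 1

# Assumed local Eth1 endpoint; replace with your provider.
w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))

assert w3.eth.chain_id == DEPOSIT_CHAIN_ID, "connected to an Eth1 node on the wrong chain"
assert int(w3.net.version) == DEPOSIT_NETWORK_ID, "connected to an Eth1 node on the wrong network"
```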


@@ -44,9 +44,11 @@ This document is the beacon chain fork choice spec, part of Ethereum 2.0 Phase 0
 The head block root associated with a `store` is defined as `get_head(store)`. At genesis, let `store = get_forkchoice_store(genesis_state)` and update `store` by running:
-- `on_tick(time)` whenever `time > store.time` where `time` is the current Unix time
-- `on_block(block)` whenever a block `block: SignedBeaconBlock` is received
-- `on_attestation(attestation)` whenever an attestation `attestation` is received
+- `on_tick(store, time)` whenever `time > store.time` where `time` is the current Unix time
+- `on_block(store, block)` whenever a block `block: SignedBeaconBlock` is received
+- `on_attestation(store, attestation)` whenever an attestation `attestation` is received
+Any of the above handlers that trigger an unhandled exception (e.g. a failed assert or an out-of-range list access) are considered invalid. Invalid calls to handlers must not modify `store`.
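A minimal sketch of how a client might drive these handlers against the built phase 0 pyspec; the timing and the lists of newly received objects are client-specific assumptions, and real implementations must additionally guarantee that a failed handler call leaves the `store` unchanged:

```python
from typing import Iterable

from eth2spec.phase0 import spec


def run_fork_choice_step(store: spec.Store,
                         current_time: int,
                         blocks: Iterable[spec.SignedBeaconBlock],
                         attestations: Iterable[spec.Attestation]) -> spec.Root:
    # Advance the store clock first.
    if current_time > store.time:
        spec.on_tick(store, current_time)
    # Feed newly received blocks and attestations; invalid objects raise and are skipped.
    for signed_block in blocks:
        try:
            spec.on_block(store, signed_block)
        except (AssertionError, IndexError):
            pass  # invalid call; the spec requires the store to be left unmodified
    for attestation in attestations:
        try:
            spec.on_attestation(store, attestation)
        except (AssertionError, IndexError):
            pass  # invalid call; the spec requires the store to be left unmodified
    # Recompute the head after applying the new information.
    return spec.get_head(store)
```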
 *Notes*:
@@ -100,7 +102,7 @@ _The block for `anchor_root` is incorrectly initialized to the block header, rat
 ```python
 def get_forkchoice_store(anchor_state: BeaconState) -> Store:
-    anchor_block_header = anchor_state.latest_block_header.copy()
+    anchor_block_header = copy(anchor_state.latest_block_header)
     if anchor_block_header.state_root == Bytes32():
         anchor_block_header.state_root = hash_tree_root(anchor_state)
     anchor_root = hash_tree_root(anchor_block_header)
@@ -108,14 +110,14 @@ def get_forkchoice_store(anchor_state: BeaconState) -> Store:
     justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
     finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
     return Store(
-        time=anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot,
+        time=uint64(anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot),
         genesis_time=anchor_state.genesis_time,
         justified_checkpoint=justified_checkpoint,
         finalized_checkpoint=finalized_checkpoint,
         best_justified_checkpoint=justified_checkpoint,
         blocks={anchor_root: anchor_block_header},
-        block_states={anchor_root: anchor_state.copy()},
-        checkpoint_states={justified_checkpoint: anchor_state.copy()},
+        block_states={anchor_root: copy(anchor_state)},
+        checkpoint_states={justified_checkpoint: copy(anchor_state)},
     )
 ```
@@ -300,8 +302,9 @@ def validate_on_attestation(store: Store, attestation: Attestation) -> None:
 def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None:
     # Store target checkpoint state if not yet seen
     if target not in store.checkpoint_states:
-        base_state = store.block_states[target.root].copy()
-        process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
+        base_state = copy(store.block_states[target.root])
+        if base_state.slot < compute_start_slot_at_epoch(target.epoch):
+            process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
         store.checkpoint_states[target] = base_state
 ```
@@ -342,22 +345,23 @@ def on_tick(store: Store, time: uint64) -> None:
 ```python
 def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
     block = signed_block.message
-    # Make a copy of the state to avoid mutability issues
+    # Parent block must be known
     assert block.parent_root in store.block_states
-    pre_state = store.block_states[block.parent_root].copy()
+    # Make a copy of the state to avoid mutability issues
+    pre_state = copy(store.block_states[block.parent_root])
     # Blocks cannot be in the future. If they are, their consideration must be delayed until the are in the past.
     assert get_current_slot(store) >= block.slot
-    # Add new block to the store
-    store.blocks[hash_tree_root(block)] = block
     # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
     finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
     assert block.slot > finalized_slot
     # Check block is a descendant of the finalized block at the checkpoint finalized slot
-    assert get_ancestor(store, hash_tree_root(block), finalized_slot) == store.finalized_checkpoint.root
+    assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
     # Check the block is valid and compute the post-state
     state = state_transition(pre_state, signed_block, True)
+    # Add new block to the store
+    store.blocks[hash_tree_root(block)] = block
     # Add new state for this block to the store
     store.block_states[hash_tree_root(block)] = state
@@ -371,15 +375,19 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
     # Update finalized checkpoint
     if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
         store.finalized_checkpoint = state.finalized_checkpoint
-        finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-        # Update justified if new justified is later than store justified
-        # or if store justified is not in chain with finalized checkpoint
-        if (
-            state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch
-            or get_ancestor(store, store.justified_checkpoint.root, finalized_slot) != store.finalized_checkpoint.root
-        ):
-            store.justified_checkpoint = state.current_justified_checkpoint
+        # Potentially update justified if different from store
+        if store.justified_checkpoint != state.current_justified_checkpoint:
+            # Update justified if new justified is later than store justified
+            if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
+                store.justified_checkpoint = state.current_justified_checkpoint
+                return
+            # Update justified if store justified is not in chain with finalized checkpoint
+            finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
+            ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
+            if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
+                store.justified_checkpoint = state.current_justified_checkpoint
 ```
 #### `on_attestation`

File diff suppressed because it is too large.


@ -111,7 +111,7 @@ The validator constructs their `withdrawal_credentials` via the following:
### Submit deposit ### Submit deposit
In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 proof-of-work chain. Deposits are made to the [deposit contract](./deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`. In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 chain defined by `DEPOSIT_CHAIN_ID` and `DEPOSIT_NETWORK_ID`. Deposits are made to the [deposit contract](./deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`.
To submit a deposit: To submit a deposit:
@ -172,8 +172,9 @@ def get_committee_assignment(state: BeaconState,
assert epoch <= next_epoch assert epoch <= next_epoch
start_slot = compute_start_slot_at_epoch(epoch) start_slot = compute_start_slot_at_epoch(epoch)
committee_count_per_slot = get_committee_count_per_slot(state, epoch)
for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH): for slot in range(start_slot, start_slot + SLOTS_PER_EPOCH):
for index in range(get_committee_count_at_slot(state, Slot(slot))): for index in range(committee_count_per_slot):
committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index)) committee = get_beacon_committee(state, Slot(slot), CommitteeIndex(index))
if validator_index in committee: if validator_index in committee:
return committee, CommitteeIndex(index), Slot(slot) return committee, CommitteeIndex(index), Slot(slot)
@ -199,8 +200,10 @@ The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahe
Specifically a validator should: Specifically a validator should:
* Call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments. * Call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
* Find peers of the pubsub topic `committee_index{committee_index % ATTESTATION_SUBNET_COUNT}_beacon_attestation`. * Calculate the committees per slot for the next epoch: `committees_per_slot = get_committee_count_per_slot(state, next_epoch)`
* If an _insufficient_ number of current peers are subscribed to the topic, the validator must discover new peers on this topic. Via the discovery protocol, find peers with an ENR containing the `attnets` entry such that `ENR["attnets"][committee_index % ATTESTATION_SUBNET_COUNT] == True`. Then validate that the peers are still persisted on the desired topic by requesting `GetMetaData` and checking the resulting `attnets` field. * Calculate the subnet index: `subnet_id = compute_subnet_for_attestation(committees_per_slot, slot, committee_index)`
* Find peers of the pubsub topic `beacon_attestation_{subnet_id}`.
* If an _insufficient_ number of current peers are subscribed to the topic, the validator must discover new peers on this topic. Via the discovery protocol, find peers with an ENR containing the `attnets` entry such that `ENR["attnets"][subnet_id] == True`. Then validate that the peers are still persisted on the desired topic by requesting `GetMetaData` and checking the resulting `attnets` field.
* If the validator is assigned to be an aggregator for the slot (see `is_aggregator()`), then subscribe to the topic. * If the validator is assigned to be an aggregator for the slot (see `is_aggregator()`), then subscribe to the topic.
*Note*: If the validator is _not_ assigned to be an aggregator, the validator only needs sufficient number of peers on the topic to be able to publish messages. The validator does not need to _subscribe_ and listen to all messages on the topic. *Note*: If the validator is _not_ assigned to be an aggregator, the validator only needs sufficient number of peers on the topic to be able to publish messages. The validator does not need to _subscribe_ and listen to all messages on the topic.
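A rough sketch of the updated lookahead steps listed above, assuming the spec helpers shown elsewhere in this diff (`get_committee_assignment`, `get_committee_count_per_slot`, `compute_subnet_for_attestation`, `is_aggregator`, `get_slot_signature`) and a *hypothetical* `node` networking object (`peer_count`, `discover_peers`, `subscribe` are illustrative, not spec functions):

```python
def prepare_next_epoch_attestation_duties(state: BeaconState,
                                          validator_index: ValidatorIndex,
                                          privkey: int,
                                          node: Any) -> None:
    next_epoch = Epoch(get_current_epoch(state) + 1)
    assignment = get_committee_assignment(state, next_epoch, validator_index)
    if assignment is None:
        return  # no duty in the next epoch
    committee, committee_index, slot = assignment
    committees_per_slot = get_committee_count_per_slot(state, next_epoch)
    subnet_id = compute_subnet_for_attestation(committees_per_slot, slot, committee_index)
    topic = f"beacon_attestation_{subnet_id}"
    if node.peer_count(topic) < node.min_peers_per_topic:
        # Discover peers whose ENR advertises this subnet: ENR["attnets"][subnet_id] == True
        node.discover_peers(subnet_id)
    if is_aggregator(state, slot, committee_index, get_slot_signature(state, slot, privkey)):
        node.subscribe(topic)
```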
@ -270,7 +273,7 @@ An honest block proposer sets `block.body.eth1_data = get_eth1_vote(state)` wher
```python ```python
def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64: def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64:
return state.genesis_time + slot * SECONDS_PER_SLOT return uint64(state.genesis_time + slot * SECONDS_PER_SLOT)
``` ```
```python ```python
@ -304,7 +307,7 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da
valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider] valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider]
# Default vote on latest eth1 block data in the period range unless eth1 chain is not live # Default vote on latest eth1 block data in the period range unless eth1 chain is not live
default_vote = votes_to_consider[-1] if any(votes_to_consider) else state.eth1_data default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state.eth1_data
return max( return max(
valid_votes, valid_votes,
@ -396,7 +399,7 @@ Set `attestation_data.beacon_block_root = hash_tree_root(head_block)`.
*Note*: `epoch_boundary_block_root` can be looked up in the state using: *Note*: `epoch_boundary_block_root` can be looked up in the state using:
- Let `start_slot = compute_start_slot_at_epoch(get_current_epoch(head_state))`. - Let `start_slot = compute_start_slot_at_epoch(get_current_epoch(head_state))`.
- Let `epoch_boundary_block_root = hash_tree_root(head_block) if start_slot == head_state.slot else get_block_root(state, start_slot)`. - Let `epoch_boundary_block_root = hash_tree_root(head_block) if start_slot == head_state.slot else get_block_root(state, get_current_epoch(head_state))`.
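Equivalently, as a small helper (the function name is illustrative, not part of the spec; `head_state` and `head_block` come from the fork-choice head):

```python
def get_epoch_boundary_block_root(head_state: BeaconState, head_block: BeaconBlock) -> Root:
    # Root of the block at (or closest before) the start of the current epoch.
    start_slot = compute_start_slot_at_epoch(get_current_epoch(head_state))
    if start_slot == head_state.slot:
        return hash_tree_root(head_block)
    return get_block_root(head_state, get_current_epoch(head_state))
```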
#### Construct attestation #### Construct attestation
@ -425,18 +428,22 @@ def get_attestation_signature(state: BeaconState, attestation_data: AttestationD
#### Broadcast attestation #### Broadcast attestation
Finally, the validator broadcasts `attestation` to the associated attestation subnet -- the `beacon_attestation_{compute_subnet_for_attestation(state, attestation)}` pubsub topic. Finally, the validator broadcasts `attestation` to the associated attestation subnet, the `beacon_attestation_{subnet_id}` pubsub topic.
The `subnet_id` for the `attestation` is calculated with:
- Let `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)`.
- Let `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.committee_index)`.
```python ```python
def compute_subnet_for_attestation(state: BeaconState, attestation: Attestation) -> uint64: def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
""" """
Compute the correct subnet for an attestation for Phase 0. Compute the correct subnet for an attestation for Phase 0.
Note, this mimics expected Phase 1 behavior where attestations will be mapped to their shard subnet. Note, this mimics expected Phase 1 behavior where attestations will be mapped to their shard subnet.
""" """
slots_since_epoch_start = attestation.data.slot % SLOTS_PER_EPOCH slots_since_epoch_start = slot % SLOTS_PER_EPOCH
committees_since_epoch_start = get_committee_count_at_slot(state, attestation.data.slot) * slots_since_epoch_start committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
return (committees_since_epoch_start + attestation.data.index) % ATTESTATION_SUBNET_COUNT return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
``` ```
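For intuition, a small worked example assuming mainnet constants (`SLOTS_PER_EPOCH = 32`, `ATTESTATION_SUBNET_COUNT = 64`); the concrete values are illustrative only:

```python
committees_per_slot = 4
slot = Slot(34)                        # 34 % 32 = 2 slots since the epoch started
committee_index = CommitteeIndex(2)
# committees_since_epoch_start = 4 * 2 = 8, so subnet_id = (8 + 2) % 64 = 10,
# i.e. the attestation is published on the beacon_attestation_10 topic.
assert compute_subnet_for_attestation(committees_per_slot, slot, committee_index) == 10
```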
### Attestation aggregation ### Attestation aggregation
@ -458,7 +465,7 @@ def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSigna
def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool: def is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex, slot_signature: BLSSignature) -> bool:
committee = get_beacon_committee(state, slot, index) committee = get_beacon_committee(state, slot, index)
modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE) modulo = max(1, len(committee) // TARGET_AGGREGATORS_PER_COMMITTEE)
return bytes_to_int(hash(slot_signature)[0:8]) % modulo == 0 return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
``` ```
#### Construct aggregate #### Construct aggregate

View File

@ -1,4 +1,4 @@
# Ethereum 2.0 Phase 1 -- The Beacon Chain for Shards # Ethereum 2.0 Phase 1 -- The Beacon Chain with Shards
**Notice**: This document is a work-in-progress for researchers and implementers. **Notice**: This document is a work-in-progress for researchers and implementers.
@ -12,6 +12,11 @@
- [Custom types](#custom-types) - [Custom types](#custom-types)
- [Configuration](#configuration) - [Configuration](#configuration)
- [Misc](#misc) - [Misc](#misc)
- [Shard block configs](#shard-block-configs)
- [Gwei values](#gwei-values)
- [Initial values](#initial-values)
- [Time parameters](#time-parameters)
- [Domain types](#domain-types)
- [Updated containers](#updated-containers) - [Updated containers](#updated-containers)
- [Extended `AttestationData`](#extended-attestationdata) - [Extended `AttestationData`](#extended-attestationdata)
- [Extended `Attestation`](#extended-attestation) - [Extended `Attestation`](#extended-attestation)
@ -47,14 +52,11 @@
- [`get_shard_committee`](#get_shard_committee) - [`get_shard_committee`](#get_shard_committee)
- [`get_light_client_committee`](#get_light_client_committee) - [`get_light_client_committee`](#get_light_client_committee)
- [`get_shard_proposer_index`](#get_shard_proposer_index) - [`get_shard_proposer_index`](#get_shard_proposer_index)
- [`get_indexed_attestation`](#get_indexed_attestation) - [`get_committee_count_delta`](#get_committee_count_delta)
- [`get_start_shard`](#get_start_shard) - [`get_start_shard`](#get_start_shard)
- [`get_shard`](#get_shard)
- [`get_latest_slot_for_shard`](#get_latest_slot_for_shard) - [`get_latest_slot_for_shard`](#get_latest_slot_for_shard)
- [`get_offset_slots`](#get_offset_slots) - [`get_offset_slots`](#get_offset_slots)
- [Predicates](#predicates) - [Predicates](#predicates)
- [`verify_attestation_custody`](#verify_attestation_custody)
- [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation)
- [`is_on_time_attestation`](#is_on_time_attestation) - [`is_on_time_attestation`](#is_on_time_attestation)
- [`is_winning_attestation`](#is_winning_attestation) - [`is_winning_attestation`](#is_winning_attestation)
- [`optional_aggregate_verify`](#optional_aggregate_verify) - [`optional_aggregate_verify`](#optional_aggregate_verify)
@ -70,9 +72,10 @@
- [`process_crosslinks`](#process_crosslinks) - [`process_crosslinks`](#process_crosslinks)
- [`verify_empty_shard_transition`](#verify_empty_shard_transition) - [`verify_empty_shard_transition`](#verify_empty_shard_transition)
- [`process_shard_transitions`](#process_shard_transitions) - [`process_shard_transitions`](#process_shard_transitions)
- [New Attester slashing processing](#new-attester-slashing-processing) - [New default validator for deposits](#new-default-validator-for-deposits)
- [Light client processing](#light-client-processing) - [Light client processing](#light-client-processing)
- [Epoch transition](#epoch-transition) - [Epoch transition](#epoch-transition)
- [Phase 1 final updates](#phase-1-final-updates)
- [Custody game updates](#custody-game-updates) - [Custody game updates](#custody-game-updates)
- [Online-tracking](#online-tracking) - [Online-tracking](#online-tracking)
- [Light client committee updates](#light-client-committee-updates) - [Light client committee updates](#light-client-committee-updates)
@ -100,23 +103,53 @@ Configuration is not namespaced. Instead it is strictly an extension;
### Misc ### Misc
| Name | Value | Unit | Duration | | Name | Value |
| - | - | - | - | | - | - |
| `MAX_SHARDS` | `2**10` (= 1024) | | `MAX_SHARDS` | `2**10` (= 1024) |
| `ONLINE_PERIOD` | `OnlineEpochs(2**3)` (= 8) | online epochs | ~51 min |
| `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) | | `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) |
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `2**3` (= 8) |
### Shard block configs
| Name | Value | Unit |
| - | - | - |
| `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | bytes |
| `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | bytes |
| `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | - |
| `MAX_SHARD_BLOCKS_PER_ATTESTATION` | `len(SHARD_BLOCK_OFFSETS)` | - |
| `BYTES_PER_CUSTODY_CHUNK` | `2**12` (= 4,096) | bytes |
| `CUSTODY_RESPONSE_DEPTH` | `ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK)` | - |
### Gwei values
| Name | Value | Unit |
| - | - | - |
| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei |
| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei |
### Initial values
| Name | Value |
| - | - |
| `NO_SIGNATURE` | `BLSSignature(b'\x00' * 96)` |
### Time parameters
| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `ONLINE_PERIOD` | `OnlineEpochs(2**3)` (= 8) | online epochs | ~51 mins |
| `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours | | `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
| `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | |
| `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | | ### Domain types
| `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | |
| `MAX_SHARD_BLOCKS_PER_ATTESTATION` | `len(SHARD_BLOCK_OFFSETS)` | | | Name | Value |
| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | | | - | - |
| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | | | `DOMAIN_SHARD_PROPOSAL` | `DomainType('0x80000000')` |
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `2**3` (= 8) | | | `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` |
| `DOMAIN_SHARD_PROPOSAL` | `DomainType('0x80000000')` | | | `DOMAIN_LIGHT_CLIENT` | `DomainType('0x82000000')` |
| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | | | `DOMAIN_CUSTODY_BIT_SLASHING` | `DomainType('0x83000000')` |
| `DOMAIN_LIGHT_CLIENT` | `DomainType('0x82000000')` | | | `DOMAIN_LIGHT_SELECTION_PROOF` | `DomainType('0x84000000')` |
| `NO_SIGNATURE` | `BLSSignature(b'\x00' * 96)` | | | `DOMAIN_LIGHT_AGGREGATE_AND_PROOF` | `DomainType('0x85000000')` |
## Updated containers ## Updated containers
@ -133,6 +166,8 @@ class AttestationData(Container):
# FFG vote # FFG vote
source: Checkpoint source: Checkpoint
target: Checkpoint target: Checkpoint
# Shard vote
shard: Shard
# Current-slot shard block root # Current-slot shard block root
shard_head_root: Root shard_head_root: Root
# Shard transition root # Shard transition root
@ -145,7 +180,6 @@ class AttestationData(Container):
class Attestation(Container): class Attestation(Container):
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE] aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
data: AttestationData data: AttestationData
custody_bits_blocks: List[Bitlist[MAX_VALIDATORS_PER_COMMITTEE], MAX_SHARD_BLOCKS_PER_ATTESTATION]
signature: BLSSignature signature: BLSSignature
``` ```
@ -165,8 +199,9 @@ class PendingAttestation(Container):
```python ```python
class IndexedAttestation(Container): class IndexedAttestation(Container):
committee: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE] attesting_indices: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]
attestation: Attestation data: AttestationData
signature: BLSSignature
``` ```
### Extended `AttesterSlashing` ### Extended `AttesterSlashing`
@ -197,7 +232,9 @@ class Validator(Container):
# (of the particular validator) in which the validator is activated # (of the particular validator) in which the validator is activated
# = get_custody_period_for_validator(...) # = get_custody_period_for_validator(...)
next_custody_secret_to_reveal: uint64 next_custody_secret_to_reveal: uint64
max_reveal_lateness: Epoch # TODO: The max_reveal_lateness doesn't really make sense anymore.
# So how do we incentivise early custody key reveals now?
all_custody_secrets_revealed_epoch: Epoch # to be initialized to FAR_FUTURE_EPOCH
``` ```
### Extended `BeaconBlockBody` ### Extended `BeaconBlockBody`
@ -216,13 +253,15 @@ class BeaconBlockBody(Container):
deposits: List[Deposit, MAX_DEPOSITS] deposits: List[Deposit, MAX_DEPOSITS]
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
# Custody game # Custody game
custody_slashings: List[SignedCustodySlashing, MAX_CUSTODY_SLASHINGS] chunk_challenges: List[CustodyChunkChallenge, MAX_CUSTODY_CHUNK_CHALLENGES]
chunk_challenge_responses: List[CustodyChunkResponse, MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES]
custody_key_reveals: List[CustodyKeyReveal, MAX_CUSTODY_KEY_REVEALS] custody_key_reveals: List[CustodyKeyReveal, MAX_CUSTODY_KEY_REVEALS]
early_derived_secret_reveals: List[EarlyDerivedSecretReveal, MAX_EARLY_DERIVED_SECRET_REVEALS] early_derived_secret_reveals: List[EarlyDerivedSecretReveal, MAX_EARLY_DERIVED_SECRET_REVEALS]
custody_slashings: List[SignedCustodySlashing, MAX_CUSTODY_SLASHINGS]
# Shards # Shards
shard_transitions: Vector[ShardTransition, MAX_SHARDS] shard_transitions: Vector[ShardTransition, MAX_SHARDS]
# Light clients # Light clients
light_client_signature_bitfield: Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE] light_client_bits: Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]
light_client_signature: BLSSignature light_client_signature: BLSSignature
``` ```
@ -285,6 +324,7 @@ class BeaconState(Container):
current_justified_checkpoint: Checkpoint current_justified_checkpoint: Checkpoint
finalized_checkpoint: Checkpoint finalized_checkpoint: Checkpoint
# Phase 1 # Phase 1
current_epoch_start_shard: Shard
shard_states: List[ShardState, MAX_SHARDS] shard_states: List[ShardState, MAX_SHARDS]
online_countdown: List[OnlineEpochs, VALIDATOR_REGISTRY_LIMIT] # not a raw byte array, considered its large size. online_countdown: List[OnlineEpochs, VALIDATOR_REGISTRY_LIMIT] # not a raw byte array, considered its large size.
current_light_committee: CompactCommittee current_light_committee: CompactCommittee
@ -294,6 +334,8 @@ class BeaconState(Container):
# at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
exposed_derived_secrets: Vector[List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH], exposed_derived_secrets: Vector[List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH],
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS]
custody_chunk_challenge_records: List[CustodyChunkChallengeRecord, MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS]
custody_chunk_challenge_index: uint64
``` ```
## New containers ## New containers
@ -338,7 +380,6 @@ class ShardBlockHeader(Container):
class ShardState(Container): class ShardState(Container):
slot: Slot slot: Slot
gasprice: Gwei gasprice: Gwei
transition_digest: Bytes32
latest_block_root: Root latest_block_root: Root
``` ```
@ -351,6 +392,7 @@ class ShardTransition(Container):
# Shard block lengths # Shard block lengths
shard_block_lengths: List[uint64, MAX_SHARD_BLOCKS_PER_ATTESTATION] shard_block_lengths: List[uint64, MAX_SHARD_BLOCKS_PER_ATTESTATION]
# Shard data roots # Shard data roots
# The root is of ByteList[MAX_SHARD_BLOCK_SIZE]
shard_data_roots: List[Bytes32, MAX_SHARD_BLOCKS_PER_ATTESTATION] shard_data_roots: List[Bytes32, MAX_SHARD_BLOCKS_PER_ATTESTATION]
# Intermediate shard states # Intermediate shard states
shard_states: List[ShardState, MAX_SHARD_BLOCKS_PER_ATTESTATION] shard_states: List[ShardState, MAX_SHARD_BLOCKS_PER_ATTESTATION]
@ -452,7 +494,7 @@ def compute_offset_slots(start_slot: Slot, end_slot: Slot) -> Sequence[Slot]:
#### `compute_updated_gasprice` #### `compute_updated_gasprice`
```python ```python
def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint8) -> Gwei: def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64) -> Gwei:
if shard_block_length > TARGET_SHARD_BLOCK_SIZE: if shard_block_length > TARGET_SHARD_BLOCK_SIZE:
delta = (prev_gasprice * (shard_block_length - TARGET_SHARD_BLOCK_SIZE) delta = (prev_gasprice * (shard_block_length - TARGET_SHARD_BLOCK_SIZE)
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT) // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
@ -490,7 +532,7 @@ def get_active_shard_count(state: BeaconState) -> uint64:
```python ```python
def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]: def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]:
active_validators = get_active_validator_indices(state, get_current_epoch(state)) active_validators = get_active_validator_indices(state, get_current_epoch(state))
return set([i for i in active_validators if state.online_countdown[i] != 0]) return set(i for i in active_validators if state.online_countdown[i] != 0) # non-duplicate
``` ```
#### `get_shard_committee` #### `get_shard_committee`
@ -517,7 +559,7 @@ def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -
```python ```python
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]: def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
""" """
Return the light client committee of no more than ``TARGET_COMMITTEE_SIZE`` validators. Return the light client committee of no more than ``LIGHT_CLIENT_COMMITTEE_SIZE`` validators.
""" """
source_epoch = compute_committee_source_epoch(epoch, LIGHT_CLIENT_COMMITTEE_PERIOD) source_epoch = compute_committee_source_epoch(epoch, LIGHT_CLIENT_COMMITTEE_PERIOD)
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch) active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
@ -527,26 +569,33 @@ def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Seque
seed=seed, seed=seed,
index=0, index=0,
count=get_active_shard_count(beacon_state), count=get_active_shard_count(beacon_state),
)[:TARGET_COMMITTEE_SIZE] )[:LIGHT_CLIENT_COMMITTEE_SIZE]
``` ```
#### `get_shard_proposer_index` #### `get_shard_proposer_index`
```python ```python
def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard) -> ValidatorIndex: def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard) -> ValidatorIndex:
committee = get_shard_committee(beacon_state, compute_epoch_at_slot(slot), shard) """
r = bytes_to_int(get_seed(beacon_state, get_current_epoch(beacon_state), DOMAIN_SHARD_COMMITTEE)[:8]) Return the proposer's index of shard block at ``slot``.
"""
epoch = compute_epoch_at_slot(slot)
committee = get_shard_committee(beacon_state, epoch, shard)
seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_COMMITTEE) + uint_to_bytes(slot))
r = bytes_to_uint64(seed[:8])
return committee[r % len(committee)] return committee[r % len(committee)]
``` ```
#### `get_indexed_attestation` #### `get_committee_count_delta`
```python ```python
def get_indexed_attestation(beacon_state: BeaconState, attestation: Attestation) -> IndexedAttestation: def get_committee_count_delta(state: BeaconState, start_slot: Slot, stop_slot: Slot) -> uint64:
committee = get_beacon_committee(beacon_state, attestation.data.slot, attestation.data.index) """
return IndexedAttestation( Return the sum of committee counts in range ``[start_slot, stop_slot)``.
committee=committee, """
attestation=attestation, return sum(
get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(slot)))
for slot in range(start_slot, stop_slot)
) )
``` ```
@ -554,21 +603,35 @@ def get_indexed_attestation(beacon_state: BeaconState, attestation: Attestation)
```python ```python
def get_start_shard(state: BeaconState, slot: Slot) -> Shard: def get_start_shard(state: BeaconState, slot: Slot) -> Shard:
# TODO: implement start shard logic """
return Shard(0) Return the start shard at ``slot``.
``` """
current_epoch_start_slot = compute_start_slot_at_epoch(get_current_epoch(state))
#### `get_shard` active_shard_count = get_active_shard_count(state)
if current_epoch_start_slot == slot:
```python return state.current_epoch_start_shard
def get_shard(state: BeaconState, attestation: Attestation) -> Shard: elif slot > current_epoch_start_slot:
return compute_shard_from_committee_index(state, attestation.data.index, attestation.data.slot) # Current epoch or the next epoch lookahead
shard_delta = get_committee_count_delta(state, start_slot=current_epoch_start_slot, stop_slot=slot)
return Shard((state.current_epoch_start_shard + shard_delta) % active_shard_count)
else:
# Previous epoch
shard_delta = get_committee_count_delta(state, start_slot=slot, stop_slot=current_epoch_start_slot)
max_committees_per_epoch = MAX_COMMITTEES_PER_SLOT * SLOTS_PER_EPOCH
return Shard(
# Ensure positive
(state.current_epoch_start_shard + max_committees_per_epoch * active_shard_count - shard_delta)
% active_shard_count
)
``` ```
#### `get_latest_slot_for_shard` #### `get_latest_slot_for_shard`
```python ```python
def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot: def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot:
"""
Return the latest slot number of the given ``shard``.
"""
return state.shard_states[shard].slot return state.shard_states[shard].slot
``` ```
@ -577,82 +640,23 @@ def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot:
```python ```python
def get_offset_slots(state: BeaconState, shard: Shard) -> Sequence[Slot]: def get_offset_slots(state: BeaconState, shard: Shard) -> Sequence[Slot]:
""" """
Return the offset slots of the given ``shard`` between that latest included slot and current slot. Return the offset slots of the given ``shard``.
The offset slots are after the latest slot and before the current slot.
""" """
return compute_offset_slots(get_latest_slot_for_shard(state, shard), state.slot) return compute_offset_slots(get_latest_slot_for_shard(state, shard), state.slot)
``` ```
### Predicates ### Predicates
#### `verify_attestation_custody`
```python
def verify_attestation_custody(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
"""
Check if ``indexed_attestation`` has valid signature against non-empty custody bits.
"""
attestation = indexed_attestation.attestation
aggregation_bits = attestation.aggregation_bits
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
all_pubkeys = []
all_signing_roots = []
for block_index, custody_bits in enumerate(attestation.custody_bits_blocks):
assert len(custody_bits) == len(indexed_attestation.committee)
for participant, aggregation_bit, custody_bit in zip(
indexed_attestation.committee, aggregation_bits, custody_bits
):
if aggregation_bit:
all_pubkeys.append(state.validators[participant].pubkey)
# Note: only 2N distinct message hashes
attestation_wrapper = AttestationCustodyBitWrapper(
attestation_data_root=hash_tree_root(attestation.data),
block_index=block_index,
bit=custody_bit,
)
all_signing_roots.append(compute_signing_root(attestation_wrapper, domain))
else:
assert not custody_bit
return bls.AggregateVerify(all_pubkeys, all_signing_roots, signature=attestation.signature)
```
#### Updated `is_valid_indexed_attestation`
Note that this replaces the Phase 0 `is_valid_indexed_attestation`.
```python
def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
"""
Check if ``indexed_attestation`` has valid indices and signature.
"""
# Verify aggregate signature
attestation = indexed_attestation.attestation
aggregation_bits = attestation.aggregation_bits
if not any(aggregation_bits) or len(aggregation_bits) != len(indexed_attestation.committee):
return False
if len(attestation.custody_bits_blocks) == 0:
# fall back on phase0 behavior if there is no shard data.
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
all_pubkeys = []
for participant, aggregation_bit in zip(indexed_attestation.committee, aggregation_bits):
if aggregation_bit:
all_pubkeys.append(state.validators[participant].pubkey)
signing_root = compute_signing_root(indexed_attestation.attestation.data, domain)
return bls.FastAggregateVerify(all_pubkeys, signing_root, signature=attestation.signature)
else:
return verify_attestation_custody(state, indexed_attestation)
```
#### `is_on_time_attestation` #### `is_on_time_attestation`
```python ```python
def is_on_time_attestation(state: BeaconState, def is_on_time_attestation(state: BeaconState,
attestation: Attestation) -> bool: attestation_data: AttestationData) -> bool:
""" """
Check if the given attestation is on-time. Check if the given ``attestation_data`` is on-time.
""" """
# TODO: MIN_ATTESTATION_INCLUSION_DELAY should always be 1 return attestation_data.slot == compute_previous_slot(state.slot)
return attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot
``` ```
#### `is_winning_attestation` #### `is_winning_attestation`
@ -663,11 +667,11 @@ def is_winning_attestation(state: BeaconState,
committee_index: CommitteeIndex, committee_index: CommitteeIndex,
winning_root: Root) -> bool: winning_root: Root) -> bool:
""" """
Check if ``attestation`` helped contribute to the successful crosslink of Check if on-time ``attestation`` helped contribute to the successful crosslink of
``winning_root`` formed by ``committee_index`` committee at the current slot. ``winning_root`` formed by ``committee_index`` committee.
""" """
return ( return (
attestation.data.slot == state.slot is_on_time_attestation(state, attestation.data)
and attestation.data.index == committee_index and attestation.data.index == committee_index
and attestation.data.shard_transition_root == winning_root and attestation.data.shard_transition_root == winning_root
) )
@ -710,7 +714,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
process_block_header(state, block) process_block_header(state, block)
process_randao(state, block.body) process_randao(state, block.body)
process_eth1_data(state, block.body) process_eth1_data(state, block.body)
process_light_client_signatures(state, block.body) process_light_client_aggregate(state, block.body)
process_operations(state, block.body) process_operations(state, block.body)
``` ```
@ -747,7 +751,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
```python ```python
def validate_attestation(state: BeaconState, attestation: Attestation) -> None: def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
data = attestation.data data = attestation.data
assert data.index < get_committee_count_at_slot(state, data.slot) assert data.index < get_committee_count_per_slot(state, data.target.epoch)
assert data.index < get_active_shard_count(state) assert data.index < get_active_shard_count(state)
assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state)) assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
assert data.target.epoch == compute_epoch_at_slot(data.slot) assert data.target.epoch == compute_epoch_at_slot(data.slot)
@ -761,20 +765,19 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
else: else:
assert attestation.data.source == state.previous_justified_checkpoint assert attestation.data.source == state.previous_justified_checkpoint
shard = get_shard(state, attestation) # Type 1: on-time attestations
if is_on_time_attestation(state, attestation.data):
# Type 1: on-time attestations, the custody bits should be non-empty.
if attestation.custody_bits_blocks != []:
# Ensure on-time attestation
assert is_on_time_attestation(state, attestation)
# Correct data root count
assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard))
# Correct parent block root # Correct parent block root
assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot)) assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))
# Type 2: no shard transition, no custody bits # Correct shard number
shard = compute_shard_from_committee_index(state, attestation.data.index, attestation.data.slot)
assert attestation.data.shard == shard
# On-time attestations should have a non-empty shard transition root
assert attestation.data.shard_transition_root != hash_tree_root(ShardTransition())
# Type 2: no shard transition
else: else:
# Ensure delayed attestation # Ensure delayed attestation
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY < state.slot assert data.slot < compute_previous_slot(state.slot)
# Late attestations cannot have a shard transition root # Late attestations cannot have a shard transition root
assert data.shard_transition_root == Root() assert data.shard_transition_root == Root()
@ -846,6 +849,9 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
shard_parent_root = hash_tree_root(header) shard_parent_root = hash_tree_root(header)
headers.append(header) headers.append(header)
proposers.append(proposal_index) proposers.append(proposal_index)
else:
# Must have a stub for `shard_data_root` if empty slot
assert transition.shard_data_roots[i] == Root()
prev_gasprice = shard_state.gasprice prev_gasprice = shard_state.gasprice
@ -869,9 +875,10 @@ def process_crosslink_for_shard(state: BeaconState,
committee_index: CommitteeIndex, committee_index: CommitteeIndex,
shard_transition: ShardTransition, shard_transition: ShardTransition,
attestations: Sequence[Attestation]) -> Root: attestations: Sequence[Attestation]) -> Root:
committee = get_beacon_committee(state, state.slot, committee_index) on_time_attestation_slot = compute_previous_slot(state.slot)
committee = get_beacon_committee(state, on_time_attestation_slot, committee_index)
online_indices = get_online_validator_indices(state) online_indices = get_online_validator_indices(state)
shard = compute_shard_from_committee_index(state, committee_index, state.slot) shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot)
# Loop over all shard transition roots # Loop over all shard transition roots
shard_transition_roots = set([a.data.shard_transition_root for a in attestations]) shard_transition_roots = set([a.data.shard_transition_root for a in attestations])
@ -881,9 +888,6 @@ def process_crosslink_for_shard(state: BeaconState,
for attestation in transition_attestations: for attestation in transition_attestations:
participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits) participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
transition_participants = transition_participants.union(participants) transition_participants = transition_participants.union(participants)
assert attestation.data.shard_head_root == shard_transition.shard_data_roots[
len(shard_transition.shard_data_roots) - 1
]
enough_online_stake = ( enough_online_stake = (
get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >= get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >=
@ -896,6 +900,12 @@ def process_crosslink_for_shard(state: BeaconState,
# Attestation <-> shard transition consistency # Attestation <-> shard transition consistency
assert shard_transition_root == hash_tree_root(shard_transition) assert shard_transition_root == hash_tree_root(shard_transition)
# Check `shard_head_root` of the winning root
last_offset_index = len(shard_transition.shard_states) - 1
shard_head_root = shard_transition.shard_states[last_offset_index].latest_block_root
for attestation in transition_attestations:
assert attestation.data.shard_head_root == shard_head_root
# Apply transition # Apply transition
apply_shard_transition(state, shard, shard_transition) apply_shard_transition(state, shard, shard_transition)
# Apply proposer reward and cost # Apply proposer reward and cost
@ -926,16 +936,19 @@ def process_crosslink_for_shard(state: BeaconState,
def process_crosslinks(state: BeaconState, def process_crosslinks(state: BeaconState,
shard_transitions: Sequence[ShardTransition], shard_transitions: Sequence[ShardTransition],
attestations: Sequence[Attestation]) -> None: attestations: Sequence[Attestation]) -> None:
committee_count = get_committee_count_at_slot(state, state.slot) on_time_attestation_slot = compute_previous_slot(state.slot)
committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(on_time_attestation_slot))
for committee_index in map(CommitteeIndex, range(committee_count)): for committee_index in map(CommitteeIndex, range(committee_count)):
shard = compute_shard_from_committee_index(state, committee_index, state.slot)
# All attestations in the block for this committee/shard and current slot # All attestations in the block for this committee/shard and current slot
shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot)
# Since the attestations are validated, all `shard_attestations` satisfy `attestation.data.shard == shard`
shard_attestations = [ shard_attestations = [
attestation for attestation in attestations attestation for attestation in attestations
if is_on_time_attestation(state, attestation) and attestation.data.index == committee_index if is_on_time_attestation(state, attestation.data) and attestation.data.index == committee_index
] ]
winning_root = process_crosslink_for_shard(
winning_root = process_crosslink_for_shard(state, committee_index, shard_transitions[shard], shard_attestations) state, committee_index, shard_transitions[shard], shard_attestations
)
if winning_root != Root(): if winning_root != Root():
# Mark relevant pending attestations as creating a successful crosslink # Mark relevant pending attestations as creating a successful crosslink
for pending_attestation in state.current_epoch_attestations: for pending_attestation in state.current_epoch_attestations:
@ -969,64 +982,51 @@ def process_shard_transitions(state: BeaconState,
assert verify_empty_shard_transition(state, shard_transitions) assert verify_empty_shard_transition(state, shard_transitions)
``` ```
##### New Attester slashing processing ##### New default validator for deposits
```python ```python
def get_indices_from_committee( def get_validator_from_deposit(state: BeaconState, deposit: Deposit) -> Validator:
committee: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE], amount = deposit.data.amount
bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> Sequence[ValidatorIndex]: effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
assert len(bits) == len(committee) next_custody_secret_to_reveal = get_custody_period_for_validator(
return [validator_index for i, validator_index in enumerate(committee) if bits[i]] ValidatorIndex(len(state.validators)),
``` get_current_epoch(state),
```python
def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
indexed_attestation_1 = attester_slashing.attestation_1
indexed_attestation_2 = attester_slashing.attestation_2
assert is_slashable_attestation_data(
indexed_attestation_1.attestation.data,
indexed_attestation_2.attestation.data,
)
assert is_valid_indexed_attestation(state, indexed_attestation_1)
assert is_valid_indexed_attestation(state, indexed_attestation_2)
indices_1 = get_indices_from_committee(
indexed_attestation_1.committee,
indexed_attestation_1.attestation.aggregation_bits,
)
indices_2 = get_indices_from_committee(
indexed_attestation_2.committee,
indexed_attestation_2.attestation.aggregation_bits,
) )
slashed_any = False return Validator(
indices = set(indices_1).intersection(indices_2) pubkey=deposit.data.pubkey,
for index in sorted(indices): withdrawal_credentials=deposit.data.withdrawal_credentials,
if is_slashable_validator(state.validators[index], get_current_epoch(state)): activation_eligibility_epoch=FAR_FUTURE_EPOCH,
slash_validator(state, index) activation_epoch=FAR_FUTURE_EPOCH,
slashed_any = True exit_epoch=FAR_FUTURE_EPOCH,
assert slashed_any withdrawable_epoch=FAR_FUTURE_EPOCH,
effective_balance=effective_balance,
next_custody_secret_to_reveal=next_custody_secret_to_reveal,
all_custody_secrets_revealed_epoch=FAR_FUTURE_EPOCH,
)
``` ```
#### Light client processing #### Light client processing
```python ```python
def process_light_client_signatures(state: BeaconState, block_body: BeaconBlockBody) -> None: def process_light_client_aggregate(state: BeaconState, block_body: BeaconBlockBody) -> None:
committee = get_light_client_committee(state, get_current_epoch(state)) committee = get_light_client_committee(state, get_current_epoch(state))
previous_slot = compute_previous_slot(state.slot)
previous_block_root = get_block_root_at_slot(state, previous_slot)
total_reward = Gwei(0) total_reward = Gwei(0)
signer_pubkeys = [] signer_pubkeys = []
for bit_index, participant_index in enumerate(committee): for bit_index, participant_index in enumerate(committee):
if block_body.light_client_signature_bitfield[bit_index]: if block_body.light_client_bits[bit_index]:
signer_pubkeys.append(state.validators[participant_index].pubkey) signer_pubkeys.append(state.validators[participant_index].pubkey)
increase_balance(state, participant_index, get_base_reward(state, participant_index)) if not state.validators[participant_index].slashed:
total_reward += get_base_reward(state, participant_index) increase_balance(state, participant_index, get_base_reward(state, participant_index))
total_reward += get_base_reward(state, participant_index)
increase_balance(state, get_beacon_proposer_index(state), Gwei(total_reward // PROPOSER_REWARD_QUOTIENT)) increase_balance(state, get_beacon_proposer_index(state), Gwei(total_reward // PROPOSER_REWARD_QUOTIENT))
slot = compute_previous_slot(state.slot) signing_root = compute_signing_root(previous_block_root,
signing_root = compute_signing_root(get_block_root_at_slot(state, slot), get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(previous_slot)))
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot)))
assert optional_fast_aggregate_verify(signer_pubkeys, signing_root, block_body.light_client_signature) assert optional_fast_aggregate_verify(signer_pubkeys, signing_root, block_body.light_client_signature)
``` ```
@ -1040,16 +1040,27 @@ def process_epoch(state: BeaconState) -> None:
process_rewards_and_penalties(state) process_rewards_and_penalties(state)
process_registry_updates(state) process_registry_updates(state)
process_reveal_deadlines(state) process_reveal_deadlines(state)
process_challenge_deadlines(state)
process_slashings(state) process_slashings(state)
process_final_updates(state) process_final_updates(state) # phase 0 final updates
process_phase_1_final_updates(state)
```
#### Phase 1 final updates
```python
def process_phase_1_final_updates(state: BeaconState) -> None:
process_custody_final_updates(state) process_custody_final_updates(state)
process_online_tracking(state) process_online_tracking(state)
process_light_client_committee_updates(state) process_light_client_committee_updates(state)
# Update current_epoch_start_shard
state.current_epoch_start_shard = get_start_shard(state, Slot(state.slot + 1))
``` ```
#### Custody game updates #### Custody game updates
`process_reveal_deadlines` and `process_custody_final_updates` are defined in [the Custody Game spec](./custody-game.md), `process_reveal_deadlines`, `process_challenge_deadlines` and `process_custody_final_updates` are defined in [the Custody Game spec](./custody-game.md),
#### Online-tracking #### Online-tracking
@ -1073,8 +1084,9 @@ def process_light_client_committee_updates(state: BeaconState) -> None:
""" """
Update light client committees. Update light client committees.
""" """
if get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == 0: next_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
if next_epoch % LIGHT_CLIENT_COMMITTEE_PERIOD == 0:
state.current_light_committee = state.next_light_committee state.current_light_committee = state.next_light_committee
new_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD) new_committee = get_light_client_committee(state, next_epoch + LIGHT_CLIENT_COMMITTEE_PERIOD)
state.next_light_committee = committee_to_compact_committee(state, new_committee) state.next_light_committee = committee_to_compact_committee(state, new_committee)
``` ```

View File

@ -15,21 +15,29 @@
- [Time parameters](#time-parameters) - [Time parameters](#time-parameters)
- [Max operations per block](#max-operations-per-block) - [Max operations per block](#max-operations-per-block)
- [Reward and penalty quotients](#reward-and-penalty-quotients) - [Reward and penalty quotients](#reward-and-penalty-quotients)
- [Signature domain types](#signature-domain-types)
- [Data structures](#data-structures) - [Data structures](#data-structures)
- [New Beacon Chain operations](#new-beacon-chain-operations) - [New Beacon Chain operations](#new-beacon-chain-operations)
- [`CustodyChunkChallenge`](#custodychunkchallenge)
- [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord)
- [`CustodyChunkResponse`](#custodychunkresponse)
- [`CustodySlashing`](#custodyslashing) - [`CustodySlashing`](#custodyslashing)
- [`SignedCustodySlashing`](#signedcustodyslashing) - [`SignedCustodySlashing`](#signedcustodyslashing)
- [`CustodyKeyReveal`](#custodykeyreveal) - [`CustodyKeyReveal`](#custodykeyreveal)
- [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal) - [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal)
- [Helpers](#helpers) - [Helpers](#helpers)
- [`get_block_data_merkle_root`](#get_block_data_merkle_root)
- [`replace_empty_or_append`](#replace_empty_or_append)
- [`legendre_bit`](#legendre_bit) - [`legendre_bit`](#legendre_bit)
- [`custody_atoms`](#custody_atoms) - [`get_custody_atoms`](#get_custody_atoms)
- [`get_custody_secrets`](#get_custody_secrets)
- [`universal_hash_function`](#universal_hash_function)
- [`compute_custody_bit`](#compute_custody_bit) - [`compute_custody_bit`](#compute_custody_bit)
- [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period) - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period)
- [`get_custody_period_for_validator`](#get_custody_period_for_validator) - [`get_custody_period_for_validator`](#get_custody_period_for_validator)
- [Per-block processing](#per-block-processing) - [Per-block processing](#per-block-processing)
- [Custody Game Operations](#custody-game-operations) - [Custody Game Operations](#custody-game-operations)
- [Chunk challenges](#chunk-challenges)
- [Custody chunk response](#custody-chunk-response)
- [Custody key reveals](#custody-key-reveals) - [Custody key reveals](#custody-key-reveals)
- [Early derived secret reveals](#early-derived-secret-reveals) - [Early derived secret reveals](#early-derived-secret-reveals)
- [Custody Slashings](#custody-slashings) - [Custody Slashings](#custody-slashings)
@ -49,8 +57,10 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
| Name | Value | Unit | | Name | Value | Unit |
| - | - | - | | - | - | - |
| `BLS12_381_Q` | `4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` | - | | `CUSTODY_PRIME` | `2 ** 256 - 189` | - |
| `BYTES_PER_CUSTODY_ATOM` | `48` | bytes | | `CUSTODY_SECRETS` | `3` | - |
| `BYTES_PER_CUSTODY_ATOM` | `32` | bytes |
| `CUSTODY_PROBABILITY_EXPONENT` | `10` | - |
## Configuration ## Configuration
@ -59,18 +69,22 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
| Name | Value | Unit | Duration | | Name | Value | Unit | Duration |
| - | - | :-: | :-: | | - | - | :-: | :-: |
| `RANDAO_PENALTY_EPOCHS` | `2**1` (= 2) | epochs | 12.8 minutes | | `RANDAO_PENALTY_EPOCHS` | `2**1` (= 2) | epochs | 12.8 minutes |
| `EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS` | `2**14` (= 16,384) | epochs | ~73 days | | `EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS` | `2**15` (= 32,768) | epochs | ~146 days |
| `EPOCHS_PER_CUSTODY_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days | | `EPOCHS_PER_CUSTODY_PERIOD` | `2**14` (= 16,384) | epochs | ~73 days |
| `CUSTODY_PERIOD_TO_RANDAO_PADDING` | `2**11` (= 2,048) | epochs | ~9 days | | `CUSTODY_PERIOD_TO_RANDAO_PADDING` | `2**11` (= 2,048) | epochs | ~9 days |
| `MAX_REVEAL_LATENESS_DECREMENT` | `2**7` (= 128) | epochs | ~14 hours | | `MAX_CHUNK_CHALLENGE_DELAY` | `2**15` (= 32,768) | epochs | ~146 days |
| `CHUNK_RESPONSE_DEADLINE` | `2**14` (= 16,384) | epochs | ~73 days |
### Max operations per block ### Max operations per block
| Name | Value | | Name | Value |
| - | - | | - | - |
| `MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS` | `2**20` (= 1,048,576) |
| `MAX_CUSTODY_KEY_REVEALS` | `2**8` (= 256) | | `MAX_CUSTODY_KEY_REVEALS` | `2**8` (= 256) |
| `MAX_EARLY_DERIVED_SECRET_REVEALS` | `1` | | `MAX_EARLY_DERIVED_SECRET_REVEALS` | `2**0` (= 1) |
| `MAX_CUSTODY_SLASHINGS` | `1` | | `MAX_CUSTODY_CHUNK_CHALLENGES` | `2**2` (= 4) |
| `MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES` | `2**4` (= 16) |
| `MAX_CUSTODY_SLASHINGS` | `2**0` (= 1) |
### Reward and penalty quotients ### Reward and penalty quotients
@ -79,23 +93,47 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
| `EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE` | `2**1` (= 2) | | `EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE` | `2**1` (= 2) |
| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) | | `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) |
### Signature domain types
The following types are defined, mapping into `DomainType` (little endian):
| Name | Value |
| - | - |
| `DOMAIN_CUSTODY_BIT_SLASHING` | `DomainType('0x83000000')` |
## Data structures ## Data structures
### New Beacon Chain operations ### New Beacon Chain operations
#### `CustodyChunkChallenge`
```python
class CustodyChunkChallenge(Container):
responder_index: ValidatorIndex
shard_transition: ShardTransition
attestation: Attestation
data_index: uint64
chunk_index: uint64
```
#### `CustodyChunkChallengeRecord`
```python
class CustodyChunkChallengeRecord(Container):
challenge_index: uint64
challenger_index: ValidatorIndex
responder_index: ValidatorIndex
inclusion_epoch: Epoch
data_root: Root
chunk_index: uint64
```
#### `CustodyChunkResponse`
```python
class CustodyChunkResponse(Container):
challenge_index: uint64
chunk_index: uint64
chunk: ByteVector[BYTES_PER_CUSTODY_CHUNK]
branch: Vector[Root, CUSTODY_RESPONSE_DEPTH]
```
#### `CustodySlashing` #### `CustodySlashing`
```python ```python
class CustodySlashing(Container): class CustodySlashing(Container):
# Attestation.custody_bits_blocks[data_index][committee.index(malefactor_index)] is the target custody bit to check.
# (Attestation.data.shard_transition_root as ShardTransition).shard_data_roots[data_index] is the root of the data. # (Attestation.data.shard_transition_root as ShardTransition).shard_data_roots[data_index] is the root of the data.
data_index: uint64 data_index: uint64
malefactor_index: ValidatorIndex malefactor_index: ValidatorIndex
@ -114,7 +152,6 @@ class SignedCustodySlashing(Container):
signature: BLSSignature signature: BLSSignature
``` ```
#### `CustodyKeyReveal` #### `CustodyKeyReveal`
```python ```python
@ -146,6 +183,22 @@ class EarlyDerivedSecretReveal(Container):
## Helpers ## Helpers
### `get_block_data_merkle_root`
`get_block_data_merkle_root(data: ByteList) -> Root` is the function that returns the Merkle root of the block data without the length mix-in.
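A minimal self-contained sketch of one way to realize this, assuming the data is merkleized as 32-byte SSZ chunks padded out to the `MAX_SHARD_BLOCK_SIZE` limit (so the tree shape is consistent with `CUSTODY_RESPONSE_DEPTH` proofs) but with no length mixed into the root; the function name and `sha256` pairwise fold are an assumption, not the spec's implementation:

```python
from hashlib import sha256

def get_block_data_merkle_root_sketch(data: bytes, max_size: int = 2**20) -> bytes:
    # Split into 32-byte chunks, zero-padding the last chunk (assumes len(data) <= max_size
    # and max_size // 32 is a power of two, as with MAX_SHARD_BLOCK_SIZE = 2**20).
    chunks = [data[i:i + 32].ljust(32, b'\x00') for i in range(0, len(data), 32)]
    # Pad the chunk list with zero chunks up to the fixed limit, as hash_tree_root of
    # ByteList[max_size] would, but skip the trailing length mix-in.
    chunks += [b'\x00' * 32] * (max_size // 32 - len(chunks))
    while len(chunks) > 1:
        chunks = [sha256(chunks[i] + chunks[i + 1]).digest() for i in range(0, len(chunks), 2)]
    return chunks[0]
```

Under this reading, `hash_tree_root` of each `ByteVector[BYTES_PER_CUSTODY_CHUNK]` chunk is an inner node of the same tree at depth `CUSTODY_RESPONSE_DEPTH`, which appears to be what `process_chunk_challenge_response` later verifies with `is_valid_merkle_branch`.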
### `replace_empty_or_append`
```python
def replace_empty_or_append(l: List, new_element: Any) -> int:
for i in range(len(l)):
if l[i] == type(new_element)():
l[i] = new_element
return i
l.append(new_element)
return len(l) - 1
```
### `legendre_bit` ### `legendre_bit`
Returns the Legendre symbol `(a/q)` normalized as a bit (i.e. `((a/q) + 1) // 2`). In a production implementation, a well-optimized library (e.g. GMP) should be used for this. Returns the Legendre symbol `(a/q)` normalized as a bit (i.e. `((a/q) + 1) // 2`). In a production implementation, a well-optimized library (e.g. GMP) should be used for this.
@ -175,27 +228,56 @@ def legendre_bit(a: int, q: int) -> int:
return 0 return 0
``` ```
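Since the function body is collapsed in this diff, here is a minimal, non-optimized equivalent via Euler's criterion (a sketch of the definition above, not the spec's own body; edge-case handling for `a % q == 0` may differ, and production code should defer to an optimized library as noted):

```python
def legendre_bit_sketch(a: int, q: int) -> int:
    # q must be an odd prime. pow(a, (q - 1) // 2, q) is 1 for quadratic residues,
    # q - 1 (i.e. -1) for non-residues, and 0 when q divides a.
    euler = pow(a % q, (q - 1) // 2, q)
    symbol = -1 if euler == q - 1 else euler   # map to {-1, 0, 1}
    return (symbol + 1) // 2
```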
### `custody_atoms` ### `get_custody_atoms`
Given one set of data, return the custody atoms: each atom will be combined with one legendre bit. Given one set of data, return the custody atoms: each atom will be combined with one legendre bit.
```python ```python
def get_custody_atoms(bytez: bytes) -> Sequence[bytes]: def get_custody_atoms(bytez: bytes) -> Sequence[bytes]:
bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_ATOM) # right-padding length_remainder = len(bytez) % BYTES_PER_CUSTODY_ATOM
return [bytez[i:i + BYTES_PER_CUSTODY_ATOM] bytez += b'\x00' * ((BYTES_PER_CUSTODY_ATOM - length_remainder) % BYTES_PER_CUSTODY_ATOM) # right-padding
for i in range(0, len(bytez), BYTES_PER_CUSTODY_ATOM)] return [
bytez[i:i + BYTES_PER_CUSTODY_ATOM]
for i in range(0, len(bytez), BYTES_PER_CUSTODY_ATOM)
]
```
### `get_custody_secrets`
Extract the custody secrets from the signature.
```python
def get_custody_secrets(key: BLSSignature) -> Sequence[int]:
full_G2_element = bls.signature_to_G2(key)
signature = full_G2_element[0].coeffs
signature_bytes = b"".join(x.to_bytes(48, "little") for x in signature)
secrets = [int.from_bytes(signature_bytes[i:i + BYTES_PER_CUSTODY_ATOM], "little")
for i in range(0, len(signature_bytes), 32)]
return secrets
```
### `universal_hash_function`
```python
def universal_hash_function(data_chunks: Sequence[bytes], secrets: Sequence[int]) -> int:
n = len(data_chunks)
return (
sum(
secrets[i % CUSTODY_SECRETS]**i * int.from_bytes(atom, "little") % CUSTODY_PRIME
for i, atom in enumerate(data_chunks)
) + secrets[n % CUSTODY_SECRETS]**n
) % CUSTODY_PRIME
``` ```
### `compute_custody_bit` ### `compute_custody_bit`
```python ```python
def compute_custody_bit(key: BLSSignature, data: bytes) -> bit: def compute_custody_bit(key: BLSSignature, data: ByteList[MAX_SHARD_BLOCK_SIZE]) -> bit:
full_G2_element = bls.signature_to_G2(key)
s = full_G2_element[0].coeffs
custody_atoms = get_custody_atoms(data) custody_atoms = get_custody_atoms(data)
n = len(custody_atoms) secrets = get_custody_secrets(key)
a = sum(s[i % 2]**i * int.from_bytes(atom, "little") for i, atom in enumerate(custody_atoms) + s[n % 2]**n) uhf = universal_hash_function(custody_atoms, secrets)
return legendre_bit(a, BLS12_381_Q) legendre_bits = [legendre_bit(uhf + secrets[0] + i, CUSTODY_PRIME) for i in range(CUSTODY_PROBABILITY_EXPONENT)]
return all(legendre_bits)
``` ```
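Informally, `compute_custody_bit` returns `1` only when all `CUSTODY_PROBABILITY_EXPONENT` Legendre bits are set, and for data independent of the custody secrets each bit behaves like an approximately unbiased coin, so a `1` bit is rare:

```python
# Back-of-the-envelope only, not spec code (CUSTODY_PROBABILITY_EXPONENT = 10 per the table above).
p_custody_bit_is_one = 2 ** -CUSTODY_PROBABILITY_EXPONENT  # = 1/1024, i.e. about 0.1%
```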
### `get_randao_epoch_for_custody_period` ### `get_randao_epoch_for_custody_period`
@ -209,7 +291,7 @@ def get_randao_epoch_for_custody_period(period: uint64, validator_index: Validat
### `get_custody_period_for_validator` ### `get_custody_period_for_validator`
```python ```python
def get_custody_period_for_validator(validator_index: ValidatorIndex, epoch: Epoch) -> int: def get_custody_period_for_validator(validator_index: ValidatorIndex, epoch: Epoch) -> uint64:
''' '''
Return the reveal period for a given validator. Return the reveal period for a given validator.
''' '''
@ -227,11 +309,90 @@ def process_custody_game_operations(state: BeaconState, body: BeaconBlockBody) -
for operation in operations: for operation in operations:
fn(state, operation) fn(state, operation)
for_ops(body.chunk_challenges, process_chunk_challenge)
for_ops(body.chunk_challenge_responses, process_chunk_challenge_response)
for_ops(body.custody_key_reveals, process_custody_key_reveal) for_ops(body.custody_key_reveals, process_custody_key_reveal)
for_ops(body.early_derived_secret_reveals, process_early_derived_secret_reveal) for_ops(body.early_derived_secret_reveals, process_early_derived_secret_reveal)
for_ops(body.custody_slashings, process_custody_slashing) for_ops(body.custody_slashings, process_custody_slashing)
``` ```
#### Chunk challenges
```python
def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge) -> None:
# Verify the attestation
assert is_valid_indexed_attestation(state, get_indexed_attestation(state, challenge.attestation))
# Verify it is not too late to challenge the attestation
max_attestation_challenge_epoch = challenge.attestation.data.target.epoch + MAX_CHUNK_CHALLENGE_DELAY
assert get_current_epoch(state) <= max_attestation_challenge_epoch
# Verify it is not too late to challenge the responder
responder = state.validators[challenge.responder_index]
if responder.exit_epoch < FAR_FUTURE_EPOCH:
assert get_current_epoch(state) <= responder.exit_epoch + MAX_CHUNK_CHALLENGE_DELAY
# Verify responder is slashable
assert is_slashable_validator(responder, get_current_epoch(state))
# Verify the responder participated in the attestation
attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bits)
assert challenge.responder_index in attesters
# Verify shard transition is correctly given
assert hash_tree_root(challenge.shard_transition) == challenge.attestation.data.shard_transition_root
data_root = challenge.shard_transition.shard_data_roots[challenge.data_index]
# Verify the challenge is not a duplicate
for record in state.custody_chunk_challenge_records:
assert (
record.data_root != data_root or
record.chunk_index != challenge.chunk_index
)
# Verify depth
shard_block_length = challenge.shard_transition.shard_block_lengths[challenge.data_index]
transition_chunks = (shard_block_length + BYTES_PER_CUSTODY_CHUNK - 1) // BYTES_PER_CUSTODY_CHUNK
assert challenge.chunk_index < transition_chunks
# Add new chunk challenge record
new_record = CustodyChunkChallengeRecord(
challenge_index=state.custody_chunk_challenge_index,
challenger_index=get_beacon_proposer_index(state),
responder_index=challenge.responder_index,
inclusion_epoch=get_current_epoch(state),
data_root=challenge.shard_transition.shard_data_roots[challenge.data_index],
chunk_index=challenge.chunk_index,
)
replace_empty_or_append(state.custody_chunk_challenge_records, new_record)
state.custody_chunk_challenge_index += 1
# Postpone responder withdrawability
responder.withdrawable_epoch = FAR_FUTURE_EPOCH
```
#### Custody chunk response
```python
def process_chunk_challenge_response(state: BeaconState,
response: CustodyChunkResponse) -> None:
# Get matching challenge (if any) from records
matching_challenges = [
record for record in state.custody_chunk_challenge_records
if record.challenge_index == response.challenge_index
]
assert len(matching_challenges) == 1
challenge = matching_challenges[0]
# Verify chunk index
assert response.chunk_index == challenge.chunk_index
# Verify the chunk matches the crosslink data root
assert is_valid_merkle_branch(
leaf=hash_tree_root(response.chunk),
branch=response.branch,
depth=CUSTODY_RESPONSE_DEPTH,
index=response.chunk_index,
root=challenge.data_root,
)
# Clear the challenge
index_in_records = state.custody_chunk_challenge_records.index(challenge)
state.custody_chunk_challenge_records[index_in_records] = CustodyChunkChallengeRecord()
# Reward the proposer
proposer_index = get_beacon_proposer_index(state)
increase_balance(state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT))
```
#### Custody key reveals #### Custody key reveals
```python ```python
@ -244,7 +405,14 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) ->
epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_secret_to_reveal, reveal.revealer_index)
custody_reveal_period = get_custody_period_for_validator(reveal.revealer_index, get_current_epoch(state))
# Only past custody periods can be revealed, except after exiting the exit period can be revealed
is_past_reveal = revealer.next_custody_secret_to_reveal < custody_reveal_period
is_exited = revealer.exit_epoch <= get_current_epoch(state)
is_exit_period_reveal = (
revealer.next_custody_secret_to_reveal
== get_custody_period_for_validator(reveal.revealer_index, revealer.exit_epoch - 1)
)
assert is_past_reveal or (is_exited and is_exit_period_reveal)
# Revealed validator is active or exited, but not withdrawn
assert is_slashable_validator(revealer, get_current_epoch(state))
@ -254,19 +422,9 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) ->
signing_root = compute_signing_root(epoch_to_sign, domain)
assert bls.Verify(revealer.pubkey, signing_root, reveal.reveal)
# Process reveal
if is_exited and is_exit_period_reveal:
revealer.all_custody_secrets_revealed_epoch = get_current_epoch(state)
revealer.next_custody_secret_to_reveal += 1
# Reward Block Proposer
@ -359,14 +517,16 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
# Verify the attestation
assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
# TODO: custody_slashing.data is not chunked like shard blocks yet, result is lots of padding.
# TODO: can do a single combined merkle proof of data being attested.
# Verify the shard transition is indeed attested by the attestation
shard_transition = custody_slashing.shard_transition
assert hash_tree_root(shard_transition) == attestation.data.shard_transition_root
# Verify that the provided data matches the shard-transition
assert (
get_block_data_merkle_root(custody_slashing.data)
== shard_transition.shard_data_roots[custody_slashing.data_index]
)
assert len(custody_slashing.data) == shard_transition.shard_block_lengths[custody_slashing.data_index]
# Verify existence and participation of claimed malefactor
attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
@ -381,18 +541,14 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
signing_root = compute_signing_root(epoch_to_sign, domain)
assert bls.Verify(malefactor.pubkey, signing_root, custody_slashing.malefactor_secret)
# Compute the custody bit
computed_custody_bit = compute_custody_bit(custody_slashing.malefactor_secret, custody_slashing.data)
# Verify the claim
if computed_custody_bit == 1:
# Slash the malefactor, reward the other committee members
slash_validator(state, custody_slashing.malefactor_index)
committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
others_count = len(committee) - 1
whistleblower_reward = Gwei(malefactor.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT // others_count)
for attester_index in attesters:
@ -404,33 +560,48 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
slash_validator(state, custody_slashing.whistleblower_index)
```
## Per-epoch processing
### Handling of reveal deadlines
Run `process_reveal_deadlines(state)` after `process_registry_updates(state)`:
```python
def process_reveal_deadlines(state: BeaconState) -> None:
epoch = get_current_epoch(state)
for index, validator in enumerate(state.validators):
deadline = validator.next_custody_secret_to_reveal + 1
if get_custody_period_for_validator(ValidatorIndex(index), epoch) > deadline:
slash_validator(state, ValidatorIndex(index))
```

```python
def process_challenge_deadlines(state: BeaconState) -> None:
for custody_chunk_challenge in state.custody_chunk_challenge_records:
if get_current_epoch(state) > custody_chunk_challenge.inclusion_epoch + EPOCHS_PER_CUSTODY_PERIOD:
slash_validator(state, custody_chunk_challenge.responder_index, custody_chunk_challenge.challenger_index)
index_in_records = state.custody_chunk_challenge_records.index(custody_chunk_challenge)
state.custody_chunk_challenge_records[index_in_records] = CustodyChunkChallengeRecord()
```
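As a rough illustration of the reveal deadline: a validator becomes slashable once the current custody period is more than one period ahead of its `next_custody_secret_to_reveal`. The sketch below inlines the custody-period helper and uses an assumed constant value purely for illustration; the authoritative values live in the custody game constants.

```python
# Illustrative only: the constant and helper are inlined stand-ins for the spec definitions.
EPOCHS_PER_CUSTODY_PERIOD = 2**14

def get_custody_period_for_validator(validator_index: int, epoch: int) -> int:
    return (epoch + validator_index % EPOCHS_PER_CUSTODY_PERIOD) // EPOCHS_PER_CUSTODY_PERIOD

def misses_reveal_deadline(validator_index: int, epoch: int, next_custody_secret_to_reveal: int) -> bool:
    deadline = next_custody_secret_to_reveal + 1
    return get_custody_period_for_validator(validator_index, epoch) > deadline

# A validator that has revealed up to period 0 is safe until the current period exceeds 1.
assert not misses_reveal_deadline(0, epoch=EPOCHS_PER_CUSTODY_PERIOD, next_custody_secret_to_reveal=0)
assert misses_reveal_deadline(0, epoch=2 * EPOCHS_PER_CUSTODY_PERIOD, next_custody_secret_to_reveal=0)
```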
### Final updates
After `process_final_updates(state)`, additional updates are made for the custody game:
```python
def process_custody_final_updates(state: BeaconState) -> None:
# Clean up exposed RANDAO key reveals
state.exposed_derived_secrets[get_current_epoch(state) % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] = []
# Reset withdrawable epochs if challenge records are empty
records = state.custody_chunk_challenge_records
validator_indices_in_records = set(record.responder_index for record in records) # non-duplicate
for index, validator in enumerate(state.validators):
if validator.exit_epoch != FAR_FUTURE_EPOCH:
not_all_secrets_are_revealed = validator.all_custody_secrets_revealed_epoch == FAR_FUTURE_EPOCH
if index in validator_indices_in_records or not_all_secrets_are_revealed:
# Delay withdrawable epochs if challenge records are not empty or not all
# custody secrets revealed
validator.withdrawable_epoch = FAR_FUTURE_EPOCH
else:
# Reset withdrawable epochs if challenge records are empty and all secrets are revealed
if validator.withdrawable_epoch == FAR_FUTURE_EPOCH:
validator.withdrawable_epoch = Epoch(validator.all_custody_secrets_revealed_epoch
+ MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
```


@ -9,8 +9,9 @@
- [Introduction](#introduction)
- [Helpers](#helpers)
- [Extended `LatestMessage`](#extended-latestmessage)
- [Updated `update_latest_messages`](#updated-update_latest_messages)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -19,34 +20,30 @@
This document is the beacon chain fork choice spec for part of Ethereum 2.0 Phase 1.
### Helpers
#### Extended `LatestMessage`
```python
@dataclass(eq=True, frozen=True)
class LatestMessage(object):
epoch: Epoch
root: Root
shard: Shard
shard_root: Root
```
#### Updated `update_latest_messages`
```python
def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
target = attestation.data.target
beacon_block_root = attestation.data.beacon_block_root
# TODO: separate shard chain vote
shard = attestation.data.shard
for i in attesting_indices:
if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
store.latest_messages[i] = LatestMessage(
epoch=target.epoch, root=beacon_block_root, shard=shard, shard_root=attestation.data.shard_head_root
)
```
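The overwrite rule above is the same LMD rule as Phase 0, extended with shard information: a validator's latest message is replaced only by a vote with a strictly newer target epoch. A self-contained toy version with plain dicts and a simplified message type:

```python
from dataclasses import dataclass
from typing import Dict

@dataclass(eq=True, frozen=True)
class ToyLatestMessage:
    epoch: int
    root: bytes

latest_messages: Dict[int, ToyLatestMessage] = {}

def record_vote(validator_index: int, target_epoch: int, beacon_block_root: bytes) -> None:
    # Same overwrite rule as update_latest_messages: keep only the newest target epoch per validator.
    if validator_index not in latest_messages or target_epoch > latest_messages[validator_index].epoch:
        latest_messages[validator_index] = ToyLatestMessage(epoch=target_epoch, root=beacon_block_root)

record_vote(7, 10, b"\x01" * 32)
record_vote(7, 9, b"\x02" * 32)   # older target: ignored
record_vote(7, 11, b"\x03" * 32)  # newer target: replaces
assert latest_messages[7].epoch == 11
```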


@ -80,7 +80,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
exit_epoch=phase0_validator.exit_epoch,
withdrawable_epoch=phase0_validator.withdrawable_epoch,
next_custody_secret_to_reveal=get_custody_period_for_validator(ValidatorIndex(i), epoch),
all_custody_secrets_revealed_epoch=FAR_FUTURE_EPOCH,
) for i, phase0_validator in enumerate(pre.validators)
),
balances=pre.balances,
@ -99,11 +99,11 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
current_justified_checkpoint=pre.current_justified_checkpoint,
finalized_checkpoint=pre.finalized_checkpoint,
# Phase 1
current_epoch_start_shard=Shard(0),
shard_states=List[ShardState, MAX_SHARDS](
ShardState(
slot=pre.slot,
gasprice=MIN_GASPRICE,
latest_block_root=Root(),
) for i in range(INITIAL_ACTIVE_SHARDS)
),
@ -111,7 +111,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
current_light_committee=CompactCommittee(),  # computed after state creation
next_light_committee=CompactCommittee(),
# Custody game
exposed_derived_secrets=[()] * EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS,
# exposed_derived_secrets will fully default to zeroes
)
next_epoch = Epoch(epoch + 1)


@ -0,0 +1,185 @@
# Ethereum 2.0 Phase 1 -- Beacon Chain + Shard Chain Fork Choice
**Notice**: This document is a work-in-progress for researchers and implementers.
## Table of contents
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Fork choice](#fork-choice)
- [Helpers](#helpers)
- [`ShardStore`](#shardstore)
- [`get_forkchoice_shard_store`](#get_forkchoice_shard_store)
- [`get_shard_latest_attesting_balance`](#get_shard_latest_attesting_balance)
- [`get_shard_head`](#get_shard_head)
- [`get_shard_ancestor`](#get_shard_ancestor)
- [`get_pending_shard_blocks`](#get_pending_shard_blocks)
- [Handlers](#handlers)
- [`on_shard_block`](#on_shard_block)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Introduction
This document is the shard chain fork choice spec for part of Ethereum 2.0 Phase 1. It assumes the [beacon chain fork choice spec](./fork-choice.md).
## Fork choice
### Helpers
#### `ShardStore`
```python
@dataclass
class ShardStore:
shard: Shard
signed_blocks: Dict[Root, SignedShardBlock] = field(default_factory=dict)
block_states: Dict[Root, ShardState] = field(default_factory=dict)
```
#### `get_forkchoice_shard_store`
```python
def get_forkchoice_shard_store(anchor_state: BeaconState, shard: Shard) -> ShardStore:
return ShardStore(
shard=shard,
signed_blocks={
anchor_state.shard_states[shard].latest_block_root: SignedShardBlock(
message=ShardBlock(slot=anchor_state.slot, shard=shard)
)
},
block_states={anchor_state.shard_states[shard].latest_block_root: anchor_state.copy().shard_states[shard]},
)
```
#### `get_shard_latest_attesting_balance`
```python
def get_shard_latest_attesting_balance(store: Store, shard_store: ShardStore, root: Root) -> Gwei:
state = store.checkpoint_states[store.justified_checkpoint]
active_indices = get_active_validator_indices(state, get_current_epoch(state))
return Gwei(sum(
state.validators[i].effective_balance for i in active_indices
if (
i in store.latest_messages
# TODO: check the latest message logic: currently, validator's previous vote of another shard
# would be ignored once their newer vote is accepted. Check if it makes sense.
and store.latest_messages[i].shard == shard_store.shard
and get_shard_ancestor(
store, shard_store, store.latest_messages[i].shard_root, shard_store.signed_blocks[root].message.slot
) == root
)
))
```
#### `get_shard_head`
```python
def get_shard_head(store: Store, shard_store: ShardStore) -> Root:
# Execute the LMD-GHOST fork choice
beacon_head_root = get_head(store)
shard_head_state = store.block_states[beacon_head_root].shard_states[shard_store.shard]
shard_head_root = shard_head_state.latest_block_root
shard_blocks = {
root: signed_shard_block.message for root, signed_shard_block in shard_store.signed_blocks.items()
if signed_shard_block.message.slot > shard_head_state.slot
}
while True:
# Find the valid child block roots
children = [
root for root, shard_block in shard_blocks.items()
if shard_block.shard_parent_root == shard_head_root
]
if len(children) == 0:
return shard_head_root
# Sort by latest attesting balance with ties broken lexicographically
shard_head_root = max(
children, key=lambda root: (get_shard_latest_attesting_balance(store, shard_store, root), root)
)
```
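The `max(...)` call above relies on Python tuple ordering: candidate children are compared first by attesting balance, and equal balances fall back to the lexicographically greatest root. A toy illustration with made-up balances:

```python
# Toy illustration of the child-selection rule: max() over (balance, root) tuples.
children = {b"\xaa" * 32: 64, b"\xbb" * 32: 96, b"\xcc" * 32: 96}  # root -> toy balance in Gwei

head = max(children, key=lambda root: (children[root], root))
assert head == b"\xcc" * 32  # 0xcc... wins the 96 Gwei tie against 0xbb...
```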
#### `get_shard_ancestor`
```python
def get_shard_ancestor(store: Store, shard_store: ShardStore, root: Root, slot: Slot) -> Root:
block = shard_store.signed_blocks[root].message
if block.slot > slot:
return get_shard_ancestor(store, shard_store, block.shard_parent_root, slot)
elif block.slot == slot:
return root
else:
# root is older than queried slot, thus a skip slot. Return most recent root prior to slot
return root
```
#### `get_pending_shard_blocks`
```python
def get_pending_shard_blocks(store: Store, shard_store: ShardStore) -> Sequence[SignedShardBlock]:
"""
Return the canonical shard block branch that has not yet been crosslinked.
"""
shard = shard_store.shard
beacon_head_root = get_head(store)
beacon_head_state = store.block_states[beacon_head_root]
latest_shard_block_root = beacon_head_state.shard_states[shard].latest_block_root
shard_head_root = get_shard_head(store, shard_store)
root = shard_head_root
signed_shard_blocks = []
while root != latest_shard_block_root:
signed_shard_block = shard_store.signed_blocks[root]
signed_shard_blocks.append(signed_shard_block)
root = signed_shard_block.message.shard_parent_root
signed_shard_blocks.reverse()
return signed_shard_blocks
```
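`get_pending_shard_blocks` simply walks parent links from the shard head back to the last crosslinked root and reverses the result into chain order. A toy version with a plain parent-lookup dict standing in for the store:

```python
# Toy walk-back mirroring get_pending_shard_blocks (dicts stand in for the shard store).
parent_of = {b"C": b"B", b"B": b"A"}  # child root -> parent root
shard_head_root, latest_crosslinked_root = b"C", b"A"

pending = []
root = shard_head_root
while root != latest_crosslinked_root:
    pending.append(root)
    root = parent_of[root]
pending.reverse()
assert pending == [b"B", b"C"]  # oldest pending block first
```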
### Handlers
#### `on_shard_block`
```python
def on_shard_block(store: Store, shard_store: ShardStore, signed_shard_block: SignedShardBlock) -> None:
shard_block = signed_shard_block.message
shard = shard_store.shard
# Check shard
# TODO: check it in networking spec
assert shard_block.shard == shard
# Check shard parent exists
assert shard_block.shard_parent_root in shard_store.block_states
shard_parent_state = shard_store.block_states[shard_block.shard_parent_root]
# Check beacon parent exists
assert shard_block.beacon_parent_root in store.block_states
beacon_parent_state = store.block_states[shard_block.beacon_parent_root]
# Check that block is later than the finalized shard state slot (optimization to reduce calls to get_ancestor)
finalized_beacon_state = store.block_states[store.finalized_checkpoint.root]
finalized_shard_state = finalized_beacon_state.shard_states[shard]
assert shard_block.slot > finalized_shard_state.slot
# Check block is a descendant of the finalized block at the checkpoint finalized slot
finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
assert (
get_ancestor(store, shard_block.beacon_parent_root, finalized_slot) == store.finalized_checkpoint.root
)
# Check the block is valid and compute the post-state
shard_state = shard_parent_state.copy()
shard_state_transition(shard_state, signed_shard_block, beacon_parent_state, validate_result=True)
# Add new block to the store
# Note: storing `SignedShardBlock` format for computing `ShardTransition.proposer_signature_aggregate`
shard_store.signed_blocks[hash_tree_root(shard_block)] = signed_shard_block
# Add new state for this block to the store
shard_store.block_states[hash_tree_root(shard_block)] = shard_state
```


@ -10,14 +10,12 @@
- [Introduction](#introduction)
- [Helper functions](#helper-functions)
- [Misc](#misc)
- [Shard block verification functions](#shard-block-verification-functions)
- [`verify_shard_block_message`](#verify_shard_block_message)
- [`verify_shard_block_signature`](#verify_shard_block_signature)
- [Shard state transition function](#shard-state-transition-function)
- [Fraud proofs](#fraud-proofs)
- [Verifying the proof](#verifying-the-proof)
- [Honest committee member behavior](#honest-committee-member-behavior)
- [Helper functions](#helper-functions-1)
- [Make attestations](#make-attestations)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@ -27,81 +25,75 @@ This document describes the shard transition function and fraud proofs as part o
## Helper functions
### Misc
```python
def compute_shard_transition_digest(beacon_state: BeaconState,
shard_state: ShardState,
beacon_parent_root: Root,
shard_body_root: Root) -> Bytes32:
# TODO: use SSZ hash tree root
return hash(
hash_tree_root(shard_state) + beacon_parent_root + shard_body_root
)
```
### Shard block verification functions
#### `verify_shard_block_message`
```python
def verify_shard_block_message(beacon_parent_state: BeaconState,
shard_parent_state: ShardState,
block: ShardBlock) -> bool:
# Check `shard_parent_root` field
assert block.shard_parent_root == shard_parent_state.latest_block_root
# Check `beacon_parent_root` field
beacon_parent_block_header = beacon_parent_state.latest_block_header.copy()
if beacon_parent_block_header.state_root == Root():
beacon_parent_block_header.state_root = hash_tree_root(beacon_parent_state)
beacon_parent_root = hash_tree_root(beacon_parent_block_header)
assert block.beacon_parent_root == beacon_parent_root
# Check `slot` field
shard = block.shard
next_slot = Slot(block.slot + 1)
offset_slots = compute_offset_slots(get_latest_slot_for_shard(beacon_parent_state, shard), next_slot)
assert block.slot in offset_slots
# Check `proposer_index` field
assert block.proposer_index == get_shard_proposer_index(beacon_parent_state, block.slot, shard)
# Check `body` field
assert 0 < len(block.body) <= MAX_SHARD_BLOCK_SIZE
return True
```
#### `verify_shard_block_signature`
```python
def verify_shard_block_signature(beacon_parent_state: BeaconState,
signed_block: SignedShardBlock) -> bool:
proposer = beacon_parent_state.validators[signed_block.message.proposer_index]
domain = get_domain(beacon_parent_state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(signed_block.message.slot))
signing_root = compute_signing_root(signed_block.message, domain)
return bls.Verify(proposer.pubkey, signing_root, signed_block.signature)
```
## Shard state transition function
The post-state corresponding to a pre-state `shard_state` and a signed block `signed_block` is defined as `shard_state_transition(shard_state, signed_block, beacon_parent_state)`, where `beacon_parent_state` is the parent beacon state of the `signed_block`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
```python
def shard_state_transition(shard_state: ShardState,
signed_block: SignedShardBlock,
beacon_parent_state: BeaconState,
validate_result: bool = True) -> ShardState:
assert verify_shard_block_message(beacon_parent_state, shard_state, signed_block.message)
if validate_result:
assert verify_shard_block_signature(beacon_parent_state, signed_block)
process_shard_block(shard_state, signed_block.message)
return shard_state
```
```python
def process_shard_block(shard_state: ShardState,
block: ShardBlock) -> None:
"""
Update ``shard_state`` with shard ``block``.
"""
shard_state.slot = block.slot
prev_gasprice = shard_state.gasprice
shard_block_length = len(block.body)
shard_state.gasprice = compute_updated_gasprice(prev_gasprice, uint64(shard_block_length))
if shard_block_length != 0:
shard_state.latest_block_root = hash_tree_root(block)
```
## Fraud proofs
@ -136,14 +128,13 @@ def is_valid_fraud_proof(beacon_state: BeaconState,
# 2. Check if the shard state transition result is wrong between
# `transition.shard_states[offset_index - 1]` to `transition.shard_states[offset_index]`.
if offset_index == 0:
shard_states = beacon_parent_block.body.shard_transitions[attestation.data.shard].shard_states
shard_state = shard_states[len(shard_states) - 1]
else:
shard_state = transition.shard_states[offset_index - 1]  # Not doing the actual state updates here.
process_shard_block(shard_state, block)
if shard_state != transition.shard_states[offset_index]:
return True
return False
@ -154,141 +145,3 @@ def generate_custody_bit(subkey: BLSPubkey, block: ShardBlock) -> bool:
# TODO
...
```
## Honest committee member behavior
### Helper functions
```python
def get_winning_proposal(beacon_state: BeaconState, proposals: Sequence[SignedShardBlock]) -> SignedShardBlock:
# TODO: Let `winning_proposal` be the proposal with the largest number of total attestations from slots in
# `state.shard_next_slots[shard]....slot-1` supporting it or any of its descendants, breaking ties by choosing
# the first proposal locally seen. Do `proposals.append(winning_proposal)`.
return proposals[-1] # stub
```
```python
def compute_shard_body_roots(proposals: Sequence[SignedShardBlock]) -> Sequence[Root]:
return [hash_tree_root(proposal.message.body) for proposal in proposals]
```
```python
def get_proposal_choices_at_slot(beacon_state: BeaconState,
shard_state: ShardState,
slot: Slot,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock],
validate_signature: bool=True) -> Sequence[SignedShardBlock]:
"""
Return the valid shard blocks at the given ``slot``.
Note that this function doesn't change the state.
"""
choices = []
shard_blocks_at_slot = [block for block in shard_blocks if block.message.slot == slot]
for block in shard_blocks_at_slot:
try:
# Verify block message and signature
# TODO these validations should have been checked upon receiving shard blocks.
assert verify_shard_block_message(beacon_state, shard_state, block.message, slot, shard)
if validate_signature:
assert verify_shard_block_signature(beacon_state, block)
shard_state = get_post_shard_state(beacon_state, shard_state, block.message)
except Exception:
pass # TODO: throw error in the test helper
else:
choices.append(block)
return choices
```
```python
def get_proposal_at_slot(beacon_state: BeaconState,
shard_state: ShardState,
slot: Slot,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock],
validate_signature: bool=True) -> Tuple[SignedShardBlock, ShardState]:
"""
Return ``proposal``, ``shard_state`` of the given ``slot``.
Note that this function doesn't change the state.
"""
choices = get_proposal_choices_at_slot(
beacon_state=beacon_state,
shard_state=shard_state,
slot=slot,
shard=shard,
shard_blocks=shard_blocks,
validate_signature=validate_signature,
)
if len(choices) == 0:
block = ShardBlock(slot=slot)
proposal = SignedShardBlock(message=block)
elif len(choices) == 1:
proposal = choices[0]
else:
proposal = get_winning_proposal(beacon_state, choices)
# Apply state transition
shard_state = get_post_shard_state(beacon_state, shard_state, proposal.message)
return proposal, shard_state
```
```python
def get_shard_state_transition_result(
beacon_state: BeaconState,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock],
validate_signature: bool=True,
) -> Tuple[Sequence[SignedShardBlock], Sequence[ShardState], Sequence[Root]]:
proposals = []
shard_states = []
shard_state = beacon_state.shard_states[shard]
for slot in get_offset_slots(beacon_state, shard):
proposal, shard_state = get_proposal_at_slot(
beacon_state=beacon_state,
shard_state=shard_state,
slot=slot,
shard=shard,
shard_blocks=shard_blocks,
validate_signature=validate_signature,
)
shard_states.append(shard_state)
proposals.append(proposal)
shard_data_roots = compute_shard_body_roots(proposals)
return proposals, shard_states, shard_data_roots
```
### Make attestations
Suppose you are a committee member on shard `shard` at slot `current_slot` and you have received shard blocks `shard_blocks` since the latest successful crosslink for `shard` into the beacon chain. Let `beacon_state` be the head beacon state you are building on, and let `QUARTER_PERIOD = SECONDS_PER_SLOT // 4`. `2 * QUARTER_PERIOD` seconds into slot `current_slot`, run `get_shard_transition(beacon_state, shard, shard_blocks)` to get `shard_transition`.
```python
def get_shard_transition(beacon_state: BeaconState,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock]) -> ShardTransition:
offset_slots = get_offset_slots(beacon_state, shard)
proposals, shard_states, shard_data_roots = get_shard_state_transition_result(beacon_state, shard, shard_blocks)
shard_block_lengths = []
proposer_signatures = []
for proposal in proposals:
shard_block_lengths.append(len(proposal.message.body))
if proposal.signature != NO_SIGNATURE:
proposer_signatures.append(proposal.signature)
if len(proposer_signatures) > 0:
proposer_signature_aggregate = bls.Aggregate(proposer_signatures)
else:
proposer_signature_aggregate = NO_SIGNATURE
return ShardTransition(
start_slot=offset_slots[0],
shard_block_lengths=shard_block_lengths,
shard_data_roots=shard_data_roots,
shard_states=shard_states,
proposer_signature_aggregate=proposer_signature_aggregate,
)
```

specs/phase1/validator.md Normal file

@ -0,0 +1,559 @@
# Ethereum 2.0 Phase 1 -- Honest Validator
**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 1](./), which describes the expected actions of a "validator" participating in the Ethereum 2.0 Phase 1 protocol.
## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Constants](#constants)
- [Misc](#misc)
- [Becoming a validator](#becoming-a-validator)
- [Beacon chain validator assignments](#beacon-chain-validator-assignments)
- [Lookahead](#lookahead)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
- [Block proposal](#block-proposal)
- [Preparing for a `BeaconBlock`](#preparing-for-a-beaconblock)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
- [Custody slashings](#custody-slashings)
- [Custody key reveals](#custody-key-reveals)
- [Early derived secret reveals](#early-derived-secret-reveals)
- [Shard transitions](#shard-transitions)
- [Light client fields](#light-client-fields)
- [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock)
- [Attesting](#attesting)
- [`FullAttestationData`](#fullattestationdata)
- [`FullAttestation`](#fullattestation)
- [Timing](#timing)
- [Attestation data](#attestation-data)
- [Head shard root](#head-shard-root)
- [Shard transition](#shard-transition)
- [Construct attestation](#construct-attestation)
- [Attestation Aggregation](#attestation-aggregation)
- [Broadcast aggregate](#broadcast-aggregate)
- [`AggregateAndProof`](#aggregateandproof)
- [`SignedAggregateAndProof`](#signedaggregateandproof)
- [Light client committee](#light-client-committee)
- [Preparation](#preparation)
- [Light client vote](#light-client-vote)
- [Light client vote data](#light-client-vote-data)
- [`LightClientVoteData`](#lightclientvotedata)
- [Construct vote](#construct-vote)
- [`LightClientVote`](#lightclientvote)
- [Broadcast](#broadcast)
- [Light client vote aggregation](#light-client-vote-aggregation)
- [Aggregation selection](#aggregation-selection)
- [Construct aggregate](#construct-aggregate)
- [Broadcast aggregate](#broadcast-aggregate-1)
- [`LightAggregateAndProof`](#lightaggregateandproof)
- [`SignedLightAggregateAndProof`](#signedlightaggregateandproof)
- [How to avoid slashing](#how-to-avoid-slashing)
- [Custody slashing](#custody-slashing)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Introduction
This document represents the expected behavior of an "honest validator" with respect to Phase 1 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (i.e. the functionality of following and reading the beacon chain) and a "validator client" (i.e. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope.
A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof-of-work networks in which miners provide collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol.
## Prerequisites
This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden.
All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use as a reference throughout.
## Constants
See constants from [Phase 0 validator guide](../phase0/validator.md#constants).
### Misc
| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `TARGET_LIGHT_CLIENT_AGGREGATORS_PER_SLOT` | `2**3` (= 8) | validators | |
| `LIGHT_CLIENT_PREPARATION_EPOCHS` | `2**2` (= 4) | epochs | |
## Becoming a validator
Becoming a validator in Phase 1 is unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#becoming-a-validator) for details.
## Beacon chain validator assignments
Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#validator-assignments) for details.
### Lookahead
Lookahead for beacon committee assignments operates in the same manner as Phase 0, but committee members must join a shard block pubsub topic in addition to the committee attestation topic.
Specifically _after_ finding stable peers of attestation subnets (see Phase 0) a validator should:
* Let `shard = compute_shard_from_committee_index(state, committee_index, slot)`
* Subscribe to the pubsub topic `shard_{shard}_block` (attestation subnet peers should have this topic available).
TODO: For now, the `state` we pass to `compute_shard_from_committee_index` is the current state without considering `len(state.shard_states)`, i.e., the result from `get_active_shard_count(state)` changes. We should fix it when we have shard count update logic.
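As a sketch of the lookahead subscription steps above (the gossip client's `subscribe` function and the stubbed `compute_shard_from_committee_index` below are illustrative assumptions, not part of this spec):

```python
# Sketch only: wiring from a committee assignment to the `shard_{shard}_block` topic name.
from typing import Callable

def join_shard_block_topic(state, committee_index: int, slot: int,
                           compute_shard_from_committee_index: Callable,
                           subscribe: Callable[[str], None]) -> None:
    shard = compute_shard_from_committee_index(state, committee_index, slot)
    subscribe(f"shard_{shard}_block")

# Example wiring with trivial stubs:
join_shard_block_topic(
    state=None,
    committee_index=3,
    slot=1024,
    compute_shard_from_committee_index=lambda state, index, slot: index,  # stub
    subscribe=lambda topic: print("subscribed to", topic),
)
```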
## Beacon chain responsibilities
A validator has two primary responsibilities to the beacon chain: [proposing blocks](#block-proposal) and [creating attestations](#attesting). Proposals happen infrequently, whereas attestations should be created once per epoch.
These responsibilities are largely unchanged from Phase 0, but utilize the updated `SignedBeaconBlock`, `BeaconBlock`, `BeaconBlockBody`, `Attestation`, and `AttestationData` definitions found in Phase 1. Below notes only the additional and modified behavior with respect to Phase 0.
Phase 1 adds light client committees and associated responsibilities, discussed [below](#light-client-committee).
### Block proposal
#### Preparing for a `BeaconBlock`
`slot`, `proposer_index`, `parent_root`, `state_root` fields are unchanged.
#### Constructing the `BeaconBlockBody`
`randao_reveal`, `eth1_data`, and `graffiti` are unchanged.
`proposer_slashings`, `deposits`, and `voluntary_exits` are unchanged.
`attester_slashings` and `attestations` operate exactly as in Phase 0, but with new definitions of `AttesterSlashing` and `Attestation`, along with modified validation conditions found in `process_attester_slashing` and `process_attestation`.
##### Custody slashings
Up to `MAX_CUSTODY_SLASHINGS`, [`CustodySlashing`](./custody-game.md#custodyslashing) objects can be included in the `block`. The custody slashings must satisfy the verification conditions found in [custody slashings processing](./custody-game.md#custody-slashings). The validator receives a small "whistleblower" reward for each custody slashing included (THIS IS NOT CURRENTLY THE CASE BUT PROBABLY SHOULD BE).
##### Custody key reveals
Up to `MAX_CUSTODY_KEY_REVEALS`, [`CustodyKeyReveal`](./custody-game.md#custodykeyreveal) objects can be included in the `block`. The custody key reveals must satisfy the verification conditions found in [custody key reveal processing](./custody-game.md#custody-key-reveals). The validator receives a small reward for each custody key reveal included.
##### Early derived secret reveals
Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, [`EarlyDerivedSecretReveal`](./custody-game.md#earlyderivedsecretreveal) objects can be included in the `block`. The early derived secret reveals must satisfy the verification conditions found in [early derived secret reveal processing](./custody-game.md#custody-key-reveals). The validator receives a small "whistleblower" reward for each early derived secret reveal included.
##### Shard transitions
Exactly `MAX_SHARDS` [`ShardTransition`](./beacon-chain.md#shardtransition) objects are included in the block. Default each to an empty `ShardTransition()`. Then for each committee assigned to the slot with an associated `committee_index` and `shard`, set `shard_transitions[shard] = full_transitions[winning_root]` if the committee had enough weight to form a crosslink this slot.
Specifically:
* Call `shards, winning_roots = get_shard_winning_roots(state, block.body.attestations)`
* Let `full_transitions` be a dictionary mapping from the `shard_transition_root`s found in `attestations` to the corresponding full `ShardTransition`
* Then for each `shard` and `winning_root` in `zip(shards, winning_roots)` set `shard_transitions[shard] = full_transitions[winning_root]`
*Note*: The `state` passed into `get_shard_winning_roots` must be transitioned to the slot of `block.slot` to run accurately due to the internal use of `get_online_validator_indices` and `is_on_time_attestation`.
```python
def get_shard_winning_roots(state: BeaconState,
attestations: Sequence[Attestation]) -> Tuple[Sequence[Shard], Sequence[Root]]:
shards = []
winning_roots = []
online_indices = get_online_validator_indices(state)
on_time_attestation_slot = compute_previous_slot(state.slot)
committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(on_time_attestation_slot))
for committee_index in map(CommitteeIndex, range(committee_count)):
shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot)
# All attestations in the block for this committee/shard that are "on time"
shard_attestations = [
attestation for attestation in attestations
if is_on_time_attestation(state, attestation.data) and attestation.data.index == committee_index
]
committee = get_beacon_committee(state, on_time_attestation_slot, committee_index)
# Loop over all shard transition roots, looking for a winning root
shard_transition_roots = set(a.data.shard_transition_root for a in shard_attestations) # non-duplicate
for shard_transition_root in sorted(shard_transition_roots):
transition_attestations = [
a for a in shard_attestations
if a.data.shard_transition_root == shard_transition_root
]
transition_participants: Set[ValidatorIndex] = set()
for attestation in transition_attestations:
participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
transition_participants = transition_participants.union(participants)
enough_online_stake = (
get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >=
get_total_balance(state, online_indices.intersection(committee)) * 2
)
if enough_online_stake:
shards.append(shard)
winning_roots.append(shard_transition_root)
break
return shards, winning_roots
```
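A sketch of the assembly described above: default all `MAX_SHARDS` entries to an empty transition, then copy the full `ShardTransition` for each winning root. Plain dictionaries and an illustrative `MAX_SHARDS` value stand in for the spec types here:

```python
# Sketch only: {} stands in for ShardTransition(), and MAX_SHARDS is an illustrative value.
from typing import Dict, List, Sequence

MAX_SHARDS = 64  # illustrative value

def assemble_shard_transitions(shards: Sequence[int],
                               winning_roots: Sequence[bytes],
                               full_transitions: Dict[bytes, dict]) -> List[dict]:
    # Default every shard to an empty transition, then fill in the winners.
    shard_transitions = [{} for _ in range(MAX_SHARDS)]
    for shard, winning_root in zip(shards, winning_roots):
        shard_transitions[shard] = full_transitions[winning_root]
    return shard_transitions

full_transitions = {b"\x01" * 32: {"start_slot": 8}}
result = assemble_shard_transitions(shards=[5], winning_roots=[b"\x01" * 32], full_transitions=full_transitions)
assert result[5] == {"start_slot": 8} and result[0] == {}
```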
##### Light client fields
First retrieve `best_aggregate` from `get_best_light_client_aggregate(block, aggregates)` where `aggregates` is a list of valid aggregated `LightClientVote`s for the previous slot.
Then:
* Set `light_client_bits = best_aggregate.aggregation_bits`
* Set `light_client_signature = best_aggregate.signature`
```python
def get_best_light_client_aggregate(block: BeaconBlock,
aggregates: Sequence[LightClientVote]) -> LightClientVote:
viable_aggregates = [
aggregate for aggregate in aggregates
if (
aggregate.data.slot == compute_previous_slot(block.slot)
and aggregate.data.beacon_block_root == block.parent_root
)
]
return max(
viable_aggregates,
# Ties broken lexicographically by hash_tree_root
key=lambda a: (len([i for i in a.aggregation_bits if i == 1]), hash_tree_root(a)),
default=LightClientVote(),
)
```
#### Packaging into a `SignedBeaconBlock`
Packaging into a `SignedBeaconBlock` is unchanged from Phase 0.
### Attesting
A validator is expected to create, sign, and broadcast an attestation during each epoch.
Assignments and the core of this duty are unchanged from Phase 0. There are a few additional fields related to the assigned shard chain.
The `Attestation` and `AttestationData` defined in the [Phase 1 Beacon Chain spec](./beacon-chain.md) utilize `shard_transition_root: Root` rather than a full `ShardTransition`. For the purposes of the validator and p2p layer, a modified `FullAttestationData` and a containing `FullAttestation` are used to send the accompanying `ShardTransition` in its entirety. Note that due to the properties of SSZ `hash_tree_root`, the root and signatures of `AttestationData` and `FullAttestationData` are equivalent.
#### `FullAttestationData`
```python
class FullAttestationData(Container):
slot: Slot
index: CommitteeIndex
# LMD GHOST vote
beacon_block_root: Root
# FFG vote
source: Checkpoint
target: Checkpoint
# Current-slot shard block root
shard_head_root: Root
# Full shard transition
shard_transition: ShardTransition
```
#### `FullAttestation`
```python
class FullAttestation(Container):
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
data: FullAttestationData
signature: BLSSignature
```
#### Timing
Note the timing of when to create/broadcast is altered from Phase 0.
A validator should create and broadcast the `attestation` to the associated attestation subnet when either (a) the validator has received a valid `BeaconBlock` from the expected beacon block proposer and a valid `ShardBlock` for the expected shard block proposer for the assigned `slot` or (b) one-half of the `slot` has transpired (`SECONDS_PER_SLOT / 2` seconds after the start of `slot`) -- whichever comes _first_.
#### Attestation data
`attestation_data` is constructed in the same manner as Phase 0 but uses `FullAttestationData` with the addition of two fields -- `shard_head_root` and `shard_transition`.
- Let `head_block` be the result of running the fork choice during the assigned slot.
- Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`.
- Let `shard_head_block` be the result of running the fork choice on the assigned shard chain during the assigned slot.
- Let `shard_blocks` be the shard blocks in the chain starting immediately _after_ the most recent crosslink (`head_state.shard_transitions[shard].latest_block_root`) up to the `shard_head_block` (i.e. the value of the shard fork choice store of `get_pending_shard_blocks(store, shard_store)`).
*Note*: We assume that the fork choice only follows branches with valid `offset_slots` with respect to the most recent beacon state shard transition for the queried shard.
##### Head shard root
Set `attestation_data.shard_head_root = hash_tree_root(shard_head_block)`.
##### Shard transition
Set `shard_transition` to the value returned by `get_shard_transition(head_state, shard, shard_blocks)`.
```python
def get_shard_transition_fields(
beacon_state: BeaconState,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock],
) -> Tuple[Sequence[uint64], Sequence[Root], Sequence[ShardState]]:
shard_states = []
shard_data_roots = []
shard_block_lengths = []
shard_state = beacon_state.shard_states[shard]
shard_block_slots = [shard_block.message.slot for shard_block in shard_blocks]
offset_slots = compute_offset_slots(
get_latest_slot_for_shard(beacon_state, shard),
Slot(beacon_state.slot + 1),
)
for slot in offset_slots:
if slot in shard_block_slots:
shard_block = shard_blocks[shard_block_slots.index(slot)]
shard_data_roots.append(get_block_data_merkle_root(shard_block.message.body))
else:
shard_block = SignedShardBlock(message=ShardBlock(slot=slot, shard=shard))
shard_data_roots.append(Root())
shard_state = shard_state.copy()
process_shard_block(shard_state, shard_block.message)
shard_states.append(shard_state)
shard_block_lengths.append(len(shard_block.message.body))
return shard_block_lengths, shard_data_roots, shard_states
```
```python
def get_shard_transition(beacon_state: BeaconState,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock]) -> ShardTransition:
offset_slots = compute_offset_slots(
get_latest_slot_for_shard(beacon_state, shard),
Slot(beacon_state.slot + 1),
)
shard_block_lengths, shard_data_roots, shard_states = (
get_shard_transition_fields(beacon_state, shard, shard_blocks)
)
if len(shard_blocks) > 0:
proposer_signatures = [shard_block.signature for shard_block in shard_blocks]
proposer_signature_aggregate = bls.Aggregate(proposer_signatures)
else:
proposer_signature_aggregate = NO_SIGNATURE
return ShardTransition(
start_slot=offset_slots[0],
shard_block_lengths=shard_block_lengths,
shard_data_roots=shard_data_roots,
shard_states=shard_states,
proposer_signature_aggregate=proposer_signature_aggregate,
)
```
#### Construct attestation
Next, the validator creates `attestation`, a `FullAttestation` as defined above.
`attestation.data`, `attestation.aggregation_bits`, and `attestation.signature` are unchanged from Phase 0. But safety/validity in signing the message is premised upon calculation of the "custody bit" [TODO].
### Attestation Aggregation
Some validators are selected to locally aggregate attestations with a similar `attestation_data` to their constructed `attestation` for the assigned `slot`.
Aggregation selection and the core of this duty are largely unchanged from Phase 0. Any additional components or changes are noted.
#### Broadcast aggregate
Note the timing of when to broadcast aggregates is altered in Phase 1+.
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot`, that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`.
##### `AggregateAndProof`
`AggregateAndProof` is unchanged other than the contained `Attestation`.
```python
class AggregateAndProof(Container):
aggregator_index: ValidatorIndex
aggregate: Attestation
selection_proof: BLSSignature
```
##### `SignedAggregateAndProof`
`SignedAggregateAndProof` is unchanged other than the contained `AggregateAndProof`.
```python
class SignedAggregateAndProof(Container):
message: AggregateAndProof
signature: BLSSignature
```
### Light client committee
In addition to the core beacon chain responsibilities, Phase 1 adds an additional role -- the Light Client Committee -- to aid in light client functionality.
Validators serve on the light client committee for `LIGHT_CLIENT_COMMITTEE_PERIOD` epochs and the assignment to be on a committee is known `LIGHT_CLIENT_COMMITTEE_PERIOD` epochs in advance.
#### Preparation
When `get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == LIGHT_CLIENT_COMMITTEE_PERIOD - LIGHT_CLIENT_PREPARATION_EPOCHS` each validator must check if they are in the next period light client committee by calling `is_in_next_light_client_committee()`.
If the validator is in the next light client committee, they must join the `light_client_votes` pubsub topic to begin duties at the start of the next period.
```python
def is_in_next_light_client_committee(state: BeaconState, index: ValidatorIndex) -> bool:
next_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD)
return index in next_committee
```
#### Light client vote
During a period of epochs that the validator is a part of the light client committee (`validator_index in get_light_client_committee(state, epoch)`), the validator creates and broadcasts a `LightClientVote` at each slot.
A validator should create and broadcast the `light_client_vote` to the `light_client_votes` pubsub topic when either (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the `slot` have transpired (`SECONDS_PER_SLOT / 3` seconds after the start of `slot`) -- whichever comes _first_.
- Let `light_client_committee = get_light_client_committee(state, compute_epoch_at_slot(slot))`
##### Light client vote data
First the validator constructs `light_client_vote_data`, a [`LightClientVoteData`](#lightclientvotedata) object.
* Let `head_block` be the result of running the fork choice during the assigned slot.
* Set `light_client_vote.slot = slot`.
* Set `light_client_vote.beacon_block_root = hash_tree_root(head_block)`.
###### `LightClientVoteData`
```python
class LightClientVoteData(Container):
slot: Slot
beacon_block_root: Root
```
##### Construct vote
Then the validator constructs `light_client_vote`, a [`LightClientVote`](#lightclientvote) object.
* Set `light_client_vote.data = light_client_vote_data`.
* Set `light_client_vote.aggregation_bits` to be a `Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]`, where the bit at the index of the validator in the `light_client_committee` is set to `0b1` and all other bits are set to `0b0` (see the sketch after the `LightClientVote` container below).
* Set `light_client_vote.signature = vote_signature` where `vote_signature` is obtained from:
```python
def get_light_client_vote_signature(state: BeaconState,
light_client_vote_data: LightClientVoteData,
privkey: int) -> BLSSignature:
domain = get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(light_client_vote_data.slot))
signing_root = compute_signing_root(light_client_vote_data, domain)
return bls.Sign(privkey, signing_root)
```
###### `LightClientVote`
```python
class LightClientVote(Container):
data: LightClientVoteData
aggregation_bits: Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]
signature: BLSSignature
```
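A minimal sketch of the `aggregation_bits` construction above, using a plain list as a stand-in for `Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]` (the committee contents are illustrative):

```python
# Sketch only: one bit set at the validator's position within the light client committee.
from typing import List, Sequence

def single_member_bits(light_client_committee: Sequence[int], validator_index: int) -> List[int]:
    bits = [0] * len(light_client_committee)
    bits[light_client_committee.index(validator_index)] = 1
    return bits

committee = [14, 3, 27, 9]  # illustrative committee of validator indices
assert single_member_bits(committee, 27) == [0, 0, 1, 0]
```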
##### Broadcast
Finally, the validator broadcasts `light_client_vote` to the `light_client_votes` pubsub topic.
#### Light client vote aggregation
Some validators in the light client committee are selected to locally aggregate light client votes with a similar `light_client_vote_data` to their constructed `light_client_vote` for the assigned `slot`.
#### Aggregation selection
A validator is selected to aggregate based upon the return value of `is_light_client_aggregator()`.
```python
def get_light_client_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
domain = get_domain(state, DOMAIN_LIGHT_SELECTION_PROOF, compute_epoch_at_slot(slot))
signing_root = compute_signing_root(slot, domain)
return bls.Sign(privkey, signing_root)
```
```python
def is_light_client_aggregator(state: BeaconState, slot: Slot, slot_signature: BLSSignature) -> bool:
committee = get_light_client_committee(state, compute_epoch_at_slot(slot))
modulo = max(1, len(committee) // TARGET_LIGHT_CLIENT_AGGREGATORS_PER_SLOT)
return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
```
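A toy version of the selection check above, with `hashlib.sha256` standing in for the spec `hash` function and a fabricated byte string in place of the BLS slot signature:

```python
# Toy aggregator-selection check: first 8 bytes of the hashed slot signature, little-endian, mod the committee modulo.
from hashlib import sha256

def is_selected(slot_signature: bytes, committee_size: int, target_aggregators: int = 8) -> bool:
    modulo = max(1, committee_size // target_aggregators)
    return int.from_bytes(sha256(slot_signature).digest()[0:8], "little") % modulo == 0

print(is_selected(b"example-slot-signature", committee_size=128))  # roughly a 1-in-16 chance
```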
#### Construct aggregate
If the validator is selected to aggregate (`is_light_client_aggregator()`), they construct an aggregate light client vote via the following.
Collect `light_client_votes` seen via gossip during the `slot` that have an equivalent `light_client_vote_data` to that constructed by the validator, and create a `aggregate_light_client_vote: LightClientVote` with the following fields.
* Set `aggregate_light_client_vote.data = light_client_vote_data` where `light_client_vote_data` is the `LightClientVoteData` object that is the same for each individual light client vote being aggregated.
* Set `aggregate_light_client_vote.aggregation_bits` to be a `Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]`, where each bit set from each individual light client vote is set to `0b1`.
* Set `aggregate_light_client_vote.signature = aggregate_light_client_signature` where `aggregate_light_client_signature` is obtained from `get_aggregate_light_client_signature`.
```python
def get_aggregate_light_client_signature(light_client_votes: Sequence[LightClientVote]) -> BLSSignature:
signatures = [light_client_vote.signature for light_client_vote in light_client_votes]
return bls.Aggregate(signatures)
```
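The aggregate's bits are simply the bitwise OR of the individual votes' bits. A sketch with plain lists standing in for `Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]`:

```python
# Sketch only: every bit set in any matching vote is set in the aggregate.
from typing import List, Sequence

def combine_aggregation_bits(bitvectors: Sequence[Sequence[int]]) -> List[int]:
    length = len(bitvectors[0])
    return [1 if any(bits[i] for bits in bitvectors) else 0 for i in range(length)]

assert combine_aggregation_bits([[1, 0, 0, 0], [0, 0, 1, 0]]) == [1, 0, 1, 0]
```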
#### Broadcast aggregate
If the validator is selected to aggregate (`is_light_client_aggregator`), then they broadcast their best aggregate light client vote as a `SignedLightAggregateAndProof` to the global aggregate light client vote channel (`aggregate_light_client_votes`) two-thirds of the way through the `slot`, that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`.
Selection proofs are provided in `LightAggregateAndProof` to prove to the gossip channel that the validator has been selected as an aggregator.
`LightAggregateAndProof` messages are signed by the aggregator and broadcast inside of `SignedLightAggregateAndProof` objects to prevent a class of DoS attacks and message forgeries.
First, `light_aggregate_and_proof = get_light_aggregate_and_proof(state, validator_index, aggregate_light_client_vote, privkey)` is constructed.
```python
def get_light_aggregate_and_proof(state: BeaconState,
aggregator_index: ValidatorIndex,
aggregate: LightClientVote,
privkey: int) -> LightAggregateAndProof:
return LightAggregateAndProof(
aggregator_index=aggregator_index,
aggregate=aggregate,
selection_proof=get_light_client_slot_signature(state, aggregate.data.slot, privkey),
)
```
Then `signed_light_aggregate_and_proof = SignedLightAggregateAndProof(message=light_aggregate_and_proof, signature=signature)` is constructed and broadcast, where `signature` is obtained from:
```python
def get_light_aggregate_and_proof_signature(state: BeaconState,
aggregate_and_proof: LightAggregateAndProof,
privkey: int) -> BLSSignature:
aggregate = aggregate_and_proof.aggregate
domain = get_domain(state, DOMAIN_LIGHT_AGGREGATE_AND_PROOF, compute_epoch_at_slot(aggregate.data.slot))
signing_root = compute_signing_root(aggregate_and_proof, domain)
return bls.Sign(privkey, signing_root)
```
##### `LightAggregateAndProof`
```python
class LightAggregateAndProof(Container):
aggregator_index: ValidatorIndex
aggregate: LightClientVote
selection_proof: BLSSignature
```
##### `SignedLightAggregateAndProof`
```python
class SignedLightAggregateAndProof(Container):
message: LightAggregateAndProof
signature: BLSSignature
```
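Putting the broadcast-aggregate steps together, a minimal non-normative sketch (the wrapper name is hypothetical; the two helpers above are assumed as defined):
```python
def construct_signed_light_aggregate_and_proof(state: BeaconState,
                                               aggregator_index: ValidatorIndex,
                                               aggregate: LightClientVote,
                                               privkey: int) -> SignedLightAggregateAndProof:
    # Build the message carrying the aggregate and the selection proof, then sign it
    message = get_light_aggregate_and_proof(state, aggregator_index, aggregate, privkey)
    signature = get_light_aggregate_and_proof_signature(state, message, privkey)
    return SignedLightAggregateAndProof(message=message, signature=signature)
```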
## How to avoid slashing
Proposer and Attester slashings described in Phase 0 remain in place with the
addition of the following.
### Custody slashing
To avoid custody slashings, the attester must never sign any shard transition for which the custody bit is one. The custody bit is computed using the custody secret:
```python
def get_custody_secret(state: BeaconState,
validator_index: ValidatorIndex,
privkey: int,
epoch: Epoch=None) -> BLSSignature:
if epoch is None:
epoch = get_current_epoch(state)
period = get_custody_period_for_validator(validator_index, epoch)
epoch_to_sign = get_randao_epoch_for_custody_period(period, validator_index)
domain = get_domain(state, DOMAIN_RANDAO, epoch_to_sign)
signing_root = compute_signing_root(Epoch(epoch_to_sign), domain)
return bls.Sign(privkey, signing_root)
```
Note that the valid custody secret is always the one for the **attestation target epoch**, not to be confused with the epoch in which the shard block was generated. While they are the same most of the time, getting this wrong at custody epoch boundaries would result in a custody slashing.
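As a defensive sketch (non-normative; the wrapper name is hypothetical, and `compute_custody_bit` is assumed from the Phase 1 custody game), a validator can derive the custody secret for the attestation target epoch and refuse to sign when the resulting custody bit is one:
```python
def is_safe_to_sign_shard_data(state: BeaconState,
                               validator_index: ValidatorIndex,
                               privkey: int,
                               attestation_target_epoch: Epoch,
                               shard_block_data: ByteList[MAX_SHARD_BLOCK_SIZE]) -> bool:
    # Use the secret for the attestation target epoch, per the note above
    custody_secret = get_custody_secret(state, validator_index, privkey, epoch=attestation_target_epoch)
    # Only sign shard transitions whose data yields a custody bit of zero
    return compute_custody_bit(custody_secret, shard_block_data) == 0
```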
@@ -26,25 +26,34 @@
## Helper functions
```python
-def get_next_power_of_two(x: int) -> int:
+def get_power_of_two_ceil(x: int) -> int:
    """
-    Get next power of 2 >= the input.
+    Get the power of 2 for given input, or the closest higher power of 2 if the input is not a power of 2.
+    Commonly used for "how many nodes do I need for a bottom tree layer fitting x elements?"
+    Example: 0->1, 1->1, 2->2, 3->4, 4->4, 5->8, 6->8, 7->8, 8->8, 9->16.
    """
-    if x <= 2:
-        return x
+    if x <= 1:
+        return 1
+    elif x == 2:
+        return 2
    else:
-        return 2 * get_next_power_of_two((x + 1) // 2)
+        return 2 * get_power_of_two_ceil((x + 1) // 2)
```
```python
-def get_previous_power_of_two(x: int) -> int:
+def get_power_of_two_floor(x: int) -> int:
    """
-    Get the previous power of 2 >= the input.
+    Get the power of 2 for given input, or the closest lower power of 2 if the input is not a power of 2.
+    The zero case is a placeholder and not used for math with generalized indices.
+    Commonly used for "what power of two makes up the root bit of the generalized index?"
+    Example: 0->1, 1->1, 2->2, 3->2, 4->4, 5->4, 6->4, 7->4, 8->8, 9->8
    """
-    if x <= 2:
+    if x <= 1:
+        return 1
+    if x == 2:
        return x
    else:
-        return 2 * get_previous_power_of_two(x // 2)
+        return 2 * get_power_of_two_floor(x // 2)
```
## Generalized Merkle tree index
@@ -62,9 +71,14 @@ Note that the generalized index has the convenient property that the two childre
```python
def merkle_tree(leaves: Sequence[Bytes32]) -> Sequence[Bytes32]:
-    padded_length = get_next_power_of_two(len(leaves))
-    o = [Bytes32()] * padded_length + list(leaves) + [Bytes32()] * (padded_length - len(leaves))
-    for i in range(padded_length - 1, 0, -1):
+    """
+    Return an array representing the tree nodes by generalized index:
+    [0, 1, 2, 3, 4, 5, 6, 7], where each layer is a power of 2. The 0 index is ignored. The 1 index is the root.
+    The result will be twice the size as the padded bottom layer for the input leaves.
+    """
+    bottom_length = get_power_of_two_ceil(len(leaves))
+    o = [Bytes32()] * bottom_length + list(leaves) + [Bytes32()] * (bottom_length - len(leaves))
+    for i in range(bottom_length - 1, 0, -1):
        o[i] = hash(o[i * 2] + o[i * 2 + 1])
    return o
```
@@ -169,7 +183,7 @@ def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableNam
    else:
        pos, _, _ = get_item_position(typ, p)
        base_index = (GeneralizedIndex(2) if issubclass(typ, (List, ByteList)) else GeneralizedIndex(1))
-        root = GeneralizedIndex(root * base_index * get_next_power_of_two(chunk_count(typ)) + pos)
+        root = GeneralizedIndex(root * base_index * get_power_of_two_ceil(chunk_count(typ)) + pos)
        typ = get_elem_type(typ, p)
    return root
```
@@ -188,7 +202,7 @@ def concat_generalized_indices(*indices: GeneralizedIndex) -> GeneralizedIndex:
    """
    o = GeneralizedIndex(1)
    for i in indices:
-        o = GeneralizedIndex(o * get_previous_power_of_two(i) + (i - get_previous_power_of_two(i)))
+        o = GeneralizedIndex(o * get_power_of_two_floor(i) + (i - get_power_of_two_floor(i)))
    return o
```
@@ -66,6 +66,8 @@
* **union**: union type containing one of the given subtypes
    * notation `Union[type_0, type_1, ...]`, e.g. `union[null, uint64]`
+*Note*: Both `Vector[boolean, N]` and `Bitvector[N]` are valid, yet distinct due to their different serialization requirements. Similarly, both `List[boolean, N]` and `Bitlist[N]` are valid, yet distinct. Generally `Bitvector[N]`/`Bitlist[N]` are preferred because of their serialization efficiencies.
### Variable-size and fixed-size
We recursively define "variable-size" types to be lists, unions, `Bitlist` and all types that contain a variable-size type. All other types are said to be "fixed-size".
@@ -88,9 +90,9 @@ Assuming a helper function `default(type)` which returns the default value for `
| `boolean` | `False` |
| `Container` | `[default(type) for type in container]` |
| `Vector[type, N]` | `[default(type)] * N` |
-| `Bitvector[boolean, N]` | `[False] * N` |
+| `Bitvector[N]` | `[False] * N` |
| `List[type, N]` | `[]` |
-| `Bitlist[boolean, N]` | `[]` |
+| `Bitlist[N]` | `[]` |
| `Union[type_0, type_1, ...]` | `default(type_0)` |
#### `is_zero`
@@ -211,13 +213,17 @@ We first define helper functions:
* `List[B, N]` and `Vector[B, N]`, where `B` is a basic type: `(N * size_of(B) + 31) // 32` (dividing by chunk size, rounding up)
* `List[C, N]` and `Vector[C, N]`, where `C` is a composite type: `N`
* containers: `len(fields)`
-* `pack(value)`: given ordered objects of the same basic type, serialize them, pack them into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
-* `pack_bits(bits)`: Given the `bits` of bitlist or bitvector, get `bitfield_bytes` by packing them in bytes and aligning to the start. The length-delimiting bit for bitlists is excluded. And then pack `bitfield_bytes` into `BYTES_PER_CHUNK`-byte chunks, right-pad the last chunk with zero bytes, and return the chunks.
+* `pack(values)`: Given ordered objects of the same basic type:
+    1. Serialize `values` into bytes.
+    2. If not aligned to a multiple of `BYTES_PER_CHUNK` bytes, right-pad with zeroes to the next multiple.
+    3. Partition the bytes into `BYTES_PER_CHUNK`-byte chunks.
+    4. Return the chunks.
+* `pack_bits(bits)`: Given the bits of bitlist or bitvector, get `bitfield_bytes` by packing them in bytes and aligning to the start. The length-delimiting bit for bitlists is excluded. Then return `pack(bitfield_bytes)`.
* `next_pow_of_two(i)`: get the next power of 2 of `i`, if not already a power of 2, with 0 mapping to 1. Examples: `0->1, 1->1, 2->2, 3->4, 4->4, 6->8, 9->16`
* `merkleize(chunks, limit=None)`: Given ordered `BYTES_PER_CHUNK`-byte chunks, merkleize the chunks, and return the root:
-    * The merkleization depends on the effective input, which can be padded/limited:
+    * The merkleization depends on the effective input, which must be padded/limited:
        - if no limit: pad the `chunks` with zeroed chunks to `next_pow_of_two(len(chunks))` (virtually for memory efficiency).
-        - if `limit > len(chunks)`, pad the `chunks` with zeroed chunks to `next_pow_of_two(limit)` (virtually for memory efficiency).
+        - if `limit >= len(chunks)`, pad the `chunks` with zeroed chunks to `next_pow_of_two(limit)` (virtually for memory efficiency).
        - if `limit < len(chunks)`: do not merkleize, input exceeds limit. Raise an error instead.
    * Then, merkleize the chunks (empty input is padded to 1 zero chunk):
        - If `1` chunk: the root is the chunk itself.
@@ -249,7 +255,7 @@ We similarly define "summary types" and "expansion types". For example, [`Beacon
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) |
| Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/utils/ssz) |
-| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz-js) |
+| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz) |
| Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) |
| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) |
| Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) |
@@ -1 +1 @@
-0.12.1
+0.12.2
@@ -1,8 +1,9 @@
-from ruamel.yaml import YAML
+import os
from pathlib import Path
-from os.path import join
from typing import Dict, Any
+from ruamel.yaml import YAML
config: Dict[str, Any] = {}
@@ -35,11 +36,19 @@ def load_config_file(configs_dir: str, presets_name: str) -> Dict[str, Any]:
    :param presets_name: The name of the presets. (lowercase snake_case)
    :return: Dictionary, mapping of constant-name -> constant-value
    """
-    path = Path(join(configs_dir, presets_name + '.yaml'))
-    yaml = YAML(typ='base')
-    loaded = yaml.load(path)
+    present_dir = Path(configs_dir) / presets_name
+    _, _, config_files = next(os.walk(present_dir))
+    config_files.sort()
+    loaded_config = {}
+    for config_file_name in config_files:
+        yaml = YAML(typ='base')
+        path = present_dir / config_file_name
+        loaded = yaml.load(path)
+        loaded_config.update(loaded)
+    assert loaded_config != {}
    out: Dict[str, Any] = dict()
-    for k, v in loaded.items():
+    for k, v in loaded_config.items():
        if isinstance(v, list):
            # Clean up integer values. YAML parser renders lists of ints as list of str
            out[k] = [int(item) if item.isdigit() else item for item in v]
@@ -59,8 +59,8 @@ def bls_default(request):
def bls_type(request):
    bls_type = request.config.getoption("--bls-type")
    if bls_type == "py_ecc":
-        bls_utils.bls = bls_utils.py_ecc_bls
+        bls_utils.use_py_ecc()
    elif bls_type == "milagro":
-        bls_utils.bls = bls_utils.milagro_bls
+        bls_utils.use_milagro()
    else:
        raise Exception(f"unrecognized bls type: {bls_type}")
@ -1,12 +1,14 @@
from lru import LRU
from typing import List from typing import List
from eth2spec.test.context import expect_assertion_error, PHASE0, PHASE1 from eth2spec.test.context import expect_assertion_error, PHASE1
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot, transition_to from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist from eth2spec.utils.ssz.ssz_typing import Bitlist
from lru import LRU
def run_attestation_processing(spec, state, attestation, valid=True): def run_attestation_processing(spec, state, attestation, valid=True):
@ -44,7 +46,7 @@ def run_attestation_processing(spec, state, attestation, valid=True):
yield 'post', state yield 'post', state
def build_attestation_data(spec, state, slot, index, shard_transition=None, on_time=True): def build_attestation_data(spec, state, slot, index, shard=None, shard_transition=None, on_time=True):
assert state.slot >= slot assert state.slot >= slot
if slot == state.slot: if slot == state.slot:
@ -76,43 +78,26 @@ def build_attestation_data(spec, state, slot, index, shard_transition=None, on_t
) )
if spec.fork == PHASE1: if spec.fork == PHASE1:
if shard is None:
shard = spec.compute_shard_from_committee_index(state, attestation_data.index, attestation_data.slot)
attestation_data.shard = shard
if shard_transition is not None: if shard_transition is not None:
lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1 last_offset_index = len(shard_transition.shard_data_roots) - 1
attestation_data.shard_head_root = shard_transition.shard_data_roots[lastest_shard_data_root_index] attestation_data.shard_head_root = shard_transition.shard_states[last_offset_index].latest_block_root
attestation_data.shard_transition_root = shard_transition.hash_tree_root() attestation_data.shard_transition_root = shard_transition.hash_tree_root()
else: else:
# No shard transition
shard = spec.get_shard(state, spec.Attestation(data=attestation_data))
if on_time: if on_time:
temp_state = state.copy() shard_transition = spec.get_shard_transition(state, shard, shard_blocks=[])
next_slot(spec, temp_state) last_offset_index = len(shard_transition.shard_data_roots) - 1
shard_transition = spec.get_shard_transition(temp_state, shard, []) attestation_data.shard_head_root = shard_transition.shard_states[last_offset_index].latest_block_root
lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
attestation_data.shard_head_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
attestation_data.shard_transition_root = shard_transition.hash_tree_root() attestation_data.shard_transition_root = shard_transition.hash_tree_root()
else: else:
attestation_data.shard_head_root = state.shard_states[shard].transition_digest attestation_data.shard_head_root = state.shard_states[shard].latest_block_root
attestation_data.shard_transition_root = spec.Root() attestation_data.shard_transition_root = spec.Root()
return attestation_data return attestation_data
def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False):
shard = spec.get_shard(state, attestation)
offset_slots = spec.compute_offset_slots(
spec.get_latest_slot_for_shard(state, shard),
attestation.data.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY,
)
for _ in offset_slots:
attestation.custody_bits_blocks.append(
Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits])
)
if signed:
sign_attestation(spec, state, attestation)
return attestation
def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_transition=None, signed=False): def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_transition=None, signed=False):
''' '''
Construct on-time attestation for next slot Construct on-time attestation for next slot
@ -133,7 +118,7 @@ def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_tran
) )
def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False): def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False, shard_transition=None):
''' '''
Construct on-time attestation for next slot Construct on-time attestation for next slot
''' '''
@ -142,7 +127,8 @@ def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False)
if index is None: if index is None:
index = 0 index = 0
return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=False) return get_valid_attestation(spec, state, slot=slot, index=index,
signed=signed, on_time=False, shard_transition=shard_transition)
def get_valid_attestation(spec, def get_valid_attestation(spec,
@ -179,9 +165,6 @@ def get_valid_attestation(spec,
# fill the attestation with (optionally filtered) participants, and optionally sign it # fill the attestation with (optionally filtered) participants, and optionally sign it
fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set) fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set)
if spec.fork == PHASE1 and on_time:
attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed)
return attestation return attestation
@ -201,43 +184,9 @@ def sign_aggregate_attestation(spec, state, attestation_data, participants: List
def sign_indexed_attestation(spec, state, indexed_attestation): def sign_indexed_attestation(spec, state, indexed_attestation):
if spec.fork == PHASE0: participants = indexed_attestation.attesting_indices
participants = indexed_attestation.attesting_indices data = indexed_attestation.data
data = indexed_attestation.data indexed_attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
indexed_attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
else:
participants = spec.get_indices_from_committee(
indexed_attestation.committee,
indexed_attestation.attestation.aggregation_bits,
)
data = indexed_attestation.attestation.data
if any(indexed_attestation.attestation.custody_bits_blocks):
sign_on_time_attestation(spec, state, indexed_attestation.attestation)
else:
indexed_attestation.attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
def sign_on_time_attestation(spec, state, attestation):
if not any(attestation.custody_bits_blocks):
sign_attestation(spec, state, attestation)
return
committee = spec.get_beacon_committee(state, attestation.data.slot, attestation.data.index)
signatures = []
for block_index, custody_bits in enumerate(attestation.custody_bits_blocks):
for participant, abit, cbit in zip(committee, attestation.aggregation_bits, custody_bits):
if not abit:
continue
signatures.append(get_attestation_custody_signature(
spec,
state,
attestation.data,
block_index,
cbit,
privkeys[participant]
))
attestation.signature = bls.Aggregate(signatures)
def get_attestation_custody_signature(spec, state, attestation_data, block_index, bit, privkey): def get_attestation_custody_signature(spec, state, attestation_data, block_index, bit, privkey):
@ -254,10 +203,6 @@ def get_attestation_custody_signature(spec, state, attestation_data, block_index
def sign_attestation(spec, state, attestation): def sign_attestation(spec, state, attestation):
if spec.fork == PHASE1 and any(attestation.custody_bits_blocks):
sign_on_time_attestation(spec, state, attestation)
return
participants = spec.get_attesting_indices( participants = spec.get_attesting_indices(
state, state,
attestation.data, attestation.data,
@ -314,23 +259,30 @@ def next_epoch_with_attestations(spec,
block = build_empty_block_for_next_slot(spec, post_state) block = build_empty_block_for_next_slot(spec, post_state)
if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest) committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)): if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)):
for index in range(committees_per_slot): for index in range(committees_per_slot):
cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index, signed=True) if spec.fork == PHASE1:
shard = spec.compute_shard_from_committee_index(post_state, index, slot_to_attest)
shard_transition = get_shard_transition_of_committee(spec, post_state, index)
block.body.shard_transitions[shard] = shard_transition
else:
shard_transition = None
cur_attestation = get_valid_attestation(
spec, post_state, slot_to_attest,
shard_transition=shard_transition, index=index, signed=True, on_time=True
)
block.body.attestations.append(cur_attestation) block.body.attestations.append(cur_attestation)
if fill_prev_epoch: if fill_prev_epoch:
slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1 slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest) committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
for index in range(committees_per_slot): for index in range(committees_per_slot):
prev_attestation = get_valid_attestation( prev_attestation = get_valid_attestation(
spec, post_state, slot_to_attest, index=index, signed=True, on_time=False) spec, post_state, slot_to_attest, index=index, signed=True, on_time=False)
block.body.attestations.append(prev_attestation) block.body.attestations.append(prev_attestation)
if spec.fork == PHASE1:
fill_block_shard_transitions_by_attestations(spec, post_state, block)
signed_block = state_transition_and_sign_block(spec, post_state, block) signed_block = state_transition_and_sign_block(spec, post_state, block)
signed_blocks.append(signed_block) signed_blocks.append(signed_block)
@ -354,7 +306,7 @@ def prepare_state_with_attestations(spec, state, participation_fn=None):
for _ in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY): for _ in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY):
# create an attestation for each index in each slot in epoch # create an attestation for each index in each slot in epoch
if state.slot < next_epoch_start_slot: if state.slot < next_epoch_start_slot:
for committee_index in range(spec.get_committee_count_at_slot(state, state.slot)): for committee_index in range(spec.get_committee_count_per_slot(state, spec.get_current_epoch(state))):
def temp_participants_filter(comm): def temp_participants_filter(comm):
if participation_fn is None: if participation_fn is None:
return comm return comm
@ -396,14 +348,3 @@ def cached_prepare_state_with_attestations(spec, state):
# Put the LRU cache result into the state view, as if we transitioned the original view # Put the LRU cache result into the state view, as if we transitioned the original view
state.set_backing(_prep_state_cache_dict[key]) state.set_backing(_prep_state_cache_dict[key])
def fill_block_shard_transitions_by_attestations(spec, state, block):
block.body.shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
for attestation in block.body.attestations:
shard = spec.get_shard(state, attestation)
if attestation.data.slot == state.slot:
temp_state = state.copy()
transition_to(spec, temp_state, slot=block.slot)
shard_transition = spec.get_shard_transition(temp_state, shard, [])
block.body.shard_transitions[shard] = shard_transition
@ -1,4 +1,3 @@
from eth2spec.test.context import PHASE1
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation, sign_indexed_attestation from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation, sign_indexed_attestation
@ -41,34 +40,19 @@ def get_indexed_attestation_participants(spec, indexed_att):
""" """
Wrapper around index-attestation to return the list of participant indices, regardless of spec phase. Wrapper around index-attestation to return the list of participant indices, regardless of spec phase.
""" """
if spec.fork == PHASE1: return list(indexed_att.attesting_indices)
return list(spec.get_indices_from_committee(
indexed_att.committee,
indexed_att.attestation.aggregation_bits,
))
else:
return list(indexed_att.attesting_indices)
def set_indexed_attestation_participants(spec, indexed_att, participants): def set_indexed_attestation_participants(spec, indexed_att, participants):
""" """
Wrapper around index-attestation to return the list of participant indices, regardless of spec phase. Wrapper around index-attestation to return the list of participant indices, regardless of spec phase.
""" """
if spec.fork == PHASE1: indexed_att.attesting_indices = participants
indexed_att.attestation.aggregation_bits = [bool(i in participants) for i in indexed_att.committee]
else:
indexed_att.attesting_indices = participants
def get_attestation_1_data(spec, att_slashing): def get_attestation_1_data(spec, att_slashing):
if spec.fork == PHASE1: return att_slashing.attestation_1.data
return att_slashing.attestation_1.attestation.data
else:
return att_slashing.attestation_1.data
def get_attestation_2_data(spec, att_slashing): def get_attestation_2_data(spec, att_slashing):
if spec.fork == PHASE1: return att_slashing.attestation_2.data
return att_slashing.attestation_2.attestation.data
else:
return att_slashing.attestation_2.data
@ -1,10 +1,7 @@
from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist, ByteVector, Bitvector from eth2spec.utils.ssz.ssz_typing import Bitlist, ByteVector, ByteList
from eth2spec.utils.ssz.ssz_impl import hash_tree_root from remerkleable.tree import gindex_bit_iter
from eth2spec.utils.merkle_minimal import get_merkle_tree, get_merkle_proof
from remerkleable.core import pack_bits_to_chunks
from remerkleable.tree import subtree_fill_to_contents, get_depth
BYTES_PER_CHUNK = 32 BYTES_PER_CHUNK = 32
@ -37,9 +34,10 @@ def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
) )
def get_valid_custody_key_reveal(spec, state, period=None): def get_valid_custody_key_reveal(spec, state, period=None, validator_index=None):
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
revealer_index = spec.get_active_validator_indices(state, current_epoch)[0] revealer_index = (spec.get_active_validator_indices(state, current_epoch)[0]
if validator_index is None else validator_index)
revealer = state.validators[revealer_index] revealer = state.validators[revealer_index]
if period is None: if period is None:
@ -61,38 +59,54 @@ def bitlist_from_int(max_len, num_bits, n):
return Bitlist[max_len](*[(n >> i) & 0b1 for i in range(num_bits)]) return Bitlist[max_len](*[(n >> i) & 0b1 for i in range(num_bits)])
def get_valid_bit_challenge(spec, state, attestation, invalid_custody_bit=False): def get_valid_custody_slashing(spec, state, attestation, shard_transition, custody_secret, data, data_index=0):
beacon_committee = spec.get_beacon_committee( beacon_committee = spec.get_beacon_committee(
state, state,
attestation.data.slot, attestation.data.slot,
attestation.data.crosslink.shard, attestation.data.index,
) )
responder_index = beacon_committee[0] malefactor_index = beacon_committee[0]
challenger_index = beacon_committee[-1] whistleblower_index = beacon_committee[-1]
epoch = spec.get_randao_epoch_for_custody_period(attestation.data.target.epoch, slashing = spec.CustodySlashing(
responder_index) data_index=data_index,
malefactor_index=malefactor_index,
malefactor_secret=custody_secret,
whistleblower_index=whistleblower_index,
shard_transition=shard_transition,
attestation=attestation,
data=data,
)
slashing_domain = spec.get_domain(state, spec.DOMAIN_CUSTODY_BIT_SLASHING)
slashing_root = spec.compute_signing_root(slashing, slashing_domain)
# Generate the responder key signed_slashing = spec.SignedCustodySlashing(
domain = spec.get_domain(state, spec.DOMAIN_RANDAO, epoch) message=slashing,
signing_root = spec.compute_signing_root(spec.Epoch(epoch), domain) signature=bls.Sign(privkeys[whistleblower_index], slashing_root)
responder_key = bls.Sign(privkeys[responder_index], signing_root) )
chunk_count = spec.get_custody_chunk_count(attestation.data.crosslink) return signed_slashing
chunk_bits = bitlist_from_int(spec.MAX_CUSTODY_CHUNKS, chunk_count, 0)
n = 0 def get_valid_chunk_challenge(spec, state, attestation, shard_transition, data_index=None, chunk_index=None):
while spec.get_chunk_bits_root(chunk_bits) == attestation.custody_bits[0] ^ invalid_custody_bit: crosslink_committee = spec.get_beacon_committee(
chunk_bits = bitlist_from_int(spec.MAX_CUSTODY_CHUNKS, chunk_count, n) state,
n += 1 attestation.data.slot,
attestation.data.index
)
responder_index = crosslink_committee[0]
data_index = len(shard_transition.shard_block_lengths) - 1 if not data_index else data_index
return spec.CustodyBitChallenge( chunk_count = (shard_transition.shard_block_lengths[data_index]
+ spec.BYTES_PER_CUSTODY_CHUNK - 1) // spec.BYTES_PER_CUSTODY_CHUNK
chunk_index = chunk_count - 1 if not chunk_index else chunk_index
return spec.CustodyChunkChallenge(
responder_index=responder_index, responder_index=responder_index,
attestation=attestation, attestation=attestation,
challenger_index=challenger_index, chunk_index=chunk_index,
responder_key=responder_key, data_index=data_index,
chunk_bits=chunk_bits, shard_transition=shard_transition,
) )
@ -102,50 +116,89 @@ def custody_chunkify(spec, x):
return chunks return chunks
def get_valid_custody_response(spec, state, bit_challenge, custody_data, challenge_index, invalid_chunk_bit=False): def build_proof(anchor, leaf_index):
chunks = custody_chunkify(spec, custody_data) if leaf_index <= 1:
return [] # Nothing to prove / invalid index
node = anchor
proof = []
# Walk down, top to bottom to the leaf
bit_iter, _ = gindex_bit_iter(leaf_index)
for bit in bit_iter:
# Always take the opposite hand for the proof.
# 1 = right as leaf, thus get left
if bit:
proof.append(node.get_left().merkle_root())
node = node.get_right()
else:
proof.append(node.get_right().merkle_root())
node = node.get_left()
chunk_index = len(chunks) - 1 return list(reversed(proof))
chunk_bit = spec.get_custody_chunk_bit(bit_challenge.responder_key, chunks[chunk_index])
while chunk_bit == bit_challenge.chunk_bits[chunk_index] ^ invalid_chunk_bit:
chunk_index -= 1
chunk_bit = spec.get_custody_chunk_bit(bit_challenge.responder_key, chunks[chunk_index])
chunks_hash_tree_roots = [hash_tree_root(ByteVector[spec.BYTES_PER_CUSTODY_CHUNK](chunk)) for chunk in chunks] def get_valid_custody_chunk_response(spec, state, chunk_challenge, challenge_index,
chunks_hash_tree_roots += [ block_length_or_custody_data,
hash_tree_root(ByteVector[spec.BYTES_PER_CUSTODY_CHUNK](b"\0" * spec.BYTES_PER_CUSTODY_CHUNK)) invalid_chunk_data=False):
for i in range(2 ** spec.ceillog2(len(chunks)) - len(chunks))] if isinstance(block_length_or_custody_data, int):
data_tree = get_merkle_tree(chunks_hash_tree_roots) custody_data = get_custody_test_vector(block_length_or_custody_data)
else:
custody_data = block_length_or_custody_data
data_branch = get_merkle_proof(data_tree, chunk_index) custody_data_block = ByteList[spec.MAX_SHARD_BLOCK_SIZE](custody_data)
chunks = custody_chunkify(spec, custody_data_block)
bitlist_chunk_index = chunk_index // BYTES_PER_CHUNK chunk_index = chunk_challenge.chunk_index
print(bitlist_chunk_index)
bitlist_chunk_nodes = pack_bits_to_chunks(bit_challenge.chunk_bits)
bitlist_tree = subtree_fill_to_contents(bitlist_chunk_nodes, get_depth(spec.MAX_CUSTODY_CHUNKS))
print(bitlist_tree)
bitlist_chunk_branch = None # TODO; extract proof from merkle tree
bitlist_chunk_index = chunk_index // 256 data_branch = build_proof(custody_data_block.get_backing().get_left(), chunk_index + 2**spec.CUSTODY_RESPONSE_DEPTH)
chunk_bits_leaf = Bitvector[256](bit_challenge.chunk_bits[bitlist_chunk_index * 256: return spec.CustodyChunkResponse(
(bitlist_chunk_index + 1) * 256])
return spec.CustodyResponse(
challenge_index=challenge_index, challenge_index=challenge_index,
chunk_index=chunk_index, chunk_index=chunk_index,
chunk=ByteVector[spec.BYTES_PER_CUSTODY_CHUNK](chunks[chunk_index]), chunk=ByteVector[spec.BYTES_PER_CUSTODY_CHUNK](chunks[chunk_index]),
data_branch=data_branch, branch=data_branch,
chunk_bits_branch=bitlist_chunk_branch,
chunk_bits_leaf=chunk_bits_leaf,
) )
def get_custody_test_vector(bytelength): def get_custody_test_vector(bytelength, offset=0):
ints = bytelength // 4 ints = bytelength // 4 + 1
return b"".join(i.to_bytes(4, "little") for i in range(ints)) return (b"".join((i + offset).to_bytes(4, "little") for i in range(ints)))[:bytelength]
def get_custody_merkle_root(data): def get_sample_shard_transition(spec, start_slot, block_lengths):
return None # get_merkle_tree(chunkify(data))[-1][0] b = [spec.get_block_data_merkle_root(ByteList[spec.MAX_SHARD_BLOCK_SIZE](get_custody_test_vector(x)))
for x in block_lengths]
shard_transition = spec.ShardTransition(
start_slot=start_slot,
shard_block_lengths=block_lengths,
shard_data_roots=b,
shard_states=[spec.ShardState() for x in block_lengths],
proposer_signature_aggregate=spec.BLSSignature(),
)
return shard_transition
def get_custody_secret(spec, state, validator_index, epoch=None):
period = spec.get_custody_period_for_validator(validator_index, epoch if epoch is not None
else spec.get_current_epoch(state))
epoch_to_sign = spec.get_randao_epoch_for_custody_period(period, validator_index)
domain = spec.get_domain(state, spec.DOMAIN_RANDAO, epoch_to_sign)
signing_root = spec.compute_signing_root(spec.Epoch(epoch_to_sign), domain)
return bls.Sign(privkeys[validator_index], signing_root)
def get_custody_slashable_test_vector(spec, custody_secret, length, slashable=True):
test_vector = get_custody_test_vector(length)
offset = 0
while spec.compute_custody_bit(custody_secret, test_vector) != slashable:
offset += 1
test_vector = get_custody_test_vector(length, offset)
return test_vector
def get_custody_slashable_shard_transition(spec, start_slot, block_lengths, custody_secret, slashable=True):
shard_transition = get_sample_shard_transition(spec, start_slot, block_lengths)
slashable_test_vector = get_custody_slashable_test_vector(spec, custody_secret,
block_lengths[0], slashable=slashable)
block_data = ByteList[spec.MAX_SHARD_BLOCK_SIZE](slashable_test_vector)
shard_transition.shard_data_roots[0] = spec.get_block_data_merkle_root(block_data)
return shard_transition, slashable_test_vector
@ -0,0 +1,27 @@
def get_anchor_root(spec, state):
anchor_block_header = state.latest_block_header.copy()
if anchor_block_header.state_root == spec.Bytes32():
anchor_block_header.state_root = spec.hash_tree_root(state)
return spec.hash_tree_root(anchor_block_header)
def add_block_to_store(spec, store, signed_block):
pre_state = store.block_states[signed_block.message.parent_root]
block_time = pre_state.genesis_time + signed_block.message.slot * spec.SECONDS_PER_SLOT
if store.time < block_time:
spec.on_tick(store, block_time)
spec.on_block(store, signed_block)
def add_attestation_to_store(spec, store, attestation):
parent_block = store.blocks[attestation.data.beacon_block_root]
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
block_time = pre_state.genesis_time + parent_block.slot * spec.SECONDS_PER_SLOT
next_epoch_time = block_time + spec.SLOTS_PER_EPOCH * spec.SECONDS_PER_SLOT
if store.time < next_epoch_time:
spec.on_tick(store, next_epoch_time)
spec.on_attestation(store, attestation)
@ -1,6 +1,4 @@
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
from eth2spec.test.helpers.block import get_state_and_beacon_parent_root_at_slot from eth2spec.test.helpers.block import get_state_and_beacon_parent_root_at_slot
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls from eth2spec.utils.bls import only_with_bls
@ -23,19 +21,21 @@ def build_shard_block(spec,
shard, shard,
slot=None, slot=None,
body=None, body=None,
shard_parent_state=None,
signed=False): signed=False):
shard_state = beacon_state.shard_states[shard] if shard_parent_state is None:
shard_parent_state = beacon_state.shard_states[shard]
if slot is None: if slot is None:
slot = shard_state.slot + 1 slot = shard_parent_state.slot + 1
if body is None: if body is None:
body = b'\x56' * 128 body = get_sample_shard_block_body(spec)
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
beacon_state, beacon_parent_root = get_state_and_beacon_parent_root_at_slot(spec, beacon_state, slot) beacon_state, beacon_parent_root = get_state_and_beacon_parent_root_at_slot(spec, beacon_state, slot)
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
block = spec.ShardBlock( block = spec.ShardBlock(
shard_parent_root=shard_state.latest_block_root, shard_parent_root=shard_parent_state.latest_block_root,
beacon_parent_root=beacon_parent_root, beacon_parent_root=beacon_parent_root,
slot=slot, slot=slot,
shard=shard, shard=shard,
@ -52,15 +52,18 @@ def build_shard_block(spec,
return signed_block return signed_block
def build_shard_transitions_till_slot(spec, state, shard_blocks, on_time_slot): def get_shard_transitions(spec, parent_beacon_state, shard_block_dict):
temp_state = state.copy()
transition_to(spec, temp_state, on_time_slot)
shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
for shard, blocks in shard_blocks.items(): on_time_slot = parent_beacon_state.slot + 1
offset_slots = spec.get_offset_slots(temp_state, shard) for shard, blocks in shard_block_dict.items():
shard_transition = spec.get_shard_transition(parent_beacon_state, shard, blocks)
offset_slots = spec.compute_offset_slots(
spec.get_latest_slot_for_shard(parent_beacon_state, shard),
on_time_slot,
)
len_offset_slots = len(offset_slots) len_offset_slots = len(offset_slots)
assert len_offset_slots == on_time_slot - state.shard_states[shard].slot - 1 shard_transition = spec.get_shard_transition(parent_beacon_state, shard, blocks)
shard_transition = spec.get_shard_transition(temp_state, shard, blocks)
if len(blocks) > 0: if len(blocks) > 0:
shard_block_root = blocks[-1].message.hash_tree_root() shard_block_root = blocks[-1].message.hash_tree_root()
assert shard_transition.shard_states[len_offset_slots - 1].latest_block_root == shard_block_root assert shard_transition.shard_states[len_offset_slots - 1].latest_block_root == shard_block_root
@ -70,17 +73,16 @@ def build_shard_transitions_till_slot(spec, state, shard_blocks, on_time_slot):
return shard_transitions return shard_transitions
def build_attestation_with_shard_transition(spec, state, index, on_time_slot, shard_transition=None): def get_committee_index_of_shard(spec, state, slot, shard): # Optional[CommitteeIndex]
temp_state = state.copy() active_shard_count = spec.get_active_shard_count(state)
transition_to(spec, temp_state, on_time_slot - 1) committee_count = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot))
attestation = get_valid_on_time_attestation( start_shard = spec.get_start_shard(state, slot)
spec, for committee_index in range(committee_count):
temp_state, if (start_shard + committee_index) % active_shard_count == shard:
index=index, return committee_index
shard_transition=shard_transition, return None
signed=True,
)
assert attestation.data.slot == temp_state.slot def get_sample_shard_block_body(spec, is_max=False):
if shard_transition is not None: size = spec.MAX_SHARD_BLOCK_SIZE if is_max else 128
assert attestation.data.shard_transition_root == shard_transition.hash_tree_root() return b'\x56' * size
return attestation
@ -26,3 +26,17 @@ def run_shard_transitions_processing(spec, state, shard_transitions, attestation
# yield post-state # yield post-state
yield 'post', state yield 'post', state
def get_shard_transition_of_committee(spec, state, committee_index, shard_blocks=None):
if shard_blocks is None:
shard_blocks = []
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
shard_transition = spec.get_shard_transition(state, shard, shard_blocks=shard_blocks)
return shard_transition
def is_full_crosslink(spec, state):
epoch = spec.compute_epoch_at_slot(state.slot)
return spec.get_committee_count_per_slot(state, epoch) >= spec.get_active_shard_count(state)
@@ -139,7 +139,7 @@ def test_wrong_index_for_committee_signature(spec, state):
@spec_state_test
@never_bls
def test_wrong_index_for_slot(spec, state):
-    while spec.get_committee_count_at_slot(state, state.slot) >= spec.MAX_COMMITTEES_PER_SLOT:
+    while spec.get_committee_count_per_slot(state, spec.get_current_epoch(state)) >= spec.MAX_COMMITTEES_PER_SLOT:
        state.validators = state.validators[:len(state.validators) // 2]
        state.balances = state.balances[:len(state.balances) // 2]
@ -1,5 +1,5 @@
from eth2spec.test.context import ( from eth2spec.test.context import (
PHASE0, PHASE1, PHASE0,
spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases
) )
from eth2spec.test.helpers.attestations import sign_indexed_attestation from eth2spec.test.helpers.attestations import sign_indexed_attestation
@ -162,10 +162,7 @@ def test_same_data(spec, state):
indexed_att_1 = attester_slashing.attestation_1 indexed_att_1 = attester_slashing.attestation_1
att_2_data = get_attestation_2_data(spec, attester_slashing) att_2_data = get_attestation_2_data(spec, attester_slashing)
if spec.fork == PHASE1: indexed_att_1.data = att_2_data
indexed_att_1.attestation.data = att_2_data
else:
indexed_att_1.data = att_2_data
sign_indexed_attestation(spec, state, attester_slashing.attestation_1) sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
yield from run_attester_slashing_processing(spec, state, attester_slashing, False) yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@ -200,6 +197,71 @@ def test_participants_already_slashed(spec, state):
# Some of the following tests are phase0 only: phase 1 lists participants with bitfields instead of index list. # Some of the following tests are phase0 only: phase 1 lists participants with bitfields instead of index list.
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att1_high_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
indices.append(spec.ValidatorIndex(len(state.validators))) # off by 1
attester_slashing.attestation_1.attesting_indices = indices
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att2_high_index(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_2)
indices.append(spec.ValidatorIndex(len(state.validators))) # off by 1
attester_slashing.attestation_2.attesting_indices = indices
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att1_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
attester_slashing.attestation_1.attesting_indices = []
attester_slashing.attestation_1.signature = spec.bls.Z2_SIGNATURE
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att2_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
attester_slashing.attestation_2.attesting_indices = []
attester_slashing.attestation_2.signature = spec.bls.Z2_SIGNATURE
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_all_empty_indices(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)
attester_slashing.attestation_1.attesting_indices = []
attester_slashing.attestation_1.signature = spec.bls.Z2_SIGNATURE
attester_slashing.attestation_2.attesting_indices = []
attester_slashing.attestation_2.signature = spec.bls.Z2_SIGNATURE
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@with_phases([PHASE0]) @with_phases([PHASE0])
@spec_state_test @spec_state_test
@always_bls @always_bls
@@ -1,5 +1,5 @@
from eth2spec.test.context import spec_state_test, with_all_phases
-from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
+from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
    run_epoch_processing_with, run_epoch_processing_to
)
from eth2spec.test.helpers.state import transition_to
@@ -1,5 +1,5 @@
from eth2spec.test.context import spec_state_test, with_all_phases
-from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
+from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
    run_epoch_processing_with
)
from eth2spec.test.helpers.state import transition_to
@@ -27,8 +27,8 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
    remaining_balance = int(total_balance * 2 // 3)  # can become negative
    start_slot = spec.compute_start_slot_at_epoch(epoch)
+    committees_per_slot = spec.get_committee_count_per_slot(state, epoch)
    for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH):
-        committees_per_slot = spec.get_committee_count_at_slot(state, slot)
        for index in range(committees_per_slot):
            # Check if we already have had sufficient balance. (and undone if we don't want it).
            # If so, do not create more attestations. (we do not have empty pending attestations normally anyway)
@@ -1,7 +1,7 @@
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.state import next_epoch, next_slots
from eth2spec.test.context import spec_state_test, with_all_phases
-from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
+from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
def run_process_registry_updates(spec, state):
@ -1,4 +1,5 @@
from eth2spec.test.context import ( from eth2spec.test.context import (
PHASE0,
spec_state_test, spec_test, spec_state_test, spec_test,
with_all_phases, with_phases, single_phase, with_all_phases, with_phases, single_phase,
with_custom_state, with_custom_state,
@ -16,7 +17,7 @@ from eth2spec.test.helpers.attestations import (
) )
from eth2spec.test.helpers.rewards import leaking from eth2spec.test.helpers.rewards import leaking
from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from random import Random from random import Random
@ -24,7 +25,7 @@ def run_process_rewards_and_penalties(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_rewards_and_penalties') yield from run_epoch_processing_with(spec, state, 'process_rewards_and_penalties')
@with_phases(['phase0']) @with_phases([PHASE0])
@spec_state_test @spec_state_test
def test_genesis_epoch_no_attestations_no_penalties(spec, state): def test_genesis_epoch_no_attestations_no_penalties(spec, state):
pre_state = state.copy() pre_state = state.copy()
@ -37,7 +38,7 @@ def test_genesis_epoch_no_attestations_no_penalties(spec, state):
assert state.balances[index] == pre_state.balances[index] assert state.balances[index] == pre_state.balances[index]
@with_phases(['phase0']) @with_phases([PHASE0])
@spec_state_test @spec_state_test
def test_genesis_epoch_full_attestations_no_rewards(spec, state): def test_genesis_epoch_full_attestations_no_rewards(spec, state):
attestations = [] attestations = []
@ -47,7 +48,7 @@ def test_genesis_epoch_full_attestations_no_rewards(spec, state):
attestation = get_valid_attestation(spec, state, signed=True) attestation = get_valid_attestation(spec, state, signed=True)
attestations.append(attestation) attestations.append(attestation)
# fill each created slot in state after inclusion delay # fill each created slot in state after inclusion delay
if slot - spec.MIN_ATTESTATION_INCLUSION_DELAY >= 0: if slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
include_att = attestations[slot - spec.MIN_ATTESTATION_INCLUSION_DELAY] include_att = attestations[slot - spec.MIN_ATTESTATION_INCLUSION_DELAY]
add_attestations_to_state(spec, state, [include_att], state.slot) add_attestations_to_state(spec, state, [include_att], state.slot)
next_slot(spec, state) next_slot(spec, state)
@@ -1,5 +1,5 @@
from eth2spec.test.context import spec_state_test, with_all_phases
-from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
+from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
    run_epoch_processing_with, run_epoch_processing_to
)
from eth2spec.test.helpers.state import next_epoch
@ -1,4 +1,4 @@
from eth2spec.test.context import spec_state_test, never_bls, with_all_phases, with_phases from eth2spec.test.context import PHASE0, spec_state_test, with_all_phases, with_phases
from eth2spec.test.helpers.state import next_epoch_via_block from eth2spec.test.helpers.state import next_epoch_via_block
from eth2spec.test.helpers.attestations import next_epoch_with_attestations from eth2spec.test.helpers.attestations import next_epoch_with_attestations
@ -28,9 +28,8 @@ def check_finality(spec,
assert state.finalized_checkpoint == prev_state.finalized_checkpoint assert state.finalized_checkpoint == prev_state.finalized_checkpoint
@with_phases(["phase0"]) @with_phases([PHASE0])
@spec_state_test @spec_state_test
@never_bls
def test_finality_no_updates_at_genesis(spec, state): def test_finality_no_updates_at_genesis(spec, state):
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
@ -54,7 +53,6 @@ def test_finality_no_updates_at_genesis(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
@never_bls
def test_finality_rule_4(spec, state): def test_finality_rule_4(spec, state):
# get past first two epochs that finality does not run on # get past first two epochs that finality does not run on
next_epoch_via_block(spec, state) next_epoch_via_block(spec, state)
@ -80,7 +78,6 @@ def test_finality_rule_4(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
@never_bls
def test_finality_rule_1(spec, state): def test_finality_rule_1(spec, state):
# get past first two epochs that finality does not run on # get past first two epochs that finality does not run on
next_epoch_via_block(spec, state) next_epoch_via_block(spec, state)
@ -108,7 +105,6 @@ def test_finality_rule_1(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
@never_bls
def test_finality_rule_2(spec, state): def test_finality_rule_2(spec, state):
# get past first two epochs that finality does not run on # get past first two epochs that finality does not run on
next_epoch_via_block(spec, state) next_epoch_via_block(spec, state)
@ -138,7 +134,6 @@ def test_finality_rule_2(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
@never_bls
def test_finality_rule_3(spec, state): def test_finality_rule_3(spec, state):
""" """
Test scenario described here Test scenario described here

View File

@ -16,12 +16,13 @@ from eth2spec.test.helpers.attester_slashings import (
get_indexed_attestation_participants, get_indexed_attestation_participants,
) )
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect
from eth2spec.test.helpers.attestations import get_valid_attestation, fill_block_shard_transitions_by_attestations from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.deposits import prepare_state_and_deposit from eth2spec.test.helpers.deposits import prepare_state_and_deposit
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
from eth2spec.test.context import ( from eth2spec.test.context import (
PHASE0, PHASE1,
spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases, spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases,
PHASE1
) )
@ -112,7 +113,7 @@ def process_and_sign_block_without_header_validations(spec, state, block):
return sign_block(spec, state, block) return sign_block(spec, state, block)
@with_phases(['phase0']) @with_phases([PHASE0])
@spec_state_test @spec_state_test
def test_proposal_for_genesis_slot(spec, state): def test_proposal_for_genesis_slot(spec, state):
assert state.slot == spec.GENESIS_SLOT assert state.slot == spec.GENESIS_SLOT
@ -312,6 +313,28 @@ def test_empty_epoch_transition_not_finalizing(spec, state):
assert state.balances[index] < pre_balances[index] assert state.balances[index] < pre_balances[index]
@with_all_phases
@spec_state_test
def test_proposer_self_slashing(spec, state):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
assert not state.validators[block.proposer_index].slashed
proposer_slashing = get_valid_proposer_slashing(
spec, state, slashed_index=block.proposer_index, signed_1=True, signed_2=True)
block.body.proposer_slashings.append(proposer_slashing)
# The header is processed *before* the block body:
# the proposer is not yet slashed when the header is checked, so the block is valid.
signed_block = state_transition_and_sign_block(spec, state, block)
# The proposer slashed themselves.
assert state.validators[block.proposer_index].slashed
yield 'blocks', [signed_block]
yield 'post', state
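For readers tracing why the self-slashing block above passes: phase 0 verifies the block header before it applies the body operations, so at header time the proposer is still unslashed. A minimal sketch of that ordering, paraphrasing the phase 0 process_block flow (not the test's own code):

# Sketch of the phase 0 block-processing order the test above relies on.
# The header check (which rejects blocks from slashed proposers) runs
# before the body, so a proposer_slashing targeting the block's own
# proposer is still accepted within the same block.
def process_block_order_sketch(spec, state, block):
    spec.process_block_header(state, block)     # proposer not slashed yet -> passes
    spec.process_randao(state, block.body)
    spec.process_eth1_data(state, block.body)
    spec.process_operations(state, block.body)  # proposer_slashings applied here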
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_proposer_slashing(spec, state): def test_proposer_slashing(spec, state):
@ -483,7 +506,7 @@ def test_duplicate_attester_slashing(spec, state):
# All AttesterSlashing tests should be adopted for Phase 1 but helper support is not yet there # All AttesterSlashing tests should be adopted for Phase 1 but helper support is not yet there
@with_phases(['phase0']) @with_phases([PHASE0])
@spec_state_test @spec_state_test
def test_multiple_attester_slashings_no_overlap(spec, state): def test_multiple_attester_slashings_no_overlap(spec, state):
# Skip test if config cannot handle multiple AttesterSlashings per block # Skip test if config cannot handle multiple AttesterSlashings per block
@ -524,7 +547,7 @@ def test_multiple_attester_slashings_no_overlap(spec, state):
check_attester_slashing_effect(spec, pre_state, state, full_indices) check_attester_slashing_effect(spec, pre_state, state, full_indices)
@with_phases(['phase0']) @with_phases([PHASE0])
@spec_state_test @spec_state_test
def test_multiple_attester_slashings_partial_overlap(spec, state): def test_multiple_attester_slashings_partial_overlap(spec, state):
# Skip test if config cannot handle multiple AttesterSlashings per block # Skip test if config cannot handle multiple AttesterSlashings per block
@ -687,14 +710,23 @@ def test_attestation(spec, state):
yield 'pre', state yield 'pre', state
attestation = get_valid_attestation(spec, state, signed=True, on_time=True) attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
index = 0
if spec.fork == PHASE1:
shard = spec.compute_shard_from_committee_index(state, index, state.slot)
shard_transition = get_shard_transition_of_committee(spec, state, index)
attestation_block.body.shard_transitions[shard] = shard_transition
else:
shard_transition = None
attestation = get_valid_attestation(
spec, state, shard_transition=shard_transition, index=index, signed=True, on_time=True
)
# Add to state via block transition # Add to state via block transition
pre_current_attestations_len = len(state.current_epoch_attestations) pre_current_attestations_len = len(state.current_epoch_attestations)
attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
attestation_block.body.attestations.append(attestation) attestation_block.body.attestations.append(attestation)
if spec.fork == PHASE1:
fill_block_shard_transitions_by_attestations(spec, state, attestation_block)
signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block) signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)
assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1 assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
@ -730,7 +762,7 @@ def prepare_signed_exits(spec, state, indices):
# exceeding the minimal-config randao mixes memory size. # exceeding the minimal-config randao mixes memory size.
# Applies to all voluntary-exit sanity block tests. # Applies to all voluntary-exit sanity block tests.
@with_phases(['phase0']) @with_phases([PHASE0])
@spec_state_test @spec_state_test
def test_voluntary_exit(spec, state): def test_voluntary_exit(spec, state):
validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
@ -758,7 +790,7 @@ def test_voluntary_exit(spec, state):
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
@with_phases(['phase0']) @with_phases([PHASE0])
@spec_state_test @spec_state_test
def test_double_validator_exit_same_block(spec, state): def test_double_validator_exit_same_block(spec, state):
validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1] validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
@ -779,7 +811,7 @@ def test_double_validator_exit_same_block(spec, state):
yield 'post', None yield 'post', None
@with_phases(['phase0']) @with_phases([PHASE0])
@spec_state_test @spec_state_test
def test_multiple_different_validator_exits_same_block(spec, state): def test_multiple_different_validator_exits_same_block(spec, state):
validator_indices = [ validator_indices = [

View File

@ -1,41 +1,13 @@
from eth2spec.test.context import with_all_phases, spec_state_test from eth2spec.test.context import with_all_phases, spec_state_test
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.fork_choice import add_attestation_to_store, add_block_to_store, get_anchor_root
from eth2spec.test.helpers.state import ( from eth2spec.test.helpers.state import (
next_epoch, next_epoch,
state_transition_and_sign_block, state_transition_and_sign_block,
) )
def add_block_to_store(spec, store, signed_block):
pre_state = store.block_states[signed_block.message.parent_root]
block_time = pre_state.genesis_time + signed_block.message.slot * spec.SECONDS_PER_SLOT
if store.time < block_time:
spec.on_tick(store, block_time)
spec.on_block(store, signed_block)
def add_attestation_to_store(spec, store, attestation):
parent_block = store.blocks[attestation.data.beacon_block_root]
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
block_time = pre_state.genesis_time + parent_block.slot * spec.SECONDS_PER_SLOT
next_epoch_time = block_time + spec.SLOTS_PER_EPOCH * spec.SECONDS_PER_SLOT
if store.time < next_epoch_time:
spec.on_tick(store, next_epoch_time)
spec.on_attestation(store, attestation)
def get_anchor_root(spec, state):
anchor_block_header = state.latest_block_header.copy()
if anchor_block_header.state_root == spec.Bytes32():
anchor_block_header.state_root = spec.hash_tree_root(state)
return spec.hash_tree_root(anchor_block_header)
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_genesis(spec, state): def test_genesis(spec, state):
@ -182,7 +154,7 @@ def test_filtered_block_tree(spec, state):
attestations = [] attestations = []
for i in range(spec.SLOTS_PER_EPOCH): for i in range(spec.SLOTS_PER_EPOCH):
slot = rogue_block.slot + i slot = rogue_block.slot + i
for index in range(spec.get_committee_count_at_slot(non_viable_state, slot)): for index in range(spec.get_committee_count_per_slot(non_viable_state, spec.compute_epoch_at_slot(slot))):
attestation = get_valid_attestation(spec, non_viable_state, slot, index, signed=True) attestation = get_valid_attestation(spec, non_viable_state, slot, index, signed=True)
attestations.append(attestation) attestations.append(attestation)

View File

@ -16,20 +16,22 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
indexed_attestation = spec.get_indexed_attestation(state, attestation) indexed_attestation = spec.get_indexed_attestation(state, attestation)
spec.on_attestation(store, attestation) spec.on_attestation(store, attestation)
sample_index = indexed_attestation.attesting_indices[0]
if spec.fork == PHASE0: if spec.fork == PHASE0:
sample_index = indexed_attestation.attesting_indices[0] latest_message = spec.LatestMessage(
else:
attesting_indices = [
index for i, index in enumerate(indexed_attestation.committee)
if attestation.aggregation_bits[i]
]
sample_index = attesting_indices[0]
assert (
store.latest_messages[sample_index] ==
spec.LatestMessage(
epoch=attestation.data.target.epoch, epoch=attestation.data.target.epoch,
root=attestation.data.beacon_block_root, root=attestation.data.beacon_block_root,
) )
else:
latest_message = spec.LatestMessage(
epoch=attestation.data.target.epoch,
root=attestation.data.beacon_block_root,
shard=attestation.data.shard,
shard_root=attestation.data.shard_head_root,
)
assert (
store.latest_messages[sample_index] == latest_message
) )
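The branch above reflects the two LatestMessage shapes checked by this helper: phase 0 tracks only the target epoch and beacon block root, while phase 1 additionally records the attested shard and shard head root. An illustrative-only sketch of those shapes, with field names taken from the assertions above (these are not the canonical SSZ container definitions):

# Illustrative-only container shapes mirroring the fields asserted above.
from dataclasses import dataclass

@dataclass
class LatestMessagePhase0Sketch:
    epoch: int
    root: bytes

@dataclass
class LatestMessagePhase1Sketch(LatestMessagePhase0Sketch):
    shard: int          # attestation.data.shard
    shard_root: bytes   # attestation.data.shard_head_root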
@ -156,9 +158,9 @@ def test_on_attestation_inconsistent_target_and_head(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_on_attestation_target_not_in_store(spec, state): def test_on_attestation_target_block_not_in_store(spec, state):
store = spec.get_forkchoice_store(state) store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH time = store.time + spec.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1)
spec.on_tick(store, time) spec.on_tick(store, time)
# move to immediately before next epoch to make block new target # move to immediately before next epoch to make block new target
@ -176,11 +178,63 @@ def test_on_attestation_target_not_in_store(spec, state):
run_on_attestation(spec, state, store, attestation, False) run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_target_checkpoint_not_in_store(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1)
spec.on_tick(store, time)
# move to immediately before next epoch to make block new target
next_epoch = spec.get_current_epoch(state) + 1
transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1)
target_block = build_empty_block_for_next_slot(spec, state)
signed_target_block = state_transition_and_sign_block(spec, state, target_block)
# add target block to store
spec.on_block(store, signed_target_block)
# target checkpoint state is not yet in store
attestation = get_valid_attestation(spec, state, slot=target_block.slot, signed=True)
assert attestation.data.target.root == target_block.hash_tree_root()
run_on_attestation(spec, state, store, attestation)
@with_all_phases
@spec_state_test
def test_on_attestation_target_checkpoint_not_in_store_diff_slot(spec, state):
store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1)
spec.on_tick(store, time)
# move to two slots before next epoch to make target block one before an empty slot
next_epoch = spec.get_current_epoch(state) + 1
transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 2)
target_block = build_empty_block_for_next_slot(spec, state)
signed_target_block = state_transition_and_sign_block(spec, state, target_block)
# add target block to store
spec.on_block(store, signed_target_block)
# target checkpoint state is not yet in store
attestation_slot = target_block.slot + 1
transition_to(spec, state, attestation_slot)
attestation = get_valid_attestation(spec, state, slot=attestation_slot, signed=True)
assert attestation.data.target.root == target_block.hash_tree_root()
run_on_attestation(spec, state, store, attestation)
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_on_attestation_beacon_block_not_in_store(spec, state): def test_on_attestation_beacon_block_not_in_store(spec, state):
store = spec.get_forkchoice_store(state) store = spec.get_forkchoice_store(state)
time = store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH time = store.time + spec.SECONDS_PER_SLOT * (spec.SLOTS_PER_EPOCH + 1)
spec.on_tick(store, time) spec.on_tick(store, time)
# move to immediately before next epoch to make block new target # move to immediately before next epoch to make block new target

View File

@ -1,5 +1,9 @@
from eth2spec.test.context import spec_state_test, always_bls, with_all_phases from eth2spec.test.context import (
from eth2spec.test.helpers.attestations import build_attestation_data spec_state_test,
always_bls, with_phases, with_all_phases,
PHASE0,
)
from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation
from eth2spec.test.helpers.block import build_empty_block from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.deposits import prepare_state_and_deposit from eth2spec.test.helpers.deposits import prepare_state_and_deposit
from eth2spec.test.helpers.keys import privkeys, pubkeys from eth2spec.test.helpers.keys import privkeys, pubkeys
@ -22,7 +26,7 @@ def run_get_committee_assignment(spec, state, epoch, validator_index, valid=True
committee, committee_index, slot = assignment committee, committee_index, slot = assignment
assert spec.compute_epoch_at_slot(slot) == epoch assert spec.compute_epoch_at_slot(slot) == epoch
assert committee == spec.get_beacon_committee(state, slot, committee_index) assert committee == spec.get_beacon_committee(state, slot, committee_index)
assert committee_index < spec.get_committee_count_at_slot(state, slot) assert committee_index < spec.get_committee_count_per_slot(state, epoch)
assert validator_index in committee assert validator_index in committee
assert valid assert valid
except AssertionError: except AssertionError:
@ -36,9 +40,9 @@ def run_is_candidate_block(spec, eth1_block, period_start, success=True):
def get_min_new_period_epochs(spec): def get_min_new_period_epochs(spec):
return int( return (
spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 # to seconds (spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2) # to seconds
/ spec.SECONDS_PER_SLOT / spec.SLOTS_PER_EPOCH // spec.SECONDS_PER_SLOT // spec.SLOTS_PER_EPOCH
) )
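The rewrite above swaps float division wrapped in int() for floor division, so the helper stays in integer arithmetic end to end. A quick check that the two forms agree for this expression, using assumed example constants (roughly mainnet-like; the test itself reads the real values from spec):

# Example constants for illustration only.
SECONDS_PER_ETH1_BLOCK = 14
ETH1_FOLLOW_DISTANCE = 1024
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32

seconds = SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2      # 28672
old_style = int(seconds / SECONDS_PER_SLOT / SLOTS_PER_EPOCH)    # 74, via float division
new_style = seconds // SECONDS_PER_SLOT // SLOTS_PER_EPOCH       # 74, pure integer math
assert old_style == new_style == 74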
@ -314,21 +318,35 @@ def test_get_block_signature(spec, state):
) )
@with_all_phases
@spec_state_test
def test_compute_fork_digest(spec, state):
actual_fork_digest = spec.compute_fork_digest(state.fork.current_version, state.genesis_validators_root)
expected_fork_data_root = spec.hash_tree_root(
spec.ForkData(current_version=state.fork.current_version,
genesis_validators_root=state.genesis_validators_root))
expected_fork_digest = spec.ForkDigest(expected_fork_data_root[:4])
assert actual_fork_digest == expected_fork_digest
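The test above recomputes the digest by hand: hash the ForkData container and keep the first four bytes. A hedged restatement of that derivation as a standalone helper (the function name here is a stand-in, not a spec import):

# Stand-in sketch: fork digest = first 4 bytes of hash_tree_root(ForkData).
def compute_fork_digest_sketch(spec, current_version, genesis_validators_root):
    fork_data_root = spec.hash_tree_root(
        spec.ForkData(
            current_version=current_version,
            genesis_validators_root=genesis_validators_root,
        )
    )
    return spec.ForkDigest(fork_data_root[:4])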
# Attesting # Attesting
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
@always_bls @always_bls
def test_get_attestation_signature(spec, state): def test_get_attestation_signature_phase0(spec, state):
privkey = privkeys[0] privkey = privkeys[0]
pubkey = pubkeys[0] pubkey = pubkeys[0]
attestation_data = spec.AttestationData(slot=10) attestation = get_valid_attestation(spec, state, signed=False)
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch) domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
run_get_signature_test( run_get_signature_test(
spec=spec, spec=spec,
state=state, state=state,
obj=attestation_data, obj=attestation.data,
domain=domain, domain=domain,
get_signature_fn=spec.get_attestation_signature, get_signature_fn=spec.get_attestation_signature,
privkey=privkey, privkey=privkey,
@ -336,6 +354,21 @@ def test_get_attestation_signature(spec, state):
) )
@with_all_phases
@spec_state_test
def test_compute_subnet_for_attestation(spec, state):
for committee_idx in range(spec.MAX_COMMITTEES_PER_SLOT):
for slot in range(state.slot, state.slot + spec.SLOTS_PER_EPOCH):
committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot))
actual_subnet_id = spec.compute_subnet_for_attestation(committees_per_slot, slot, committee_idx)
slots_since_epoch_start = slot % spec.SLOTS_PER_EPOCH
committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
expected_subnet_id = (committees_since_epoch_start + committee_idx) % spec.ATTESTATION_SUBNET_COUNT
assert actual_subnet_id == expected_subnet_id
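The loop above checks the subnet mapping against its definition: count the committees seen since the start of the epoch, add the committee index, and reduce modulo the subnet count. A worked example under assumed numbers (the constants and committee count below are illustrative; the test reads the real values from spec and state):

# Illustrative numbers only.
ATTESTATION_SUBNET_COUNT = 64
SLOTS_PER_EPOCH = 32

committees_per_slot = 4
slot = 35              # 3 slots into its epoch
committee_index = 2

slots_since_epoch_start = slot % SLOTS_PER_EPOCH                               # 3
committees_since_epoch_start = committees_per_slot * slots_since_epoch_start   # 12
subnet_id = (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
assert subnet_id == 14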
# Attestation aggregation # Attestation aggregation
@ -363,7 +396,7 @@ def test_get_slot_signature(spec, state):
@always_bls @always_bls
def test_is_aggregator(spec, state): def test_is_aggregator(spec, state):
# TODO: we can test the probabilistic result against `TARGET_AGGREGATORS_PER_COMMITTEE` # TODO: we can test the probabilistic result against `TARGET_AGGREGATORS_PER_COMMITTEE`
# if we have more validators and larger committeee size # if we have more validators and larger committee size
slot = state.slot slot = state.slot
committee_index = 0 committee_index = 0
has_aggregator = False has_aggregator = False
@ -377,7 +410,7 @@ def test_is_aggregator(spec, state):
assert has_aggregator assert has_aggregator
@with_all_phases @with_phases([PHASE0])
@spec_state_test @spec_state_test
@always_bls @always_bls
def test_get_aggregate_signature(spec, state): def test_get_aggregate_signature(spec, state):

View File

@ -1,4 +1,5 @@
from eth2spec.test.context import ( from eth2spec.test.context import (
PHASE0,
with_all_phases_except, with_all_phases_except,
spec_state_test, spec_state_test,
always_bls, always_bls,
@ -11,7 +12,7 @@ from eth2spec.test.helpers.attestations import (
) )
@with_all_phases_except(['phase0']) @with_all_phases_except([PHASE0])
@spec_state_test @spec_state_test
@always_bls @always_bls
def test_on_time_success(spec, state): def test_on_time_success(spec, state):
@ -22,25 +23,12 @@ def test_on_time_success(spec, state):
yield from run_attestation_processing(spec, state, attestation) yield from run_attestation_processing(spec, state, attestation)
@with_all_phases_except(['phase0']) @with_all_phases_except([PHASE0])
@spec_state_test @spec_state_test
@always_bls @always_bls
def test_on_time_empty_custody_bits_blocks(spec, state): def test_late_success(spec, state):
attestation = get_valid_late_attestation(spec, state, signed=True) attestation = get_valid_late_attestation(spec, state, signed=True)
assert not any(attestation.custody_bits_blocks)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
yield from run_attestation_processing(spec, state, attestation, False)
@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_late_with_custody_bits_blocks(spec, state):
attestation = get_valid_on_time_attestation(spec, state, signed=True)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY + 1) transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY + 1)
yield from run_attestation_processing(spec, state, attestation, False) yield from run_attestation_processing(spec, state, attestation)

View File

@ -0,0 +1,306 @@
from eth2spec.test.helpers.custody import (
get_valid_chunk_challenge,
get_valid_custody_chunk_response,
get_sample_shard_transition,
)
from eth2spec.test.helpers.attestations import (
get_valid_on_time_attestation,
)
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
expect_assertion_error,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
def run_chunk_challenge_processing(spec, state, custody_chunk_challenge, valid=True):
"""
Run ``process_chunk_challenge``, yielding:
- pre-state ('pre')
- CustodyChunkChallenge ('custody_chunk_challenge')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
yield 'pre', state
yield 'custody_chunk_challenge', custody_chunk_challenge
if not valid:
expect_assertion_error(lambda: spec.process_chunk_challenge(state, custody_chunk_challenge))
yield 'post', None
return
spec.process_chunk_challenge(state, custody_chunk_challenge)
assert state.custody_chunk_challenge_records[state.custody_chunk_challenge_index - 1].responder_index == \
custody_chunk_challenge.responder_index
assert state.custody_chunk_challenge_records[state.custody_chunk_challenge_index - 1].chunk_index == \
custody_chunk_challenge.chunk_index
yield 'post', state
def run_custody_chunk_response_processing(spec, state, custody_response, valid=True):
"""
Run ``process_chunk_challenge_response``, yielding:
- pre-state ('pre')
- CustodyChunkResponse ('custody_response')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
yield 'pre', state
yield 'custody_response', custody_response
if not valid:
expect_assertion_error(lambda: spec.process_chunk_challenge_response(state, custody_response))
yield 'post', None
return
spec.process_chunk_challenge_response(state, custody_response)
assert state.custody_chunk_challenge_records[custody_response.challenge_index] == spec.CustodyChunkChallengeRecord()
yield 'post', state
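Every test in this new file follows the same skeleton: build a sample shard transition, attest to it on time, include the attestation after the inclusion delay, fast-forward within the custody period, then issue (and in the response tests, answer) a chunk challenge. A condensed, descriptive outline of that shared flow, using only the helpers imported at the top of this file (this is not an additional test case):

# Condensed outline of the shared flow in the tests below (descriptive only).
def chunk_challenge_flow_outline(spec, state):
    shard = 0
    offset_slots = spec.get_offset_slots(state, shard)
    # 1. Build a shard transition covering the offset slots and attest to it on time.
    shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
    attestation = get_valid_on_time_attestation(
        spec, state, index=shard, signed=True, shard_transition=shard_transition)
    # 2. Include the attestation once MIN_ATTESTATION_INCLUSION_DELAY has passed.
    transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
    _, _, _ = run_attestation_processing(spec, state, attestation)
    # 3. Later in the custody period, challenge a chunk of the attested shard data.
    transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD)
    challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
    # 4. The response tests then answer the challenge to clear the record.
    return challenge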
@with_all_phases_except([PHASE0])
@spec_state_test
def test_challenge_appended(spec, state):
transition_to(spec, state, state.slot + 1)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD)
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
yield from run_chunk_challenge_processing(spec, state, challenge)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_challenge_empty_element_replaced(spec, state):
transition_to(spec, state, state.slot + 1)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD)
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
state.custody_chunk_challenge_records.append(spec.CustodyChunkChallengeRecord())
yield from run_chunk_challenge_processing(spec, state, challenge)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_duplicate_challenge(spec, state):
transition_to(spec, state, state.slot + 1)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD)
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
_, _, _ = run_chunk_challenge_processing(spec, state, challenge)
yield from run_chunk_challenge_processing(spec, state, challenge, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_second_challenge(spec, state):
transition_to(spec, state, state.slot + 1)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD)
challenge0 = get_valid_chunk_challenge(spec, state, attestation, shard_transition, chunk_index=0)
_, _, _ = run_chunk_challenge_processing(spec, state, challenge0)
challenge1 = get_valid_chunk_challenge(spec, state, attestation, shard_transition, chunk_index=1)
yield from run_chunk_challenge_processing(spec, state, challenge1)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_multiple_epochs_custody(spec, state):
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 3)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
yield from run_chunk_challenge_processing(spec, state, challenge)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_many_epochs_custody(spec, state):
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 20)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
yield from run_chunk_challenge_processing(spec, state, challenge)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_off_chain_attestation(spec, state):
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
yield from run_chunk_challenge_processing(spec, state, challenge)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_custody_response(spec, state):
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
_, _, _ = run_chunk_challenge_processing(spec, state, challenge)
chunk_challenge_index = state.custody_chunk_challenge_index - 1
custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3)
yield from run_custody_chunk_response_processing(spec, state, custody_response)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_custody_response_multiple_epochs(spec, state):
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 3)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
_, _, _ = run_chunk_challenge_processing(spec, state, challenge)
chunk_challenge_index = state.custody_chunk_challenge_index - 1
custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3)
yield from run_custody_chunk_response_processing(spec, state, custody_response)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_custody_response_many_epochs(spec, state):
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * 20)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
_, _, _ = run_chunk_challenge_processing(spec, state, challenge)
chunk_challenge_index = state.custody_chunk_challenge_index - 1
custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3)
yield from run_custody_chunk_response_processing(spec, state, custody_response)

View File

@ -28,30 +28,14 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru
pre_next_custody_secret_to_reveal = \ pre_next_custody_secret_to_reveal = \
state.validators[revealer_index].next_custody_secret_to_reveal state.validators[revealer_index].next_custody_secret_to_reveal
pre_reveal_lateness = state.validators[revealer_index].max_reveal_lateness
spec.process_custody_key_reveal(state, custody_key_reveal) spec.process_custody_key_reveal(state, custody_key_reveal)
post_next_custody_secret_to_reveal = \ post_next_custody_secret_to_reveal = \
state.validators[revealer_index].next_custody_secret_to_reveal state.validators[revealer_index].next_custody_secret_to_reveal
post_reveal_lateness = state.validators[revealer_index].max_reveal_lateness
assert post_next_custody_secret_to_reveal == pre_next_custody_secret_to_reveal + 1 assert post_next_custody_secret_to_reveal == pre_next_custody_secret_to_reveal + 1
if spec.get_current_epoch(state) > spec.get_randao_epoch_for_custody_period(
pre_next_custody_secret_to_reveal,
revealer_index
) + spec.EPOCHS_PER_CUSTODY_PERIOD:
assert post_reveal_lateness > 0
if pre_reveal_lateness == 0:
assert post_reveal_lateness == spec.get_current_epoch(state) - spec.get_randao_epoch_for_custody_period(
pre_next_custody_secret_to_reveal,
revealer_index
) - spec.EPOCHS_PER_CUSTODY_PERIOD
else:
if pre_reveal_lateness > 0:
assert post_reveal_lateness < pre_reveal_lateness
yield 'post', state yield 'post', state
@ -103,17 +87,3 @@ def test_double_reveal(spec, state):
_, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal) _, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False) yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_max_decrement(spec, state):
state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150
custody_key_reveal = get_valid_custody_key_reveal(spec, state)
_, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)
custody_key_reveal2 = get_valid_custody_key_reveal(spec, state)
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal2)

View File

@ -0,0 +1,138 @@
from eth2spec.test.helpers.custody import (
get_valid_custody_slashing,
get_custody_secret,
get_custody_slashable_shard_transition,
)
from eth2spec.test.helpers.attestations import (
get_valid_on_time_attestation,
)
from eth2spec.utils.ssz.ssz_typing import ByteList
from eth2spec.test.helpers.state import get_balance, transition_to
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
expect_assertion_error,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
def run_custody_slashing_processing(spec, state, custody_slashing, valid=True, correct=True):
"""
Run ``process_custody_slashing``, yielding:
- pre-state ('pre')
- SignedCustodySlashing ('custody_slashing')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
yield 'pre', state
yield 'custody_slashing', custody_slashing
if not valid:
expect_assertion_error(lambda: spec.process_custody_slashing(state, custody_slashing))
yield 'post', None
return
if correct:
pre_slashed_balance = get_balance(state, custody_slashing.message.malefactor_index)
else:
pre_slashed_balance = get_balance(state, custody_slashing.message.whistleblower_index)
spec.process_custody_slashing(state, custody_slashing)
if correct:
slashed_validator = state.validators[custody_slashing.message.malefactor_index]
assert get_balance(state, custody_slashing.message.malefactor_index) < pre_slashed_balance
else:
slashed_validator = state.validators[custody_slashing.message.whistleblower_index]
assert get_balance(state, custody_slashing.message.whistleblower_index) < pre_slashed_balance
assert slashed_validator.slashed
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
yield 'post', state
def run_standard_custody_slashing_test(spec,
state,
shard_lateness=None,
shard=None,
validator_index=None,
block_lengths=None,
slashing_message_data=None,
correct=True,
valid=True):
if shard_lateness is None:
shard_lateness = spec.SLOTS_PER_EPOCH
transition_to(spec, state, state.slot + shard_lateness)
if shard is None:
shard = 0
if validator_index is None:
validator_index = spec.get_beacon_committee(state, state.slot, shard)[0]
offset_slots = spec.get_offset_slots(state, shard)
if block_lengths is None:
block_lengths = [2**15 // 3] * len(offset_slots)
custody_secret = get_custody_secret(spec, state, validator_index)
shard_transition, slashable_test_vector = get_custody_slashable_shard_transition(
spec,
state.slot,
block_lengths,
custody_secret,
slashable=correct,
)
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
slashing = get_valid_custody_slashing(spec, state, attestation, shard_transition,
custody_secret, slashable_test_vector)
if slashing_message_data is not None:
slashing.message.data = slashing_message_data
yield from run_custody_slashing_processing(spec, state, slashing, valid=valid, correct=correct)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_custody_slashing(spec, state):
yield from run_standard_custody_slashing_test(spec, state)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_incorrect_custody_slashing(spec, state):
yield from run_standard_custody_slashing_test(spec, state, correct=False)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_multiple_epochs_custody(spec, state):
yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 3)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_many_epochs_custody(spec, state):
yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 10)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_custody_slashing(spec, state):
yield from run_standard_custody_slashing_test(
spec,
state,
slashing_message_data=ByteList[spec.MAX_SHARD_BLOCK_SIZE](),
valid=False,
)

View File

@ -0,0 +1,208 @@
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
)
from eth2spec.test.helpers.attestations import (
get_valid_attestation,
get_valid_on_time_attestation,
run_attestation_processing,
)
from eth2spec.test.helpers.shard_transitions import (
run_shard_transitions_processing,
is_full_crosslink,
)
from eth2spec.test.helpers.shard_block import (
build_shard_block,
get_shard_transitions,
get_sample_shard_block_body,
get_committee_index_of_shard,
)
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot, next_slot
def get_initial_env(spec, state, target_len_offset_slot):
state = transition_to_valid_shard_slot(spec, state)
committee_index = spec.CommitteeIndex(0)
target_shard_slot = state.slot + target_len_offset_slot - 1
shard = spec.compute_shard_from_committee_index(state, committee_index, target_shard_slot)
assert state.shard_states[shard].slot == state.slot - 1
return state, shard, target_shard_slot
def get_attestations_and_shard_transitions(spec, state, shard_block_dict):
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
attestations = [
get_valid_on_time_attestation(
spec, state,
index=get_committee_index_of_shard(spec, state, state.slot, shard),
shard_transition=shard_transition,
signed=True,
)
for shard, shard_transition in enumerate(shard_transitions)
if shard_transition != spec.ShardTransition()
]
return attestations, shard_transitions
def run_successful_crosslink_tests(spec, state, target_len_offset_slot):
state, shard, target_shard_slot = get_initial_env(spec, state, target_len_offset_slot)
init_slot = state.slot
# Create SignedShardBlock at init_slot
shard_block = build_shard_block(
spec, state, shard,
slot=init_slot, body=get_sample_shard_block_body(spec, is_max=True), signed=True
)
# Transition state to target shard slot
transition_to(spec, state, target_shard_slot)
# Create the shard_transitions that would be included at beacon block `target_shard_slot + 1`
shard_block_dict = {shard: [shard_block]}
attestations, shard_transitions = get_attestations_and_shard_transitions(spec, state, shard_block_dict)
next_slot(spec, state)
for attestation in attestations:
_, _, _ = run_attestation_processing(spec, state, attestation)
_, winning_roots = spec.get_shard_winning_roots(state, attestations)
assert len(winning_roots) == 1
shard_transition = shard_transitions[shard]
assert winning_roots[0] == shard_transition.hash_tree_root()
pre_gasprice = state.shard_states[shard].gasprice
pre_shard_states = state.shard_states.copy()
yield from run_shard_transitions_processing(spec, state, shard_transitions, attestations)
for index, shard_state in enumerate(state.shard_states):
if index == shard:
assert shard_state != pre_shard_states[index]
assert shard_state == shard_transition.shard_states[len(shard_transition.shard_states) - 1]
assert shard_state.latest_block_root == shard_block.message.hash_tree_root()
if target_len_offset_slot == 1:
assert shard_state.gasprice > pre_gasprice
else:
assert shard_state == pre_shard_states[index]
for pending_attestation in state.current_epoch_attestations:
assert bool(pending_attestation.crosslink_success) is True
@with_all_phases_except([PHASE0])
@spec_state_test
def test_basic_crosslinks(spec, state):
if not is_full_crosslink(spec, state):
# Skip this test
return
yield from run_successful_crosslink_tests(spec, state, target_len_offset_slot=1)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_multiple_offset_slots(spec, state):
if not is_full_crosslink(spec, state):
# Skip this test
return
yield from run_successful_crosslink_tests(spec, state, target_len_offset_slot=2)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_no_winning_root(spec, state):
if not is_full_crosslink(spec, state):
# Skip this test
return
state, shard, target_shard_slot = get_initial_env(spec, state, target_len_offset_slot=1)
init_slot = state.slot
# Create SignedShardBlock at init_slot
shard_block = build_shard_block(
spec, state, shard,
slot=init_slot, body=get_sample_shard_block_body(spec, is_max=True), signed=True
)
# Transition state to target shard slot
transition_to(spec, state, target_shard_slot)
# Create the shard_transitions that would be included at beacon block `target_shard_slot + 1`
shard_transitions = get_shard_transitions(spec, state, {shard: [shard_block]})
shard_transition = shard_transitions[shard]
committee_index = get_committee_index_of_shard(spec, state, state.slot, shard)
attestation = get_valid_attestation(
spec, state,
index=committee_index,
shard_transition=shard_transition,
# Decrease attested participants to 1/3 committee
filter_participant_set=lambda committee: set(list(committee)[:len(committee) // 3]),
signed=True,
on_time=True,
)
next_slot(spec, state)
_, _, _ = run_attestation_processing(spec, state, attestation)
_, winning_roots = spec.get_shard_winning_roots(state, [attestation])
assert len(winning_roots) == 0
# No winning root, shard_transitions[shard] is empty
shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
pre_shard_states = state.shard_states.copy()
yield from run_shard_transitions_processing(spec, state, shard_transitions, [attestation])
for pending_attestation in state.current_epoch_attestations:
assert bool(pending_attestation.crosslink_success) is False
assert state.shard_states == pre_shard_states
@with_all_phases_except([PHASE0])
@spec_state_test
def test_wrong_shard_transition_root(spec, state):
if not is_full_crosslink(spec, state):
# Skip this test
return
state, shard, target_shard_slot = get_initial_env(spec, state, target_len_offset_slot=1)
init_slot = state.slot
# Create SignedShardBlock at init_slot
shard_block = build_shard_block(
spec, state, shard,
slot=init_slot, body=get_sample_shard_block_body(spec, is_max=True), signed=True
)
# Transition state to target shard slot
transition_to(spec, state, target_shard_slot)
# Create the shard_transitions that would be included at beacon block `target_shard_slot + 1`
shard_transitions = get_shard_transitions(spec, state, {shard: [shard_block]})
shard_transition = shard_transitions[shard]
wrong_shard_transition = shard_transition.copy()
wrong_shard_transition.shard_states[shard].gasprice = shard_transition.shard_states[shard].gasprice + 1
committee_index = get_committee_index_of_shard(spec, state, state.slot, shard)
attestation = get_valid_attestation(
spec, state,
index=committee_index,
shard_transition=wrong_shard_transition,
signed=True,
on_time=True,
)
attestations = [attestation]
next_slot(spec, state)
run_attestation_processing(spec, state, attestation)
# Check if winning root != shard_transition.hash_tree_root()
_, winning_roots = spec.get_shard_winning_roots(state, attestations)
assert len(winning_roots) == 1
shard_transition = shard_transitions[shard]
assert winning_roots[0] != shard_transition.hash_tree_root()
yield from run_shard_transitions_processing(spec, state, shard_transitions, attestations, valid=False)

View File

@ -0,0 +1,58 @@
from eth2spec.test.helpers.custody import (
get_valid_chunk_challenge,
get_sample_shard_transition,
)
from eth2spec.test.helpers.attestations import (
get_valid_on_time_attestation,
)
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
run_chunk_challenge_processing,
)
def run_process_challenge_deadlines(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')
@with_all_phases_except([PHASE0])
@spec_state_test
def test_validator_slashed_after_chunk_challenge(spec, state):
transition_to(spec, state, state.slot + 1)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
validator_index = spec.get_beacon_committee(
state,
attestation.data.slot,
attestation.data.index
)[0]
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
_, _, _ = run_chunk_challenge_processing(spec, state, challenge)
assert state.validators[validator_index].slashed == 0
transition_to(spec, state, state.slot + spec.MAX_CHUNK_CHALLENGE_DELAY * spec.SLOTS_PER_EPOCH)
state.validators[validator_index].slashed = 0
yield from run_process_challenge_deadlines(spec, state)
assert state.validators[validator_index].slashed == 1

View File

@ -0,0 +1,169 @@
from eth2spec.test.context import (
PHASE0,
)
from eth2spec.test.helpers.custody import (
get_valid_chunk_challenge,
get_valid_custody_chunk_response,
get_valid_custody_key_reveal,
get_sample_shard_transition
)
from eth2spec.test.helpers.attestations import (
get_valid_on_time_attestation,
)
from eth2spec.test.helpers.state import next_epoch_via_block, transition_to
from eth2spec.test.context import (
with_all_phases_except,
spec_state_test,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
run_chunk_challenge_processing,
run_custody_chunk_response_processing,
)
from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing
def run_process_custody_final_updates(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_custody_final_updates')
@with_all_phases_except([PHASE0])
@spec_state_test
def test_validator_withdrawal_delay(spec, state):
spec.initiate_validator_exit(state, 0)
assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
yield from run_process_custody_final_updates(spec, state)
assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
@with_all_phases_except([PHASE0])
@spec_state_test
def test_validator_withdrawal_reenable_after_custody_reveal(spec, state):
spec.initiate_validator_exit(state, 0)
assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
next_epoch_via_block(spec, state)
assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
while spec.get_current_epoch(state) < state.validators[0].exit_epoch:
next_epoch_via_block(spec, state)
while (state.validators[0].next_custody_secret_to_reveal
<= spec.get_custody_period_for_validator(0, state.validators[0].exit_epoch - 1)):
custody_key_reveal = get_valid_custody_key_reveal(spec, state, validator_index=0)
_, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)
yield from run_process_custody_final_updates(spec, state)
assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
@with_all_phases_except([PHASE0])
@spec_state_test
def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state):
transition_to(spec, state, state.slot + 1)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
validator_index = spec.get_beacon_committee(
state,
attestation.data.slot,
attestation.data.index
)[0]
spec.initiate_validator_exit(state, validator_index)
assert state.validators[validator_index].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
while spec.get_current_epoch(state) < state.validators[validator_index].exit_epoch:
next_epoch_via_block(spec, state)
while (state.validators[validator_index].next_custody_secret_to_reveal
<= spec.get_custody_period_for_validator(
validator_index,
state.validators[validator_index].exit_epoch - 1)):
custody_key_reveal = get_valid_custody_key_reveal(spec, state, validator_index=validator_index)
_, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)
next_epoch_via_block(spec, state)
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
_, _, _ = run_chunk_challenge_processing(spec, state, challenge)
yield from run_process_custody_final_updates(spec, state)
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
@with_all_phases_except([PHASE0])
@spec_state_test
def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state):
transition_to(spec, state, state.slot + 1)
shard = 0
offset_slots = spec.get_offset_slots(state, shard)
shard_transition = get_sample_shard_transition(spec, state.slot, [2**15 // 3] * len(offset_slots))
attestation = get_valid_on_time_attestation(spec, state, index=shard, signed=True,
shard_transition=shard_transition)
transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
_, _, _ = run_attestation_processing(spec, state, attestation)
validator_index = spec.get_beacon_committee(
state,
attestation.data.slot,
attestation.data.index
)[0]
spec.initiate_validator_exit(state, validator_index)
assert state.validators[validator_index].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
next_epoch_via_block(spec, state)
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
while spec.get_current_epoch(state) < state.validators[validator_index].exit_epoch:
next_epoch_via_block(spec, state)
while (state.validators[validator_index].next_custody_secret_to_reveal
<= spec.get_custody_period_for_validator(
validator_index,
state.validators[validator_index].exit_epoch - 1)):
custody_key_reveal = get_valid_custody_key_reveal(spec, state, validator_index=validator_index)
_, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)
next_epoch_via_block(spec, state)
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transition)
_, _, _ = run_chunk_challenge_processing(spec, state, challenge)
next_epoch_via_block(spec, state)
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
chunk_challenge_index = state.custody_chunk_challenge_index - 1
custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=2**15 // 3)
_, _, _ = run_custody_chunk_response_processing(spec, state, custody_response)
yield from run_process_custody_final_updates(spec, state)
assert state.validators[validator_index].withdrawable_epoch < spec.FAR_FUTURE_EPOCH

View File

@ -0,0 +1,51 @@
from eth2spec.test.helpers.custody import (
get_valid_custody_key_reveal,
)
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
)
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing
def run_process_challenge_deadlines(spec, state):
yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')
@with_all_phases_except([PHASE0])
@spec_state_test
def test_validator_slashed_after_reveal_deadline(spec, state):
assert state.validators[0].slashed == 0
transition_to(spec, state, spec.get_randao_epoch_for_custody_period(0, 0) * spec.SLOTS_PER_EPOCH)
# Need to run at least one reveal so that not all validators are slashed (otherwise spec fails to find proposers)
custody_key_reveal = get_valid_custody_key_reveal(spec, state, validator_index=1)
_, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)
transition_to(spec, state, state.slot + spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH)
state.validators[0].slashed = 0
yield from run_process_challenge_deadlines(spec, state)
assert state.validators[0].slashed == 1
@with_all_phases_except([PHASE0])
@spec_state_test
def test_validator_not_slashed_after_reveal(spec, state):
transition_to(spec, state, spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH)
custody_key_reveal = get_valid_custody_key_reveal(spec, state)
_, _, _ = run_custody_key_reveal_processing(spec, state, custody_key_reveal)
assert state.validators[0].slashed == 0
transition_to(spec, state, state.slot + spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH)
yield from run_process_challenge_deadlines(spec, state)
assert state.validators[0].slashed == 0

View File

@ -0,0 +1,242 @@
from typing import Dict, Sequence
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
)
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.custody import (
get_custody_secret,
get_custody_slashable_test_vector,
get_valid_chunk_challenge,
get_valid_custody_chunk_response,
get_valid_custody_key_reveal,
get_valid_custody_slashing,
get_valid_early_derived_secret_reveal,
)
from eth2spec.test.helpers.shard_block import (
build_shard_block,
get_committee_index_of_shard,
get_sample_shard_block_body,
get_shard_transitions,
)
from eth2spec.test.helpers.shard_transitions import is_full_crosslink
from eth2spec.test.helpers.state import state_transition_and_sign_block, transition_to_valid_shard_slot, transition_to
def run_beacon_block(spec, state, block, valid=True):
yield 'pre', state.copy()
if not valid:
signed_beacon_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)
yield 'block', signed_beacon_block
yield 'post', None
return
signed_beacon_block = state_transition_and_sign_block(spec, state, block)
yield 'block', signed_beacon_block
yield 'post', state
#
# Beacon block with non-empty shard transitions
#
def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard, valid=True):
transition_to(spec, state, state.slot + target_len_offset_slot)
body = get_sample_shard_block_body(spec, is_max=True)
shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True)
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
attestations = [
get_valid_on_time_attestation(
spec,
state,
index=committee_index,
shard_transition=shard_transitions[shard],
signed=True,
)
for shard in shard_block_dict.keys()
]
beacon_block = build_empty_block(spec, state, slot=state.slot + 1)
beacon_block.body.attestations = attestations
beacon_block.body.shard_transitions = shard_transitions
pre_gasprice = state.shard_states[shard].gasprice
pre_shard_states = state.shard_states.copy()
yield 'pre', state.copy()
if not valid:
state_transition_and_sign_block(spec, state, beacon_block, expect_fail=True)
yield 'block', beacon_block
yield 'post', None
return
signed_beacon_block = state_transition_and_sign_block(spec, state, beacon_block)
yield 'block', signed_beacon_block
yield 'post', state
for shard in range(spec.get_active_shard_count(state)):
post_shard_state = state.shard_states[shard]
if shard in shard_block_dict:
# Shard state has been changed to state_transition result
assert post_shard_state == shard_transitions[shard].shard_states[
len(shard_transitions[shard].shard_states) - 1
]
assert post_shard_state.slot == state.slot - 1
if len(shard_block_dict[shard]) == 0:
# `latest_block_root` is the same
assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root
if target_len_offset_slot == 1 and len(shard_block_dict[shard]) > 0:
assert post_shard_state.gasprice > pre_gasprice
@with_all_phases_except([PHASE0])
@spec_state_test
def test_process_beacon_block_with_normal_shard_transition(spec, state):
# NOTE: this test is only for full crosslink (minimal config), not for mainnet
if not is_full_crosslink(spec, state):
# skip
return
state = transition_to_valid_shard_slot(spec, state)
target_len_offset_slot = 1
committee_index = spec.CommitteeIndex(0)
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1)
assert state.shard_states[shard].slot == state.slot - 1
yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_process_beacon_block_with_empty_proposal_transition(spec, state):
# NOTE: this test is only for full crosslink (minimal config), not for mainnet
if not is_full_crosslink(spec, state):
# skip
return
state = transition_to_valid_shard_slot(spec, state)
target_len_offset_slot = 1
committee_index = spec.CommitteeIndex(0)
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1)
assert state.shard_states[shard].slot == state.slot - 1
yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard)
#
# Beacon block with custody operations
#
@with_all_phases_except([PHASE0])
@spec_state_test
def test_with_shard_transition_with_custody_challenge_and_response(spec, state):
# NOTE: this test is only for full crosslink (minimal config), not for mainnet
if not is_full_crosslink(spec, state):
# skip
return
state = transition_to_valid_shard_slot(spec, state)
# build shard block
shard = 0
committee_index = get_committee_index_of_shard(spec, state, state.slot, shard)
body = get_sample_shard_block_body(spec)
shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True)
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
attestation = get_valid_on_time_attestation(
spec, state, index=committee_index,
shard_transition=shard_transitions[shard], signed=True,
)
block = build_empty_block(spec, state, slot=state.slot + 1)
block.body.attestations = [attestation]
block.body.shard_transitions = shard_transitions
# CustodyChunkChallenge operation
challenge = get_valid_chunk_challenge(spec, state, attestation, shard_transitions[shard])
block.body.chunk_challenges = [challenge]
# CustodyChunkResponse operation
chunk_challenge_index = state.custody_chunk_challenge_index
custody_response = get_valid_custody_chunk_response(
spec, state, challenge, chunk_challenge_index, block_length_or_custody_data=body)
block.body.chunk_challenge_responses = [custody_response]
yield from run_beacon_block(spec, state, block)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_custody_key_reveal(spec, state):
state = transition_to_valid_shard_slot(spec, state)
transition_to(spec, state, state.slot + spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH)
block = build_empty_block(spec, state, slot=state.slot + 1)
custody_key_reveal = get_valid_custody_key_reveal(spec, state)
block.body.custody_key_reveals = [custody_key_reveal]
yield from run_beacon_block(spec, state, block)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_early_derived_secret_reveal(spec, state):
state = transition_to_valid_shard_slot(spec, state)
block = build_empty_block(spec, state, slot=state.slot + 1)
early_derived_secret_reveal = get_valid_early_derived_secret_reveal(spec, state)
block.body.early_derived_secret_reveals = [early_derived_secret_reveal]
yield from run_beacon_block(spec, state, block)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_custody_slashing(spec, state):
# NOTE: this test is only for full crosslink (minimal config), not for mainnet
if not is_full_crosslink(spec, state):
# skip
return
state = transition_to_valid_shard_slot(spec, state)
# Build shard block
shard = 0
committee_index = get_committee_index_of_shard(spec, state, state.slot, shard)
# Create slashable shard block body
validator_index = spec.get_beacon_committee(state, state.slot, committee_index)[0]
custody_secret = get_custody_secret(spec, state, validator_index)
slashable_body = get_custody_slashable_test_vector(spec, custody_secret, length=100, slashable=True)
shard_block = build_shard_block(spec, state, shard, body=slashable_body, slot=state.slot, signed=True)
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
attestation = get_valid_on_time_attestation(
spec, state, index=committee_index,
shard_transition=shard_transitions[shard], signed=True,
)
block = build_empty_block(spec, state, slot=state.slot + 1)
block.body.attestations = [attestation]
block.body.shard_transitions = shard_transitions
_, _, _ = run_beacon_block(spec, state, block)
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1))
block = build_empty_block(spec, state, slot=state.slot + 1)
custody_slashing = get_valid_custody_slashing(
spec, state, attestation, shard_transitions[shard], custody_secret, slashable_body
)
block.body.custody_slashings = [custody_slashing]
yield from run_beacon_block(spec, state, block)

View File

@ -0,0 +1,271 @@
from eth2spec.test.context import (
PHASE0,
always_bls,
expect_assertion_error,
spec_state_test,
with_all_phases_except,
)
from eth2spec.test.helpers.shard_block import (
build_shard_block,
sign_shard_block,
)
from eth2spec.test.helpers.shard_transitions import is_full_crosslink
from eth2spec.test.helpers.state import next_slot, transition_to_valid_shard_slot, transition_to
def run_shard_blocks(spec, shard_state, signed_shard_block, beacon_parent_state, valid=True):
pre_shard_state = shard_state.copy()
yield 'pre', pre_shard_state
yield 'signed_shard_block', signed_shard_block
yield 'beacon_parent_state', beacon_parent_state
if not valid:
expect_assertion_error(
lambda: spec.shard_state_transition(shard_state, signed_shard_block, beacon_parent_state)
)
yield 'post', None
return
spec.shard_state_transition(shard_state, signed_shard_block, beacon_parent_state)
yield 'post', shard_state
# Verify `process_shard_block`
block = signed_shard_block.message
assert shard_state.slot == block.slot
shard_block_length = len(block.body)
assert shard_state.gasprice == spec.compute_updated_gasprice(pre_shard_state.gasprice, shard_block_length)
if shard_block_length != 0:
assert shard_state.latest_block_root == block.hash_tree_root()
else:
assert shard_state.latest_block_root == pre_shard_state.latest_block_root
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_valid_shard_block(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state)
#
# verify_shard_block_message
#
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_shard_parent_root(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.shard_parent_root = b'\x12' * 32
sign_shard_block(spec, beacon_state, shard, signed_shard_block)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_beacon_parent_root(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.beacon_parent_root = b'\x12' * 32
sign_shard_block(spec, beacon_state, shard, signed_shard_block)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_slot(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
signed_shard_block.message.slot = beacon_state.slot + 1
proposer_index = spec.get_shard_proposer_index(beacon_state, signed_shard_block.message.slot, shard)
sign_shard_block(spec, beacon_state, shard, signed_shard_block, proposer_index=proposer_index)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_invalid_proposer_index(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
active_validator_indices = spec.get_active_validator_indices(beacon_state, spec.get_current_epoch(beacon_state))
proposer_index = (
(spec.get_shard_proposer_index(beacon_state, signed_shard_block.message.slot, shard) + 1)
% len(active_validator_indices)
)
signed_shard_block.message.proposer_index = proposer_index
sign_shard_block(spec, beacon_state, shard, signed_shard_block, proposer_index=proposer_index)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_out_of_bound_offset(spec, state):
# TODO: Handle this edge case properly
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
slot = (
beacon_state.shard_states[shard].slot
+ spec.SHARD_BLOCK_OFFSETS[spec.MAX_SHARD_BLOCKS_PER_ATTESTATION - 1]
+ 1 # out-of-bound
)
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_invalid_offset(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
# 4 is not in `SHARD_BLOCK_OFFSETS`
shard = 0
slot = beacon_state.shard_states[shard].slot + 4
assert slot not in spec.SHARD_BLOCK_OFFSETS
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_empty_block_body(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, body=b'', signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
#
# verify_shard_block_signature
#
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_invalid_signature(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=False)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
#
# Other cases
#
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_max_offset(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
slot = beacon_state.shard_states[shard].slot + spec.SHARD_BLOCK_OFFSETS[spec.MAX_SHARD_BLOCKS_PER_ATTESTATION - 1]
transition_to(spec, beacon_state, slot)
shard_state = beacon_state.shard_states[shard]
signed_shard_block = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_pending_shard_parent_block(spec, state):
if not is_full_crosslink(spec, state):
# skip
return
# Block N
beacon_state = transition_to_valid_shard_slot(spec, state)
shard = 0
shard_state = beacon_state.shard_states[shard]
signed_shard_block_1 = build_shard_block(spec, beacon_state, shard, slot=beacon_state.slot, signed=True)
_, _, _, _ = run_shard_blocks(spec, shard_state, signed_shard_block_1, beacon_state)
# Block N+1
next_slot(spec, beacon_state)
signed_shard_block_2 = build_shard_block(
spec, beacon_state, shard,
slot=beacon_state.slot, shard_parent_state=shard_state, signed=True
)
assert signed_shard_block_2.message.shard_parent_root == shard_state.latest_block_root
assert signed_shard_block_2.message.slot == signed_shard_block_1.message.slot + 1
yield from run_shard_blocks(spec, shard_state, signed_shard_block_2, beacon_state)

View File

@ -0,0 +1,129 @@
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.test.context import PHASE0, spec_state_test, with_all_phases_except, never_bls
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
from eth2spec.test.helpers.shard_block import (
build_shard_block,
get_shard_transitions,
get_committee_index_of_shard,
)
from eth2spec.test.helpers.fork_choice import add_block_to_store, get_anchor_root
from eth2spec.test.helpers.state import state_transition_and_sign_block
from eth2spec.test.helpers.block import build_empty_block
def run_on_shard_block(spec, store, shard_store, signed_block, valid=True):
if not valid:
try:
spec.on_shard_block(store, shard_store, signed_block)
except AssertionError:
return
else:
assert False
spec.on_shard_block(store, shard_store, signed_block)
assert shard_store.signed_blocks[hash_tree_root(signed_block.message)] == signed_block
def apply_shard_block(spec, store, shard_store, beacon_parent_state, shard_blocks_buffer):
shard = shard_store.shard
body = b'\x56' * 4
shard_head_root = spec.get_shard_head(store, shard_store)
shard_parent_state = shard_store.block_states[shard_head_root]
assert shard_parent_state.slot != beacon_parent_state.slot
shard_block = build_shard_block(
spec, beacon_parent_state, shard,
shard_parent_state=shard_parent_state, slot=beacon_parent_state.slot, body=body, signed=True
)
shard_blocks_buffer.append(shard_block)
run_on_shard_block(spec, store, shard_store, shard_block)
assert spec.get_shard_head(store, shard_store) == shard_block.message.hash_tree_root()
def check_pending_shard_blocks(spec, store, shard_store, shard_blocks_buffer):
pending_shard_blocks = spec.get_pending_shard_blocks(store, shard_store)
assert pending_shard_blocks == shard_blocks_buffer
def is_in_offset_sets(spec, beacon_head_state, shard):
offset_slots = spec.compute_offset_slots(
beacon_head_state.shard_states[shard].slot, beacon_head_state.slot + 1
)
return beacon_head_state.slot in offset_slots
def apply_shard_and_beacon(spec, state, store, shard_store, shard_blocks_buffer):
store.time = store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
shard = shard_store.shard
committee_index = get_committee_index_of_shard(spec, state, state.slot, shard)
has_shard_committee = committee_index is not None # has committee of `shard` at this slot
beacon_block = build_empty_block(spec, state, slot=state.slot + 1)
# If the next slot has a committee for `shard`, add the `shard_transition` to the proposed beacon block
if has_shard_committee and len(shard_blocks_buffer) > 0:
# Sanity check `get_pending_shard_blocks` function
check_pending_shard_blocks(spec, store, shard_store, shard_blocks_buffer)
# Use temporary next state to get ShardTransition of shard block
shard_transitions = get_shard_transitions(
spec,
state,
shard_block_dict={shard: shard_blocks_buffer},
)
shard_transition = shard_transitions[shard]
attestation = get_valid_on_time_attestation(
spec,
state,
index=committee_index,
shard_transition=shard_transition,
signed=False,
)
assert attestation.data.shard == shard
beacon_block.body.attestations = [attestation]
beacon_block.body.shard_transitions = shard_transitions
# Clear buffer
shard_blocks_buffer.clear()
signed_beacon_block = state_transition_and_sign_block(spec, state, beacon_block) # transition!
add_block_to_store(spec, store, signed_beacon_block)
assert spec.get_head(store) == beacon_block.hash_tree_root()
# On shard block at transitioned `state.slot`
if is_in_offset_sets(spec, state, shard):
# The created shard block would be appended to `shard_blocks_buffer`
apply_shard_block(spec, store, shard_store, state, shard_blocks_buffer)
return has_shard_committee
@with_all_phases_except([PHASE0])
@spec_state_test
@never_bls # Set to never_bls for testing `check_pending_shard_blocks`
def test_basic(spec, state):
spec.PHASE_1_GENESIS_SLOT = 0 # NOTE: mock genesis slot here
state = spec.upgrade_to_phase1(state)
shard = spec.Shard(1)
# Initialization
store = spec.get_forkchoice_store(state)
anchor_root = get_anchor_root(spec, state)
assert spec.get_head(store) == anchor_root
shard_store = spec.get_forkchoice_shard_store(state, shard)
shard_head_root = spec.get_shard_head(store, shard_store)
assert shard_head_root == state.shard_states[shard].latest_block_root
assert shard_store.block_states[shard_head_root].slot == 1
assert shard_store.block_states[shard_head_root] == state.shard_states[shard]
# With the mainnet config, there may be only one committee for `shard` per epoch,
# so we use this counter to run more test rounds.
shard_committee_counter = 2
shard_blocks_buffer = []
while shard_committee_counter > 0:
has_shard_committee = apply_shard_and_beacon(
spec, state, store, shard_store, shard_blocks_buffer
)
if has_shard_committee:
shard_committee_counter -= 1

View File

@ -0,0 +1,76 @@
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
)
from eth2spec.test.helpers.state import next_epoch
@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_committee_count_delta(spec, state):
assert spec.get_committee_count_delta(state, 0, 0) == 0
assert spec.get_committee_count_per_slot(state, 0) != 0
assert spec.get_committee_count_delta(state, 0, 1) == spec.get_committee_count_per_slot(state, 0)
assert spec.get_committee_count_delta(state, 1, 2) == spec.get_committee_count_per_slot(state, 0)
assert spec.get_committee_count_delta(state, 0, 2) == spec.get_committee_count_per_slot(state, 0) * 2
assert spec.get_committee_count_delta(state, 0, spec.SLOTS_PER_EPOCH) == (
spec.get_committee_count_per_slot(state, 0) * spec.SLOTS_PER_EPOCH
)
assert spec.get_committee_count_delta(state, 0, 2 * spec.SLOTS_PER_EPOCH) == (
spec.get_committee_count_per_slot(state, 0) * spec.SLOTS_PER_EPOCH
+ spec.get_committee_count_per_slot(state, 1) * spec.SLOTS_PER_EPOCH
)
@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_current_epoch_start(spec, state):
assert state.current_epoch_start_shard == 0
next_epoch(spec, state)
active_shard_count = spec.get_active_shard_count(state)
assert state.current_epoch_start_shard == (
spec.get_committee_count_delta(state, 0, spec.SLOTS_PER_EPOCH) % active_shard_count
)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
slot = current_epoch_start_slot
start_shard = spec.get_start_shard(state, slot)
assert start_shard == state.current_epoch_start_shard
@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_next_slot(spec, state):
next_epoch(spec, state)
active_shard_count = spec.get_active_shard_count(state)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
slot = current_epoch_start_slot + 1
start_shard = spec.get_start_shard(state, slot)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
expected_start_shard = (
state.current_epoch_start_shard
+ spec.get_committee_count_delta(state, start_slot=current_epoch_start_slot, stop_slot=slot)
) % active_shard_count
assert start_shard == expected_start_shard
@with_all_phases_except([PHASE0])
@spec_state_test
def test_get_start_shard_previous_slot(spec, state):
next_epoch(spec, state)
active_shard_count = spec.get_active_shard_count(state)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
slot = current_epoch_start_slot - 1
start_shard = spec.get_start_shard(state, slot)
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
expected_start_shard = (
state.current_epoch_start_shard
+ spec.MAX_COMMITTEES_PER_SLOT * spec.SLOTS_PER_EPOCH * active_shard_count
- spec.get_committee_count_delta(state, start_slot=slot, stop_slot=current_epoch_start_slot)
) % active_shard_count
assert start_shard == expected_start_shard

View File

@ -1,73 +0,0 @@
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
always_bls,
)
from eth2spec.test.helpers.shard_transitions import run_shard_transitions_processing
from eth2spec.test.helpers.shard_block import (
build_attestation_with_shard_transition,
build_shard_block,
build_shard_transitions_till_slot,
)
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
def run_basic_crosslink_tests(spec, state, target_len_offset_slot, valid=True):
state = transition_to_valid_shard_slot(spec, state)
# At the beginning, let `x = state.slot`, `state.shard_states[shard].slot == x - 1`
slot_x = state.slot
committee_index = spec.CommitteeIndex(0)
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
assert state.shard_states[shard].slot == slot_x - 1
# Create SignedShardBlock
body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE
shard_block = build_shard_block(spec, state, shard, body=body, signed=True)
shard_blocks = [shard_block]
# Create a shard_transitions that would be included at beacon block `state.slot + target_len_offset_slot`
shard_transitions = build_shard_transitions_till_slot(
spec,
state,
shard_blocks={shard: shard_blocks},
on_time_slot=state.slot + target_len_offset_slot,
)
shard_transition = shard_transitions[shard]
# Create an attestation that would be included at beacon block `state.slot + target_len_offset_slot`
attestation = build_attestation_with_shard_transition(
spec,
state,
index=committee_index,
on_time_slot=state.slot + target_len_offset_slot,
shard_transition=shard_transition,
)
pre_gasprice = state.shard_states[shard].gasprice
transition_to(spec, state, state.slot + target_len_offset_slot)
pre_shard_state = state.shard_states[shard]
yield from run_shard_transitions_processing(spec, state, shard_transitions, [attestation], valid=valid)
if valid:
# After state transition,
assert state.slot == slot_x + target_len_offset_slot
shard_state = state.shard_states[shard]
assert shard_state != pre_shard_state
assert shard_state == shard_transition.shard_states[len(shard_transition.shard_states) - 1]
if target_len_offset_slot == 1:
assert shard_state.gasprice > pre_gasprice
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_basic_crosslinks(spec, state):
yield from run_basic_crosslink_tests(spec, state, target_len_offset_slot=1, valid=True)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_multiple_offset_slots(spec, state):
yield from run_basic_crosslink_tests(spec, state, target_len_offset_slot=3, valid=True)

View File

@ -1,107 +0,0 @@
from typing import Dict, Sequence
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
always_bls,
)
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.shard_block import (
build_attestation_with_shard_transition,
build_shard_block,
build_shard_transitions_till_slot,
)
from eth2spec.test.helpers.state import state_transition_and_sign_block, transition_to_valid_shard_slot
def run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index, valid=True):
shard_transitions = build_shard_transitions_till_slot(
spec, state, shard_blocks, on_time_slot=state.slot + target_len_offset_slot
)
attestations = [
build_attestation_with_shard_transition(
spec,
state,
on_time_slot=state.slot + target_len_offset_slot,
index=committee_index,
shard_transition=shard_transitions[shard],
)
for shard in shard_blocks.keys()
]
# Propose beacon block at slot `x + 1`
beacon_block = build_empty_block(spec, state, slot=state.slot + target_len_offset_slot)
beacon_block.body.attestations = attestations
beacon_block.body.shard_transitions = shard_transitions
pre_shard_states = state.shard_states.copy()
yield 'pre', state.copy()
yield 'block', beacon_block
state_transition_and_sign_block(spec, state, beacon_block)
if valid:
yield 'post', state
else:
yield 'post', None
return
for shard in range(spec.get_active_shard_count(state)):
post_shard_state = state.shard_states[shard]
if shard in shard_blocks:
# Shard state has been changed to state_transition result
assert post_shard_state == shard_transitions[shard].shard_states[
len(shard_transitions[shard].shard_states) - 1
]
assert beacon_block.slot == shard_transitions[shard].shard_states[0].slot + target_len_offset_slot
assert post_shard_state.slot == state.slot - 1
if len(shard_blocks[shard]) == 0:
# `latest_block_root` is the same
assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_process_beacon_block_with_normal_shard_transition(spec, state):
state = transition_to_valid_shard_slot(spec, state)
target_len_offset_slot = 1
committee_index = spec.CommitteeIndex(0)
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
assert state.shard_states[shard].slot == state.slot - 1
pre_gasprice = state.shard_states[shard].gasprice
# Create SignedShardBlock at slot `shard_state.slot + 1`
body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE
shard_block = build_shard_block(spec, state, shard, body=body, signed=True)
shard_blocks: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
yield from run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index)
shard_state = state.shard_states[shard]
if target_len_offset_slot == 1 and len(shard_blocks) > 0:
assert shard_state.gasprice > pre_gasprice
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_process_beacon_block_with_empty_proposal_transition(spec, state):
state = transition_to_valid_shard_slot(spec, state)
target_len_offset_slot = 1
committee_index = spec.CommitteeIndex(0)
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
assert state.shard_states[shard].slot == state.slot - 1
# No new shard block
shard_blocks = {}
pre_gasprice = state.shard_states[shard].gasprice
yield from run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index)
if target_len_offset_slot == 1 and len(shard_blocks) > 0:
assert state.shard_states[shard].gasprice > pre_gasprice

View File

@ -14,6 +14,22 @@ Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
STUB_COORDINATES = _signature_to_G2(Z2_SIGNATURE)
def use_milagro():
"""
Shortcut to use Milagro as BLS library
"""
global bls
bls = milagro_bls
def use_py_ecc():
"""
Shortcut to use Py-ecc as BLS library
"""
global bls
bls = py_ecc_bls
def only_with_bls(alt_return=None):
"""
Decorator factory to make a function only run when BLS is active. Otherwise return the default.
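The two backend switches added above let tooling pick a BLS implementation at run time; as the test-generator diffs later in this commit show, a generator selects one once while preparing its config. A minimal hedged usage sketch:

```python
from eth2spec.utils import bls

bls.use_milagro()  # faster backend, selected by most test generators in this commit
# ... generate and verify test cases ...
bls.use_py_ecc()   # reference backend, used e.g. by the BLS vector generator below
```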

View File

@ -1,17 +1,9 @@
from hashlib import sha256
from typing import Dict, Union from remerkleable.byte_arrays import Bytes32
from typing import Union
ZERO_BYTES32 = b'\x00' * 32
def _hash(x: Union[bytes, bytearray, memoryview]) -> bytes: def hash(x: Union[bytes, bytearray, memoryview]) -> Bytes32:
return sha256(x).digest() return Bytes32(sha256(x).digest())
hash_cache: Dict[bytes, bytes] = {}
def hash(x: bytes) -> bytes:
if x in hash_cache:
return hash_cache[x]
return _hash(x)

View File

@ -1,3 +1,6 @@
from typing import TypeVar
from remerkleable.basic import uint
from remerkleable.core import View
from remerkleable.byte_arrays import Bytes32
@ -8,3 +11,15 @@ def serialize(obj: View) -> bytes:
def hash_tree_root(obj: View) -> Bytes32:
return Bytes32(obj.get_backing().merkle_root())
def uint_to_bytes(n: uint) -> bytes:
return serialize(n)
V = TypeVar('V', bound=View)
# Helper for typed copies: lets callers write copy(example_input) instead of example_input.copy(), preserving the static type V
def copy(obj: V) -> V:
return obj.copy()
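A small hedged usage sketch of the two helpers added above (module path taken from the `hash_tree_root` import used elsewhere in this commit; the 8-byte little-endian encoding is standard SSZ for `uint64`):

```python
from remerkleable.basic import uint64
from eth2spec.utils.ssz.ssz_impl import uint_to_bytes

# uint_to_bytes applies SSZ serialization: a uint64 encodes to 8 little-endian bytes.
assert uint_to_bytes(uint64(1000)) == (1000).to_bytes(8, 'little')
```

`copy(obj)` is simply the typed equivalent of `obj.copy()`, which the test helpers in this commit use for pre-state snapshots.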

View File

@ -5,4 +5,4 @@ from remerkleable.complex import Container, Vector, List
from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256
from remerkleable.bitfields import Bitvector, Bitlist
from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList
from remerkleable.core import BasicView, View, TypeDef from remerkleable.core import BasicView, View

View File

@ -0,0 +1,47 @@
import pytest
# Note: these functions are extracted from merkle-proofs.md (deprecated);
# the tests are temporary and demonstrate correctness while that document is still in the repo.
def get_power_of_two_ceil(x: int) -> int:
if x <= 1:
return 1
elif x == 2:
return 2
else:
return 2 * get_power_of_two_ceil((x + 1) // 2)
def get_power_of_two_floor(x: int) -> int:
if x <= 1:
return 1
if x == 2:
return x
else:
return 2 * get_power_of_two_floor(x // 2)
power_of_two_ceil_cases = [
(0, 1), (1, 1), (2, 2), (3, 4), (4, 4), (5, 8), (6, 8), (7, 8), (8, 8), (9, 16),
]
power_of_two_floor_cases = [
(0, 1), (1, 1), (2, 2), (3, 2), (4, 4), (5, 4), (6, 4), (7, 4), (8, 8), (9, 8),
]
@pytest.mark.parametrize(
'value,expected',
power_of_two_ceil_cases,
)
def test_get_power_of_two_ceil(value, expected):
assert get_power_of_two_ceil(value) == expected
@pytest.mark.parametrize(
'value,expected',
power_of_two_floor_cases,
)
def test_get_power_of_two_floor(value, expected):
assert get_power_of_two_floor(value) == expected
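As a hedged cross-check (not part of the added file), for `x >= 1` the two recurrences agree with the closed forms `2**ceil(log2(x))` and `2**floor(log2(x))`:

```python
import math

def check_against_closed_forms(limit=1024):
    # Assumes get_power_of_two_ceil / get_power_of_two_floor from the file above are in scope.
    for x in range(1, limit + 1):
        assert get_power_of_two_ceil(x) == 2 ** math.ceil(math.log2(x))
        assert get_power_of_two_floor(x) == 2 ** math.floor(math.log2(x))
```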

View File

@ -0,0 +1,43 @@
# Finality tests
The aim of these tests is to cover the finality rules.
- `finality`: transitions triggered by one or more blocks.
## Test case format
### `meta.yaml`
```yaml
description: string -- Optional. Description of test case, purely for debugging purposes.
bls_setting: int -- see general test-format spec.
blocks_count: int -- the number of blocks processed in this test.
```
### `pre.yaml`
A YAML-encoded `BeaconState`, the state before running the block transitions.
Also available as `pre.ssz`.
### `blocks_<index>.yaml`
A series of files, with `<index>` in range `[0, blocks_count)`. Blocks need to be processed in order,
following the main transition function (i.e. process slot and epoch transitions in between blocks as normal).
Each file is a YAML-encoded `SignedBeaconBlock`.
Each block is also available as `blocks_<index>.ssz`.
### `post.yaml`
A YAML-encoded `BeaconState`, the state after applying the block transitions.
Also available as `post.ssz`.
## Condition
The resulting state should match the expected `post` state, or if the `post` state is left blank,
the handler should reject the series of blocks as invalid.
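For orientation, here is a hedged consumer sketch (not part of this commit) of how a handler might drive such a case once the YAML/SSZ files are decoded into spec objects; the assumption is that an invalid block surfaces as a failed `assert` inside `spec.state_transition`:

```python
def run_finality_case(spec, pre_state, signed_blocks, post_state=None):
    """Apply `signed_blocks` in order; `post_state=None` means the series must be rejected."""
    state = pre_state.copy()
    try:
        for signed_block in signed_blocks:
            # Slot and epoch transitions between blocks happen inside state_transition.
            spec.state_transition(state, signed_block)
    except AssertionError:
        assert post_state is None  # invalid series: no post state expected
        return state
    assert post_state is not None and state == post_state
    return state
```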

View File

@ -149,7 +149,9 @@ def case03_aggregate():
else:
raise Exception("Should have been INVALID")
yield f'aggregate_na_pubkeys', { # No signatures to aggregate. Follow IETF BLS spec, return `None` to represent INVALID.
# https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-02#section-2.8
yield f'aggregate_na_signatures', {
'input': [],
'output': None,
}
@ -319,6 +321,7 @@ def create_provider(handler_name: str,
if __name__ == "__main__":
bls.use_py_ecc() # Py-ecc is chosen instead of Milagro, since the code is better understood to be correct.
gen_runner.run_generator("bls", [ gen_runner.run_generator("bls", [
create_provider('sign', case01_sign), create_provider('sign', case01_sign),
create_provider('verify', case02_verify), create_provider('verify', case02_verify),

View File

@ -2,7 +2,7 @@ from typing import Iterable
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.phase_0.epoch_processing import ( from eth2spec.test.phase0.epoch_processing import (
test_process_final_updates,
test_process_justification_and_finalization,
test_process_registry_updates,
@ -14,6 +14,7 @@ from gen_from_tests.gen import generate_from_tests
from importlib import reload
from eth2spec.config import config_util
from eth2spec.test.context import PHASE0
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -22,6 +23,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:

View File

@ -0,0 +1,5 @@
# Finality tests
Finality tests cover regular state transitions in a common block-list format to exercise the finality rules.
Information on the format of the tests can be found in the [finality test formats documentation](../../formats/finality/README.md).

View File

@ -0,0 +1,39 @@
from typing import Iterable
from importlib import reload
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from eth2spec.test.context import PHASE0
from eth2spec.test.phase0.finality import test_finality
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:
return generate_from_tests(
runner_name='finality',
handler_name=handler_name,
src=tests_src,
fork_name=PHASE0,
)
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
if __name__ == "__main__":
gen_runner.run_generator("finality", [
create_provider('finality', test_finality, 'minimal'),
create_provider('finality', test_finality, 'mainnet'),
])

View File

@ -0,0 +1,2 @@
../../core/gen_helpers
../../../

View File

@ -1,13 +1,14 @@
from typing import Iterable
from eth2spec.test.context import PHASE0
from eth2spec.test.genesis import test_initialization, test_validity from eth2spec.test.phase0.genesis import test_initialization, test_validity
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from eth2spec.phase0 import spec as spec
from importlib import reload
from eth2spec.config import config_util
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -15,6 +16,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:

View File

@ -1,6 +1,6 @@
from typing import Iterable
from eth2spec.test.phase_0.block_processing import ( from eth2spec.test.phase0.block_processing import (
test_process_attestation,
test_process_attester_slashing,
test_process_block_header,
@ -16,6 +16,7 @@ from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -24,6 +25,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:

View File

@ -2,7 +2,7 @@ from typing import Iterable
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.phase_0.rewards import ( from eth2spec.test.phase0.rewards import (
test_basic,
test_leak,
test_random,
@ -12,6 +12,7 @@ from gen_from_tests.gen import generate_from_tests
from importlib import reload
from eth2spec.config import config_util
from eth2spec.test.context import PHASE0
from eth2spec.utils import bls
def create_provider(tests_src, config_name: str) -> gen_typing.TestProvider:
@ -20,6 +21,7 @@ def create_provider(tests_src, config_name: str) -> gen_typing.TestProvider:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]:

View File

@ -5,10 +5,11 @@ from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from eth2spec.test.context import PHASE0
from eth2spec.test.phase_0.sanity import test_blocks, test_slots from eth2spec.test.phase0.sanity import test_blocks, test_slots
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.utils import bls
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -17,6 +18,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name
def cases_fn() -> Iterable[gen_typing.TestCase]: