Merge branch 'v012x' into empty-bits-case

This commit is contained in:
Danny Ryan 2020-05-07 10:17:26 -06:00
commit 08652f543b
No known key found for this signature in database
GPG Key ID: 2765A792E42CE07A
34 changed files with 1398 additions and 544 deletions

View File

@ -9,11 +9,14 @@ This repository hosts the current Eth2 specifications. Discussions about design
## Specs ## Specs
Core specifications for Eth2 clients can be found in [specs/](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are: [![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec)
Core specifications for Eth2 clients can be found in [specs](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
### Phase 0 ### Phase 0
* [The Beacon Chain](specs/phase0/beacon-chain.md) * [The Beacon Chain](specs/phase0/beacon-chain.md)
* [Fork Choice](specs/phase0/fork-choice.md) * [Beacon Chain Fork Choice](specs/phase0/fork-choice.md)
* [Deposit Contract](specs/phase0/deposit-contract.md) * [Deposit Contract](specs/phase0/deposit-contract.md)
* [Honest Validator](specs/phase0/validator.md) * [Honest Validator](specs/phase0/validator.md)
* [P2P Networking](specs/phase0/p2p-interface.md) * [P2P Networking](specs/phase0/p2p-interface.md)
@ -22,8 +25,9 @@ Core specifications for Eth2 clients be found in [specs/](specs/). These are div
* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md) * [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
* [The Beacon Chain for Shards](specs/phase1/beacon-chain.md) * [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
* [Custody Game](specs/phase1/custody-game.md) * [Custody Game](specs/phase1/custody-game.md)
* [Shard Transition and Fraud Proofs](specs/phase1/fraud-proofs.md) * [Shard Transition and Fraud Proofs](specs/phase1/shard-transition.md)
* [Light client syncing protocol](specs/phase1/light-client-sync.md) * [Light client syncing protocol](specs/phase1/light-client-sync.md)
* [Beacon Chain Fork Choice for Shards](specs/phase1/fork-choice.md)
### Phase 2 ### Phase 2

View File

@ -94,8 +94,8 @@ EPOCHS_PER_ETH1_VOTING_PERIOD: 32
SLOTS_PER_HISTORICAL_ROOT: 8192 SLOTS_PER_HISTORICAL_ROOT: 8192
# 2**8 (= 256) epochs ~27 hours # 2**8 (= 256) epochs ~27 hours
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# 2**11 (= 2,048) epochs 9 days # 2**8 (= 256) epochs ~27 hours
PERSISTENT_COMMITTEE_PERIOD: 2048 SHARD_COMMITTEE_PERIOD: 256
# 2**6 (= 64) epochs ~7 hours # 2**6 (= 64) epochs ~7 hours
MAX_EPOCHS_PER_CROSSLINK: 64 MAX_EPOCHS_PER_CROSSLINK: 64
# 2**2 (= 4) epochs 25.6 minutes # 2**2 (= 4) epochs 25.6 minutes
@ -122,8 +122,8 @@ BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512 WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8) # 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8 PROPOSER_REWARD_QUOTIENT: 8
# 2**25 (= 33,554,432) # 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 33554432 INACTIVITY_PENALTY_QUOTIENT: 16777216
# 2**5 (= 32) # 2**5 (= 32)
MIN_SLASHING_PENALTY_QUOTIENT: 32 MIN_SLASHING_PENALTY_QUOTIENT: 32
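The wall-clock comments in this file follow from `SECONDS_PER_SLOT: 12` and `SLOTS_PER_EPOCH: 32`; a rough sanity check of the updated values (a sketch, not part of the config):

```python
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32

def epochs_to_hours(epochs: int) -> float:
    # Convert an epoch count to approximate wall-clock hours.
    return epochs * SLOTS_PER_EPOCH * SECONDS_PER_SLOT / 3600

print(round(epochs_to_hours(256), 1))  # 27.3 -> the "~27 hours" comment on SHARD_COMMITTEE_PERIOD
print(round(epochs_to_hours(64), 1))   # 6.8  -> the "~7 hours" comment on MAX_EPOCHS_PER_CROSSLINK
assert 2**24 == 16777216               # the new INACTIVITY_PENALTY_QUOTIENT value
```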
@ -161,6 +161,8 @@ DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
# Phase 1: Upgrade from Phase 0 # Phase 1: Upgrade from Phase 0
# --------------------------------------------------------------- # ---------------------------------------------------------------
PHASE_1_FORK_VERSION: 0x01000000 PHASE_1_FORK_VERSION: 0x01000000
# [STUB]
PHASE_1_GENESIS_SLOT: 32
INITIAL_ACTIVE_SHARDS: 64 INITIAL_ACTIVE_SHARDS: 64
# Phase 1: General # Phase 1: General
@ -173,8 +175,6 @@ ONLINE_PERIOD: 8
LIGHT_CLIENT_COMMITTEE_SIZE: 128 LIGHT_CLIENT_COMMITTEE_SIZE: 128
# 2**8 (= 256) | epochs | ~27 hours # 2**8 (= 256) | epochs | ~27 hours
LIGHT_CLIENT_COMMITTEE_PERIOD: 256 LIGHT_CLIENT_COMMITTEE_PERIOD: 256
# 2**8 (= 256) | epochs | ~27 hours
SHARD_COMMITTEE_PERIOD: 256
# 2**18 (= 262,144) # 2**18 (= 262,144)
SHARD_BLOCK_CHUNK_SIZE: 262144 SHARD_BLOCK_CHUNK_SIZE: 262144
# 2**2 (= 4) # 2**2 (= 4)

View File

@ -89,13 +89,13 @@ MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs # 2**2 (= 4) epochs
MAX_SEED_LOOKAHEAD: 4 MAX_SEED_LOOKAHEAD: 4
# [customized] higher frequency new deposits from eth1 for testing # [customized] higher frequency new deposits from eth1 for testing
EPOCHS_PER_ETH1_VOTING_PERIOD: 2 EPOCHS_PER_ETH1_VOTING_PERIOD: 4
# [customized] smaller state # [customized] smaller state
SLOTS_PER_HISTORICAL_ROOT: 64 SLOTS_PER_HISTORICAL_ROOT: 64
# 2**8 (= 256) epochs # 2**8 (= 256) epochs
MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit # [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
PERSISTENT_COMMITTEE_PERIOD: 128 SHARD_COMMITTEE_PERIOD: 64
# [customized] fast catchup crosslinks # [customized] fast catchup crosslinks
MAX_EPOCHS_PER_CROSSLINK: 4 MAX_EPOCHS_PER_CROSSLINK: 4
# 2**2 (= 4) epochs # 2**2 (= 4) epochs
@ -122,8 +122,8 @@ BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512 WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8) # 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8 PROPOSER_REWARD_QUOTIENT: 8
# 2**25 (= 33,554,432) # 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 33554432 INACTIVITY_PENALTY_QUOTIENT: 16777216
# 2**5 (= 32) # 2**5 (= 32)
MIN_SLASHING_PENALTY_QUOTIENT: 32 MIN_SLASHING_PENALTY_QUOTIENT: 32
@ -162,6 +162,8 @@ DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
# --------------------------------------------------------------- # ---------------------------------------------------------------
# [customized] for testnet distinction # [customized] for testnet distinction
PHASE_1_FORK_VERSION: 0x01000001 PHASE_1_FORK_VERSION: 0x01000001
# [customized] for testing
PHASE_1_GENESIS_SLOT: 8
# [customized] reduced for testing # [customized] reduced for testing
INITIAL_ACTIVE_SHARDS: 4 INITIAL_ACTIVE_SHARDS: 4
@ -176,8 +178,6 @@ ONLINE_PERIOD: 8
LIGHT_CLIENT_COMMITTEE_SIZE: 128 LIGHT_CLIENT_COMMITTEE_SIZE: 128
# 2**8 (= 256) | epochs # 2**8 (= 256) | epochs
LIGHT_CLIENT_COMMITTEE_PERIOD: 256 LIGHT_CLIENT_COMMITTEE_PERIOD: 256
# 2**8 (= 256) | epochs
SHARD_COMMITTEE_PERIOD: 256
# 2**18 (= 262,144) # 2**18 (= 262,144)
SHARD_BLOCK_CHUNK_SIZE: 262144 SHARD_BLOCK_CHUNK_SIZE: 262144
# 2**2 (= 4) # 2**2 (= 4)

View File

@ -108,7 +108,7 @@ SSZObject = TypeVar('SSZObject', bound=View)
PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0 PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0
from eth2spec.config.config_util import apply_constants_config from eth2spec.config.config_util import apply_constants_config
from typing import ( from typing import (
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional
) )
from dataclasses import ( from dataclasses import (
@ -146,8 +146,11 @@ _hash = hash
hash_cache: Dict[bytes, Bytes32] = {} hash_cache: Dict[bytes, Bytes32] = {}
def get_eth1_data(distance: uint64) -> Bytes32: def get_eth1_data(block: Eth1Block) -> Eth1Data:
return hash(distance) """
A stub function returning mock Eth1Data.
"""
return Eth1Data(block_hash=hash_tree_root(block))
def hash(x: bytes) -> Bytes32: # type: ignore def hash(x: bytes) -> Bytes32: # type: ignore
@ -373,9 +376,10 @@ class PySpecCommand(Command):
self.md_doc_paths = """ self.md_doc_paths = """
specs/phase0/beacon-chain.md specs/phase0/beacon-chain.md
specs/phase0/fork-choice.md specs/phase0/fork-choice.md
specs/phase0/validator.md
specs/phase1/custody-game.md specs/phase1/custody-game.md
specs/phase1/beacon-chain.md specs/phase1/beacon-chain.md
specs/phase1/fraud-proofs.md specs/phase1/shard-transition.md
specs/phase1/fork-choice.md specs/phase1/fork-choice.md
specs/phase1/phase1-fork.md specs/phase1/phase1-fork.md
""" """

View File

@ -35,7 +35,7 @@
- [`DepositMessage`](#depositmessage) - [`DepositMessage`](#depositmessage)
- [`DepositData`](#depositdata) - [`DepositData`](#depositdata)
- [`BeaconBlockHeader`](#beaconblockheader) - [`BeaconBlockHeader`](#beaconblockheader)
- [`SigningRoot`](#signingroot) - [`SigningData`](#signingdata)
- [Beacon operations](#beacon-operations) - [Beacon operations](#beacon-operations)
- [`ProposerSlashing`](#proposerslashing) - [`ProposerSlashing`](#proposerslashing)
- [`AttesterSlashing`](#attesterslashing) - [`AttesterSlashing`](#attesterslashing)
@ -191,7 +191,6 @@ The following values are (non-configurable) constants used throughout the specif
| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `1` | | `HYSTERESIS_DOWNWARD_MULTIPLIER` | `1` |
| `HYSTERESIS_UPWARD_MULTIPLIER` | `5` | | `HYSTERESIS_UPWARD_MULTIPLIER` | `5` |
- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.) - For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)
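For reference, that active-validator threshold works out as follows (a sketch using `SLOTS_PER_EPOCH = 32` and `TARGET_COMMITTEE_SIZE = 2**7` from this spec):

```python
SLOTS_PER_EPOCH = 32
TARGET_COMMITTEE_SIZE = 128
# Below this count, at least one committee per epoch must fall short of the target size.
print(SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE)  # 4096 active validators
```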
### Gwei values ### Gwei values
@ -224,7 +223,8 @@ The following values are (non-configurable) constants used throughout the specif
| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `2**5` (= 32) | epochs | ~3.4 hours | | `EPOCHS_PER_ETH1_VOTING_PERIOD` | `2**5` (= 32) | epochs | ~3.4 hours |
| `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~27 hours | | `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~27 hours |
| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours | | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours |
| `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days | | `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
### State list lengths ### State list lengths
@ -242,10 +242,10 @@ The following values are (non-configurable) constants used throughout the specif
| `BASE_REWARD_FACTOR` | `2**6` (= 64) | | `BASE_REWARD_FACTOR` | `2**6` (= 64) |
| `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) | | `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) |
| `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) | | `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) |
| `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) | | `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) |
| `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) | | `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) |
- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. - The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12` epochs (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
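The updated constant keeps this relationship exact, since `(2**12)**2 = 2**24`; a quick numeric check of the claim (sketch only):

```python
import math

INACTIVITY_PENALTY_QUOTIENT = 2**24      # updated value
INVERSE_SQRT_E_DROP_TIME = 2**12         # epochs, about 18 days

# Balance fraction retained by an offline validator after INVERSE_SQRT_E_DROP_TIME epochs
retained = (1 - 1 / INACTIVITY_PENALTY_QUOTIENT) ** (INVERSE_SQRT_E_DROP_TIME**2 / 2)
print(retained, 1 / math.sqrt(math.e))   # both ~0.6065
```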
### Max operations per block ### Max operations per block
@ -269,7 +269,6 @@ The following values are (non-configurable) constants used throughout the specif
| `DOMAIN_SELECTION_PROOF` | `DomainType('0x05000000')` | | `DOMAIN_SELECTION_PROOF` | `DomainType('0x05000000')` |
| `DOMAIN_AGGREGATE_AND_PROOF` | `DomainType('0x06000000')` | | `DOMAIN_AGGREGATE_AND_PROOF` | `DomainType('0x06000000')` |
## Containers ## Containers
The following types are [SimpleSerialize (SSZ)](../../ssz/simple-serialize.md) containers. The following types are [SimpleSerialize (SSZ)](../../ssz/simple-serialize.md) containers.
@ -399,10 +398,10 @@ class BeaconBlockHeader(Container):
body_root: Root body_root: Root
``` ```
#### `SigningRoot` #### `SigningData`
```python ```python
class SigningRoot(Container): class SigningData(Container):
object_root: Root object_root: Root
domain: Domain domain: Domain
``` ```
@ -718,9 +717,9 @@ def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint
#### `compute_shuffled_index` #### `compute_shuffled_index`
```python ```python
def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Bytes32) -> ValidatorIndex: def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64:
""" """
Return the shuffled validator index corresponding to ``seed`` (and ``index_count``). Return the shuffled index corresponding to ``seed`` (and ``index_count``).
""" """
assert index < index_count assert index < index_count
@ -728,14 +727,14 @@ def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Byt
# See the 'generalized domain' algorithm on page 3 # See the 'generalized domain' algorithm on page 3
for current_round in range(SHUFFLE_ROUND_COUNT): for current_round in range(SHUFFLE_ROUND_COUNT):
pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count
flip = ValidatorIndex((pivot + index_count - index) % index_count) flip = (pivot + index_count - index) % index_count
position = max(index, flip) position = max(index, flip)
source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4)) source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4))
byte = source[(position % 256) // 8] byte = source[(position % 256) // 8]
bit = (byte >> (position % 8)) % 2 bit = (byte >> (position % 8)) % 2
index = flip if bit else index index = flip if bit else index
return ValidatorIndex(index) return index
``` ```
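A short usage sketch of the relaxed `uint64` signature (assuming the `eth2spec` package built from this branch is importable): because each swap-or-not round only swaps positions, the outputs over `range(index_count)` form a permutation.

```python
from eth2spec.phase0 import spec

seed = spec.Bytes32(b'\x42' * 32)
index_count = 16
shuffled = [spec.compute_shuffled_index(i, index_count, seed) for i in range(index_count)]
assert sorted(shuffled) == list(range(index_count))  # bijection over [0, index_count)
```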
#### `compute_proposer_index` #### `compute_proposer_index`
@ -749,11 +748,11 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
MAX_RANDOM_BYTE = 2**8 - 1 MAX_RANDOM_BYTE = 2**8 - 1
i = 0 i = 0
while True: while True:
candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)] candidate_index = indices[compute_shuffled_index(i % len(indices), len(indices), seed)]
random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32] random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
effective_balance = state.validators[candidate_index].effective_balance effective_balance = state.validators[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
return ValidatorIndex(candidate_index) return candidate_index
i += 1 i += 1
``` ```
@ -769,7 +768,7 @@ def compute_committee(indices: Sequence[ValidatorIndex],
""" """
start = (len(indices) * index) // count start = (len(indices) * index) // count
end = (len(indices) * (index + 1)) // count end = (len(indices) * (index + 1)) // count
return [indices[compute_shuffled_index(ValidatorIndex(i), len(indices), seed)] for i in range(start, end)] return [indices[compute_shuffled_index(i, len(indices), seed)] for i in range(start, end)]
``` ```
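The slicing arithmetic in `compute_committee` partitions the shuffled validator list into `count` contiguous, near-equal committees; a pure-Python illustration of just the start/end computation:

```python
indices = list(range(10))   # stand-in for the active validator indices
count = 4
slices = [(len(indices) * i // count, len(indices) * (i + 1) // count) for i in range(count)]
print(slices)               # [(0, 2), (2, 5), (5, 7), (7, 10)]
# The slices cover every index exactly once, so committees never overlap or skip anyone.
assert [j for start, end in slices for j in range(start, end)] == indices
```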
#### `compute_epoch_at_slot` #### `compute_epoch_at_slot`
@ -848,13 +847,12 @@ def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_
```python ```python
def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root: def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root:
""" """
Return the signing root of an object by calculating the root of the object-domain tree. Return the signing root for the corresponding signing data.
""" """
domain_wrapped_object = SigningRoot( return hash_tree_root(SigningData(
object_root=hash_tree_root(ssz_object), object_root=hash_tree_root(ssz_object),
domain=domain, domain=domain,
) ))
return hash_tree_root(domain_wrapped_object)
``` ```
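A brief usage sketch of the renamed container (assuming the `eth2spec` package built from this branch): signing roots are computed over the `SigningData` wrapper, never over the object root alone.

```python
from eth2spec.phase0 import spec

voluntary_exit = spec.VoluntaryExit(epoch=spec.Epoch(0), validator_index=spec.ValidatorIndex(0))
domain = spec.compute_domain(spec.DOMAIN_VOLUNTARY_EXIT)  # defaults to the genesis fork version
signing_root = spec.compute_signing_root(voluntary_exit, domain)

# Equivalent by definition to hashing the SigningData wrapper directly
wrapper = spec.SigningData(object_root=spec.hash_tree_root(voluntary_exit), domain=domain)
assert signing_root == spec.hash_tree_root(wrapper)
```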
### Beacon state accessors ### Beacon state accessors
@ -1121,7 +1119,7 @@ def slash_validator(state: BeaconState,
whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT) whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT) proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
increase_balance(state, proposer_index, proposer_reward) increase_balance(state, proposer_index, proposer_reward)
increase_balance(state, whistleblower_index, whistleblower_reward - proposer_reward) increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
``` ```
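For intuition on the amounts involved, the reward split for a validator slashed at `MAX_EFFECTIVE_BALANCE` works out as follows (values in Gwei, using `WHISTLEBLOWER_REWARD_QUOTIENT = 2**9` and `PROPOSER_REWARD_QUOTIENT = 2**3` from this spec; a rough sketch):

```python
MAX_EFFECTIVE_BALANCE = 32 * 10**9          # 32 ETH in Gwei
WHISTLEBLOWER_REWARD_QUOTIENT = 2**9
PROPOSER_REWARD_QUOTIENT = 2**3

whistleblower_reward = MAX_EFFECTIVE_BALANCE // WHISTLEBLOWER_REWARD_QUOTIENT  # 62_500_000
proposer_reward = whistleblower_reward // PROPOSER_REWARD_QUOTIENT             # 7_812_500
# The proposer gets proposer_reward; the whistleblower gets the (non-negative) remainder.
print(proposer_reward, whistleblower_reward - proposer_reward)                 # 7812500 54687500
```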
## Genesis ## Genesis
@ -1170,6 +1168,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
return state return state
``` ```
*Note*: The ETH1 block with `eth1_timestamp` meeting the minimum genesis active validator count criteria can also occur before `MIN_GENESIS_TIME`.
### Genesis state ### Genesis state
Let `genesis_state = candidate_state` whenever `is_valid_genesis_state(candidate_state) is True` for the first time. Let `genesis_state = candidate_state` whenever `is_valid_genesis_state(candidate_state) is True` for the first time.
@ -1225,7 +1225,7 @@ def process_slots(state: BeaconState, slot: Slot) -> None:
# Process epoch on the start slot of the next epoch # Process epoch on the start slot of the next epoch
if (state.slot + 1) % SLOTS_PER_EPOCH == 0: if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
process_epoch(state) process_epoch(state)
state.slot += Slot(1) state.slot = Slot(state.slot + 1)
``` ```
```python ```python
@ -1689,7 +1689,7 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu
# Exits must specify an epoch when they become valid; they are not valid before then # Exits must specify an epoch when they become valid; they are not valid before then
assert get_current_epoch(state) >= voluntary_exit.epoch assert get_current_epoch(state) >= voluntary_exit.epoch
# Verify the validator has been active long enough # Verify the validator has been active long enough
assert get_current_epoch(state) >= validator.activation_epoch + PERSISTENT_COMMITTEE_PERIOD assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
# Verify signature # Verify signature
domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch) domain = get_domain(state, DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
signing_root = compute_signing_root(voluntary_exit, domain) signing_root = compute_signing_root(voluntary_exit, domain)

View File

@ -162,7 +162,7 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
active_indices = get_active_validator_indices(state, get_current_epoch(state)) active_indices = get_active_validator_indices(state, get_current_epoch(state))
return Gwei(sum( return Gwei(sum(
state.validators[i].effective_balance for i in active_indices state.validators[i].effective_balance for i in active_indices
if (i in store.latest_messages if (i in store.latest_messages
and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root) and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
)) ))
``` ```
@ -273,19 +273,22 @@ def validate_on_attestation(store: Store, attestation: Attestation) -> None:
current_epoch = compute_epoch_at_slot(get_current_slot(store)) current_epoch = compute_epoch_at_slot(get_current_slot(store))
# Use GENESIS_EPOCH for previous when genesis to avoid underflow # Use GENESIS_EPOCH for previous when genesis to avoid underflow
previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
# If attestation target is from a future epoch, delay consideration until the epoch arrives
assert target.epoch in [current_epoch, previous_epoch] assert target.epoch in [current_epoch, previous_epoch]
assert target.epoch == compute_epoch_at_slot(attestation.data.slot) assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
# Attestations' target must be for a known block. If target block is unknown, delay consideration until the block is found # Attestations' target must be for a known block. If target block is unknown, delay consideration until the block is found
assert target.root in store.blocks assert target.root in store.blocks
# Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)
# Attestations must be for a known block. If block is unknown, delay consideration until the block is found # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
assert attestation.data.beacon_block_root in store.blocks assert attestation.data.beacon_block_root in store.blocks
# Attestations must not be for blocks in the future. If not, the attestation should not be considered # Attestations must not be for blocks in the future. If not, the attestation should not be considered
assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot
# FFG and LMD vote must be consistent with each other
target_slot = compute_start_slot_at_epoch(target.epoch)
assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot)
# Attestations can only affect the fork choice of subsequent slots. # Attestations can only affect the fork choice of subsequent slots.
# Delay consideration in the fork choice until their slot is in the past. # Delay consideration in the fork choice until their slot is in the past.
assert get_current_slot(store) >= attestation.data.slot + 1 assert get_current_slot(store) >= attestation.data.slot + 1
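To make the new FFG/LMD consistency check concrete: the block an attestation votes for (its LMD vote) must descend from its FFG target at the target epoch's start slot. A toy illustration with a hand-rolled block map and ancestor walk (not the spec's `Store` or `get_ancestor`):

```python
# Toy chain: root -> (parent_root, slot)
blocks = {
    "A": (None, 0),   # checkpoint block at the target epoch's start slot
    "B": ("A", 1),
    "C": ("B", 2),    # head block the attestation votes for (LMD vote)
}

def get_ancestor(root, slot):
    # Walk back through parents until reaching a block at or before `slot`.
    parent, block_slot = blocks[root]
    while block_slot > slot and parent is not None:
        root = parent
        parent, block_slot = blocks[root]
    return root

target_root, target_slot, lmd_vote_root = "A", 0, "C"
assert get_ancestor(lmd_vote_root, target_slot) == target_root  # FFG and LMD votes agree
```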

View File

@ -4,7 +4,7 @@ This document contains the networking specification for Ethereum 2.0 clients.
It consists of four main sections: It consists of four main sections:
1. A specification of the network fundamentals detailing the two network configurations: interoperability test network and mainnet launch. 1. A specification of the network fundamentals.
2. A specification of the three network interaction *domains* of Eth2: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain. 2. A specification of the three network interaction *domains* of Eth2: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain.
3. The rationale and further explanation for the design choices made in the previous two sections. 3. The rationale and further explanation for the design choices made in the previous two sections.
4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed. 4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed.
@ -17,14 +17,8 @@ It consists of four main sections:
- [Network fundamentals](#network-fundamentals) - [Network fundamentals](#network-fundamentals)
- [Transport](#transport) - [Transport](#transport)
- [Interop](#interop)
- [Mainnet](#mainnet)
- [Encryption and identification](#encryption-and-identification) - [Encryption and identification](#encryption-and-identification)
- [Interop](#interop-1)
- [Mainnet](#mainnet-1)
- [Protocol Negotiation](#protocol-negotiation) - [Protocol Negotiation](#protocol-negotiation)
- [Interop](#interop-2)
- [Mainnet](#mainnet-2)
- [Multiplexing](#multiplexing) - [Multiplexing](#multiplexing)
- [Eth2 network interaction domains](#eth2-network-interaction-domains) - [Eth2 network interaction domains](#eth2-network-interaction-domains)
- [Configuration](#configuration) - [Configuration](#configuration)
@ -33,11 +27,8 @@ It consists of four main sections:
- [Topics and messages](#topics-and-messages) - [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics) - [Global topics](#global-topics)
- [Attestation subnets](#attestation-subnets) - [Attestation subnets](#attestation-subnets)
- [Interop](#interop-3) - [Attestations and Aggregation](#attestations-and-aggregation)
- [Mainnet](#mainnet-3)
- [Encodings](#encodings) - [Encodings](#encodings)
- [Interop](#interop-4)
- [Mainnet](#mainnet-4)
- [The Req/Resp domain](#the-reqresp-domain) - [The Req/Resp domain](#the-reqresp-domain)
- [Protocol identification](#protocol-identification) - [Protocol identification](#protocol-identification)
- [Req/Resp interaction](#reqresp-interaction) - [Req/Resp interaction](#reqresp-interaction)
@ -56,29 +47,25 @@ It consists of four main sections:
- [Integration into libp2p stacks](#integration-into-libp2p-stacks) - [Integration into libp2p stacks](#integration-into-libp2p-stacks)
- [ENR structure](#enr-structure) - [ENR structure](#enr-structure)
- [Attestation subnet bitfield](#attestation-subnet-bitfield) - [Attestation subnet bitfield](#attestation-subnet-bitfield)
- [Interop](#interop-5) - [`eth2` field](#eth2-field)
- [Mainnet](#mainnet-5) - [General capabilities](#general-capabilities)
- [`eth2` field](#eth2-field)
- [General capabilities](#general-capabilities)
- [Topic advertisement](#topic-advertisement) - [Topic advertisement](#topic-advertisement)
- [Mainnet](#mainnet-6)
- [Design decision rationale](#design-decision-rationale) - [Design decision rationale](#design-decision-rationale)
- [Transport](#transport-1) - [Transport](#transport-1)
- [Why are we defining specific transports?](#why-are-we-defining-specific-transports) - [Why are we defining specific transports?](#why-are-we-defining-specific-transports)
- [Can clients support other transports/handshakes than the ones mandated by the spec?](#can-clients-support-other-transportshandshakes-than-the-ones-mandated-by-the-spec) - [Can clients support other transports/handshakes than the ones mandated by the spec?](#can-clients-support-other-transportshandshakes-than-the-ones-mandated-by-the-spec)
- [What are the advantages of using TCP/QUIC/Websockets?](#what-are-the-advantages-of-using-tcpquicwebsockets) - [What are the advantages of using TCP/QUIC/Websockets?](#what-are-the-advantages-of-using-tcpquicwebsockets)
- [Why do we not just support a single transport?](#why-do-we-not-just-support-a-single-transport) - [Why do we not just support a single transport?](#why-do-we-not-just-support-a-single-transport)
- [Why are we not using QUIC for mainnet from the start?](#why-are-we-not-using-quic-for-mainnet-from-the-start) - [Why are we not using QUIC from the start?](#why-are-we-not-using-quic-from-the-start)
- [Multiplexing](#multiplexing-1) - [Multiplexing](#multiplexing-1)
- [Why are we using mplex/yamux?](#why-are-we-using-mplexyamux) - [Why are we using mplex/yamux?](#why-are-we-using-mplexyamux)
- [Protocol Negotiation](#protocol-negotiation-1) - [Protocol Negotiation](#protocol-negotiation-1)
- [When is multiselect 2.0 due and why are we using it for mainnet?](#when-is-multiselect-20-due-and-why-are-we-using-it-for-mainnet) - [When is multiselect 2.0 due and why do we plan to migrate to it?](#when-is-multiselect-20-due-and-why-do-we-plan-to-migrate-to-it)
- [What is the difference between connection-level and stream-level protocol negotiation?](#what-is-the-difference-between-connection-level-and-stream-level-protocol-negotiation) - [What is the difference between connection-level and stream-level protocol negotiation?](#what-is-the-difference-between-connection-level-and-stream-level-protocol-negotiation)
- [Encryption](#encryption) - [Encryption](#encryption)
- [Why are we using SecIO for interop? Why not for mainnet?](#why-are-we-using-secio-for-interop-why-not-for-mainnet) - [Why are we not supporting SecIO?](#why-are-we-not-supporting-secio)
- [Why are we using Noise/TLS 1.3 for mainnet?](#why-are-we-using-noisetls-13-for-mainnet) - [Why are we using Noise/TLS 1.3?](#why-are-we-using-noisetls-13)
- [Why are we using encryption at all?](#why-are-we-using-encryption-at-all) - [Why are we using encryption at all?](#why-are-we-using-encryption-at-all)
- [Will mainnet networking be untested when it launches?](#will-mainnnet-networking-be-untested-when-it-launches)
- [Gossipsub](#gossipsub) - [Gossipsub](#gossipsub)
- [Why are we using a pub/sub algorithm for block and attestation propagation?](#why-are-we-using-a-pubsub-algorithm-for-block-and-attestation-propagation) - [Why are we using a pub/sub algorithm for block and attestation propagation?](#why-are-we-using-a-pubsub-algorithm-for-block-and-attestation-propagation)
- [Why are we using topics to segregate encodings, yet only support one encoding?](#why-are-we-using-topics-to-segregate-encodings-yet-only-support-one-encoding) - [Why are we using topics to segregate encodings, yet only support one encoding?](#why-are-we-using-topics-to-segregate-encodings-yet-only-support-one-encoding)
@ -120,46 +107,22 @@ It consists of four main sections:
This section outlines the specification for the networking stack in Ethereum 2.0 clients. This section outlines the specification for the networking stack in Ethereum 2.0 clients.
Sections that have differing parameters for mainnet launch and interoperability testing are split into subsections. Sections that are not split have the same parameters for interoperability testing as mainnet launch.
## Transport ## Transport
Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability. Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability.
#### Interop
All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously). All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously).
To facilitate connectivity and avert possible IPv6 routability/support issues, clients participating in the interoperability testnet MUST expose at least ONE IPv4 endpoint. Clients must support listening on at least one of IPv4 or IPv6. Clients that do _not_ have support for listening on IPv4 SHOULD be cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST be capable of dialing both IPv4 and IPv6 addresses.
All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities. All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities. (Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined soon.)
Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint. Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint.
#### Mainnet
All requirements from the interoperability testnet apply, except for the IPv4 addressing scheme requirement.
At this stage, clients are licensed to drop IPv4 support if they wish to do so, cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST retain capability to dial both IPv4 and IPv6 addresses.
Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined closer to the time.
## Encryption and identification ## Encryption and identification
#### Interop
[SecIO](https://github.com/libp2p/specs/tree/master/secio) with `secp256k1` identities will be used for initial interoperability testing.
The following SecIO parameters MUST be supported by all stacks:
- Key agreement: ECDH-P256.
- Cipher: AES-128.
- Digest: SHA-256.
#### Mainnet
The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure
channel handshake with `secp256k1` identities will be used for mainnet. channel handshake with `secp256k1` identities will be used for encryption.
As specified in the libp2p specification, clients MUST support the `XX` handshake pattern. As specified in the libp2p specification, clients MUST support the `XX` handshake pattern.
@ -167,13 +130,7 @@ As specified in the libp2p specification, clients MUST support the `XX` handshak
Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers. Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers.
#### Interop Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when the spec solidifies. Once all clients have implementations for multiselect 2.0, multistream-select 1.0 MAY be phased out.
Connection-level and stream-level (see the [Rationale](#design-decision-rationale) section below for explanations) protocol negotiation MUST be conducted using [multistream-select v1.0](https://github.com/multiformats/multistream-select/). Its protocol ID is: `/multistream/1.0.0`.
#### Mainnet
Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95). Depending on the number of clients that have implementations for multiselect 2.0 by mainnet, [multistream-select 1.0](https://github.com/multiformats/multistream-select/) may be phased out.
## Multiplexing ## Multiplexing
@ -181,7 +138,7 @@ During connection bootstrapping, libp2p dynamically negotiates a mutually suppor
Two multiplexers are commonplace in libp2p implementations: [mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`. Two multiplexers are commonplace in libp2p implementations: [mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`.
Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux must take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs. Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux MUST take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs.
# Eth2 network interaction domains # Eth2 network interaction domains
@ -265,7 +222,6 @@ The payload is carried in the `data` field of a gossipsub message, and varies de
|------------------------------------------------|-------------------------| |------------------------------------------------|-------------------------|
| beacon_block | SignedBeaconBlock | | beacon_block | SignedBeaconBlock |
| beacon_aggregate_and_proof | SignedAggregateAndProof | | beacon_aggregate_and_proof | SignedAggregateAndProof |
| beacon_attestation\* | Attestation |
| committee_index{subnet_id}\_beacon_attestation | Attestation | | committee_index{subnet_id}\_beacon_attestation | Attestation |
| voluntary_exit | SignedVoluntaryExit | | voluntary_exit | SignedVoluntaryExit |
| proposer_slashing | ProposerSlashing | | proposer_slashing | ProposerSlashing |
@ -275,8 +231,6 @@ Clients MUST reject (fail validation) messages containing an incorrect type, or
When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints. When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints.
\* The `beacon_attestation` topic is only for interop and will be removed prior to mainnet.
#### Global topics #### Global topics
There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `Name`s are: There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `Name`s are:
@ -289,7 +243,7 @@ There are two primary global topics used to propagate beacon blocks and aggregat
- The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the block MAY be queued for later processing while proposers for the block's branch are calculated. - The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the block MAY be queued for later processing while proposers for the block's branch are calculated.
- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network. (We define the following for convenience -- `aggregate_and_proof = signed_aggregate_and_proof.message` and `aggregate = aggregate_and_proof.aggregate`) - `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network. (We define the following for convenience -- `aggregate_and_proof = signed_aggregate_and_proof.message` and `aggregate = aggregate_and_proof.aggregate`)
- `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot). - `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot).
- The aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally). - The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
- The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`. - The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
- The block being voted for (`aggregate.data.beacon_block_root`) passes validation. - The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
- `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`. - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
@ -323,11 +277,7 @@ Attestation subnets are used to propagate unaggregated attestations to subsectio
- The block being voted for (`attestation.data.beacon_block_root`) passes validation. - The block being voted for (`attestation.data.beacon_block_root`) passes validation.
- The signature of `attestation` is valid. - The signature of `attestation` is valid.
#### Interop #### Attestations and Aggregation
Unaggregated and aggregated attestations from all shards are sent as `Attestation`s to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing.
#### Mainnet
Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. For the `committee_index{subnet_id}_beacon_attestation` topics, `subnet_id` is set to `index % ATTESTATION_SUBNET_COUNT`, where `index` is the `CommitteeIndex` of the given committee. Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. For the `committee_index{subnet_id}_beacon_attestation` topics, `subnet_id` is set to `index % ATTESTATION_SUBNET_COUNT`, where `index` is the `CommitteeIndex` of the given committee.
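A small sketch of the subnet mapping described above (the topic layout `/eth2/ForkDigestValue/Name/Encoding` is defined earlier in this document; the function name here is illustrative):

```python
ATTESTATION_SUBNET_COUNT = 64

def attestation_subnet_topic(fork_digest_hex: str, committee_index: int) -> str:
    # Map a committee index onto one of the fixed attestation subnets.
    subnet_id = committee_index % ATTESTATION_SUBNET_COUNT
    return f"/eth2/{fork_digest_hex}/committee_index{subnet_id}_beacon_attestation/ssz_snappy"

print(attestation_subnet_topic("446a7232", committee_index=70))
# /eth2/446a7232/committee_index6_beacon_attestation/ssz_snappy
```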
@ -339,17 +289,11 @@ Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `A
Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded. Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded.
#### Interop
- `ssz` - All objects are [SSZ-encoded](#ssz-encoding). Example: The beacon block topic string is `/eth2/beacon_block/ssz`, and the data field of a gossipsub message is an ssz-encoded `SignedBeaconBlock`.
#### Mainnet
- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) block compression. Example: The beacon aggregate attestation topic string is `/eth2/446a7232/beacon_aggregate_and_proof/ssz_snappy`, the fork digest is `446a7232` and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy. - `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) block compression. Example: The beacon aggregate attestation topic string is `/eth2/446a7232/beacon_aggregate_and_proof/ssz_snappy`, the fork digest is `446a7232` and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy.
Snappy has two formats: "block" and "frames" (streaming). Gossip messages remain relatively small (100s of bytes to 100s of kilobytes) so [basic snappy block compression](https://github.com/google/snappy/blob/master/format_description.txt) is used to avoid the additional overhead associated with snappy frames. Snappy has two formats: "block" and "frames" (streaming). Gossip messages remain relatively small (100s of bytes to 100s of kilobytes) so [basic snappy block compression](https://github.com/google/snappy/blob/master/format_description.txt) is used to avoid the additional overhead associated with snappy frames.
Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations. Implementations MUST use a single encoding for gossip. Changing an encoding will require coordination between participating implementations.
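A minimal encoding sketch (assuming the `eth2spec` package and the `python-snappy` library; the object here is default-initialized purely for illustration):

```python
import snappy                                       # python-snappy: block-format compress/decompress
from eth2spec.phase0 import spec
from eth2spec.utils.ssz.ssz_impl import serialize

block = spec.SignedBeaconBlock()                    # a default-initialized block, for illustration
message_data = snappy.compress(serialize(block))    # SSZ-encode, then snappy *block* compression
assert snappy.decompress(message_data) == serialize(block)
```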
## The Req/Resp domain ## The Req/Resp domain
@ -450,7 +394,7 @@ Here, `result` represents the 1-byte response code.
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time: The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time:
- `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s. - `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s.
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) frames compression. MAY be supported in the interoperability testnet; MUST be supported in mainnet. - `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) frames compression. This encoding type MUST be supported by all clients.
#### SSZ-encoding strategy (with or without Snappy) #### SSZ-encoding strategy (with or without Snappy)
@ -683,7 +627,7 @@ The response MUST consist of a single `response_chunk`.
## The discovery domain: discv5 ## The discovery domain: discv5
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery, both in the interoperability testnet and mainnet. Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery.
`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context. `discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.
@ -723,15 +667,7 @@ If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the `a
If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely. If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely.
#### Interop #### `eth2` field
In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry Eth2 capability information, as it would be superfluous.
Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an Eth2 peer, in order to eschew connecting to Eth 1.0 peers.
#### Mainnet
##### `eth2` field
ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current fork digest, next fork version, and next fork epoch to ensure connections are made with peers on the intended eth2 network. ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current fork digest, next fork version, and next fork epoch to ensure connections are made with peers on the intended eth2 network.
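The 16-byte size follows from the `ENRForkID` fields being fixed-size SSZ types (a quick check; field types as defined in this spec):

```python
FORK_DIGEST_BYTES = 4   # fork_digest: ForkDigest (Bytes4)
VERSION_BYTES = 4       # next_fork_version: Version (Bytes4)
EPOCH_BYTES = 8         # next_fork_epoch: Epoch (uint64)
assert FORK_DIGEST_BYTES + VERSION_BYTES + EPOCH_BYTES == 16
```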
@ -763,14 +699,12 @@ Clients SHOULD connect to peers with `fork_digest`, `next_fork_version`, and `ne
Clients MAY connect to peers with the same `fork_digest` but a different `next_fork_version`/`next_fork_epoch`. Unless `ENRForkID` is manually updated to matching prior to the earlier `next_fork_epoch` of the two clients, these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`. Clients MAY connect to peers with the same `fork_digest` but a different `next_fork_version`/`next_fork_epoch`. Unless `ENRForkID` is manually updated to matching prior to the earlier `next_fork_epoch` of the two clients, these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.
##### General capabilities #### General capabilities
On mainnet, ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability. ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability.
### Topic advertisement ### Topic advertisement
#### Mainnet
discv5's topic advertisement feature is not expected to be ready for mainnet launch of Phase 0. discv5's topic advertisement feature is not expected to be ready for mainnet launch of Phase 0.
Once this feature is built out and stable, we expect to use topic advertisement as a rendezvous facility for peers on shards. Until then, the ENR [attestation subnet bitfield](#attestation-subnet-bitfield) will be used for discovery of peers on particular subnets. Once this feature is built out and stable, we expect to use topic advertisement as a rendezvous facility for peers on shards. Until then, the ENR [attestation subnet bitfield](#attestation-subnet-bitfield) will be used for discovery of peers on particular subnets.
@ -816,7 +750,7 @@ Modeling for upgradeability and dynamic transport selection from the get-go lays
Clients can adopt new transports without breaking old ones, and the multi-transport ability enables constrained and sandboxed environments (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers. Clients can adopt new transports without breaking old ones, and the multi-transport ability enables constrained and sandboxed environments (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers.
### Why are we not using QUIC for mainnet from the start? ### Why are we not using QUIC from the start?
The QUIC standard is still not finalized (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic). The QUIC standard is still not finalized (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic).
@ -832,13 +766,13 @@ Overlay multiplexers are not necessary with QUIC since the protocol provides nat
## Protocol Negotiation ## Protocol Negotiation
### When is multiselect 2.0 due and why are we using it for mainnet? ### When is multiselect 2.0 due and why do we plan to migrate to it?
multiselect 2.0 is currently being conceptualized. The debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded—as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system. multiselect 2.0 is currently being conceptualized. The debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded—as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system.
In the following weeks (August 2019), there will be a renewed initiative to first define the requirements, constraints, assumptions, and features, in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation. At some point in 2020, we expect a renewed initiative to first define the requirements, constraints, assumptions, and features, in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation.
We plan to use multiselect 2.0 for mainnet because it will: We plan to eventually migrate to multiselect 2.0 because it will:
1. Reduce round trips during connection bootstrapping and stream protocol negotiation. 1. Reduce round trips during connection bootstrapping and stream protocol negotiation.
2. Enable efficient one-stream-per-request interaction patterns. 2. Enable efficient one-stream-per-request interaction patterns.
@ -860,17 +794,15 @@ At present, multistream-select 1.0 is used for both types of negotiation, but mu
## Encryption ## Encryption
### Why are we using SecIO for interop? Why not for mainnet? ### Why are we not supporting SecIO?
SecIO has been the default encryption layer for libp2p for years. It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale. SecIO has been the default encryption layer for libp2p for years. It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale.
SecIO is the common denominator across the various language libraries at this stage. It is widely implemented. That's why we have chosen to use it for initial interop to minimize overhead in getting to a basic interoperability testnet. Although SecIO has wide language support, we won't be using it for mainnet because, amongst other things, it requires several round trips to be sound, and doesn't support early data (0-RTT data), a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping.
We won't be using it for mainnet because, amongst other things, it requires several round trips to be sound, and doesn't support early data (0-RTT data), a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping.
SecIO is not considered secure for the purposes of this spec. SecIO is not considered secure for the purposes of this spec.
### Why are we using Noise/TLS 1.3 for mainnet? ### Why are we using Noise/TLS 1.3?
Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org): Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org):
@ -896,10 +828,6 @@ Transport level encryption secures message exchange and provides properties that
Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the applications use cases (e.g. signatures, randomness, etc.). Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the applications use cases (e.g. signatures, randomness, etc.).
### Will mainnet networking be untested when it launches?
Before launching mainnet, the testnet will be switched over to mainnet networking parameters, including Noise handshakes and other new protocols. This gives us an opportunity to drill coordinated network upgrades and to verify that there are no significant upgradeability gaps.
## Gossipsub ## Gossipsub
### Why are we using a pub/sub algorithm for block and attestation propagation? ### Why are we using a pub/sub algorithm for block and attestation propagation?
@ -1008,7 +936,7 @@ Requests are segregated by protocol ID to:
2. Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2). 2. Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2).
7. Allow us to simplify the payload of requests. Request-ids and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework. 7. Allow us to simplify the payload of requests. Request-ids and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework.
**Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. Multiselect 2.0 will remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to hinder interop testing. More info is to be released from the libp2p community in the coming weeks. **Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. Multiselect 2.0 will eventually remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to significantly hinder this domain.
### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding? ### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?
View File
@ -281,8 +281,8 @@ def voting_period_start_time(state: BeaconState) -> uint64:
```python ```python
def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool: def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool:
return ( return (
block.timestamp <= period_start - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= period_start
and block.timestamp >= period_start - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 and block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 >= period_start
) )
``` ```
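The rewritten comparison keeps the same window (between `ETH1_FOLLOW_DISTANCE` and `2 * ETH1_FOLLOW_DISTANCE` blocks' worth of seconds behind the voting period start) but moves the follow-distance term onto the timestamp side, so no intermediate subtraction can go below zero on unsigned integers. A minimal standalone sketch of the same check, assuming illustrative constant values:
```python
# Standalone sketch (not part of the spec): the candidate window with plain Python ints.
SECONDS_PER_ETH1_BLOCK = 14    # illustrative value
ETH1_FOLLOW_DISTANCE = 2**10   # illustrative value (1,024 blocks)


def is_candidate_timestamp(timestamp: int, period_start: int) -> bool:
    follow = SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE
    # Equivalent to: period_start - 2 * follow <= timestamp <= period_start - follow,
    # rearranged so nothing is subtracted from period_start.
    return (timestamp + follow <= period_start
            and timestamp + 2 * follow >= period_start)


period_start = 1_600_000_000
assert is_candidate_timestamp(period_start - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE, period_start)
assert not is_candidate_timestamp(period_start, period_start)  # too recent to be a candidate
```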
@ -340,9 +340,10 @@ It is useful to be able to run a state transition function (working on a copy of
```python ```python
def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root: def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
process_slots(state, block.slot) temp_state: BeaconState = state.copy()
process_block(state, block) signed_block = SignedBeaconBlock(message=block)
return hash_tree_root(state) temp_state = state_transition(temp_state, signed_block, validate_result=False)
return hash_tree_root(temp_state)
``` ```
##### Signature ##### Signature
@ -350,9 +351,9 @@ def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
`signed_block = SignedBeaconBlock(message=block, signature=block_signature)`, where `block_signature` is obtained from: `signed_block = SignedBeaconBlock(message=block, signature=block_signature)`, where `block_signature` is obtained from:
```python ```python
def get_block_signature(state: BeaconState, header: BeaconBlockHeader, privkey: int) -> BLSSignature: def get_block_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature:
domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(header.slot)) domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(block.slot))
signing_root = compute_signing_root(header, domain) signing_root = compute_signing_root(block, domain)
return bls.Sign(privkey, signing_root) return bls.Sign(privkey, signing_root)
``` ```
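Taken together with `compute_new_state_root` above, a proposer would typically fill in the block's state root first and only then sign. A hedged sketch of that flow; `state`, `block`, and `privkey` are assumed to come from the validator's context as elsewhere in this document:
```python
# Illustrative proposer flow only, not a normative addition to the spec.
block.state_root = compute_new_state_root(state, block)       # post-state root computed on a copy
block_signature = get_block_signature(state, block, privkey)  # sign the completed block
signed_block = SignedBeaconBlock(message=block, signature=block_signature)
```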
@ -524,6 +525,8 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th
* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets * Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets
* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR * Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR
*Note*: Short-lived beacon committee assignments should not be added into the ENR `attnets` entry.
*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements. *Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
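The random-subnet rotation described in the bullets above can be summarized with a short standalone sketch; the constant values and the use of Python's `random` module are illustrative, not normative:
```python
# Standalone sketch (not part of the spec): pick a persistent subnet and a random lifetime
# in [EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION, 2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION).
import random
from typing import Tuple

ATTESTATION_SUBNET_COUNT = 64                 # illustrative value
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 256   # illustrative value


def new_random_subnet_subscription(current_epoch: int) -> Tuple[int, int]:
    subnet_id = random.randrange(ATTESTATION_SUBNET_COUNT)
    lifetime = EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION + random.randrange(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION)
    expiry_epoch = current_epoch + lifetime
    # The caller would also set ENR["attnets"][subnet_id] = True and publish the updated ENR.
    return subnet_id, expiry_epoch
```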
## How to avoid slashing ## How to avoid slashing
View File
@ -16,43 +16,45 @@
- [Extended `AttestationData`](#extended-attestationdata) - [Extended `AttestationData`](#extended-attestationdata)
- [Extended `Attestation`](#extended-attestation) - [Extended `Attestation`](#extended-attestation)
- [Extended `PendingAttestation`](#extended-pendingattestation) - [Extended `PendingAttestation`](#extended-pendingattestation)
- [`IndexedAttestation`](#indexedattestation) - [Extended `IndexedAttestation`](#extended-indexedattestation)
- [Extended `AttesterSlashing`](#extended-attesterslashing) - [Extended `AttesterSlashing`](#extended-attesterslashing)
- [Extended `Validator`](#extended-validator) - [Extended `Validator`](#extended-validator)
- [Extended `BeaconBlockBody`](#extended-beaconblockbody) - [Extended `BeaconBlockBody`](#extended-beaconblockbody)
- [Extended `BeaconBlock`](#extended-beaconblock) - [Extended `BeaconBlock`](#extended-beaconblock)
- [Extended `SignedBeaconBlock`](#extended-signedbeaconblock) - [Extended `SignedBeaconBlock`](#extended-signedbeaconblock)
- [Extended `BeaconState`](#extended-beaconstate) - [Extended `BeaconState`](#extended-beaconstate)
- [New containers](#new-containers) - [New containers](#new-containers)
- [`ShardBlockWrapper`](#shardblockwrapper) - [`ShardBlock`](#shardblock)
- [`ShardSignableHeader`](#shardsignableheader) - [`SignedShardBlock`](#signedshardblock)
- [`ShardBlockHeader`](#shardblockheader)
- [`ShardState`](#shardstate) - [`ShardState`](#shardstate)
- [`ShardTransition`](#shardtransition) - [`ShardTransition`](#shardtransition)
- [`CompactCommittee`](#compactcommittee) - [`CompactCommittee`](#compactcommittee)
- [`AttestationCustodyBitWrapper`](#attestationcustodybitwrapper) - [`AttestationCustodyBitWrapper`](#attestationcustodybitwrapper)
- [Helper functions](#helper-functions) - [Helper functions](#helper-functions)
- [Misc](#misc-1) - [Misc](#misc-1)
- [`get_previous_slot`](#get_previous_slot) - [`compute_previous_slot`](#compute_previous_slot)
- [`pack_compact_validator`](#pack_compact_validator) - [`pack_compact_validator`](#pack_compact_validator)
- [`unpack_compact_validator`](#unpack_compact_validator) - [`unpack_compact_validator`](#unpack_compact_validator)
- [`committee_to_compact_committee`](#committee_to_compact_committee) - [`committee_to_compact_committee`](#committee_to_compact_committee)
- [`compute_shard_from_committee_index`](#compute_shard_from_committee_index) - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
- [`compute_offset_slots`](#compute_offset_slots) - [`compute_offset_slots`](#compute_offset_slots)
- [`compute_updated_gasprice`](#compute_updated_gasprice)
- [Beacon state accessors](#beacon-state-accessors) - [Beacon state accessors](#beacon-state-accessors)
- [`get_active_shard_count`](#get_active_shard_count) - [`get_active_shard_count`](#get_active_shard_count)
- [`get_online_validator_indices`](#get_online_validator_indices) - [`get_online_validator_indices`](#get_online_validator_indices)
- [`get_shard_committee`](#get_shard_committee) - [`get_shard_committee`](#get_shard_committee)
- [`get_shard_proposer_index`](#get_shard_proposer_index)
- [`get_light_client_committee`](#get_light_client_committee) - [`get_light_client_committee`](#get_light_client_committee)
- [`get_shard_proposer_index`](#get_shard_proposer_index)
- [`get_indexed_attestation`](#get_indexed_attestation) - [`get_indexed_attestation`](#get_indexed_attestation)
- [`get_updated_gasprice`](#get_updated_gasprice)
- [`get_start_shard`](#get_start_shard) - [`get_start_shard`](#get_start_shard)
- [`get_shard`](#get_shard) - [`get_shard`](#get_shard)
- [`get_latest_slot_for_shard`](#get_latest_slot_for_shard) - [`get_latest_slot_for_shard`](#get_latest_slot_for_shard)
- [`get_offset_slots`](#get_offset_slots) - [`get_offset_slots`](#get_offset_slots)
- [Predicates](#predicates) - [Predicates](#predicates)
- [`is_winning_attestation`](#is_winning_attestation)
- [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation) - [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation)
- [`is_shard_attestation`](#is_shard_attestation)
- [`is_winning_attestation`](#is_winning_attestation)
- [Block processing](#block-processing) - [Block processing](#block-processing)
- [Operations](#operations) - [Operations](#operations)
- [New Attestation processing](#new-attestation-processing) - [New Attestation processing](#new-attestation-processing)
@ -98,7 +100,6 @@ Configuration is not namespaced. Instead it is strictly an extension;
| `ONLINE_PERIOD` | `OnlineEpochs(2**3)` (= 8) | online epochs | ~51 min | | `ONLINE_PERIOD` | `OnlineEpochs(2**3)` (= 8) | online epochs | ~51 min |
| `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) | | `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) |
| `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours | | `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
| `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
| `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | | | `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | |
| `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | | | `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | |
| `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | | | `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | |
@ -152,7 +153,7 @@ class PendingAttestation(Container):
crosslink_success: boolean crosslink_success: boolean
``` ```
### `IndexedAttestation` ### Extended `IndexedAttestation`
```python ```python
class IndexedAttestation(Container): class IndexedAttestation(Container):
@ -160,7 +161,7 @@ class IndexedAttestation(Container):
attestation: Attestation attestation: Attestation
``` ```
#### Extended `AttesterSlashing` ### Extended `AttesterSlashing`
Note that the `attestation_1` and `attestation_2` have a new `IndexedAttestation` definition. Note that the `attestation_1` and `attestation_2` have a new `IndexedAttestation` definition.
@ -291,26 +292,33 @@ class BeaconState(Container):
The following containers are new in Phase 1. The following containers are new in Phase 1.
### `ShardBlockWrapper` ### `ShardBlock`
_Wrapper for being broadcasted over the network._
```python ```python
class ShardBlockWrapper(Container): class ShardBlock(Container):
shard_parent_root: Root shard_parent_root: Root
beacon_parent_root: Root beacon_parent_root: Root
slot: Slot slot: Slot
proposer_index: ValidatorIndex
body: ByteList[MAX_SHARD_BLOCK_SIZE] body: ByteList[MAX_SHARD_BLOCK_SIZE]
```
### `SignedShardBlock`
```python
class SignedShardBlock(Container):
message: ShardBlock
signature: BLSSignature signature: BLSSignature
``` ```
### `ShardSignableHeader` ### `ShardBlockHeader`
```python ```python
class ShardSignableHeader(Container): class ShardBlockHeader(Container):
shard_parent_root: Root shard_parent_root: Root
beacon_parent_root: Root beacon_parent_root: Root
slot: Slot slot: Slot
proposer_index: ValidatorIndex
body_root: Root body_root: Root
``` ```
@ -320,7 +328,7 @@ class ShardSignableHeader(Container):
class ShardState(Container): class ShardState(Container):
slot: Slot slot: Slot
gasprice: Gwei gasprice: Gwei
data: Bytes32 transition_digest: Bytes32
latest_block_root: Root latest_block_root: Root
``` ```
@ -361,10 +369,10 @@ class AttestationCustodyBitWrapper(Container):
### Misc ### Misc
#### `get_previous_slot` #### `compute_previous_slot`
```python ```python
def get_previous_slot(slot: Slot) -> Slot: def compute_previous_slot(slot: Slot) -> Slot:
if slot > 0: if slot > 0:
return Slot(slot - 1) return Slot(slot - 1)
else: else:
@ -431,6 +439,20 @@ def compute_offset_slots(start_slot: Slot, end_slot: Slot) -> Sequence[Slot]:
return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < end_slot] return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < end_slot]
``` ```
#### `compute_updated_gasprice`
```python
def compute_updated_gasprice(prev_gasprice: Gwei, length: uint8) -> Gwei:
if length > TARGET_SHARD_BLOCK_SIZE:
delta = (prev_gasprice * (length - TARGET_SHARD_BLOCK_SIZE)
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
return min(prev_gasprice + delta, MAX_GASPRICE)
else:
delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - length)
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
return max(prev_gasprice, MIN_GASPRICE + delta) - delta
```
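The adjustment behaves like an EIP-1559-style fee update: a block at twice the target raises the gasprice by roughly `1 / GASPRICE_ADJUSTMENT_COEFFICIENT`, an empty block lowers it by the same fraction, and the result is clamped to `[MIN_GASPRICE, MAX_GASPRICE]`. A standalone worked example, assuming the usual constant values:
```python
# Standalone worked example with plain ints (constant values are illustrative).
TARGET_SHARD_BLOCK_SIZE = 2**18          # 262,144 bytes
GASPRICE_ADJUSTMENT_COEFFICIENT = 2**3   # 8
MAX_GASPRICE, MIN_GASPRICE = 2**14, 2**5


def updated_gasprice(prev: int, length: int) -> int:
    if length > TARGET_SHARD_BLOCK_SIZE:
        delta = prev * (length - TARGET_SHARD_BLOCK_SIZE) // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT
        return min(prev + delta, MAX_GASPRICE)
    delta = prev * (TARGET_SHARD_BLOCK_SIZE - length) // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT
    return max(prev, MIN_GASPRICE + delta) - delta


assert updated_gasprice(1000, 2 * TARGET_SHARD_BLOCK_SIZE) == 1125  # full block: +1/8
assert updated_gasprice(1000, 0) == 875                             # empty block: -1/8
assert updated_gasprice(MIN_GASPRICE, 0) == MIN_GASPRICE            # clamped at the floor
```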
### Beacon state accessors ### Beacon state accessors
#### `get_active_shard_count` #### `get_active_shard_count`
@ -452,12 +474,35 @@ def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]:
```python ```python
def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]: def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
source_epoch = epoch - epoch % SHARD_COMMITTEE_PERIOD source_epoch = epoch - epoch % SHARD_COMMITTEE_PERIOD
if source_epoch > 0: if source_epoch > 0:
source_epoch -= SHARD_COMMITTEE_PERIOD source_epoch -= SHARD_COMMITTEE_PERIOD
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch) active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE) seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE)
return compute_committee(active_validator_indices, seed, shard, get_active_shard_count(beacon_state)) active_shard_count = get_active_shard_count(beacon_state)
return compute_committee(
indices=active_validator_indices,
seed=seed,
index=shard,
count=active_shard_count,
)
```
#### `get_light_client_committee`
```python
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
source_epoch = epoch - epoch % LIGHT_CLIENT_COMMITTEE_PERIOD
if source_epoch > 0:
source_epoch -= LIGHT_CLIENT_COMMITTEE_PERIOD
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT)
return compute_committee(
indices=active_validator_indices,
seed=seed,
index=0,
count=get_active_shard_count(beacon_state),
)[:TARGET_COMMITTEE_SIZE]
``` ```
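Both committee accessors derive their membership from a source epoch that only changes once per period, so a committee is stable for a full `SHARD_COMMITTEE_PERIOD` (resp. `LIGHT_CLIENT_COMMITTEE_PERIOD`) and is computable one period in advance. A standalone sketch of that arithmetic, with an illustrative period value:
```python
# Standalone sketch (not part of the spec) of the source-epoch look-back.
SHARD_COMMITTEE_PERIOD = 256  # illustrative value


def committee_source_epoch(epoch: int, period: int) -> int:
    source_epoch = epoch - epoch % period   # start of the current period
    if source_epoch > 0:
        source_epoch -= period              # step back one full period for look-ahead
    return source_epoch


assert committee_source_epoch(1000, SHARD_COMMITTEE_PERIOD) == 512
assert committee_source_epoch(1023, SHARD_COMMITTEE_PERIOD) == 512  # same committee for the whole period
assert committee_source_epoch(100, SHARD_COMMITTEE_PERIOD) == 0     # genesis-period special case
```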
#### `get_shard_proposer_index` #### `get_shard_proposer_index`
@ -469,19 +514,6 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard
return committee[r % len(committee)] return committee[r % len(committee)]
``` ```
#### `get_light_client_committee`
```python
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
source_epoch = epoch - epoch % LIGHT_CLIENT_COMMITTEE_PERIOD
if source_epoch > 0:
source_epoch -= LIGHT_CLIENT_COMMITTEE_PERIOD
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT)
active_shards = get_active_shard_count(beacon_state)
return compute_committee(active_validator_indices, seed, 0, active_shards)[:TARGET_COMMITTEE_SIZE]
```
#### `get_indexed_attestation` #### `get_indexed_attestation`
```python ```python
@ -493,20 +525,6 @@ def get_indexed_attestation(beacon_state: BeaconState, attestation: Attestation)
) )
``` ```
#### `get_updated_gasprice`
```python
def get_updated_gasprice(prev_gasprice: Gwei, length: uint8) -> Gwei:
if length > TARGET_SHARD_BLOCK_SIZE:
delta = (prev_gasprice * (length - TARGET_SHARD_BLOCK_SIZE)
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
return min(prev_gasprice + delta, MAX_GASPRICE)
else:
delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - length)
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
return max(prev_gasprice, MIN_GASPRICE + delta) - delta
```
#### `get_start_shard` #### `get_start_shard`
```python ```python
@ -538,24 +556,6 @@ def get_offset_slots(state: BeaconState, shard: Shard) -> Sequence[Slot]:
### Predicates ### Predicates
#### `is_winning_attestation`
```python
def is_winning_attestation(state: BeaconState,
attestation: PendingAttestation,
committee_index: CommitteeIndex,
winning_root: Root) -> bool:
"""
Check if ``attestation`` helped contribute to the successful crosslink of
``winning_root`` formed by ``committee_index`` committee at the current slot.
"""
return (
attestation.slot == state.slot
and attestation.data.index == committee_index
and attestation.data.shard_transition_root == winning_root
)
```
#### Updated `is_valid_indexed_attestation` #### Updated `is_valid_indexed_attestation`
Note that this replaces the Phase 0 `is_valid_indexed_attestation`. Note that this replaces the Phase 0 `is_valid_indexed_attestation`.
@ -599,6 +599,39 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
return bls.AggregateVerify(zip(all_pubkeys, all_signing_roots), signature=attestation.signature) return bls.AggregateVerify(zip(all_pubkeys, all_signing_roots), signature=attestation.signature)
``` ```
#### `is_shard_attestation`
```python
def is_shard_attestation(state: BeaconState,
attestation: Attestation,
committee_index: CommitteeIndex) -> bool:
if not (
attestation.data.index == committee_index
and attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot # Must be on-time attestation
# TODO: MIN_ATTESTATION_INCLUSION_DELAY should always be 1
):
return False
return True
```
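In other words, with `MIN_ATTESTATION_INCLUSION_DELAY = 1`, an attestation for slot `N` only counts as a shard (on-time) attestation when it is included in the block at slot `N + 1`. A trivial standalone check, with the constant value assumed:
```python
# Standalone sketch of the on-time condition (constant value assumed to be 1).
MIN_ATTESTATION_INCLUSION_DELAY = 1


def is_on_time(attestation_slot: int, state_slot: int) -> bool:
    return attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY == state_slot


assert is_on_time(9, 10)       # attested at slot 9, included at slot 10
assert not is_on_time(9, 11)   # included a slot late, so not treated as a shard attestation
```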
#### `is_winning_attestation`
```python
def is_winning_attestation(state: BeaconState,
attestation: PendingAttestation,
committee_index: CommitteeIndex,
winning_root: Root) -> bool:
"""
Check if ``attestation`` helped contribute to the successful crosslink of
``winning_root`` formed by ``committee_index`` committee at the current slot.
"""
return (
attestation.data.slot == state.slot
and attestation.data.index == committee_index
and attestation.data.shard_transition_root == winning_root
)
```
### Block processing ### Block processing
@ -607,12 +640,11 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
process_block_header(state, block) process_block_header(state, block)
process_randao(state, block.body) process_randao(state, block.body)
process_eth1_data(state, block.body) process_eth1_data(state, block.body)
verify_shard_transition_false_positives(state, block.body)
process_light_client_signatures(state, block.body) process_light_client_signatures(state, block.body)
process_operations(state, block.body) process_operations(state, block.body)
verify_shard_transition_false_positives(state, block.body)
``` ```
#### Operations #### Operations
```python ```python
@ -669,7 +701,7 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
# Correct data root count # Correct data root count
assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard)) assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard))
# Correct parent block root # Correct parent block root
assert data.beacon_block_root == get_block_root_at_slot(state, get_previous_slot(state.slot)) assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))
# Type 2: no shard transition, no custody bits # Type 2: no shard transition, no custody bits
else: else:
# Ensure delayed attestation # Ensure delayed attestation
@ -685,6 +717,9 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
```python ```python
def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTransition) -> None: def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTransition) -> None:
# TODO: only need to check it once when phase 1 starts
assert state.slot > PHASE_1_GENESIS_SLOT
# Correct data root count # Correct data root count
offset_slots = get_offset_slots(state, shard) offset_slots = get_offset_slots(state, shard)
assert ( assert (
@ -695,28 +730,32 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
) )
assert transition.start_slot == offset_slots[0] assert transition.start_slot == offset_slots[0]
# Reconstruct shard headers
headers = [] headers = []
proposers = [] proposers = []
prev_gasprice = state.shard_states[shard].gasprice
shard_parent_root = state.shard_states[shard].latest_block_root shard_parent_root = state.shard_states[shard].latest_block_root
for i in range(len(offset_slots)): for i in range(len(offset_slots)):
if any(transition.shard_data_roots): shard_block_length = transition.shard_block_lengths[i]
headers.append(ShardSignableHeader( shard_state = transition.shard_states[i]
# Verify correct calculation of gas prices and slots
assert shard_state.gasprice == compute_updated_gasprice(prev_gasprice, shard_block_length)
assert shard_state.slot == offset_slots[i]
# Collect the non-empty proposals result
is_empty_proposal = shard_block_length == 0
if not is_empty_proposal:
proposal_index = get_shard_proposer_index(state, offset_slots[i], shard)
# Reconstruct shard headers
header = ShardBlockHeader(
shard_parent_root=shard_parent_root, shard_parent_root=shard_parent_root,
parent_hash=get_block_root_at_slot(state, get_previous_slot(state.slot)), beacon_parent_root=get_block_root_at_slot(state, offset_slots[i]),
proposer_index=proposal_index,
slot=offset_slots[i], slot=offset_slots[i],
body_root=transition.shard_data_roots[i] body_root=transition.shard_data_roots[i]
)) )
proposers.append(get_shard_proposer_index(state, shard, offset_slots[i])) shard_parent_root = hash_tree_root(header)
shard_parent_root = hash_tree_root(headers[-1]) headers.append(header)
proposers.append(proposal_index)
# Verify correct calculation of gas prices and slots
prev_gasprice = state.shard_states[shard].gasprice
for i in range(len(offset_slots)):
shard_state = transition.shard_states[i]
block_length = transition.shard_block_lengths[i]
assert shard_state.gasprice == get_updated_gasprice(prev_gasprice, block_length)
assert shard_state.slot == offset_slots[i]
prev_gasprice = shard_state.gasprice prev_gasprice = shard_state.gasprice
pubkeys = [state.validators[proposer].pubkey for proposer in proposers] pubkeys = [state.validators[proposer].pubkey for proposer in proposers]
@ -728,7 +767,7 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
assert bls.AggregateVerify(zip(pubkeys, signing_roots), signature=transition.proposer_signature_aggregate) assert bls.AggregateVerify(zip(pubkeys, signing_roots), signature=transition.proposer_signature_aggregate)
# Save updated state # Save updated state
state.shard_states[shard] = transition.shard_states[-1] state.shard_states[shard] = transition.shard_states[len(transition.shard_states) - 1]
state.shard_states[shard].slot = state.slot - 1 state.shard_states[shard].slot = state.slot - 1
``` ```
@ -751,6 +790,9 @@ def process_crosslink_for_shard(state: BeaconState,
for attestation in transition_attestations: for attestation in transition_attestations:
participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits) participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
transition_participants = transition_participants.union(participants) transition_participants = transition_participants.union(participants)
assert attestation.data.head_shard_root == shard_transition.shard_data_roots[
len(shard_transition.shard_data_roots) - 1
]
enough_online_stake = ( enough_online_stake = (
get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >= get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >=
@ -762,7 +804,6 @@ def process_crosslink_for_shard(state: BeaconState,
# Attestation <-> shard transition consistency # Attestation <-> shard transition consistency
assert shard_transition_root == hash_tree_root(shard_transition) assert shard_transition_root == hash_tree_root(shard_transition)
assert attestation.data.head_shard_root == shard_transition.shard_data_roots[-1]
# Apply transition # Apply transition
apply_shard_transition(state, shard, shard_transition) apply_shard_transition(state, shard, shard_transition)
@ -773,11 +814,11 @@ def process_crosslink_for_shard(state: BeaconState,
increase_balance(state, beacon_proposer_index, proposer_reward) increase_balance(state, beacon_proposer_index, proposer_reward)
states_slots_lengths = zip( states_slots_lengths = zip(
shard_transition.shard_states, shard_transition.shard_states,
get_offset_slots(state, get_latest_slot_for_shard(state, shard)), get_offset_slots(state, shard),
shard_transition.shard_block_lengths shard_transition.shard_block_lengths
) )
for shard_state, slot, length in states_slots_lengths: for shard_state, slot, length in states_slots_lengths:
proposer_index = get_shard_proposer_index(state, shard, slot) proposer_index = get_shard_proposer_index(state, slot, shard)
decrease_balance(state, proposer_index, shard_state.gasprice * length) decrease_balance(state, proposer_index, shard_state.gasprice * length)
# Return winning transition root # Return winning transition root
@ -798,11 +839,12 @@ def process_crosslinks(state: BeaconState,
for committee_index in map(CommitteeIndex, range(committee_count)): for committee_index in map(CommitteeIndex, range(committee_count)):
shard = compute_shard_from_committee_index(state, committee_index, state.slot) shard = compute_shard_from_committee_index(state, committee_index, state.slot)
# All attestations in the block for this committee/shard and current slot # All attestations in the block for this committee/shard and current slot
shard_transition = shard_transitions[shard]
shard_attestations = [ shard_attestations = [
attestation for attestation in attestations attestation for attestation in attestations
if attestation.data.index == committee_index and attestation.data.slot == state.slot if is_shard_attestation(state, attestation, committee_index)
] ]
shard_transition = shard_transitions[shard]
winning_root = process_crosslink_for_shard(state, committee_index, shard_transition, shard_attestations) winning_root = process_crosslink_for_shard(state, committee_index, shard_transition, shard_attestations)
if winning_root != Root(): if winning_root != Root():
# Mark relevant pending attestations as creating a successful crosslink # Mark relevant pending attestations as creating a successful crosslink
@ -896,13 +938,17 @@ def process_light_client_signatures(state: BeaconState, block_body: BeaconBlockB
total_reward += get_base_reward(state, participant_index) total_reward += get_base_reward(state, participant_index)
increase_balance(state, get_beacon_proposer_index(state), Gwei(total_reward // PROPOSER_REWARD_QUOTIENT)) increase_balance(state, get_beacon_proposer_index(state), Gwei(total_reward // PROPOSER_REWARD_QUOTIENT))
slot = get_previous_slot(state.slot)
signing_root = compute_signing_root(get_block_root_at_slot(state, slot),
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot)))
assert bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature)
```
slot = compute_previous_slot(state.slot)
signing_root = compute_signing_root(get_block_root_at_slot(state, slot),
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot)))
if len(signer_pubkeys) == 0:
# TODO: handle the empty light_client_signature case?
assert block_body.light_client_signature == BLSSignature()
return
else:
assert bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature)
```
### Epoch transition ### Epoch transition
View File
@ -1,70 +0,0 @@
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
- [Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs](#ethereum-20-phase-1----shard-transition-and-fraud-proofs)
- [Table of contents](#table-of-contents)
- [Introduction](#introduction)
- [Fraud proofs](#fraud-proofs)
- [Shard state transition function](#shard-state-transition-function)
- [Honest committee member behavior](#honest-committee-member-behavior)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs
**Notice**: This document is a work-in-progress for researchers and implementers.
## Table of contents
<!-- TOC -->
TODO
<!-- /TOC -->
## Introduction
This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0.
## Fraud proofs
TODO. The intent is to have a single universal fraud proof type, which contains the following parts:
1. An on-time attestation on some `shard` signing a `ShardTransition`
2. An index `i` of a particular position to focus on
3. The `ShardTransition` itself
4. The full body of the block
5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing
The proof verifies that one of the two conditions is false:
1. `custody_bits[i][j] != generate_custody_bit(subkey, block_contents)` for any `j`
2. `execute_state_transition(shard, slot, transition.shard_states[i-1].data, hash_tree_root(parent), get_shard_proposer_index(state, shard, slot), block_contents) != transition.shard_states[i].data` (if `i=0` then instead use `parent.shard_states[shard][-1].data`)
## Shard state transition function
```python
def shard_state_transition(shard: Shard,
slot: Slot,
pre_state: Root,
previous_beacon_root: Root,
proposer_pubkey: BLSPubkey,
block_data: ByteList[MAX_SHARD_BLOCK_SIZE]) -> Root:
# We will add something more substantive in phase 2
return hash(pre_state + hash_tree_root(previous_beacon_root) + hash_tree_root(block_data))
```
## Honest committee member behavior
Suppose you are a committee member on shard `shard` at slot `current_slot`. Let `state` be the head beacon state you are building on, and let `QUARTER_PERIOD = SECONDS_PER_SLOT // 4`. `2 * QUARTER_PERIOD` seconds into slot `slot`, run the following procedure:
* Initialize `proposals = []`, `shard_states = []`, `shard_state = state.shard_states[shard][-1]`, `start_slot = shard_state.slot`.
* For `slot in get_offset_slots(state, start_slot)`, do the following:
* Look for all valid proposals for `slot`; that is, a Bytes `proposal` where `shard_state_transition(shard, slot, shard_state, get_block_root_at_slot(state, state.slot - 1), get_shard_proposer_index(state, shard, slot), proposal)` returns a result and does not throw an exception. Let `choices` be the set of non-empty valid proposals you discover.
* If `len(choices) == 0`, do `proposals.append(make_empty_proposal(shard_state, slot))`
* If `len(choices) == 1`, do `proposals.append(choices[0])`
* If `len(choices) > 1`, let `winning_proposal` be the proposal with the largest number of total attestations from slots in `state.shard_next_slots[shard]....slot-1` supporting it or any of its descendants, breaking ties by choosing the first proposal locally seen. Do `proposals.append(winning_proposal)`.
* If `proposals[-1]` is NOT an empty proposal, set `shard_state = shard_state_transition(shard, slot, shard_state, get_block_root_at_slot(state, state.slot - 1), get_shard_proposer_index(state, shard, slot), proposals[-1])` and do `shard_states.append(shard_state)`. If it is an empty proposal, leave `shard_state` unchanged.
Make an attestation using `shard_data_roots = [hash_tree_root(proposal) for proposal in proposals]` and `shard_state_roots = shard_states`.
View File
@ -7,7 +7,7 @@
- [Introduction](#introduction) - [Introduction](#introduction)
- [Configuration](#configuration) - [Configuration](#configuration)
- [Fork to Phase 1](#fork-to-phase-1) - [Fork to Phase 1](#fork-to-phase-1)
- [Fork trigger.](#fork-trigger) - [Fork trigger](#fork-trigger)
- [Upgrading the state](#upgrading-the-state) - [Upgrading the state](#upgrading-the-state)
<!-- END doctoc generated TOC please keep comment here to allow auto update --> <!-- END doctoc generated TOC please keep comment here to allow auto update -->
@ -35,17 +35,18 @@ Warning: this configuration is not definitive.
| Name | Value | | Name | Value |
| - | - | | - | - |
| `PHASE_1_FORK_VERSION` | `Version('0x01000000')` | | `PHASE_1_FORK_VERSION` | `Version('0x01000000')` |
| `PHASE_1_GENESIS_SLOT` | `2**5` **TBD** |
| `INITIAL_ACTIVE_SHARDS` | `2**6` (= 64) | | `INITIAL_ACTIVE_SHARDS` | `2**6` (= 64) |
## Fork to Phase 1 ## Fork to Phase 1
### Fork trigger. ### Fork trigger
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at slot `PHASE_1_GENESIS_SLOT`, where `PHASE_1_GENESIS_SLOT % SLOTS_PER_EPOCH == 0`.
### Upgrading the state ### Upgrading the state
After `process_slots` of Phase 0 finishes, but before the first Phase 1 block is processed, an irregular state change is made to upgrade to Phase 1. After `process_slots` of Phase 0 finishes, if `state.slot == PHASE_1_GENESIS_SLOT`, an irregular state change is made to upgrade to Phase 1.
```python ```python
def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState: def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
@ -102,7 +103,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
ShardState( ShardState(
slot=pre.slot, slot=pre.slot,
gasprice=MIN_GASPRICE, gasprice=MIN_GASPRICE,
data=Root(), transition_digest=Root(),
latest_block_root=Root(), latest_block_root=Root(),
) for i in range(INITIAL_ACTIVE_SHARDS) ) for i in range(INITIAL_ACTIVE_SHARDS)
), ),
@ -110,7 +111,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
current_light_committee=CompactCommittee(), # computed after state creation current_light_committee=CompactCommittee(), # computed after state creation
next_light_committee=CompactCommittee(), next_light_committee=CompactCommittee(),
# Custody game # Custody game
custody_challenge_index=0, exposed_derived_secrets=[] * EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS,
# exposed_derived_secrets will fully default to zeroes # exposed_derived_secrets will fully default to zeroes
) )
next_epoch = Epoch(epoch + 1) next_epoch = Epoch(epoch + 1)
View File
@ -0,0 +1,292 @@
# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs
**Notice**: This document is a work-in-progress for researchers and implementers.
## Table of contents
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
- [Introduction](#introduction)
- [Helper functions](#helper-functions)
- [Misc](#misc)
- [Shard block verification functions](#shard-block-verification-functions)
- [Shard state transition](#shard-state-transition)
- [Fraud proofs](#fraud-proofs)
- [Verifying the proof](#verifying-the-proof)
- [Honest committee member behavior](#honest-committee-member-behavior)
- [Helper functions](#helper-functions-1)
- [Make attestations](#make-attestations)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Introduction
This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0.
## Helper functions
### Misc
```python
def compute_shard_transition_digest(beacon_state: BeaconState,
shard_state: ShardState,
beacon_parent_root: Root,
shard_body_root: Root) -> Bytes32:
# TODO: use SSZ hash tree root
return hash(
hash_tree_root(shard_state) + beacon_parent_root + shard_body_root
)
```
### Shard block verification functions
```python
def verify_shard_block_message(beacon_state: BeaconState,
shard_state: ShardState,
block: ShardBlock,
slot: Slot,
shard: Shard) -> bool:
assert block.shard_parent_root == shard_state.latest_block_root
assert block.slot == slot
assert block.proposer_index == get_shard_proposer_index(beacon_state, slot, shard)
assert 0 < len(block.body) <= MAX_SHARD_BLOCK_SIZE
return True
```
```python
def verify_shard_block_signature(beacon_state: BeaconState,
signed_block: SignedShardBlock) -> bool:
proposer = beacon_state.validators[signed_block.message.proposer_index]
domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(signed_block.message.slot))
signing_root = compute_signing_root(signed_block.message, domain)
return bls.Verify(proposer.pubkey, signing_root, signed_block.signature)
```
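For completeness, a proposer-side counterpart would produce the signature that `verify_shard_block_signature` checks. The helper below is hypothetical (it is not part of this spec) and simply mirrors the verification above using the same domain and BLS helpers:
```python
def get_shard_block_signature(beacon_state: BeaconState, block: ShardBlock, privkey: int) -> BLSSignature:
    # Hypothetical helper, mirroring verify_shard_block_signature above.
    domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(block.slot))
    signing_root = compute_signing_root(block, domain)
    return bls.Sign(privkey, signing_root)
```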
## Shard state transition
```python
def shard_state_transition(beacon_state: BeaconState,
shard_state: ShardState,
block: ShardBlock) -> None:
# Update shard state
prev_gasprice = shard_state.gasprice
if len(block.body) == 0:
latest_block_root = shard_state.latest_block_root
else:
latest_block_root = hash_tree_root(block)
shard_state.transition_digest = compute_shard_transition_digest(
beacon_state,
shard_state,
block.beacon_parent_root,
block.body,
)
shard_state.gasprice = compute_updated_gasprice(prev_gasprice, len(block.body))
shard_state.slot = block.slot
shard_state.latest_block_root = latest_block_root
```
We have a pure function `get_post_shard_state` for describing the fraud proof verification and honest validator behavior.
```python
def get_post_shard_state(beacon_state: BeaconState,
shard_state: ShardState,
block: ShardBlock) -> ShardState:
"""
A pure function that returns a new post ShardState instead of modifying the given `shard_state`.
"""
post_state = shard_state.copy()
shard_state_transition(beacon_state, post_state, block)
return post_state
```
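Note that an empty proposal (`len(block.body) == 0`) still advances the shard slot and decays the gasprice, but leaves `latest_block_root` (and the transition digest) untouched. An illustrative sketch using the pure helper above; the surrounding `beacon_state` and `shard` names are assumed to be in scope:
```python
# Illustrative sketch only, not a normative addition to the spec.
pre = beacon_state.shard_states[shard]

empty_block = ShardBlock(slot=pre.slot + 1)                  # len(body) == 0
post = get_post_shard_state(beacon_state, pre, empty_block)

assert post.latest_block_root == pre.latest_block_root       # root unchanged by an empty proposal
assert post.slot == empty_block.slot                         # slot still advances
assert post.gasprice <= pre.gasprice                         # gasprice decays toward MIN_GASPRICE
```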
## Fraud proofs
### Verifying the proof
TODO. The intent is to have a single universal fraud proof type, which contains the following parts:
1. An on-time attestation `attestation` on some shard `shard` signing a `transition: ShardTransition`
2. An index `offset_index` of a particular position to focus on
3. The `transition: ShardTransition` itself
4. The full body of the shard block `shard_block`
5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing
6. The `subkey` to generate the custody bit
Call the following function to verify the proof:
```python
def is_valid_fraud_proof(beacon_state: BeaconState,
attestation: Attestation,
offset_index: uint64,
transition: ShardTransition,
block: ShardBlock,
subkey: BLSPubkey,
beacon_parent_block: BeaconBlock) -> bool:
# 1. Check if `custody_bits[offset_index][j] != generate_custody_bit(subkey, block_contents)` for any `j`.
custody_bits = attestation.custody_bits_blocks
for j in range(len(custody_bits[offset_index])):
if custody_bits[offset_index][j] != generate_custody_bit(subkey, block):
return True
# 2. Check if the shard state transition result is wrong between
# `transition.shard_states[offset_index - 1]` to `transition.shard_states[offset_index]`.
if offset_index == 0:
shard = get_shard(beacon_state, attestation)
shard_state = beacon_parent_block.shard_transitions[shard].shard_states[-1]
else:
shard_state = transition.shard_states[offset_index - 1] # Not doing the actual state updates here.
shard_state = get_post_shard_state(beacon_state, shard_state, block)
if shard_state.transition_digest != transition.shard_states[offset_index].transition_digest:
return True
return False
```
```python
def generate_custody_bit(subkey: BLSPubkey, block: ShardBlock) -> bool:
# TODO
...
```
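A hedged sketch of how a challenger might assemble the verification call, mapping the numbered parts above onto the parameters; every local variable name here is hypothetical:
```python
# Illustrative only: all local names (offending_attestation, claimed_transition, ...) are hypothetical.
assert is_valid_fraud_proof(
    beacon_state=beacon_state,
    attestation=offending_attestation,        # 1. the on-time attestation being challenged
    offset_index=offset_index,                # 2. which offset slot in the transition to examine
    transition=claimed_transition,            # 3. the ShardTransition the attestation signed over
    block=challenged_shard_block,             # 4. the full shard block body for that offset slot
    subkey=custody_subkey,                    # 6. used to recompute the custody bit
    beacon_parent_block=parent_beacon_block,  # gives access to the parent's shard_states (cf. 5)
)
```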
## Honest committee member behavior
### Helper functions
```python
def get_winning_proposal(beacon_state: BeaconState, proposals: Sequence[SignedShardBlock]) -> SignedShardBlock:
# TODO: Let `winning_proposal` be the proposal with the largest number of total attestations from slots in
# `state.shard_next_slots[shard]....slot-1` supporting it or any of its descendants, breaking ties by choosing
# the first proposal locally seen. Do `proposals.append(winning_proposal)`.
return proposals[-1] # stub
```
```python
def compute_shard_body_roots(proposals: Sequence[SignedShardBlock]) -> Sequence[Root]:
return [hash_tree_root(proposal.message.body) for proposal in proposals]
```
```python
def get_proposal_choices_at_slot(beacon_state: BeaconState,
shard_state: ShardState,
slot: Slot,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock],
validate_signature: bool=True) -> Sequence[SignedShardBlock]:
"""
Return the valid shard blocks at the given ``slot``.
Note that this function doesn't change the state.
"""
choices = []
shard_blocks_at_slot = [block for block in shard_blocks if block.message.slot == slot]
for block in shard_blocks_at_slot:
try:
# Verify block message and signature
# TODO these validations should have been checked upon receiving shard blocks.
assert verify_shard_block_message(beacon_state, shard_state, block.message, slot, shard)
if validate_signature:
assert verify_shard_block_signature(beacon_state, block)
shard_state = get_post_shard_state(beacon_state, shard_state, block.message)
except Exception:
pass # TODO: throw error in the test helper
else:
choices.append(block)
return choices
```
```python
def get_proposal_at_slot(beacon_state: BeaconState,
shard_state: ShardState,
slot: Slot,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock],
validate_signature: bool=True) -> Tuple[SignedShardBlock, ShardState]:
"""
Return ``proposal``, ``shard_state`` of the given ``slot``.
Note that this function doesn't change the state.
"""
choices = get_proposal_choices_at_slot(
beacon_state=beacon_state,
shard_state=shard_state,
slot=slot,
shard=shard,
shard_blocks=shard_blocks,
validate_signature=validate_signature,
)
if len(choices) == 0:
block = ShardBlock(slot=slot)
proposal = SignedShardBlock(message=block)
elif len(choices) == 1:
proposal = choices[0]
else:
proposal = get_winning_proposal(beacon_state, choices)
# Apply state transition
shard_state = get_post_shard_state(beacon_state, shard_state, proposal.message)
return proposal, shard_state
```
```python
def get_shard_state_transition_result(
beacon_state: BeaconState,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock],
validate_signature: bool=True,
) -> Tuple[Sequence[SignedShardBlock], Sequence[ShardState], Sequence[Root]]:
proposals = []
shard_states = []
shard_state = beacon_state.shard_states[shard]
for slot in get_offset_slots(beacon_state, shard):
proposal, shard_state = get_proposal_at_slot(
beacon_state=beacon_state,
shard_state=shard_state,
slot=slot,
shard=shard,
shard_blocks=shard_blocks,
validate_signature=validate_signature,
)
shard_states.append(shard_state)
proposals.append(proposal)
shard_data_roots = compute_shard_body_roots(proposals)
return proposals, shard_states, shard_data_roots
```
### Make attestations
Suppose you are a committee member on shard `shard` at slot `current_slot` and you have received shard blocks `shard_blocks` since the latest successful crosslink for `shard` into the beacon chain. Let `state` be the head beacon state you are building on, and let `QUARTER_PERIOD = SECONDS_PER_SLOT // 4`. `2 * QUARTER_PERIOD` seconds into slot `current_slot`, run `get_shard_transition(beacon_state, shard, shard_blocks)` to get `shard_transition`.
```python
def get_shard_transition(beacon_state: BeaconState,
shard: Shard,
shard_blocks: Sequence[SignedShardBlock]) -> ShardTransition:
offset_slots = get_offset_slots(beacon_state, shard)
start_slot = offset_slots[0]
proposals, shard_states, shard_data_roots = get_shard_state_transition_result(beacon_state, shard, shard_blocks)
assert len(proposals) > 0
assert len(shard_data_roots) > 0
shard_block_lengths = []
proposer_signatures = []
for proposal in proposals:
shard_block_lengths.append(len(proposal.message.body))
if proposal.signature != BLSSignature():
proposer_signatures.append(proposal.signature)
proposer_signature_aggregate = bls.Aggregate(proposer_signatures)
return ShardTransition(
start_slot=start_slot,
shard_block_lengths=shard_block_lengths,
shard_data_roots=shard_data_roots,
shard_states=shard_states,
proposer_signature_aggregate=proposer_signature_aggregate,
)
```
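The resulting `ShardTransition` is what the attester then commits to in its `AttestationData`. A hedged sketch of that wiring, mirroring the field assignments used by the test helpers later in this diff; `attestation_data` is assumed to be built as in Phase 0:
```python
# Illustrative only: referencing the transition from the attestation data.
shard_transition = get_shard_transition(beacon_state, shard, shard_blocks)
attestation_data.shard_transition_root = hash_tree_root(shard_transition)
attestation_data.head_shard_root = shard_transition.shard_data_roots[len(shard_transition.shard_data_roots) - 1]
```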
View File
@ -1 +1 @@
0.11.2 0.12.0
View File
@ -24,12 +24,12 @@ def apply_constants_config(spec_globals: Dict[str, Any], warn_if_unknown: bool =
# Load presets from a file, and then prepares the global config setting. This does not apply the config. # Load presets from a file, and then prepares the global config setting. This does not apply the config.
# To apply the config, reload the spec module (it will re-initialize with the config taken from here). # To apply the config, reload the spec module (it will re-initialize with the config taken from here).
def prepare_config(configs_path, config_name): def prepare_config(configs_path: str, config_name: str) -> None:
global config global config
config = load_config_file(configs_path, config_name) config = load_config_file(configs_path, config_name)
def load_config_file(configs_dir, presets_name) -> Dict[str, Any]: def load_config_file(configs_dir: str, presets_name: str) -> Dict[str, Any]:
""" """
Loads the given preset Loads the given preset
:param presets_name: The name of the presets. (lowercase snake_case) :param presets_name: The name of the presets. (lowercase snake_case)
@ -38,7 +38,7 @@ def load_config_file(configs_dir, presets_name) -> Dict[str, Any]:
path = Path(join(configs_dir, presets_name + '.yaml')) path = Path(join(configs_dir, presets_name + '.yaml'))
yaml = YAML(typ='base') yaml = YAML(typ='base')
loaded = yaml.load(path) loaded = yaml.load(path)
out = dict() out: Dict[str, Any] = dict()
for k, v in loaded.items(): for k, v in loaded.items():
if isinstance(v, list): if isinstance(v, list):
# Clean up integer values. YAML parser renders lists of ints as list of str # Clean up integer values. YAML parser renders lists of ints as list of str
View File
@ -1,7 +1,7 @@
from eth2spec.test.context import PHASE0, with_all_phases, spec_state_test from eth2spec.test.context import PHASE0, with_all_phases, spec_state_test
from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot
def run_on_attestation(spec, state, store, attestation, valid=True): def run_on_attestation(spec, state, store, attestation, valid=True):
@ -116,6 +116,44 @@ def test_on_attestation_mismatched_target_and_slot(spec, state):
run_on_attestation(spec, state, store, attestation, False) run_on_attestation(spec, state, store, attestation, False)
@with_all_phases
@spec_state_test
def test_on_attestation_inconsistent_target_and_head(spec, state):
store = spec.get_forkchoice_store(state)
spec.on_tick(store, store.time + 2 * spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH)
# Create chain 1 as empty chain between genesis and start of 1st epoch
target_state_1 = state.copy()
next_epoch(spec, target_state_1)
# Create chain 2 with a different block from chain 1
target_state_2 = state.copy()
diff_block = build_empty_block_for_next_slot(spec, target_state_2)
signed_diff_block = state_transition_and_sign_block(spec, target_state_2, diff_block)
spec.on_block(store, signed_diff_block)
next_epoch(spec, target_state_2)
next_slot(spec, target_state_2)
# Create and store the new head block on target state 1
head_block = build_empty_block_for_next_slot(spec, target_state_1)
signed_head_block = state_transition_and_sign_block(spec, target_state_1, head_block)
spec.on_block(store, signed_head_block)
# Attest to head of chain 1
attestation = get_valid_attestation(spec, target_state_1, slot=head_block.slot, signed=False)
epoch = spec.compute_epoch_at_slot(attestation.data.slot)
# Set attestation target to be from chain 2
attestation.data.target = spec.Checkpoint(epoch=epoch, root=spec.get_block_root(target_state_2, epoch))
sign_attestation(spec, state, attestation)
assert attestation.data.target.epoch == spec.GENESIS_EPOCH + 1
assert spec.compute_epoch_at_slot(attestation.data.slot) == spec.GENESIS_EPOCH + 1
assert spec.get_block_root(target_state_1, epoch) != attestation.data.target.root
run_on_attestation(spec, state, store, attestation, False)
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_on_attestation_target_not_in_store(spec, state): def test_on_attestation_target_not_in_store(spec, state):
View File
@ -1,7 +1,7 @@
from typing import List from typing import List
from eth2spec.test.context import expect_assertion_error, PHASE0 from eth2spec.test.context import expect_assertion_error, PHASE0, PHASE1
from eth2spec.test.helpers.state import state_transition_and_sign_block from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot, transition_to
from eth2spec.test.helpers.block import build_empty_block_for_next_slot from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.keys import privkeys from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls from eth2spec.utils import bls
@ -43,7 +43,7 @@ def run_attestation_processing(spec, state, attestation, valid=True):
yield 'post', state yield 'post', state
def build_attestation_data(spec, state, slot, index): def build_attestation_data(spec, state, slot, index, shard_transition=None, on_time=True):
assert state.slot >= slot assert state.slot >= slot
if slot == state.slot: if slot == state.slot:
@ -66,7 +66,7 @@ def build_attestation_data(spec, state, slot, index):
source_epoch = state.current_justified_checkpoint.epoch source_epoch = state.current_justified_checkpoint.epoch
source_root = state.current_justified_checkpoint.root source_root = state.current_justified_checkpoint.root
return spec.AttestationData( attestation_data = spec.AttestationData(
slot=slot, slot=slot,
index=index, index=index,
beacon_block_root=block_root, beacon_block_root=block_root,
@ -74,11 +74,34 @@ def build_attestation_data(spec, state, slot, index):
target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root), target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root),
) )
if spec.fork == PHASE1:
if shard_transition is not None:
lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
attestation_data.head_shard_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
attestation_data.shard_transition_root = shard_transition.hash_tree_root()
else:
# No shard transition
shard = spec.get_shard(state, spec.Attestation(data=attestation_data))
if on_time:
temp_state = state.copy()
next_slot(spec, temp_state)
shard_transition = spec.get_shard_transition(temp_state, shard, [])
lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
attestation_data.head_shard_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
attestation_data.shard_transition_root = shard_transition.hash_tree_root()
else:
attestation_data.head_shard_root = state.shard_states[shard].transition_digest
attestation_data.shard_transition_root = spec.Root()
return attestation_data
def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False): def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False):
shard = spec.get_shard(state, attestation) shard = spec.get_shard(state, attestation)
offset_slots = spec.compute_offset_slots(spec.get_latest_slot_for_shard(state, shard), state.slot + 1) offset_slots = spec.compute_offset_slots(
for offset_slot in offset_slots: spec.get_latest_slot_for_shard(state, shard),
attestation.data.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY,
)
for _ in offset_slots:
attestation.custody_bits_blocks.append( attestation.custody_bits_blocks.append(
Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits]) Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits])
) )
@ -89,7 +112,7 @@ def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False)
return attestation return attestation
def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=False): def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_transition=None, signed=False):
''' '''
Construct on-time attestation for next slot Construct on-time attestation for next slot
''' '''
@ -98,7 +121,15 @@ def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=Fal
if index is None: if index is None:
index = 0 index = 0
return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=True) return get_valid_attestation(
spec,
state,
slot=slot,
index=index,
shard_transition=shard_transition,
signed=signed,
on_time=True,
)
def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False): def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False):
@ -113,16 +144,24 @@ def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False)
return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=False) return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=False)
def get_valid_attestation(spec, state, slot=None, index=None, filter_participant_set=None, signed=False, on_time=True): def get_valid_attestation(spec,
state,
slot=None,
index=None,
filter_participant_set=None,
shard_transition=None,
signed=False,
on_time=True):
# If filter_participant_set filters out everything, the attestation has 0 participants and cannot be signed. # If filter_participant_set filters out everything, the attestation has 0 participants and cannot be signed.
# Thus it is, strictly speaking, invalid when no participant is added later. # Thus it is, strictly speaking, invalid when no participant is added later.
if slot is None: if slot is None:
slot = state.slot slot = state.slot
if index is None: if index is None:
index = 0 index = 0
attestation_data = build_attestation_data(spec, state, slot, index) attestation_data = build_attestation_data(
spec, state, slot=slot, index=index, shard_transition=shard_transition, on_time=on_time
)
beacon_committee = spec.get_beacon_committee( beacon_committee = spec.get_beacon_committee(
state, state,
@ -139,7 +178,7 @@ def get_valid_attestation(spec, state, slot=None, index=None, filter_participant
# fill the attestation with (optionally filtered) participants, and optionally sign it # fill the attestation with (optionally filtered) participants, and optionally sign it
fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set) fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set)
if spec.fork == 'phase1' and on_time: if spec.fork == PHASE1 and on_time:
attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed) attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed)
return attestation return attestation
@ -211,7 +250,7 @@ def get_attestation_custody_signature(spec, state, attestation_data, block_index
def sign_attestation(spec, state, attestation): def sign_attestation(spec, state, attestation):
if spec.fork == 'phase1' and any(attestation.custody_bits_blocks): if spec.fork == PHASE1 and any(attestation.custody_bits_blocks):
sign_on_time_attestation(spec, state, attestation) sign_on_time_attestation(spec, state, attestation)
return return
@ -284,7 +323,21 @@ def next_epoch_with_attestations(spec,
spec, post_state, slot_to_attest, index=index, signed=True, on_time=False) spec, post_state, slot_to_attest, index=index, signed=True, on_time=False)
block.body.attestations.append(prev_attestation) block.body.attestations.append(prev_attestation)
if spec.fork == PHASE1:
fill_block_shard_transitions_by_attestations(spec, post_state, block)
signed_block = state_transition_and_sign_block(spec, post_state, block) signed_block = state_transition_and_sign_block(spec, post_state, block)
signed_blocks.append(signed_block) signed_blocks.append(signed_block)
return state, signed_blocks, post_state return state, signed_blocks, post_state
def fill_block_shard_transitions_by_attestations(spec, state, block):
block.body.shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
for attestation in block.body.attestations:
shard = spec.get_shard(state, attestation)
if attestation.data.slot == state.slot:
temp_state = state.copy()
transition_to(spec, temp_state, slot=block.slot)
shard_transition = spec.get_shard_transition(temp_state, shard, [])
block.body.shard_transitions[shard] = shard_transition
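For reference, a minimal usage sketch of this helper (a hypothetical test body; it assumes the block and state helpers shown elsewhere in this change are importable in the same test module):
def example_include_attestation_with_shard_transitions(spec, state):
    # Sketch only: build a block carrying an on-time attestation and, in Phase 1,
    # derive the matching shard transitions before signing the block.
    attestation = get_valid_on_time_attestation(spec, state, signed=True)
    block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
    block.body.attestations.append(attestation)
    if spec.fork == PHASE1:
        fill_block_shard_transitions_by_attestations(spec, state, block)
    return state_transition_and_sign_block(spec, state, block)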

View File

@ -71,24 +71,31 @@ def build_empty_block(spec, state, slot=None):
""" """
if slot is None: if slot is None:
slot = state.slot slot = state.slot
if slot < state.slot:
raise Exception("build_empty_block cannot build blocks for past slots")
if slot > state.slot:
# transition forward in copied state to grab relevant data from state
state = state.copy()
spec.process_slots(state, slot)
state, parent_block_root = get_state_and_beacon_parent_root_at_slot(spec, state, slot)
empty_block = spec.BeaconBlock() empty_block = spec.BeaconBlock()
empty_block.slot = slot empty_block.slot = slot
empty_block.proposer_index = spec.get_beacon_proposer_index(state) empty_block.proposer_index = spec.get_beacon_proposer_index(state)
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
previous_block_header = state.latest_block_header.copy() empty_block.parent_root = parent_block_root
if previous_block_header.state_root == spec.Root():
previous_block_header.state_root = hash_tree_root(state)
empty_block.parent_root = hash_tree_root(previous_block_header)
apply_randao_reveal(spec, state, empty_block) apply_randao_reveal(spec, state, empty_block)
return empty_block return empty_block
def build_empty_block_for_next_slot(spec, state): def build_empty_block_for_next_slot(spec, state):
return build_empty_block(spec, state, state.slot + 1) return build_empty_block(spec, state, state.slot + 1)
def get_state_and_beacon_parent_root_at_slot(spec, state, slot):
if slot < state.slot:
raise Exception("Cannot build blocks for past slots")
if slot > state.slot:
# transition forward in copied state to grab relevant data from state
state = state.copy()
spec.process_slots(state, slot)
previous_block_header = state.latest_block_header.copy()
if previous_block_header.state_root == spec.Root():
previous_block_header.state_root = hash_tree_root(state)
beacon_parent_root = hash_tree_root(previous_block_header)
return state, beacon_parent_root
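A short, hedged illustration of this helper's contract (the wrapper name in the sketch is hypothetical): for a future slot it returns a copied, fast-forwarded state together with the parent block root to build on, leaving the caller's state untouched.
def example_parent_root(spec, state):
    # Sketch: request data for a slot two ahead of the current state.
    future_slot = state.slot + 2
    advanced_state, parent_root = get_state_and_beacon_parent_root_at_slot(spec, state, future_slot)
    assert advanced_state.slot == future_slot
    assert state.slot == future_slot - 2  # the original state was not advanced
    return parent_root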

View File

@ -0,0 +1,28 @@
from eth2spec.test.context import expect_assertion_error
def run_crosslinks_processing(spec, state, shard_transitions, attestations, valid=True):
"""
Run ``process_crosslinks``, yielding:
- pre-state ('pre')
- shard_transitions ('shard_transitions')
- attestations ('attestations')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
# yield pre-state
yield 'pre', state
yield 'shard_transitions', shard_transitions
yield 'attestations', attestations
# If the attestation is invalid, processing is aborted, and there is no post-state.
if not valid:
expect_assertion_error(lambda: spec.process_crosslinks(state, shard_transitions, attestations))
yield 'post', None
return
# process crosslinks
spec.process_crosslinks(state, shard_transitions, attestations)
# yield post-state
yield 'post', state
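Since this runner is a generator that yields test vectors, callers drive it with `yield from`; a hedged sketch of the calling pattern, assuming a Phase 1 `spec` and pre-built `shard_transitions` and `attestations`:
def example_test_crosslinks(spec, state, shard_transitions, attestations):
    # Sketch: a test case simply delegates to the runner and re-yields its vectors.
    yield from run_crosslinks_processing(spec, state, shard_transitions, attestations, valid=True)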

View File

@ -1,63 +0,0 @@
from eth2spec.utils.ssz.ssz_typing import Bitlist
from eth2spec.utils import bls
from eth2spec.test.helpers.keys import privkeys
import eth2spec.test.helpers.attestations as phase0_attestations
def get_valid_on_time_attestation(spec, state, index=None, signed=False):
'''
Construct on-time attestation for next slot
'''
if index is None:
index = 0
attestation = phase0_attestations.get_valid_attestation(spec, state, state.slot, index, False)
shard = spec.get_shard(state, attestation)
offset_slots = spec.compute_offset_slots(spec.get_latest_slot_for_shard(state, shard), state.slot + 1)
for _ in offset_slots:
attestation.custody_bits_blocks.append(
Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits])
)
if signed:
sign_attestation(spec, state, attestation)
return attestation
def sign_attestation(spec, state, attestation):
if not any(attestation.custody_bits_blocks):
phase0_attestations.sign_attestation(spec, state, attestation)
return
committee = spec.get_beacon_committee(state, attestation.data.slot, attestation.data.index)
signatures = []
for block_index, custody_bits in enumerate(attestation.custody_bits_blocks):
for participant, abit, cbit in zip(committee, attestation.aggregation_bits, custody_bits):
if not abit:
continue
signatures.append(get_attestation_custody_signature(
spec,
state,
attestation.data,
block_index,
cbit,
privkeys[participant]
))
attestation.signature = bls.Aggregate(signatures)
def get_attestation_custody_signature(spec, state, attestation_data, block_index, bit, privkey):
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
signing_root = spec.compute_signing_root(
spec.AttestationCustodyBitWrapper(
attestation_data.hash_tree_root(),
block_index,
bit,
),
domain,
)
return bls.Sign(privkey, signing_root)

View File

@ -1,71 +0,0 @@
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
from eth2spec.utils.ssz.ssz_impl import (
hash_tree_root,
)
from .attestations import (
sign_shard_attestation,
)
@only_with_bls()
def sign_shard_block(spec, beacon_state, shard_state, block, proposer_index=None):
if proposer_index is None:
proposer_index = spec.get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
privkey = privkeys[proposer_index]
domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_PROPOSER, spec.compute_epoch_of_shard_slot(block.slot))
signing_root = spec.compute_signing_root(block, domain)
block.signature = bls.Sign(privkey, signing_root)
def build_empty_shard_block(spec,
beacon_state,
shard_state,
slot,
signed=False,
full_attestation=False):
if slot is None:
slot = shard_state.slot
previous_beacon_header = beacon_state.latest_block_header.copy()
if previous_beacon_header.state_root == spec.Bytes32():
previous_beacon_header.state_root = beacon_state.hash_tree_root()
beacon_block_root = hash_tree_root(previous_beacon_header)
previous_block_header = shard_state.latest_block_header.copy()
if previous_block_header.state_root == spec.Bytes32():
previous_block_header.state_root = shard_state.hash_tree_root()
parent_root = hash_tree_root(previous_block_header)
block = spec.ShardBlock(
shard=shard_state.shard,
slot=slot,
beacon_block_root=beacon_block_root,
parent_root=parent_root,
block_size_sum=shard_state.block_size_sum + spec.SHARD_HEADER_SIZE,
)
if full_attestation:
shard_committee = spec.get_shard_committee(beacon_state, shard_state.shard, block.slot)
block.aggregation_bits = list(
(True,) * len(shard_committee) +
(False,) * (spec.MAX_PERIOD_COMMITTEE_SIZE * 2 - len(shard_committee))
)
else:
shard_committee = []
block.attestations = sign_shard_attestation(
spec,
beacon_state,
shard_state,
block,
participants=shard_committee,
)
if signed:
sign_shard_block(spec, beacon_state, shard_state, block)
return block

View File

@ -1,18 +0,0 @@
from eth2spec.test.helpers.phase1.shard_block import sign_shard_block
def configure_shard_state(spec, beacon_state, shard=0):
beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH)
shard_state = spec.get_genesis_shard_state(spec.Shard(shard))
shard_state.slot = spec.ShardSlot(spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH)
return beacon_state, shard_state
def shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block):
"""
Shard state transition via the provided ``block``
then package the block with the state root and signature.
"""
spec.shard_state_transition(beacon_state, shard_state, block)
block.state_root = shard_state.hash_tree_root()
sign_shard_block(spec, beacon_state, shard_state, block)

View File

@ -0,0 +1,85 @@
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
from eth2spec.test.helpers.block import get_state_and_beacon_parent_root_at_slot
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
@only_with_bls()
def sign_shard_block(spec, beacon_state, shard, block, proposer_index=None):
slot = block.message.slot
if proposer_index is None:
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
privkey = privkeys[proposer_index]
domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_PROPOSAL, spec.compute_epoch_at_slot(slot))
signing_root = spec.compute_signing_root(block.message, domain)
block.signature = bls.Sign(privkey, signing_root)
def build_shard_block(spec,
beacon_state,
shard,
slot=None,
body=None,
signed=False):
shard_state = beacon_state.shard_states[shard]
if slot is None:
slot = shard_state.slot + 1
if body is None:
body = b'\x56' * 128
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
beacon_state, beacon_parent_root = get_state_and_beacon_parent_root_at_slot(spec, beacon_state, slot)
block = spec.ShardBlock(
shard_parent_root=shard_state.latest_block_root,
beacon_parent_root=beacon_parent_root,
slot=slot,
proposer_index=proposer_index,
body=body,
)
signed_block = spec.SignedShardBlock(
message=block,
)
if signed:
sign_shard_block(spec, beacon_state, shard, signed_block, proposer_index=proposer_index)
return signed_block
def build_shard_transitions_till_slot(spec, state, shard_blocks, on_time_slot):
temp_state = state.copy()
transition_to(spec, temp_state, on_time_slot)
shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
for shard, blocks in shard_blocks.items():
offset_slots = spec.get_offset_slots(temp_state, shard)
len_offset_slots = len(offset_slots)
assert len_offset_slots == on_time_slot - state.shard_states[shard].slot - 1
shard_transition = spec.get_shard_transition(temp_state, shard, blocks)
if len(blocks) > 0:
shard_block_root = blocks[-1].message.hash_tree_root()
assert shard_transition.shard_states[len_offset_slots - 1].latest_block_root == shard_block_root
assert shard_transition.shard_states[len_offset_slots - 1].slot == offset_slots[-1]
shard_transitions[shard] = shard_transition
return shard_transitions
def build_attestation_with_shard_transition(spec, state, index, on_time_slot, shard_transition=None):
temp_state = state.copy()
transition_to(spec, temp_state, on_time_slot - 1)
attestation = get_valid_on_time_attestation(
spec,
temp_state,
index=index,
shard_transition=shard_transition,
signed=True,
)
assert attestation.data.slot == temp_state.slot
if shard_transition is not None:
assert attestation.data.shard_transition_root == shard_transition.hash_tree_root()
return attestation

View File

@ -30,6 +30,16 @@ def transition_to(spec, state, slot):
assert state.slot == slot assert state.slot == slot
def transition_to_valid_shard_slot(spec, state):
"""
Transition to slot `spec.PHASE_1_GENESIS_SLOT + 1` and fork at `spec.PHASE_1_GENESIS_SLOT`.
"""
transition_to(spec, state, spec.PHASE_1_GENESIS_SLOT)
state = spec.upgrade_to_phase1(state) # `upgrade_to_phase1` is a pure function
next_slot(spec, state)
return state
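Because `upgrade_to_phase1` is pure, the helper returns the upgraded state rather than mutating its argument; a minimal sketch of the expected calling pattern (the wrapper name is hypothetical):
def example_enter_phase1(spec, state):
    # Sketch: callers must rebind `state` to the returned value.
    state = transition_to_valid_shard_slot(spec, state)
    assert state.slot == spec.PHASE_1_GENESIS_SLOT + 1
    return state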
def next_epoch(spec, state): def next_epoch(spec, state):
""" """
Transition to the start slot of the next epoch Transition to the start slot of the next epoch

View File

@ -34,8 +34,8 @@ def run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=True
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_success(spec, state): def test_success(spec, state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0] validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
@ -53,8 +53,8 @@ def test_success(spec, state):
@spec_state_test @spec_state_test
@always_bls @always_bls
def test_invalid_signature(spec, state): def test_invalid_signature(spec, state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0] validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
@ -71,8 +71,8 @@ def test_invalid_signature(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_success_exit_queue(spec, state): def test_success_exit_queue(spec, state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
@ -115,8 +115,8 @@ def test_success_exit_queue(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_default_exit_epoch_subsequent_exit(spec, state): def test_default_exit_epoch_subsequent_exit(spec, state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0] validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
@ -137,8 +137,8 @@ def test_default_exit_epoch_subsequent_exit(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_validator_exit_in_future(spec, state): def test_validator_exit_in_future(spec, state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0] validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
@ -156,8 +156,8 @@ def test_validator_exit_in_future(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_validator_invalid_validator_index(spec, state): def test_validator_invalid_validator_index(spec, state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0] validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
@ -190,8 +190,8 @@ def test_validator_not_active(spec, state):
@with_all_phases @with_all_phases
@spec_state_test @spec_state_test
def test_validator_already_exited(spec, state): def test_validator_already_exited(spec, state):
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit # move state forward SHARD_COMMITTEE_PERIOD epochs to allow the validator to exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0] validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
@ -218,7 +218,7 @@ def test_validator_not_active_long_enough(spec, state):
assert ( assert (
current_epoch - state.validators[validator_index].activation_epoch < current_epoch - state.validators[validator_index].activation_epoch <
spec.PERSISTENT_COMMITTEE_PERIOD spec.SHARD_COMMITTEE_PERIOD
) )
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False) yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)

View File

@ -8,10 +8,13 @@ from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_e
from eth2spec.test.helpers.keys import privkeys, pubkeys from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, get_indexed_attestation_participants from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, get_indexed_attestation_participants
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
from eth2spec.test.helpers.attestations import get_valid_attestation from eth2spec.test.helpers.attestations import get_valid_attestation, fill_block_shard_transitions_by_attestations
from eth2spec.test.helpers.deposits import prepare_state_and_deposit from eth2spec.test.helpers.deposits import prepare_state_and_deposit
from eth2spec.test.context import spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases from eth2spec.test.context import (
spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases,
PHASE1
)
@with_all_phases @with_all_phases
@ -420,12 +423,14 @@ def test_attestation(spec, state):
yield 'pre', state yield 'pre', state
attestation = get_valid_attestation(spec, state, signed=True) attestation = get_valid_attestation(spec, state, signed=True, on_time=True)
# Add to state via block transition # Add to state via block transition
pre_current_attestations_len = len(state.current_epoch_attestations) pre_current_attestations_len = len(state.current_epoch_attestations)
attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY) attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
attestation_block.body.attestations.append(attestation) attestation_block.body.attestations.append(attestation)
if spec.fork == PHASE1:
fill_block_shard_transitions_by_attestations(spec, state, attestation_block)
signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block) signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)
assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1 assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
@ -443,7 +448,7 @@ def test_attestation(spec, state):
assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
# In phase1 a committee is computed for PERSISTENT_COMMITTEE_PERIOD slots ago, # In phase1 a committee is computed for SHARD_COMMITTEE_PERIOD epochs ago,
# exceeding the minimal-config randao mixes memory size. # exceeding the minimal-config randao mixes memory size.
@with_phases(['phase0']) @with_phases(['phase0'])
@spec_state_test @spec_state_test
@ -453,8 +458,8 @@ def test_voluntary_exit(spec, state):
spec.get_current_epoch(state) spec.get_current_epoch(state)
)[-1] )[-1]
# move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
yield 'pre', state yield 'pre', state

View File

@ -0,0 +1,73 @@
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
always_bls,
)
from eth2spec.test.helpers.crosslinks import run_crosslinks_processing
from eth2spec.test.helpers.shard_block import (
build_attestation_with_shard_transition,
build_shard_block,
build_shard_transitions_till_slot,
)
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
def run_basic_crosslink_tests(spec, state, target_len_offset_slot, valid=True):
state = transition_to_valid_shard_slot(spec, state)
# At the beginning, let `x = state.slot`; then `state.shard_states[shard].slot == x - 1`
slot_x = state.slot
committee_index = spec.CommitteeIndex(0)
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
assert state.shard_states[shard].slot == slot_x - 1
# Create SignedShardBlock
body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE
shard_block = build_shard_block(spec, state, shard, body=body, signed=True)
shard_blocks = [shard_block]
# Create the shard_transitions that would be included in the beacon block at slot `state.slot + target_len_offset_slot`
shard_transitions = build_shard_transitions_till_slot(
spec,
state,
shard_blocks={shard: shard_blocks},
on_time_slot=state.slot + target_len_offset_slot,
)
shard_transition = shard_transitions[shard]
# Create an attestation that would be included in the beacon block at slot `state.slot + target_len_offset_slot`
attestation = build_attestation_with_shard_transition(
spec,
state,
index=committee_index,
on_time_slot=state.slot + target_len_offset_slot,
shard_transition=shard_transition,
)
pre_gasprice = state.shard_states[shard].gasprice
transition_to(spec, state, state.slot + target_len_offset_slot)
pre_shard_state = state.shard_states[shard]
yield from run_crosslinks_processing(spec, state, shard_transitions, [attestation], valid=valid)
if valid:
# After state transition,
assert state.slot == slot_x + target_len_offset_slot
shard_state = state.shard_states[shard]
assert shard_state != pre_shard_state
assert shard_state == shard_transition.shard_states[len(shard_transition.shard_states) - 1]
if target_len_offset_slot == 1:
assert shard_state.gasprice > pre_gasprice
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_basic_crosslinks(spec, state):
yield from run_basic_crosslink_tests(spec, state, target_len_offset_slot=1, valid=True)
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_multiple_offset_slots(spec, state):
yield from run_basic_crosslink_tests(spec, state, target_len_offset_slot=3, valid=True)

View File

@ -0,0 +1,107 @@
from typing import Dict, Sequence
from eth2spec.test.context import (
PHASE0,
with_all_phases_except,
spec_state_test,
always_bls,
)
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.shard_block import (
build_attestation_with_shard_transition,
build_shard_block,
build_shard_transitions_till_slot,
)
from eth2spec.test.helpers.state import state_transition_and_sign_block, transition_to_valid_shard_slot
def run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index, valid=True):
shard_transitions = build_shard_transitions_till_slot(
spec, state, shard_blocks, on_time_slot=state.slot + target_len_offset_slot
)
attestations = [
build_attestation_with_shard_transition(
spec,
state,
on_time_slot=state.slot + target_len_offset_slot,
index=committee_index,
shard_transition=shard_transitions[shard],
)
for shard in shard_blocks.keys()
]
# Propose beacon block at slot `state.slot + target_len_offset_slot`
beacon_block = build_empty_block(spec, state, slot=state.slot + target_len_offset_slot)
beacon_block.body.attestations = attestations
beacon_block.body.shard_transitions = shard_transitions
pre_shard_states = state.shard_states.copy()
yield 'pre', state.copy()
yield 'block', beacon_block
state_transition_and_sign_block(spec, state, beacon_block)
if valid:
yield 'post', state
else:
yield 'post', None
return
for shard in range(spec.get_active_shard_count(state)):
post_shard_state = state.shard_states[shard]
if shard in shard_blocks:
# Shard state has been changed to state_transition result
assert post_shard_state == shard_transitions[shard].shard_states[
len(shard_transitions[shard].shard_states) - 1
]
assert beacon_block.slot == shard_transitions[shard].shard_states[0].slot + target_len_offset_slot
assert post_shard_state.slot == state.slot - 1
if len(shard_blocks[shard]) == 0:
# `latest_block_root` is the same
assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_process_beacon_block_with_normal_shard_transition(spec, state):
state = transition_to_valid_shard_slot(spec, state)
target_len_offset_slot = 1
committee_index = spec.CommitteeIndex(0)
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
assert state.shard_states[shard].slot == state.slot - 1
pre_gasprice = state.shard_states[shard].gasprice
# Create SignedShardBlock at slot `shard_state.slot + 1`
body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE
shard_block = build_shard_block(spec, state, shard, body=body, signed=True)
shard_blocks: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
yield from run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index)
shard_state = state.shard_states[shard]
if target_len_offset_slot == 1 and len(shard_blocks) > 0:
assert shard_state.gasprice > pre_gasprice
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_process_beacon_block_with_empty_proposal_transition(spec, state):
state = transition_to_valid_shard_slot(spec, state)
target_len_offset_slot = 1
committee_index = spec.CommitteeIndex(0)
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
assert state.shard_states[shard].slot == state.slot - 1
# No new shard block
shard_blocks = {}
pre_gasprice = state.shard_states[shard].gasprice
yield from run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index)
if target_len_offset_slot == 1 and len(shard_blocks) > 0:
assert state.shard_states[shard].gasprice > pre_gasprice

View File

@ -0,0 +1,395 @@
from eth2spec.test.context import spec_state_test, never_bls, with_all_phases
from eth2spec.test.helpers.attestations import build_attestation_data
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.deposits import prepare_state_and_deposit
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.test.helpers.state import next_epoch
from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist
def run_get_signature_test(spec, state, obj, domain, get_signature_fn, privkey, pubkey):
signature = get_signature_fn(state, obj, privkey)
signing_root = spec.compute_signing_root(obj, domain)
assert bls.Verify(pubkey, signing_root, signature)
def run_get_committee_assignment(spec, state, epoch, validator_index, valid=True):
try:
assignment = spec.get_committee_assignment(state, epoch, validator_index)
committee, committee_index, slot = assignment
assert spec.compute_epoch_at_slot(slot) == epoch
assert committee == spec.get_beacon_committee(state, slot, committee_index)
assert committee_index < spec.get_committee_count_at_slot(state, slot)
assert validator_index in committee
assert valid
except AssertionError:
assert not valid
else:
assert valid
def run_is_candidate_block(spec, eth1_block, period_start, success=True):
assert success == spec.is_candidate_block(eth1_block, period_start)
def get_min_new_period_epochs(spec):
return int(
spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 # to seconds
/ spec.SECONDS_PER_SLOT / spec.SLOTS_PER_EPOCH
)
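As a rough sanity check of the arithmetic, a sketch with assumed mainnet-style constants (the values below are not taken from this diff): the helper spans two Eth1 follow distances and converts that duration into whole epochs.
# Sketch with assumed constants: 14 s Eth1 blocks, 1024-block follow distance,
# 12 s slots, 32 slots per epoch.
SECONDS_PER_ETH1_BLOCK = 14
ETH1_FOLLOW_DISTANCE = 1024
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32
assert int(SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 / SECONDS_PER_SLOT / SLOTS_PER_EPOCH) == 74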
def get_mock_aggregate(spec):
return spec.Attestation(
data=spec.AttestationData(
slot=10,
)
)
#
# Becoming a validator
#
@with_all_phases
@spec_state_test
@never_bls
def test_check_if_validator_active(spec, state):
active_validator_index = len(state.validators) - 1
assert spec.check_if_validator_active(state, active_validator_index)
new_validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit = prepare_state_and_deposit(spec, state, new_validator_index, amount, signed=True)
spec.process_deposit(state, deposit)
assert not spec.check_if_validator_active(state, new_validator_index)
#
# Validator assignments
#
@with_all_phases
@spec_state_test
@never_bls
def test_get_committee_assignment_current_epoch(spec, state):
epoch = spec.get_current_epoch(state)
validator_index = len(state.validators) - 1
run_get_committee_assignment(spec, state, epoch, validator_index, valid=True)
@with_all_phases
@spec_state_test
@never_bls
def test_get_committee_assignment_next_epoch(spec, state):
epoch = spec.get_current_epoch(state) + 1
validator_index = len(state.validators) - 1
run_get_committee_assignment(spec, state, epoch, validator_index, valid=True)
@with_all_phases
@spec_state_test
@never_bls
def test_get_committee_assignment_out_bound_epoch(spec, state):
epoch = spec.get_current_epoch(state) + 2
validator_index = len(state.validators) - 1
run_get_committee_assignment(spec, state, epoch, validator_index, valid=False)
@with_all_phases
@spec_state_test
@never_bls
def test_is_proposer(spec, state):
proposer_index = spec.get_beacon_proposer_index(state)
assert spec.is_proposer(state, proposer_index)
proposer_index = (proposer_index + 1) % len(state.validators)
assert not spec.is_proposer(state, proposer_index)
#
# Beacon chain responsibilities
#
# Block proposal
@with_all_phases
@spec_state_test
def test_get_epoch_signature(spec, state):
block = spec.BeaconBlock()
privkey = privkeys[0]
pubkey = pubkeys[0]
domain = spec.get_domain(state, spec.DOMAIN_RANDAO, spec.compute_epoch_at_slot(block.slot))
run_get_signature_test(
spec=spec,
state=state,
obj=block,
domain=domain,
get_signature_fn=spec.get_epoch_signature,
privkey=privkey,
pubkey=pubkey,
)
@with_all_phases
@spec_state_test
def test_is_candidate_block(spec, state):
period_start = spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 + 1000
run_is_candidate_block(
spec,
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE),
period_start,
success=True,
)
run_is_candidate_block(
spec,
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE + 1),
period_start,
success=False,
)
run_is_candidate_block(
spec,
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2),
period_start,
success=True,
)
run_is_candidate_block(
spec,
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 - 1),
period_start,
success=False,
)
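Reading the four cases together, the eligibility window they exercise can be restated as a single predicate; this is a sketch derived from the cases above, not the spec function itself.
def in_candidate_window(timestamp, period_start, follow_time):
    # follow_time stands for SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE.
    # The four cases above accept exactly the timestamps in this closed interval.
    return period_start - 2 * follow_time <= timestamp <= period_start - follow_time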
@with_all_phases
@spec_state_test
def test_get_eth1_vote_default_vote(spec, state):
min_new_period_epochs = get_min_new_period_epochs(spec)
for _ in range(min_new_period_epochs):
next_epoch(spec, state)
state.eth1_data_votes = ()
eth1_chain = []
eth1_data = spec.get_eth1_vote(state, eth1_chain)
assert eth1_data == state.eth1_data
@with_all_phases
@spec_state_test
def test_get_eth1_vote_consensus_vote(spec, state):
min_new_period_epochs = get_min_new_period_epochs(spec)
for _ in range(min_new_period_epochs + 2):
next_epoch(spec, state)
period_start = spec.voting_period_start_time(state)
votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD
assert votes_length >= 3 # We need to have the majority vote
state.eth1_data_votes = ()
block_1 = spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE - 1)
block_2 = spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE)
eth1_chain = [block_1, block_2]
eth1_data_votes = []
# Only the first vote is for block_1
eth1_data_votes.append(spec.get_eth1_data(block_1))
# Other votes are for block_2
for _ in range(votes_length - 1):
eth1_data_votes.append(spec.get_eth1_data(block_2))
state.eth1_data_votes = eth1_data_votes
eth1_data = spec.get_eth1_vote(state, eth1_chain)
assert eth1_data.block_hash == block_2.hash_tree_root()
@with_all_phases
@spec_state_test
def test_get_eth1_vote_tie(spec, state):
min_new_period_epochs = get_min_new_period_epochs(spec)
for _ in range(min_new_period_epochs + 1):
next_epoch(spec, state)
period_start = spec.voting_period_start_time(state)
votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD
assert votes_length > 0 and votes_length % 2 == 0
state.eth1_data_votes = ()
block_1 = spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE - 1)
block_2 = spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE)
eth1_chain = [block_1, block_2]
eth1_data_votes = []
# Half of the votes are for block_1, the other half are for block_2
for i in range(votes_length):
if i % 2 == 0:
block = block_1
else:
block = block_2
eth1_data_votes.append(spec.get_eth1_data(block))
state.eth1_data_votes = eth1_data_votes
eth1_data = spec.get_eth1_vote(state, eth1_chain)
# Tiebreak by smallest distance -> eth1_chain[0]
assert eth1_data.block_hash == eth1_chain[0].hash_tree_root()
@with_all_phases
@spec_state_test
def test_compute_new_state_root(spec, state):
pre_state = state.copy()
post_state = state.copy()
block = build_empty_block(spec, state, state.slot + 1)
state_root = spec.compute_new_state_root(state, block)
assert state_root != pre_state.hash_tree_root()
assert state == pre_state
# dumb verification
spec.process_slots(post_state, block.slot)
spec.process_block(post_state, block)
assert state_root == post_state.hash_tree_root()
@with_all_phases
@spec_state_test
def test_get_block_signature(spec, state):
privkey = privkeys[0]
pubkey = pubkeys[0]
block = build_empty_block(spec, state)
domain = spec.get_domain(state, spec.DOMAIN_BEACON_PROPOSER, spec.compute_epoch_at_slot(block.slot))
run_get_signature_test(
spec=spec,
state=state,
obj=block,
domain=domain,
get_signature_fn=spec.get_block_signature,
privkey=privkey,
pubkey=pubkey,
)
# Attesting
@with_all_phases
@spec_state_test
def test_get_attestation_signature(spec, state):
privkey = privkeys[0]
pubkey = pubkeys[0]
attestation_data = spec.AttestationData(slot=10)
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
run_get_signature_test(
spec=spec,
state=state,
obj=attestation_data,
domain=domain,
get_signature_fn=spec.get_attestation_signature,
privkey=privkey,
pubkey=pubkey,
)
# Attestation aggregation
@with_all_phases
@spec_state_test
def test_get_slot_signature(spec, state):
privkey = privkeys[0]
pubkey = pubkeys[0]
slot = spec.Slot(10)
domain = spec.get_domain(state, spec.DOMAIN_SELECTION_PROOF, spec.compute_epoch_at_slot(slot))
run_get_signature_test(
spec=spec,
state=state,
obj=slot,
domain=domain,
get_signature_fn=spec.get_slot_signature,
privkey=privkey,
pubkey=pubkey,
)
@with_all_phases
@spec_state_test
def test_is_aggregator(spec, state):
# TODO: we can test the probabilistic result against `TARGET_AGGREGATORS_PER_COMMITTEE`
# if we have more validators and a larger committee size
slot = state.slot
committee_index = 0
has_aggregator = False
beacon_committee = spec.get_beacon_committee(state, slot, committee_index)
for validator_index in beacon_committee:
privkey = privkeys[validator_index]
slot_signature = spec.get_slot_signature(state, slot, privkey)
if spec.is_aggregator(state, slot, committee_index, slot_signature):
has_aggregator = True
break
assert has_aggregator
@with_all_phases
@spec_state_test
def test_get_aggregate_signature(spec, state):
attestations = []
pubkeys = []
slot = state.slot
committee_index = 0
attestation_data = build_attestation_data(spec, state, slot=slot, index=committee_index)
beacon_committee = spec.get_beacon_committee(
state,
attestation_data.slot,
attestation_data.index,
)
committee_size = len(beacon_committee)
aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
for i, validator_index in enumerate(beacon_committee):
bits = aggregation_bits
bits[i] = True
attestations.append(
spec.Attestation(
data=attestation_data,
aggregation_bits=bits,
)
)
pubkeys.append(state.validators[validator_index].pubkey)
pubkey = bls.AggregatePKs(pubkeys)
signature = spec.get_aggregate_signature(attestations)
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
signing_root = spec.compute_signing_root(attestation_data, domain)
assert bls.Verify(pubkey, signing_root, signature)
@with_all_phases
@spec_state_test
def test_get_aggregate_and_proof(spec, state):
privkey = privkeys[0]
aggregator_index = spec.ValidatorIndex(10)
aggregate = get_mock_aggregate(spec)
aggregate_and_proof = spec.get_aggregate_and_proof(state, aggregator_index, aggregate, privkey)
assert aggregate_and_proof.aggregator_index == aggregator_index
assert aggregate_and_proof.aggregate == aggregate
assert aggregate_and_proof.selection_proof == spec.get_slot_signature(state, aggregate.data.slot, privkey)
@with_all_phases
@spec_state_test
def test_get_aggregate_and_proof_signature(spec, state):
privkey = privkeys[0]
pubkey = pubkeys[0]
aggregate = get_mock_aggregate(spec)
aggregate_and_proof = spec.get_aggregate_and_proof(state, spec.ValidatorIndex(1), aggregate, privkey)
domain = spec.get_domain(state, spec.DOMAIN_AGGREGATE_AND_PROOF, spec.compute_epoch_at_slot(aggregate.data.slot))
run_get_signature_test(
spec=spec,
state=state,
obj=aggregate_and_proof,
domain=domain,
get_signature_fn=spec.get_aggregate_and_proof_signature,
privkey=privkey,
pubkey=pubkey,
)

View File

@ -51,3 +51,8 @@ def Sign(SK, message):
@only_with_bls(alt_return=STUB_COORDINATES) @only_with_bls(alt_return=STUB_COORDINATES)
def signature_to_G2(signature): def signature_to_G2(signature):
return _signature_to_G2(signature) return _signature_to_G2(signature)
@only_with_bls(alt_return=STUB_PUBKEY)
def AggregatePKs(pubkeys):
return bls._AggregatePKs(pubkeys)

View File

@ -1,28 +1,17 @@
from hashlib import sha256 from hashlib import sha256
from typing import Dict, Union
ZERO_BYTES32 = b'\x00' * 32 ZERO_BYTES32 = b'\x00' * 32
def _hash(x): def _hash(x: Union[bytes, bytearray, memoryview]) -> bytes:
return sha256(x).digest() return sha256(x).digest()
# Minimal collection of (key, value) pairs, for fast hash-retrieval, to save on repetitive computation cost. hash_cache: Dict[bytes, bytes] = {}
# Key = the hash input
# Value = the hash output
hash_cache = []
def add_zero_hashes_to_cache(): def hash(x: bytes) -> bytes:
zerohashes = [(None, ZERO_BYTES32)] if x in hash_cache:
for layer in range(1, 32): return hash_cache[x]
k = zerohashes[layer - 1][1] + zerohashes[layer - 1][1]
zerohashes.append((k, _hash(k)))
hash_cache.extend(zerohashes[1:])
def hash(x):
for (k, h) in hash_cache:
if x == k:
return h
return _hash(x) return _hash(x)
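A small, hedged sketch of how the reworked cache behaves: the dict is keyed by the raw input bytes, and, as written above, `hash` reads from the cache but does not populate it, so callers (or pre-seeded zero-hash tables) fill it explicitly.
# Sketch: seed the cache once, then the second call is served by a dict lookup.
preimage = b'\x00' * 64
digest = hash(preimage)          # computed via sha256; not inserted into the cache
hash_cache[preimage] = digest    # explicit seeding by the caller
assert hash(preimage) == digest  # now answered from hash_cache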

View File

@ -5,7 +5,7 @@ from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests from gen_from_tests.gen import generate_from_tests
from eth2spec.test.context import PHASE0 from eth2spec.test.context import PHASE0
from eth2spec.test.sanity import test_blocks, test_slots from eth2spec.test.phase_0.sanity import test_blocks, test_slots
from eth2spec.config import config_util from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0 from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1 from eth2spec.phase1 import spec as spec_phase1