Repository: https://github.com/status-im/eth2.0-specs.git
Merge branch 'v012x' into ffg_lmd_vote_consistency
Commit: b71a0ee6b9
@@ -79,16 +79,16 @@ jobs:
# Restore git repo at point close to target branch/revision, to speed up checkout
- restore_cache:
keys:
- v2-specs-repo-{{ .Branch }}-{{ .Revision }}
- v2-specs-repo-{{ .Branch }}-
- v2-specs-repo-
- v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- v3-specs-repo-{{ .Branch }}-
- v3-specs-repo-
- checkout
- run:
name: Clean up git repo to reduce cache size
command: git gc
# Save the git checkout as a cache, to make cloning next time faster.
- save_cache:
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
paths:
- ~/specs-repo
install_pyspec_test:

@@ -97,7 +97,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_pyspec_cached_venv
- run:
name: Install pyspec requirements

@@ -109,7 +109,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_pyspec_cached_venv
- run:
name: Run py-tests

@@ -140,7 +140,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_pyspec_cached_venv
- run:
name: Run linter

@@ -152,7 +152,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_deposit_contract_compiler_cached_venv
- run:
name: Install deposit contract compiler requirements

@@ -164,7 +164,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_deposit_contract_tester_cached_venv
- run:
name: Install deposit contract tester requirements

@@ -176,7 +176,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_deposit_contract_compiler_cached_venv
- run:
name: Run deposit contract compile test

@@ -187,7 +187,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_deposit_contract_tester_cached_venv
- run:
name: Run deposit contract test

Makefile
@@ -117,7 +117,7 @@ install_deposit_contract_compiler:

compile_deposit_contract:
cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \
python3.7 deposit_contract/compile.py contracts/validator_registration.vy
python3.7 deposit_contract/compile.py ../contracts/validator_registration.vy

test_compile_deposit_contract:
cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \

@@ -9,7 +9,7 @@ This repository hosts the current Eth2 specifications. Discussions about design

## Specs

Core specifications for Eth2 clients be found in [specs/](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
Core specifications for Eth2 clients be found in [specs](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:

### Phase 0
* [The Beacon Chain](specs/phase0/beacon-chain.md)
@@ -122,8 +122,8 @@ BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**25 (= 33,554,432)
INACTIVITY_PENALTY_QUOTIENT: 33554432
# 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 16777216
# 2**5 (= 32)
MIN_SLASHING_PENALTY_QUOTIENT: 32

@@ -161,10 +161,9 @@ DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
# Phase 1: Upgrade from Phase 0
# ---------------------------------------------------------------
PHASE_1_FORK_VERSION: 0x01000000
# [STUB]
PHASE_1_GENESIS_SLOT: 32
INITIAL_ACTIVE_SHARDS: 64
# Placeholder
INITIAL_GASPRICE: 10


# Phase 1: General
# ---------------------------------------------------------------

@@ -190,8 +189,8 @@ SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
# 2**14 (= 16,384) Gwei
MAX_GASPRICE: 16384
# 2**5 (= 32) Gwei
MIN_GASPRICE: 32
# 2**3 (= 8) Gwei
MIN_GASPRICE: 8
# 2**3 (= 8)
GASPRICE_ADJUSTMENT_COEFFICIENT: 8
@@ -89,7 +89,7 @@ MIN_SEED_LOOKAHEAD: 1
# 2**2 (= 4) epochs
MAX_SEED_LOOKAHEAD: 4
# [customized] higher frequency new deposits from eth1 for testing
EPOCHS_PER_ETH1_VOTING_PERIOD: 2
EPOCHS_PER_ETH1_VOTING_PERIOD: 4
# [customized] smaller state
SLOTS_PER_HISTORICAL_ROOT: 64
# 2**8 (= 256) epochs

@@ -122,8 +122,8 @@ BASE_REWARD_FACTOR: 64
WHISTLEBLOWER_REWARD_QUOTIENT: 512
# 2**3 (= 8)
PROPOSER_REWARD_QUOTIENT: 8
# 2**25 (= 33,554,432)
INACTIVITY_PENALTY_QUOTIENT: 33554432
# 2**24 (= 16,777,216)
INACTIVITY_PENALTY_QUOTIENT: 16777216
# 2**5 (= 32)
MIN_SLASHING_PENALTY_QUOTIENT: 32

@@ -162,10 +162,10 @@ DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
# ---------------------------------------------------------------
# [customized] for testnet distinction
PHASE_1_FORK_VERSION: 0x01000001
# [customized] for testing
PHASE_1_GENESIS_SLOT: 8
# [customized] reduced for testing
INITIAL_ACTIVE_SHARDS: 4
# Placeholder
INITIAL_GASPRICE: 10


# Phase 1: General

@@ -192,8 +192,8 @@ SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
# 2**14 (= 16,384) Gwei
MAX_GASPRICE: 16384
# 2**5 (= 32) Gwei
MIN_GASPRICE: 32
# 2**3 (= 8) Gwei
MIN_GASPRICE: 8
# 2**3 (= 8)
GASPRICE_ADJUSTMENT_COEFFICIENT: 8

setup.py
@@ -108,7 +108,7 @@ SSZObject = TypeVar('SSZObject', bound=View)
PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0
from eth2spec.config.config_util import apply_constants_config
from typing import (
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional
)

from dataclasses import (

@@ -146,8 +146,11 @@ _hash = hash
hash_cache: Dict[bytes, Bytes32] = {}


def get_eth1_data(distance: uint64) -> Bytes32:
return hash(distance)
def get_eth1_data(block: Eth1Block) -> Eth1Data:
"""
A stub function return mocking Eth1Data.
"""
return Eth1Data(block_hash=hash_tree_root(block))


def hash(x: bytes) -> Bytes32: # type: ignore

@@ -373,9 +376,10 @@ class PySpecCommand(Command):
self.md_doc_paths = """
specs/phase0/beacon-chain.md
specs/phase0/fork-choice.md
specs/phase0/validator.md
specs/phase1/custody-game.md
specs/phase1/beacon-chain.md
specs/phase1/fraud-proofs.md
specs/phase1/shard-transition.md
specs/phase1/fork-choice.md
specs/phase1/phase1-fork.md
"""

@@ -499,7 +503,7 @@ setup(
"pycryptodome==3.9.4",
"py_ecc==2.0.0",
"dataclasses==0.6",
"remerkleable==0.1.12",
"remerkleable==0.1.13",
"ruamel.yaml==0.16.5",
"lru-dict==1.1.6"
]
@@ -35,7 +35,7 @@
- [`DepositMessage`](#depositmessage)
- [`DepositData`](#depositdata)
- [`BeaconBlockHeader`](#beaconblockheader)
- [`SigningRoot`](#signingroot)
- [`SigningData`](#signingdata)
- [Beacon operations](#beacon-operations)
- [`ProposerSlashing`](#proposerslashing)
- [`AttesterSlashing`](#attesterslashing)

@@ -191,7 +191,6 @@ The following values are (non-configurable) constants used throughout the specif
| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `1` |
| `HYSTERESIS_UPWARD_MULTIPLIER` | `5` |


- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)

### Gwei values

@@ -242,10 +241,10 @@ The following values are (non-configurable) constants used throughout the specif
| `BASE_REWARD_FACTOR` | `2**6` (= 64) |
| `WHISTLEBLOWER_REWARD_QUOTIENT` | `2**9` (= 512) |
| `PROPOSER_REWARD_QUOTIENT` | `2**3` (= 8) |
| `INACTIVITY_PENALTY_QUOTIENT` | `2**25` (= 33,554,432) |
| `INACTIVITY_PENALTY_QUOTIENT` | `2**24` (= 16,777,216) |
| `MIN_SLASHING_PENALTY_QUOTIENT` | `2**5` (= 32) |

- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12 epochs` (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**12` epochs (about 18 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`.
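
The arithmetic in the note above can be sanity-checked with a few lines of Python. This is an illustrative sketch only (plain floats, constant values taken from the table above), not spec code:

```python
INVERSE_SQRT_E_DROP_TIME = 2**12         # epochs, about 18 days
INACTIVITY_PENALTY_QUOTIENT = 2**24      # the updated value in this diff

def retained_fraction(n_epochs: int) -> float:
    # Approximate balance fraction an offline validator keeps after n epochs.
    return (1 - 1 / INACTIVITY_PENALTY_QUOTIENT) ** (n_epochs**2 / 2)

# After INVERSE_SQRT_E_DROP_TIME epochs this is roughly 1/sqrt(e) ~= 0.6065.
print(retained_fraction(INVERSE_SQRT_E_DROP_TIME))
```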
### Max operations per block

@@ -269,7 +268,6 @@ The following values are (non-configurable) constants used throughout the specif
| `DOMAIN_SELECTION_PROOF` | `DomainType('0x05000000')` |
| `DOMAIN_AGGREGATE_AND_PROOF` | `DomainType('0x06000000')` |


## Containers

The following types are [SimpleSerialize (SSZ)](../../ssz/simple-serialize.md) containers.

@@ -399,10 +397,10 @@ class BeaconBlockHeader(Container):
body_root: Root
```

#### `SigningRoot`
#### `SigningData`

```python
class SigningRoot(Container):
class SigningData(Container):
object_root: Root
domain: Domain
```

@@ -684,14 +682,10 @@ def is_slashable_attestation_data(data_1: AttestationData, data_2: AttestationDa
```python
def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
"""
Check if ``indexed_attestation`` has valid indices and signature.
Check if ``indexed_attestation`` has sorted and unique indices and a valid aggregate signature.
"""
indices = indexed_attestation.attesting_indices

# Verify max number of indices
if not len(indices) <= MAX_VALIDATORS_PER_COMMITTEE:
return False
# Verify indices are sorted and unique
indices = indexed_attestation.attesting_indices
if not indices == sorted(set(indices)):
return False
# Verify aggregate signature

@@ -722,9 +716,9 @@ def is_valid_merkle_branch(leaf: Bytes32, branch: Sequence[Bytes32], depth: uint
#### `compute_shuffled_index`

```python
def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Bytes32) -> ValidatorIndex:
def compute_shuffled_index(index: uint64, index_count: uint64, seed: Bytes32) -> uint64:
"""
Return the shuffled validator index corresponding to ``seed`` (and ``index_count``).
Return the shuffled index corresponding to ``seed`` (and ``index_count``).
"""
assert index < index_count

@@ -732,14 +726,14 @@ def compute_shuffled_index(index: ValidatorIndex, index_count: uint64, seed: Byt
# See the 'generalized domain' algorithm on page 3
for current_round in range(SHUFFLE_ROUND_COUNT):
pivot = bytes_to_int(hash(seed + int_to_bytes(current_round, length=1))[0:8]) % index_count
flip = ValidatorIndex((pivot + index_count - index) % index_count)
flip = (pivot + index_count - index) % index_count
position = max(index, flip)
source = hash(seed + int_to_bytes(current_round, length=1) + int_to_bytes(position // 256, length=4))
byte = source[(position % 256) // 8]
bit = (byte >> (position % 8)) % 2
index = flip if bit else index

return ValidatorIndex(index)
return index
```

#### `compute_proposer_index`

@@ -753,11 +747,11 @@ def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex]
MAX_RANDOM_BYTE = 2**8 - 1
i = 0
while True:
candidate_index = indices[compute_shuffled_index(ValidatorIndex(i % len(indices)), len(indices), seed)]
candidate_index = indices[compute_shuffled_index(i % len(indices), len(indices), seed)]
random_byte = hash(seed + int_to_bytes(i // 32, length=8))[i % 32]
effective_balance = state.validators[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
return ValidatorIndex(candidate_index)
return candidate_index
i += 1
```

@@ -773,7 +767,7 @@ def compute_committee(indices: Sequence[ValidatorIndex],
"""
start = (len(indices) * index) // count
end = (len(indices) * (index + 1)) // count
return [indices[compute_shuffled_index(ValidatorIndex(i), len(indices), seed)] for i in range(start, end)]
return [indices[compute_shuffled_index(i, len(indices), seed)] for i in range(start, end)]
```

#### `compute_epoch_at_slot`

@@ -852,13 +846,12 @@ def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_
```python
def compute_signing_root(ssz_object: SSZObject, domain: Domain) -> Root:
"""
Return the signing root of an object by calculating the root of the object-domain tree.
Return the signing root for the corresponding signing data.
"""
domain_wrapped_object = SigningRoot(
return hash_tree_root(SigningData(
object_root=hash_tree_root(ssz_object),
domain=domain,
)
return hash_tree_root(domain_wrapped_object)
))
```

### Beacon state accessors

@@ -1125,7 +1118,7 @@ def slash_validator(state: BeaconState,
whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
increase_balance(state, proposer_index, proposer_reward)
increase_balance(state, whistleblower_index, whistleblower_reward - proposer_reward)
increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
```

## Genesis

@@ -1174,6 +1167,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
return state
```

*Note*: The ETH1 block with `eth1_timestamp` meeting the minimum genesis active validator count criteria can also occur before `MIN_GENESIS_TIME`.

### Genesis state

Let `genesis_state = candidate_state` whenever `is_valid_genesis_state(candidate_state) is True` for the first time.

@@ -1195,7 +1190,7 @@ Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.

## Beacon chain state transition function

The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid.
The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
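
As a rough illustration (not spec code), block validity under this rule can be thought of as running the transition on a copy of the state and treating any exception as rejection; the use of `state.copy()` and a blanket exception handler are assumptions made for this sketch:

```python
def is_valid_block_sketch(state: BeaconState, signed_block: SignedBeaconBlock) -> bool:
    try:
        # Run the transition on a throwaway copy; any failed assert or bad access
        # means the signed block is invalid with respect to the pre-state.
        state_transition(state.copy(), signed_block, validate_result=True)
        return True
    except Exception:
        return False
```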
```python
def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> BeaconState:

@@ -1229,7 +1224,7 @@ def process_slots(state: BeaconState, slot: Slot) -> None:
# Process epoch on the start slot of the next epoch
if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
process_epoch(state)
state.slot += Slot(1)
state.slot = Slot(state.slot + 1)
```

```python
@@ -108,7 +108,7 @@ def get_forkchoice_store(anchor_state: BeaconState) -> Store:
justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
return Store(
time=anchor_state.genesis_time,
time=anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot,
genesis_time=anchor_state.genesis_time,
justified_checkpoint=justified_checkpoint,
finalized_checkpoint=finalized_checkpoint,

@@ -273,13 +273,12 @@ def validate_on_attestation(store: Store, attestation: Attestation) -> None:
current_epoch = compute_epoch_at_slot(get_current_slot(store))
# Use GENESIS_EPOCH for previous when genesis to avoid underflow
previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
# If attestation target is from a future epoch, delay consideration until the epoch arrives
assert target.epoch in [current_epoch, previous_epoch]
assert target.epoch == compute_epoch_at_slot(attestation.data.slot)

# Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
assert target.root in store.blocks
# Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)

# Attestations must be for a known block. If block is unknown, delay consideration until the block is found
assert attestation.data.beacon_block_root in store.blocks
@@ -4,7 +4,7 @@ This document contains the networking specification for Ethereum 2.0 clients.

It consists of four main sections:

1. A specification of the network fundamentals detailing the two network configurations: interoperability test network and mainnet launch.
1. A specification of the network fundamentals.
2. A specification of the three network interaction *domains* of Eth2: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain.
3. The rationale and further explanation for the design choices made in the previous two sections.
4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed.

@@ -17,14 +17,8 @@ It consists of four main sections:

- [Network fundamentals](#network-fundamentals)
- [Transport](#transport)
- [Interop](#interop)
- [Mainnet](#mainnet)
- [Encryption and identification](#encryption-and-identification)
- [Interop](#interop-1)
- [Mainnet](#mainnet-1)
- [Protocol Negotiation](#protocol-negotiation)
- [Interop](#interop-2)
- [Mainnet](#mainnet-2)
- [Multiplexing](#multiplexing)
- [Eth2 network interaction domains](#eth2-network-interaction-domains)
- [Configuration](#configuration)

@@ -33,11 +27,8 @@ It consists of four main sections:
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
- [Attestation subnets](#attestation-subnets)
- [Interop](#interop-3)
- [Mainnet](#mainnet-3)
- [Attestations and Aggregation](#attestations-and-aggregation)
- [Encodings](#encodings)
- [Interop](#interop-4)
- [Mainnet](#mainnet-4)
- [The Req/Resp domain](#the-reqresp-domain)
- [Protocol identification](#protocol-identification)
- [Req/Resp interaction](#reqresp-interaction)

@@ -56,29 +47,25 @@ It consists of four main sections:
- [Integration into libp2p stacks](#integration-into-libp2p-stacks)
- [ENR structure](#enr-structure)
- [Attestation subnet bitfield](#attestation-subnet-bitfield)
- [Interop](#interop-5)
- [Mainnet](#mainnet-5)
- [`eth2` field](#eth2-field)
- [General capabilities](#general-capabilities)
- [`eth2` field](#eth2-field)
- [General capabilities](#general-capabilities)
- [Topic advertisement](#topic-advertisement)
- [Mainnet](#mainnet-6)
- [Design decision rationale](#design-decision-rationale)
- [Transport](#transport-1)
- [Why are we defining specific transports?](#why-are-we-defining-specific-transports)
- [Can clients support other transports/handshakes than the ones mandated by the spec?](#can-clients-support-other-transportshandshakes-than-the-ones-mandated-by-the-spec)
- [What are the advantages of using TCP/QUIC/Websockets?](#what-are-the-advantages-of-using-tcpquicwebsockets)
- [Why do we not just support a single transport?](#why-do-we-not-just-support-a-single-transport)
- [Why are we not using QUIC for mainnet from the start?](#why-are-we-not-using-quic-for-mainnet-from-the-start)
- [Why are we not using QUIC from the start?](#why-are-we-not-using-quic-from-the-start)
- [Multiplexing](#multiplexing-1)
- [Why are we using mplex/yamux?](#why-are-we-using-mplexyamux)
- [Protocol Negotiation](#protocol-negotiation-1)
- [When is multiselect 2.0 due and why are we using it for mainnet?](#when-is-multiselect-20-due-and-why-are-we-using-it-for-mainnet)
- [When is multiselect 2.0 due and why do we plan to migrate to it?](#when-is-multiselect-20-due-and-why-do-we-plan-to-migrate-to-it)
- [What is the difference between connection-level and stream-level protocol negotiation?](#what-is-the-difference-between-connection-level-and-stream-level-protocol-negotiation)
- [Encryption](#encryption)
- [Why are we using SecIO for interop? Why not for mainnet?](#why-are-we-using-secio-for-interop-why-not-for-mainnet)
- [Why are we using Noise/TLS 1.3 for mainnet?](#why-are-we-using-noisetls-13-for-mainnet)
- [Why are we not supporting SecIO?](#why-are-we-not-supporting-secio)
- [Why are we using Noise/TLS 1.3?](#why-are-we-using-noisetls-13)
- [Why are we using encryption at all?](#why-are-we-using-encryption-at-all)
- [Will mainnnet networking be untested when it launches?](#will-mainnnet-networking-be-untested-when-it-launches)
- [Gossipsub](#gossipsub)
- [Why are we using a pub/sub algorithm for block and attestation propagation?](#why-are-we-using-a-pubsub-algorithm-for-block-and-attestation-propagation)
- [Why are we using topics to segregate encodings, yet only support one encoding?](#why-are-we-using-topics-to-segregate-encodings-yet-only-support-one-encoding)

@@ -105,6 +92,7 @@ It consists of four main sections:
- [Discovery](#discovery)
- [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht)
- [What is the difference between an ENR and a multiaddr, and why are we using ENRs?](#what-is-the-difference-between-an-enr-and-a-multiaddr-and-why-are-we-using-enrs)
- [Why do we not form ENRs and find peers until genesis block/state is known?](#why-do-we-not-form-enrs-and-find-peers-until-genesis-blockstate-is-known)
- [Compression/Encoding](#compressionencoding)
- [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding)
- [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers)
@@ -119,46 +107,22 @@ It consists of four main sections:

This section outlines the specification for the networking stack in Ethereum 2.0 clients.

Sections that have differing parameters for mainnet launch and interoperability testing are split into subsections. Sections that are not split have the same parameters for interoperability testing as mainnet launch.

## Transport

Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability.

#### Interop

All implementations MUST support the TCP libp2p transport, and it MUST be enabled for both dialing and listening (i.e. outbound and inbound connections). The libp2p TCP transport supports listening on IPv4 and IPv6 addresses (and on multiple simultaneously).

To facilitate connectivity and avert possible IPv6 routability/support issues, clients participating in the interoperability testnet MUST expose at least ONE IPv4 endpoint.
Clients must support listening on at least one of IPv4 or IPv6. Clients that do _not_ have support for listening on IPv4 SHOULD be cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST be capable of dialing both IPv4 and IPv6 addresses.

All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities.
All listening endpoints must be publicly dialable, and thus not rely on libp2p circuit relay, AutoNAT, or AutoRelay facilities. (Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined soon.)

Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint.

#### Mainnet

All requirements from the interoperability testnet apply, except for the IPv4 addressing scheme requirement.

At this stage, clients are licensed to drop IPv4 support if they wish to do so, cognizant of the potential disadvantages in terms of Internet-wide routability/support. Clients MAY choose to listen only on IPv6, but MUST retain capability to dial both IPv4 and IPv6 addresses.

Usage of circuit relay, AutoNAT, or AutoRelay will be specifically re-examined closer to the time.

## Encryption and identification

#### Interop

[SecIO](https://github.com/libp2p/specs/tree/master/secio) with `secp256k1` identities will be used for initial interoperability testing.

The following SecIO parameters MUST be supported by all stacks:

- Key agreement: ECDH-P256.
- Cipher: AES-128.
- Digest: SHA-256.

#### Mainnet

The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure
channel handshake with `secp256k1` identities will be used for mainnet.
channel handshake with `secp256k1` identities will be used for encryption.

As specified in the libp2p specification, clients MUST support the `XX` handshake pattern.

@@ -166,13 +130,7 @@ As specified in the libp2p specification, clients MUST support the `XX` handshak

Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers.

#### Interop

Connection-level and stream-level (see the [Rationale](#design-decision-rationale) section below for explanations) protocol negotiation MUST be conducted using [multistream-select v1.0](https://github.com/multiformats/multistream-select/). Its protocol ID is: `/multistream/1.0.0`.

#### Mainnet

Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95). Depending on the number of clients that have implementations for multiselect 2.0 by mainnet, [multistream-select 1.0](https://github.com/multiformats/multistream-select/) may be phased out.
Clients MUST support [multistream-select 1.0](https://github.com/multiformats/multistream-select/) and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when the spec solidifies. Once all clients have implementations for multiselect 2.0, multistream-select 1.0 MAY be phased out.

## Multiplexing

@@ -180,7 +138,7 @@ During connection bootstrapping, libp2p dynamically negotiates a mutually suppor

Two multiplexers are commonplace in libp2p implementations: [mplex](https://github.com/libp2p/specs/tree/master/mplex) and [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). Their protocol IDs are, respectively: `/mplex/6.7.0` and `/yamux/1.0.0`.

Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux must take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs.
Clients MUST support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux MUST take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs.

# Eth2 network interaction domains

@@ -247,6 +205,8 @@ Topics are plain UTF-8 strings and are encoded on the wire as determined by prot
- `Name` - see table below
- `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encoding-strategies) section for further details.

*Note*: `ForkDigestValue` is composed of values that are not known until the genesis block/state are available. Due to this, clients SHOULD NOT subscribe to gossipsub topics until these genesis values are known.

Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit.

The `message-id` of a gossipsub message MUST be:
@@ -262,7 +222,6 @@ The payload is carried in the `data` field of a gossipsub message, and varies de
|------------------------------------------------|-------------------------|
| beacon_block | SignedBeaconBlock |
| beacon_aggregate_and_proof | SignedAggregateAndProof |
| beacon_attestation\* | Attestation |
| committee_index{subnet_id}\_beacon_attestation | Attestation |
| voluntary_exit | SignedVoluntaryExit |
| proposer_slashing | ProposerSlashing |

@@ -272,8 +231,6 @@ Clients MUST reject (fail validation) messages containing an incorrect type, or

When processing incoming gossip, clients MAY descore or disconnect peers who fail to observe these constraints.

\* The `beacon_attestation` topic is only for interop and will be removed prior to mainnet.

#### Global topics

There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `Name`s are:

@@ -286,8 +243,8 @@ There are two primary global topics used to propagate beacon blocks and aggregat
- The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the block MAY be queued for later processing while proposers for the block's branch are calculated.
- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network. (We define the following for convenience -- `aggregate_and_proof = signed_aggregate_and_proof.message` and `aggregate = aggregate_and_proof.aggregate`)
- `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot).
- The aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally).
- The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the slot `aggregate.data.slot`.
- The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
- The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
- The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
- `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
- The aggregator's validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)`.
@@ -316,15 +273,11 @@ Attestation subnets are used to propagate unaggregated attestations to subsectio
- The attestation's committee index (`attestation.data.index`) is for the correct subnet.
- `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (within a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` (a client MAY queue future attestations for processing at the appropriate slot).
- The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`).
- The attestation is the first valid attestation received for the participating validator for the slot, `attestation.data.slot`.
- There has been no other valid attestation seen on an attestation subnet that has an identical `attestation.data.target.epoch` and participating validator index.
- The block being voted for (`attestation.data.beacon_block_root`) passes validation.
- The signature of `attestation` is valid.

#### Interop

Unaggregated and aggregated attestations from all shards are sent as `Attestation`s to the `beacon_attestation` topic. Clients are not required to publish aggregate attestations but must be able to process them. All validating clients SHOULD try to perform local attestation aggregation to prepare for block proposing.

#### Mainnet
#### Attestations and Aggregation

Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. For the `committee_index{subnet_id}_beacon_attestation` topics, `subnet_id` is set to `index % ATTESTATION_SUBNET_COUNT`, where `index` is the `CommitteeIndex` of the given committee.

@@ -336,15 +289,11 @@ Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `A

Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded.

#### Interop
- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) block compression. Example: The beacon aggregate attestation topic string is `/eth2/446a7232/beacon_aggregate_and_proof/ssz_snappy`, the fork digest is `446a7232` and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy.

- `ssz` - All objects are [SSZ-encoded](#ssz-encoding). Example: The beacon block topic string is `/eth2/beacon_block/ssz`, and the data field of a gossipsub message is an ssz-encoded `SignedBeaconBlock`.
Snappy has two formats: "block" and "frames" (streaming). Gossip messages remain relatively small (100s of bytes to 100s of kilobytes) so [basic snappy block compression](https://github.com/google/snappy/blob/master/format_description.txt) is used to avoid the additional overhead associated with snappy frames.

#### Mainnet

- `ssz_snappy` - All objects are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). Example: The beacon aggregate attestation topic string is `/eth2/beacon_aggregate_and_proof/ssz_snappy`, and the data field of a gossipsub message is an `AggregateAndProof` that has been SSZ-encoded and then compressed with Snappy.

Implementations MUST use a single encoding. Changing an encoding will require coordination between participating implementations.
Implementations MUST use a single encoding for gossip. Changing an encoding will require coordination between participating implementations.

## The Req/Resp domain
@@ -445,7 +394,7 @@ Here, `result` represents the 1-byte response code.
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time:

- `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s.
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet.
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy) frames compression. This encoding type MUST be supported by all clients.

#### SSZ-encoding strategy (with or without Snappy)

@@ -458,7 +407,7 @@ Snappy has two formats: "block" and "frames" (streaming). To support large reque
Since snappy frame contents [have a maximum size of `65536` bytes](https://github.com/google/snappy/blob/master/framing_format.txt#L104)
and frame headers are just `identifier (1) + checksum (4)` bytes, the expected buffering of a single frame is acceptable.

**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST encode the length of the raw SSZ bytes, encoded as an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).

*Writing*: By first computing and writing the SSZ byte length, the SSZ encoder can then directly write the chunk contents to the stream.
If Snappy is applied, it can be passed through a buffered Snappy writer to compress frame by frame.
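
For illustration, a minimal sketch of the unsigned-varint length prefix described above (plain Python, not normative code):

```python
def encode_varint(length: int) -> bytes:
    """Encode a non-negative length as an unsigned protobuf varint (LEB128)."""
    out = bytearray()
    while True:
        byte = length & 0x7F          # low 7 bits of the remaining value
        length >>= 7
        if length:
            out.append(byte | 0x80)   # more bytes follow: set the high bit
        else:
            out.append(byte)          # final byte: high bit clear
            return bytes(out)

assert encode_varint(300) == b"\xac\x02"  # the standard protobuf example value
```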
@@ -572,7 +521,7 @@ Response Content:
)
```

Requests count beacon blocks from the peer starting from `start_slot`, leading up to the current head block as selected by fork choice. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at slots [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. A step value of 1 returns all blocks on the range `[start_slot, start_slot + count)`.
Requests beacon blocks in the slot range `[start_slot, start_slot + count * step)`, leading up to the current head block as selected by fork choice. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at slots [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …].
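
A small illustrative sketch (not spec code) of which slots such a request covers:

```python
def requested_slots(start_slot: int, count: int, step: int) -> list:
    # Slots covered by a BeaconBlocksByRange request; peers simply skip empty slots.
    return [start_slot + i * step for i in range(count)]

assert requested_slots(2, 3, 2) == [2, 4, 6]  # the example from the text above
assert requested_slots(2, 3, 1) == [2, 3, 4]  # step of 1 gives a contiguous range
```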
`BeaconBlocksByRange` is primarily used to sync historical blocks.

@@ -678,7 +627,7 @@ The response MUST consist of a single `response_chunk`.

## The discovery domain: discv5

Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery, both in the interoperability testnet and mainnet.
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) is used for peer discovery.

`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.

@@ -718,15 +667,7 @@ If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the `a

If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely.

#### Interop

In the interoperability testnet, all peers will support all capabilities defined in this document (gossip, full Req/Resp suite, discovery protocol), therefore the ENR record does not need to carry Eth2 capability information, as it would be superfluous.

Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that the peer is indeed an Eth2 peer, in order to eschew connecting to Eth 1.0 peers.

#### Mainnet

##### `eth2` field
#### `eth2` field

ENRs MUST carry a generic `eth2` key with an 16-byte value of the node's current fork digest, next fork version, and next fork epoch to ensure connections are made with peers on the intended eth2 network.

@@ -752,18 +693,18 @@ where the fields of `ENRForkID` are defined as
* `next_fork_version` is the fork version corresponding to the next planned hard fork at a future epoch. If no future fork is planned, set `next_fork_version = current_fork_version` to signal this fact
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated. If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact

*Note*: `fork_digest` is composed of values that are not not known until the genesis block/state are available. Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known. One notable exception to this rule is the distribution of bootnode ENRs prior to genesis. In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as `ENRForkID(fork_digest=compute_fork_digest(GENESIS_FORK_VERSION, b'\x00'*32), next_fork_version=GENESIS_FORK_VERSION, next_fork_epoch=FAR_FUTURE_EPOCH)`. After genesis values are known, the bootnodes SHOULD update ENRs to participate in normal discovery operations.

Clients SHOULD connect to peers with `fork_digest`, `next_fork_version`, and `next_fork_epoch` that match local values.

Clients MAY connect to peers with the same `fork_digest` but a different `next_fork_version`/`next_fork_epoch`. Unless `ENRForkID` is manually updated to matching prior to the earlier `next_fork_epoch` of the two clients, these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.
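
As a rough sketch of the peer-selection rule above (assuming the spec's `ENRForkID` container; not normative code):

```python
def prefer_peer(local: ENRForkID, peer: ENRForkID) -> bool:
    # SHOULD: prefer peers whose ENRForkID matches ours exactly.
    # MAY: still dial peers that only share fork_digest, accepting divergence
    # at the earlier of the two next_fork_epoch values.
    return (peer.fork_digest == local.fork_digest
            and peer.next_fork_version == local.next_fork_version
            and peer.next_fork_epoch == local.next_fork_epoch)
```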
##### General capabilities
#### General capabilities

On mainnet, ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability.
ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability.

### Topic advertisement

#### Mainnet

discv5's topic advertisement feature is not expected to be ready for mainnet launch of Phase 0.

Once this feature is built out and stable, we expect to use topic advertisement as a rendezvous facility for peers on shards. Until then, the ENR [attestation subnet bitfield](#attestation-subnet-bitfield) will be used for discovery of peers on particular subnets.

@@ -809,7 +750,7 @@ Modeling for upgradeability and dynamic transport selection from the get-go lays

Clients can adopt new transports without breaking old ones, and the multi-transport ability enables constrained and sandboxed environments (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers.

### Why are we not using QUIC for mainnet from the start?
### Why are we not using QUIC from the start?

The QUIC standard is still not finalized (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). One remarkable example is node.js, where the QUIC implementation is [in early development](https://github.com/nodejs/quic).

@@ -825,13 +766,13 @@ Overlay multiplexers are not necessary with QUIC since the protocol provides nat

## Protocol Negotiation

### When is multiselect 2.0 due and why are we using it for mainnet?
### When is multiselect 2.0 due and why do we plan to migrate to it?

multiselect 2.0 is currently being conceptualized. The debate started [on this issue](https://github.com/libp2p/specs/pull/95), but it got overloaded—as it tends to happen with large conceptual OSS discussions that touch the heart and core of a system.

In the following weeks (August 2019), there will be a renewed initiative to first define the requirements, constraints, assumptions, and features, in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation.
At some point in 2020, we expect a renewed initiative to first define the requirements, constraints, assumptions, and features, in order to lock in basic consensus upfront and subsequently build on that consensus by submitting a specification for implementation.

We plan to use multiselect 2.0 for mainnet because it will:
We plan to eventually migrate to multiselect 2.0 because it will:

1. Reduce round trips during connection bootstrapping and stream protocol negotiation.
2. Enable efficient one-stream-per-request interaction patterns.

@@ -853,17 +794,15 @@ At present, multistream-select 1.0 is used for both types of negotiation, but mu

## Encryption

### Why are we using SecIO for interop? Why not for mainnet?
### Why are we not supporting SecIO?

SecIO has been the default encryption layer for libp2p for years. It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale.

SecIO is the common denominator across the various language libraries at this stage. It is widely implemented. That’s why we have chosen to use it for initial interop to minimize overhead in getting to a basic interoperability testnet.

We won’t be using it for mainnet because, amongst other things, it requires several round trips to be sound, and doesn’t support early data (0-RTT data), a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping.
Although SecIO has wide language support, we won’t be using it for mainnet because, amongst other things, it requires several round trips to be sound, and doesn’t support early data (0-RTT data), a mechanism that multiselect 2.0 will leverage to reduce round trips during connection bootstrapping.

SecIO is not considered secure for the purposes of this spec.

### Why are we using Noise/TLS 1.3 for mainnet?
### Why are we using Noise/TLS 1.3?

Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org):

@@ -889,10 +828,6 @@ Transport level encryption secures message exchange and provides properties that

Note that transport-level encryption is not exclusive of application-level encryption or cryptography. Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.).

### Will mainnnet networking be untested when it launches?

Before launching mainnet, the testnet will be switched over to mainnet networking parameters, including Noise handshakes, and other new protocols. This gives us an opportunity to drill coordinated network upgrades and verifying that there are no significant upgradeability gaps.

## Gossipsub

### Why are we using a pub/sub algorithm for block and attestation propagation?

@@ -1001,7 +936,7 @@ Requests are segregated by protocol ID to:
2. Affording this level of granularity with a top-level protocol would imply creating as many variants (e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2).
7. Allow us to simplify the payload of requests. Request-id’s and method-ids no longer need to be sent. The encoding/request type and version can all be handled by the framework.

**Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. Multiselect 2.0 will remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to hinder interop testing. More info is to be released from the libp2p community in the coming weeks.
**Caveat**: The protocol negotiation component in the current version of libp2p is called multistream-select 1.0. It is somewhat naïve and introduces overhead on every request when negotiating streams, although implementation-specific optimizations are possible to save this cost. Multiselect 2.0 will eventually remove this overhead by memoizing previously selected protocols, and modeling shared protocol tables. Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to significantly hinder this domain.

### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?

@@ -1092,6 +1027,12 @@ discv5 uses ENRs and we will presumably need to:
1. Add `multiaddr` to the dictionary, so that nodes can advertise their multiaddr under a reserved namespace in ENRs. – and/or –
2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Eth 1.0 nodes).

### Why do we not form ENRs and find peers until genesis block/state is known?

Although client software might very well be running locally prior to the solidification of the eth2 genesis state and block, clients cannot form valid ENRs prior to this point. ENRs contain `fork_digest` which utilizes the `genesis_validators_root` for a cleaner separation between chains so prior to knowing genesis, we cannot use `fork_digest` to cleanly find peers on our intended chain. Once genesis data is known, we can then form ENRs and safely find peers.

When using an eth1 deposit contract for deposits, `fork_digest` will be known at least `MIN_GENESIS_DELAY` (24 hours in mainnet configuration) before `genesis_time`, providing ample time to find peers and form initial connections and gossip subnets prior to genesis.

## Compression/Encoding

### Why are we using SSZ for encoding?

@@ -281,8 +281,8 @@ def voting_period_start_time(state: BeaconState) -> uint64:
```python
def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool:
return (
block.timestamp <= period_start - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE
and block.timestamp >= period_start - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2
block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= period_start
and block.timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE * 2 >= period_start
)
```
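
The rearrangement above replaces subtractions with additions: with `uint64` arithmetic, `period_start - SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE` would underflow whenever the offset exceeds `period_start`, while the new form only ever adds. A minimal sketch with plain integers (the constant values here are assumptions chosen for illustration):

```python
SECONDS_PER_ETH1_BLOCK = 14      # illustrative value
ETH1_FOLLOW_DISTANCE = 1024      # illustrative value

def is_candidate_block_sketch(timestamp: int, period_start: int) -> bool:
    offset = SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE
    # Equivalent to period_start - 2*offset <= timestamp <= period_start - offset,
    # but written without subtraction so small period_start values cannot underflow.
    return timestamp + offset <= period_start and timestamp + 2 * offset >= period_start
```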
@ -340,9 +340,10 @@ It is useful to be able to run a state transition function (working on a copy of
|
||||
|
||||
```python
|
||||
def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
|
||||
process_slots(state, block.slot)
|
||||
process_block(state, block)
|
||||
return hash_tree_root(state)
|
||||
temp_state: BeaconState = state.copy()
|
||||
signed_block = SignedBeaconBlock(message=block)
|
||||
temp_state = state_transition(temp_state, signed_block, validate_result=False)
|
||||
return hash_tree_root(temp_state)
|
||||
```
|
||||
|
||||
##### Signature
|
||||
@ -350,9 +351,9 @@ def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
|
||||
`signed_block = SignedBeaconBlock(message=block, signature=block_signature)`, where `block_signature` is obtained from:
|
||||
|
||||
```python
|
||||
def get_block_signature(state: BeaconState, header: BeaconBlockHeader, privkey: int) -> BLSSignature:
|
||||
domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(header.slot))
|
||||
signing_root = compute_signing_root(header, domain)
|
||||
def get_block_signature(state: BeaconState, block: BeaconBlock, privkey: int) -> BLSSignature:
|
||||
domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(block.slot))
|
||||
signing_root = compute_signing_root(block, domain)
|
||||
return bls.Sign(privkey, signing_root)
|
||||
```
|
||||
|
||||
@ -524,6 +525,8 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th
|
||||
* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets
|
||||
* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR
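A non-normative sketch of the subscription-lifetime rule above, assuming the `ATTESTATION_SUBNET_COUNT` and `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` values from the mainnet configuration at the time of writing; clients are free to implement the bookkeeping however they wish:

```python
import random

ATTESTATION_SUBNET_COUNT = 64                    # assumed mainnet value
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 2**8     # assumed mainnet value (= 256)


def select_random_subnet_subscription(current_epoch: int) -> tuple:
    """Pick a random subnet id and the epoch at which the subscription expires."""
    subnet_id = random.randrange(ATTESTATION_SUBNET_COUNT)
    lifetime = random.randint(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION,
                              2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION)
    return subnet_id, current_epoch + lifetime
```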
|
||||
|
||||
*Note*: Short-lived beacon committee assignments should not be added to the ENR `attnets` entry.
|
||||
|
||||
*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
|
||||
|
||||
## How to avoid slashing
|
||||
|
@ -16,40 +16,45 @@
|
||||
- [Extended `AttestationData`](#extended-attestationdata)
|
||||
- [Extended `Attestation`](#extended-attestation)
|
||||
- [Extended `PendingAttestation`](#extended-pendingattestation)
|
||||
- [`IndexedAttestation`](#indexedattestation)
|
||||
- [Extended `AttesterSlashing`](#extended-attesterslashing)
|
||||
- [Extended `IndexedAttestation`](#extended-indexedattestation)
|
||||
- [Extended `AttesterSlashing`](#extended-attesterslashing)
|
||||
- [Extended `Validator`](#extended-validator)
|
||||
- [Extended `BeaconBlockBody`](#extended-beaconblockbody)
|
||||
- [Extended `BeaconBlock`](#extended-beaconblock)
|
||||
- [Extended `SignedBeaconBlock`](#extended-signedbeaconblock)
|
||||
- [Extended `BeaconState`](#extended-beaconstate)
|
||||
- [New containers](#new-containers)
|
||||
- [`ShardBlockWrapper`](#shardblockwrapper)
|
||||
- [`ShardSignableHeader`](#shardsignableheader)
|
||||
- [`ShardBlock`](#shardblock)
|
||||
- [`SignedShardBlock`](#signedshardblock)
|
||||
- [`ShardBlockHeader`](#shardblockheader)
|
||||
- [`ShardState`](#shardstate)
|
||||
- [`ShardTransition`](#shardtransition)
|
||||
- [`CompactCommittee`](#compactcommittee)
|
||||
- [`AttestationCustodyBitWrapper`](#attestationcustodybitwrapper)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc-1)
|
||||
- [`get_previous_slot`](#get_previous_slot)
|
||||
- [`compute_previous_slot`](#compute_previous_slot)
|
||||
- [`pack_compact_validator`](#pack_compact_validator)
|
||||
- [`unpack_compact_validator`](#unpack_compact_validator)
|
||||
- [`committee_to_compact_committee`](#committee_to_compact_committee)
|
||||
- [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
|
||||
- [`compute_offset_slots`](#compute_offset_slots)
|
||||
- [`compute_updated_gasprice`](#compute_updated_gasprice)
|
||||
- [Beacon state accessors](#beacon-state-accessors)
|
||||
- [`get_active_shard_count`](#get_active_shard_count)
|
||||
- [`get_online_validator_indices`](#get_online_validator_indices)
|
||||
- [`get_shard_committee`](#get_shard_committee)
|
||||
- [`get_shard_proposer_index`](#get_shard_proposer_index)
|
||||
- [`get_light_client_committee`](#get_light_client_committee)
|
||||
- [`get_shard_proposer_index`](#get_shard_proposer_index)
|
||||
- [`get_indexed_attestation`](#get_indexed_attestation)
|
||||
- [`get_updated_gasprice`](#get_updated_gasprice)
|
||||
- [`get_start_shard`](#get_start_shard)
|
||||
- [`get_shard`](#get_shard)
|
||||
- [`get_next_slot_for_shard`](#get_next_slot_for_shard)
|
||||
- [`get_latest_slot_for_shard`](#get_latest_slot_for_shard)
|
||||
- [`get_offset_slots`](#get_offset_slots)
|
||||
- [Predicates](#predicates)
|
||||
- [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation)
|
||||
- [`is_shard_attestation`](#is_shard_attestation)
|
||||
- [`is_winning_attestation`](#is_winning_attestation)
|
||||
- [Block processing](#block-processing)
|
||||
- [Operations](#operations)
|
||||
- [New Attestation processing](#new-attestation-processing)
|
||||
@ -57,7 +62,7 @@
|
||||
- [`apply_shard_transition`](#apply_shard_transition)
|
||||
- [`process_crosslink_for_shard`](#process_crosslink_for_shard)
|
||||
- [`process_crosslinks`](#process_crosslinks)
|
||||
- [`process_attestations`](#process_attestations)
|
||||
- [`process_attestation`](#process_attestation)
|
||||
- [New Attester slashing processing](#new-attester-slashing-processing)
|
||||
- [Shard transition false positives](#shard-transition-false-positives)
|
||||
- [Light client processing](#light-client-processing)
|
||||
@ -101,7 +106,7 @@ Configuration is not namespaced. Instead it is strictly an extension;
|
||||
| `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | |
|
||||
| `MAX_SHARD_BLOCKS_PER_ATTESTATION` | `len(SHARD_BLOCK_OFFSETS)` | |
|
||||
| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | |
|
||||
| `MIN_GASPRICE` | `Gwei(2**5)` (= 32) | Gwei | |
|
||||
| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | |
|
||||
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `2**3` (= 8) | |
|
||||
| `DOMAIN_SHARD_PROPOSAL` | `DomainType('0x80000000')` | |
|
||||
| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | |
|
||||
@ -149,7 +154,7 @@ class PendingAttestation(Container):
|
||||
crosslink_success: boolean
|
||||
```
|
||||
|
||||
### `IndexedAttestation`
|
||||
### Extended `IndexedAttestation`
|
||||
|
||||
```python
|
||||
class IndexedAttestation(Container):
|
||||
@ -157,7 +162,7 @@ class IndexedAttestation(Container):
|
||||
attestation: Attestation
|
||||
```
|
||||
|
||||
#### Extended `AttesterSlashing`
|
||||
### Extended `AttesterSlashing`
|
||||
|
||||
Note that the `attestation_1` and `attestation_2` have a new `IndexedAttestation` definition.
|
||||
|
||||
@ -288,26 +293,33 @@ class BeaconState(Container):
|
||||
|
||||
The following containers are new in Phase 1.
|
||||
|
||||
### `ShardBlockWrapper`
|
||||
|
||||
_Wrapper for being broadcasted over the network._
|
||||
### `ShardBlock`
|
||||
|
||||
```python
|
||||
class ShardBlockWrapper(Container):
|
||||
class ShardBlock(Container):
|
||||
shard_parent_root: Root
|
||||
beacon_parent_root: Root
|
||||
slot: Slot
|
||||
proposer_index: ValidatorIndex
|
||||
body: ByteList[MAX_SHARD_BLOCK_SIZE]
|
||||
```
|
||||
|
||||
### `SignedShardBlock`
|
||||
|
||||
```python
|
||||
class SignedShardBlock(Container):
|
||||
message: ShardBlock
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
### `ShardSignableHeader`
|
||||
### `ShardBlockHeader`
|
||||
|
||||
```python
|
||||
class ShardSignableHeader(Container):
|
||||
class ShardBlockHeader(Container):
|
||||
shard_parent_root: Root
|
||||
beacon_parent_root: Root
|
||||
slot: Slot
|
||||
proposer_index: ValidatorIndex
|
||||
body_root: Root
|
||||
```
|
||||
|
||||
@ -317,7 +329,7 @@ class ShardSignableHeader(Container):
|
||||
class ShardState(Container):
|
||||
slot: Slot
|
||||
gasprice: Gwei
|
||||
data: Bytes32
|
||||
transition_digest: Bytes32
|
||||
latest_block_root: Root
|
||||
```
|
||||
|
||||
@ -358,10 +370,10 @@ class AttestationCustodyBitWrapper(Container):
|
||||
|
||||
### Misc
|
||||
|
||||
#### `get_previous_slot`
|
||||
#### `compute_previous_slot`
|
||||
|
||||
```python
|
||||
def get_previous_slot(slot: Slot) -> Slot:
|
||||
def compute_previous_slot(slot: Slot) -> Slot:
|
||||
if slot > 0:
|
||||
return Slot(slot - 1)
|
||||
else:
|
||||
@ -371,15 +383,29 @@ def get_previous_slot(slot: Slot) -> Slot:
|
||||
#### `pack_compact_validator`
|
||||
|
||||
```python
|
||||
def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int:
|
||||
def pack_compact_validator(index: ValidatorIndex, slashed: bool, balance_in_increments: uint64) -> uint64:
|
||||
"""
|
||||
Creates a compact validator object representing index, slashed status, and compressed balance.
|
||||
Create a compact validator object representing index, slashed status, and compressed balance.
|
||||
Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with
|
||||
the unpacking function.
|
||||
"""
|
||||
return (index << 16) + (slashed << 15) + balance_in_increments
|
||||
```
|
||||
|
||||
#### `unpack_compact_validator`
|
||||
|
||||
```python
|
||||
def unpack_compact_validator(compact_validator: uint64) -> Tuple[ValidatorIndex, bool, uint64]:
|
||||
"""
|
||||
Return validator index, slashed, balance // EFFECTIVE_BALANCE_INCREMENT
|
||||
"""
|
||||
return (
|
||||
ValidatorIndex(compact_validator >> 16),
|
||||
bool((compact_validator >> 15) % 2),
|
||||
compact_validator & (2**15 - 1),
|
||||
)
|
||||
```
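To make the bit layout concrete, a quick round-trip check (illustrative only): the validator index sits in the high bits, the slashed flag in bit 15, and the balance increments in the low 15 bits.

```python
# index = 5, slashed = True, balance of 7 effective-balance increments
compact = (5 << 16) + (1 << 15) + 7
assert compact == 360455
# Unpacking recovers exactly the packed triple
assert (compact >> 16, bool((compact >> 15) % 2), compact & (2**15 - 1)) == (5, True, 7)
```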
|
||||
|
||||
#### `committee_to_compact_committee`
|
||||
|
||||
```python
|
||||
@ -404,6 +430,30 @@ def compute_shard_from_committee_index(state: BeaconState, index: CommitteeIndex
|
||||
return Shard((index + get_start_shard(state, slot)) % active_shards)
|
||||
```
|
||||
|
||||
#### `compute_offset_slots`
|
||||
|
||||
```python
|
||||
def compute_offset_slots(start_slot: Slot, end_slot: Slot) -> Sequence[Slot]:
|
||||
"""
|
||||
Return the offset slots that are greater than ``start_slot`` and less than ``end_slot``.
|
||||
"""
|
||||
return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < end_slot]
|
||||
```
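As a quick worked example using the `SHARD_BLOCK_OFFSETS` configuration listed earlier: a shard whose state is at slot 100, caught up to beacon slot 110, has offset slots at the Fibonacci offsets that still fall strictly before slot 110.

```python
SHARD_BLOCK_OFFSETS = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]

start_slot, end_slot = 100, 110
offset_slots = [start_slot + x for x in SHARD_BLOCK_OFFSETS if start_slot + x < end_slot]
assert offset_slots == [101, 102, 103, 105, 108]
```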
|
||||
|
||||
#### `compute_updated_gasprice`
|
||||
|
||||
```python
|
||||
def compute_updated_gasprice(prev_gasprice: Gwei, length: uint8) -> Gwei:
|
||||
if length > TARGET_SHARD_BLOCK_SIZE:
|
||||
delta = (prev_gasprice * (length - TARGET_SHARD_BLOCK_SIZE)
|
||||
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
|
||||
return min(prev_gasprice + delta, MAX_GASPRICE)
|
||||
else:
|
||||
delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - length)
|
||||
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
|
||||
return max(prev_gasprice, MIN_GASPRICE + delta) - delta
|
||||
```
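A small worked example of the adjustment (an EIP-1559-style controller), assuming `TARGET_SHARD_BLOCK_SIZE = 2**18` (a value not listed in the table above) together with the gasprice constants from the configuration section:

```python
TARGET_SHARD_BLOCK_SIZE = 2**18           # assumed value, for illustration only
GASPRICE_ADJUSTMENT_COEFFICIENT = 2**3
MIN_GASPRICE = 2**3
prev_gasprice = 8

# Block at twice the target size: delta = 8 * 2**18 // 2**18 // 8 = 1, so the price rises to 9.
delta_up = prev_gasprice * TARGET_SHARD_BLOCK_SIZE // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT
assert prev_gasprice + delta_up == 9

# Empty block while already at MIN_GASPRICE: max(8, 8 + 1) - 1 == 8, i.e. the price is pinned at the floor.
delta_down = prev_gasprice * TARGET_SHARD_BLOCK_SIZE // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT
assert max(prev_gasprice, MIN_GASPRICE + delta_down) - delta_down == 8
```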
|
||||
|
||||
### Beacon state accessors
|
||||
|
||||
#### `get_active_shard_count`
|
||||
@ -425,12 +475,35 @@ def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]:
|
||||
|
||||
```python
|
||||
def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
|
||||
source_epoch = epoch - epoch % SHARD_COMMITTEE_PERIOD
|
||||
source_epoch = epoch - epoch % SHARD_COMMITTEE_PERIOD
|
||||
if source_epoch > 0:
|
||||
source_epoch -= SHARD_COMMITTEE_PERIOD
|
||||
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
|
||||
seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE)
|
||||
return compute_committee(active_validator_indices, seed, shard, get_active_shard_count(beacon_state))
|
||||
active_shard_count = get_active_shard_count(beacon_state)
|
||||
return compute_committee(
|
||||
indices=active_validator_indices,
|
||||
seed=seed,
|
||||
index=shard,
|
||||
count=active_shard_count,
|
||||
)
|
||||
```
|
||||
|
||||
#### `get_light_client_committee`
|
||||
|
||||
```python
|
||||
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
|
||||
source_epoch = epoch - epoch % LIGHT_CLIENT_COMMITTEE_PERIOD
|
||||
if source_epoch > 0:
|
||||
source_epoch -= LIGHT_CLIENT_COMMITTEE_PERIOD
|
||||
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
|
||||
seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT)
|
||||
return compute_committee(
|
||||
indices=active_validator_indices,
|
||||
seed=seed,
|
||||
index=0,
|
||||
count=get_active_shard_count(beacon_state),
|
||||
)[:TARGET_COMMITTEE_SIZE]
|
||||
```
|
||||
|
||||
#### `get_shard_proposer_index`
|
||||
@ -442,19 +515,6 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard
|
||||
return committee[r % len(committee)]
|
||||
```
|
||||
|
||||
#### `get_light_client_committee`
|
||||
|
||||
```python
|
||||
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
|
||||
source_epoch = epoch - epoch % LIGHT_CLIENT_COMMITTEE_PERIOD
|
||||
if source_epoch > 0:
|
||||
source_epoch -= LIGHT_CLIENT_COMMITTEE_PERIOD
|
||||
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
|
||||
seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT)
|
||||
active_shards = get_active_shard_count(beacon_state)
|
||||
return compute_committee(active_validator_indices, seed, 0, active_shards)[:TARGET_COMMITTEE_SIZE]
|
||||
```
|
||||
|
||||
#### `get_indexed_attestation`
|
||||
|
||||
```python
|
||||
@ -466,20 +526,6 @@ def get_indexed_attestation(beacon_state: BeaconState, attestation: Attestation)
|
||||
)
|
||||
```
|
||||
|
||||
#### `get_updated_gasprice`
|
||||
|
||||
```python
|
||||
def get_updated_gasprice(prev_gasprice: Gwei, length: uint8) -> Gwei:
|
||||
if length > TARGET_SHARD_BLOCK_SIZE:
|
||||
delta = (prev_gasprice * (length - TARGET_SHARD_BLOCK_SIZE)
|
||||
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
|
||||
return min(prev_gasprice + delta, MAX_GASPRICE)
|
||||
else:
|
||||
delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - length)
|
||||
// TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
|
||||
return max(prev_gasprice, MIN_GASPRICE + delta) - delta
|
||||
```
|
||||
|
||||
#### `get_start_shard`
|
||||
|
||||
```python
|
||||
@ -495,19 +541,18 @@ def get_shard(state: BeaconState, attestation: Attestation) -> Shard:
|
||||
return compute_shard_from_committee_index(state, attestation.data.index, attestation.data.slot)
|
||||
```
|
||||
|
||||
#### `get_next_slot_for_shard`
|
||||
#### `get_latest_slot_for_shard`
|
||||
|
||||
```python
|
||||
def get_next_slot_for_shard(state: BeaconState, shard: Shard) -> Slot:
|
||||
return Slot(state.shard_states[shard].slot + 1)
|
||||
def get_latest_slot_for_shard(state: BeaconState, shard: Shard) -> Slot:
|
||||
return state.shard_states[shard].slot
|
||||
```
|
||||
|
||||
|
||||
#### `get_offset_slots`
|
||||
|
||||
```python
|
||||
def get_offset_slots(state: BeaconState, start_slot: Slot) -> Sequence[Slot]:
|
||||
return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < state.slot]
|
||||
def get_offset_slots(state: BeaconState, shard: Shard) -> Sequence[Slot]:
|
||||
return compute_offset_slots(state.shard_states[shard].slot, state.slot)
|
||||
```
|
||||
|
||||
### Predicates
|
||||
@ -528,7 +573,7 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
|
||||
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
|
||||
aggregation_bits = attestation.aggregation_bits
|
||||
assert len(aggregation_bits) == len(indexed_attestation.committee)
|
||||
|
||||
|
||||
if len(attestation.custody_bits_blocks) == 0:
|
||||
# fall back on phase0 behavior if there is no shard data.
|
||||
for participant, abit in zip(indexed_attestation.committee, aggregation_bits):
|
||||
@ -543,13 +588,50 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
|
||||
if abit:
|
||||
all_pubkeys.append(state.validators[participant].pubkey)
|
||||
# Note: only 2N distinct message hashes
|
||||
all_signing_roots.append(compute_signing_root(
|
||||
AttestationCustodyBitWrapper(hash_tree_root(attestation.data), i, cbit), domain))
|
||||
attestation_wrapper = AttestationCustodyBitWrapper(
|
||||
attestation_data_root=hash_tree_root(attestation.data),
|
||||
block_index=i,
|
||||
bit=cbit
|
||||
)
|
||||
all_signing_roots.append(compute_signing_root(attestation_wrapper, domain))
|
||||
else:
|
||||
assert not cbit
|
||||
return bls.AggregateVerify(zip(all_pubkeys, all_signing_roots), signature=attestation.signature)
|
||||
```
|
||||
|
||||
#### `is_shard_attestation`
|
||||
|
||||
```python
|
||||
def is_shard_attestation(state: BeaconState,
|
||||
attestation: Attestation,
|
||||
committee_index: CommitteeIndex) -> bool:
|
||||
if not (
|
||||
attestation.data.index == committee_index
|
||||
and attestation.data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot # Must be on-time attestation
|
||||
# TODO: MIN_ATTESTATION_INCLUSION_DELAY should always be 1
|
||||
):
|
||||
return False
|
||||
|
||||
return True
|
||||
```
|
||||
|
||||
#### `is_winning_attestation`
|
||||
|
||||
```python
|
||||
def is_winning_attestation(state: BeaconState,
|
||||
attestation: PendingAttestation,
|
||||
committee_index: CommitteeIndex,
|
||||
winning_root: Root) -> bool:
|
||||
"""
|
||||
Check if ``attestation`` helped contribute to the successful crosslink of
|
||||
``winning_root`` formed by ``committee_index`` committee at the current slot.
|
||||
"""
|
||||
return (
|
||||
attestation.data.slot == state.slot
|
||||
and attestation.data.index == committee_index
|
||||
and attestation.data.shard_transition_root == winning_root
|
||||
)
|
||||
```
|
||||
|
||||
### Block processing
|
||||
|
||||
@ -558,35 +640,34 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||
process_block_header(state, block)
|
||||
process_randao(state, block.body)
|
||||
process_eth1_data(state, block.body)
|
||||
verify_shard_transition_false_positives(state, block.body)
|
||||
process_light_client_signatures(state, block.body)
|
||||
process_operations(state, block.body)
|
||||
verify_shard_transition_false_positives(state, block.body)
|
||||
```
|
||||
|
||||
|
||||
#### Operations
|
||||
|
||||
```python
|
||||
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
# Verify that outstanding deposits are processed up to the maximum number of deposits
|
||||
assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)
|
||||
|
||||
|
||||
def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
|
||||
for operation in operations:
|
||||
fn(state, operation)
|
||||
|
||||
|
||||
for_ops(body.proposer_slashings, process_proposer_slashing)
|
||||
for_ops(body.attester_slashings, process_attester_slashing)
|
||||
|
||||
# New attestation processing
|
||||
process_attestations(state, body, body.attestations)
|
||||
|
||||
for_ops(body.attestations, process_attestation)
|
||||
for_ops(body.deposits, process_deposit)
|
||||
for_ops(body.voluntary_exits, process_voluntary_exit)
|
||||
|
||||
# See custody game spec.
|
||||
process_custody_game_operations(state, body)
|
||||
|
||||
process_crosslinks(state, body.shard_transitions, body.attestations)
|
||||
|
||||
# TODO process_operations(body.shard_receipt_proofs, process_shard_receipt_proofs)
|
||||
```
|
||||
|
||||
@ -600,6 +681,7 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
assert data.index < get_committee_count_at_slot(state, data.slot)
|
||||
assert data.index < get_active_shard_count(state)
|
||||
assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
|
||||
assert data.target.epoch == compute_epoch_at_slot(data.slot)
|
||||
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
|
||||
|
||||
committee = get_beacon_committee(state, data.slot, data.index)
|
||||
@ -611,63 +693,69 @@ def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
assert attestation.data.source == state.previous_justified_checkpoint
|
||||
|
||||
shard = get_shard(state, attestation)
|
||||
shard_start_slot = get_next_slot_for_shard(state, shard)
|
||||
|
||||
# Type 1: on-time attestations, the custody bits should be non-empty.
|
||||
if attestation.custody_bits_blocks != []:
|
||||
# Ensure on-time attestation
|
||||
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot
|
||||
# Correct data root count
|
||||
assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard))
|
||||
# Correct parent block root
|
||||
assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))
|
||||
# Type 2: no shard transition, no custody bits
|
||||
else:
|
||||
# Ensure delayed attestation
|
||||
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY < state.slot
|
||||
# Late attestations cannot have a shard transition root
|
||||
assert data.shard_transition_root == Root()
|
||||
|
||||
# Signature check
|
||||
assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
|
||||
# Type 1: on-time attestations
|
||||
if attestation.custody_bits_blocks != []:
|
||||
# Correct slot
|
||||
assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot
|
||||
# Correct data root count
|
||||
assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard_start_slot))
|
||||
# Correct parent block root
|
||||
assert data.beacon_block_root == get_block_root_at_slot(state, get_previous_slot(state.slot))
|
||||
# Type 2: no shard transition, no custody bits # TODO: could only allow for older attestations.
|
||||
else:
|
||||
# assert state.slot - compute_start_slot_at_epoch(compute_epoch_at_slot(data.slot)) < SLOTS_PER_EPOCH
|
||||
assert data.shard_transition_root == Root()
|
||||
```
|
||||
|
||||
###### `apply_shard_transition`
|
||||
|
||||
```python
|
||||
def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTransition) -> None:
|
||||
# Slot the attestation starts counting from
|
||||
start_slot = get_next_slot_for_shard(state, shard)
|
||||
# TODO: only need to check it once when phase 1 starts
|
||||
assert state.slot > PHASE_1_GENESIS_SLOT
|
||||
|
||||
# Correct data root count
|
||||
offset_slots = get_offset_slots(state, start_slot)
|
||||
offset_slots = get_offset_slots(state, shard)
|
||||
assert (
|
||||
len(transition.shard_data_roots)
|
||||
== len(transition.shard_states)
|
||||
== len(transition.shard_block_lengths)
|
||||
== len(offset_slots)
|
||||
)
|
||||
assert transition.start_slot == start_slot
|
||||
assert transition.start_slot == offset_slots[0]
|
||||
|
||||
# Reconstruct shard headers
|
||||
headers = []
|
||||
proposers = []
|
||||
prev_gasprice = state.shard_states[shard].gasprice
|
||||
shard_parent_root = state.shard_states[shard].latest_block_root
|
||||
for i in range(len(offset_slots)):
|
||||
if any(transition.shard_data_roots):
|
||||
headers.append(ShardSignableHeader(
|
||||
shard_block_length = transition.shard_block_lengths[i]
|
||||
shard_state = transition.shard_states[i]
|
||||
# Verify correct calculation of gas prices and slots
|
||||
assert shard_state.gasprice == compute_updated_gasprice(prev_gasprice, shard_block_length)
|
||||
assert shard_state.slot == offset_slots[i]
|
||||
# Collect the non-empty proposals result
|
||||
is_empty_proposal = shard_block_length == 0
|
||||
if not is_empty_proposal:
|
||||
proposal_index = get_shard_proposer_index(state, offset_slots[i], shard)
|
||||
# Reconstruct shard headers
|
||||
header = ShardBlockHeader(
|
||||
shard_parent_root=shard_parent_root,
|
||||
parent_hash=get_block_root_at_slot(state, get_previous_slot(state.slot)),
|
||||
beacon_parent_root=get_block_root_at_slot(state, offset_slots[i]),
|
||||
proposer_index=proposal_index,
|
||||
slot=offset_slots[i],
|
||||
body_root=transition.shard_data_roots[i]
|
||||
))
|
||||
proposers.append(get_shard_proposer_index(state, shard, offset_slots[i]))
|
||||
shard_parent_root = hash_tree_root(headers[-1])
|
||||
)
|
||||
shard_parent_root = hash_tree_root(header)
|
||||
headers.append(header)
|
||||
proposers.append(proposal_index)
|
||||
|
||||
# Verify correct calculation of gas prices and slots
|
||||
prev_gasprice = state.shard_states[shard].gasprice
|
||||
for i in range(len(offset_slots)):
|
||||
shard_state = transition.shard_states[i]
|
||||
block_length = transition.shard_block_lengths[i]
|
||||
assert shard_state.gasprice == get_updated_gasprice(prev_gasprice, block_length)
|
||||
assert shard_state.slot == offset_slots[i]
|
||||
prev_gasprice = shard_state.gasprice
|
||||
|
||||
pubkeys = [state.validators[proposer].pubkey for proposer in proposers]
|
||||
@ -679,7 +767,7 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
|
||||
assert bls.AggregateVerify(zip(pubkeys, signing_roots), signature=transition.proposer_signature_aggregate)
|
||||
|
||||
# Save updated state
|
||||
state.shard_states[shard] = transition.shard_states[-1]
|
||||
state.shard_states[shard] = transition.shard_states[len(transition.shard_states) - 1]
|
||||
state.shard_states[shard].slot = state.slot - 1
|
||||
```
|
||||
|
||||
@ -687,11 +775,12 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
|
||||
|
||||
```python
|
||||
def process_crosslink_for_shard(state: BeaconState,
|
||||
shard: Shard,
|
||||
committee_index: CommitteeIndex,
|
||||
shard_transition: ShardTransition,
|
||||
attestations: Sequence[Attestation]) -> Root:
|
||||
committee = get_beacon_committee(state, get_current_epoch(state), shard)
|
||||
committee = get_beacon_committee(state, state.slot, committee_index)
|
||||
online_indices = get_online_validator_indices(state)
|
||||
shard = compute_shard_from_committee_index(state, committee_index, state.slot)
|
||||
|
||||
# Loop over all shard transition roots
|
||||
shard_transition_roots = set([a.data.shard_transition_root for a in attestations])
|
||||
@ -701,6 +790,9 @@ def process_crosslink_for_shard(state: BeaconState,
|
||||
for attestation in transition_attestations:
|
||||
participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
|
||||
transition_participants = transition_participants.union(participants)
|
||||
assert attestation.data.head_shard_root == shard_transition.shard_data_roots[
|
||||
len(shard_transition.shard_data_roots) - 1
|
||||
]
|
||||
|
||||
enough_online_stake = (
|
||||
get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >=
|
||||
@ -712,7 +804,6 @@ def process_crosslink_for_shard(state: BeaconState,
|
||||
|
||||
# Attestation <-> shard transition consistency
|
||||
assert shard_transition_root == hash_tree_root(shard_transition)
|
||||
assert attestation.data.head_shard_root == shard_transition.shard_data_roots[-1]
|
||||
|
||||
# Apply transition
|
||||
apply_shard_transition(state, shard, shard_transition)
|
||||
@ -723,11 +814,11 @@ def process_crosslink_for_shard(state: BeaconState,
|
||||
increase_balance(state, beacon_proposer_index, proposer_reward)
|
||||
states_slots_lengths = zip(
|
||||
shard_transition.shard_states,
|
||||
get_offset_slots(state, get_next_slot_for_shard(state, shard)),
|
||||
get_offset_slots(state, shard),
|
||||
shard_transition.shard_block_lengths
|
||||
)
|
||||
for shard_state, slot, length in states_slots_lengths:
|
||||
proposer_index = get_shard_proposer_index(state, shard, slot)
|
||||
proposer_index = get_shard_proposer_index(state, slot, shard)
|
||||
decrease_balance(state, proposer_index, shard_state.gasprice * length)
|
||||
|
||||
# Return winning transition root
|
||||
@ -742,49 +833,43 @@ def process_crosslink_for_shard(state: BeaconState,
|
||||
|
||||
```python
|
||||
def process_crosslinks(state: BeaconState,
|
||||
block_body: BeaconBlockBody,
|
||||
attestations: Sequence[Attestation]) -> Set[Tuple[Shard, Root]]:
|
||||
winners: Set[Tuple[Shard, Root]] = set()
|
||||
shard_transitions: Sequence[ShardTransition],
|
||||
attestations: Sequence[Attestation]) -> None:
|
||||
committee_count = get_committee_count_at_slot(state, state.slot)
|
||||
for committee_index in map(CommitteeIndex, range(committee_count)):
|
||||
shard = compute_shard_from_committee_index(state, committee_index, state.slot)
|
||||
# All attestations in the block for this shard
|
||||
# All attestations in the block for this committee/shard and current slot
|
||||
shard_transition = shard_transitions[shard]
|
||||
shard_attestations = [
|
||||
attestation for attestation in attestations
|
||||
if get_shard(state, attestation) == shard and attestation.data.slot == state.slot
|
||||
if is_shard_attestation(state, attestation, committee_index)
|
||||
]
|
||||
shard_transition = block_body.shard_transitions[shard]
|
||||
winning_root = process_crosslink_for_shard(state, shard, shard_transition, shard_attestations)
|
||||
|
||||
winning_root = process_crosslink_for_shard(state, committee_index, shard_transition, shard_attestations)
|
||||
if winning_root != Root():
|
||||
winners.add((shard, winning_root))
|
||||
return winners
|
||||
# Mark relevant pending attestations as creating a successful crosslink
|
||||
for pending_attestation in state.current_epoch_attestations:
|
||||
if is_winning_attestation(state, pending_attestation, committee_index, winning_root):
|
||||
pending_attestation.crosslink_success = True
|
||||
```
|
||||
|
||||
###### `process_attestations`
|
||||
###### `process_attestation`
|
||||
|
||||
```python
|
||||
def process_attestations(state: BeaconState, block_body: BeaconBlockBody, attestations: Sequence[Attestation]) -> None:
|
||||
# Basic validation
|
||||
for attestation in attestations:
|
||||
validate_attestation(state, attestation)
|
||||
|
||||
# Process crosslinks
|
||||
winners = process_crosslinks(state, block_body, attestations)
|
||||
|
||||
# Store pending attestations for epoch processing
|
||||
for attestation in attestations:
|
||||
is_winning_transition = (get_shard(state, attestation), attestation.data.shard_transition_root) in winners
|
||||
pending_attestation = PendingAttestation(
|
||||
aggregation_bits=attestation.aggregation_bits,
|
||||
data=attestation.data,
|
||||
inclusion_delay=state.slot - attestation.data.slot,
|
||||
crosslink_success=is_winning_transition and attestation.data.slot == state.slot,
|
||||
proposer_index=get_beacon_proposer_index(state),
|
||||
)
|
||||
if attestation.data.target.epoch == get_current_epoch(state):
|
||||
state.current_epoch_attestations.append(pending_attestation)
|
||||
else:
|
||||
state.previous_epoch_attestations.append(pending_attestation)
|
||||
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
validate_attestation(state, attestation)
|
||||
# Store pending attestation for epoch processing
|
||||
pending_attestation = PendingAttestation(
|
||||
aggregation_bits=attestation.aggregation_bits,
|
||||
data=attestation.data,
|
||||
inclusion_delay=state.slot - attestation.data.slot,
|
||||
proposer_index=get_beacon_proposer_index(state),
|
||||
crosslink_success=False, # To be filled in during process_crosslinks
|
||||
)
|
||||
if attestation.data.target.epoch == get_current_epoch(state):
|
||||
state.current_epoch_attestations.append(pending_attestation)
|
||||
else:
|
||||
state.previous_epoch_attestations.append(pending_attestation)
|
||||
```
|
||||
|
||||
##### New Attester slashing processing
|
||||
@ -803,6 +888,7 @@ def get_indices_from_committee(
|
||||
def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
|
||||
indexed_attestation_1 = attester_slashing.attestation_1
|
||||
indexed_attestation_2 = attester_slashing.attestation_2
|
||||
|
||||
assert is_slashable_attestation_data(
|
||||
indexed_attestation_1.attestation.data,
|
||||
indexed_attestation_2.attestation.data,
|
||||
@ -852,13 +938,17 @@ def process_light_client_signatures(state: BeaconState, block_body: BeaconBlockB
|
||||
total_reward += get_base_reward(state, participant_index)
|
||||
|
||||
increase_balance(state, get_beacon_proposer_index(state), Gwei(total_reward // PROPOSER_REWARD_QUOTIENT))
|
||||
|
||||
slot = get_previous_slot(state.slot)
|
||||
signing_root = compute_signing_root(get_block_root_at_slot(state, slot),
|
||||
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot)))
|
||||
return bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature)
|
||||
```
|
||||
|
||||
slot = compute_previous_slot(state.slot)
|
||||
signing_root = compute_signing_root(get_block_root_at_slot(state, slot),
|
||||
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot)))
|
||||
if len(signer_pubkeys) == 0:
|
||||
# TODO: handle the empty light_client_signature case?
|
||||
assert block_body.light_client_signature == BLSSignature()
|
||||
return
|
||||
else:
|
||||
assert bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature)
|
||||
```
|
||||
|
||||
### Epoch transition
|
||||
|
||||
|
@ -192,10 +192,10 @@ def get_custody_atoms(bytez: bytes) -> Sequence[bytes]:
|
||||
def compute_custody_bit(key: BLSSignature, data: bytes) -> bit:
|
||||
full_G2_element = bls.signature_to_G2(key)
|
||||
s = full_G2_element[0].coeffs
|
||||
bits = [legendre_bit(sum(s[i % 2]**i * int.from_bytes(atom, "little")), BLS12_381_Q)
|
||||
for i, atom in enumerate(get_custody_atoms(data))]
|
||||
# XOR all atom bits
|
||||
return bit(sum(bits) % 2)
|
||||
custody_atoms = get_custody_atoms(data)
|
||||
n = len(custody_atoms)
|
||||
a = sum(s[i % 2]**i * int.from_bytes(atom, "little") for i, atom in enumerate(custody_atoms)) + s[n % 2]**n
|
||||
return legendre_bit(a, BLS12_381_Q)
|
||||
```
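For reference, `legendre_bit` (defined elsewhere in the custody game spec) maps the Legendre symbol to a single bit. A minimal sketch using Euler's criterion, assuming `q` is an odd prime, is:

```python
def legendre_bit_sketch(a: int, q: int) -> int:
    """Return 1 if ``a`` is a non-zero quadratic residue mod ``q``, else 0 (Euler's criterion)."""
    a %= q
    if a == 0:
        return 0
    return 1 if pow(a, (q - 1) // 2, q) == 1 else 0
```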
|
||||
|
||||
### `get_randao_epoch_for_custody_period`
|
||||
@ -416,7 +416,13 @@ def process_reveal_deadlines(state: BeaconState) -> None:
|
||||
epoch = get_current_epoch(state)
|
||||
for index, validator in enumerate(state.validators):
|
||||
if get_custody_period_for_validator(ValidatorIndex(index), epoch) > validator.next_custody_secret_to_reveal:
|
||||
slash_validator(state, ValidatorIndex(index))
|
||||
# ------------------ WARNING ----------------------- #
|
||||
# UNSAFE REMOVAL OF SLASHING TO PRIORITIZE PHASE 0 CI #
|
||||
# Must find generic way to handle key reveals in tests #
|
||||
# ---------------------------------------------------- #
|
||||
|
||||
# slash_validator(state, ValidatorIndex(index))
|
||||
pass
|
||||
```
|
||||
|
||||
### Final updates
|
||||
|
@ -1,70 +0,0 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
|
||||
|
||||
- [Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs](#ethereum-20-phase-1----shard-transition-and-fraud-proofs)
|
||||
- [Table of contents](#table-of-contents)
|
||||
- [Introduction](#introduction)
|
||||
- [Fraud proofs](#fraud-proofs)
|
||||
- [Shard state transition function](#shard-state-transition-function)
|
||||
- [Honest committee member behavior](#honest-committee-member-behavior)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
|
||||
TODO
|
||||
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0.
|
||||
|
||||
## Fraud proofs
|
||||
|
||||
TODO. The intent is to have a single universal fraud proof type, which contains the following parts:
|
||||
|
||||
1. An on-time attestation on some `shard` signing a `ShardTransition`
|
||||
2. An index `i` of a particular position to focus on
|
||||
3. The `ShardTransition` itself
|
||||
4. The full body of the block
|
||||
5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing
|
||||
|
||||
The proof verifies that one of the two conditions is false:
|
||||
|
||||
1. `custody_bits[i][j] != generate_custody_bit(subkey, block_contents)` for any `j`
|
||||
2. `execute_state_transition(shard, slot, transition.shard_states[i-1].data, hash_tree_root(parent), get_shard_proposer_index(state, shard, slot), block_contents) != transition.shard_states[i].data` (if `i=0` then instead use `parent.shard_states[shard][-1].data`)
|
||||
|
||||
## Shard state transition function
|
||||
|
||||
```python
|
||||
def shard_state_transition(shard: Shard,
|
||||
slot: Slot,
|
||||
pre_state: Root,
|
||||
previous_beacon_root: Root,
|
||||
proposer_pubkey: BLSPubkey,
|
||||
block_data: ByteList[MAX_SHARD_BLOCK_SIZE]) -> Root:
|
||||
# We will add something more substantive in phase 2
|
||||
return hash(pre_state + hash_tree_root(previous_beacon_root) + hash_tree_root(block_data))
|
||||
```
|
||||
|
||||
## Honest committee member behavior
|
||||
|
||||
Suppose you are a committee member on shard `shard` at slot `current_slot`. Let `state` be the head beacon state you are building on, and let `QUARTER_PERIOD = SECONDS_PER_SLOT // 4`. `2 * QUARTER_PERIOD` seconds into slot `current_slot`, run the following procedure:
|
||||
|
||||
* Initialize `proposals = []`, `shard_states = []`, `shard_state = state.shard_states[shard][-1]`, `start_slot = shard_state.slot`.
|
||||
* For `slot in get_offset_slots(state, start_slot)`, do the following:
|
||||
* Look for all valid proposals for `slot`; that is, a Bytes `proposal` where `shard_state_transition(shard, slot, shard_state, get_block_root_at_slot(state, state.slot - 1), get_shard_proposer_index(state, shard, slot), proposal)` returns a result and does not throw an exception. Let `choices` be the set of non-empty valid proposals you discover.
|
||||
* If `len(choices) == 0`, do `proposals.append(make_empty_proposal(shard_state, slot))`
|
||||
* If `len(choices) == 1`, do `proposals.append(choices[0])`
|
||||
* If `len(choices) > 1`, let `winning_proposal` be the proposal with the largest number of total attestations from slots in `state.shard_next_slots[shard]....slot-1` supporting it or any of its descendants, breaking ties by choosing the first proposal locally seen. Do `proposals.append(winning_proposal)`.
|
||||
* If `proposals[-1]` is NOT an empty proposal, set `shard_state = shard_state_transition(shard, slot, shard_state, get_block_root_at_slot(state, state.slot - 1), get_shard_proposer_index(state, shard, slot), proposals[-1])` and do `shard_states.append(shard_state)`. If it is an empty proposal, leave `shard_state` unchanged.
|
||||
|
||||
Make an attestation using `shard_data_roots = [hash_tree_root(proposal) for proposal in proposals]` and `shard_state_roots = shard_states`.
|
@ -7,7 +7,7 @@
|
||||
- [Introduction](#introduction)
|
||||
- [Configuration](#configuration)
|
||||
- [Fork to Phase 1](#fork-to-phase-1)
|
||||
- [Fork trigger.](#fork-trigger)
|
||||
- [Fork trigger](#fork-trigger)
|
||||
- [Upgrading the state](#upgrading-the-state)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
@ -35,18 +35,18 @@ Warning: this configuration is not definitive.
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `PHASE_1_FORK_VERSION` | `Version('0x01000000')` |
|
||||
| `PHASE_1_GENESIS_SLOT` | `2**5` **TBD** |
|
||||
| `INITIAL_ACTIVE_SHARDS` | `2**6` (= 64) |
|
||||
| `INITIAL_GASPRICE` | `Gwei(10)` |
|
||||
|
||||
## Fork to Phase 1
|
||||
|
||||
### Fork trigger.
|
||||
### Fork trigger
|
||||
|
||||
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork.
|
||||
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at slot `PHASE_1_GENESIS_SLOT`, where `PHASE_1_GENESIS_SLOT % SLOTS_PER_EPOCH == 0`.
|
||||
|
||||
### Upgrading the state
|
||||
|
||||
After `process_slots` of Phase 0 finishes, but before the first Phase 1 block is processed, an irregular state change is made to upgrade to Phase 1.
|
||||
After `process_slots` of Phase 0 finishes, if `state.slot == PHASE_1_GENESIS_SLOT`, an irregular state change is made to upgrade to Phase 1.
|
||||
|
||||
```python
|
||||
def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
|
||||
@ -102,8 +102,8 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
|
||||
shard_states=List[ShardState, MAX_SHARDS](
|
||||
ShardState(
|
||||
slot=pre.slot,
|
||||
gasprice=INITIAL_GASPRICE,
|
||||
data=Root(),
|
||||
gasprice=MIN_GASPRICE,
|
||||
transition_digest=Root(),
|
||||
latest_block_root=Root(),
|
||||
) for i in range(INITIAL_ACTIVE_SHARDS)
|
||||
),
|
||||
@ -111,7 +111,7 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
|
||||
current_light_committee=CompactCommittee(), # computed after state creation
|
||||
next_light_committee=CompactCommittee(),
|
||||
# Custody game
|
||||
custody_challenge_index=0,
|
||||
exposed_derived_secrets=[] * EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS,
|
||||
# exposed_derived_secrets will fully default to zeroes
|
||||
)
|
||||
next_epoch = Epoch(epoch + 1)
|
||||
|
292
specs/phase1/shard-transition.md
Normal file
292
specs/phase1/shard-transition.md
Normal file
@ -0,0 +1,292 @@
|
||||
# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc)
|
||||
- [Shard block verification functions](#shard-block-verification-functions)
|
||||
- [Shard state transition](#shard-state-transition)
|
||||
- [Fraud proofs](#fraud-proofs)
|
||||
- [Verifying the proof](#verifying-the-proof)
|
||||
- [Honest committee member behavior](#honest-committee-member-behavior)
|
||||
- [Helper functions](#helper-functions-1)
|
||||
- [Make attestations](#make-attestations)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0.
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Misc
|
||||
|
||||
```python
|
||||
def compute_shard_transition_digest(beacon_state: BeaconState,
|
||||
shard_state: ShardState,
|
||||
beacon_parent_root: Root,
|
||||
shard_body_root: Root) -> Bytes32:
|
||||
# TODO: use SSZ hash tree root
|
||||
return hash(
|
||||
hash_tree_root(shard_state) + beacon_parent_root + shard_body_root
|
||||
)
|
||||
```
|
||||
|
||||
### Shard block verification functions
|
||||
|
||||
```python
|
||||
def verify_shard_block_message(beacon_state: BeaconState,
|
||||
shard_state: ShardState,
|
||||
block: ShardBlock,
|
||||
slot: Slot,
|
||||
shard: Shard) -> bool:
|
||||
assert block.shard_parent_root == shard_state.latest_block_root
|
||||
assert block.slot == slot
|
||||
assert block.proposer_index == get_shard_proposer_index(beacon_state, slot, shard)
|
||||
assert 0 < len(block.body) <= MAX_SHARD_BLOCK_SIZE
|
||||
return True
|
||||
```
|
||||
|
||||
```python
|
||||
def verify_shard_block_signature(beacon_state: BeaconState,
|
||||
signed_block: SignedShardBlock) -> bool:
|
||||
proposer = beacon_state.validators[signed_block.message.proposer_index]
|
||||
domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(signed_block.message.slot))
|
||||
signing_root = compute_signing_root(signed_block.message, domain)
|
||||
return bls.Verify(proposer.pubkey, signing_root, signed_block.signature)
|
||||
```
|
||||
|
||||
## Shard state transition
|
||||
|
||||
```python
|
||||
def shard_state_transition(beacon_state: BeaconState,
|
||||
shard_state: ShardState,
|
||||
block: ShardBlock) -> None:
|
||||
# Update shard state
|
||||
prev_gasprice = shard_state.gasprice
|
||||
if len(block.body) == 0:
|
||||
latest_block_root = shard_state.latest_block_root
|
||||
else:
|
||||
latest_block_root = hash_tree_root(block)
|
||||
|
||||
shard_state.transition_digest = compute_shard_transition_digest(
|
||||
beacon_state,
|
||||
shard_state,
|
||||
block.beacon_parent_root,
|
||||
block.body,
|
||||
)
|
||||
shard_state.gasprice = compute_updated_gasprice(prev_gasprice, len(block.body))
|
||||
shard_state.slot = block.slot
|
||||
shard_state.latest_block_root = latest_block_root
|
||||
```
|
||||
|
||||
We have a pure function `get_post_shard_state` for describing the fraud proof verification and honest validator behavior.
|
||||
|
||||
```python
|
||||
def get_post_shard_state(beacon_state: BeaconState,
|
||||
shard_state: ShardState,
|
||||
block: ShardBlock) -> ShardState:
|
||||
"""
|
||||
A pure function that returns a new post ShardState instead of modifying the given `shard_state`.
|
||||
"""
|
||||
post_state = shard_state.copy()
|
||||
shard_state_transition(beacon_state, post_state, block)
|
||||
return post_state
|
||||
```
|
||||
|
||||
## Fraud proofs
|
||||
|
||||
### Verifying the proof
|
||||
|
||||
TODO. The intent is to have a single universal fraud proof type, which contains the following parts:
|
||||
|
||||
1. An on-time attestation `attestation` on some shard `shard` signing a `transition: ShardTransition`
|
||||
2. An index `offset_index` of a particular position to focus on
|
||||
3. The `transition: ShardTransition` itself
|
||||
4. The full body of the shard block `shard_block`
|
||||
5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing
|
||||
6. The `subkey` to generate the custody bit
|
||||
|
||||
Call the following function to verify the proof:
|
||||
|
||||
```python
|
||||
def is_valid_fraud_proof(beacon_state: BeaconState,
|
||||
attestation: Attestation,
|
||||
offset_index: uint64,
|
||||
transition: ShardTransition,
|
||||
block: ShardBlock,
|
||||
subkey: BLSPubkey,
|
||||
beacon_parent_block: BeaconBlock) -> bool:
|
||||
# 1. Check if `custody_bits[offset_index][j] != generate_custody_bit(subkey, block_contents)` for any `j`.
|
||||
custody_bits = attestation.custody_bits_blocks
|
||||
for j in range(len(custody_bits[offset_index])):
|
||||
if custody_bits[offset_index][j] != generate_custody_bit(subkey, block):
|
||||
return True
|
||||
|
||||
# 2. Check if the shard state transition result is wrong between
|
||||
# `transition.shard_states[offset_index - 1]` to `transition.shard_states[offset_index]`.
|
||||
if offset_index == 0:
|
||||
shard = get_shard(beacon_state, attestation)
|
||||
shard_state = beacon_parent_block.shard_transitions[shard].shard_states[-1]
|
||||
else:
|
||||
shard_state = transition.shard_states[offset_index - 1] # Not doing the actual state updates here.
|
||||
|
||||
shard_state = get_post_shard_state(beacon_state, shard_state, block)
|
||||
if shard_state.transition_digest != transition.shard_states[offset_index].transition_digest:
|
||||
return True
|
||||
|
||||
return False
|
||||
```
|
||||
|
||||
```python
|
||||
def generate_custody_bit(subkey: BLSPubkey, block: ShardBlock) -> bool:
|
||||
# TODO
|
||||
...
|
||||
```
|
||||
|
||||
## Honest committee member behavior
|
||||
|
||||
### Helper functions
|
||||
|
||||
```python
|
||||
def get_winning_proposal(beacon_state: BeaconState, proposals: Sequence[SignedShardBlock]) -> SignedShardBlock:
|
||||
# TODO: Let `winning_proposal` be the proposal with the largest number of total attestations from slots in
|
||||
# `state.shard_next_slots[shard]....slot-1` supporting it or any of its descendants, breaking ties by choosing
|
||||
# the first proposal locally seen. Do `proposals.append(winning_proposal)`.
|
||||
return proposals[-1] # stub
|
||||
```
|
||||
|
||||
```python
|
||||
def compute_shard_body_roots(proposals: Sequence[SignedShardBlock]) -> Sequence[Root]:
|
||||
return [hash_tree_root(proposal.message.body) for proposal in proposals]
|
||||
```
|
||||
|
||||
```python
|
||||
def get_proposal_choices_at_slot(beacon_state: BeaconState,
|
||||
shard_state: ShardState,
|
||||
slot: Slot,
|
||||
shard: Shard,
|
||||
shard_blocks: Sequence[SignedShardBlock],
|
||||
validate_signature: bool=True) -> Sequence[SignedShardBlock]:
|
||||
"""
|
||||
Return the valid shard blocks at the given ``slot``.
|
||||
Note that this function doesn't change the state.
|
||||
"""
|
||||
choices = []
|
||||
shard_blocks_at_slot = [block for block in shard_blocks if block.message.slot == slot]
|
||||
for block in shard_blocks_at_slot:
|
||||
try:
|
||||
# Verify block message and signature
|
||||
# TODO these validations should have been checked upon receiving shard blocks.
|
||||
assert verify_shard_block_message(beacon_state, shard_state, block.message, slot, shard)
|
||||
if validate_signature:
|
||||
assert verify_shard_block_signature(beacon_state, block)
|
||||
|
||||
shard_state = get_post_shard_state(beacon_state, shard_state, block.message)
|
||||
except Exception:
|
||||
pass # TODO: throw error in the test helper
|
||||
else:
|
||||
choices.append(block)
|
||||
return choices
|
||||
```
|
||||
|
||||
```python
|
||||
def get_proposal_at_slot(beacon_state: BeaconState,
|
||||
shard_state: ShardState,
|
||||
slot: Slot,
|
||||
shard: Shard,
|
||||
shard_blocks: Sequence[SignedShardBlock],
|
||||
validate_signature: bool=True) -> Tuple[SignedShardBlock, ShardState]:
|
||||
"""
|
||||
Return ``proposal``, ``shard_state`` of the given ``slot``.
|
||||
Note that this function doesn't change the state.
|
||||
"""
|
||||
choices = get_proposal_choices_at_slot(
|
||||
beacon_state=beacon_state,
|
||||
shard_state=shard_state,
|
||||
slot=slot,
|
||||
shard=shard,
|
||||
shard_blocks=shard_blocks,
|
||||
validate_signature=validate_signature,
|
||||
)
|
||||
if len(choices) == 0:
|
||||
block = ShardBlock(slot=slot)
|
||||
proposal = SignedShardBlock(message=block)
|
||||
elif len(choices) == 1:
|
||||
proposal = choices[0]
|
||||
else:
|
||||
proposal = get_winning_proposal(beacon_state, choices)
|
||||
|
||||
# Apply state transition
|
||||
shard_state = get_post_shard_state(beacon_state, shard_state, proposal.message)
|
||||
|
||||
return proposal, shard_state
|
||||
```
|
||||
|
||||
```python
|
||||
def get_shard_state_transition_result(
|
||||
beacon_state: BeaconState,
|
||||
shard: Shard,
|
||||
shard_blocks: Sequence[SignedShardBlock],
|
||||
validate_signature: bool=True,
|
||||
) -> Tuple[Sequence[SignedShardBlock], Sequence[ShardState], Sequence[Root]]:
|
||||
proposals = []
|
||||
shard_states = []
|
||||
shard_state = beacon_state.shard_states[shard]
|
||||
for slot in get_offset_slots(beacon_state, shard):
|
||||
proposal, shard_state = get_proposal_at_slot(
|
||||
beacon_state=beacon_state,
|
||||
shard_state=shard_state,
|
||||
slot=slot,
|
||||
shard=shard,
|
||||
shard_blocks=shard_blocks,
|
||||
validate_signature=validate_signature,
|
||||
)
|
||||
shard_states.append(shard_state)
|
||||
proposals.append(proposal)
|
||||
|
||||
shard_data_roots = compute_shard_body_roots(proposals)
|
||||
|
||||
return proposals, shard_states, shard_data_roots
|
||||
```
|
||||
|
||||
### Make attestations
|
||||
|
||||
Suppose you are a committee member on shard `shard` at slot `current_slot` and you have received shard blocks `shard_blocks` since the latest successful crosslink for `shard` into the beacon chain. Let `state` be the head beacon state you are building on, and let `QUARTER_PERIOD = SECONDS_PER_SLOT // 4`. `2 * QUARTER_PERIOD` seconds into slot `current_slot`, run `get_shard_transition(beacon_state, shard, shard_blocks)` to get `shard_transition`.
|
||||
|
||||
```python
|
||||
def get_shard_transition(beacon_state: BeaconState,
|
||||
shard: Shard,
|
||||
shard_blocks: Sequence[SignedShardBlock]) -> ShardTransition:
|
||||
offset_slots = get_offset_slots(beacon_state, shard)
|
||||
start_slot = offset_slots[0]
|
||||
proposals, shard_states, shard_data_roots = get_shard_state_transition_result(beacon_state, shard, shard_blocks)
|
||||
|
||||
assert len(proposals) > 0
|
||||
assert len(shard_data_roots) > 0
|
||||
|
||||
shard_block_lengths = []
|
||||
proposer_signatures = []
|
||||
for proposal in proposals:
|
||||
shard_block_lengths.append(len(proposal.message.body))
|
||||
if proposal.signature != BLSSignature():
|
||||
proposer_signatures.append(proposal.signature)
|
||||
|
||||
proposer_signature_aggregate = bls.Aggregate(proposer_signatures)
|
||||
|
||||
return ShardTransition(
|
||||
start_slot=start_slot,
|
||||
shard_block_lengths=shard_block_lengths,
|
||||
shard_data_roots=shard_data_roots,
|
||||
shard_states=shard_states,
|
||||
proposer_signature_aggregate=proposer_signature_aggregate,
|
||||
)
|
||||
```
|
@ -1 +1 @@
|
||||
0.11.1
|
||||
0.12.0
|
@ -8,23 +8,28 @@ config: Dict[str, Any] = {}
|
||||
|
||||
# Access to overwrite spec constants based on configuration
|
||||
# This is called by the spec module after declaring its globals, and applies the loaded presets.
|
||||
def apply_constants_config(spec_globals: Dict[str, Any]) -> None:
|
||||
def apply_constants_config(spec_globals: Dict[str, Any], warn_if_unknown: bool = False) -> None:
|
||||
global config
|
||||
for k, v in config.items():
|
||||
if k.startswith('DOMAIN_'):
|
||||
spec_globals[k] = spec_globals['DomainType'](v) # domain types are defined as bytes in the configs
|
||||
# the spec should have default values for everything, if not, the config key is invalid.
|
||||
if k in spec_globals:
|
||||
# Keep the same type as the default value indicates (which may be an SSZ basic type subclass, e.g. 'Gwei')
|
||||
spec_globals[k] = spec_globals[k].__class__(v)
|
||||
else:
|
||||
spec_globals[k] = v
|
||||
# Note: Phase 0 spec will not know the phase 1 config values.
|
||||
# Yet, during debugging you can enable explicit warnings.
|
||||
if warn_if_unknown:
|
||||
print(f"WARNING: unknown config key: '{k}' with value: '{v}'")
|
||||
|
||||
|
||||
# Load presets from a file, and then prepares the global config setting. This does not apply the config.
|
||||
# To apply the config, reload the spec module (it will re-initialize with the config taken from here).
|
||||
def prepare_config(configs_path, config_name):
|
||||
def prepare_config(configs_path: str, config_name: str) -> None:
|
||||
global config
|
||||
config = load_config_file(configs_path, config_name)
|
||||
|
||||
|
||||
def load_config_file(configs_dir, presets_name) -> Dict[str, Any]:
|
||||
def load_config_file(configs_dir: str, presets_name: str) -> Dict[str, Any]:
|
||||
"""
|
||||
Loads the given preset
|
||||
:param presets_name: The name of the presets. (lowercase snake_case)
|
||||
@ -33,10 +38,11 @@ def load_config_file(configs_dir, presets_name) -> Dict[str, Any]:
|
||||
path = Path(join(configs_dir, presets_name + '.yaml'))
|
||||
yaml = YAML(typ='base')
|
||||
loaded = yaml.load(path)
|
||||
out = dict()
|
||||
out: Dict[str, Any] = dict()
|
||||
for k, v in loaded.items():
|
||||
if isinstance(v, list):
|
||||
out[k] = v
|
||||
# Clean up integer values. YAML parser renders lists of ints as list of str
|
||||
out[k] = [int(item) if item.isdigit() else item for item in v]
|
||||
elif isinstance(v, str) and v.startswith("0x"):
|
||||
out[k] = bytes.fromhex(v[2:])
|
||||
else:
|
||||
|
@ -7,7 +7,7 @@ from .helpers.genesis import create_genesis_state
|
||||
from .utils import vector_test, with_meta_tags
|
||||
|
||||
from random import Random
|
||||
from typing import Any, Callable, Sequence, TypedDict, Protocol
|
||||
from typing import Any, Callable, NewType, Sequence, TypedDict, Protocol
|
||||
|
||||
from importlib import reload
|
||||
|
||||
@ -19,25 +19,33 @@ def reload_specs():
|
||||
|
||||
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
|
||||
|
||||
SpecForkName = NewType("SpecForkName", str)
|
||||
|
||||
PHASE0 = SpecForkName('phase0')
|
||||
PHASE1 = SpecForkName('phase1')
|
||||
ALL_PHASES = (PHASE0, PHASE1)
|
||||
|
||||
# TODO: currently phases are defined as python modules.
|
||||
# It would be better if they would be more well-defined interfaces for stronger typing.
|
||||
|
||||
|
||||
class Spec(Protocol):
|
||||
version: str
|
||||
|
||||
|
||||
class Phase0(Spec):
|
||||
class SpecPhase0(Spec):
|
||||
...
|
||||
|
||||
|
||||
class Phase1(Spec):
|
||||
class SpecPhase1(Spec):
|
||||
def upgrade_to_phase1(self, state: spec_phase0.BeaconState) -> spec_phase1.BeaconState:
|
||||
...
|
||||
|
||||
|
||||
# add transfer, bridge, etc. as the spec evolves
|
||||
class SpecForks(TypedDict, total=False):
|
||||
phase0: Phase0
|
||||
phase1: Phase1
|
||||
PHASE0: SpecPhase0
|
||||
PHASE1: SpecPhase1
|
||||
|
||||
|
||||
def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
|
||||
@ -45,16 +53,19 @@ def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
|
||||
def deco(fn):
|
||||
def entry(*args, spec: Spec, phases: SpecForks, **kw):
|
||||
try:
|
||||
p0 = phases["phase0"]
|
||||
p0 = phases[PHASE0]
|
||||
balances = balances_fn(p0)
|
||||
activation_threshold = threshold_fn(p0)
|
||||
|
||||
state = create_genesis_state(spec=p0, validator_balances=balances,
|
||||
activation_threshold=activation_threshold)
|
||||
if spec.fork == 'phase1':
|
||||
if spec.fork == PHASE1:
|
||||
# TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper.
|
||||
# Decide based on performance/consistency results later.
|
||||
state = phases["phase1"].upgrade_to_phase1(state)
|
||||
state = phases[PHASE1].upgrade_to_phase1(state)
|
||||
# Shard state slot must lag behind BeaconState slot by at least 1
|
||||
# Will handle this more elegantly with fork mechanics
|
||||
spec.process_slots(state, state.slot + 1)
|
||||
|
||||
kw['state'] = state
|
||||
except KeyError:
|
||||
@ -217,14 +228,11 @@ def bls_switch(fn):
return entry


all_phases = ['phase0', 'phase1']


def with_all_phases(fn):
"""
A decorator for running a test with every phase
"""
return with_phases(all_phases)(fn)
return with_phases(ALL_PHASES)(fn)


def with_all_phases_except(exclusion_phases):
@ -232,7 +240,7 @@ def with_all_phases_except(exclusion_phases):
A decorator factory for running a tests with every phase except the ones listed
"""
def decorator(fn):
return with_phases([phase for phase in all_phases if phase not in exclusion_phases])(fn)
return with_phases([phase for phase in ALL_PHASES if phase not in exclusion_phases])(fn)
return decorator


@ -258,18 +266,18 @@ def with_phases(phases, other_phases=None):

# TODO: test state is dependent on phase0 but is immediately transitioned to phase1.
# A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0
available_phases.add('phase0')
available_phases.add(PHASE0)

phase_dir = {}
if 'phase0' in available_phases:
phase_dir['phase0'] = spec_phase0
if 'phase1' in available_phases:
phase_dir['phase1'] = spec_phase1
if PHASE0 in available_phases:
phase_dir[PHASE0] = spec_phase0
if PHASE1 in available_phases:
phase_dir[PHASE1] = spec_phase1

# return is ignored whenever multiple phases are ran. If
if 'phase0' in run_phases:
if PHASE0 in run_phases:
ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
if 'phase1' in run_phases:
if PHASE1 in run_phases:
ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw)
return ret
return wrapper
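A minimal sketch (not part of the diff) of a test that uses the new fork-name constants together with these decorators; the test name and body are hypothetical.

```python
from eth2spec.test.context import PHASE0, PHASE1, with_all_phases_except, spec_state_test


@with_all_phases_except([PHASE1])
@spec_state_test
def test_runs_on_phase0_only(spec, state):
    # `spec` is bound to the phase0 module here, because PHASE1 is excluded above.
    assert spec.fork == PHASE0
```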
@ -1,9 +1,8 @@
|
||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
next_epoch_with_attestations,
|
||||
state_transition_and_sign_block,
|
||||
)
|
||||
|
||||
|
@ -1,7 +1,7 @@
|
||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||
from eth2spec.test.context import PHASE0, with_all_phases, spec_state_test
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
|
||||
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block
|
||||
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch
|
||||
|
||||
|
||||
def run_on_attestation(spec, state, store, attestation, valid=True):
|
||||
@ -16,7 +16,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
|
||||
indexed_attestation = spec.get_indexed_attestation(state, attestation)
|
||||
spec.on_attestation(store, attestation)
|
||||
|
||||
if spec.fork == 'phase0':
|
||||
if spec.fork == PHASE0:
|
||||
sample_index = indexed_attestation.attesting_indices[0]
|
||||
else:
|
||||
attesting_indices = [
|
||||
@ -120,11 +120,12 @@ def test_on_attestation_mismatched_target_and_slot(spec, state):
|
||||
@spec_state_test
|
||||
def test_on_attestation_target_not_in_store(spec, state):
|
||||
store = spec.get_forkchoice_store(state)
|
||||
time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
||||
time = store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
||||
spec.on_tick(store, time)
|
||||
|
||||
# move to immediately before next epoch to make block new target
|
||||
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
|
||||
next_epoch = spec.get_current_epoch(state) + 1
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1)
|
||||
|
||||
target_block = build_empty_block_for_next_slot(spec, state)
|
||||
state_transition_and_sign_block(spec, state, target_block)
|
||||
@ -141,11 +142,12 @@ def test_on_attestation_target_not_in_store(spec, state):
|
||||
@spec_state_test
|
||||
def test_on_attestation_beacon_block_not_in_store(spec, state):
|
||||
store = spec.get_forkchoice_store(state)
|
||||
time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
||||
time = store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
||||
spec.on_tick(store, time)
|
||||
|
||||
# move to immediately before next epoch to make block new target
|
||||
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
|
||||
next_epoch = spec.get_current_epoch(state) + 1
|
||||
transition_to(spec, state, spec.compute_start_slot_at_epoch(next_epoch) - 1)
|
||||
|
||||
target_block = build_empty_block_for_next_slot(spec, state)
|
||||
signed_target_block = state_transition_and_sign_block(spec, state, target_block)
|
||||
@ -169,7 +171,7 @@ def test_on_attestation_beacon_block_not_in_store(spec, state):
|
||||
@spec_state_test
|
||||
def test_on_attestation_future_epoch(spec, state):
|
||||
store = spec.get_forkchoice_store(state)
|
||||
time = 3 * spec.SECONDS_PER_SLOT
|
||||
time = store.time + 3 * spec.SECONDS_PER_SLOT
|
||||
spec.on_tick(store, time)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
@ -179,7 +181,7 @@ def test_on_attestation_future_epoch(spec, state):
|
||||
spec.on_block(store, signed_block)
|
||||
|
||||
# move state forward but not store
|
||||
state.slot = block.slot + spec.SLOTS_PER_EPOCH
|
||||
next_epoch(spec, state)
|
||||
|
||||
attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True)
|
||||
run_on_attestation(spec, state, store, attestation, False)
|
||||
@ -189,7 +191,7 @@ def test_on_attestation_future_epoch(spec, state):
|
||||
@spec_state_test
|
||||
def test_on_attestation_future_block(spec, state):
|
||||
store = spec.get_forkchoice_store(state)
|
||||
time = spec.SECONDS_PER_SLOT * 5
|
||||
time = store.time + spec.SECONDS_PER_SLOT * 5
|
||||
spec.on_tick(store, time)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
@ -209,7 +211,7 @@ def test_on_attestation_future_block(spec, state):
|
||||
@spec_state_test
|
||||
def test_on_attestation_same_slot(spec, state):
|
||||
store = spec.get_forkchoice_store(state)
|
||||
time = 1 * spec.SECONDS_PER_SLOT
|
||||
time = store.time + spec.SECONDS_PER_SLOT
|
||||
spec.on_tick(store, time)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
@ -225,7 +227,7 @@ def test_on_attestation_same_slot(spec, state):
|
||||
@spec_state_test
|
||||
def test_on_attestation_invalid_attestation(spec, state):
|
||||
store = spec.get_forkchoice_store(state)
|
||||
time = 3 * spec.SECONDS_PER_SLOT
|
||||
time = store.time + 3 * spec.SECONDS_PER_SLOT
|
||||
spec.on_tick(store, time)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
|
@ -4,7 +4,8 @@ from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block, transition_unsigned_block, \
|
||||
build_empty_block
|
||||
from eth2spec.test.helpers.state import next_epoch, next_epoch_with_attestations, state_transition_and_sign_block
|
||||
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
|
||||
from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block
|
||||
|
||||
|
||||
def run_on_block(spec, store, signed_block, valid=True):
|
||||
@ -159,6 +160,7 @@ def test_on_block_finalized_skip_slots(spec, state):
|
||||
@spec_state_test
|
||||
def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
|
||||
# Initialization
|
||||
next_epoch(spec, state)
|
||||
store = spec.get_forkchoice_store(state)
|
||||
|
||||
store.finalized_checkpoint = spec.Checkpoint(
|
||||
|
@ -27,14 +27,16 @@ def test_basic(spec, state):
@spec_state_test
def test_update_justified_single(spec, state):
store = spec.get_forkchoice_store(state)
seconds_per_epoch = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
next_epoch = spec.get_current_epoch(state) + 1
next_epoch_start_slot = spec.compute_start_slot_at_epoch(next_epoch)
seconds_until_next_epoch = next_epoch_start_slot * spec.SECONDS_PER_SLOT - store.time

store.best_justified_checkpoint = spec.Checkpoint(
epoch=store.justified_checkpoint.epoch + 1,
root=b'\x55' * 32,
)

run_on_tick(spec, store, store.time + seconds_per_epoch, True)
run_on_tick(spec, store, store.time + seconds_until_next_epoch, True)


@with_all_phases
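A small worked example (not part of the diff) of the tick arithmetic above, under assumed illustrative values: genesis time 0 (so `store.time` starts at 0), `SECONDS_PER_SLOT = 12` and `SLOTS_PER_EPOCH = 8`.

```python
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 8

store_time = 0  # store.time right after get_forkchoice_store(state), with genesis time 0
next_epoch_start_slot = 1 * SLOTS_PER_EPOCH  # current epoch is 0, so the next epoch starts at slot 8
seconds_until_next_epoch = next_epoch_start_slot * SECONDS_PER_SLOT - store_time  # 96
tick_time = store_time + seconds_until_next_epoch  # 96, i.e. the first second of epoch 1
```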
@ -1,10 +1,10 @@
|
||||
from eth2spec.test.context import spec_test, with_phases, single_phase
|
||||
from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_genesis_deposits,
|
||||
)
|
||||
|
||||
|
||||
@with_phases(['phase0'])
|
||||
@with_phases(([PHASE0]))
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_initialize_beacon_state_from_eth1(spec):
|
||||
@ -32,7 +32,7 @@ def test_initialize_beacon_state_from_eth1(spec):
|
||||
yield 'state', state
|
||||
|
||||
|
||||
@with_phases(['phase0'])
|
||||
@with_phases([PHASE0])
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_initialize_beacon_state_some_small_balances(spec):
|
||||
|
@ -1,4 +1,4 @@
|
||||
from eth2spec.test.context import spec_test, with_phases, single_phase
|
||||
from eth2spec.test.context import PHASE0, spec_test, with_phases, single_phase
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_genesis_deposits,
|
||||
)
|
||||
@ -25,7 +25,7 @@ def run_is_valid_genesis_state(spec, state, valid=True):
|
||||
assert is_valid == valid
|
||||
|
||||
|
||||
@with_phases(['phase0'])
|
||||
@with_phases([PHASE0])
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_is_valid_genesis_state_true(spec):
|
||||
@ -34,7 +34,7 @@ def test_is_valid_genesis_state_true(spec):
|
||||
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
||||
|
||||
|
||||
@with_phases(['phase0'])
|
||||
@with_phases([PHASE0])
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_is_valid_genesis_state_false_invalid_timestamp(spec):
|
||||
@ -44,7 +44,7 @@ def test_is_valid_genesis_state_false_invalid_timestamp(spec):
|
||||
yield from run_is_valid_genesis_state(spec, state, valid=False)
|
||||
|
||||
|
||||
@with_phases(['phase0'])
|
||||
@with_phases([PHASE0])
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_is_valid_genesis_state_true_more_balance(spec):
|
||||
@ -55,7 +55,7 @@ def test_is_valid_genesis_state_true_more_balance(spec):
|
||||
|
||||
|
||||
# TODO: not part of the genesis function yet. Erroneously merged.
|
||||
# @with_phases(['phase0'])
|
||||
# @with_phases([PHASE0])
|
||||
# @spec_test
|
||||
# def test_is_valid_genesis_state_false_not_enough_balance(spec):
|
||||
# state = create_valid_beacon_state(spec)
|
||||
@ -64,7 +64,7 @@ def test_is_valid_genesis_state_true_more_balance(spec):
|
||||
# yield from run_is_valid_genesis_state(spec, state, valid=False)
|
||||
|
||||
|
||||
@with_phases(['phase0'])
|
||||
@with_phases([PHASE0])
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_is_valid_genesis_state_true_one_more_validator(spec):
|
||||
@ -78,7 +78,7 @@ def test_is_valid_genesis_state_true_one_more_validator(spec):
|
||||
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
||||
|
||||
|
||||
@with_phases(['phase0'])
|
||||
@with_phases([PHASE0])
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_is_valid_genesis_state_false_not_enough_validator(spec):
|
||||
|
@ -1,13 +1,49 @@
from typing import List

from eth2spec.test.helpers.block import build_empty_block_for_next_slot, transition_unsigned_block, \
build_empty_block
from eth2spec.test.context import expect_assertion_error, PHASE0, PHASE1
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot, transition_to
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist


def build_attestation_data(spec, state, slot, index):
def run_attestation_processing(spec, state, attestation, valid=True):
"""
Run ``process_attestation``, yielding:
- pre-state ('pre')
- attestation ('attestation')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
# yield pre-state
yield 'pre', state

yield 'attestation', attestation

# If the attestation is invalid, processing is aborted, and there is no post-state.
if not valid:
expect_assertion_error(lambda: spec.process_attestation(state, attestation))
yield 'post', None
return

current_epoch_count = len(state.current_epoch_attestations)
previous_epoch_count = len(state.previous_epoch_attestations)

# process attestation
spec.process_attestation(state, attestation)

# Make sure the attestation has been processed
if attestation.data.target.epoch == spec.get_current_epoch(state):
assert len(state.current_epoch_attestations) == current_epoch_count + 1
else:
assert len(state.previous_epoch_attestations) == previous_epoch_count + 1

# yield post-state
yield 'post', state

def build_attestation_data(spec, state, slot, index, shard_transition=None, on_time=True):
assert state.slot >= slot

if slot == state.slot:
@ -30,7 +66,7 @@ def build_attestation_data(spec, state, slot, index):
source_epoch = state.current_justified_checkpoint.epoch
source_root = state.current_justified_checkpoint.root

return spec.AttestationData(
attestation_data = spec.AttestationData(
slot=slot,
index=index,
beacon_block_root=block_root,
@ -38,14 +74,92 @@ def build_attestation_data(spec, state, slot, index):
target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root),
)

if spec.fork == PHASE1:
if shard_transition is not None:
lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
attestation_data.head_shard_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
attestation_data.shard_transition_root = shard_transition.hash_tree_root()
else:
# No shard transition
shard = spec.get_shard(state, spec.Attestation(data=attestation_data))
if on_time:
temp_state = state.copy()
next_slot(spec, temp_state)
shard_transition = spec.get_shard_transition(temp_state, shard, [])
lastest_shard_data_root_index = len(shard_transition.shard_data_roots) - 1
attestation_data.head_shard_root = shard_transition.shard_data_roots[lastest_shard_data_root_index]
attestation_data.shard_transition_root = shard_transition.hash_tree_root()
else:
attestation_data.head_shard_root = state.shard_states[shard].transition_digest
attestation_data.shard_transition_root = spec.Root()
return attestation_data

def get_valid_attestation(spec, state, slot=None, index=None, empty=False, signed=False):

def convert_to_valid_on_time_attestation(spec, state, attestation, signed=False):
shard = spec.get_shard(state, attestation)
offset_slots = spec.compute_offset_slots(
spec.get_latest_slot_for_shard(state, shard),
attestation.data.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY,
)
for _ in offset_slots:
attestation.custody_bits_blocks.append(
Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE]([0 for _ in attestation.aggregation_bits])
)

if signed:
sign_attestation(spec, state, attestation)

return attestation


def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_transition=None, signed=False):
'''
Construct on-time attestation for next slot
'''
if slot is None:
slot = state.slot
if index is None:
index = 0

attestation_data = build_attestation_data(spec, state, slot, index)
return get_valid_attestation(
spec,
state,
slot=slot,
index=index,
shard_transition=shard_transition,
signed=signed,
on_time=True,
)


def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False):
'''
Construct late attestation for the given slot
'''
if slot is None:
slot = state.slot
if index is None:
index = 0

return get_valid_attestation(spec, state, slot=slot, index=index, signed=signed, on_time=False)

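A minimal sketch (not part of the diff) contrasting the two helpers above in a hypothetical phase1 test body; committee index 0 and the variable names are illustrative.

```python
def example_attestations(spec, state):
    # On-time: custody bit lists are attached and, with no shard_transition passed,
    # an empty-block transition is derived for the attested shard.
    on_time_att = get_valid_on_time_attestation(spec, state, index=0, signed=True)

    # Late: the on_time=False path, so the data points at the shard state's current
    # transition digest and an empty shard_transition_root.
    late_att = get_valid_late_attestation(spec, state, index=0, signed=True)
    return on_time_att, late_att
```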
def get_valid_attestation(spec,
|
||||
state,
|
||||
slot=None,
|
||||
index=None,
|
||||
shard_transition=None,
|
||||
empty=False,
|
||||
signed=False,
|
||||
on_time=True):
|
||||
if slot is None:
|
||||
slot = state.slot
|
||||
if index is None:
|
||||
index = 0
|
||||
|
||||
attestation_data = build_attestation_data(
|
||||
spec, state, slot=slot, index=index, shard_transition=shard_transition, on_time=on_time
|
||||
)
|
||||
|
||||
beacon_committee = spec.get_beacon_committee(
|
||||
state,
|
||||
@ -63,6 +177,10 @@ def get_valid_attestation(spec, state, slot=None, index=None, empty=False, signe
|
||||
fill_aggregate_attestation(spec, state, attestation)
|
||||
if signed:
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
if spec.fork == PHASE1 and on_time:
|
||||
attestation = convert_to_valid_on_time_attestation(spec, state, attestation, signed)
|
||||
|
||||
return attestation
|
||||
|
||||
|
||||
@ -78,12 +196,11 @@ def sign_aggregate_attestation(spec, state, attestation_data, participants: List
|
||||
privkey
|
||||
)
|
||||
)
|
||||
# TODO: we should try signing custody bits if spec.fork == 'phase1'
|
||||
return bls.Aggregate(signatures)
|
||||
|
||||
|
||||
def sign_indexed_attestation(spec, state, indexed_attestation):
|
||||
if spec.fork == 'phase0':
|
||||
if spec.fork == PHASE0:
|
||||
participants = indexed_attestation.attesting_indices
|
||||
data = indexed_attestation.data
|
||||
indexed_attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
|
||||
@ -96,7 +213,47 @@ def sign_indexed_attestation(spec, state, indexed_attestation):
|
||||
indexed_attestation.attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
|
||||
|
||||
|
||||
def sign_on_time_attestation(spec, state, attestation):
|
||||
if not any(attestation.custody_bits_blocks):
|
||||
sign_attestation(spec, state, attestation)
|
||||
return
|
||||
|
||||
committee = spec.get_beacon_committee(state, attestation.data.slot, attestation.data.index)
|
||||
signatures = []
|
||||
for block_index, custody_bits in enumerate(attestation.custody_bits_blocks):
|
||||
for participant, abit, cbit in zip(committee, attestation.aggregation_bits, custody_bits):
|
||||
if not abit:
|
||||
continue
|
||||
signatures.append(get_attestation_custody_signature(
|
||||
spec,
|
||||
state,
|
||||
attestation.data,
|
||||
block_index,
|
||||
cbit,
|
||||
privkeys[participant]
|
||||
))
|
||||
|
||||
attestation.signature = bls.Aggregate(signatures)
|
||||
|
||||
|
||||
def get_attestation_custody_signature(spec, state, attestation_data, block_index, bit, privkey):
|
||||
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
|
||||
signing_root = spec.compute_signing_root(
|
||||
spec.AttestationCustodyBitWrapper(
|
||||
attestation_data_root=attestation_data.hash_tree_root(),
|
||||
block_index=block_index,
|
||||
bit=bit,
|
||||
),
|
||||
domain,
|
||||
)
|
||||
return bls.Sign(privkey, signing_root)
|
||||
|
||||
|
||||
def sign_attestation(spec, state, attestation):
|
||||
if spec.fork == PHASE1 and any(attestation.custody_bits_blocks):
|
||||
sign_on_time_attestation(spec, state, attestation)
|
||||
return
|
||||
|
||||
participants = spec.get_attesting_indices(
|
||||
state,
|
||||
attestation.data,
|
||||
@ -113,7 +270,6 @@ def get_attestation_signature(spec, state, attestation_data, privkey):
|
||||
|
||||
|
||||
def fill_aggregate_attestation(spec, state, attestation, signed=False):
|
||||
|
||||
beacon_committee = spec.get_beacon_committee(
|
||||
state,
|
||||
attestation.data.slot,
|
||||
@ -127,8 +283,52 @@ def fill_aggregate_attestation(spec, state, attestation, signed=False):


def add_attestations_to_state(spec, state, attestations, slot):
block = build_empty_block(spec, state, slot)
spec.process_slots(state, slot)
for attestation in attestations:
block.body.attestations.append(attestation)
spec.process_slots(state, block.slot)
transition_unsigned_block(spec, state, block)
spec.process_attestation(state, attestation)


def next_epoch_with_attestations(spec,
state,
fill_cur_epoch,
fill_prev_epoch):
assert state.slot % spec.SLOTS_PER_EPOCH == 0

post_state = state.copy()
signed_blocks = []
for _ in range(spec.SLOTS_PER_EPOCH):
block = build_empty_block_for_next_slot(spec, post_state)
if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest)
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)):
for index in range(committees_per_slot):
cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index, signed=True)
block.body.attestations.append(cur_attestation)

if fill_prev_epoch:
slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest)
for index in range(committees_per_slot):
prev_attestation = get_valid_attestation(
spec, post_state, slot_to_attest, index=index, signed=True, on_time=False)
block.body.attestations.append(prev_attestation)

if spec.fork == PHASE1:
fill_block_shard_transitions_by_attestations(spec, post_state, block)

signed_block = state_transition_and_sign_block(spec, post_state, block)
signed_blocks.append(signed_block)

return state, signed_blocks, post_state


def fill_block_shard_transitions_by_attestations(spec, state, block):
block.body.shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
for attestation in block.body.attestations:
shard = spec.get_shard(state, attestation)
if attestation.data.slot == state.slot:
temp_state = state.copy()
transition_to(spec, temp_state, slot=block.slot)
shard_transition = spec.get_shard_transition(temp_state, shard, [])
block.body.shard_transitions[shard] = shard_transition
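A minimal sketch (not part of the diff) of how a test might drive a full epoch of blocks with current-epoch attestations using the helper above; names are illustrative.

```python
def example_epoch_of_votes(spec, state):
    assert state.slot % spec.SLOTS_PER_EPOCH == 0  # the helper requires an epoch boundary
    pre_state, signed_blocks, post_state = next_epoch_with_attestations(spec, state, True, False)
    assert post_state.slot == pre_state.slot + spec.SLOTS_PER_EPOCH
    return signed_blocks, post_state
```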
@ -1,3 +1,4 @@
|
||||
from eth2spec.test.context import PHASE1
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
|
||||
|
||||
|
||||
@ -20,7 +21,7 @@ def get_indexed_attestation_participants(spec, indexed_att):
|
||||
"""
|
||||
Wrapper around index-attestation to return the list of participant indices, regardless of spec phase.
|
||||
"""
|
||||
if spec.fork == "phase1":
|
||||
if spec.fork == PHASE1:
|
||||
return list(spec.get_indices_from_committee(
|
||||
indexed_att.committee,
|
||||
indexed_att.attestation.aggregation_bits,
|
||||
@ -33,21 +34,21 @@ def set_indexed_attestation_participants(spec, indexed_att, participants):
|
||||
"""
|
||||
Wrapper around index-attestation to return the list of participant indices, regardless of spec phase.
|
||||
"""
|
||||
if spec.fork == "phase1":
|
||||
if spec.fork == PHASE1:
|
||||
indexed_att.attestation.aggregation_bits = [bool(i in participants) for i in indexed_att.committee]
|
||||
else:
|
||||
indexed_att.attesting_indices = participants
|
||||
|
||||
|
||||
def get_attestation_1_data(spec, att_slashing):
|
||||
if spec.fork == "phase1":
|
||||
if spec.fork == PHASE1:
|
||||
return att_slashing.attestation_1.attestation.data
|
||||
else:
|
||||
return att_slashing.attestation_1.data
|
||||
|
||||
|
||||
def get_attestation_2_data(spec, att_slashing):
|
||||
if spec.fork == "phase1":
|
||||
if spec.fork == PHASE1:
|
||||
return att_slashing.attestation_2.attestation.data
|
||||
else:
|
||||
return att_slashing.attestation_2.data
|
||||
|
@ -71,24 +71,31 @@ def build_empty_block(spec, state, slot=None):
|
||||
"""
|
||||
if slot is None:
|
||||
slot = state.slot
|
||||
if slot < state.slot:
|
||||
raise Exception("build_empty_block cannot build blocks for past slots")
|
||||
if slot > state.slot:
|
||||
# transition forward in copied state to grab relevant data from state
|
||||
state = state.copy()
|
||||
spec.process_slots(state, slot)
|
||||
|
||||
state, parent_block_root = get_state_and_beacon_parent_root_at_slot(spec, state, slot)
|
||||
empty_block = spec.BeaconBlock()
|
||||
empty_block.slot = slot
|
||||
empty_block.proposer_index = spec.get_beacon_proposer_index(state)
|
||||
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
|
||||
previous_block_header = state.latest_block_header.copy()
|
||||
if previous_block_header.state_root == spec.Root():
|
||||
previous_block_header.state_root = hash_tree_root(state)
|
||||
empty_block.parent_root = hash_tree_root(previous_block_header)
|
||||
empty_block.parent_root = parent_block_root
|
||||
apply_randao_reveal(spec, state, empty_block)
|
||||
return empty_block
|
||||
|
||||
|
||||
def build_empty_block_for_next_slot(spec, state):
|
||||
return build_empty_block(spec, state, state.slot + 1)
|
||||
|
||||
|
||||
def get_state_and_beacon_parent_root_at_slot(spec, state, slot):
|
||||
if slot < state.slot:
|
||||
raise Exception("Cannot build blocks for past slots")
|
||||
if slot > state.slot:
|
||||
# transition forward in copied state to grab relevant data from state
|
||||
state = state.copy()
|
||||
spec.process_slots(state, slot)
|
||||
|
||||
previous_block_header = state.latest_block_header.copy()
|
||||
if previous_block_header.state_root == spec.Root():
|
||||
previous_block_header.state_root = hash_tree_root(state)
|
||||
beacon_parent_root = hash_tree_root(previous_block_header)
|
||||
return state, beacon_parent_root
|
||||
|
28 tests/core/pyspec/eth2spec/test/helpers/crosslinks.py (new file)
@ -0,0 +1,28 @@
from eth2spec.test.context import expect_assertion_error


def run_crosslinks_processing(spec, state, shard_transitions, attestations, valid=True):
"""
Run ``process_crosslinks``, yielding:
- pre-state ('pre')
- shard_transitions ('shard_transitions')
- attestations ('attestations')
- post-state ('post').
If ``valid == False``, run expecting ``AssertionError``
"""
# yield pre-state
yield 'pre', state
yield 'shard_transitions', shard_transitions
yield 'attestations', attestations

# If the attestation is invalid, processing is aborted, and there is no post-state.
if not valid:
expect_assertion_error(lambda: spec.process_crosslinks(state, shard_transitions, attestations))
yield 'post', None
return

# process crosslinks
spec.process_crosslinks(state, shard_transitions, attestations)

# yield post-state
yield 'post', state
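A minimal sketch (not part of the diff) of consuming this generator from a test, mirroring the `yield from` pattern used by the other process_* helpers in this diff; building the attestations and shard transitions is elided and assumed to come from the shard block helpers added later in the diff.

```python
def test_crosslinks_example(spec, state, shard_transitions, attestations):
    # Forward the 'pre'/'shard_transitions'/'attestations'/'post' parts to the test vector output.
    yield from run_crosslinks_processing(spec, state, shard_transitions, attestations)
```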
@ -1,6 +1,6 @@
from py_ecc.bls import G2ProofOfPossession as bls
from eth2spec.phase0 import spec

privkeys = [i + 1 for i in range(spec.SLOTS_PER_EPOCH * 16)]
privkeys = [i + 1 for i in range(spec.SLOTS_PER_EPOCH * 256)]
pubkeys = [bls.PrivToPub(privkey) for privkey in privkeys]
pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
@ -1,30 +0,0 @@
|
||||
from eth2spec.test.helpers.keys import privkeys
|
||||
from eth2spec.utils import bls
|
||||
|
||||
|
||||
def sign_shard_attestation(spec, beacon_state, shard_state, block, participants):
|
||||
signatures = []
|
||||
message_hash = spec.ShardAttestationData(
|
||||
slot=block.slot,
|
||||
parent_root=block.parent_root,
|
||||
).hash_tree_root()
|
||||
block_epoch = spec.compute_epoch_of_shard_slot(block.slot)
|
||||
for validator_index in participants:
|
||||
privkey = privkeys[validator_index]
|
||||
signatures.append(
|
||||
get_attestation_signature(
|
||||
spec,
|
||||
beacon_state,
|
||||
shard_state,
|
||||
message_hash,
|
||||
block_epoch,
|
||||
privkey,
|
||||
)
|
||||
)
|
||||
return bls.Aggregate(signatures)
|
||||
|
||||
|
||||
def get_attestation_signature(spec, beacon_state, shard_state, message_hash, block_epoch, privkey):
|
||||
domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_ATTESTER, block_epoch)
|
||||
signing_root = spec.compute_signing_root(message_hash, domain)
|
||||
return bls.Sign(privkey, signing_root)
|
@ -1,71 +0,0 @@
|
||||
from eth2spec.test.helpers.keys import privkeys
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.utils.bls import only_with_bls
|
||||
from eth2spec.utils.ssz.ssz_impl import (
|
||||
hash_tree_root,
|
||||
)
|
||||
|
||||
from .attestations import (
|
||||
sign_shard_attestation,
|
||||
)
|
||||
|
||||
|
||||
@only_with_bls()
|
||||
def sign_shard_block(spec, beacon_state, shard_state, block, proposer_index=None):
|
||||
if proposer_index is None:
|
||||
proposer_index = spec.get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
|
||||
|
||||
privkey = privkeys[proposer_index]
|
||||
domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_PROPOSER, spec.compute_epoch_of_shard_slot(block.slot))
|
||||
signing_root = spec.compute_signing_root(block, domain)
|
||||
block.signature = bls.Sign(privkey, signing_root)
|
||||
|
||||
|
||||
def build_empty_shard_block(spec,
|
||||
beacon_state,
|
||||
shard_state,
|
||||
slot,
|
||||
signed=False,
|
||||
full_attestation=False):
|
||||
if slot is None:
|
||||
slot = shard_state.slot
|
||||
|
||||
previous_beacon_header = beacon_state.latest_block_header.copy()
|
||||
if previous_beacon_header.state_root == spec.Bytes32():
|
||||
previous_beacon_header.state_root = beacon_state.hash_tree_root()
|
||||
beacon_block_root = hash_tree_root(previous_beacon_header)
|
||||
|
||||
previous_block_header = shard_state.latest_block_header.copy()
|
||||
if previous_block_header.state_root == spec.Bytes32():
|
||||
previous_block_header.state_root = shard_state.hash_tree_root()
|
||||
parent_root = hash_tree_root(previous_block_header)
|
||||
|
||||
block = spec.ShardBlock(
|
||||
shard=shard_state.shard,
|
||||
slot=slot,
|
||||
beacon_block_root=beacon_block_root,
|
||||
parent_root=parent_root,
|
||||
block_size_sum=shard_state.block_size_sum + spec.SHARD_HEADER_SIZE,
|
||||
)
|
||||
|
||||
if full_attestation:
|
||||
shard_committee = spec.get_shard_committee(beacon_state, shard_state.shard, block.slot)
|
||||
block.aggregation_bits = list(
|
||||
(True,) * len(shard_committee) +
|
||||
(False,) * (spec.MAX_PERIOD_COMMITTEE_SIZE * 2 - len(shard_committee))
|
||||
)
|
||||
else:
|
||||
shard_committee = []
|
||||
|
||||
block.attestations = sign_shard_attestation(
|
||||
spec,
|
||||
beacon_state,
|
||||
shard_state,
|
||||
block,
|
||||
participants=shard_committee,
|
||||
)
|
||||
|
||||
if signed:
|
||||
sign_shard_block(spec, beacon_state, shard_state, block)
|
||||
|
||||
return block
|
@ -1,18 +0,0 @@
|
||||
from eth2spec.test.helpers.phase1.shard_block import sign_shard_block
|
||||
|
||||
|
||||
def configure_shard_state(spec, beacon_state, shard=0):
|
||||
beacon_state.slot = spec.Slot(spec.SHARD_GENESIS_EPOCH * spec.SLOTS_PER_EPOCH)
|
||||
shard_state = spec.get_genesis_shard_state(spec.Shard(shard))
|
||||
shard_state.slot = spec.ShardSlot(spec.SHARD_GENESIS_EPOCH * spec.SHARD_SLOTS_PER_EPOCH)
|
||||
return beacon_state, shard_state
|
||||
|
||||
|
||||
def shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block):
|
||||
"""
|
||||
Shard state transition via the provided ``block``
|
||||
then package the block with the state root and signature.
|
||||
"""
|
||||
spec.shard_state_transition(beacon_state, shard_state, block)
|
||||
block.state_root = shard_state.hash_tree_root()
|
||||
sign_shard_block(spec, beacon_state, shard_state, block)
|
85 tests/core/pyspec/eth2spec/test/helpers/shard_block.py (new file)
@ -0,0 +1,85 @@
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
from eth2spec.test.helpers.block import get_state_and_beacon_parent_root_at_slot
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls


@only_with_bls()
def sign_shard_block(spec, beacon_state, shard, block, proposer_index=None):
slot = block.message.slot
if proposer_index is None:
proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)

privkey = privkeys[proposer_index]
domain = spec.get_domain(beacon_state, spec.DOMAIN_SHARD_PROPOSAL, spec.compute_epoch_at_slot(slot))
signing_root = spec.compute_signing_root(block.message, domain)
block.signature = bls.Sign(privkey, signing_root)


def build_shard_block(spec,
beacon_state,
shard,
slot=None,
body=None,
signed=False):
shard_state = beacon_state.shard_states[shard]
if slot is None:
slot = shard_state.slot + 1

if body is None:
body = b'\x56' * 128

proposer_index = spec.get_shard_proposer_index(beacon_state, slot, shard)
beacon_state, beacon_parent_root = get_state_and_beacon_parent_root_at_slot(spec, beacon_state, slot)

block = spec.ShardBlock(
shard_parent_root=shard_state.latest_block_root,
beacon_parent_root=beacon_parent_root,
slot=slot,
proposer_index=proposer_index,
body=body,
)
signed_block = spec.SignedShardBlock(
message=block,
)

if signed:
sign_shard_block(spec, beacon_state, shard, signed_block, proposer_index=proposer_index)

return signed_block


def build_shard_transitions_till_slot(spec, state, shard_blocks, on_time_slot):
temp_state = state.copy()
transition_to(spec, temp_state, on_time_slot)
shard_transitions = [spec.ShardTransition()] * spec.MAX_SHARDS
for shard, blocks in shard_blocks.items():
offset_slots = spec.get_offset_slots(temp_state, shard)
len_offset_slots = len(offset_slots)
assert len_offset_slots == on_time_slot - state.shard_states[shard].slot - 1
shard_transition = spec.get_shard_transition(temp_state, shard, blocks)
if len(blocks) > 0:
shard_block_root = blocks[-1].message.hash_tree_root()
assert shard_transition.shard_states[len_offset_slots - 1].latest_block_root == shard_block_root
assert shard_transition.shard_states[len_offset_slots - 1].slot == offset_slots[-1]
shard_transitions[shard] = shard_transition

return shard_transitions


def build_attestation_with_shard_transition(spec, state, index, on_time_slot, shard_transition=None):
temp_state = state.copy()
transition_to(spec, temp_state, on_time_slot - 1)
attestation = get_valid_on_time_attestation(
spec,
temp_state,
index=index,
shard_transition=shard_transition,
signed=True,
)
assert attestation.data.slot == temp_state.slot
if shard_transition is not None:
assert attestation.data.shard_transition_root == shard_transition.hash_tree_root()
return attestation
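A minimal sketch (not part of the diff) of composing the helpers in this new file: propose one signed shard block, derive the shard transitions up to the on-time slot, and build a matching attestation. Committee index 0, the shard choice and all variable names are illustrative assumptions.

```python
def example_shard_flow(spec, state, shard=0):
    on_time_slot = state.slot + 1
    shard_block = build_shard_block(spec, state, shard, body=b'\x12' * 10, signed=True)
    shard_transitions = build_shard_transitions_till_slot(
        spec, state, shard_blocks={shard: [shard_block]}, on_time_slot=on_time_slot
    )
    attestation = build_attestation_with_shard_transition(
        spec, state, index=0, on_time_slot=on_time_slot,
        shard_transition=shard_transitions[shard],
    )
    return shard_block, shard_transitions, attestation
```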
@ -1,6 +1,5 @@
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.block import sign_block, build_empty_block_for_next_slot, transition_unsigned_block
from eth2spec.test.helpers.block import sign_block, transition_unsigned_block


def get_balance(state, index):
@ -14,6 +13,13 @@ def next_slot(spec, state):
spec.process_slots(state, state.slot + 1)


def next_slots(spec, state, slots):
"""
Transition given slots forward.
"""
spec.process_slots(state, state.slot + slots)


def transition_to(spec, state, slot):
"""
Transition to ``slot``.
@ -24,6 +30,16 @@ def transition_to(spec, state, slot):
assert state.slot == slot


def transition_to_valid_shard_slot(spec, state):
"""
Transition to slot `spec.PHASE_1_GENESIS_SLOT + 1` and fork at `spec.PHASE_1_GENESIS_SLOT`.
"""
transition_to(spec, state, spec.PHASE_1_GENESIS_SLOT)
state = spec.upgrade_to_phase1(state) # `upgrade_to_phase1` is a pure function
next_slot(spec, state)
return state


def next_epoch(spec, state):
"""
Transition to the start slot of the next epoch
@ -51,34 +67,3 @@ def state_transition_and_sign_block(spec, state, block, expect_fail=False):
transition_unsigned_block(spec, state, block)
block.state_root = state.hash_tree_root()
return sign_block(spec, state, block)


def next_epoch_with_attestations(spec,
state,
fill_cur_epoch,
fill_prev_epoch):
assert state.slot % spec.SLOTS_PER_EPOCH == 0

post_state = state.copy()
signed_blocks = []
for _ in range(spec.SLOTS_PER_EPOCH):
block = build_empty_block_for_next_slot(spec, post_state)
if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest)
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)):
for index in range(committees_per_slot):
cur_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index, signed=True)
block.body.attestations.append(cur_attestation)

if fill_prev_epoch:
slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
committees_per_slot = spec.get_committee_count_at_slot(state, slot_to_attest)
for index in range(committees_per_slot):
prev_attestation = get_valid_attestation(spec, post_state, slot_to_attest, index=index, signed=True)
block.body.attestations.append(prev_attestation)

signed_block = state_transition_and_sign_block(spec, post_state, block)
signed_blocks.append(signed_block)

return state, signed_blocks, post_state
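A minimal sketch (not part of the diff) of a phase1-oriented test setup using the new state helpers above: fork the genesis state into phase1, then advance a few slots. It assumes `spec` is the phase1 module; the wrapper name is illustrative.

```python
def example_phase1_setup(spec, state):
    state = transition_to_valid_shard_slot(spec, state)  # now at PHASE_1_GENESIS_SLOT + 1
    next_slots(spec, state, 3)  # equivalent to three single next_slot(...) calls
    return state
```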
@ -1,6 +1,5 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
always_bls, never_bls,
|
||||
with_all_phases,
|
||||
spec_test,
|
||||
@ -8,57 +7,26 @@ from eth2spec.test.context import (
|
||||
with_custom_state,
|
||||
single_phase)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
run_attestation_processing,
|
||||
get_valid_attestation,
|
||||
sign_aggregate_attestation,
|
||||
sign_attestation,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_slot,
|
||||
next_slots,
|
||||
next_epoch,
|
||||
transition_to,
|
||||
)
|
||||
from eth2spec.test.helpers.block import apply_empty_block
|
||||
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
||||
|
||||
|
||||
def run_attestation_processing(spec, state, attestation, valid=True):
|
||||
"""
|
||||
Run ``process_attestation``, yielding:
|
||||
- pre-state ('pre')
|
||||
- attestation ('attestation')
|
||||
- post-state ('post').
|
||||
If ``valid == False``, run expecting ``AssertionError``
|
||||
"""
|
||||
# yield pre-state
|
||||
yield 'pre', state
|
||||
|
||||
yield 'attestation', attestation
|
||||
|
||||
# If the attestation is invalid, processing is aborted, and there is no post-state.
|
||||
if not valid:
|
||||
expect_assertion_error(lambda: spec.process_attestation(state, attestation))
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
current_epoch_count = len(state.current_epoch_attestations)
|
||||
previous_epoch_count = len(state.previous_epoch_attestations)
|
||||
|
||||
# process attestation
|
||||
spec.process_attestation(state, attestation)
|
||||
|
||||
# Make sure the attestation has been processed
|
||||
if attestation.data.target.epoch == spec.get_current_epoch(state):
|
||||
assert len(state.current_epoch_attestations) == current_epoch_count + 1
|
||||
else:
|
||||
assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
|
||||
|
||||
# yield post-state
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_success(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
@ -68,9 +36,9 @@ def test_success(spec, state):
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@single_phase
|
||||
def test_success_multi_proposer_index_iterations(spec, state):
|
||||
state.slot += spec.SLOTS_PER_EPOCH * 2
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2)
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
@ -78,8 +46,8 @@ def test_success_multi_proposer_index_iterations(spec, state):
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_success_previous_epoch(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
state.slot = spec.SLOTS_PER_EPOCH - 1
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
next_epoch(spec, state)
|
||||
apply_empty_block(spec, state)
|
||||
|
||||
@ -91,7 +59,7 @@ def test_success_previous_epoch(spec, state):
|
||||
@always_bls
|
||||
def test_invalid_attestation_signature(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
@ -108,10 +76,10 @@ def test_before_inclusion_delay(spec, state):
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_after_epoch_slots(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
state.slot = spec.SLOTS_PER_EPOCH - 1
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
|
||||
# increment past latest inclusion slot
|
||||
spec.process_slots(state, state.slot + 2)
|
||||
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH + 1)
|
||||
apply_empty_block(spec, state)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
@ -120,7 +88,7 @@ def test_after_epoch_slots(spec, state):
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_old_source_epoch(spec, state):
|
||||
state.slot = spec.SLOTS_PER_EPOCH * 5
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 5)
|
||||
state.finalized_checkpoint.epoch = 2
|
||||
state.previous_justified_checkpoint.epoch = 3
|
||||
state.current_justified_checkpoint.epoch = 4
|
||||
@ -142,7 +110,7 @@ def test_old_source_epoch(spec, state):
|
||||
@always_bls
|
||||
def test_wrong_index_for_committee_signature(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.index += 1
|
||||
|
||||
@ -153,12 +121,14 @@ def test_wrong_index_for_committee_signature(spec, state):
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot(spec, state):
|
||||
committees_per_slot = spec.get_committee_count_at_slot(state, state.slot)
|
||||
assert committees_per_slot < spec.MAX_COMMITTEES_PER_SLOT
|
||||
index = committees_per_slot
|
||||
while spec.get_committee_count_at_slot(state, state.slot) >= spec.MAX_COMMITTEES_PER_SLOT:
|
||||
state.validators = state.validators[:len(state.validators) // 2]
|
||||
state.balances = state.balances[:len(state.balances) // 2]
|
||||
|
||||
index = spec.MAX_COMMITTEES_PER_SLOT - 1
|
||||
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.index = index
|
||||
|
||||
@ -170,7 +140,7 @@ def test_wrong_index_for_slot(spec, state):
|
||||
@never_bls
|
||||
def test_invalid_index(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
# off by one (with respect to valid range) on purpose
|
||||
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT
|
||||
@ -184,7 +154,7 @@ def test_mismatched_target_and_slot(spec, state):
|
||||
next_epoch(spec, state)
|
||||
next_epoch(spec, state)
|
||||
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
attestation = get_valid_attestation(spec, state, on_time=False)
|
||||
attestation.data.slot = attestation.data.slot - spec.SLOTS_PER_EPOCH
|
||||
|
||||
sign_attestation(spec, state, attestation)
|
||||
@ -197,9 +167,9 @@ def test_mismatched_target_and_slot(spec, state):
|
||||
def test_old_target_epoch(spec, state):
|
||||
assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
|
||||
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
|
||||
state.slot = spec.SLOTS_PER_EPOCH * 2 # target epoch will be too old to handle
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2) # target epoch will be too old to handle
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
@ -221,7 +191,7 @@ def test_future_target_epoch(spec, state):
|
||||
# manually add signature for correct participants
|
||||
attestation.signature = sign_aggregate_attestation(spec, state, attestation.data, participants)
|
||||
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
@ -230,7 +200,7 @@ def test_future_target_epoch(spec, state):
|
||||
@spec_state_test
|
||||
def test_new_source_epoch(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.source.epoch += 1
|
||||
|
||||
@ -243,7 +213,7 @@ def test_new_source_epoch(spec, state):
|
||||
@spec_state_test
|
||||
def test_source_root_is_target_root(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.source.root = attestation.data.target.root
|
||||
|
||||
@ -255,14 +225,15 @@ def test_source_root_is_target_root(spec, state):
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_invalid_current_source_root(spec, state):
|
||||
state.slot = spec.SLOTS_PER_EPOCH * 5
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 5)
|
||||
|
||||
state.finalized_checkpoint.epoch = 2
|
||||
|
||||
state.previous_justified_checkpoint = spec.Checkpoint(epoch=3, root=b'\x01' * 32)
|
||||
state.current_justified_checkpoint = spec.Checkpoint(epoch=4, root=b'\x32' * 32)
|
||||
|
||||
attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
attestation = get_valid_attestation(spec, state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1, on_time=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
# Test logic sanity checks:
|
||||
assert state.current_justified_checkpoint.root != state.previous_justified_checkpoint.root
|
||||
@ -280,7 +251,7 @@ def test_invalid_current_source_root(spec, state):
|
||||
@spec_state_test
|
||||
def test_bad_source_root(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.source.root = b'\x42' * 32
|
||||
|
||||
@ -292,8 +263,9 @@ def test_bad_source_root(spec, state):
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_empty_aggregation_bits(spec, state):
|
||||
next_slot(spec, state)
|
||||
attestation = get_valid_attestation(spec, state, empty=True)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
assert attestation.aggregation_bits == Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](
|
||||
*([0b0] * len(attestation.aggregation_bits)))
|
||||
@ -305,7 +277,7 @@ def test_empty_aggregation_bits(spec, state):
|
||||
@spec_state_test
|
||||
def test_too_many_aggregation_bits(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
# one too many bits
|
||||
attestation.aggregation_bits.append(0b0)
|
||||
@ -317,7 +289,7 @@ def test_too_many_aggregation_bits(spec, state):
|
||||
@spec_state_test
|
||||
def test_too_few_aggregation_bits(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](
|
||||
*([0b1] + [0b0] * (len(attestation.aggregation_bits) - 1)))
|
||||
|
@ -1,4 +1,7 @@
from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases
from eth2spec.test.context import (
    PHASE0, PHASE1,
    spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases
)
from eth2spec.test.helpers.attestations import sign_indexed_attestation
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, \
    get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data
@ -161,7 +164,7 @@ def test_same_data(spec, state):

    indexed_att_1 = attester_slashing.attestation_1
    att_2_data = get_attestation_2_data(spec, attester_slashing)
    if spec.fork == 'phase1':
    if spec.fork == PHASE1:
        indexed_att_1.attestation.data = att_2_data
    else:
        indexed_att_1.data = att_2_data
@ -199,7 +202,7 @@ def test_participants_already_slashed(spec, state):
# Some of the following tests are phase0 only: phase 1 lists participants with bitfields instead of index list.


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att1_bad_extra_index(spec, state):
@ -215,7 +218,7 @@ def test_att1_bad_extra_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att1_bad_replaced_index(spec, state):
@ -231,7 +234,7 @@ def test_att1_bad_replaced_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att2_bad_extra_index(spec, state):
@ -247,7 +250,7 @@ def test_att2_bad_extra_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att2_bad_replaced_index(spec, state):
@ -263,7 +266,7 @@ def test_att2_bad_replaced_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att1_duplicate_index_normal_signed(spec, state):
@ -283,7 +286,7 @@ def test_att1_duplicate_index_normal_signed(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att2_duplicate_index_normal_signed(spec, state):
@ -303,7 +306,7 @@ def test_att2_duplicate_index_normal_signed(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att1_duplicate_index_double_signed(spec, state):
@ -318,7 +321,7 @@ def test_att1_duplicate_index_double_signed(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
@always_bls
def test_att2_duplicate_index_double_signed(spec, state):
@ -333,7 +336,7 @@ def test_att2_duplicate_index_double_signed(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
def test_unsorted_att_1(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
@ -346,7 +349,7 @@ def test_unsorted_att_1(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_phases(['phase0'])
@with_phases([PHASE0])
@spec_state_test
def test_unsorted_att_2(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
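The phase0-only note above comes down to how participants are listed: a phase0 indexed attestation carries an explicit index list, while the phase1 wrapper exercised in this commit exposes participation as bits over the committee. A rough, self-contained sketch of that distinction (names are illustrative; the real helper is `get_indexed_attestation_participants`):

```python
# Illustrative stand-in for get_indexed_attestation_participants, assuming phase0
# stores an explicit index list and phase1 stores bits over the committee members.
def participants(indexed_att, fork, committee=()):
    if fork == 'phase0':
        return set(indexed_att.attesting_indices)
    return {index for bit, index in zip(indexed_att.attestation.aggregation_bits, committee) if bit}
```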
@ -2,7 +2,7 @@ from eth2spec.test.context import spec_state_test, expect_assertion_error, alway
from eth2spec.test.helpers.block_header import sign_block_header
from eth2spec.test.helpers.keys import privkeys
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
from eth2spec.test.helpers.state import get_balance
from eth2spec.test.helpers.state import get_balance, next_epoch


def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True):
@ -152,7 +152,7 @@ def test_proposer_is_withdrawn(spec, state):
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)

    # move 1 epoch into future, to allow for past withdrawable epoch
    state.slot += spec.SLOTS_PER_EPOCH
    next_epoch(spec, state)
    # set proposer withdrawable_epoch in past
    current_epoch = spec.get_current_epoch(state)
    proposer_index = proposer_slashing.signed_header_1.message.proposer_index
@ -2,6 +2,7 @@ from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
    run_epoch_processing_with, run_epoch_processing_to
)
from eth2spec.test.helpers.state import transition_to


def run_process_final_updates(spec, state):
@ -13,7 +14,8 @@ def run_process_final_updates(spec, state):
def test_eth1_vote_no_reset(spec, state):
    assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
    # skip ahead to the end of the epoch
    state.slot = spec.SLOTS_PER_EPOCH - 1
    transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)

    for i in range(state.slot + 1):  # add a vote for each skipped slot.
        state.eth1_data_votes.append(
            spec.Eth1Data(deposit_root=b'\xaa' * 32,
@ -2,6 +2,7 @@ from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
    run_epoch_processing_with
)
from eth2spec.test.helpers.state import transition_to


def run_process_just_and_fin(spec, state):
@ -23,7 +24,7 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
        raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")

    total_balance = spec.get_total_active_balance(state)
    remaining_balance = total_balance * 2 // 3
    remaining_balance = int(total_balance * 2 // 3)  # can become negative

    start_slot = spec.compute_start_slot_at_epoch(epoch)
    for slot in range(start_slot, start_slot + spec.SLOTS_PER_EPOCH):
@ -41,14 +42,15 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
            aggregation_bits = [0] * len(committee)
            for v in range(len(committee) * 2 // 3 + 1):
                if remaining_balance > 0:
                    remaining_balance -= state.validators[v].effective_balance
                    remaining_balance -= int(state.validators[v].effective_balance)
                    aggregation_bits[v] = 1
                else:
                    break

            # remove just one attester to make the marginal support insufficient
            # remove 1/5th of attesters so that support is insufficient
            if not sufficient_support:
                aggregation_bits[aggregation_bits.index(1)] = 0
                for i in range(max(len(committee) // 5, 1)):
                    aggregation_bits[i] = 0

            attestations.append(spec.PendingAttestation(
                aggregation_bits=aggregation_bits,
@ -81,7 +83,7 @@ def put_checkpoints_in_block_roots(spec, state, checkpoints):

def finalize_on_234(spec, state, epoch, sufficient_support):
    assert epoch > 4
    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
    transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1)  # skip ahead to just before epoch

    # 43210 -- epochs ago
    # 3210x -- justification bitfield indices
@ -116,7 +118,7 @@ def finalize_on_234(spec, state, epoch, sufficient_support):

def finalize_on_23(spec, state, epoch, sufficient_support):
    assert epoch > 3
    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
    transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1)  # skip ahead to just before epoch

    # 43210 -- epochs ago
    # 210xx -- justification bitfield indices (pre shift)
@ -194,7 +196,7 @@ def finalize_on_123(spec, state, epoch, sufficient_support):

def finalize_on_12(spec, state, epoch, sufficient_support, messed_up_target):
    assert epoch > 2
    state.slot = (spec.SLOTS_PER_EPOCH * epoch) - 1  # skip ahead to just before epoch
    transition_to(spec, state, spec.SLOTS_PER_EPOCH * epoch - 1)  # skip ahead to just before epoch

    # 43210 -- epochs ago
    # 210xx -- justification bitfield indices (pre shift)
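These helpers mock just enough attesting balance to cross (or barely miss) the justification threshold. A minimal sketch of that two-thirds check, with illustrative numbers (the real computation lives in the spec's justification processing, not in this diff):

```python
def has_sufficient_support(attesting_balance: int, total_active_balance: int) -> bool:
    # Justification requires at least two-thirds of the total active balance;
    # the cross-multiplied form keeps the comparison in integer arithmetic.
    return attesting_balance * 3 >= total_active_balance * 2

assert has_sufficient_support(67, 100)
assert not has_sufficient_support(66, 100)
```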
@ -1,4 +1,4 @@
from eth2spec.test.helpers.state import next_epoch
from eth2spec.test.helpers.state import next_epoch, next_slots
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with

@ -101,7 +101,7 @@ def test_activation_queue_sorting(spec, state):
    state.validators[mock_activations - 1].activation_eligibility_epoch = epoch

    # move state forward and finalize to allow for activations
    state.slot += spec.SLOTS_PER_EPOCH * 3
    next_slots(spec, state, spec.SLOTS_PER_EPOCH * 3)
    state.finalized_checkpoint.epoch = epoch + 1

    yield from run_process_registry_updates(spec, state)
@ -113,10 +113,10 @@ def test_activation_queue_sorting(spec, state):
    # the second last is at the end of the queue, and did not make the churn,
    # hence is not assigned an activation_epoch yet.
    assert state.validators[mock_activations - 2].activation_epoch == spec.FAR_FUTURE_EPOCH
    # the one at churn_limit - 1 did not make it, it was out-prioritized
    assert state.validators[churn_limit - 1].activation_epoch == spec.FAR_FUTURE_EPOCH
    # the one at churn_limit did not make it, it was out-prioritized
    assert state.validators[churn_limit].activation_epoch == spec.FAR_FUTURE_EPOCH
    # but the one in front of the above did
    assert state.validators[churn_limit - 2].activation_epoch != spec.FAR_FUTURE_EPOCH
    assert state.validators[churn_limit - 1].activation_epoch != spec.FAR_FUTURE_EPOCH


@with_all_phases
@ -131,7 +131,8 @@ def test_activation_queue_efficiency(spec, state):
    state.validators[i].activation_eligibility_epoch = epoch + 1

    # move state forward and finalize to allow for activations
    state.slot += spec.SLOTS_PER_EPOCH * 3
    next_slots(spec, state, spec.SLOTS_PER_EPOCH * 3)

    state.finalized_checkpoint.epoch = epoch + 1

    # Run first registry update. Do not yield test vectors
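The queue assertions above hinge on the per-epoch activation churn limit: only the first `churn_limit` validators in the sorted queue receive an activation epoch. A small sketch of that limit, assuming the phase0 constants `MIN_PER_EPOCH_CHURN_LIMIT = 4` and `CHURN_LIMIT_QUOTIENT = 2**16` (assumed values, not taken from this diff):

```python
MIN_PER_EPOCH_CHURN_LIMIT = 4    # assumed phase0 value
CHURN_LIMIT_QUOTIENT = 2 ** 16   # assumed phase0 value

def validator_churn_limit(active_validator_count: int) -> int:
    return max(MIN_PER_EPOCH_CHURN_LIMIT, active_validator_count // CHURN_LIMIT_QUOTIENT)

# With a minimal-config sized validator set the floor applies, so only 4 queued
# validators are dequeued per epoch; the one sitting at index `churn_limit` waits.
assert validator_churn_limit(64) == 4
```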
@ -1,8 +1,9 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test, with_all_phases, spec_test,
|
||||
misc_balances, with_custom_state,
|
||||
low_single_balance, zero_activation_threshold,
|
||||
single_phase,
|
||||
spec_state_test, spec_test,
|
||||
with_all_phases, with_phases, single_phase,
|
||||
with_custom_state,
|
||||
zero_activation_threshold,
|
||||
misc_balances, low_single_balance,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
@ -20,7 +21,34 @@ def run_process_rewards_and_penalties(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_rewards_and_penalties')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
def prepare_state_with_full_attestations(spec, state, empty=False):
|
||||
# Go to start of next epoch to ensure can have full participation
|
||||
next_epoch(spec, state)
|
||||
|
||||
start_slot = state.slot
|
||||
start_epoch = spec.get_current_epoch(state)
|
||||
next_epoch_start_slot = spec.compute_start_slot_at_epoch(start_epoch + 1)
|
||||
attestations = []
|
||||
for _ in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
||||
# create an attestation for each index in each slot in epoch
|
||||
if state.slot < next_epoch_start_slot:
|
||||
for committee_index in range(spec.get_committee_count_at_slot(state, state.slot)):
|
||||
attestation = get_valid_attestation(spec, state, index=committee_index, empty=empty, signed=True)
|
||||
attestations.append(attestation)
|
||||
# fill each created slot in state after inclusion delay
|
||||
if state.slot >= start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY:
|
||||
inclusion_slot = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
include_attestations = [att for att in attestations if att.data.slot == inclusion_slot]
|
||||
add_attestations_to_state(spec, state, include_attestations, state.slot)
|
||||
next_slot(spec, state)
|
||||
|
||||
assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
|
||||
return attestations
|
||||
|
||||
|
||||
@with_phases(['phase0'])
|
||||
@spec_state_test
|
||||
def test_genesis_epoch_no_attestations_no_penalties(spec, state):
|
||||
pre_state = state.copy()
|
||||
@ -33,7 +61,7 @@ def test_genesis_epoch_no_attestations_no_penalties(spec, state):
|
||||
assert state.balances[index] == pre_state.balances[index]
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases(['phase0'])
|
||||
@spec_state_test
|
||||
def test_genesis_epoch_full_attestations_no_rewards(spec, state):
|
||||
attestations = []
|
||||
@ -59,25 +87,6 @@ def test_genesis_epoch_full_attestations_no_rewards(spec, state):
|
||||
assert state.balances[index] == pre_state.balances[index]
|
||||
|
||||
|
||||
def prepare_state_with_full_attestations(spec, state, empty=False):
|
||||
attestations = []
|
||||
for slot in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
||||
# create an attestation for each slot in epoch
|
||||
if slot < spec.SLOTS_PER_EPOCH:
|
||||
attestation = get_valid_attestation(spec, state, empty=empty, signed=True)
|
||||
attestations.append(attestation)
|
||||
# fill each created slot in state after inclusion delay
|
||||
if slot - spec.MIN_ATTESTATION_INCLUSION_DELAY >= 0:
|
||||
include_att = attestations[slot - spec.MIN_ATTESTATION_INCLUSION_DELAY]
|
||||
add_attestations_to_state(spec, state, [include_att], state.slot)
|
||||
next_slot(spec, state)
|
||||
|
||||
assert spec.compute_epoch_at_slot(state.slot) == spec.GENESIS_EPOCH + 1
|
||||
assert len(state.previous_epoch_attestations) == spec.SLOTS_PER_EPOCH
|
||||
|
||||
return attestations
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_full_attestations(spec, state):
|
||||
@ -88,7 +97,7 @@ def test_full_attestations(spec, state):
|
||||
yield from run_process_rewards_and_penalties(spec, state)
|
||||
|
||||
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
||||
assert len(attesting_indices) > 0
|
||||
assert len(attesting_indices) == len(pre_state.validators)
|
||||
for index in range(len(pre_state.validators)):
|
||||
if index in attesting_indices:
|
||||
assert state.balances[index] > pre_state.balances[index]
|
||||
@ -168,6 +177,7 @@ def test_full_attestations_one_validaor_one_gwei(spec, state):
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_no_attestations_all_penalties(spec, state):
|
||||
# Move to next epoch to ensure rewards/penalties are processed
|
||||
next_epoch(spec, state)
|
||||
pre_state = state.copy()
|
||||
|
||||
@ -237,26 +247,14 @@ def test_duplicate_attestation(spec, state):
|
||||
@spec_state_test
|
||||
# Case when some eligible attestations are slashed. Modifies attesting_balance and consequently rewards/penalties.
|
||||
def test_attestations_some_slashed(spec, state):
|
||||
attestations = []
|
||||
for slot in range(spec.SLOTS_PER_EPOCH + spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
||||
# create an attestation for each slot in epoch
|
||||
if slot < spec.SLOTS_PER_EPOCH:
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
attestations.append(attestation)
|
||||
# fill each created slot in state after inclusion delay
|
||||
if slot - spec.MIN_ATTESTATION_INCLUSION_DELAY >= 0:
|
||||
include_att = attestations[slot - spec.MIN_ATTESTATION_INCLUSION_DELAY]
|
||||
add_attestations_to_state(spec, state, [include_att], state.slot)
|
||||
next_slot(spec, state)
|
||||
|
||||
attestations = prepare_state_with_full_attestations(spec, state)
|
||||
attesting_indices_before_slashings = list(spec.get_unslashed_attesting_indices(state, attestations))
|
||||
|
||||
# Slash maximum amount of validators allowed per epoch.
|
||||
for i in range(spec.MIN_PER_EPOCH_CHURN_LIMIT):
|
||||
spec.slash_validator(state, attesting_indices_before_slashings[i])
|
||||
|
||||
assert spec.compute_epoch_at_slot(state.slot) == spec.GENESIS_EPOCH + 1
|
||||
assert len(state.previous_epoch_attestations) == spec.SLOTS_PER_EPOCH
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
@ -267,6 +265,8 @@ def test_attestations_some_slashed(spec, state):
|
||||
assert len(attesting_indices_before_slashings) - len(attesting_indices) == spec.MIN_PER_EPOCH_CHURN_LIMIT
|
||||
for index in range(len(pre_state.validators)):
|
||||
if index in attesting_indices:
|
||||
# non-slashed attester should gain reward
|
||||
assert state.balances[index] > pre_state.balances[index]
|
||||
else:
|
||||
# Slashed non-proposer attester should have penalty
|
||||
assert state.balances[index] < pre_state.balances[index]
|
||||
|
@ -2,6 +2,7 @@ from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import (
    run_epoch_processing_with, run_epoch_processing_to
)
from eth2spec.test.helpers.state import next_epoch


def run_process_slashings(spec, state):
@ -79,7 +80,7 @@ def test_small_penalty(spec, state):
@spec_state_test
def test_scaled_penalties(spec, state):
    # skip to next epoch
    state.slot = spec.SLOTS_PER_EPOCH
    next_epoch(spec, state)

    # Also mock some previous slashings, so that the delta in the penalties computation is exercised.
    base = spec.EJECTION_BALANCE
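For orientation, the "delta" that mocking previous slashings changes is the proportional penalty applied in `process_slashings`. A hedged sketch of that formula as it stood around this spec version (the multiplier of 3 and the Gwei increment are assumptions, not read from this diff):

```python
EFFECTIVE_BALANCE_INCREMENT = 10 ** 9   # 1 ETH in Gwei (assumed constant)

def scaled_slashing_penalty(effective_balance: int, total_slashed: int, total_balance: int) -> int:
    # Penalty grows with the share of total balance slashed in the recent window.
    adjusted_total = min(total_slashed * 3, total_balance)
    increments = effective_balance // EFFECTIVE_BALANCE_INCREMENT
    return increments * adjusted_total // total_balance * EFFECTIVE_BALANCE_INCREMENT
```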
@ -2,16 +2,19 @@ from copy import deepcopy

from eth2spec.utils import bls

from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block, next_slot
from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_block, next_slot, next_epoch
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block, \
    transition_unsigned_block
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, get_indexed_attestation_participants
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.attestations import get_valid_attestation, fill_block_shard_transitions_by_attestations
from eth2spec.test.helpers.deposits import prepare_state_and_deposit

from eth2spec.test.context import spec_state_test, with_all_phases, expect_assertion_error, always_bls
from eth2spec.test.context import (
    spec_state_test, with_all_phases, expect_assertion_error, always_bls, with_phases,
    PHASE1
)


@with_all_phases
@ -303,7 +306,8 @@ def test_proposer_after_inactive_index(spec, state):
    state.validators[inactive_index].exit_epoch = spec.get_current_epoch(state)

    # skip forward, get brand new proposers
    state.slot = spec.SLOTS_PER_EPOCH * 2
    next_epoch(spec, state)
    next_epoch(spec, state)
    block = build_empty_block_for_next_slot(spec, state)
    state_transition_and_sign_block(spec, state, block)

@ -415,16 +419,18 @@ def test_deposit_top_up(spec, state):
@with_all_phases
@spec_state_test
def test_attestation(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    next_epoch(spec, state)

    yield 'pre', state

    attestation = get_valid_attestation(spec, state, signed=True)
    attestation = get_valid_attestation(spec, state, signed=True, on_time=True)

    # Add to state via block transition
    pre_current_attestations_len = len(state.current_epoch_attestations)
    attestation_block = build_empty_block(spec, state, state.slot + 1 + spec.MIN_ATTESTATION_INCLUSION_DELAY)
    attestation_block = build_empty_block(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
    attestation_block.body.attestations.append(attestation)
    if spec.fork == PHASE1:
        fill_block_shard_transitions_by_attestations(spec, state, attestation_block)
    signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)

    assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
@ -442,7 +448,9 @@ def test_attestation(spec, state):
    assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root


@with_all_phases
# In phase1 a committee is computed for PERSISTENT_COMMITTEE_PERIOD slots ago,
# exceeding the minimal-config randao mixes memory size.
@with_phases(['phase0'])
@spec_state_test
def test_voluntary_exit(spec, state):
    validator_index = spec.get_active_validator_indices(
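The `state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY` change above follows the attestation inclusion window. A tiny sketch of that window check, with assumed constants for illustration:

```python
MIN_ATTESTATION_INCLUSION_DELAY = 1   # assumed value
SLOTS_PER_EPOCH = 32                  # assumed value

def is_includable(attestation_slot: int, block_slot: int) -> bool:
    # An attestation for slot s may be included from s + delay up to s + SLOTS_PER_EPOCH.
    return attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= block_slot <= attestation_slot + SLOTS_PER_EPOCH
```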
@ -0,0 +1,46 @@
from eth2spec.test.context import (
    with_all_phases_except,
    spec_state_test,
    always_bls,
)
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.attestations import (
    run_attestation_processing,
    get_valid_late_attestation,
    get_valid_on_time_attestation,
)


@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_on_time_success(spec, state):
    attestation = get_valid_on_time_attestation(spec, state, signed=True)

    transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_on_time_empty_custody_bits_blocks(spec, state):
    attestation = get_valid_late_attestation(spec, state, signed=True)

    assert not any(attestation.custody_bits_blocks)

    transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)

    yield from run_attestation_processing(spec, state, attestation, False)


@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_late_with_custody_bits_blocks(spec, state):
    attestation = get_valid_on_time_attestation(spec, state, signed=True)

    transition_to(spec, state, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY + 1)

    yield from run_attestation_processing(spec, state, attestation, False)
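Read together, these three cases pin down one timing rule: only an attestation included exactly `MIN_ATTESTATION_INCLUSION_DELAY` slots after its slot may (and must) carry custody bits. A rough restatement, inferred from the tests rather than from any spec text in this diff:

```python
MIN_ATTESTATION_INCLUSION_DELAY = 1   # assumed value

def custody_bits_expected(attestation_slot: int, inclusion_slot: int) -> bool:
    # "On time" inclusion is exactly one inclusion delay after the attestation slot.
    return inclusion_slot == attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY
```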
@ -0,0 +1,73 @@
from eth2spec.test.context import (
    PHASE0,
    with_all_phases_except,
    spec_state_test,
    always_bls,
)
from eth2spec.test.helpers.crosslinks import run_crosslinks_processing
from eth2spec.test.helpers.shard_block import (
    build_attestation_with_shard_transition,
    build_shard_block,
    build_shard_transitions_till_slot,
)
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot


def run_basic_crosslink_tests(spec, state, target_len_offset_slot, valid=True):
    state = transition_to_valid_shard_slot(spec, state)
    # At the beginning, let `x = state.slot`, `state.shard_states[shard].slot == x - 1`
    slot_x = state.slot
    committee_index = spec.CommitteeIndex(0)
    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
    assert state.shard_states[shard].slot == slot_x - 1

    # Create SignedShardBlock
    body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE
    shard_block = build_shard_block(spec, state, shard, body=body, signed=True)
    shard_blocks = [shard_block]
    # Create a shard_transitions that would be included at beacon block `state.slot + target_len_offset_slot`
    shard_transitions = build_shard_transitions_till_slot(
        spec,
        state,
        shard_blocks={shard: shard_blocks},
        on_time_slot=state.slot + target_len_offset_slot,
    )
    shard_transition = shard_transitions[shard]
    # Create an attestation that would be included at beacon block `state.slot + target_len_offset_slot`
    attestation = build_attestation_with_shard_transition(
        spec,
        state,
        index=committee_index,
        on_time_slot=state.slot + target_len_offset_slot,
        shard_transition=shard_transition,
    )
    pre_gasprice = state.shard_states[shard].gasprice

    transition_to(spec, state, state.slot + target_len_offset_slot)
    pre_shard_state = state.shard_states[shard]

    yield from run_crosslinks_processing(spec, state, shard_transitions, [attestation], valid=valid)

    if valid:
        # After state transition,
        assert state.slot == slot_x + target_len_offset_slot
        shard_state = state.shard_states[shard]
        assert shard_state != pre_shard_state
        assert shard_state == shard_transition.shard_states[len(shard_transition.shard_states) - 1]

        if target_len_offset_slot == 1:
            assert shard_state.gasprice > pre_gasprice


@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_basic_crosslinks(spec, state):
    yield from run_basic_crosslink_tests(spec, state, target_len_offset_slot=1, valid=True)


@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_multiple_offset_slots(spec, state):
    yield from run_basic_crosslink_tests(spec, state, target_len_offset_slot=3, valid=True)
@ -1,5 +1,6 @@
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
from eth2spec.test.context import (
    PHASE0,
    with_all_phases_except,
    spec_state_test,
    expect_assertion_error,
@ -54,7 +55,7 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru
    yield 'post', state


@with_all_phases_except(['phase0'])
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_success(spec, state):
@ -64,7 +65,7 @@ def test_success(spec, state):
    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)


@with_all_phases_except(['phase0'])
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_reveal_too_early(spec, state):
@ -73,7 +74,7 @@ def test_reveal_too_early(spec, state):
    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_all_phases_except(['phase0'])
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_wrong_period(spec, state):
@ -82,7 +83,7 @@ def test_wrong_period(spec, state):
    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_all_phases_except(['phase0'])
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_late_reveal(spec, state):
@ -92,7 +93,7 @@ def test_late_reveal(spec, state):
    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)


@with_all_phases_except(['phase0'])
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_double_reveal(spec, state):
@ -104,7 +105,7 @@ def test_double_reveal(spec, state):
    yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)


@with_all_phases_except(['phase0'])
@with_all_phases_except([PHASE0])
@spec_state_test
@always_bls
def test_max_decrement(spec, state):
@ -2,6 +2,7 @@ from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
|
||||
from eth2spec.test.helpers.block import apply_empty_block
|
||||
from eth2spec.test.helpers.state import next_epoch, get_balance
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
@ -41,7 +42,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_all_phases_except(['phase0'])
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_success(spec, state):
|
||||
@ -50,7 +51,7 @@ def test_success(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal)
|
||||
|
||||
|
||||
@with_all_phases_except(['phase0'])
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_reveal_from_current_epoch(spec, state):
|
||||
@ -59,7 +60,7 @@ def test_reveal_from_current_epoch(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||
|
||||
|
||||
@with_all_phases_except(['phase0'])
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_reveal_from_past_epoch(spec, state):
|
||||
@ -70,7 +71,7 @@ def test_reveal_from_past_epoch(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||
|
||||
|
||||
@with_all_phases_except(['phase0'])
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_reveal_with_custody_padding(spec, state):
|
||||
@ -82,7 +83,7 @@ def test_reveal_with_custody_padding(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
|
||||
|
||||
|
||||
@with_all_phases_except(['phase0'])
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_reveal_with_custody_padding_minus_one(spec, state):
|
||||
@ -94,7 +95,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
|
||||
|
||||
|
||||
@with_all_phases_except(['phase0'])
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_double_reveal(spec, state):
|
||||
@ -115,7 +116,7 @@ def test_double_reveal(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False)
|
||||
|
||||
|
||||
@with_all_phases_except(['phase0'])
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_revealer_is_slashed(spec, state):
|
||||
@ -125,7 +126,7 @@ def test_revealer_is_slashed(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||
|
||||
|
||||
@with_all_phases_except(['phase0'])
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_far_future_epoch(spec, state):
|
||||
|
107
tests/core/pyspec/eth2spec/test/phase_1/sanity/test_blocks.py
Normal file
@ -0,0 +1,107 @@
|
||||
from typing import Dict, Sequence
|
||||
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
always_bls,
|
||||
)
|
||||
from eth2spec.test.helpers.block import build_empty_block
|
||||
from eth2spec.test.helpers.shard_block import (
|
||||
build_attestation_with_shard_transition,
|
||||
build_shard_block,
|
||||
build_shard_transitions_till_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.state import state_transition_and_sign_block, transition_to_valid_shard_slot
|
||||
|
||||
|
||||
def run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index, valid=True):
|
||||
shard_transitions = build_shard_transitions_till_slot(
|
||||
spec, state, shard_blocks, on_time_slot=state.slot + target_len_offset_slot
|
||||
)
|
||||
attestations = [
|
||||
build_attestation_with_shard_transition(
|
||||
spec,
|
||||
state,
|
||||
on_time_slot=state.slot + target_len_offset_slot,
|
||||
index=committee_index,
|
||||
shard_transition=shard_transitions[shard],
|
||||
)
|
||||
for shard in shard_blocks.keys()
|
||||
]
|
||||
|
||||
# Propose beacon block at slot `x + 1`
|
||||
beacon_block = build_empty_block(spec, state, slot=state.slot + target_len_offset_slot)
|
||||
beacon_block.body.attestations = attestations
|
||||
beacon_block.body.shard_transitions = shard_transitions
|
||||
|
||||
pre_shard_states = state.shard_states.copy()
|
||||
yield 'pre', state.copy()
|
||||
yield 'block', beacon_block
|
||||
state_transition_and_sign_block(spec, state, beacon_block)
|
||||
if valid:
|
||||
yield 'post', state
|
||||
else:
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
for shard in range(spec.get_active_shard_count(state)):
|
||||
post_shard_state = state.shard_states[shard]
|
||||
if shard in shard_blocks:
|
||||
# Shard state has been changed to state_transition result
|
||||
assert post_shard_state == shard_transitions[shard].shard_states[
|
||||
len(shard_transitions[shard].shard_states) - 1
|
||||
]
|
||||
assert beacon_block.slot == shard_transitions[shard].shard_states[0].slot + target_len_offset_slot
|
||||
assert post_shard_state.slot == state.slot - 1
|
||||
if len(shard_blocks[shard]) == 0:
|
||||
# `latest_block_root` is the same
|
||||
assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_process_beacon_block_with_normal_shard_transition(spec, state):
|
||||
state = transition_to_valid_shard_slot(spec, state)
|
||||
|
||||
target_len_offset_slot = 1
|
||||
committee_index = spec.CommitteeIndex(0)
|
||||
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
|
||||
assert state.shard_states[shard].slot == state.slot - 1
|
||||
|
||||
pre_gasprice = state.shard_states[shard].gasprice
|
||||
|
||||
# Create SignedShardBlock at slot `shard_state.slot + 1`
|
||||
body = b'\x56' * spec.MAX_SHARD_BLOCK_SIZE
|
||||
shard_block = build_shard_block(spec, state, shard, body=body, signed=True)
|
||||
shard_blocks: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
|
||||
|
||||
yield from run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index)
|
||||
|
||||
shard_state = state.shard_states[shard]
|
||||
|
||||
if target_len_offset_slot == 1 and len(shard_blocks) > 0:
|
||||
assert shard_state.gasprice > pre_gasprice
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_process_beacon_block_with_empty_proposal_transition(spec, state):
|
||||
state = transition_to_valid_shard_slot(spec, state)
|
||||
|
||||
target_len_offset_slot = 1
|
||||
committee_index = spec.CommitteeIndex(0)
|
||||
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
|
||||
assert state.shard_states[shard].slot == state.slot - 1
|
||||
|
||||
# No new shard block
|
||||
shard_blocks = {}
|
||||
|
||||
pre_gasprice = state.shard_states[shard].gasprice
|
||||
|
||||
yield from run_beacon_block_with_shard_blocks(spec, state, shard_blocks, target_len_offset_slot, committee_index)
|
||||
|
||||
if target_len_offset_slot == 1 and len(shard_blocks) > 0:
|
||||
assert state.shard_states[shard].gasprice > pre_gasprice
|
@ -1,5 +1,6 @@
from eth2spec.test.context import spec_state_test, never_bls, with_all_phases
from eth2spec.test.helpers.state import next_epoch, next_epoch_with_attestations
from eth2spec.test.context import spec_state_test, never_bls, with_all_phases, with_phases
from eth2spec.test.helpers.state import next_epoch
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
from eth2spec.test.helpers.block import apply_empty_block


@ -28,7 +29,7 @@ def check_finality(spec,
        assert state.finalized_checkpoint == prev_state.finalized_checkpoint


@with_all_phases
@with_phases(["phase0"])
@spec_state_test
@never_bls
def test_finality_no_updates_at_genesis(spec, state):
@ -0,0 +1,395 @@
|
||||
from eth2spec.test.context import spec_state_test, never_bls, with_all_phases
|
||||
from eth2spec.test.helpers.attestations import build_attestation_data
|
||||
from eth2spec.test.helpers.block import build_empty_block
|
||||
from eth2spec.test.helpers.deposits import prepare_state_and_deposit
|
||||
from eth2spec.test.helpers.keys import privkeys, pubkeys
|
||||
from eth2spec.test.helpers.state import next_epoch
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
||||
|
||||
|
||||
def run_get_signature_test(spec, state, obj, domain, get_signature_fn, privkey, pubkey):
|
||||
signature = get_signature_fn(state, obj, privkey)
|
||||
signing_root = spec.compute_signing_root(obj, domain)
|
||||
assert bls.Verify(pubkey, signing_root, signature)
|
||||
|
||||
|
||||
def run_get_committee_assignment(spec, state, epoch, validator_index, valid=True):
|
||||
try:
|
||||
assignment = spec.get_committee_assignment(state, epoch, validator_index)
|
||||
committee, committee_index, slot = assignment
|
||||
assert spec.compute_epoch_at_slot(slot) == epoch
|
||||
assert committee == spec.get_beacon_committee(state, slot, committee_index)
|
||||
assert committee_index < spec.get_committee_count_at_slot(state, slot)
|
||||
assert validator_index in committee
|
||||
assert valid
|
||||
except AssertionError:
|
||||
assert not valid
|
||||
else:
|
||||
assert valid
|
||||
|
||||
|
||||
def run_is_candidate_block(spec, eth1_block, period_start, success=True):
|
||||
assert success == spec.is_candidate_block(eth1_block, period_start)
|
||||
|
||||
|
||||
def get_min_new_period_epochs(spec):
|
||||
return int(
|
||||
spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 # to seconds
|
||||
/ spec.SECONDS_PER_SLOT / spec.SLOTS_PER_EPOCH
|
||||
)
|
||||
|
||||
|
||||
def get_mock_aggregate(spec):
|
||||
return spec.Attestation(
|
||||
data=spec.AttestationData(
|
||||
slot=10,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
#
|
||||
# Becoming a validator
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_check_if_validator_active(spec, state):
|
||||
active_validator_index = len(state.validators) - 1
|
||||
assert spec.check_if_validator_active(state, active_validator_index)
|
||||
new_validator_index = len(state.validators)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
deposit = prepare_state_and_deposit(spec, state, new_validator_index, amount, signed=True)
|
||||
spec.process_deposit(state, deposit)
|
||||
assert not spec.check_if_validator_active(state, new_validator_index)
|
||||
|
||||
|
||||
#
|
||||
# Validator assignments
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_get_committee_assignment_current_epoch(spec, state):
|
||||
epoch = spec.get_current_epoch(state)
|
||||
validator_index = len(state.validators) - 1
|
||||
run_get_committee_assignment(spec, state, epoch, validator_index, valid=True)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_get_committee_assignment_next_epoch(spec, state):
|
||||
epoch = spec.get_current_epoch(state) + 1
|
||||
validator_index = len(state.validators) - 1
|
||||
run_get_committee_assignment(spec, state, epoch, validator_index, valid=True)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_get_committee_assignment_out_bound_epoch(spec, state):
|
||||
epoch = spec.get_current_epoch(state) + 2
|
||||
validator_index = len(state.validators) - 1
|
||||
run_get_committee_assignment(spec, state, epoch, validator_index, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_is_proposer(spec, state):
|
||||
proposer_index = spec.get_beacon_proposer_index(state)
|
||||
assert spec.is_proposer(state, proposer_index)
|
||||
|
||||
proposer_index = (proposer_index + 1) % len(state.validators)
|
||||
assert not spec.is_proposer(state, proposer_index)
|
||||
|
||||
|
||||
#
|
||||
# Beacon chain responsibilities
|
||||
#
|
||||
|
||||
|
||||
# Block proposal
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_epoch_signature(spec, state):
|
||||
block = spec.BeaconBlock()
|
||||
privkey = privkeys[0]
|
||||
pubkey = pubkeys[0]
|
||||
domain = spec.get_domain(state, spec.DOMAIN_RANDAO, spec.compute_epoch_at_slot(block.slot))
|
||||
run_get_signature_test(
|
||||
spec=spec,
|
||||
state=state,
|
||||
obj=block,
|
||||
domain=domain,
|
||||
get_signature_fn=spec.get_epoch_signature,
|
||||
privkey=privkey,
|
||||
pubkey=pubkey,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_is_candidate_block(spec, state):
|
||||
period_start = spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 + 1000
|
||||
run_is_candidate_block(
|
||||
spec,
|
||||
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE),
|
||||
period_start,
|
||||
success=True,
|
||||
)
|
||||
run_is_candidate_block(
|
||||
spec,
|
||||
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE + 1),
|
||||
period_start,
|
||||
success=False,
|
||||
)
|
||||
run_is_candidate_block(
|
||||
spec,
|
||||
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2),
|
||||
period_start,
|
||||
success=True,
|
||||
)
|
||||
run_is_candidate_block(
|
||||
spec,
|
||||
spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE * 2 - 1),
|
||||
period_start,
|
||||
success=False,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_eth1_vote_default_vote(spec, state):
|
||||
min_new_period_epochs = get_min_new_period_epochs(spec)
|
||||
for _ in range(min_new_period_epochs):
|
||||
next_epoch(spec, state)
|
||||
|
||||
state.eth1_data_votes = ()
|
||||
eth1_chain = []
|
||||
eth1_data = spec.get_eth1_vote(state, eth1_chain)
|
||||
assert eth1_data == state.eth1_data
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_eth1_vote_consensus_vote(spec, state):
|
||||
min_new_period_epochs = get_min_new_period_epochs(spec)
|
||||
for _ in range(min_new_period_epochs + 2):
|
||||
next_epoch(spec, state)
|
||||
|
||||
period_start = spec.voting_period_start_time(state)
|
||||
votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD
|
||||
assert votes_length >= 3 # We need to have the majority vote
|
||||
state.eth1_data_votes = ()
|
||||
|
||||
block_1 = spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE - 1)
|
||||
block_2 = spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE)
|
||||
eth1_chain = [block_1, block_2]
|
||||
eth1_data_votes = []
|
||||
|
||||
# Only the first vote is for block_1
|
||||
eth1_data_votes.append(spec.get_eth1_data(block_1))
|
||||
# Other votes are for block_2
|
||||
for _ in range(votes_length - 1):
|
||||
eth1_data_votes.append(spec.get_eth1_data(block_2))
|
||||
|
||||
state.eth1_data_votes = eth1_data_votes
|
||||
eth1_data = spec.get_eth1_vote(state, eth1_chain)
|
||||
assert eth1_data.block_hash == block_2.hash_tree_root()
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_eth1_vote_tie(spec, state):
|
||||
min_new_period_epochs = get_min_new_period_epochs(spec)
|
||||
for _ in range(min_new_period_epochs + 1):
|
||||
next_epoch(spec, state)
|
||||
|
||||
period_start = spec.voting_period_start_time(state)
|
||||
votes_length = spec.get_current_epoch(state) % spec.EPOCHS_PER_ETH1_VOTING_PERIOD
|
||||
assert votes_length > 0 and votes_length % 2 == 0
|
||||
|
||||
state.eth1_data_votes = ()
|
||||
block_1 = spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE - 1)
|
||||
block_2 = spec.Eth1Block(timestamp=period_start - spec.SECONDS_PER_ETH1_BLOCK * spec.ETH1_FOLLOW_DISTANCE)
|
||||
eth1_chain = [block_1, block_2]
|
||||
eth1_data_votes = []
|
||||
# Half votes are for block_1, another half votes are for block_2
|
||||
for i in range(votes_length):
|
||||
if i % 2 == 0:
|
||||
block = block_1
|
||||
else:
|
||||
block = block_2
|
||||
eth1_data_votes.append(spec.get_eth1_data(block))
|
||||
|
||||
state.eth1_data_votes = eth1_data_votes
|
||||
eth1_data = spec.get_eth1_vote(state, eth1_chain)
|
||||
|
||||
# Tiebreak by smallest distance -> eth1_chain[0]
|
||||
assert eth1_data.block_hash == eth1_chain[0].hash_tree_root()
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_compute_new_state_root(spec, state):
|
||||
pre_state = state.copy()
|
||||
post_state = state.copy()
|
||||
block = build_empty_block(spec, state, state.slot + 1)
|
||||
state_root = spec.compute_new_state_root(state, block)
|
||||
|
||||
assert state_root != pre_state.hash_tree_root()
|
||||
assert state == pre_state
|
||||
|
||||
# dumb verification
|
||||
spec.process_slots(post_state, block.slot)
|
||||
spec.process_block(post_state, block)
|
||||
assert state_root == post_state.hash_tree_root()
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_block_signature(spec, state):
|
||||
privkey = privkeys[0]
|
||||
pubkey = pubkeys[0]
|
||||
block = build_empty_block(spec, state)
|
||||
domain = spec.get_domain(state, spec.DOMAIN_BEACON_PROPOSER, spec.compute_epoch_at_slot(block.slot))
|
||||
run_get_signature_test(
|
||||
spec=spec,
|
||||
state=state,
|
||||
obj=block,
|
||||
domain=domain,
|
||||
get_signature_fn=spec.get_block_signature,
|
||||
privkey=privkey,
|
||||
pubkey=pubkey,
|
||||
)
|
||||
|
||||
|
||||
# Attesting
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_attestation_signature(spec, state):
|
||||
privkey = privkeys[0]
|
||||
pubkey = pubkeys[0]
|
||||
attestation_data = spec.AttestationData(slot=10)
|
||||
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
|
||||
run_get_signature_test(
|
||||
spec=spec,
|
||||
state=state,
|
||||
obj=attestation_data,
|
||||
domain=domain,
|
||||
get_signature_fn=spec.get_attestation_signature,
|
||||
privkey=privkey,
|
||||
pubkey=pubkey,
|
||||
)
|
||||
|
||||
|
||||
# Attestation aggregation
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_slot_signature(spec, state):
|
||||
privkey = privkeys[0]
|
||||
pubkey = pubkeys[0]
|
||||
slot = spec.Slot(10)
|
||||
domain = spec.get_domain(state, spec.DOMAIN_SELECTION_PROOF, spec.compute_epoch_at_slot(slot))
|
||||
run_get_signature_test(
|
||||
spec=spec,
|
||||
state=state,
|
||||
obj=slot,
|
||||
domain=domain,
|
||||
get_signature_fn=spec.get_slot_signature,
|
||||
privkey=privkey,
|
||||
pubkey=pubkey,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_is_aggregator(spec, state):
|
||||
# TODO: we can test the probabilistic result against `TARGET_AGGREGATORS_PER_COMMITTEE`
|
||||
# if we have more validators and larger committee size
|
||||
slot = state.slot
|
||||
committee_index = 0
|
||||
has_aggregator = False
|
||||
beacon_committee = spec.get_beacon_committee(state, slot, committee_index)
|
||||
for validator_index in beacon_committee:
|
||||
privkey = privkeys[validator_index]
|
||||
slot_signature = spec.get_slot_signature(state, slot, privkey)
|
||||
if spec.is_aggregator(state, slot, committee_index, slot_signature):
|
||||
has_aggregator = True
|
||||
break
|
||||
assert has_aggregator
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_aggregate_signature(spec, state):
|
||||
attestations = []
|
||||
pubkeys = []
|
||||
slot = state.slot
|
||||
committee_index = 0
|
||||
attestation_data = build_attestation_data(spec, state, slot=slot, index=committee_index)
|
||||
beacon_committee = spec.get_beacon_committee(
|
||||
state,
|
||||
attestation_data.slot,
|
||||
attestation_data.index,
|
||||
)
|
||||
committee_size = len(beacon_committee)
|
||||
aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
|
||||
for i, validator_index in enumerate(beacon_committee):
|
||||
bits = aggregation_bits
|
||||
bits[i] = True
|
||||
attestations.append(
|
||||
spec.Attestation(
|
||||
data=attestation_data,
|
||||
aggregation_bits=bits,
|
||||
)
|
||||
)
|
||||
pubkeys.append(state.validators[validator_index].pubkey)
|
||||
pubkey = bls.AggregatePKs(pubkeys)
|
||||
signature = spec.get_aggregate_signature(attestations)
|
||||
domain = spec.get_domain(state, spec.DOMAIN_BEACON_ATTESTER, attestation_data.target.epoch)
|
||||
signing_root = spec.compute_signing_root(attestation_data, domain)
|
||||
assert bls.Verify(pubkey, signing_root, signature)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_aggregate_and_proof(spec, state):
|
||||
privkey = privkeys[0]
|
||||
aggregator_index = spec.ValidatorIndex(10)
|
||||
aggregate = get_mock_aggregate(spec)
|
||||
aggregate_and_proof = spec.get_aggregate_and_proof(state, aggregator_index, aggregate, privkey)
|
||||
assert aggregate_and_proof.aggregator_index == aggregator_index
|
||||
assert aggregate_and_proof.aggregate == aggregate
|
||||
assert aggregate_and_proof.selection_proof == spec.get_slot_signature(state, aggregate.data.slot, privkey)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_get_aggregate_and_proof_signature(spec, state):
|
||||
privkey = privkeys[0]
|
||||
pubkey = pubkeys[0]
|
||||
aggregate = get_mock_aggregate(spec)
|
||||
aggregate_and_proof = spec.get_aggregate_and_proof(state, spec.ValidatorIndex(1), aggregate, privkey)
|
||||
domain = spec.get_domain(state, spec.DOMAIN_AGGREGATE_AND_PROOF, spec.compute_epoch_at_slot(aggregate.data.slot))
|
||||
run_get_signature_test(
|
||||
spec=spec,
|
||||
state=state,
|
||||
obj=aggregate_and_proof,
|
||||
domain=domain,
|
||||
get_signature_fn=spec.get_aggregate_and_proof_signature,
|
||||
privkey=privkey,
|
||||
pubkey=pubkey,
|
||||
)
|
@ -51,3 +51,8 @@ def Sign(SK, message):
@only_with_bls(alt_return=STUB_COORDINATES)
def signature_to_G2(signature):
    return _signature_to_G2(signature)


@only_with_bls(alt_return=STUB_PUBKEY)
def AggregatePKs(pubkeys):
    return bls._AggregatePKs(pubkeys)
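A hypothetical usage sketch for the new helper, mirroring how the validator tests later combine it with `bls.Verify` (inputs are placeholders; when BLS is switched off in tests the stubbed return values apply):

```python
from eth2spec.utils import bls

def verify_aggregate(pubkeys, signing_root, aggregate_signature) -> bool:
    # Combine the participants' pubkeys, then verify the aggregate signature once.
    aggregate_pubkey = bls.AggregatePKs(pubkeys)
    return bls.Verify(aggregate_pubkey, signing_root, aggregate_signature)
```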
@ -1,28 +1,17 @@
from hashlib import sha256
from typing import Dict, Union

ZERO_BYTES32 = b'\x00' * 32


def _hash(x):
def _hash(x: Union[bytes, bytearray, memoryview]) -> bytes:
    return sha256(x).digest()


# Minimal collection of (key, value) pairs, for fast hash-retrieval, to save on repetitive computation cost.
# Key = the hash input
# Value = the hash output
hash_cache = []
hash_cache: Dict[bytes, bytes] = {}


def add_zero_hashes_to_cache():
    zerohashes = [(None, ZERO_BYTES32)]
    for layer in range(1, 32):
        k = zerohashes[layer - 1][1] + zerohashes[layer - 1][1]
        zerohashes.append((k, _hash(k)))
    hash_cache.extend(zerohashes[1:])


def hash(x):
    for (k, h) in hash_cache:
        if x == k:
            return h
def hash(x: bytes) -> bytes:
    if x in hash_cache:
        return hash_cache[x]
    return _hash(x)
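A minimal usage sketch for the dict-backed cache above (illustrative only; the manual cache entry is just to show the lookup path):

```python
key = b'\x01' * 64
hash_cache[key] = _hash(key)                       # pre-populate one entry
assert hash(key) == _hash(key)                     # served by a dictionary lookup
assert hash(b'\x02' * 64) == _hash(b'\x02' * 64)   # uncached inputs fall through to sha256
```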
@ -18,11 +18,11 @@ A YAML-encoded `BeaconState`, the state before applying the operation.

Also available as `pre.ssz`.

### `<operation-name>.yaml`
### `<input-name>.yaml`

A YAML-encoded operation object, e.g. a `ProposerSlashing`, or `Deposit`.

Also available as `<operation-name>.ssz`.
Also available as `<input-name>.ssz`.

### `post.yaml`

@ -39,14 +39,14 @@ This excludes the other parts of the block-transition.

Operations:

| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* |
|-------------------------|----------------------|----------------------|--------------------------------------------------------|
| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` |
| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` |
| `block_header` | `Block` | **`block`** | `process_block_header(state, block)` |
| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` |
| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
| `voluntary_exit` | `VoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` |
| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* |
|-------------------------|-----------------------|----------------------|--------------------------------------------------------|
| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` |
| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` |
| `block_header` | `BeaconBlock` | **`block`** | `process_block_header(state, block)` |
| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` |
| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
| `voluntary_exit` | `SignedVoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` |

Note that `block_header` is not strictly an operation (and is a full `BeaconBlock`), but processed in the same manner, and hence included here.
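As a hedged illustration of how a client-side runner might consume one such case (the `load` decoder is a placeholder for whatever YAML/SSZ reader the consuming client uses):

```python
from pathlib import Path

def run_operation_case(case_dir: Path, input_name: str, process_fn, load) -> None:
    pre = load(case_dir / 'pre.yaml')
    operation = load(case_dir / f'{input_name}.yaml')
    post_file = case_dir / 'post.yaml'
    try:
        process_fn(pre, operation)        # e.g. process_attestation(state, attestation)
    except AssertionError:
        assert not post_file.exists()     # rejected operations ship without a post state
        return
    assert pre == load(post_file)         # accepted operations must match post.yaml
```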
@ -25,7 +25,7 @@ Also available as `pre.ssz`.
A series of files, with `<index>` in range `[0, blocks_count)`. Blocks need to be processed in order,
following the main transition function (i.e. process slot and epoch transitions in between blocks as normal).

Each file is a YAML-encoded `BeaconBlock`.
Each file is a YAML-encoded `SignedBeaconBlock`.

Each block is also available as `blocks_<index>.ssz`
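Similarly, a rough sketch of consuming a sanity blocks case (again with placeholder `load` and `state_transition` callables):

```python
from pathlib import Path

def run_sanity_blocks_case(case_dir: Path, blocks_count: int, state_transition, load) -> None:
    state = load(case_dir / 'pre.yaml')
    for index in range(blocks_count):                  # apply blocks strictly in index order
        signed_block = load(case_dir / f'blocks_{index}.yaml')
        state = state_transition(state, signed_block)
    post_file = case_dir / 'post.yaml'
    if post_file.exists():                             # absent post means the chain must be rejected earlier
        assert state == load(post_file)
```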
|
@ -13,6 +13,7 @@ from gen_base import gen_runner, gen_typing
from py_ecc import bls
from hashlib import sha256

from eth2spec.test.context import PHASE0


def hash(x):
    return sha256(x).digest()
@ -202,7 +203,7 @@ def create_provider(handler_name: str,
            print(data)
            (case_name, case_content) = data
            yield gen_typing.TestCase(
                fork_name='phase0',
                fork_name=PHASE0,
                runner_name='bls',
                handler_name=handler_name,
                suite_name='small',
@ -1,3 +1,4 @@
py_ecc==2.0.0
eth-utils==1.6.0
../../core/gen_helpers
../../../
@ -5,7 +5,7 @@ Epoch processing covers the sub-transitions during an epoch change.
An epoch-processing test-runner can consume these sub-transition test-suites,
and handle different kinds of epoch sub-transitions by processing the cases using the specified test handler.

Information on the format of the tests can be found in the [epoch-processing test formats documentation](../../specs/test_formats/epoch_processing/README.md).
Information on the format of the tests can be found in the [epoch-processing test formats documentation](../../formats/epoch_processing/README.md).
@ -13,6 +13,7 @@ from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from importlib import reload
from eth2spec.config import config_util
from eth2spec.test.context import PHASE0


def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -28,7 +29,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
            runner_name='epoch_processing',
            handler_name=handler_name,
            src=tests_src,
            fork_name='phase0'
            fork_name=PHASE0,
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
@ -1,5 +1,6 @@
from typing import Iterable

from eth2spec.test.context import PHASE0
from eth2spec.test.genesis import test_initialization, test_validity

from gen_base import gen_runner, gen_typing
@ -21,7 +22,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
            runner_name='genesis',
            handler_name=handler_name,
            src=tests_src,
            fork_name='phase0'
            fork_name=PHASE0,
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
@ -6,7 +6,7 @@ Operations (or "transactions" in previous spec iterations),
An operation test-runner can consume these operation test-suites,
and handle different kinds of operations by processing the cases using the specified test handler.

Information on the format of the tests can be found in the [operations test formats documentation](../../specs/test_formats/operations/README.md).
Information on the format of the tests can be found in the [operations test formats documentation](../../formats/operations/README.md).
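For a single operation case, a sketch of how success and expected-failure cases might be handled; treating a missing post state as "processing must fail" and the exact exception types are assumptions for illustration:

```python
# Illustrative sketch: apply one operation case; absence of a post state is taken
# to mean the operation must be rejected by the pyspec.
from eth2spec.phase0 import spec

def run_proposer_slashing_case(state, proposer_slashing, expected_post_state=None):
    if expected_post_state is None:
        try:
            spec.process_proposer_slashing(state, proposer_slashing)
        except (AssertionError, IndexError):  # typical pyspec failure modes (assumed)
            return
        raise AssertionError("operation should have been rejected")
    spec.process_proposer_slashing(state, proposer_slashing)
    assert spec.hash_tree_root(state) == spec.hash_tree_root(expected_post_state)
```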
@ -15,6 +15,7 @@ from importlib import reload
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0


def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
@ -30,7 +31,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
runner_name='operations',
handler_name=handler_name,
src=tests_src,
fork_name='phase0'
fork_name=PHASE0,
)

return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
@ -2,7 +2,7 @@

Sanity tests cover regular state-transitions in a common block-list format, to ensure the basics work.

Information on the format of the tests can be found in the [sanity test formats documentation](../../specs/test_formats/sanity/README.md).
Information on the format of the tests can be found in the [sanity test formats documentation](../../formats/sanity/README.md).
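Besides block-list cases, the sanity runner also has a `slots` handler (see the `test_slots` import in the generator diff below). A minimal sketch of the transition it exercises, with the case layout assumed for illustration:

```python
# Illustrative sketch: a sanity "slots" case advances the state a given number of
# empty slots (no blocks), using the pyspec helper.
from eth2spec.phase0 import spec

def run_slots_case(state, slots):
    spec.process_slots(state, state.slot + slots)  # mutates state in place
    return state
```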
@ -4,7 +4,8 @@ from importlib import reload
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests

from eth2spec.test.sanity import test_blocks, test_slots
from eth2spec.test.context import PHASE0
from eth2spec.test.phase_0.sanity import test_blocks, test_slots
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
@ -23,7 +24,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
runner_name='sanity',
handler_name=handler_name,
src=tests_src,
fork_name='phase0'
fork_name=PHASE0,
)

return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
@ -6,6 +6,7 @@ from gen_base import gen_runner, gen_typing

from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec
from eth2spec.test.context import PHASE0


def shuffling_case_fn(seed, count):
@ -37,7 +38,7 @@ def create_provider(config_name: str) -> gen_typing.TestProvider:
def cases_fn() -> Iterable[gen_typing.TestCase]:
for (case_name, case_fn) in shuffling_test_cases():
yield gen_typing.TestCase(
fork_name='phase0',
fork_name=PHASE0,
runner_name='shuffling',
handler_name='core',
suite_name='shuffle',
@ -6,6 +6,7 @@ import ssz_bitvector
import ssz_boolean
import ssz_uints
import ssz_container
from eth2spec.test.context import PHASE0


def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typing.TestProvider:
@ -16,7 +17,7 @@ def create_provider(handler_name: str, suite_name: str, case_maker) -> gen_typin
def cases_fn() -> Iterable[gen_typing.TestCase]:
for (case_name, case_fn) in case_maker():
yield gen_typing.TestCase(
fork_name='phase0',
fork_name=PHASE0,
runner_name='ssz_generic',
handler_name=handler_name,
suite_name=suite_name,
@ -3,4 +3,4 @@
The purpose of this test-generator is to provide test-vectors for the most important applications of SSZ:
the serialization and hashing of Eth2 data types.

Test-format documentation can be found [here](../../specs/test_formats/ssz_static/README.md).
Test-format documentation can be found [here](../../formats/ssz_static/README.md).
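A sketch of the check a consumer performs per ssz_static case; the case fields (serialized bytes plus expected root) are assumed here, while `serialize` and `hash_tree_root` are the pyspec SSZ helpers imported in the generator diff below:

```python
# Illustrative sketch: verify one ssz_static case for a pyspec container value.
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, serialize

def check_ssz_static_case(value, expected_serialized, expected_root):
    # `value` is the decoded container; serialization and hashing must both match.
    assert serialize(value) == expected_serialized
    assert hash_tree_root(value) == expected_root
```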
@ -8,6 +8,7 @@ from gen_base import gen_runner, gen_typing
from eth2spec.debug import random_value, encode
from eth2spec.config import config_util
from eth2spec.phase0 import spec
from eth2spec.test.context import PHASE0
from eth2spec.utils.ssz.ssz_typing import Container
from eth2spec.utils.ssz.ssz_impl import (
    hash_tree_root,
@ -44,7 +45,7 @@ def ssz_static_cases(seed: int, name, ssz_type, mode: random_value.Randomization

for i in range(count):
yield gen_typing.TestCase(
fork_name='phase0',
fork_name=PHASE0,
runner_name='ssz_static',
handler_name=name,
suite_name=f"ssz_{random_mode_name}{'_chaos' if chaos else ''}",