Merge branch 'dev' into lightclient-sync-exe
commit 36b6f8c3bf
@@ -35,13 +35,13 @@ commands:
    description: "Restore the cache with pyspec keys"
    steps:
      - restore_cached_venv:
          venv_name: v22-pyspec
          venv_name: v24-pyspec
          reqs_checksum: cache-{{ checksum "setup.py" }}
  save_pyspec_cached_venv:
    description: Save a venv into a cache with pyspec keys"
    steps:
      - save_cached_venv:
          venv_name: v22-pyspec
          venv_name: v24-pyspec
          reqs_checksum: cache-{{ checksum "setup.py" }}
          venv_path: ./venv
  restore_deposit_contract_tester_cached_venv:
@@ -216,15 +216,17 @@ workflows:
      - lint:
          requires:
            - test
      - install_deposit_contract_web3_tester:
          requires:
            - checkout_specs
      - test_deposit_contract_web3_tests:
          requires:
            - install_deposit_contract_web3_tester
      # NOTE: Since phase 0 has been launched, we disabled the deposit contract tests.
      # - install_deposit_contract_web3_tester:
      #     requires:
      #       - checkout_specs
      # - test_deposit_contract_web3_tests:
      #     requires:
      #       - install_deposit_contract_web3_tester
  build_and_test_deposit_contract:
    jobs:
      - build_deposit_contract
      - test_deposit_contract:
          requires:
            - build_deposit_contract
      # NOTE: Since phase 0 has been launched, we disabled the deposit contract tests.
      # - test_deposit_contract:
      #     requires:
      #       - build_deposit_contract
Makefile (12 changes)
@@ -82,19 +82,19 @@ pyspec:

# installs the packages to run pyspec tests
install_test:
    python3.8 -m venv venv; . venv/bin/activate; pip3 install .[lint]; pip3 install -e .[test]
    python3 -m venv venv; . venv/bin/activate; python3 -m pip install .[lint]; python3 -m pip install -e .[test]

test: pyspec
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
    python -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.lightclient_patch.spec -cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
    python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.lightclient_patch.spec -cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec

find_test: pyspec
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
    python -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.lightclient_patch.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
    python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.lightclient_patch.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec

citest: pyspec
    mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \
    python -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec
    python3 -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec

open_cov:
    ((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) &

@@ -133,11 +133,11 @@ test_deposit_contract:
    dapp test -v --fuzz-runs 5

install_deposit_contract_web3_tester:
    cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt
    cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; python3 -m pip install -r requirements.txt

test_deposit_contract_web3_tests:
    cd $(DEPOSIT_CONTRACT_TESTER_DIR); . venv/bin/activate; \
    python -m pytest .
    python3 -m pytest .

# Runs a generator, identified by param 1
define run_generator
README.md (21 changes)
@@ -11,29 +11,28 @@ This repository hosts the current Eth2 specifications. Discussions about design

[![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec)

Core specifications for Eth2 clients be found in [specs](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:

### Phase 0

* [The Beacon Chain](specs/phase0/beacon-chain.md)
* [Beacon Chain Fork Choice](specs/phase0/fork-choice.md)
* [Deposit Contract](specs/phase0/deposit-contract.md)
* [Honest Validator](specs/phase0/validator.md)
* [P2P Networking](specs/phase0/p2p-interface.md)

### Phase 1
* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
* [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
* [Custody Game](specs/phase1/custody-game.md)
* [Shard Transition and Fraud Proofs](specs/phase1/shard-transition.md)
* [Light client syncing protocol](specs/phase1/light-client-sync.md)
* [Beacon Chain Fork Choice for Shards](specs/phase1/fork-choice.md)
### Light clients

### Phase 2
* [Beacon chain changes](specs/lightclient/beacon-chain.md)
* [Light client sync protocol](specs/lightclient/sync-protocol.md)

Phase 2 is still actively in R&D and does not yet have any formal specifications.
### Sharding

See the [Eth2 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) for current progress, discussions, and definitions regarding this work.
The sharding spec is still actively in R&D; see the most recent available pull request [here](https://github.com/ethereum/eth2.0-specs/pull/2146) and some technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD).

### Merge

The merge is still actively in R&D; see an [ethresear.ch](https://ethresear.ch) post describing the proposed basic mechanism [here](https://ethresear.ch/t/the-eth1-eth2-transition/6265) and the section of [ethereum.org](https://ethereum.org) describing the merge at a high level [here](https://ethereum.org/en/eth2/docking/).

### Accompanying documents can be found in [specs](specs) and include:
@@ -2,12 +2,22 @@

CONFIG_NAME: "mainnet"

# Updated penalty values
# ---------------------------------------------------------------
# 3 * 2**24) (= 50,331,648)
HF1_INACTIVITY_PENALTY_QUOTIENT: 50331648
# 2**6 (= 64)
HF1_MIN_SLASHING_PENALTY_QUOTIENT: 64
# 2
HF1_PROPORTIONAL_SLASHING_MULTIPLIER: 2


# Misc
# ---------------------------------------------------------------
# 2**10 (=1,024)
SYNC_COMMITTEE_SIZE: 1024
# 2**6 (=64)
SYNC_COMMITTEE_PUBKEY_AGGREGATES_SIZE: 64
SYNC_SUBCOMMITTEE_SIZE: 64


# Time parameters
@@ -2,18 +2,28 @@

CONFIG_NAME: "minimal"

# Updated penalty values
# ---------------------------------------------------------------
# 3 * 2**24) (= 50,331,648)
HF1_INACTIVITY_PENALTY_QUOTIENT: 50331648
# 2**6 (= 64)
HF1_MIN_SLASHING_PENALTY_QUOTIENT: 64
# 2
HF1_PROPORTIONAL_SLASHING_MULTIPLIER: 2


# Misc
# ---------------------------------------------------------------
# [customized]
SYNC_COMMITTEE_SIZE: 64
SYNC_COMMITTEE_SIZE: 32
# [customized]
SYNC_COMMITTEE_PUBKEY_AGGREGATES_SIZE: 16
SYNC_SUBCOMMITTEE_SIZE: 16


# Time parameters
# ---------------------------------------------------------------
# 2**8 (= 256)
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256
# [customized]
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
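For scale, a back-of-the-envelope conversion of the sync committee period into wall-clock time, assuming the usual mainnet `SLOTS_PER_EPOCH = 32` and `SECONDS_PER_SLOT = 12` (neither is redefined in this diff):

```python
# Rough wall-clock duration of one sync committee period under mainnet timing.
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256   # default value above
SLOTS_PER_EPOCH = 32                     # assumed mainnet value
SECONDS_PER_SLOT = 12                    # assumed mainnet value

seconds = EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT
print(seconds, round(seconds / 3600, 1))  # 98304 seconds, roughly 27.3 hours
```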

# Signature domains
setup.py (11 changes)
@@ -510,7 +510,7 @@ class PySpecCommand(Command):
                specs/phase1/beacon-chain.md
                specs/phase1/shard-transition.md
                specs/phase1/fork-choice.md
                specs/phase1/phase1-fork.md
                specs/phase1/fork.md
                specs/phase1/shard-fork-choice.md
                specs/phase1/validator.md
            """
@@ -521,7 +521,7 @@ class PySpecCommand(Command):
                specs/phase0/validator.md
                specs/phase0/weak-subjectivity.md
                specs/lightclient/beacon-chain.md
                specs/lightclient/lightclient-fork.md
                specs/lightclient/fork.md
                specs/lightclient/sync-protocol.md
            """
        else:
@@ -623,13 +623,12 @@ setup(
    url="https://github.com/ethereum/eth2.0-specs",
    include_package_data=False,
    package_data={'configs': ['*.yaml'],
                  'specs': ['**/*.md'],
                  'eth2spec': ['VERSION.txt']},
    package_dir={
        "eth2spec": "tests/core/pyspec/eth2spec",
        "configs": "configs",
        "specs": "specs"
        "specs": "specs",
    },
    packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
    py_modules=["eth2spec"],
@@ -643,8 +642,8 @@ setup(
        "eth-utils>=1.3.0,<2",
        "eth-typing>=2.1.0,<3.0.0",
        "pycryptodome==3.9.4",
        "py_ecc==5.0.0",
        "milagro_bls_binding==1.5.0",
        "py_ecc==5.2.0",
        "milagro_bls_binding==1.6.3",
        "dataclasses==0.6",
        "remerkleable==0.1.18",
        "ruamel.yaml==0.16.5",
@@ -1,4 +1,4 @@
# Ethereum 2.0 Light Client Support
# Ethereum 2.0 HF1

## Table of contents

@@ -7,10 +7,14 @@
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
- [Configuration](#configuration)
- [Constants](#constants-1)
- [Participation flag indices](#participation-flag-indices)
- [Participation flag fractions](#participation-flag-fractions)
- [Misc](#misc)
- [Configuration](#configuration)
- [Updated penalty values](#updated-penalty-values)
- [Misc](#misc-1)
- [Time parameters](#time-parameters)
- [Domain types](#domain-types)
- [Containers](#containers)

@@ -22,42 +26,98 @@
- [Helper functions](#helper-functions)
- [`Predicates`](#predicates)
- [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
- [Misc](#misc-2)
- [`get_flag_indices_and_numerators`](#get_flag_indices_and_numerators)
- [`add_flag`](#add_flag)
- [`has_flag`](#has_flag)
- [Beacon state accessors](#beacon-state-accessors)
- [`get_sync_committee_indices`](#get_sync_committee_indices)
- [`get_sync_committee`](#get_sync_committee)
- [`get_base_reward`](#get_base_reward)
- [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)
- [`get_flag_deltas`](#get_flag_deltas)
- [New `get_inactivity_penalty_deltas`](#new-get_inactivity_penalty_deltas)
- [Beacon state mutators](#beacon-state-mutators)
- [New `slash_validator`](#new-slash_validator)
- [Block processing](#block-processing)
- [Modified `process_attestation`](#modified-process_attestation)
- [New `process_deposit`](#new-process_deposit)
- [Sync committee processing](#sync-committee-processing)
- [Epoch processing](#epoch-processing)
- [Components of attestation deltas](#components-of-attestation-deltas)
- [Final updates](#final-updates)
- [Justification and finalization](#justification-and-finalization)
- [Rewards and penalties](#rewards-and-penalties)
- [Slashings](#slashings)
- [Participation flags updates](#participation-flags-updates)
- [Sync committee updates](#sync-committee-updates)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This is a standalone beacon chain patch adding light client support via sync committees.
This is a patch implementing the first hard fork to the beacon chain, tentatively named HF1 pending a permanent name.
It has four main features:

* Light client support via sync committees
* Incentive accounting reforms, reducing spec complexity
  and [TODO] reducing the cost of processing chains that have very little or zero participation for a long span of epochs
* Update penalty configuration values, moving them toward their planned maximally punitive configuration
* Fork choice rule changes to address weaknesses recently discovered in the existing fork choice

## Custom types

| Name | SSZ equivalent | Description |
| - | - | - |
| `ParticipationFlags` | `uint8` | A succinct representation of 8 boolean participation flags |

## Constants

| Name | Value |
| - | - |
| `BASE_REWARDS_PER_EPOCH` | `uint64(5)` |

## Configuration

### Constants
### Participation flag indices

| Name | Value |
| - | - |
| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |
| `TIMELY_HEAD_FLAG_INDEX` | `0` |
| `TIMELY_SOURCE_FLAG_INDEX` | `1` |
| `TIMELY_TARGET_FLAG_INDEX` | `2` |

### Participation flag fractions

| Name | Value |
| - | - |
| `TIMELY_HEAD_FLAG_NUMERATOR` | `12` |
| `TIMELY_SOURCE_FLAG_NUMERATOR` | `12` |
| `TIMELY_TARGET_FLAG_NUMERATOR` | `32` |
| `FLAG_DENOMINATOR` | `64` |

**Note**: The participation flag fractions add up to 7/8.
The remaining 1/8 is for proposer incentives and other future micro-incentives.
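A quick sanity check on those numbers (nothing here beyond the table above):

```python
from fractions import Fraction

FLAG_DENOMINATOR = 64
numerators = {"head": 12, "source": 12, "target": 32}

total = sum(Fraction(n, FLAG_DENOMINATOR) for n in numerators.values())
assert total == Fraction(7, 8)       # 56/64: the attester share
assert 1 - total == Fraction(1, 8)   # remainder reserved for the proposer and future incentives
```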

### Misc

| Name | Value |
| - | - |
| `SYNC_COMMITTEE_SIZE` | `uint64(2**10)` (= 1024) |
| `SYNC_COMMITTEE_PUBKEY_AGGREGATES_SIZE` | `uint64(2**6)` (= 64) |
| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |

## Configuration

### Updated penalty values

This patch updates a few configuration values to move penalty constants toward their final, maximum security values.

*Note*: The spec does *not* override previous configuration values but instead creates new values and replaces usage throughout.

| Name | Value |
| - | - |
| `HF1_INACTIVITY_PENALTY_QUOTIENT` | `uint64(3 * 2**24)` (= 50,331,648) |
| `HF1_MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**6)` (=64) |
| `HF1_PROPORTIONAL_SLASHING_MULTIPLIER` | `uint64(2)` |

### Misc

| Name | Value |
| - | - |
| `SYNC_COMMITTEE_SIZE` | `uint64(2**10)` (= 1,024) |
| `SYNC_SUBCOMMITTEE_SIZE` | `uint64(2**6)` (= 64) |

### Time parameters

@@ -81,17 +141,55 @@ order and append any additional fields to the end.
#### `BeaconBlockBody`

```python
class BeaconBlockBody(phase0.BeaconBlockBody):
class BeaconBlockBody(Container):
    randao_reveal: BLSSignature
    eth1_data: Eth1Data  # Eth1 data vote
    graffiti: Bytes32  # Arbitrary data
    # Operations
    proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
    attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
    attestations: List[Attestation, MAX_ATTESTATIONS]
    deposits: List[Deposit, MAX_DEPOSITS]
    voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
    # Sync committee aggregate signature
    sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE]
    sync_committee_signature: BLSSignature
    sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE]  # [New in HF1]
    sync_committee_signature: BLSSignature  # [New in HF1]
```

#### `BeaconState`

```python
class BeaconState(phase0.BeaconState):
    # Sync committees
class BeaconState(Container):
    # Versioning
    genesis_time: uint64
    genesis_validators_root: Root
    slot: Slot
    fork: Fork
    # History
    latest_block_header: BeaconBlockHeader
    block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
    state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
    historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
    # Eth1
    eth1_data: Eth1Data
    eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
    eth1_deposit_index: uint64
    # Registry
    validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
    balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
    # Randomness
    randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
    # Slashings
    slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
    # Participation
    previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
    current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
    # Finality
    justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH]  # Bit set for every recent justified epoch
    previous_justified_checkpoint: Checkpoint
    current_justified_checkpoint: Checkpoint
    finalized_checkpoint: Checkpoint
    # Light client sync committees
    current_sync_committee: SyncCommittee
    next_sync_committee: SyncCommittee
```

@@ -103,7 +201,7 @@ class BeaconState(phase0.BeaconState):
```python
class SyncCommittee(Container):
    pubkeys: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE]
    pubkey_aggregates: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_PUBKEY_AGGREGATES_SIZE]
    pubkey_aggregates: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE // SYNC_SUBCOMMITTEE_SIZE]
```
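For intuition, `pubkey_aggregates` simply carries one aggregate per fixed-size chunk of the committee. A minimal sketch of that chunking with placeholder values (real pubkeys and `bls.AggregatePKs` are stood in by strings and plain lists):

```python
# Toy illustration of how a sync committee's pubkeys map onto subcommittee aggregates.
SYNC_COMMITTEE_SIZE = 1024
SYNC_SUBCOMMITTEE_SIZE = 64

pubkeys = [f"pubkey_{i}" for i in range(SYNC_COMMITTEE_SIZE)]  # placeholder pubkeys
chunks = [
    pubkeys[i:i + SYNC_SUBCOMMITTEE_SIZE]
    for i in range(0, len(pubkeys), SYNC_SUBCOMMITTEE_SIZE)
]
# In the spec each chunk is collapsed with bls.AggregatePKs(chunk); here the chunk is kept
# as-is just to show the shape of the resulting Vector.
assert len(chunks) == SYNC_COMMITTEE_SIZE // SYNC_SUBCOMMITTEE_SIZE == 16
```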

## Helper functions

@@ -122,6 +220,35 @@ def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, s
    return bls.FastAggregateVerify(pubkeys, message, signature)
```

### Misc

#### `get_flag_indices_and_numerators`

```python
def get_flag_indices_and_numerators() -> Sequence[Tuple[int, int]]:
    return (
        (TIMELY_HEAD_FLAG_INDEX, TIMELY_HEAD_FLAG_NUMERATOR),
        (TIMELY_SOURCE_FLAG_INDEX, TIMELY_SOURCE_FLAG_NUMERATOR),
        (TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_FLAG_NUMERATOR),
    )
```

#### `add_flag`

```python
def add_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags:
    flag = ParticipationFlags(2**flag_index)
    return flags | flag
```

#### `has_flag`

```python
def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
    flag = ParticipationFlags(2**flag_index)
    return flags & flag == flag
```
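To make the bit manipulation concrete, here is a tiny self-contained sketch that uses a plain `int` in place of the `ParticipationFlags` (`uint8`) type:

```python
# Flag indices as defined above.
TIMELY_HEAD_FLAG_INDEX = 0
TIMELY_SOURCE_FLAG_INDEX = 1
TIMELY_TARGET_FLAG_INDEX = 2

def add_flag(flags: int, flag_index: int) -> int:
    return flags | (2 ** flag_index)

def has_flag(flags: int, flag_index: int) -> bool:
    flag = 2 ** flag_index
    return flags & flag == flag

flags = 0b0000_0000                                # fresh validator, nothing set
flags = add_flag(flags, TIMELY_SOURCE_FLAG_INDEX)  # 0b0000_0010
flags = add_flag(flags, TIMELY_TARGET_FLAG_INDEX)  # 0b0000_0110
assert has_flag(flags, TIMELY_TARGET_FLAG_INDEX)
assert not has_flag(flags, TIMELY_HEAD_FLAG_INDEX)
```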

### Beacon state accessors

#### `get_sync_committee_indices`

@@ -129,7 +256,7 @@ def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, s
```python
def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
    """
    Return the sync committee indices for a given state and epoch.
    Return the sequence of sync committee indices (which may include duplicate indices) for a given state and epoch.
    """
    MAX_RANDOM_BYTE = 2**8 - 1
    base_epoch = Epoch((max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

@@ -143,7 +270,7 @@ def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[Val
        candidate_index = active_validator_indices[shuffled_index]
        random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
        effective_balance = state.validators[candidate_index].effective_balance
        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:  # Sample with replacement
            sync_committee_indices.append(candidate_index)
        i += 1
    return sync_committee_indices

@@ -160,10 +287,135 @@ def get_sync_committee(state: BeaconState, epoch: Epoch) -> SyncCommittee:
    validators = [state.validators[index] for index in indices]
    pubkeys = [validator.pubkey for validator in validators]
    aggregates = [
        bls.AggregatePKs(pubkeys[i:i + SYNC_COMMITTEE_PUBKEY_AGGREGATES_SIZE])
        for i in range(0, len(pubkeys), SYNC_COMMITTEE_PUBKEY_AGGREGATES_SIZE)
        bls.AggregatePKs(pubkeys[i:i + SYNC_SUBCOMMITTEE_SIZE])
        for i in range(0, len(pubkeys), SYNC_SUBCOMMITTEE_SIZE)
    ]
    return SyncCommittee(pubkeys, aggregates)
    return SyncCommittee(pubkeys=pubkeys, pubkey_aggregates=aggregates)
```

#### `get_base_reward`

*Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH`.

```python
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
    total_balance = get_total_active_balance(state)
    effective_balance = state.validators[index].effective_balance
    return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance))
```

#### `get_unslashed_participating_indices`

```python
def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epoch: Epoch) -> Set[ValidatorIndex]:
    """
    Retrieve the active and unslashed validator indices for the given epoch and flag index.
    """
    assert epoch in (get_previous_epoch(state), get_current_epoch(state))
    if epoch == get_current_epoch(state):
        epoch_participation = state.current_epoch_participation
    else:
        epoch_participation = state.previous_epoch_participation
    active_validator_indices = get_active_validator_indices(state, epoch)
    participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)]
    return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
```

#### `get_flag_deltas`

```python
def get_flag_deltas(state: BeaconState,
                    flag_index: int,
                    numerator: uint64) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    """
    Compute the rewards and penalties associated with a particular duty, by scanning through the participation
    flags to determine who participated and who did not and assigning them the appropriate rewards and penalties.
    """
    rewards = [Gwei(0)] * len(state.validators)
    penalties = [Gwei(0)] * len(state.validators)

    unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, get_previous_epoch(state))
    increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from balances to avoid uint64 overflow
    unslashed_participating_increments = get_total_balance(state, unslashed_participating_indices) // increment
    active_increments = get_total_active_balance(state) // increment
    for index in get_eligible_validator_indices(state):
        base_reward = get_base_reward(state, index)
        if index in unslashed_participating_indices:
            if is_in_inactivity_leak(state):
                # Optimal participation is fully rewarded to cancel the inactivity penalty
                rewards[index] = base_reward * numerator // FLAG_DENOMINATOR
            else:
                rewards[index] = (
                    (base_reward * numerator * unslashed_participating_increments)
                    // (active_increments * FLAG_DENOMINATOR)
                )
        else:
            penalties[index] = base_reward * numerator // FLAG_DENOMINATOR
    return rewards, penalties
```
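Plugging rough numbers into the non-leak branch above (all values illustrative, not taken from the spec):

```python
# Illustrative-only numbers for the non-leak reward branch of get_flag_deltas.
FLAG_DENOMINATOR = 64
TIMELY_TARGET_FLAG_NUMERATOR = 32

base_reward = 20_000                      # Gwei, made-up value for one validator
unslashed_participating_increments = 90   # e.g. 90% of stake earned the target flag
active_increments = 100

reward = (
    base_reward * TIMELY_TARGET_FLAG_NUMERATOR * unslashed_participating_increments
    // (active_increments * FLAG_DENOMINATOR)
)
penalty_if_missing = base_reward * TIMELY_TARGET_FLAG_NUMERATOR // FLAG_DENOMINATOR
print(reward, penalty_if_missing)  # 9000 Gwei earned vs. 10000 Gwei lost for this flag
```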

#### New `get_inactivity_penalty_deltas`

*Note*: The function `get_inactivity_penalty_deltas` is modified in the selection of matching target indices
and the removal of `BASE_REWARDS_PER_EPOCH`.

```python
def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    """
    Compute the penalties associated with the inactivity leak, by scanning through the participation
    flags to determine who participated and who did not, applying the leak penalty globally and applying
    compensatory rewards to participants.
    """
    penalties = [Gwei(0) for _ in range(len(state.validators))]
    if is_in_inactivity_leak(state):
        reward_numerator_sum = sum(numerator for (_, numerator) in get_flag_indices_and_numerators())
        matching_target_attesting_indices = get_unslashed_participating_indices(
            state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)
        )
        for index in get_eligible_validator_indices(state):
            # If validator is performing optimally this cancels all attestation rewards for a neutral balance
            penalties[index] += Gwei(get_base_reward(state, index) * reward_numerator_sum // FLAG_DENOMINATOR)
            if index not in matching_target_attesting_indices:
                effective_balance = state.validators[index].effective_balance
                penalties[index] += Gwei(
                    effective_balance * get_finality_delay(state)
                    // HF1_INACTIVITY_PENALTY_QUOTIENT
                )

    rewards = [Gwei(0) for _ in range(len(state.validators))]
    return rewards, penalties
```

### Beacon state mutators

#### New `slash_validator`

*Note*: The function `slash_validator` is modified
with the substitution of `MIN_SLASHING_PENALTY_QUOTIENT` with `HF1_MIN_SLASHING_PENALTY_QUOTIENT`.

```python
def slash_validator(state: BeaconState,
                    slashed_index: ValidatorIndex,
                    whistleblower_index: ValidatorIndex=None) -> None:
    """
    Slash the validator with index ``slashed_index``.
    """
    epoch = get_current_epoch(state)
    initiate_validator_exit(state, slashed_index)
    validator = state.validators[slashed_index]
    validator.slashed = True
    validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
    state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
    decrease_balance(state, slashed_index, validator.effective_balance // HF1_MIN_SLASHING_PENALTY_QUOTIENT)

    # Apply proposer and whistleblower rewards
    proposer_index = get_beacon_proposer_index(state)
    if whistleblower_index is None:
        whistleblower_index = proposer_index
    whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
    proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
    increase_balance(state, proposer_index, proposer_reward)
    increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
```

### Block processing

@@ -173,9 +425,107 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)
    # Light client support
    process_sync_committee(state, block.body)
    process_operations(state, block.body)  # [Modified in HF1]
    process_sync_committee(state, block.body)  # [New in HF1]
```

#### Modified `process_attestation`

*Note*: The function `process_attestation` is modified to do incentive accounting with epoch participation flags.

```python
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    data = attestation.data
    assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
    assert data.target.epoch == compute_epoch_at_slot(data.slot)
    assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
    assert data.index < get_committee_count_per_slot(state, data.target.epoch)

    committee = get_beacon_committee(state, data.slot, data.index)
    assert len(attestation.aggregation_bits) == len(committee)

    if data.target.epoch == get_current_epoch(state):
        epoch_participation = state.current_epoch_participation
        justified_checkpoint = state.current_justified_checkpoint
    else:
        epoch_participation = state.previous_epoch_participation
        justified_checkpoint = state.previous_justified_checkpoint

    # Matching roots
    is_matching_head = data.beacon_block_root == get_block_root_at_slot(state, data.slot)
    is_matching_source = data.source == justified_checkpoint
    is_matching_target = data.target.root == get_block_root(state, data.target.epoch)
    assert is_matching_source

    # Verify signature
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))

    # Participation flag indices
    participation_flag_indices = []
    if is_matching_head and is_matching_target and state.slot <= data.slot + MIN_ATTESTATION_INCLUSION_DELAY:
        participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
    if is_matching_source and state.slot <= data.slot + integer_squareroot(SLOTS_PER_EPOCH):
        participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
    if is_matching_target and state.slot <= data.slot + SLOTS_PER_EPOCH:
        participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)

    # Update epoch participation flags
    proposer_reward_numerator = 0
    for index in get_attesting_indices(state, data, attestation.aggregation_bits):
        for flag_index, flag_numerator in get_flag_indices_and_numerators():
            if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
                epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
                proposer_reward_numerator += get_base_reward(state, index) * flag_numerator

    # Reward proposer
    proposer_reward = Gwei(proposer_reward_numerator // (FLAG_DENOMINATOR * PROPOSER_REWARD_QUOTIENT))
    increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
```
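The three inclusion-delay windows above are easiest to read as plain numbers. A small sketch, assuming the phase 0 values `MIN_ATTESTATION_INCLUSION_DELAY = 1` and `SLOTS_PER_EPOCH = 32` (neither is redefined in this diff):

```python
from math import isqrt

MIN_ATTESTATION_INCLUSION_DELAY = 1   # assumed phase 0 value
SLOTS_PER_EPOCH = 32                  # assumed phase 0 value

def eligible_flags(inclusion_delay: int, matching_head: bool, matching_target: bool) -> list:
    """Which timeliness flags an otherwise-valid, source-matching attestation can still earn."""
    flags = []
    if matching_head and matching_target and inclusion_delay <= MIN_ATTESTATION_INCLUSION_DELAY:
        flags.append("TIMELY_HEAD")
    if inclusion_delay <= isqrt(SLOTS_PER_EPOCH):   # integer_squareroot(32) = 5 slots
        flags.append("TIMELY_SOURCE")
    if matching_target and inclusion_delay <= SLOTS_PER_EPOCH:
        flags.append("TIMELY_TARGET")
    return flags

print(eligible_flags(1, True, True))   # ['TIMELY_HEAD', 'TIMELY_SOURCE', 'TIMELY_TARGET']
print(eligible_flags(6, True, True))   # ['TIMELY_TARGET'], too late for the head and source flags
```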

#### New `process_deposit`

*Note*: The function `process_deposit` is modified to initialize `previous_epoch_participation` and `current_epoch_participation`.

```python
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    # Verify the Merkle branch
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(deposit.data),
        branch=deposit.proof,
        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the List length mix-in
        index=state.eth1_deposit_index,
        root=state.eth1_data.deposit_root,
    )

    # Deposits must be processed in order
    state.eth1_deposit_index += 1

    pubkey = deposit.data.pubkey
    amount = deposit.data.amount
    validator_pubkeys = [v.pubkey for v in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        deposit_message = DepositMessage(
            pubkey=deposit.data.pubkey,
            withdrawal_credentials=deposit.data.withdrawal_credentials,
            amount=deposit.data.amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        signing_root = compute_signing_root(deposit_message, domain)
        if not bls.Verify(pubkey, signing_root, deposit.data.signature):
            return

        # Add validator and balance entries
        state.validators.append(get_validator_from_deposit(state, deposit))
        state.balances.append(amount)
        # [Added in hf-1] Initialize empty participation flags for new validator
        state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
        state.current_epoch_participation.append(ParticipationFlags(0b0000_0000))
    else:
        # Increase balance by deposit amount
        index = ValidatorIndex(validator_pubkeys.index(pubkey))
        increase_balance(state, index, amount)
```

#### Sync committee processing

@@ -193,54 +543,136 @@ def process_sync_committee(state: BeaconState, body: BeaconBlockBody) -> None:
    assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, body.sync_committee_signature)

    # Reward sync committee participants
    proposer_reward = Gwei(0)
    proposer_rewards = Gwei(0)
    active_validator_count = uint64(len(get_active_validator_indices(state, get_current_epoch(state))))
    for participant_index in participant_indices:
        proposer_reward = get_proposer_reward(state, participant_index)
        proposer_rewards += proposer_reward
        base_reward = get_base_reward(state, participant_index)
        max_participant_reward = base_reward - base_reward // PROPOSER_REWARD_QUOTIENT
        reward = Gwei(max_participant_reward * active_validator_count // len(committee_indices) // SLOTS_PER_EPOCH)
        max_participant_reward = base_reward - proposer_reward
        reward = Gwei(max_participant_reward * active_validator_count // (len(committee_indices) * SLOTS_PER_EPOCH))
        increase_balance(state, participant_index, reward)
        proposer_reward += base_reward // PROPOSER_REWARD_QUOTIENT

    # Reward beacon proposer
    increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
    increase_balance(state, get_beacon_proposer_index(state), proposer_rewards)
```
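A rough feel for the per-participant reward in the new version. All numbers are illustrative, and `get_proposer_reward` is assumed to be `base_reward // PROPOSER_REWARD_QUOTIENT` as elsewhere in the spec:

```python
# Illustrative numbers for one sync committee participant's per-slot reward.
PROPOSER_REWARD_QUOTIENT = 8     # assumed phase 0 value
SLOTS_PER_EPOCH = 32             # assumed phase 0 value
SYNC_COMMITTEE_SIZE = 1024

base_reward = 20_000             # Gwei, made up
active_validator_count = 120_000 # made-up network size

proposer_reward = base_reward // PROPOSER_REWARD_QUOTIENT
max_participant_reward = base_reward - proposer_reward
reward = max_participant_reward * active_validator_count // (SYNC_COMMITTEE_SIZE * SLOTS_PER_EPOCH)
print(proposer_reward, reward)   # 2500 Gwei to the proposer, 64086 Gwei to the participant
```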

### Epoch processing

#### Components of attestation deltas

*Note*: The function `get_inactivity_penalty_deltas` is modified with `BASE_REWARDS_PER_EPOCH` replaced by `BASE_REWARDS_PER_EPOCH - 1`.

```python
def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    """
    Return inactivity reward/penalty deltas for each validator.
    """
    penalties = [Gwei(0) for _ in range(len(state.validators))]
    if is_in_inactivity_leak(state):
        matching_target_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
        matching_target_attesting_indices = get_unslashed_attesting_indices(state, matching_target_attestations)
        for index in get_eligible_validator_indices(state):
            # Penalize validator so that optimal attestation performance is rewarded with one base reward per epoch
            base_reward = get_base_reward(state, index)
            penalties[index] += Gwei((BASE_REWARDS_PER_EPOCH - 1) * base_reward - get_proposer_reward(state, index))
            if index not in matching_target_attesting_indices:
                effective_balance = state.validators[index].effective_balance
                penalties[index] += Gwei(effective_balance * get_finality_delay(state) // INACTIVITY_PENALTY_QUOTIENT)

    # No rewards associated with inactivity penalties
    rewards = [Gwei(0) for _ in range(len(state.validators))]
    return rewards, penalties
def process_epoch(state: BeaconState) -> None:
    process_justification_and_finalization(state)  # [Modified in HF1]
    process_rewards_and_penalties(state)  # [Modified in HF1]
    process_registry_updates(state)
    process_slashings(state)  # [Modified in HF1]
    process_eth1_data_reset(state)
    process_effective_balance_updates(state)
    process_slashings_reset(state)
    process_randao_mixes_reset(state)
    process_historical_roots_update(state)
    process_participation_flag_updates(state)  # [New in HF1]
    process_sync_committee_updates(state)  # [New in HF1]
```

#### Final updates
#### Justification and finalization

*Note*: The function `process_final_updates` is modified to handle sync committee updates.
*Note*: The function `process_justification_and_finalization` is modified with `matching_target_attestations` replaced by `matching_target_indices`.

```python
def process_final_updates(state: BeaconState) -> None:
    phase0.process_final_updates(state)
def process_justification_and_finalization(state: BeaconState) -> None:
    # Initial FFG checkpoint values have a `0x00` stub for `root`.
    # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub.
    if get_current_epoch(state) <= GENESIS_EPOCH + 1:
        return
    previous_epoch = get_previous_epoch(state)
    current_epoch = get_current_epoch(state)
    old_previous_justified_checkpoint = state.previous_justified_checkpoint
    old_current_justified_checkpoint = state.current_justified_checkpoint

    # Process justifications
    state.previous_justified_checkpoint = state.current_justified_checkpoint
    state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1]
    state.justification_bits[0] = 0b0
    matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
    if get_total_balance(state, matching_target_indices) * 3 >= get_total_active_balance(state) * 2:
        state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
                                                        root=get_block_root(state, previous_epoch))
        state.justification_bits[1] = 0b1
    matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, current_epoch)
    if get_total_balance(state, matching_target_indices) * 3 >= get_total_active_balance(state) * 2:
        state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
                                                        root=get_block_root(state, current_epoch))
        state.justification_bits[0] = 0b1

    # Process finalizations
    bits = state.justification_bits
    # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
    if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
        state.finalized_checkpoint = old_previous_justified_checkpoint
    # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
    if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
        state.finalized_checkpoint = old_previous_justified_checkpoint
    # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
    if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
        state.finalized_checkpoint = old_current_justified_checkpoint
    # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
    if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
        state.finalized_checkpoint = old_current_justified_checkpoint
```
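The justification checks above reduce to a 2/3 supermajority test on balances; in miniature (balances in Gwei, numbers made up):

```python
# Supermajority check used for justification: participating * 3 >= total * 2.
total_active_balance = 3_200_000 * 10**9      # e.g. 100k validators with 32 ETH each
matching_target_balance = 2_200_000 * 10**9   # balance that attested to the correct target

justified = matching_target_balance * 3 >= total_active_balance * 2
print(justified)  # True: 68.75% of the stake is at least two thirds
```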

#### Rewards and penalties

*Note*: The function `process_rewards_and_penalties` is modified to support the incentive reforms.

```python
def process_rewards_and_penalties(state: BeaconState) -> None:
    # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch
    if get_current_epoch(state) == GENESIS_EPOCH:
        return
    flag_deltas = [
        get_flag_deltas(state, flag_index, flag_numerator)
        for (flag_index, flag_numerator) in get_flag_indices_and_numerators()
    ]
    deltas = flag_deltas + [get_inactivity_penalty_deltas(state)]
    for (rewards, penalties) in deltas:
        for index in range(len(state.validators)):
            increase_balance(state, ValidatorIndex(index), rewards[index])
            decrease_balance(state, ValidatorIndex(index), penalties[index])
```

#### Slashings

*Note*: The function `process_slashings` is modified to use `HF1_PROPORTIONAL_SLASHING_MULTIPLIER`.

```python
def process_slashings(state: BeaconState) -> None:
    epoch = get_current_epoch(state)
    total_balance = get_total_active_balance(state)
    adjusted_total_slashing_balance = min(sum(state.slashings) * HF1_PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
    for index, validator in enumerate(state.validators):
        if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
            increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from penalty numerator to avoid uint64 overflow
            penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
            penalty = penalty_numerator // total_balance * increment
            decrease_balance(state, ValidatorIndex(index), penalty)
```
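To see what the multiplier does, a small worked example (illustrative balances; `EFFECTIVE_BALANCE_INCREMENT` is assumed to be 1 ETH in Gwei as in phase 0):

```python
# Illustrative proportional-slashing penalty with HF1_PROPORTIONAL_SLASHING_MULTIPLIER = 2.
GWEI_PER_ETH = 10**9
EFFECTIVE_BALANCE_INCREMENT = 1 * GWEI_PER_ETH   # assumed phase 0 value
HF1_PROPORTIONAL_SLASHING_MULTIPLIER = 2

total_balance = 3_200_000 * GWEI_PER_ETH         # 100k validators * 32 ETH, made up
recently_slashed = 160_000 * GWEI_PER_ETH        # 5% of stake slashed in the window
effective_balance = 32 * GWEI_PER_ETH

adjusted = min(recently_slashed * HF1_PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
penalty_numerator = effective_balance // EFFECTIVE_BALANCE_INCREMENT * adjusted
penalty = penalty_numerator // total_balance * EFFECTIVE_BALANCE_INCREMENT
print(penalty / GWEI_PER_ETH)  # 3.0, i.e. ~10% of 32 ETH rounded down to whole increments
```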

#### Participation flags updates

*Note*: The function `process_participation_flag_updates` is new.

```python
def process_participation_flag_updates(state: BeaconState) -> None:
    state.previous_epoch_participation = state.current_epoch_participation
    state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
```

#### Sync committee updates

*Note*: The function `process_sync_committee_updates` is new.

```python
def process_sync_committee_updates(state: BeaconState) -> None:
    next_epoch = get_current_epoch(state) + Epoch(1)
    if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
        state.current_sync_committee = state.next_sync_committee
@@ -43,6 +43,7 @@ def upgrade_to_lightclient_patch(pre: phase0.BeaconState) -> BeaconState:
    epoch = get_current_epoch(pre)
    post = BeaconState(
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
@@ -65,19 +66,17 @@ def upgrade_to_lightclient_patch(pre: phase0.BeaconState) -> BeaconState:
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Attestations
        # previous_epoch_attestations is cleared on upgrade.
        previous_epoch_attestations=List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH](),
        # empty in pre state, since the upgrade is performed just after an epoch boundary.
        current_epoch_attestations=List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH](),
        # Participation
        previous_epoch_participation=[ParticipationFlags(0) for _ in range(len(pre.validators))],
        current_epoch_participation=[ParticipationFlags(0) for _ in range(len(pre.validators))],
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Light-client
        current_sync_committee=SyncCommittee(),
        next_sync_committee=SyncCommittee(),
    )
    # Fill in sync committees
    post.current_sync_committee = get_sync_committee(post, get_current_epoch(post))
    post.next_sync_committee = get_sync_committee(post, get_current_epoch(post) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
    return post
```
@@ -13,6 +13,7 @@
- [Misc](#misc)
- [Gwei values](#gwei-values)
- [Initial values](#initial-values)
- [Withdrawal prefixes](#withdrawal-prefixes)
- [Time parameters](#time-parameters)
- [State list lengths](#state-list-lengths)
- [Rewards and penalties](#rewards-and-penalties)
@@ -57,7 +58,7 @@
- [Crypto](#crypto)
- [`hash`](#hash)
- [`hash_tree_root`](#hash_tree_root)
- [BLS Signatures](#bls-signatures)
- [BLS signatures](#bls-signatures)
- [Predicates](#predicates)
- [`is_active_validator`](#is_active_validator)
- [`is_eligible_for_activation_queue`](#is_eligible_for_activation_queue)
@@ -113,7 +114,12 @@
- [`process_rewards_and_penalties`](#process_rewards_and_penalties)
- [Registry updates](#registry-updates)
- [Slashings](#slashings)
- [Final updates](#final-updates)
- [Eth1 data votes updates](#eth1-data-votes-updates)
- [Effective balances updates](#effective-balances-updates)
- [Slashings balances updates](#slashings-balances-updates)
- [Randao mixes updates](#randao-mixes-updates)
- [Historical roots updates](#historical-roots-updates)
- [Participation records rotation](#participation-records-rotation)
- [Block processing](#block-processing)
- [Block header](#block-header)
- [RANDAO](#randao)
@@ -209,7 +215,13 @@ The following values are (non-configurable) constants used throughout the specif
| Name | Value |
| - | - |
| `GENESIS_FORK_VERSION` | `Version('0x00000000')` |

### Withdrawal prefixes

| Name | Value |
| - | - |
| `BLS_WITHDRAWAL_PREFIX` | `Bytes1('0x00')` |
| `ETH1_ADDRESS_WITHDRAWAL_PREFIX` | `Bytes1('0x01')` |

### Time parameters

@@ -250,7 +262,7 @@ The following values are (non-configurable) constants used throughout the specif

- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes to provide a faster recovery in the event of an inactivity leak.

- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stablizes, this value will be upgraded to `3` to provide the maximal minimum accoutable safety margin.
- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stablizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin.
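A quick numerical check of the retention formula quoted for `INACTIVITY_PENALTY_QUOTIENT` above (values copied from that bullet):

```python
# Balance retained by a fully offline validator after n epochs of inactivity leak,
# per the approximation (1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2 / 2).
INVERSE_SQRT_E_DROP_TIME = 2**13
INACTIVITY_PENALTY_QUOTIENT = INVERSE_SQRT_E_DROP_TIME**2   # 2**26

n = INVERSE_SQRT_E_DROP_TIME
retained = (1 - 1 / INACTIVITY_PENALTY_QUOTIENT) ** (n**2 / 2)
print(round(retained, 4))  # ~0.6065, i.e. roughly 1/sqrt(e) ~= 60.6%
```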

### Max operations per block

@@ -600,17 +612,17 @@ def bytes_to_uint64(data: bytes) -> uint64:

`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../../ssz/simple-serialize.md#merkleization).

#### BLS Signatures
#### BLS signatures

Eth2 makes use of BLS signatures as specified in the [IETF draft BLS specification draft-irtf-cfrg-bls-signature-04](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04). Specifically, eth2 uses the `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` ciphersuite which implements the following interfaces:
The [IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) with ciphersuite `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` defines the following functions:

- `def Sign(SK: int, message: Bytes) -> BLSSignature`
- `def Verify(PK: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
- `def Sign(privkey: int, message: Bytes) -> BLSSignature`
- `def Verify(pubkey: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
- `def Aggregate(signatures: Sequence[BLSSignature]) -> BLSSignature`
- `def FastAggregateVerify(PKs: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
- `def AggregateVerify(PKs: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool`
- `def FastAggregateVerify(pubkeys: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
- `def AggregateVerify(pubkeys: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool`

Within these specifications, BLS signatures are treated as a module for notational clarity, thus to verify a signature `bls.Verify(...)` is used.
The above functions are accessed through the `bls` module, e.g. `bls.Verify`.
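For a concrete, non-normative taste of that interface, the `py_ecc` package pinned in `setup.py` above exposes the same IETF API. A minimal sketch, assuming `py_ecc`'s `G2ProofOfPossession` ciphersuite matches the function names listed above:

```python
from py_ecc.bls import G2ProofOfPossession as bls  # py_ecc is pinned in setup.py above

privkey = 5566                   # toy secret key; never use a small key in practice
pubkey = bls.SkToPk(privkey)
message = b"\x12" * 32           # stand-in for a 32-byte signing root

signature = bls.Sign(privkey, message)
assert bls.Verify(pubkey, message, signature)

# Aggregation across two keys, as used for attestations and sync committee signatures.
sig2 = bls.Sign(privkey + 1, message)
agg = bls.Aggregate([signature, sig2])
assert bls.FastAggregateVerify([pubkey, bls.SkToPk(privkey + 1)], message, agg)
```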

### Predicates

@@ -1250,7 +1262,12 @@ def process_epoch(state: BeaconState) -> None:
    process_rewards_and_penalties(state)
    process_registry_updates(state)
    process_slashings(state)
    process_final_updates(state)
    process_eth1_data_reset(state)
    process_effective_balance_updates(state)
    process_slashings_reset(state)
    process_randao_mixes_reset(state)
    process_historical_roots_update(state)
    process_participation_record_updates(state)
```

#### Helper functions

@@ -1557,15 +1574,19 @@ def process_slashings(state: BeaconState) -> None:
            decrease_balance(state, ValidatorIndex(index), penalty)
```

#### Final updates

#### Eth1 data votes updates
```python
def process_final_updates(state: BeaconState) -> None:
    current_epoch = get_current_epoch(state)
    next_epoch = Epoch(current_epoch + 1)
def process_eth1_data_reset(state: BeaconState) -> None:
    next_epoch = Epoch(get_current_epoch(state) + 1)
    # Reset eth1 data votes
    if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
        state.eth1_data_votes = []
```

#### Effective balances updates

```python
def process_effective_balance_updates(state: BeaconState) -> None:
    # Update effective balances with hysteresis
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
@@ -1577,14 +1598,41 @@ def process_final_updates(state: BeaconState) -> None:
            or validator.effective_balance + UPWARD_THRESHOLD < balance
        ):
            validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
```

#### Slashings balances updates

```python
def process_slashings_reset(state: BeaconState) -> None:
    next_epoch = Epoch(get_current_epoch(state) + 1)
    # Reset slashings
    state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
```

#### Randao mixes updates

```python
def process_randao_mixes_reset(state: BeaconState) -> None:
    current_epoch = get_current_epoch(state)
    next_epoch = Epoch(current_epoch + 1)
    # Set randao mix
    state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
```

#### Historical roots updates
```python
def process_historical_roots_update(state: BeaconState) -> None:
    # Set historical root accumulator
    next_epoch = Epoch(get_current_epoch(state) + 1)
    if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
        historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
        state.historical_roots.append(hash_tree_root(historical_batch))
```

#### Participation records rotation

```python
def process_participation_record_updates(state: BeaconState) -> None:
    # Rotate current/previous epoch attestations
    state.previous_epoch_attestations = state.current_epoch_attestations
    state.current_epoch_attestations = []
@@ -58,12 +58,13 @@ The amount of ETH (rounded down to the closest Gwei) sent to the deposit contrac

#### Withdrawal credentials

One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawing validator balance (e.g. to another validator, or to shards). The first byte of `withdrawal_credentials` is a version number. As of now, the only expected format is as follows:
One of the `DepositData` fields is `withdrawal_credentials` which constrains validator withdrawals.
The first byte of this 32-byte field is a withdrawal prefix which defines the semantics of the remaining 31 bytes.
The withdrawal prefixes currently supported are `BLS_WITHDRAWAL_PREFIX` and `ETH1_ADDRESS_WITHDRAWAL_PREFIX`.
Read more in the [validator guide](./validator.md#withdrawal-credentials).

* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`
* `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey

The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage.
*Note*: The deposit contract does not validate the `withdrawal_credentials` field.
Support for new withdrawal prefixes can be added without modifying the deposit contract.
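As a concrete sketch of the BLS-style credentials described in the removed bullets above (using SHA-256 for `hash`, which is the phase 0 hash function; the pubkey bytes are placeholders):

```python
import hashlib

BLS_WITHDRAWAL_PREFIX = b"\x00"

def bls_withdrawal_credentials(withdrawal_pubkey: bytes) -> bytes:
    """Build 32-byte credentials: prefix byte + last 31 bytes of hash(withdrawal_pubkey)."""
    assert len(withdrawal_pubkey) == 48          # BLS pubkeys are 48 bytes
    digest = hashlib.sha256(withdrawal_pubkey).digest()
    return BLS_WITHDRAWAL_PREFIX + digest[1:]

credentials = bls_withdrawal_credentials(b"\x11" * 48)   # placeholder pubkey
assert len(credentials) == 32 and credentials[:1] == BLS_WITHDRAWAL_PREFIX
```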

#### `DepositEvent` log
@ -101,7 +101,7 @@ It consists of four main sections:
|
|||
- [Compression/Encoding](#compressionencoding)
|
||||
- [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding)
|
||||
- [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers)
|
||||
- [Why are using Snappy for compression?](#why-are-using-snappy-for-compression)
|
||||
- [Why are we using Snappy for compression?](#why-are-we-using-snappy-for-compression)
|
||||
- [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes)
|
||||
- [What are SSZ type size bounds?](#what-are-ssz-type-size-bounds)
|
||||
- [libp2p implementations matrix](#libp2p-implementations-matrix)
|
||||
|
@ -292,7 +292,7 @@ If one or more validations fail while processing the items in order, return eith
|
|||
There are two primary global topics used to propagate beacon blocks (`beacon_block`)
|
||||
and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network.
|
||||
|
||||
There are three additional global topics are used to propagate lower frequency validator messages
|
||||
There are three additional global topics that are used to propagate lower frequency validator messages
|
||||
(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`).
|
||||
|
||||
##### `beacon_block`
|
||||
|
@ -313,6 +313,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
|
|||
(via both gossip and non-gossip sources)
|
||||
(a client MAY queue blocks for processing once the parent block is retrieved).
|
||||
- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
|
||||
- _[REJECT]_ The block is from a higher slot than its parent.
|
||||
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e.
|
||||
`get_ancestor(store, block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
|
||||
== store.finalized_checkpoint.root`
|
||||
|
@ -419,7 +420,7 @@ The following validations MUST pass before forwarding the `attestation` on the s
|
|||
- _[REJECT]_ The signature of `attestation` is valid.
|
||||
- _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen
|
||||
(via both gossip and non-gossip sources)
|
||||
(a client MAY queue aggregates for processing once block is retrieved).
|
||||
(a client MAY queue attestations for processing once block is retrieved).
|
||||
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
||||
- _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
|
||||
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root`
|
||||
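Similarly, the target-ancestor condition above can be expressed as a small helper (an illustrative wrapper, not a spec function):

```python
def is_target_ancestor_of_vote(store: Store, attestation: Attestation) -> bool:
    # The attestation's target must be an ancestor of the block named in the LMD vote,
    # checked at the first slot of the target epoch.
    target = attestation.data.target
    ancestor = get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(target.epoch))
    return ancestor == target.root
```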
|
@ -937,7 +938,7 @@ where the fields of `ENRForkID` are defined as
|
|||
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated.
|
||||
If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact.
|
||||
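For example, a client with no planned fork might populate the container as follows (a sketch; it assumes the `ENRForkID` fields and `compute_fork_digest` helper defined in this networking spec, with illustrative variable names):

```python
enr_fork_id = ENRForkID(
    fork_digest=compute_fork_digest(current_fork_version, genesis_validators_root),
    next_fork_version=current_fork_version,  # no fork planned, so reuse the current version
    next_fork_epoch=FAR_FUTURE_EPOCH,        # signals that no future fork is planned
)
```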
|
||||
*Note*: `fork_digest` is composed of values that are not not known until the genesis block/state are available.
|
||||
*Note*: `fork_digest` is composed of values that are not known until the genesis block/state are available.
|
||||
Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known.
|
||||
One notable exception to this rule is the distribution of bootnode ENRs prior to genesis.
|
||||
In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as
|
||||
|
@ -1223,7 +1224,7 @@ the node's fork choice prevents integration of these messages into the actual co
|
|||
Depending on the number of validators, it may be more efficient to group shard subnets, and doing so might provide better stability for the gossipsub channel.
|
||||
The exact grouping will be dependent on more involved network tests.
|
||||
This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
|
||||
The value is currently set to to be equal `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
||||
The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
||||
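For reference, committees are spread over the subnets roughly as follows (mirroring the `compute_subnet_for_attestation` helper in the validator guide; treat the exact signature as an assumption of this sketch):

```python
def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
    # Committees are assigned round-robin over ATTESTATION_SUBNET_COUNT subnets,
    # offset by how far into the epoch the slot is.
    slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
    return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
```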
|
||||
### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
|
||||
|
||||
|
@ -1369,7 +1370,7 @@ Thus, it may happen that we need to transmit an empty list - there are several w
|
|||
|
||||
Semantically, it is not an error that a block is missing during a slot, making option 2 unnatural.
|
||||
|
||||
Option 1 allows allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node.
|
||||
Option 1 allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node.
|
||||
|
||||
Under option 0, there is no way for a client to distinguish between a slot without a block and an incomplete response,
|
||||
but given that it already must contain logic to handle the uncertainty of a malicious peer, option 0 was chosen.
|
||||
|
@ -1495,7 +1496,7 @@ This looks different depending on the interaction layer:
|
|||
implementers are encouraged to encapsulate the encoding and compression logic behind
|
||||
MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
||||
|
||||
### Why are using Snappy for compression?
|
||||
### Why are we using Snappy for compression?
|
||||
|
||||
Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks,
|
||||
and can calculate the size of the uncompressed object without inflating it in memory.
|
||||
|
|
|
@ -15,7 +15,9 @@ This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](.
|
|||
- [Becoming a validator](#becoming-a-validator)
|
||||
- [Initialization](#initialization)
|
||||
- [BLS public key](#bls-public-key)
|
||||
- [BLS withdrawal key](#bls-withdrawal-key)
|
||||
- [Withdrawal credentials](#withdrawal-credentials)
|
||||
- [`BLS_WITHDRAWAL_PREFIX`](#bls_withdrawal_prefix)
|
||||
- [`ETH1_ADDRESS_WITHDRAWAL_PREFIX`](#eth1_address_withdrawal_prefix)
|
||||
- [Submit deposit](#submit-deposit)
|
||||
- [Process deposit](#process-deposit)
|
||||
- [Validator index](#validator-index)
|
||||
|
@ -100,14 +102,41 @@ A validator must initialize many parameters locally before submitting a deposit
|
|||
|
||||
Validator public keys are [G1 points](beacon-chain.md#bls-signatures) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator.
|
||||
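For illustration only, a keypair can be derived with `py_ecc` (the BLS library used by the pyspec); real deployments should generate keys with a hardened scheme such as EIP-2333/EIP-2335 rather than the raw integer shown here:

```python
from py_ecc.bls import G2ProofOfPossession as bls

# `privkey` is assumed to come from a proper key-derivation scheme in practice;
# a hard-coded integer is used here purely for illustration.
privkey = 0x25295f0d1d592a90b333e26e85149708208e9f8e8bc18f6c77bd62f8ad7a6866
pubkey = bls.SkToPk(privkey)  # 48-byte compressed G1 point used as the validator pubkey
```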
|
||||
#### BLS withdrawal key
|
||||
#### Withdrawal credentials
|
||||
|
||||
A secondary withdrawal private key, `withdrawal_privkey`, must also be securely generated along with the resultant `withdrawal_pubkey`. This `withdrawal_privkey` does not have to be available for signing during the normal lifetime of a validator and can live in "cold storage".
|
||||
The `withdrawal_credentials` field constrains validator withdrawals.
|
||||
The first byte of this 32-byte field is a withdrawal prefix which defines the semantics of the remaining 31 bytes.
|
||||
|
||||
The validator constructs their `withdrawal_credentials` via the following:
|
||||
The following withdrawal prefixes are currently supported.
|
||||
|
||||
* Set `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`.
|
||||
* Set `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]`.
|
||||
##### `BLS_WITHDRAWAL_PREFIX`
|
||||
|
||||
Withdrawal credentials with the BLS withdrawal prefix allow a BLS key pair
|
||||
`(bls_withdrawal_privkey, bls_withdrawal_pubkey)` to trigger withdrawals.
|
||||
The `withdrawal_credentials` field must be such that:
|
||||
|
||||
* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`
|
||||
* `withdrawal_credentials[1:] == hash(bls_withdrawal_pubkey)[1:]`
|
||||
|
||||
*Note*: The `bls_withdrawal_privkey` is not required for validating and can be kept in cold storage.
|
||||
|
||||
##### `ETH1_ADDRESS_WITHDRAWAL_PREFIX`
|
||||
|
||||
Withdrawal credentials with the Eth1 address withdrawal prefix specify
|
||||
a 20-byte Eth1 address `eth1_withdrawal_address` as the recipient for all withdrawals.
|
||||
The `eth1_withdrawal_address` can be the address of either an externally owned account or a contract.
|
||||
|
||||
The `withdrawal_credentials` field must be such that:
|
||||
|
||||
* `withdrawal_credentials[:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX`
|
||||
* `withdrawal_credentials[1:12] == b'\x00' * 11`
|
||||
* `withdrawal_credentials[12:] == eth1_withdrawal_address`
|
||||
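A sketch of the corresponding construction, assuming `ETH1_ADDRESS_WITHDRAWAL_PREFIX` is the one-byte value `0x01` (the helper name is illustrative):

```python
ETH1_ADDRESS_WITHDRAWAL_PREFIX = bytes([0x01])  # assumed value of the spec constant

def eth1_withdrawal_credentials(eth1_withdrawal_address: bytes) -> bytes:
    assert len(eth1_withdrawal_address) == 20
    # One prefix byte, eleven zero bytes of padding, then the 20-byte Eth1 address
    return ETH1_ADDRESS_WITHDRAWAL_PREFIX + b'\x00' * 11 + eth1_withdrawal_address
```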
|
||||
After the merge of the current Ethereum application layer (Eth1) into the Beacon Chain (Eth2),
|
||||
withdrawals to `eth1_withdrawal_address` will be normal ETH transfers (with no payload other than the validator's ETH)
|
||||
triggered by a user transaction that will set the gas price and gas limit as well as pay fees.
|
||||
As long as the account or contract with address `eth1_withdrawal_address` can receive ETH transfers,
|
||||
the future withdrawal protocol is agnostic to all other implementation details.
|
||||
|
||||
### Submit deposit
|
||||
|
||||
|
|
|
@ -8,13 +8,17 @@
|
|||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Custom Types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Configuration](#configuration)
|
||||
- [Weak Subjectivity Checkpoint](#weak-subjectivity-checkpoint)
|
||||
- [Weak Subjectivity Period](#weak-subjectivity-period)
|
||||
- [Calculating the Weak Subjectivity Period](#calculating-the-weak-subjectivity-period)
|
||||
- [`compute_weak_subjectivity_period`](#compute_weak_subjectivity_period)
|
||||
- [Weak Subjectivity Sync](#weak-subjectivity-sync)
|
||||
- [Weak Subjectivity Sync Procedure](#weak-subjectivity-sync-procedure)
|
||||
- [Checking for Stale Weak Subjectivity Checkpoint](#checking-for-stale-weak-subjectivity-checkpoint)
|
||||
- [`is_within_weak_subjectivity_period`](#is_within_weak_subjectivity_period)
|
||||
- [Distributing Weak Subjectivity Checkpoints](#distributing-weak-subjectivity-checkpoints)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
@ -34,15 +38,27 @@ For more information about weak subjectivity and why it is required, please refe
|
|||
This document uses data structures, constants, functions, and terminology from
|
||||
[Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Beacon Chain Fork Choice](./fork-choice.md).
|
||||
|
||||
## Custom Types
|
||||
|
||||
| Name | SSZ Equivalent | Description |
|
||||
|---|---|---|
|
||||
| `Ether` | `uint64` | an amount in Ether |
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value |
|
||||
|----------------|--------------|
|
||||
|---|---|
|
||||
| `ETH_TO_GWEI` | `uint64(10**9)` |
|
||||
|
||||
## Configuration
|
||||
|
||||
| Name | Value |
|
||||
|---|---|
|
||||
| `SAFETY_DECAY` | `uint64(10)` |
|
||||
|
||||
## Weak Subjectivity Checkpoint
|
||||
|
||||
Any `Checkpoint` can used be a Weak Subjectivity Checkpoint.
|
||||
Any `Checkpoint` object can be used as a Weak Subjectivity Checkpoint.
|
||||
These Weak Subjectivity Checkpoints are distributed by providers,
|
||||
downloaded by users and/or distributed as a part of clients, and used as input while syncing a client.
|
||||
|
||||
|
@ -59,38 +75,64 @@ a safety margin of at least `1/3 - SAFETY_DECAY/100`.
|
|||
|
||||
### Calculating the Weak Subjectivity Period
|
||||
|
||||
*Note*: `compute_weak_subjectivity_period()` is planned to be updated when a more accurate calculation is made.
|
||||
A detailed analysis of the calculation of the weak subjectivity period is made in [this report](https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf).
|
||||
|
||||
*Note*: The expressions in the report use fractions, whereas eth2.0-specs uses only `uint64` arithmetic. The expressions have been simplified to avoid computing fractions, and more details can be found [here](https://www.overleaf.com/read/wgjzjdjpvpsd).
|
||||
|
||||
*Note*: The calculations here use `Ether` instead of `Gwei`, because the large magnitude of balances in `Gwei` can overflow `uint64` arithmetic. Using `Ether` reduces the magnitude of the multiplicative factors by a factor of `ETH_TO_GWEI` (`= 10**9`) and avoids `uint64` overflow.
|
||||
|
||||
#### `compute_weak_subjectivity_period`
|
||||
|
||||
```python
|
||||
def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
|
||||
weak_subjectivity_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
validator_count = len(get_active_validator_indices(state, get_current_epoch(state)))
|
||||
if validator_count >= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
|
||||
weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
|
||||
"""
|
||||
Returns the weak subjectivity period for the current ``state``.
|
||||
This computation takes into account the effect of:
|
||||
- validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and
|
||||
- validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch).
|
||||
A detailed calculation can be found at:
|
||||
https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf
|
||||
"""
|
||||
ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
N = len(get_active_validator_indices(state, get_current_epoch(state)))
|
||||
t = get_total_active_balance(state) // N // ETH_TO_GWEI
|
||||
T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI
|
||||
delta = get_validator_churn_limit(state)
|
||||
Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH
|
||||
D = SAFETY_DECAY
|
||||
|
||||
if T * (200 + 3 * D) < t * (200 + 12 * D):
|
||||
epochs_for_validator_set_churn = (
|
||||
N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
|
||||
)
|
||||
epochs_for_balance_top_ups = (
|
||||
N * (200 + 3 * D) // (600 * Delta)
|
||||
)
|
||||
ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)
|
||||
else:
|
||||
weak_subjectivity_period += SAFETY_DECAY * validator_count // (2 * 100 * MIN_PER_EPOCH_CHURN_LIMIT)
|
||||
return weak_subjectivity_period
|
||||
ws_period += (
|
||||
3 * N * D * t // (200 * Delta * (T - t))
|
||||
)
|
||||
|
||||
return ws_period
|
||||
```
|
||||
|
||||
*Details about the calculation*:
|
||||
- `100` appears in the denominator to get the actual percentage ratio from `SAFETY_DECAY`
|
||||
- For more information about other terms in this equation, refer to
|
||||
[Weak Subjectivity in Eth2.0](https://notes.ethereum.org/@adiasg/weak-subjectvity-eth2)
|
||||
A brief reference for what these values look like in practice ([reference script](https://gist.github.com/adiasg/3aceab409b36aa9a9d9156c1baa3c248)):
|
||||
|
||||
A brief reference for what these values look like in practice:
|
||||
|
||||
| `validator_count` | `weak_subjectivity_period` |
|
||||
| ---- | ---- |
|
||||
| 1024 | 268 |
|
||||
| 2048 | 281 |
|
||||
| 4096 | 307 |
|
||||
| 8192 | 358 |
|
||||
| 16384 | 460 |
|
||||
| 32768 | 665 |
|
||||
| 65536 | 1075 |
|
||||
| 131072 | 1894 |
|
||||
| 262144 | 3532 |
|
||||
| 524288 | 3532 |
|
||||
| Safety Decay | Avg. Val. Balance (ETH) | Val. Count | Weak Sub. Period (Epochs) |
|
||||
| ---- | ---- | ---- | ---- |
|
||||
| 10 | 28 | 32768 | 504 |
|
||||
| 10 | 28 | 65536 | 752 |
|
||||
| 10 | 28 | 131072 | 1248 |
|
||||
| 10 | 28 | 262144 | 2241 |
|
||||
| 10 | 28 | 524288 | 2241 |
|
||||
| 10 | 28 | 1048576 | 2241 |
|
||||
| 10 | 32 | 32768 | 665 |
|
||||
| 10 | 32 | 65536 | 1075 |
|
||||
| 10 | 32 | 131072 | 1894 |
|
||||
| 10 | 32 | 262144 | 3532 |
|
||||
| 10 | 32 | 524288 | 3532 |
|
||||
| 10 | 32 | 1048576 | 3532 |
|
||||
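As a sanity check, one row of the table can be reproduced directly. The sketch below hard-codes mainnet-style preset values (these constants are assumptions of the example, not defined in this document) and evaluates the same branch of the formula for 32768 validators at 32 ETH average effective balance:

```python
# Assumed mainnet-style preset values (not defined in this document)
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256
MAX_DEPOSITS = 16
SLOTS_PER_EPOCH = 32
MIN_PER_EPOCH_CHURN_LIMIT = 4
CHURN_LIMIT_QUOTIENT = 2**16
SAFETY_DECAY = 10

N = 32768  # active validator count
t = 32     # average effective balance, in ETH
T = 32     # MAX_EFFECTIVE_BALANCE, in ETH
delta = max(MIN_PER_EPOCH_CHURN_LIMIT, N // CHURN_LIMIT_QUOTIENT)  # validator churn limit
Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH                             # balance top-ups per epoch
D = SAFETY_DECAY

ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
assert T * (200 + 3 * D) < t * (200 + 12 * D)  # this row takes the first branch
epochs_for_validator_set_churn = N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
epochs_for_balance_top_ups = N * (200 + 3 * D) // (600 * Delta)
ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)
assert ws_period == 665  # matches the `10 | 32 | 32768` row above
```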
|
||||
## Weak Subjectivity Sync
|
||||
|
||||
|
@ -101,10 +143,13 @@ Clients should allow users to input a Weak Subjectivity Checkpoint at startup, a
|
|||
1. Input a Weak Subjectivity Checkpoint as a CLI parameter in `block_root:epoch_number` format,
|
||||
where `block_root` (an "0x" prefixed 32-byte hex string) and `epoch_number` (an integer) represent a valid `Checkpoint`; a minimal parsing sketch is given after this list.
|
||||
Example of the format:
|
||||
|
||||
```
|
||||
0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544
|
||||
```
|
||||
2. - *IF* `epoch_number > store.finalized_checkpoint.epoch`,
|
||||
|
||||
2. Check the weak subjectivity requirements:
|
||||
- *IF* `epoch_number > store.finalized_checkpoint.epoch`,
|
||||
then *ASSERT* during block sync that the block with root `block_root` is in the sync path at epoch `epoch_number`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
- *IF* `epoch_number <= store.finalized_checkpoint.epoch`,
|
||||
|
@ -112,11 +157,14 @@ Clients should allow users to input a Weak Subjectivity Checkpoint at startup, a
|
|||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
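As a concrete illustration of the checkpoint format from step 1 (the helper and the CLI plumbing are hypothetical; `Checkpoint`, `Root`, and `Epoch` are the Phase 0 types):

```python
def parse_ws_checkpoint(arg: str) -> Checkpoint:
    # "block_root:epoch_number" -> Checkpoint
    block_root_hex, _, epoch_number = arg.partition(':')
    return Checkpoint(root=Root(bytes.fromhex(block_root_hex[2:])), epoch=Epoch(int(epoch_number)))

ws_checkpoint = parse_ws_checkpoint(
    "0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544"
)
```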
|
||||
### Checking for Stale Weak Subjectivity Checkpoint
|
||||
|
||||
Clients may choose to validate that the input Weak Subjectivity Checkpoint is not stale at the time of startup.
|
||||
To support this mechanism, the client needs to take the state at the Weak Subjectivity Checkpoint as
|
||||
a CLI parameter input (or fetch the state associated with the input Weak Subjectivity Checkpoint from some source).
|
||||
The check can be implemented in the following way:
|
||||
|
||||
#### `is_within_weak_subjectivity_period`
|
||||
|
||||
```python
|
||||
def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool:
|
||||
# Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
|
||||
|
@ -130,4 +178,5 @@ def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_c
|
|||
```
|
||||
|
||||
## Distributing Weak Subjectivity Checkpoints
|
||||
|
||||
This section will be updated soon.
|
||||
|
|
|
@ -1054,11 +1054,16 @@ def process_epoch(state: BeaconState) -> None:
|
|||
process_justification_and_finalization(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_reveal_deadlines(state)
|
||||
process_challenge_deadlines(state)
|
||||
process_reveal_deadlines(state) # Phase 1
|
||||
process_challenge_deadlines(state) # Phase 1
|
||||
process_slashings(state)
|
||||
process_final_updates(state) # phase 0 final updates
|
||||
process_phase_1_final_updates(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_roots_update(state)
|
||||
process_participation_record_updates(state)
|
||||
process_phase_1_final_updates(state) # Phase 1
|
||||
```
|
||||
|
||||
#### Phase 1 final updates
|
||||
|
|
|
@ -67,7 +67,7 @@ A validator is an entity that participates in the consensus of the Ethereum 2.0
|
|||
|
||||
This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden.
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use as a reference throughout.
|
||||
All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use them as a reference throughout.
|
||||
|
||||
## Constants
|
||||
|
||||
|
@ -352,7 +352,7 @@ Aggregation selection and the core of this duty are largely unchanged from Phase
|
|||
|
||||
Note that the timing of when to broadcast aggregates is altered in Phase 1+.
|
||||
|
||||
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot`-that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`.
|
||||
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot` -- that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`.
|
||||
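A rough sketch of that timing, assuming mainnet's 12-second `SECONDS_PER_SLOT` (the helper is illustrative, not a spec function):

```python
def aggregate_broadcast_time(genesis_time: int, slot: int, seconds_per_slot: int = 12) -> int:
    # Broadcast the SignedAggregateAndProof three-fourths of the way through the slot.
    slot_start = genesis_time + slot * seconds_per_slot
    return slot_start + seconds_per_slot * 3 // 4
```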
|
||||
##### `AggregateAndProof`
|
||||
|
||||
|
|
|
@ -1,40 +0,0 @@
|
|||
from inspect import getmembers, isfunction
|
||||
from typing import Any, Iterable
|
||||
|
||||
from gen_base.gen_typing import TestCase
|
||||
|
||||
|
||||
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
|
||||
fork_name: str, bls_active: bool = True) -> Iterable[TestCase]:
|
||||
"""
|
||||
Generate a list of test cases by running tests from the given src in generator-mode.
|
||||
:param runner_name: to categorize the test in general as.
|
||||
:param handler_name: to categorize the test specialization as.
|
||||
:param src: to retrieve tests from (discovered using inspect.getmembers).
|
||||
:param fork_name: to run tests against particular phase and/or fork.
|
||||
(if multiple forks are applicable, indicate the last fork)
|
||||
:param bls_active: optional, to override BLS switch preference. Defaults to True.
|
||||
:return: an iterable of test cases.
|
||||
"""
|
||||
fn_names = [
|
||||
name for (name, _) in getmembers(src, isfunction)
|
||||
if name.startswith('test_')
|
||||
]
|
||||
print("generating test vectors from tests source: %s" % src.__name__)
|
||||
for name in fn_names:
|
||||
tfn = getattr(src, name)
|
||||
|
||||
# strip off the `test_`
|
||||
case_name = name
|
||||
if case_name.startswith('test_'):
|
||||
case_name = case_name[5:]
|
||||
|
||||
yield TestCase(
|
||||
fork_name=fork_name,
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
suite_name='pyspec_tests',
|
||||
case_name=case_name,
|
||||
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
|
||||
case_fn=lambda: tfn(generator_mode=True, phase=fork_name, bls_active=bls_active)
|
||||
)
|
|
@ -1,3 +0,0 @@
|
|||
ruamel.yaml==0.16.5
|
||||
eth-utils==1.6.0
|
||||
pytest>=4.4
|
|
@ -1,11 +0,0 @@
|
|||
from distutils.core import setup
|
||||
|
||||
setup(
|
||||
name='gen_helpers',
|
||||
packages=['gen_base', 'gen_from_tests'],
|
||||
install_requires=[
|
||||
"ruamel.yaml==0.16.5",
|
||||
"eth-utils==1.6.0",
|
||||
"pytest>=4.4",
|
||||
]
|
||||
)
|
|
@ -27,7 +27,7 @@ python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-ch
|
|||
|
||||
After installing, you can install the optional dependencies for testing and linting.
|
||||
With makefile: `make install_test`.
|
||||
Or manually: run `pip install .[testing]` and `pip install .[linting]`.
|
||||
Or manually: run `pip install .[test]` and `pip install .[lint]`.
|
||||
|
||||
These tests are not intended for client-consumption.
|
||||
These tests are testing the spec itself, to verify consistency and provide feedback on modifications of the spec.
|
||||
|
|
|
@ -1 +1 @@
|
|||
1.0.0
|
||||
1.0.1
|
|
@ -4,7 +4,7 @@
|
|||
|
||||
A util to quickly write new test suite generators with.
|
||||
|
||||
See [Generators documentation](../../generators/README.md) for integration details.
|
||||
See [Generators documentation](../../../../generators/README.md) for integration details.
|
||||
|
||||
Options:
|
||||
|
|
@ -8,11 +8,11 @@ from ruamel.yaml import (
|
|||
YAML,
|
||||
)
|
||||
|
||||
from gen_base.gen_typing import TestProvider
|
||||
|
||||
from eth2spec.test import context
|
||||
from eth2spec.test.exceptions import SkippedTest
|
||||
|
||||
from .gen_typing import TestProvider
|
||||
|
||||
|
||||
# Flag that the runner does NOT run test via pytest
|
||||
context.is_pytest = False
|
||||
|
@ -119,10 +119,11 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
|||
|
||||
print(f"generating tests with config '{config_name}' ...")
|
||||
for test_case in tprov.make_cases():
|
||||
case_dir = Path(output_dir) / Path(config_name) / Path(test_case.fork_name) \
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name) \
|
||||
case_dir = (
|
||||
Path(output_dir) / Path(config_name) / Path(test_case.fork_name)
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name)
|
||||
/ Path(test_case.suite_name) / Path(test_case.case_name)
|
||||
|
||||
)
|
||||
if case_dir.exists():
|
||||
if not args.force:
|
||||
print(f'Skipping already existing test: {case_dir}')
|
|
@ -0,0 +1,105 @@
|
|||
from importlib import reload, import_module
|
||||
from inspect import getmembers, isfunction
|
||||
from typing import Any, Callable, Dict, Iterable, Optional
|
||||
|
||||
from eth2spec.config import config_util
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.test.context import ALL_CONFIGS, TESTGEN_FORKS, SpecForkName, ConfigName
|
||||
|
||||
from eth2spec.gen_helpers.gen_base import gen_runner
|
||||
from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider
|
||||
|
||||
|
||||
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
|
||||
fork_name: SpecForkName, bls_active: bool = True,
|
||||
phase: Optional[str]=None) -> Iterable[TestCase]:
|
||||
"""
|
||||
Generate a list of test cases by running tests from the given src in generator-mode.
|
||||
:param runner_name: to categorize the test in general as.
|
||||
:param handler_name: to categorize the test specialization as.
|
||||
:param src: to retrieve tests from (discovered using inspect.getmembers).
|
||||
:param fork_name: the folder name for these tests.
|
||||
(if multiple forks are applicable, indicate the last fork)
|
||||
:param bls_active: optional, to override BLS switch preference. Defaults to True.
|
||||
:param phase: optional, to run tests against a particular spec version. Default to `fork_name` value.
|
||||
:return: an iterable of test cases.
|
||||
"""
|
||||
fn_names = [
|
||||
name for (name, _) in getmembers(src, isfunction)
|
||||
if name.startswith('test_')
|
||||
]
|
||||
|
||||
if phase is None:
|
||||
phase = fork_name
|
||||
|
||||
print("generating test vectors from tests source: %s" % src.__name__)
|
||||
for name in fn_names:
|
||||
tfn = getattr(src, name)
|
||||
|
||||
# strip off the `test_`
|
||||
case_name = name
|
||||
if case_name.startswith('test_'):
|
||||
case_name = case_name[5:]
|
||||
|
||||
yield TestCase(
|
||||
fork_name=fork_name,
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
suite_name='pyspec_tests',
|
||||
case_name=case_name,
|
||||
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
|
||||
case_fn=lambda: tfn(generator_mode=True, phase=phase, bls_active=bls_active)
|
||||
)
|
||||
|
||||
|
||||
def get_provider(create_provider_fn: Callable[[SpecForkName, str, str, ConfigName], TestProvider],
|
||||
config_name: ConfigName,
|
||||
fork_name: SpecForkName,
|
||||
all_mods: Dict[str, Dict[str, str]]) -> Iterable[TestProvider]:
|
||||
for key, mod_name in all_mods[fork_name].items():
|
||||
yield create_provider_fn(
|
||||
fork_name=fork_name,
|
||||
handler_name=key,
|
||||
tests_src_mod_name=mod_name,
|
||||
config_name=config_name,
|
||||
)
|
||||
|
||||
|
||||
def get_create_provider_fn(
|
||||
runner_name: str, config_name: ConfigName, specs: Iterable[Any]
|
||||
) -> Callable[[SpecForkName, str, str, ConfigName], TestProvider]:
|
||||
def prepare_fn(configs_path: str) -> str:
|
||||
config_util.prepare_config(configs_path, config_name)
|
||||
for spec in specs:
|
||||
reload(spec)
|
||||
bls.use_milagro()
|
||||
return config_name
|
||||
|
||||
def create_provider(fork_name: SpecForkName, handler_name: str,
|
||||
tests_src_mod_name: str, config_name: ConfigName) -> TestProvider:
|
||||
def cases_fn() -> Iterable[TestCase]:
|
||||
tests_src = import_module(tests_src_mod_name)
|
||||
return generate_from_tests(
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
src=tests_src,
|
||||
fork_name=fork_name,
|
||||
)
|
||||
|
||||
return TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||
return create_provider
|
||||
|
||||
|
||||
def run_state_test_generators(runner_name: str, specs: Iterable[Any], all_mods: Dict[str, Dict[str, str]]) -> None:
|
||||
"""
|
||||
Generate all available state tests of `TESTGEN_FORKS` forks of `ALL_CONFIGS` configs of the given runner.
|
||||
"""
|
||||
for config_name in ALL_CONFIGS:
|
||||
for fork_name in TESTGEN_FORKS:
|
||||
if fork_name in all_mods:
|
||||
gen_runner.run_generator(runner_name, get_provider(
|
||||
create_provider_fn=get_create_provider_fn(runner_name, config_name, specs),
|
||||
config_name=config_name,
|
||||
fork_name=fork_name,
|
||||
all_mods=all_mods,
|
||||
))
|
|
@ -37,6 +37,10 @@ ALL_PHASES = (PHASE0, PHASE1, LIGHTCLIENT_PATCH)
|
|||
MAINNET = ConfigName('mainnet')
|
||||
MINIMAL = ConfigName('minimal')
|
||||
|
||||
ALL_CONFIGS = (MINIMAL, MAINNET)
|
||||
|
||||
# The forks that output to the test vectors.
|
||||
TESTGEN_FORKS = (PHASE0, LIGHTCLIENT_PATCH)
|
||||
|
||||
# TODO: currently phases are defined as python modules.
|
||||
# It would be better if they would be more well-defined interfaces for stronger typing.
|
||||
|
@ -78,7 +82,7 @@ def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Ca
|
|||
# TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper.
|
||||
# Decide based on performance/consistency results later.
|
||||
state = phases[PHASE1].upgrade_to_phase1(state)
|
||||
elif spec.fork == LIGHTCLIENT_PATCH: # not generalizing this just yet, unclear final spec fork/patch order.
|
||||
elif spec.fork == LIGHTCLIENT_PATCH:
|
||||
state = phases[LIGHTCLIENT_PATCH].upgrade_to_lightclient_patch(state)
|
||||
|
||||
return state
|
||||
|
@ -336,12 +340,13 @@ def with_phases(phases, other_phases=None):
|
|||
|
||||
available_phases = set(run_phases)
|
||||
if other_phases is not None:
|
||||
available_phases += set(other_phases)
|
||||
available_phases |= set(other_phases)
|
||||
|
||||
# TODO: test state is dependent on phase0 but is immediately transitioned to phase1.
|
||||
# A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0
|
||||
available_phases.add(PHASE0)
|
||||
|
||||
# Populate all phases for multi-phase tests
|
||||
phase_dir = {}
|
||||
if PHASE0 in available_phases:
|
||||
phase_dir[PHASE0] = spec_phase0
|
||||
|
@ -390,3 +395,11 @@ def only_full_crosslink(fn):
|
|||
return None
|
||||
return fn(*args, spec=spec, state=state, **kw)
|
||||
return wrapper
|
||||
|
||||
|
||||
def is_post_lightclient_patch(spec):
|
||||
if spec.fork in [PHASE0, PHASE1]:
|
||||
# TODO: PHASE1 fork is temporarily parallel to LIGHTCLIENT_PATCH.
|
||||
# Will make PHASE1 fork inherit LIGHTCLIENT_PATCH later.
|
||||
return False
|
||||
return True
|
||||
|
|
|
@ -2,7 +2,7 @@ from lru import LRU
|
|||
|
||||
from typing import List
|
||||
|
||||
from eth2spec.test.context import expect_assertion_error, PHASE1
|
||||
from eth2spec.test.context import expect_assertion_error, PHASE1, is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
|
||||
|
@ -30,6 +30,7 @@ def run_attestation_processing(spec, state, attestation, valid=True):
|
|||
yield 'post', None
|
||||
return
|
||||
|
||||
if not is_post_lightclient_patch(spec):
|
||||
current_epoch_count = len(state.current_epoch_attestations)
|
||||
previous_epoch_count = len(state.previous_epoch_attestations)
|
||||
|
||||
|
@ -37,10 +38,14 @@ def run_attestation_processing(spec, state, attestation, valid=True):
|
|||
spec.process_attestation(state, attestation)
|
||||
|
||||
# Make sure the attestation has been processed
|
||||
if not is_post_lightclient_patch(spec):
|
||||
if attestation.data.target.epoch == spec.get_current_epoch(state):
|
||||
assert len(state.current_epoch_attestations) == current_epoch_count + 1
|
||||
else:
|
||||
assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
|
||||
else:
|
||||
# After accounting reform, there are cases when processing an attestation does not result in any flag updates
|
||||
pass
|
||||
|
||||
# yield post-state
|
||||
yield 'post', state
|
||||
|
@ -315,6 +320,7 @@ def prepare_state_with_attestations(spec, state, participation_fn=None):
|
|||
next_slot(spec, state)
|
||||
|
||||
assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
if not is_post_lightclient_patch(spec):
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
|
||||
return attestations
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
from eth2spec.test.context import LIGHTCLIENT_PATCH
|
||||
from eth2spec.test.context import is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.keys import privkeys
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.utils.bls import only_with_bls
|
||||
|
@ -91,7 +91,7 @@ def build_empty_block(spec, state, slot=None):
|
|||
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
|
||||
empty_block.parent_root = parent_block_root
|
||||
|
||||
if spec.fork == LIGHTCLIENT_PATCH:
|
||||
if is_post_lightclient_patch(spec):
|
||||
empty_block.body.sync_committee_signature = spec.G2_POINT_AT_INFINITY
|
||||
|
||||
apply_randao_reveal(spec, state, empty_block)
|
||||
|
|
|
@ -1,14 +1,29 @@
|
|||
|
||||
process_calls = [
|
||||
from eth2spec.test.context import is_post_lightclient_patch
|
||||
|
||||
|
||||
def get_process_calls(spec):
|
||||
return [
|
||||
# PHASE0
|
||||
'process_justification_and_finalization',
|
||||
'process_rewards_and_penalties',
|
||||
'process_registry_updates',
|
||||
'process_reveal_deadlines',
|
||||
'process_challenge_deadlines',
|
||||
'process_slashings',
|
||||
'process_final_updates',
|
||||
'after_process_final_updates',
|
||||
]
|
||||
'process_eth1_data_reset',
|
||||
'process_effective_balance_updates',
|
||||
'process_slashings_reset',
|
||||
'process_randao_mixes_reset',
|
||||
'process_historical_roots_update',
|
||||
# HF1 replaced `process_participation_record_updates` with `process_participation_flag_updates`
|
||||
'process_participation_flag_updates' if is_post_lightclient_patch(spec) else (
|
||||
'process_participation_record_updates'
|
||||
),
|
||||
'process_sync_committee_updates',
|
||||
# PHASE1
|
||||
'process_phase_1_final_updates',
|
||||
]
|
||||
|
||||
|
||||
def run_epoch_processing_to(spec, state, process_name: str):
|
||||
|
@ -25,7 +40,7 @@ def run_epoch_processing_to(spec, state, process_name: str):
|
|||
spec.process_slot(state)
|
||||
|
||||
# process components of epoch transition before final-updates
|
||||
for name in process_calls:
|
||||
for name in get_process_calls(spec):
|
||||
if name == process_name:
|
||||
break
|
||||
# only run when present. Later phases introduce more to the epoch-processing.
|
|
@ -1,8 +1,16 @@
|
|||
from eth2spec.test.context import is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.block_header import sign_block_header
|
||||
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
||||
from eth2spec.test.helpers.state import get_balance
|
||||
|
||||
|
||||
def get_min_slashing_penalty_quotient(spec):
|
||||
if is_post_lightclient_patch(spec):
|
||||
return spec.HF1_MIN_SLASHING_PENALTY_QUOTIENT
|
||||
else:
|
||||
return spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||
|
||||
|
||||
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
||||
slashed_validator = state.validators[slashed_index]
|
||||
assert slashed_validator.slashed
|
||||
|
@ -10,7 +18,7 @@ def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
|||
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
||||
|
||||
proposer_index = spec.get_beacon_proposer_index(state)
|
||||
slash_penalty = state.validators[slashed_index].effective_balance // spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||
slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec)
|
||||
whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
|
||||
if proposer_index != slashed_index:
|
||||
# slashed validator lost initial slash penalty
|
||||
|
|
|
@ -2,7 +2,7 @@ from random import Random
|
|||
from lru import LRU
|
||||
|
||||
from eth2spec.phase0 import spec as spec_phase0
|
||||
from eth2spec.test.context import LIGHTCLIENT_PATCH
|
||||
from eth2spec.test.context import is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
|
||||
from eth2spec.test.helpers.deposits import mock_deposit
|
||||
from eth2spec.test.helpers.state import next_epoch
|
||||
|
@ -38,24 +38,35 @@ def run_deltas(spec, state):
|
|||
- inactivity penalty deltas ('inactivity_penalty_deltas')
|
||||
"""
|
||||
yield 'pre', state
|
||||
|
||||
if is_post_lightclient_patch(spec):
|
||||
def get_source_deltas(state):
|
||||
return spec.get_flag_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX, spec.TIMELY_SOURCE_FLAG_NUMERATOR)
|
||||
|
||||
def get_head_deltas(state):
|
||||
return spec.get_flag_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX, spec.TIMELY_HEAD_FLAG_NUMERATOR)
|
||||
|
||||
def get_target_deltas(state):
|
||||
return spec.get_flag_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX, spec.TIMELY_TARGET_FLAG_NUMERATOR)
|
||||
|
||||
yield from run_attestation_component_deltas(
|
||||
spec,
|
||||
state,
|
||||
spec.get_source_deltas,
|
||||
spec.get_source_deltas if not is_post_lightclient_patch(spec) else get_source_deltas,
|
||||
spec.get_matching_source_attestations,
|
||||
'source_deltas',
|
||||
)
|
||||
yield from run_attestation_component_deltas(
|
||||
spec,
|
||||
state,
|
||||
spec.get_target_deltas,
|
||||
spec.get_target_deltas if not is_post_lightclient_patch(spec) else get_target_deltas,
|
||||
spec.get_matching_target_attestations,
|
||||
'target_deltas',
|
||||
)
|
||||
yield from run_attestation_component_deltas(
|
||||
spec,
|
||||
state,
|
||||
spec.get_head_deltas,
|
||||
spec.get_head_deltas if not is_post_lightclient_patch(spec) else get_head_deltas,
|
||||
spec.get_matching_head_attestations,
|
||||
'head_deltas',
|
||||
)
|
||||
|
@ -63,6 +74,16 @@ def run_deltas(spec, state):
|
|||
yield from run_get_inactivity_penalty_deltas(spec, state)
|
||||
|
||||
|
||||
def deltas_name_to_flag_index(spec, deltas_name):
|
||||
if 'source' in deltas_name:
|
||||
return spec.TIMELY_SOURCE_FLAG_INDEX
|
||||
elif 'head' in deltas_name:
|
||||
return spec.TIMELY_HEAD_FLAG_INDEX
|
||||
elif 'target' in deltas_name:
|
||||
return spec.TIMELY_TARGET_FLAG_INDEX
|
||||
raise ValueError("Wrong deltas_name %s" % deltas_name)
|
||||
|
||||
|
||||
def run_attestation_component_deltas(spec, state, component_delta_fn, matching_att_fn, deltas_name):
|
||||
"""
|
||||
Run ``component_delta_fn``, yielding:
|
||||
|
@ -72,8 +93,14 @@ def run_attestation_component_deltas(spec, state, component_delta_fn, matching_a
|
|||
|
||||
yield deltas_name, Deltas(rewards=rewards, penalties=penalties)
|
||||
|
||||
if not is_post_lightclient_patch(spec):
|
||||
matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
|
||||
matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||
else:
|
||||
matching_indices = spec.get_unslashed_participating_indices(
|
||||
state, deltas_name_to_flag_index(spec, deltas_name), spec.get_previous_epoch(state)
|
||||
)
|
||||
|
||||
eligible_indices = spec.get_eligible_validator_indices(state)
|
||||
for index in range(len(state.validators)):
|
||||
if index not in eligible_indices:
|
||||
|
@ -102,6 +129,12 @@ def run_get_inclusion_delay_deltas(spec, state):
|
|||
Run ``get_inclusion_delay_deltas``, yielding:
|
||||
- inclusion delay deltas ('inclusion_delay_deltas')
|
||||
"""
|
||||
if is_post_lightclient_patch(spec):
|
||||
# No inclusion_delay_deltas
|
||||
yield 'inclusion_delay_deltas', Deltas(rewards=[0] * len(state.validators),
|
||||
penalties=[0] * len(state.validators))
|
||||
return
|
||||
|
||||
rewards, penalties = spec.get_inclusion_delay_deltas(state)
|
||||
|
||||
yield 'inclusion_delay_deltas', Deltas(rewards=rewards, penalties=penalties)
|
||||
|
@ -149,8 +182,14 @@ def run_get_inactivity_penalty_deltas(spec, state):
|
|||
|
||||
yield 'inactivity_penalty_deltas', Deltas(rewards=rewards, penalties=penalties)
|
||||
|
||||
if not is_post_lightclient_patch(spec):
|
||||
matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
|
||||
matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||
else:
|
||||
matching_attesting_indices = spec.get_unslashed_participating_indices(
|
||||
state, spec.TIMELY_TARGET_FLAG_INDEX, spec.get_previous_epoch(state)
|
||||
)
|
||||
reward_numerator_sum = sum(numerator for (_, numerator) in spec.get_flag_indices_and_numerators())
|
||||
|
||||
eligible_indices = spec.get_eligible_validator_indices(state)
|
||||
for index in range(len(state.validators)):
|
||||
|
@ -160,12 +199,14 @@ def run_get_inactivity_penalty_deltas(spec, state):
|
|||
continue
|
||||
|
||||
if spec.is_in_inactivity_leak(state):
|
||||
if spec.fork == LIGHTCLIENT_PATCH:
|
||||
cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH - 1
|
||||
else:
|
||||
# Compute base_penalty
|
||||
if not is_post_lightclient_patch(spec):
|
||||
cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
|
||||
base_reward = spec.get_base_reward(state, index)
|
||||
base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
|
||||
else:
|
||||
base_penalty = spec.get_base_reward(state, index) * reward_numerator_sum // spec.FLAG_DENOMINATOR
|
||||
|
||||
if not has_enough_for_reward(spec, state, index):
|
||||
assert penalties[index] == 0
|
||||
elif index in matching_attesting_indices:
|
||||
|
@ -267,8 +308,13 @@ def run_test_full_all_correct(spec, state):
|
|||
def run_test_full_but_partial_participation(spec, state, rng=Random(5522)):
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
if not is_post_lightclient_patch(spec):
|
||||
for a in state.previous_epoch_attestations:
|
||||
a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
|
||||
else:
|
||||
for index in range(len(state.validators)):
|
||||
if rng.choice([True, False]):
|
||||
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
|
||||
|
||||
yield from run_deltas(spec, state)
|
||||
|
||||
|
@ -277,8 +323,12 @@ def run_test_partial(spec, state, fraction_filled):
|
|||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
# Remove portion of attestations
|
||||
if not is_post_lightclient_patch(spec):
|
||||
num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
|
||||
state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
|
||||
else:
|
||||
for index in range(int(len(state.validators) * fraction_filled)):
|
||||
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
|
||||
|
||||
yield from run_deltas(spec, state)
|
||||
|
||||
|
@ -333,6 +383,7 @@ def run_test_some_very_low_effective_balances_that_attested(spec, state):
|
|||
def run_test_some_very_low_effective_balances_that_did_not_attest(spec, state):
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
if not is_post_lightclient_patch(spec):
|
||||
# Remove attestation
|
||||
attestation = state.previous_epoch_attestations[0]
|
||||
state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
|
||||
|
@ -340,6 +391,10 @@ def run_test_some_very_low_effective_balances_that_did_not_attest(spec, state):
|
|||
indices = spec.get_unslashed_attesting_indices(state, [attestation])
|
||||
for i, index in enumerate(indices):
|
||||
state.validators[index].effective_balance = i
|
||||
else:
|
||||
index = 0
|
||||
state.validators[index].effective_balance = 1
|
||||
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
|
||||
|
||||
yield from run_deltas(spec, state)
|
||||
|
||||
|
@ -447,6 +502,7 @@ def run_test_full_random(spec, state, rng=Random(8020)):
|
|||
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
if not is_post_lightclient_patch(spec):
|
||||
for pending_attestation in state.previous_epoch_attestations:
|
||||
# ~1/3 have bad target
|
||||
if rng.randint(0, 2) == 0:
|
||||
|
@ -455,8 +511,34 @@ def run_test_full_random(spec, state, rng=Random(8020)):
|
|||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.beacon_block_root = b'\x66' * 32
|
||||
# ~50% participation
|
||||
pending_attestation.aggregation_bits = [rng.choice([True, False]) for _ in pending_attestation.aggregation_bits]
|
||||
pending_attestation.aggregation_bits = [rng.choice([True, False])
|
||||
for _ in pending_attestation.aggregation_bits]
|
||||
# Random inclusion delay
|
||||
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
|
||||
else:
|
||||
for index in range(len(state.validators)):
|
||||
# ~1/3 have bad head or bad target or not timely enough
|
||||
is_timely_correct_head = rng.randint(0, 2) != 0
|
||||
flags = state.previous_epoch_participation[index]
|
||||
|
||||
def set_flag(index, value):
|
||||
nonlocal flags
|
||||
flag = spec.ParticipationFlags(2**index)
|
||||
if value:
|
||||
flags |= flag
|
||||
else:
|
||||
flags &= 0xff ^ flag
|
||||
|
||||
set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
|
||||
if is_timely_correct_head:
|
||||
# If timely head, then must be timely target
|
||||
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
|
||||
# If timely head, then must be timely source
|
||||
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
|
||||
else:
|
||||
# ~50% of remaining have bad target or not timely enough
|
||||
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
|
||||
# ~50% of remaining have bad source or not timely enough
|
||||
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
|
||||
state.previous_epoch_participation[index] = flags
|
||||
yield from run_deltas(spec, state)
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
from eth2spec.test.helpers.keys import privkeys
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.utils import bls
|
||||
|
||||
|
||||
def compute_sync_committee_signature(spec, state, slot, privkey):
|
||||
domain = spec.get_domain(state, spec.DOMAIN_SYNC_COMMITTEE, spec.compute_epoch_at_slot(slot))
|
||||
if slot == state.slot:
|
||||
block_root = build_empty_block_for_next_slot(spec, state).parent_root
|
||||
else:
|
||||
block_root = spec.get_block_root_at_slot(state, slot)
|
||||
signing_root = spec.compute_signing_root(block_root, domain)
|
||||
return bls.Sign(privkey, signing_root)
|
||||
|
||||
|
||||
def compute_aggregate_sync_committee_signature(spec, state, slot, participants):
|
||||
if len(participants) == 0:
|
||||
return spec.G2_POINT_AT_INFINITY
|
||||
|
||||
signatures = []
|
||||
for validator_index in participants:
|
||||
privkey = privkeys[validator_index]
|
||||
signatures.append(
|
||||
compute_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
slot,
|
||||
privkey,
|
||||
)
|
||||
)
|
||||
return bls.Aggregate(signatures)
|
|
@ -0,0 +1,319 @@
|
|||
from collections import Counter
|
||||
import random
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
transition_unsigned_block,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
transition_to,
|
||||
)
|
||||
from eth2spec.test.helpers.sync_committee import (
|
||||
compute_aggregate_sync_committee_signature,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, PHASE1,
|
||||
MAINNET, MINIMAL,
|
||||
expect_assertion_error,
|
||||
with_all_phases_except,
|
||||
with_configs,
|
||||
spec_state_test,
|
||||
always_bls,
|
||||
)
|
||||
from eth2spec.utils.hash_function import hash
|
||||
|
||||
|
||||
def get_committee_indices(spec, state, duplicates=False):
|
||||
'''
|
||||
This utility function allows the caller to ensure there are or are not
|
||||
duplicate validator indices in the returned committee based on
|
||||
the boolean ``duplicates``.
|
||||
'''
|
||||
state = state.copy()
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
|
||||
while True:
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
if duplicates:
|
||||
if len(committee) != len(set(committee)):
|
||||
return committee
|
||||
else:
|
||||
if len(committee) == len(set(committee)):
|
||||
return committee
|
||||
state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_invalid_signature_missing_participant(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
random_participant = random.choice(committee)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Exclude one participant whose signature was included.
|
||||
block.body.sync_committee_bits = [index != random_participant for index in committee]
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee, # full committee signs
|
||||
)
|
||||
|
||||
yield 'blocks', [block]
|
||||
expect_assertion_error(lambda: spec.process_sync_committee(state, block.body))
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_invalid_signature_extra_participant(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
random_participant = random.choice(committee)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Exclude one signature even though the block claims the entire committee participated.
|
||||
block.body.sync_committee_bits = [True] * len(committee)
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
[index for index in committee if index != random_participant],
|
||||
)
|
||||
|
||||
yield 'blocks', [block]
|
||||
expect_assertion_error(lambda: spec.process_sync_committee(state, block.body))
|
||||
yield 'post', None
|
||||
|
||||
|
||||
def compute_sync_committee_participant_reward(spec, state, participant_index, active_validator_count, committee_size):
|
||||
base_reward = spec.get_base_reward(state, participant_index)
|
||||
proposer_reward = spec.get_proposer_reward(state, participant_index)
|
||||
max_participant_reward = base_reward - proposer_reward
|
||||
return max_participant_reward * active_validator_count // committee_size // spec.SLOTS_PER_EPOCH
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@with_configs([MINIMAL], reason="to create nonduplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=False)
|
||||
committee_size = len(committee)
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size == len(set(committee))
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
pre_balances = state.balances.copy()
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_committee_bits = [True] * committee_size
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
for index in range(len(state.validators)):
|
||||
expected_reward = 0
|
||||
|
||||
if index == block.proposer_index:
|
||||
expected_reward += sum([spec.get_proposer_reward(state, index) for index in committee])
|
||||
|
||||
if index in committee:
|
||||
expected_reward += compute_sync_committee_participant_reward(
|
||||
spec,
|
||||
state,
|
||||
index,
|
||||
active_validator_count,
|
||||
committee_size
|
||||
)
|
||||
|
||||
assert state.balances[index] == pre_balances[index] + expected_reward
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@with_configs([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=True)
|
||||
committee_size = len(committee)
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size > len(set(committee))
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
pre_balances = state.balances.copy()
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_committee_bits = [True] * committee_size
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
multiplicities = Counter(committee)
|
||||
|
||||
for index in range(len(state.validators)):
|
||||
expected_reward = 0
|
||||
|
||||
if index == block.proposer_index:
|
||||
expected_reward += sum([spec.get_proposer_reward(state, index) for index in committee])
|
||||
|
||||
if index in committee:
|
||||
reward = compute_sync_committee_participant_reward(
|
||||
spec,
|
||||
state,
|
||||
index,
|
||||
active_validator_count,
|
||||
committee_size,
|
||||
)
|
||||
expected_reward += reward * multiplicities[index]
|
||||
|
||||
assert state.balances[index] == pre_balances[index] + expected_reward
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_past_block(spec, state):
|
||||
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))

    yield 'pre', state

    blocks = []
    for _ in range(2):
        # NOTE: need to transition twice to move beyond the degenerate case at genesis
        block = build_empty_block_for_next_slot(spec, state)
        # Valid sync committee signature here...
        block.body.sync_committee_bits = [True] * len(committee)
        block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee,
        )

        signed_block = state_transition_and_sign_block(spec, state, block)
        blocks.append(signed_block)

    invalid_block = build_empty_block_for_next_slot(spec, state)
    # Invalid signature from a slot other than the previous
    invalid_block.body.sync_committee_bits = [True] * len(committee)
    invalid_block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        invalid_block.slot - 2,
        committee,
    )
    blocks.append(invalid_block)

    expect_assertion_error(lambda: transition_unsigned_block(spec, state, invalid_block))

    yield 'blocks', blocks
    yield 'post', None


@with_all_phases_except([PHASE0, PHASE1])
@with_configs([MINIMAL], reason="to produce different committee sets")
@spec_state_test
@always_bls
def test_invalid_signature_previous_committee(spec, state):
    # NOTE: the `state` provided is at genesis and the process to select
    # sync committees currently returns the same committee for the first and second
    # periods at genesis.
    # To get a distinct committee so we can generate an "old" signature, we need to advance
    # 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
    current_epoch = spec.get_current_epoch(state)
    old_sync_committee = state.next_sync_committee

    epoch_in_future_sync_commitee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    slot_in_future_sync_committee_period = epoch_in_future_sync_commitee_period * spec.SLOTS_PER_EPOCH
    transition_to(spec, state, slot_in_future_sync_committee_period)

    yield 'pre', state

    # Use the previous sync committee to produce the signature.
    pubkeys = [validator.pubkey for validator in state.validators]
    # Ensure that the pubkey sets are different.
    assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
    committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_committee_bits = [True] * len(committee)
    block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block.slot - 1,
        committee,
    )

    yield 'blocks', [block]
    expect_assertion_error(lambda: spec.process_sync_committee(state, block.body))
    yield 'post', None


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_valid_signature_future_committee(spec, state):
    # NOTE: the `state` provided is at genesis and the process to select
    # sync committees currently returns the same committee for the first and second
    # periods at genesis.
    # To get a distinct committee so we can generate an "old" signature, we need to advance
    # 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
    current_epoch = spec.get_current_epoch(state)
    old_current_sync_committee = state.current_sync_committee
    old_next_sync_committee = state.next_sync_committee

    epoch_in_future_sync_committee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    slot_in_future_sync_committee_period = epoch_in_future_sync_committee_period * spec.SLOTS_PER_EPOCH
    transition_to(spec, state, slot_in_future_sync_committee_period)

    sync_committee = state.current_sync_committee

    expected_sync_committee = spec.get_sync_committee(state, epoch_in_future_sync_committee_period)

    assert sync_committee == expected_sync_committee
    assert sync_committee != old_current_sync_committee
    assert sync_committee != old_next_sync_committee

    pubkeys = [validator.pubkey for validator in state.validators]
    committee_indices = [pubkeys.index(pubkey) for pubkey in sync_committee.pubkeys]

    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_committee_bits = [True] * len(committee_indices)
    block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block.slot - 1,
        committee_indices,
    )

    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state
@ -0,0 +1,37 @@
from eth2spec.test.context import (
    PHASE0, PHASE1,
    with_all_phases_except,
    spec_state_test,
)
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with,
)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_sync_committees_progress(spec, state):
    current_epoch = spec.get_current_epoch(state)
    # NOTE: if not in the genesis epoch, period math below needs to be
    # adjusted relative to the current epoch
    assert current_epoch == 0

    first_sync_committee = state.current_sync_committee
    second_sync_committee = state.next_sync_committee

    slot_at_end_of_current_period = spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - 1
    transition_to(spec, state, slot_at_end_of_current_period)

    # Ensure assignments have not changed:
    assert state.current_sync_committee == first_sync_committee
    assert state.next_sync_committee == second_sync_committee

    yield from run_epoch_processing_with(spec, state, 'process_sync_committee_updates')

    # Can compute the third committee having computed final balances in the last epoch
    # of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
    third_sync_committee = spec.get_sync_committee(state, 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

    assert state.current_sync_committee == second_sync_committee
    assert state.next_sync_committee == third_sync_committee
@ -0,0 +1,113 @@
from eth2spec.test.context import (
    PHASE0, LIGHTCLIENT_PATCH,
    with_phases,
    with_custom_state,
    spec_test, with_state,
    low_balances, misc_balances, large_validator_set,
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.state import (
    next_epoch,
    next_epoch_via_block,
)


HF1_FORK_TEST_META_TAGS = {
    'fork': 'altair',
}


def run_fork_test(post_spec, pre_state):
    yield 'pre', pre_state

    post_state = post_spec.upgrade_to_lightclient_patch(pre_state)

    # Stable fields
    stable_fields = [
        'genesis_time', 'genesis_validators_root', 'slot',
        # History
        'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
        # Eth1
        'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
        # Registry
        'validators', 'balances',
        # Randomness
        'randao_mixes',
        # Slashings
        'slashings',
        # Finality
        'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
    ]
    for field in stable_fields:
        assert getattr(pre_state, field) == getattr(post_state, field)

    # Modified fields
    modified_fields = ['fork']
    for field in modified_fields:
        assert getattr(pre_state, field) != getattr(post_state, field)

    assert pre_state.fork.current_version == post_state.fork.previous_version
    assert post_state.fork.current_version == post_spec.LIGHTCLIENT_PATCH_FORK_VERSION
    assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)

    yield 'post', post_state


@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
@spec_test
@with_state
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
def test_fork_base_state(spec, phases, state):
    yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)


@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
@spec_test
@with_state
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
def test_fork_next_epoch(spec, phases, state):
    next_epoch(spec, state)
    yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)


@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
@spec_test
@with_state
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
def test_fork_next_epoch_with_block(spec, phases, state):
    next_epoch_via_block(spec, state)
    yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)


@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
@spec_test
@with_state
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
def test_fork_many_next_epoch(spec, phases, state):
    for _ in range(3):
        next_epoch(spec, state)
    yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)


@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
def test_fork_random_low_balances(spec, phases, state):
    yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)


@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
def test_fork_random_misc_balances(spec, phases, state):
    yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)


@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
def test_fork_random_large_validator_set(spec, phases, state):
    yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)
@ -0,0 +1,75 @@
import random
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    next_epoch,
)
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)
from eth2spec.test.context import (
    PHASE0, PHASE1,
    with_all_phases_except,
    spec_state_test,
)


def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    participants = random.sample(committee, int(len(committee) * fraction_full))

    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_committee_bits = [index in participants for index in committee]
    block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block.slot - 1,
        participants,
    )
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_full_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_half_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_empty_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_full_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_half_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)


@with_all_phases_except([PHASE0, PHASE1])
@spec_state_test
def test_empty_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
@ -2,10 +2,13 @@ from eth2spec.test.context import (
    spec_state_test,
    always_bls, never_bls,
    with_all_phases,
    with_all_phases_except,
    spec_test,
    low_balances,
    with_custom_state,
    single_phase)
    single_phase,
    PHASE1,
)
from eth2spec.test.helpers.attestations import (
    run_attestation_processing,
    get_valid_attestation,
@ -329,3 +332,212 @@ def test_too_few_aggregation_bits(spec, state):
    attestation.aggregation_bits = attestation.aggregation_bits[:-1]

    yield from run_attestation_processing(spec, state, attestation, False)


#
# Full correct attestation contents at different slot inclusions
#

@with_all_phases
@spec_state_test
def test_correct_min_inclusion_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True)
    next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_correct_sqrt_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
    next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_correct_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
    next_slots(spec, state, spec.SLOTS_PER_EPOCH)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_correct_after_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True, on_time=False)

    # increment past latest inclusion slot
    next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)

    yield from run_attestation_processing(spec, state, attestation, False)


#
# Incorrect head but correct source/target at different slot inclusions
#

@with_all_phases_except([PHASE1])
@spec_state_test
def test_incorrect_head_min_inclusion_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False)
    next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)

    attestation.data.beacon_block_root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_head_sqrt_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
    next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))

    attestation.data.beacon_block_root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_head_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
    next_slots(spec, state, spec.SLOTS_PER_EPOCH)

    attestation.data.beacon_block_root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_head_after_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)

    # increment past latest inclusion slot
    next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)

    attestation.data.beacon_block_root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation, False)


#
# Incorrect head and target but correct source at different slot inclusions
#

# Note: current phase 1 spec checks
# `assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))`
# so this test can't pass that until phase 1 refactor is merged
@with_all_phases_except([PHASE1])
@spec_state_test
def test_incorrect_head_and_target_min_inclusion_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False)
    next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)

    attestation.data.beacon_block_root = b'\x42' * 32
    attestation.data.target.root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
    next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))

    attestation.data.beacon_block_root = b'\x42' * 32
    attestation.data.target.root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_head_and_target_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
    next_slots(spec, state, spec.SLOTS_PER_EPOCH)

    attestation.data.beacon_block_root = b'\x42' * 32
    attestation.data.target.root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_head_and_target_after_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
    # increment past latest inclusion slot
    next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)

    attestation.data.beacon_block_root = b'\x42' * 32
    attestation.data.target.root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation, False)


#
# Correct head and source but incorrect target at different slot inclusions
#

@with_all_phases_except([PHASE1])
@spec_state_test
def test_incorrect_target_min_inclusion_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False)
    next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)

    attestation.data.target.root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_target_sqrt_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
    next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))

    attestation.data.target.root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_target_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
    next_slots(spec, state, spec.SLOTS_PER_EPOCH)

    attestation.data.target.root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation)


@with_all_phases
@spec_state_test
def test_incorrect_target_after_epoch_delay(spec, state):
    attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
    # increment past latest inclusion slot
    next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)

    attestation.data.target.root = b'\x42' * 32
    sign_attestation(spec, state, attestation)

    yield from run_attestation_processing(spec, state, attestation, False)
@ -4,6 +4,7 @@ from eth2spec.test.context import (
from eth2spec.test.helpers.attestations import sign_indexed_attestation
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, \
    get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data
from eth2spec.test.helpers.proposer_slashings import get_min_slashing_penalty_quotient
from eth2spec.test.helpers.state import (
    get_balance,
    next_epoch_via_block,
@ -70,7 +71,7 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
    expected_balance = (
        pre_proposer_balance
        + total_proposer_rewards
        - pre_slashings[proposer_index] // spec.MIN_SLASHING_PENALTY_QUOTIENT
        - pre_slashings[proposer_index] // get_min_slashing_penalty_quotient(spec)
    )

    assert get_balance(state, proposer_index) == expected_balance
@ -94,6 +94,49 @@ def test_new_deposit_over_max(spec, state):
    yield from run_deposit_processing(spec, state, deposit, validator_index)


@with_all_phases
@spec_state_test
def test_new_deposit_eth1_withdrawal_credentials(spec, state):
    # fresh deposit = next validator index = validator appended to registry
    validator_index = len(state.validators)
    withdrawal_credentials = (
        spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
        + b'\x00' * 11  # specified 0s
        + b'\x59' * 20  # a 20-byte eth1 address
    )
    amount = spec.MAX_EFFECTIVE_BALANCE
    deposit = prepare_state_and_deposit(
        spec, state,
        validator_index,
        amount,
        withdrawal_credentials=withdrawal_credentials,
        signed=True,
    )

    yield from run_deposit_processing(spec, state, deposit, validator_index)


@with_all_phases
@spec_state_test
def test_new_deposit_non_versioned_withdrawal_credentials(spec, state):
    # fresh deposit = next validator index = validator appended to registry
    validator_index = len(state.validators)
    withdrawal_credentials = (
        b'\xFF'  # Non specified withdrawal credentials version
        + b'\x02' * 31  # Garbage bytes
    )
    amount = spec.MAX_EFFECTIVE_BALANCE
    deposit = prepare_state_and_deposit(
        spec, state,
        validator_index,
        amount,
        withdrawal_credentials=withdrawal_credentials,
        signed=True,
    )

    yield from run_deposit_processing(spec, state, deposit, validator_index)


@with_all_phases
@spec_state_test
@always_bls
@ -1,46 +1,11 @@
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with, run_epoch_processing_to
)
from eth2spec.test.helpers.state import transition_to


def run_process_final_updates(spec, state):
    yield from run_epoch_processing_with(spec, state, 'process_final_updates')


@with_all_phases
@spec_state_test
def test_eth1_vote_no_reset(spec, state):
    assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
    # skip ahead to the end of the epoch
    transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)

    for i in range(state.slot + 1):  # add a vote for each skipped slot.
        state.eth1_data_votes.append(
            spec.Eth1Data(deposit_root=b'\xaa' * 32,
                          deposit_count=state.eth1_deposit_index,
                          block_hash=b'\xbb' * 32))

    yield from run_process_final_updates(spec, state)

    assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH


@with_all_phases
@spec_state_test
def test_eth1_vote_reset(spec, state):
    # skip ahead to the end of the voting period
    state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
    for i in range(state.slot + 1):  # add a vote for each skipped slot.
        state.eth1_data_votes.append(
            spec.Eth1Data(deposit_root=b'\xaa' * 32,
                          deposit_count=state.eth1_deposit_index,
                          block_hash=b'\xbb' * 32))

    yield from run_process_final_updates(spec, state)

    assert len(state.eth1_data_votes) == 0
def run_process_effective_balance_updates(spec, state):
    yield from run_epoch_processing_with(spec, state, 'process_effective_balance_updates')


@with_all_phases
@ -48,7 +13,7 @@ def test_eth1_vote_reset(spec, state):
def test_effective_balance_hysteresis(spec, state):
    # Prepare state up to the final-updates.
    # Then overwrite the balances; we only want the focus to be on the hysteresis-based changes.
    run_epoch_processing_to(spec, state, 'process_final_updates')
    run_epoch_processing_to(spec, state, 'process_effective_balance_updates')
    # Set some edge cases for balances
    max = spec.MAX_EFFECTIVE_BALANCE
    min = spec.EJECTION_BALANCE
@ -79,21 +44,7 @@ def test_effective_balance_hysteresis(spec, state):
        state.validators[i].effective_balance = pre_eff
        state.balances[i] = bal

    yield 'pre', state
    spec.process_final_updates(state)
    yield 'post', state
    yield from run_process_effective_balance_updates(spec, state)

    for i, (_, _, post_eff, name) in enumerate(cases):
        assert state.validators[i].effective_balance == post_eff, name


@with_all_phases
@spec_state_test
def test_historical_root_accumulator(spec, state):
    # skip ahead to near the end of the historical roots period (excl block before epoch processing)
    state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
    history_len = len(state.historical_roots)

    yield from run_process_final_updates(spec, state)

    assert len(state.historical_roots) == history_len + 1
@ -0,0 +1,43 @@
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with,
)
from eth2spec.test.helpers.state import transition_to


def run_process_eth1_data_reset(spec, state):
    yield from run_epoch_processing_with(spec, state, 'process_eth1_data_reset')


@with_all_phases
@spec_state_test
def test_eth1_vote_no_reset(spec, state):
    assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
    # skip ahead to the end of the epoch
    transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)

    for i in range(state.slot + 1):  # add a vote for each skipped slot.
        state.eth1_data_votes.append(
            spec.Eth1Data(deposit_root=b'\xaa' * 32,
                          deposit_count=state.eth1_deposit_index,
                          block_hash=b'\xbb' * 32))

    yield from run_process_eth1_data_reset(spec, state)

    assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH


@with_all_phases
@spec_state_test
def test_eth1_vote_reset(spec, state):
    # skip ahead to the end of the voting period
    state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
    for i in range(state.slot + 1):  # add a vote for each skipped slot.
        state.eth1_data_votes.append(
            spec.Eth1Data(deposit_root=b'\xaa' * 32,
                          deposit_count=state.eth1_deposit_index,
                          block_hash=b'\xbb' * 32))

    yield from run_process_eth1_data_reset(spec, state)

    assert len(state.eth1_data_votes) == 0
@ -0,0 +1,20 @@
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with
)


def run_process_historical_roots_update(spec, state):
    yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update')


@with_all_phases
@spec_state_test
def test_historical_root_accumulator(spec, state):
    # skip ahead to near the end of the historical roots period (excl block before epoch processing)
    state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
    history_len = len(state.historical_roots)

    yield from run_process_historical_roots_update(spec, state)

    assert len(state.historical_roots) == history_len + 1
@ -1,6 +1,6 @@
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
    run_epoch_processing_with
from eth2spec.test.context import is_post_lightclient_patch, spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with,
)
from eth2spec.test.helpers.state import transition_to
@ -16,12 +16,20 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
    previous_epoch = spec.get_previous_epoch(state)
    current_epoch = spec.get_current_epoch(state)

    if not is_post_lightclient_patch(spec):
        if current_epoch == epoch:
            attestations = state.current_epoch_attestations
        elif previous_epoch == epoch:
            attestations = state.previous_epoch_attestations
        else:
            raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
    else:
        if current_epoch == epoch:
            epoch_participation = state.current_epoch_participation
        elif previous_epoch == epoch:
            epoch_participation = state.previous_epoch_participation
        else:
            raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")

    total_balance = spec.get_total_active_balance(state)
    remaining_balance = int(total_balance * 2 // 3)  # can become negative
@ -52,6 +60,8 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
                for i in range(max(len(committee) // 5, 1)):
                    aggregation_bits[i] = 0

            # Update state
            if not is_post_lightclient_patch(spec):
                attestations.append(spec.PendingAttestation(
                    aggregation_bits=aggregation_bits,
                    data=spec.AttestationData(
@ -65,6 +75,13 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
                ))
                if messed_up_target:
                    attestations[len(attestations) - 1].data.target.root = b'\x99' * 32
            else:
                for i, index in enumerate(committee):
                    if aggregation_bits[i]:
                        epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_HEAD_FLAG_INDEX)
                        epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_SOURCE_FLAG_INDEX)
                        if not messed_up_target:
                            epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_TARGET_FLAG_INDEX)


def get_checkpoints(spec, epoch):
@ -0,0 +1,21 @@
from eth2spec.test.context import PHASE0, spec_state_test, with_phases
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with
)


def run_process_participation_record_updates(spec, state):
    yield from run_epoch_processing_with(spec, state, 'process_participation_record_updates')


@with_phases([PHASE0])
@spec_state_test
def test_updated_participation_record(spec, state):
    state.previous_epoch_attestations = [spec.PendingAttestation(proposer_index=100)]
    current_epoch_attestations = [spec.PendingAttestation(proposer_index=200)]
    state.current_epoch_attestations = current_epoch_attestations

    yield from run_process_participation_record_updates(spec, state)

    assert state.previous_epoch_attestations == current_epoch_attestations
    assert state.current_epoch_attestations == []
@ -0,0 +1,21 @@
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with
)


def run_process_randao_mixes_reset(spec, state):
    yield from run_epoch_processing_with(spec, state, 'process_randao_mixes_reset')


@with_all_phases
@spec_state_test
def test_updated_randao_mixes(spec, state):
    next_epoch = spec.get_current_epoch(state) + 1
    state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] = b'\x56' * 32

    yield from run_process_randao_mixes_reset(spec, state)

    assert state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] == spec.get_randao_mix(
        state, spec.get_current_epoch(state)
    )
@ -1,7 +1,7 @@
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.state import next_epoch, next_slots
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with


def run_process_registry_updates(spec, state):
@ -1,11 +1,11 @@
from eth2spec.test.context import (
    LIGHTCLIENT_PATCH,
    spec_state_test, spec_test,
    with_all_phases, single_phase,
    with_phases, PHASE0,
    with_phases, PHASE0, PHASE1,
    with_custom_state,
    zero_activation_threshold,
    misc_balances, low_single_balance,
    is_post_lightclient_patch,
)
from eth2spec.test.helpers.state import (
    next_epoch,
@ -19,7 +19,7 @@ from eth2spec.test.helpers.attestations import (
)
from eth2spec.test.helpers.rewards import leaking
from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
from random import Random
@ -66,7 +66,7 @@ def test_genesis_epoch_full_attestations_no_rewards(spec, state):
        assert state.balances[index] == pre_state.balances[index]


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_full_attestations_random_incorrect_fields(spec, state):
    attestations = prepare_state_with_attestations(spec, state)
@ -159,11 +159,11 @@ def run_with_participation(spec, state, participation_fn):
        return att_participants

    attestations = prepare_state_with_attestations(spec, state, participation_fn=participation_tracker)
    proposer_indices = [a.proposer_index for a in state.previous_epoch_attestations]

    pre_state = state.copy()

    if spec.fork == LIGHTCLIENT_PATCH:
    if not is_post_lightclient_patch(spec):
        proposer_indices = [a.proposer_index for a in state.previous_epoch_attestations]
    else:
        sync_committee_indices = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))

    yield from run_process_rewards_and_penalties(spec, state)
@ -173,11 +173,11 @@ def run_with_participation(spec, state, participation_fn):

    for index in range(len(pre_state.validators)):
        if spec.is_in_inactivity_leak(state):
            # Proposers can still make money during a leak
            if index in proposer_indices and index in participated:
            # Proposers can still make money during a leak before LIGHTCLIENT_PATCH
            if not is_post_lightclient_patch(spec) and index in proposer_indices and index in participated:
                assert state.balances[index] > pre_state.balances[index]
            elif index in attesting_indices:
                if spec.fork == LIGHTCLIENT_PATCH and index in sync_committee_indices:
                if is_post_lightclient_patch(spec) and index in sync_committee_indices:
                    # The sync committee reward has not been canceled, so the sync committee participants still earn it
                    assert state.balances[index] >= pre_state.balances[index]
                else:
@ -428,6 +428,7 @@ def test_attestations_some_slashed(spec, state):
    for i in range(spec.MIN_PER_EPOCH_CHURN_LIMIT):
        spec.slash_validator(state, attesting_indices_before_slashings[i])

    if not is_post_lightclient_patch(spec):
        assert len(state.previous_epoch_attestations) == len(attestations)

    pre_state = state.copy()
@ -1,5 +1,5 @@
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
from eth2spec.test.context import spec_state_test, with_all_phases, is_post_lightclient_patch
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with, run_epoch_processing_to
)
from eth2spec.test.helpers.state import next_epoch
@ -23,12 +23,19 @@ def slash_validators(spec, state, indices, out_epochs):
    ] = total_slashed_balance


def get_slashing_multiplier(spec):
    if is_post_lightclient_patch(spec):
        return spec.HF1_PROPORTIONAL_SLASHING_MULTIPLIER
    else:
        return spec.PROPORTIONAL_SLASHING_MULTIPLIER


@with_all_phases
@spec_state_test
def test_max_penalties(spec, state):
    # Slashed count to ensure that enough validators are slashed to induce maximum penalties
    slashed_count = min(
        (len(state.validators) // spec.PROPORTIONAL_SLASHING_MULTIPLIER) + 1,
        (len(state.validators) // get_slashing_multiplier(spec)) + 1,
        # Can't slash more than validator count!
        len(state.validators)
    )
@ -40,7 +47,7 @@ def test_max_penalties(spec, state):
    total_balance = spec.get_total_active_balance(state)
    total_penalties = sum(state.slashings)

    assert total_balance // spec.PROPORTIONAL_SLASHING_MULTIPLIER <= total_penalties
    assert total_balance // get_slashing_multiplier(spec) <= total_penalties

    yield from run_process_slashings(spec, state)
@ -50,7 +57,30 @@ def test_max_penalties(spec, state):

@with_all_phases
@spec_state_test
def test_small_penalty(spec, state):
def test_low_penalty(spec, state):
    # Slashed count is one tenth of validator set
    slashed_count = (len(state.validators) // 10) + 1
    out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)

    slashed_indices = list(range(slashed_count))
    slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count)

    pre_state = state.copy()

    yield from run_process_slashings(spec, state)

    for i in slashed_indices:
        assert 0 < state.balances[i] < pre_state.balances[i]


@with_all_phases
@spec_state_test
def test_minimal_penalty(spec, state):
    #
    # When very few slashings, the resulting slashing penalty gets rounded down
    # to zero so the result of `process_slashings` is null
    #

    # Just the bare minimum for this one validator
    state.balances[0] = state.validators[0].effective_balance = spec.EJECTION_BALANCE
    # All the other validators get the maximum.
@ -74,11 +104,13 @@ def test_small_penalty(spec, state):

    expected_penalty = (
        state.validators[0].effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
        * (3 * total_penalties)
        * (get_slashing_multiplier(spec) * total_penalties)
        // total_balance
        * spec.EFFECTIVE_BALANCE_INCREMENT
    )
    assert state.balances[0] == pre_slash_balances[0] - expected_penalty

    assert expected_penalty == 0
    assert state.balances[0] == pre_slash_balances[0]


@with_all_phases
@ -96,7 +128,7 @@ def test_scaled_penalties(spec, state):
    state.slashings[5] = base + (incr * 6)
    state.slashings[spec.EPOCHS_PER_SLASHINGS_VECTOR - 1] = base + (incr * 7)

    slashed_count = len(state.validators) // (spec.PROPORTIONAL_SLASHING_MULTIPLIER + 1)
    slashed_count = len(state.validators) // (get_slashing_multiplier(spec) + 1)

    assert slashed_count > 10
@ -134,7 +166,7 @@ def test_scaled_penalties(spec, state):
        v = state.validators[i]
        expected_penalty = (
            v.effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
            * (spec.PROPORTIONAL_SLASHING_MULTIPLIER * total_penalties)
            * (get_slashing_multiplier(spec) * total_penalties)
            // (total_balance)
            * spec.EFFECTIVE_BALANCE_INCREMENT
        )
@ -0,0 +1,20 @@
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with
)


def run_process_slashings_reset(spec, state):
    yield from run_epoch_processing_with(spec, state, 'process_slashings_reset')


@with_all_phases
@spec_state_test
def test_flush_slashings(spec, state):
    next_epoch = spec.get_current_epoch(state) + 1
    state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] = 100
    assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] != 0

    yield from run_process_slashings_reset(spec, state)

    assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] == 0
@ -1,4 +1,4 @@
from eth2spec.test.context import with_all_phases, spec_state_test
from eth2spec.test.context import PHASE0, PHASE1, with_all_phases, with_phases, spec_state_test
import eth2spec.test.helpers.rewards as rewards_helpers
@ -32,7 +32,7 @@ def test_full_but_partial_participation(spec, state):
    yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_one_attestation_one_correct(spec, state):
    yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state)
@ -75,7 +75,7 @@ def test_some_very_low_effective_balances_that_did_not_attest(spec, state):
#


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_full_half_correct_target_incorrect_head(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
@ -86,7 +86,7 @@ def test_full_half_correct_target_incorrect_head(spec, state):
    )


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_full_correct_target_incorrect_head(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
@ -97,7 +97,7 @@ def test_full_correct_target_incorrect_head(spec, state):
    )


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_full_half_incorrect_target_incorrect_head(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
@ -108,7 +108,7 @@ def test_full_half_incorrect_target_incorrect_head(spec, state):
    )


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_full_half_incorrect_target_correct_head(spec, state):
    yield from rewards_helpers.run_test_full_fraction_incorrect(
@ -119,31 +119,31 @@ def test_full_half_incorrect_target_correct_head(spec, state):
    )


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_full_delay_one_slot(spec, state):
    yield from rewards_helpers.run_test_full_delay_one_slot(spec, state)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_full_delay_max_slots(spec, state):
    yield from rewards_helpers.run_test_full_delay_max_slots(spec, state)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_full_mixed_delay(spec, state):
    yield from rewards_helpers.run_test_full_mixed_delay(spec, state)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_proposer_not_in_attestations(spec, state):
    yield from rewards_helpers.run_test_proposer_not_in_attestations(spec, state)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
def test_duplicate_attestations_at_later_slots(spec, state):
    yield from rewards_helpers.run_test_duplicate_attestations_at_later_slots(spec, state)
@ -1,4 +1,4 @@
from eth2spec.test.context import with_all_phases, spec_state_test
from eth2spec.test.context import PHASE0, PHASE1, with_all_phases, with_phases, spec_state_test
from eth2spec.test.helpers.rewards import leaking
import eth2spec.test.helpers.rewards as rewards_helpers
@ -38,7 +38,7 @@ def test_full_but_partial_participation_leak(spec, state):
    yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_one_attestation_one_correct_leak(spec, state):
@ -87,7 +87,7 @@ def test_some_very_low_effective_balances_that_did_not_attest_leak(spec, state):
#


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_full_half_correct_target_incorrect_head_leak(spec, state):
@ -99,7 +99,7 @@ def test_full_half_correct_target_incorrect_head_leak(spec, state):
    )


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_full_correct_target_incorrect_head_leak(spec, state):
@ -111,7 +111,7 @@ def test_full_correct_target_incorrect_head_leak(spec, state):
    )


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_full_half_incorrect_target_incorrect_head_leak(spec, state):
@ -123,7 +123,7 @@ def test_full_half_incorrect_target_incorrect_head_leak(spec, state):
    )


@with_all_phases
@with_phases([PHASE0, PHASE1])
@spec_state_test
@leaking()
def test_full_half_incorrect_target_correct_head_leak(spec, state):
@ -29,6 +29,12 @@ def test_full_random_2(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(3030))


@with_all_phases
@spec_state_test
def test_full_random_3(spec, state):
    yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(4040))


@with_all_phases
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@ -35,6 +35,7 @@ from eth2spec.test.context import (
    with_configs,
    with_custom_state,
    large_validator_set,
    is_post_lightclient_patch,
)
@ -780,15 +781,19 @@ def test_attestation(spec, state):
        spec, state, shard_transition=shard_transition, index=index, signed=True, on_time=True
    )

    # Add to state via block transition
    if not is_post_lightclient_patch(spec):
        pre_current_attestations_len = len(state.current_epoch_attestations)

    # Add to state via block transition
    attestation_block.body.attestations.append(attestation)
    signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)

    if not is_post_lightclient_patch(spec):
        assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1

        # Epoch transition should move to previous_epoch_attestations
        pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations)
    else:
        pre_current_epoch_participation_root = spec.hash_tree_root(state.current_epoch_participation)

    epoch_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    signed_epoch_block = state_transition_and_sign_block(spec, state, epoch_block)
@ -796,8 +801,13 @@ def test_attestation(spec, state):
    yield 'blocks', [signed_attestation_block, signed_epoch_block]
    yield 'post', state

    if not is_post_lightclient_patch(spec):
        assert len(state.current_epoch_attestations) == 0
        assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
    else:
        for index in range(len(state.validators)):
            assert state.current_epoch_participation[index] == spec.ParticipationFlags(0b0000_0000)
        assert spec.hash_tree_root(state.previous_epoch_participation) == pre_current_epoch_participation_root


# In phase1 a committee is computed for SHARD_COMMITTEE_PERIOD slots ago,
@ -15,7 +15,7 @@ from eth2spec.test.context import (
    with_configs,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with

from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
    run_chunk_challenge_processing,
@ -17,7 +17,7 @@ from eth2spec.test.context import (
    spec_state_test,
)
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with

from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
    run_chunk_challenge_processing,
@ -10,7 +10,7 @@ from eth2spec.test.context import (
    with_configs,
    spec_state_test,
)
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing
@ -37,10 +37,17 @@ The provided pre-state is already transitioned to just before the specific sub-t

Sub-transitions:

- `justification_and_finalization`
- `rewards_and_penalties` (limited to `minimal` config)
- `rewards_and_penalties`
- `registry_updates`
- `slashings`
- `final_updates`
- `eth1_data_reset`
- `effective_balance_updates`
- `slashings_reset`
- `randao_mixes_reset`
- `historical_roots_update`
- `participation_record_updates`

The resulting state should match the expected `post` state.
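To make the sub-transition list above concrete, here is a minimal, hypothetical sketch (not part of the spec repo) of how a consumer might apply one named sub-transition to the provided `pre` state. The `spec` object stands for whichever compiled pyspec the consumer uses, and `pre_state`/`expected_post_state` are assumed to have already been decoded from `pre.yaml`/`post.yaml` (or their `.ssz` variants) by the consumer's own loader.

```python
# Hypothetical harness: `spec` is a compiled pyspec module; state decoding is assumed done elsewhere.
def run_epoch_sub_transition_case(spec, sub_transition_name, pre_state, expected_post_state):
    state = pre_state.copy()
    # Sub-transition names map onto spec functions,
    # e.g. 'eth1_data_reset' -> spec.process_eth1_data_reset
    getattr(spec, 'process_' + sub_transition_name)(state)
    assert state == expected_post_state
```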
@ -0,0 +1,47 @@
# Forks

The aim of the fork tests is to ensure that a pre-fork state can be transformed
into a valid post-fork state, utilizing the `upgrade` function found in the relevant `fork.md` spec.

There is only one handler: `fork`. Each fork (after genesis) is handled with the same format,
and the particular fork boundary being tested is noted in `meta.yaml`.

## Test case format

### `meta.yaml`

A yaml file to signify which fork boundary is being tested.

```yaml
fork: str  -- Fork being transitioned to
```

#### Fork strings

Key of valid `fork` strings that might be found in `meta.yaml`

| String ID | Pre-fork | Post-fork | Function |
| - | - | - | - |
| `altair` | Phase 0 | Altair | `upgrade_to_lightclient_patch` |

### `pre.yaml`

A YAML-encoded `BeaconState`, the state before running the fork transition.

Also available as `pre.ssz`.

### `post.yaml`

A YAML-encoded `BeaconState`, the state after applying the fork transition.

Also available as `post.ssz`.

*Note*: This type is the `BeaconState` after the fork and is *not* the same type as `pre`.

## Processing

To process this test, pass `pre` into the upgrade function defined by the `fork` in `meta.yaml`.

## Condition

The resulting state should match the expected `post`.
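As a worked illustration of the processing and condition steps above, the following is a hedged sketch of a consumer-side runner for the `altair` fork string. Only the mapping from the table (`altair` -> `upgrade_to_lightclient_patch`) and the `eth2spec.lightclient_patch` module name are taken from this document; the state decoding helpers are assumed to exist on the consumer side.

```python
# Hypothetical fork-test runner: pre/post BeaconState objects are assumed to be decoded already.
from eth2spec.lightclient_patch import spec as post_spec


def run_altair_fork_test_case(pre_state, expected_post_state):
    # `meta.yaml` declares fork == 'altair', which maps to `upgrade_to_lightclient_patch`
    post_state = post_spec.upgrade_to_lightclient_patch(pre_state)
    assert post_state == expected_post_state
```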
@ -36,7 +36,7 @@ Prerequisites:

### Cleaning

This removes the existing virtual environments (`/test_generators/<generator>/venv`) and generated tests (`/yaml_tests/`).
This removes the existing virtual environments (`/tests/generators/<generator>/venv`) and generated tests (`../eth2.0-spec-tests/tests`).

```bash
make clean
@ -47,7 +47,7 @@ make clean
This runs all of the generators.

```bash
make -j 4 gen_yaml_tests
make -j 4 generate_tests
```

The `-j N` flag makes the generators run in parallel, with `N` being the amount of cores.
@ -55,10 +55,10 @@ The `-j N` flag makes the generators run in parallel, with `N` being the amount

### Running a single generator

The makefile auto-detects generators in the `test_generators` directory and provides a tests-gen target for each generator. See example:
The makefile auto-detects generators in the `tests/generators` directory and provides a tests-gen target (gen_<generator_name>) for each generator. See example:

```bash
make ./eth2.0-spec-tests/tests/shuffling/
make gen_ssz_static
```

## Developing a generator
@ -78,9 +78,8 @@ It's recommended to extend the base-generator.

Create a `requirements.txt` in the root of your generator directory:
```
../../core/gen_helpers
../../core/config_helpers
../../core/pyspec
pytest>=4.4
../../../
```

The config helper and pyspec are optional, but preferred. We encourage generators to derive tests from the spec itself in order to prevent code duplication and outdated tests.
@ -103,7 +102,7 @@ Write a `main.py` file. The shuffling test generator is a good minimal starting
|
|||
```python
|
||||
from eth2spec.phase0 import spec as spec
|
||||
from eth_utils import to_tuple
|
||||
from gen_base import gen_runner, gen_typing
|
||||
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
|
||||
from preset_loader import loader
|
||||
from typing import Iterable
|
||||
|
||||
|
@ -163,35 +162,40 @@ To extend this, one could decide to parametrize the `shuffling_test_cases` funct
|
|||
Another example, to generate tests from pytests:
|
||||
|
||||
```python
|
||||
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
|
||||
from eth2spec.phase0 import spec as spec_phase0
|
||||
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
|
||||
from eth2spec.phase1 import spec as spec_phase1
|
||||
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH
|
||||
|
||||
def prepare_fn(configs_path: str) -> str:
|
||||
presets = loader.load_presets(configs_path, config_name)
|
||||
spec_phase0.apply_constants_preset(presets)
|
||||
spec_phase1.apply_constants_preset(presets)
|
||||
return config_name
|
||||
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
|
||||
|
||||
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
||||
return generate_from_tests(
|
||||
runner_name='epoch_processing',
|
||||
handler_name=handler_name,
|
||||
src=tests_src,
|
||||
fork_name='phase0'
|
||||
)
|
||||
|
||||
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||
specs = (spec_phase0, spec_lightclient_patch, spec_phase1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
gen_runner.run_generator("epoch_processing", [
|
||||
create_provider('final_updates', test_process_final_updates, 'minimal'),
|
||||
...
|
||||
])
|
||||
phase_0_mods = {key: 'eth2spec.test.phase0.sanity.test_' + key for key in [
|
||||
'blocks',
|
||||
'slots',
|
||||
]}
|
||||
lightclient_patch_mods = {**{key: 'eth2spec.test.lightclient_patch.sanity.test_' + key for key in [
|
||||
'blocks',
|
||||
]}, **phase_0_mods} # also run the previous phase 0 tests
|
||||
phase_1_mods = {**{key: 'eth2spec.test.phase1.sanity.test_' + key for key in [
|
||||
'blocks', # more phase 1 specific block tests
|
||||
'shard_blocks',
|
||||
]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec)
|
||||
|
||||
all_mods = {
|
||||
PHASE0: phase_0_mods,
|
||||
LIGHTCLIENT_PATCH: lightclient_patch_mods,
|
||||
PHASE1: phase_1_mods,
|
||||
}
|
||||
|
||||
run_state_test_generators(runner_name="sanity", specs=specs, all_mods=all_mods)
|
||||
```

Here multiple phases load the configuration, and the stream of test cases is derived from a pytest file using the `generate_from_tests` utility.
Here, multiple phases load the configuration, and the stream of test cases is derived from a pytest file using the `eth2spec.gen_helpers.gen_from_tests.gen.run_state_test_generators` utility. Note that this helper generates all available tests for every fork in `TESTGEN_FORKS` and every config in `ALL_CONFIGS` for the given runner.
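
To make that sweep concrete, the following is an illustrative, self-contained sketch of the fork-and-config iteration such a helper performs. It is not the actual `eth2spec.gen_helpers` implementation; `ALL_CONFIGS`, `sweep_forks_and_configs`, and `make_provider` are placeholder names used only for this example.

```python
# Illustrative sketch only -- not the real eth2spec.gen_helpers code.
# It shows the shape of the sweep: every config is paired with every fork
# for which the generator registered test modules in `all_mods`.
from typing import Callable, Dict, Iterable, Tuple

ALL_CONFIGS = ("minimal", "mainnet")                       # assumed config names
TESTGEN_FORKS = ("phase0", "lightclient_patch", "phase1")  # assumed fork names


def sweep_forks_and_configs(
    runner_name: str,
    all_mods: Dict[str, Dict[str, str]],
    make_provider: Callable[[str, str, str, str], object],
) -> Iterable[Tuple[str, object]]:
    """Yield a (label, provider) pair for every config/fork/handler combination."""
    for config_name in ALL_CONFIGS:
        for fork_name in TESTGEN_FORKS:
            for handler_name, mod_name in all_mods.get(fork_name, {}).items():
                provider = make_provider(fork_name, handler_name, mod_name, config_name)
                yield f"{runner_name}/{config_name}/{fork_name}/{handler_name}", provider
```

Each yielded provider corresponds to one input of a `gen_runner.run_generator` call, which is exactly the per-config boilerplate the helper replaces in the generator `main.py` files further down in this change.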

Recommendations:
- You can have more than just one test provider.

@ -200,14 +204,13 @@ Recommendations:
- Use config `minimal` for performance and simplicity, but also implement a suite with the `mainnet` config where necessary.
- You may be able to write your test case provider in a way that does not make assumptions about constants.
If so, you can generate test cases with different configurations for the same scenario (see example).
- See [`tests/core/gen_helpers/README.md`](../core/gen_helpers/README.md) for command line options for generators.
- See [`tests/core/gen_helpers/README.md`](../core/pyspec/eth2spec/gen_helpers/README.md) for command line options for generators.

## How to add a new test generator

To add a new test generator that builds `New Tests` (a minimal `main.py` sketch follows these steps):

1. Create a new directory `new_tests` within the `test_generators` directory.
1. Create a new directory `new_tests` within the `tests/generators` directory.
Note that `new_tests` is also the name of the directory in which the tests will appear in the tests repository later.
2. Your generator is assumed to have a `requirements.txt` file,
with any dependencies it may need. Leave it empty if your generator has none.

@ -216,8 +219,8 @@ To add a new test generator that builds `New Tests`:
4. Your generator is called with `-o some/file/path/for_testing/can/be_anything -c some/other/path/to_configs/`.
The base generator helps you handle this; you only have to define test case providers.
5. Finally, add any linting or testing commands to the
[circleci config file](../.circleci/config.yml) if desired to increase code quality.
Or add it to the [`Makefile`](../Makefile), if it can be run locally.
[circleci config file](../../.circleci/config.yml) if desired to increase code quality.
Or add it to the [`Makefile`](../../Makefile), if it can be run locally.

*Note*: You do not have to change the makefile.
However, if necessary (e.g. if you are not using Python, or are mixing in other languages), submit an issue, and it can be a special case.
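
For orientation, the sketch below shows what a minimal pytest-based `main.py` for such a `new_tests` generator could look like, following the `run_state_test_generators` pattern used by the generators in this change. The module path `eth2spec.test.phase0.new_tests.test_new` is hypothetical; the matching `requirements.txt` would simply contain `pytest>=4.4` and `../../../`, as in the other generators.

```python
# Hypothetical minimal generator main.py -- a sketch, not part of this change.
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH

specs = (spec_phase0, spec_lightclient_patch, spec_phase1)

if __name__ == "__main__":
    # Hypothetical pytest module backing this generator; point it at your own tests.
    phase_0_mods = {'new_tests': 'eth2spec.test.phase0.new_tests.test_new'}
    all_mods = {
        PHASE0: phase_0_mods,
        LIGHTCLIENT_PATCH: phase_0_mods,  # reuse phase 0 tests until fork-specific ones exist
        PHASE1: phase_0_mods,
    }

    run_state_test_generators(runner_name="new_tests", specs=specs, all_mods=all_mods)
```

The base generator parses the `-o`/`-c` arguments from step 4, so a `main.py` like this only has to declare which pytest modules to pull cases from.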

@ -13,7 +13,7 @@ import milagro_bls_binding as milagro_bls

from eth2spec.utils import bls
from eth2spec.test.context import PHASE0
from gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing


def to_bytes(i):

@ -1,4 +1,2 @@
py_ecc==5.0.0
eth-utils==1.6.0
../../core/gen_helpers
pytest>=4.4
../../../

@ -1,59 +1,42 @@
from typing import Iterable

from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from importlib import reload, import_module
from eth2spec.config import config_util
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0, PHASE1
from eth2spec.utils import bls
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH


def create_provider(fork_name: str, handler_name: str,
tests_src_mod_name: str, config_name: str) -> gen_typing.TestProvider:
def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name

def cases_fn() -> Iterable[gen_typing.TestCase]:
tests_src = import_module(tests_src_mod_name)
return generate_from_tests(
runner_name='epoch_processing',
handler_name=handler_name,
src=tests_src,
fork_name=fork_name,
)

return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
specs = (spec_phase0, spec_lightclient_patch, spec_phase1)


if __name__ == "__main__":
phase_0_mods = {key: 'eth2spec.test.phase0.epoch_processing.test_process_' + key for key in [
'final_updates',
'justification_and_finalization',
'registry_updates',
'rewards_and_penalties',
'registry_updates',
'slashings',
'eth1_data_reset',
'effective_balance_updates',
'slashings_reset',
'randao_mixes_reset',
'historical_roots_update',
'participation_record_updates',
]}
lightclient_patch_mods = {
**{key: 'eth2spec.test.lightclient_patch.epoch_processing.test_process_' + key for key in [
'sync_committee_updates',
]},
**phase_0_mods,
} # also run the previous phase 0 tests
phase_1_mods = {**{key: 'eth2spec.test.phase1.epoch_processing.test_process_' + key for key in [
'reveal_deadlines',
'challenge_deadlines',
'custody_final_updates',
'reveal_deadlines',
]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec)

gen_runner.run_generator(f"epoch_processing", [
create_provider(PHASE0, key, mod_name, 'minimal') for key, mod_name in phase_0_mods.items()
])
gen_runner.run_generator(f"epoch_processing", [
create_provider(PHASE0, key, mod_name, 'mainnet') for key, mod_name in phase_0_mods.items()
])
gen_runner.run_generator(f"epoch_processing", [
create_provider(PHASE1, key, mod_name, 'minimal') for key, mod_name in phase_1_mods.items()
])
gen_runner.run_generator(f"epoch_processing", [
create_provider(PHASE1, key, mod_name, 'mainnet') for key, mod_name in phase_1_mods.items()
])
all_mods = {
PHASE0: phase_0_mods,
LIGHTCLIENT_PATCH: lightclient_patch_mods,
PHASE1: phase_1_mods,
}

run_state_test_generators(runner_name="epoch_processing", specs=specs, all_mods=all_mods)

@ -1,2 +1,2 @@
../../core/gen_helpers
pytest>=4.4
../../../

@ -1,43 +1,23 @@
from typing import Iterable
from importlib import reload

from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests

from eth2spec.test.context import PHASE0, PHASE1
from eth2spec.test.phase0.finality import test_finality
from eth2spec.config import config_util
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.utils import bls
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH


def create_provider(fork_name: str, handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:

def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name

def cases_fn() -> Iterable[gen_typing.TestCase]:
return generate_from_tests(
runner_name='finality',
handler_name=handler_name,
src=tests_src,
fork_name=fork_name,
)

return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
specs = (spec_phase0, spec_lightclient_patch, spec_phase1)


if __name__ == "__main__":
# No additional phase 1 specific rewards tests, yet.
key = 'finality'
gen_runner.run_generator("finality", [
create_provider(PHASE0, 'finality', test_finality, 'minimal'),
create_provider(PHASE0, 'finality', test_finality, 'mainnet'),
create_provider(PHASE1, 'finality', test_finality, 'minimal'),
create_provider(PHASE1, 'finality', test_finality, 'mainnet'),
])
phase_0_mods = {'finality': 'eth2spec.test.phase0.finality.test_finality'}
# No additional lightclient_patch or phase 1 specific finality tests, yet.
lightclient_patch_mods = phase_0_mods
phase_1_mods = phase_0_mods

all_mods = {
PHASE0: phase_0_mods,
LIGHTCLIENT_PATCH: lightclient_patch_mods,
PHASE1: phase_1_mods,
}

run_state_test_generators(runner_name="finality", specs=specs, all_mods=all_mods)

@ -1,2 +1,2 @@
../../core/gen_helpers
pytest>=4.4
../../../

@ -0,0 +1,38 @@
from importlib import reload
from typing import Iterable

from eth2spec.test.context import PHASE0, LIGHTCLIENT_PATCH, MINIMAL, MAINNET
from eth2spec.config import config_util
from eth2spec.test.lightclient_patch.fork import test_fork as test_altair_forks
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.lightclient_patch import spec as spec_lightclient_patch

from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests


def create_provider(tests_src, config_name: str, phase: str, fork_name: str) -> gen_typing.TestProvider:

def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_lightclient_patch)
return config_name

def cases_fn() -> Iterable[gen_typing.TestCase]:
return generate_from_tests(
runner_name='fork',
handler_name='fork',
src=tests_src,
fork_name=fork_name,
phase=phase,
)

return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


if __name__ == "__main__":
gen_runner.run_generator("forks", [
create_provider(test_altair_forks, MINIMAL, PHASE0, LIGHTCLIENT_PATCH),
create_provider(test_altair_forks, MAINNET, PHASE0, LIGHTCLIENT_PATCH),
])

@ -0,0 +1,2 @@
pytest>=4.4
../../../

@ -3,8 +3,8 @@ from typing import Iterable
from eth2spec.test.context import PHASE0
from eth2spec.test.phase0.genesis import test_initialization, test_validity

from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests
from eth2spec.phase0 import spec as spec
from importlib import reload
from eth2spec.config import config_util

@ -1,2 +1,2 @@
../../core/gen_helpers
pytest>=4.4
../../../

@ -1,34 +1,11 @@
from typing import Iterable

from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from importlib import reload, import_module
from eth2spec.config import config_util
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0, PHASE1
from eth2spec.utils import bls
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH


def create_provider(fork_name: str, handler_name: str,
tests_src_mod_name: str, config_name: str) -> gen_typing.TestProvider:
def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name

def cases_fn() -> Iterable[gen_typing.TestCase]:
tests_src = import_module(tests_src_mod_name)
return generate_from_tests(
runner_name='operations',
handler_name=handler_name,
src=tests_src,
fork_name=fork_name,
)

return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
specs = (spec_phase0, spec_lightclient_patch, spec_phase1)


if __name__ == "__main__":

@ -40,6 +17,12 @@ if __name__ == "__main__":
'proposer_slashing',
'voluntary_exit',
]}
lightclient_patch_mods = {
**{key: 'eth2spec.test.lightclient_patch.block_processing.test_process_' + key for key in [
'sync_committee',
]},
**phase_0_mods,
} # also run the previous phase 0 tests
phase_1_mods = {**{key: 'eth2spec.test.phase1.block_processing.test_process_' + key for key in [
'attestation',
'chunk_challenge',

@ -49,15 +32,10 @@ if __name__ == "__main__":
'shard_transition',
]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec)

gen_runner.run_generator(f"operations", [
create_provider(PHASE0, key, mod_name, 'minimal') for key, mod_name in phase_0_mods.items()
])
gen_runner.run_generator(f"operations", [
create_provider(PHASE0, key, mod_name, 'mainnet') for key, mod_name in phase_0_mods.items()
])
gen_runner.run_generator(f"operations", [
create_provider(PHASE1, key, mod_name, 'minimal') for key, mod_name in phase_1_mods.items()
])
gen_runner.run_generator(f"operations", [
create_provider(PHASE1, key, mod_name, 'mainnet') for key, mod_name in phase_1_mods.items()
])
all_mods = {
PHASE0: phase_0_mods,
LIGHTCLIENT_PATCH: lightclient_patch_mods,
PHASE1: phase_1_mods,
}

run_state_test_generators(runner_name="operations", specs=specs, all_mods=all_mods)

@ -1,3 +1,2 @@
eth-utils==1.6.0
../../core/gen_helpers
pytest>=4.4
../../../

@ -1,34 +1,11 @@
from typing import Iterable

from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from importlib import reload, import_module
from eth2spec.config import config_util
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0, PHASE1
from eth2spec.utils import bls
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH


def create_provider(fork_name: str, handler_name: str,
tests_src_mod_name: str, config_name: str) -> gen_typing.TestProvider:
def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name

def cases_fn() -> Iterable[gen_typing.TestCase]:
tests_src = import_module(tests_src_mod_name)
return generate_from_tests(
runner_name='rewards',
handler_name=handler_name,
src=tests_src,
fork_name=fork_name,
)

return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
specs = (spec_phase0, spec_lightclient_patch, spec_phase1)


if __name__ == "__main__":

@ -37,18 +14,14 @@ if __name__ == "__main__":
'leak',
'random',
]}
# No additional phase 1 specific rewards tests, yet.
# No additional lightclient_patch or phase 1 specific rewards tests, yet.
lightclient_patch_mods = phase_0_mods
phase_1_mods = phase_0_mods

gen_runner.run_generator(f"rewards", [
create_provider(PHASE0, key, mod_name, 'minimal') for key, mod_name in phase_0_mods.items()
])
gen_runner.run_generator(f"rewards", [
create_provider(PHASE0, key, mod_name, 'mainnet') for key, mod_name in phase_0_mods.items()
])
gen_runner.run_generator(f"rewards", [
create_provider(PHASE1, key, mod_name, 'minimal') for key, mod_name in phase_1_mods.items()
])
gen_runner.run_generator(f"rewards", [
create_provider(PHASE1, key, mod_name, 'mainnet') for key, mod_name in phase_1_mods.items()
])
all_mods = {
PHASE0: phase_0_mods,
LIGHTCLIENT_PATCH: lightclient_patch_mods,
PHASE1: phase_1_mods,
}

run_state_test_generators(runner_name="rewards", specs=specs, all_mods=all_mods)

@ -1,2 +1,2 @@
../../core/gen_helpers
pytest>=4.4
../../../

@ -1,34 +1,12 @@
from typing import Iterable

from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from importlib import reload, import_module
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0, PHASE1
from eth2spec.utils import bls
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH

from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators


def create_provider(fork_name: str, handler_name: str,
tests_src_mod_name: str, config_name: str) -> gen_typing.TestProvider:
def prepare_fn(configs_path: str) -> str:
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
bls.use_milagro()
return config_name

def cases_fn() -> Iterable[gen_typing.TestCase]:
tests_src = import_module(tests_src_mod_name)
return generate_from_tests(
runner_name='sanity',
handler_name=handler_name,
src=tests_src,
fork_name=fork_name,
)

return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
specs = (spec_phase0, spec_lightclient_patch, spec_phase1)


if __name__ == "__main__":

@ -36,20 +14,18 @@ if __name__ == "__main__":
'blocks',
'slots',
]}
lightclient_patch_mods = {**{key: 'eth2spec.test.lightclient_patch.sanity.test_' + key for key in [
'blocks',
]}, **phase_0_mods} # also run the previous phase 0 tests
phase_1_mods = {**{key: 'eth2spec.test.phase1.sanity.test_' + key for key in [
'blocks', # more phase 1 specific block tests
'shard_blocks',
]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec)

gen_runner.run_generator(f"sanity", [
create_provider(PHASE0, key, mod_name, 'minimal') for key, mod_name in phase_0_mods.items()
])
gen_runner.run_generator(f"sanity", [
create_provider(PHASE0, key, mod_name, 'mainnet') for key, mod_name in phase_0_mods.items()
])
gen_runner.run_generator(f"sanity", [
create_provider(PHASE1, key, mod_name, 'minimal') for key, mod_name in phase_1_mods.items()
])
gen_runner.run_generator(f"sanity", [
create_provider(PHASE1, key, mod_name, 'mainnet') for key, mod_name in phase_1_mods.items()
])
all_mods = {
PHASE0: phase_0_mods,
LIGHTCLIENT_PATCH: lightclient_patch_mods,
PHASE1: phase_1_mods,
}

run_state_test_generators(runner_name="sanity", specs=specs, all_mods=all_mods)

@ -1,2 +1,2 @@
../../core/gen_helpers
pytest>=4.4
../../../

@ -2,7 +2,7 @@ from eth_utils import to_tuple
from typing import Iterable
from importlib import reload

from gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing

from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec

@ -1,3 +1,2 @@
eth-utils==1.6.0
../../core/gen_helpers
pytest>=4.4
../../../

@ -1,5 +1,5 @@
from typing import Iterable
from gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
import ssz_basic_vector
import ssz_bitlist
import ssz_bitvector

@ -1,3 +1,2 @@
eth-utils==1.6.0
../../core/gen_helpers
pytest>=4.4
../../../

@ -3,13 +3,14 @@ from typing import Iterable
from importlib import reload
from inspect import getmembers, isclass

from gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing

from eth2spec.debug import random_value, encode
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.test.context import PHASE0, PHASE1
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
from eth2spec.test.context import PHASE1, LIGHTCLIENT_PATCH, TESTGEN_FORKS, MINIMAL, MAINNET
from eth2spec.utils.ssz.ssz_typing import Container
from eth2spec.utils.ssz.ssz_impl import (
hash_tree_root,

@ -64,6 +65,7 @@ def create_provider(fork_name, config_name: str, seed: int, mode: random_value.R
config_util.prepare_config(configs_path, config_name)
reload(spec_phase0)
reload(spec_phase1)
reload(spec_lightclient_patch)
return config_name

def cases_fn() -> Iterable[gen_typing.TestCase]:

@ -71,6 +73,8 @@ def create_provider(fork_name, config_name: str, seed: int, mode: random_value.R
spec = spec_phase0
if fork_name == PHASE1:
spec = spec_phase1
if fork_name == LIGHTCLIENT_PATCH:
spec = spec_lightclient_patch

for (i, (name, ssz_type)) in enumerate(get_spec_ssz_types(spec)):
yield from ssz_static_cases(fork_name, seed * 1000 + i, name, ssz_type, mode, chaos, count)

@ -83,14 +87,14 @@ if __name__ == "__main__":
settings = []
seed = 1
for mode in random_value.RandomizationMode:
settings.append((seed, "minimal", mode, False, 30))
settings.append((seed, MINIMAL, mode, False, 30))
seed += 1
settings.append((seed, "minimal", random_value.RandomizationMode.mode_random, True, 30))
settings.append((seed, MINIMAL, random_value.RandomizationMode.mode_random, True, 30))
seed += 1
settings.append((seed, "mainnet", random_value.RandomizationMode.mode_random, False, 5))
settings.append((seed, MAINNET, random_value.RandomizationMode.mode_random, False, 5))
seed += 1

for fork in [PHASE0, PHASE1]:
for fork in TESTGEN_FORKS:
gen_runner.run_generator("ssz_static", [
create_provider(fork, config_name, seed, mode, chaos, cases_if_random)
for (seed, config_name, mode, chaos, cases_if_random) in settings

@ -1,2 +1,2 @@
../../core/gen_helpers
pytest>=4.4
../../../