merge in dev (v0.10) and fix reorg/lint issues

protolambda 2020-01-13 18:55:21 +01:00
commit 4732c7beb1
174 changed files with 173 additions and 857 deletions


@@ -35,27 +35,27 @@ commands:
description: "Restore the cache with pyspec keys"
steps:
- restore_cached_venv:
venv_name: v6-pyspec
reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}
venv_name: v8-pyspec
reqs_checksum: cache-{{ checksum "tests/core/pyspec/requirements.txt" }}-{{ checksum "tests/core/pyspec/requirements-testing.txt" }}
save_pyspec_cached_venv:
description: "Save a venv into a cache with pyspec keys"
steps:
- save_cached_venv:
venv_name: v6-pyspec
reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}
venv_path: ./test_libs/pyspec/venv
venv_name: v8-pyspec
reqs_checksum: cache-{{ checksum "tests/core/pyspec/requirements.txt" }}-{{ checksum "tests/core/pyspec/requirements-testing.txt" }}
venv_path: ./tests/core/pyspec/venv
restore_deposit_contract_cached_venv:
description: "Restore the cache with deposit_contract keys"
steps:
- restore_cached_venv:
venv_name: v9-deposit-contract
reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }}
reqs_checksum: cache-{{ checksum "tests/core/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }}
save_deposit_contract_cached_venv:
description: "Save a venv into a cache with deposit_contract keys"
steps:
- save_cached_venv:
venv_name: v9-deposit-contract
reqs_checksum: cache-{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }}
reqs_checksum: cache-{{ checksum "tests/core/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }}
venv_path: ./deposit_contract/venv
jobs:
checkout_specs:
@@ -66,16 +66,16 @@ jobs:
# Restore git repo at point close to target branch/revision, to speed up checkout
- restore_cache:
keys:
- v1-specs-repo-{{ .Branch }}-{{ .Revision }}
- v1-specs-repo-{{ .Branch }}-
- v1-specs-repo-
- v2-specs-repo-{{ .Branch }}-{{ .Revision }}
- v2-specs-repo-{{ .Branch }}-
- v2-specs-repo-
- checkout
- run:
name: Clean up git repo to reduce cache size
command: git gc
# Save the git checkout as a cache, to make cloning next time faster.
- save_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
paths:
- ~/specs-repo
install_pyspec_test:
@@ -84,7 +84,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_pyspec_cached_venv
- run:
name: Install pyspec requirements
@@ -96,13 +96,13 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_pyspec_cached_venv
- run:
name: Run py-tests
command: make citest
- store_test_results:
path: test_libs/pyspec/test-reports
path: tests/core/pyspec/test-reports
table_of_contents:
docker:
- image: circleci/node:10.16.3
@@ -127,7 +127,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_pyspec_cached_venv
- run:
name: Run linter
@@ -138,7 +138,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_deposit_contract_cached_venv
- run:
name: Install deposit contract requirements
@@ -150,7 +150,7 @@ jobs:
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_deposit_contract_cached_venv
- run:
name: Run deposit contract test

.gitignore

@@ -14,12 +14,12 @@ eth2.0-spec-tests/
.mypy_cache
# Dynamically built from Markdown spec
test_libs/pyspec/eth2spec/phase0/spec.py
test_libs/pyspec/eth2spec/phase1/spec.py
tests/core/pyspec/eth2spec/phase0/spec.py
tests/core/pyspec/eth2spec/phase1/spec.py
# coverage reports
.htmlcov
.coverage
# local CI testing output
test_libs/pyspec/test-reports
tests/core/pyspec/test-reports


@@ -1,9 +1,10 @@
SPEC_DIR = ./specs
SSZ_DIR = ./ssz
SCRIPT_DIR = ./scripts
TEST_LIBS_DIR = ./test_libs
TEST_LIBS_DIR = ./tests/core
PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
TEST_VECTOR_DIR = ./eth2.0-spec-tests/tests
GENERATOR_DIR = ./test_generators
GENERATOR_DIR = ./tests/generators
DEPOSIT_CONTRACT_DIR = ./deposit_contract
CONFIGS_DIR = ./configs
@@ -16,17 +17,19 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
# To check generator matching:
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
PHASE0_SPEC_DIR = $(SPEC_DIR)/phase0
PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py
PY_SPEC_PHASE_0_DEPS = $(wildcard $(SPEC_DIR)/core/0_*.md)
PY_SPEC_PHASE_0_DEPS = $(wildcard $(SPEC_DIR)/phase0/*.md)
PHASE1_SPEC_DIR = $(SPEC_DIR)/phase1
PY_SPEC_PHASE_1_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase1/spec.py
PY_SPEC_PHASE_1_DEPS = $(wildcard $(SPEC_DIR)/core/1_*.md)
PY_SPEC_PHASE_1_DEPS = $(wildcard $(SPEC_DIR)/phase1/*.md)
PY_SPEC_ALL_DEPS = $(PY_SPEC_PHASE_0_DEPS) $(PY_SPEC_PHASE_1_DEPS)
PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) $(PY_SPEC_PHASE_1_TARGETS)
MARKDOWN_FILES = $(PY_SPEC_ALL_DEPS) $(wildcard $(SPEC_DIR)/*.md) $(wildcard $(SPEC_DIR)/light_client/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
MARKDOWN_FILES = $(PY_SPEC_ALL_DEPS) $(wildcard $(SPEC_DIR)/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
COV_HTML_OUT=.htmlcov
COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html
@@ -101,10 +104,12 @@ test_deposit_contract:
pyspec: $(PY_SPEC_ALL_TARGETS)
$(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS)
python3 $(SCRIPT_DIR)/build_spec.py -p0 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/validator/0_beacon-chain-validator.md $@
python3 $(SCRIPT_DIR)/build_spec.py -p0 $(PHASE0_SPEC_DIR)/beacon-chain.md $(PHASE0_SPEC_DIR)/fork-choice.md $(PHASE0_SPEC_DIR)/validator.md $@
$(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS)
python3 $(SCRIPT_DIR)/build_spec.py -p1 $(SPEC_DIR)/core/0_beacon-chain.md $(SPEC_DIR)/core/0_fork-choice.md $(SPEC_DIR)/light_client/merkle_proofs.md $(SPEC_DIR)/core/1_custody-game.md $(SPEC_DIR)/core/1_beacon-chain.md $(SPEC_DIR)/core/1_fraud-proofs.md $(SPEC_DIR)/core/1_phase1-fork.md $@
python3 $(SCRIPT_DIR)/build_spec.py -p1 $(PHASE0_SPEC_DIR)/beacon-chain.md $(PHASE0_SPEC_DIR)/fork-choice.md $(PHASE1_SPEC_DIR)/custody-game.md $(PHASE1_SPEC_DIR)/beacon-chain.md $(PHASE1_SPEC_DIR)/fraud-proofs.md $(PHASE1_SPEC_DIR)/phase1-fork.md $@
# TODO: also build validator spec and light-client-sync
CURRENT_DIR = ${CURDIR}


@@ -9,19 +9,21 @@ This repository hosts the current Eth2 specifications. Discussions about design
## Specs
Core specifications for Eth2 client validation can be found in [specs/core](specs/core). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
Core specifications for Eth2 clients can be found in [specs/](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
### Phase 0
* [The Beacon Chain](specs/core/0_beacon-chain.md)
* [Fork Choice](specs/core/0_fork-choice.md)
* [Deposit Contract](specs/core/0_deposit-contract.md)
* [Honest Validator](specs/validator/0_beacon-chain-validator.md)
* [The Beacon Chain](specs/phase0/beacon-chain.md)
* [Fork Choice](specs/phase0/fork-choice.md)
* [Deposit Contract](specs/phase0/deposit-contract.md)
* [Honest Validator](specs/phase0/validator.md)
* [P2P Networking](specs/phase0/p2p-interface.md)
### Phase 1
* [From Phase 0 to Phase 1](specs/core/1_phase1_fork.md)
* [The Beacon Chain for Shards](specs/core/1_beacon-chain.md)
* [Custody Game](specs/core/1_custody-game.md)
* [Shard Transition and Fraud Proofs](specs/core/1_fraud_proofs.md)
* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
* [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
* [Custody Game](specs/phase1/custody-game.md)
* [Shard Transition and Fraud Proofs](specs/phase1/fraud-proofs.md)
* [Light client syncing protocol](specs/phase1/light-client-sync.md)
### Phase 2
@@ -31,11 +33,9 @@ See the [Eth2 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) for c
### Accompanying documents can be found in [specs](specs) and include:
* [SimpleSerialize (SSZ) spec](specs/simple-serialize.md)
* [BLS signature verification](specs/bls_signature.md)
* [General test format](specs/test_formats/README.md)
* [Merkle proof formats](specs/light_client/merkle_proofs.md)
* [Light client syncing protocol](specs/light_client/sync_protocol.md)
* [SimpleSerialize (SSZ) spec](ssz/simple-serialize.md)
* [Merkle proof formats](ssz/merkle-proofs.md)
* [General test format](tests/formats/README.md)
## Additional specifications for client implementers
@@ -64,6 +64,6 @@ The following are the broad design goals for Ethereum 2.0:
## For spec contributors
Documentation on the different components used during spec writing can be found here:
* [YAML Test Generators](test_generators/README.md)
* [Executable Python Spec, with Py-tests](test_libs/pyspec/README.md)
* [YAML Test Generators](tests/generators/README.md)
* [Executable Python Spec, with Py-tests](tests/core/pyspec/README.md)


@@ -2,4 +2,4 @@ eth-tester[py-evm]==0.1.0b39
git+https://github.com/vyperlang/vyper@1761-HOTFIX-v0.1.0-beta.13
web3==5.0.0b2
pytest==3.6.1
../test_libs/pyspec
../tests/core/pyspec


@@ -37,10 +37,7 @@ SSZObject = TypeVar('SSZObject', bound=SSZType)
PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0
from eth2spec.config.apply_config import apply_constants_preset
from typing import (
Any, Callable, Dict, Set, Sequence, NewType, Tuple, Union, TypeVar
)
from math import (
log2,
Any, Callable, Dict, Set, Sequence, NewType, Tuple, TypeVar
)
from dataclasses import (
@@ -50,8 +47,7 @@ from dataclasses import (
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.ssz.ssz_typing import (
BasicValue, Elements, BaseBytes, BaseList, SSZType,
Container, List, Vector, ByteList, ByteVector, Bitlist, Bitvector, Bits,
SSZType, Container, List, Vector, ByteList, ByteVector, Bitlist, Bitvector,
Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96,
uint64, uint8, bit, boolean,
)
@@ -231,7 +227,6 @@ def build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str,
def build_phase1_spec(phase0_beacon_sourcefile: str,
phase0_fork_choice_sourcefile: str,
merkle_proofs_sourcefile: str,
phase1_custody_sourcefile: str,
phase1_beacon_sourcefile: str,
phase1_fraud_sourcefile: str,
@@ -240,7 +235,6 @@ def build_phase1_spec(phase0_beacon_sourcefile: str,
all_sourcefiles = (
phase0_beacon_sourcefile,
phase0_fork_choice_sourcefile,
merkle_proofs_sourcefile,
phase1_custody_sourcefile,
phase1_beacon_sourcefile,
phase1_fraud_sourcefile,
@@ -262,20 +256,19 @@ if __name__ == '__main__':
description = '''
Build the specs from the md docs.
If building phase 0:
1st argument is input /core/0_beacon-chain.md
2nd argument is input /core/0_fork-choice.md
3rd argument is input /core/0_beacon-chain-validator.md
1st argument is input phase0/beacon-chain.md
2nd argument is input phase0/fork-choice.md
3rd argument is input phase0/validator.md
4th argument is output spec.py
If building phase 1:
1st argument is input /core/0_beacon-chain.md
2nd argument is input /core/0_fork-choice.md
3rd argument is input /light_client/merkle_proofs.md
4th argument is input /core/1_custody-game.md
5th argument is input /core/1_beacon-chain.md
6th argument is input /core/1_fraud-proofs.md
7th argument is input /core/1_phase1-fork.md
8th argument is output spec.py
1st argument is input phase0/beacon-chain.md
2nd argument is input phase0/fork-choice.md
3rd argument is input phase1/custody-game.md
4th argument is input phase1/beacon-chain.md
5th argument is input phase1/fraud-proofs.md
6th argument is input phase1/phase1-fork.md
7th argument is output spec.py
'''
parser = ArgumentParser(description=description)
parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #")
@@ -288,14 +281,13 @@ If building phase 1:
else:
print(" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.")
elif args.phase == 1:
if len(args.files) == 8:
if len(args.files) == 7:
build_phase1_spec(*args.files)
else:
print(
" Phase 1 requires input files as well as an output file:\n"
"\t core/phase_0: (0_beacon-chain.md, 0_fork-choice.md)\n"
"\t light_client: (merkle_proofs.md)\n"
"\t core/phase_1: (1_custody-game.md, 1_beacon-chain.md, 1_fraud-proofs.md, 1_phase1-fork.md)\n"
"\t phase0: (beacon-chain.md, fork-choice.md)\n"
"\t phase1: (custody-game.md, beacon-chain.md, fraud-proofs.md, phase1-fork.md)\n"
"\t and output.py"
)
else:


@@ -1,254 +0,0 @@
# Phase 1 miscellaneous beacon chain changes
## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Configuration](#configuration)
- [Containers](#containers)
- [`CompactCommittee`](#compactcommittee)
- [`ShardReceiptDelta`](#shardreceiptdelta)
- [`ShardReceiptProof`](#shardreceiptproof)
- [Helper functions](#helper-functions)
- [`pack_compact_validator`](#pack_compact_validator)
- [`unpack_compact_validator`](#unpack_compact_validator)
- [`committee_to_compact_committee`](#committee_to_compact_committee)
- [`verify_merkle_proof`](#verify_merkle_proof)
- [`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index)
- [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header)
- [`process_shard_receipt_proof`](#process_shard_receipt_proof)
- [Changes](#changes)
- [Phase 0 container updates](#phase-0-container-updates)
- [`BeaconState`](#beaconstate)
- [`BeaconBlockBody`](#beaconblockbody)
- [Persistent committees](#persistent-committees)
- [Shard receipt processing](#shard-receipt-processing)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Configuration
| Name | Value | Unit | Duration |
| - | - | - | - |
| `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - |
| `PERIOD_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months |
| `MINOR_REWARD_QUOTIENT` | `2**8` (=256) | - | - |
| `REWARD_COEFFICIENT_BASE` | **TBD** | - | - |
## Containers
#### `CompactCommittee`
```python
class CompactCommittee(Container):
pubkeys: List[BLSPubkey, MAX_VALIDATORS_PER_COMMITTEE]
compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
```
#### `ShardReceiptDelta`
```python
class ShardReceiptDelta(Container):
index: ValidatorIndex
reward_coefficient: uint64
block_fee: Gwei
```
#### `ShardReceiptProof`
```python
class ShardReceiptProof(Container):
shard: Shard
proof: List[Bytes32, PLACEHOLDER]
receipt: List[ShardReceiptDelta, PLACEHOLDER]
```
## Helper functions
#### `pack_compact_validator`
```python
def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int:
"""
Creates a compact validator object representing index, slashed status, and compressed balance.
Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with
the unpacking function.
"""
return (index << 16) + (slashed << 15) + balance_in_increments
```
#### `unpack_compact_validator`
```python
def unpack_compact_validator(compact_validator: int) -> Tuple[int, bool, int]:
"""
Returns validator index, slashed, balance // EFFECTIVE_BALANCE_INCREMENT
"""
return compact_validator >> 16, bool((compact_validator >> 15) % 2), compact_validator & (2**15 - 1)
```
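The two helpers above are inverses of one another on the packed bit layout. Below is a self-contained round-trip check (illustrative only, not spec code); it restates both helpers so it runs standalone and assumes the balance in increments fits in 15 bits.

```python
def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int:
    # Low 15 bits: balance in increments; bit 15: slashed flag; higher bits: validator index.
    return (index << 16) + (slashed << 15) + balance_in_increments


def unpack_compact_validator(compact_validator: int):
    return compact_validator >> 16, bool((compact_validator >> 15) % 2), compact_validator & (2**15 - 1)


# The round trip holds whenever balance_in_increments < 2**15.
assert unpack_compact_validator(pack_compact_validator(42, True, 31)) == (42, True, 31)
```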
#### `committee_to_compact_committee`
```python
def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee:
"""
Given a state and a list of validator indices, outputs the CompactCommittee representing them.
"""
validators = [state.validators[i] for i in committee]
compact_validators = [
pack_compact_validator(i, v.slashed, v.effective_balance // EFFECTIVE_BALANCE_INCREMENT)
for i, v in zip(committee, validators)
]
pubkeys = [v.pubkey for v in validators]
return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators)
```
#### `verify_merkle_proof`
```python
def verify_merkle_proof(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex, root: Root) -> bool:
assert len(proof) == get_generalized_index_length(index)
for i, h in enumerate(proof):
if get_generalized_index_bit(index, i):
leaf = hash(h + leaf)
else:
leaf = hash(leaf + h)
return leaf == root
```
#### `compute_historical_state_generalized_index`
```python
def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardSlot) -> GeneralizedIndex:
"""
Computes the generalized index of the state root of slot `earlier` based on the state root of slot `later`.
Relies on the `history_accumulator` in the `ShardState`, where `history_accumulator[i]` maintains the most
recent 2**i'th slot state. Works by tracing a `log(later-earlier)` step path from `later` to `earlier`
through intermediate blocks at the next available multiples of descending powers of two.
"""
o = GeneralizedIndex(1)
for i in range(HISTORY_ACCUMULATOR_DEPTH - 1, -1, -1):
if (later - 1) & 2**i > (earlier - 1) & 2**i:
later = later - ((later - 1) % 2**i) - 1
gindex = GeneralizedIndex(get_generalized_index(ShardState, ['history_accumulator', i]))
o = concat_generalized_indices(o, gindex)
return o
```
#### `get_generalized_index_of_crosslink_header`
```python
def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex:
"""
Gets the generalized index for the root of the index'th header in a crosslink.
"""
MAX_CROSSLINK_SIZE = (
MAX_SHARD_BLOCK_SIZE * SHARD_SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK
)
assert MAX_CROSSLINK_SIZE == get_previous_power_of_two(MAX_CROSSLINK_SIZE)
return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index)
```
#### `process_shard_receipt_proof`
```python
def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptProof) -> None:
"""
Processes a ShardReceipt object.
"""
receipt_slot = (
state.next_shard_receipt_period[receipt_proof.shard] *
SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD
)
first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_EPOCH
gindex = concat_generalized_indices(
get_generalized_index_of_crosslink_header(0),
GeneralizedIndex(get_generalized_index(ShardBlockHeader, 'state_root')),
compute_historical_state_generalized_index(receipt_slot, first_slot_in_last_crosslink),
GeneralizedIndex(get_generalized_index(ShardState, 'receipt_root'))
)
assert verify_merkle_proof(
leaf=hash_tree_root(receipt_proof.receipt),
proof=receipt_proof.proof,
index=gindex,
root=state.current_crosslinks[receipt_proof.shard].data_root
)
for delta in receipt_proof.receipt:
if get_current_epoch(state) < state.validators[delta.index].withdrawable_epoch:
increase_amount = (
state.validators[delta.index].effective_balance * delta.reward_coefficient // REWARD_COEFFICIENT_BASE
)
increase_balance(state, delta.index, increase_amount)
decrease_balance(state, delta.index, delta.block_fee)
state.next_shard_receipt_period[receipt_proof.shard] += 1
proposer_index = get_beacon_proposer_index(state)
increase_balance(state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT))
```
## Changes
### Phase 0 container updates
Add the following fields to the end of the specified container objects.
#### `BeaconState`
```python
class BeaconState(Container):
# Period committees
period_committee_roots: Vector[Root, PERIOD_COMMITTEE_ROOT_LENGTH]
next_shard_receipt_period: Vector[uint64, SHARD_COUNT]
```
`period_committee_roots` values are initialized to `Bytes32()` (empty bytes value).
`next_shard_receipt_period` values are initialized to `compute_epoch_at_slot(PHASE_1_FORK_SLOT) // EPOCHS_PER_SHARD_PERIOD`.
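A minimal sketch of what that initialization could look like at the fork point (illustrative only; it uses plain Python lists rather than the SSZ `Vector` types, and the constants are passed in as parameters rather than taken from a real configuration):

```python
def initialize_phase1_misc_fields(state,
                                  phase_1_fork_slot: int,
                                  slots_per_epoch: int,
                                  epochs_per_shard_period: int,
                                  shard_count: int,
                                  period_committee_root_length: int) -> None:
    # All period committee roots start as the empty bytes value.
    state.period_committee_roots = [b"\x00" * 32] * period_committee_root_length
    # Each shard's receipt period starts at the period containing the fork epoch.
    fork_epoch = phase_1_fork_slot // slots_per_epoch
    state.next_shard_receipt_period = [fork_epoch // epochs_per_shard_period] * shard_count
```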
#### `BeaconBlockBody`
```python
class BeaconBlockBody(Container):
shard_receipt_proofs: List[ShardReceiptProof, MAX_SHARD_RECEIPT_PROOFS]
```
`shard_receipt_proofs` is initialized to `[]`.
### Persistent committees
Run `update_period_committee` immediately before `process_final_updates`:
```python
# begin insert @update_period_committee
update_period_committee(state)
# end insert @update_period_committee
def update_period_committee(state: BeaconState) -> None:
"""
Updates period committee roots at boundary blocks.
"""
if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD != 0:
return
period = (get_current_epoch(state) + 1) // EPOCHS_PER_SHARD_PERIOD
committees = Vector[CompactCommittee, SHARD_COUNT]([
committee_to_compact_committee(
state,
get_period_committee(state, Shard(shard), Epoch(get_current_epoch(state) + 1)),
)
for shard in range(SHARD_COUNT)
])
state.period_committee_roots[period % PERIOD_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees)
```
### Shard receipt processing
Run `process_shard_receipt_proof` on each `ShardReceiptProof` during block processing.
```python
# begin insert @process_shard_receipt_proofs
(body.shard_receipt_proofs, process_shard_receipt_proof),
# end insert @process_shard_receipt_proofs
```


@@ -1,444 +0,0 @@
# Ethereum 2.0 Phase 1 -- Shard Data Chains
**Notice**: This document is a work-in-progress for researchers and implementers.
## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Configuration](#configuration)
- [Misc](#misc)
- [Initial values](#initial-values)
- [Time parameters](#time-parameters)
- [State list lengths](#state-list-lengths)
- [Rewards and penalties](#rewards-and-penalties)
- [Signature domain types](#signature-domain-types)
- [Containers](#containers)
- [`Crosslink`](#crosslink)
- [`ShardBlock`](#shardblock)
- [`ShardBlockHeader`](#shardblockheader)
- [`ShardState`](#shardstate)
- [`ShardAttestationData`](#shardattestationdata)
- [Helper functions](#helper-functions)
- [Misc](#misc-1)
- [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot)
- [`compute_shard_period_start_epoch`](#compute_shard_period_start_epoch)
- [Beacon state accessors](#beacon-state-accessors)
- [`get_period_committee`](#get_period_committee)
- [`get_shard_committee`](#get_shard_committee)
- [`get_shard_proposer_index`](#get_shard_proposer_index)
- [Shard state mutators](#shard-state-mutators)
- [`process_delta`](#process_delta)
- [Genesis](#genesis)
- [`get_genesis_shard_state`](#get_genesis_shard_state)
- [`get_genesis_shard_block`](#get_genesis_shard_block)
- [Shard state transition function](#shard-state-transition-function)
- [Period processing](#period-processing)
- [Block processing](#block-processing)
- [Block header](#block-header)
- [Attestations](#attestations)
- [Block body](#block-body)
- [Shard fork choice rule](#shard-fork-choice-rule)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Introduction
This document describes the shard transition function (data layer only) and the shard fork choice rule as part of Phase 1 of Ethereum 2.0.
## Custom types
| Name | SSZ equivalent | Description |
| - | - | - |
| `Shard` | `uint64` | a shard number |
| `ShardSlot` | `uint64` | a shard slot number |
## Configuration
### Misc
| Name | Value |
| - | - |
| `SHARD_COUNT` | `2**10` (= 1,024) |
| `MIN_BLOCK_BODY_PRICE` | `2**0` (= 1) |
| `MAX_PERIOD_COMMITTEE_SIZE` | `2**7` (= 128) |
| `SHARD_HEADER_SIZE` | `2**10` (= 1024) |
| `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) |
| `MAX_SHARD_BLOCK_SIZE` | `2**16` (= 65,536) |
### Initial values
| Name | Value | Unit |
| - | - | - |
| `SHARD_GENESIS_EPOCH` | **TBD** | Epoch |
### Time parameters
| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `SHARD_SLOTS_PER_EPOCH` | `2**7` (= 128) | shard slots | 6.4 minutes |
| `EPOCHS_PER_SHARD_PERIOD` | `2**8` (= 256) | epochs | ~27 hours |
### State list lengths
| Name | Value |
| - | - |
| `HISTORY_ACCUMULATOR_DEPTH` | `2**6` (= 64) |
### Rewards and penalties
| Name | Value |
| - | - |
| `BLOCK_BODY_PRICE_QUOTIENT` | `2**3` (= 8) |
### Signature domain types
| Name | Value |
| - | - |
| `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` |
| `DOMAIN_SHARD_ATTESTER` | `DomainType('0x81000000')` |
## Containers
### `Crosslink`
```python
# Crosslink is a placeholder to appease the build script until phase 1 is reworked
class Crosslink(Container):
shard: Shard
```
### `ShardBlock`
```python
class ShardBlock(Container):
shard: Shard
slot: ShardSlot
beacon_block_root: Root
parent_root: Root
state_root: Root
body: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE]
block_size_sum: uint64
aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE]
attestations: BLSSignature
signature: BLSSignature
```
### `ShardBlockHeader`
```python
class ShardBlockHeader(Container):
shard: Shard
slot: ShardSlot
beacon_block_root: Root
parent_root: Root
state_root: Root
body_root: Root
block_size_sum: uint64
aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE]
attestations: BLSSignature
signature: BLSSignature
```
### `ShardState`
```python
class ShardState(Container):
shard: Shard
slot: ShardSlot
history_accumulator: Vector[Bytes32, HISTORY_ACCUMULATOR_DEPTH]
latest_block_header: ShardBlockHeader
block_size_sum: uint64
# Fees and rewards
block_body_price: Gwei
older_committee_positive_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE]
older_committee_negative_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE]
newer_committee_positive_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE]
newer_committee_negative_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE]
```
### `ShardAttestationData`
```python
class ShardAttestationData(Container):
slot: ShardSlot
parent_root: Root
```
## Helper functions
### Misc
#### `compute_epoch_of_shard_slot`
```python
def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch:
return Epoch(slot // SHARD_SLOTS_PER_EPOCH)
```
#### `compute_shard_period_start_epoch`
```python
def compute_shard_period_start_epoch(epoch: Epoch, lookback: uint64) -> Epoch:
return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD)
```
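For concreteness, a quick illustrative check of the two helpers above, using the configuration values from this document (`SHARD_SLOTS_PER_EPOCH = 128`, `EPOCHS_PER_SHARD_PERIOD = 256`); the constants are inlined so the snippet runs standalone:

```python
SHARD_SLOTS_PER_EPOCH = 128
EPOCHS_PER_SHARD_PERIOD = 256


def compute_epoch_of_shard_slot(slot: int) -> int:
    return slot // SHARD_SLOTS_PER_EPOCH


def compute_shard_period_start_epoch(epoch: int, lookback: int) -> int:
    return epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD


assert compute_epoch_of_shard_slot(1000) == 7             # shard slot 1000 falls in epoch 7
assert compute_shard_period_start_epoch(1000, 1) == 512   # one period before the period containing epoch 1000
```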
### Beacon state accessors
#### `get_period_committee`
```python
def get_period_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]:
active_validator_indices = get_active_validator_indices(beacon_state, epoch)
seed = get_seed(beacon_state, epoch, DOMAIN_SHARD_ATTESTER)
return compute_committee(active_validator_indices, seed, shard, SHARD_COUNT)[:MAX_PERIOD_COMMITTEE_SIZE]
```
#### `get_shard_committee`
```python
def get_shard_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]:
older_committee = get_period_committee(beacon_state, shard, compute_shard_period_start_epoch(epoch, 2))
newer_committee = get_period_committee(beacon_state, shard, compute_shard_period_start_epoch(epoch, 1))
# Every epoch cycle out validators from the older committee and cycle in validators from the newer committee
older_subcommittee = [i for i in older_committee if i % EPOCHS_PER_SHARD_PERIOD > epoch % EPOCHS_PER_SHARD_PERIOD]
newer_subcommittee = [i for i in newer_committee if i % EPOCHS_PER_SHARD_PERIOD <= epoch % EPOCHS_PER_SHARD_PERIOD]
return older_subcommittee + newer_subcommittee
```
#### `get_shard_proposer_index`
```python
def get_shard_proposer_index(beacon_state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex:
epoch = get_current_epoch(beacon_state)
shard_committee = get_shard_committee(beacon_state, shard, epoch)
active_indices = [i for i in shard_committee if is_active_validator(beacon_state.validators[i], epoch)]
assert any(active_indices)
epoch_seed = get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER)
seed = hash(epoch_seed + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8))
return compute_proposer_index(beacon_state, active_indices, seed)
```
### Shard state mutators
#### `process_delta`
```python
def process_delta(beacon_state: BeaconState,
shard_state: ShardState,
index: ValidatorIndex,
delta: Gwei,
positive: bool=True) -> None:
epoch = compute_epoch_of_shard_slot(shard_state.slot)
older_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2))
newer_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1))
if index in older_committee:
if positive:
shard_state.older_committee_positive_deltas[older_committee.index(index)] += delta
else:
shard_state.older_committee_negative_deltas[older_committee.index(index)] += delta
elif index in newer_committee:
if positive:
shard_state.newer_committee_positive_deltas[newer_committee.index(index)] += delta
else:
shard_state.newer_committee_negative_deltas[newer_committee.index(index)] += delta
```
## Genesis
### `get_genesis_shard_state`
```python
def get_genesis_shard_state(shard: Shard) -> ShardState:
return ShardState(
shard=shard,
slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH),
latest_block_header=ShardBlockHeader(
shard=shard,
slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH),
body_root=hash_tree_root(List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE]()),
),
block_body_price=MIN_BLOCK_BODY_PRICE,
)
```
### `get_genesis_shard_block`
```python
def get_genesis_shard_block(shard: Shard) -> ShardBlock:
return ShardBlock(
shard=shard,
slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH),
state_root=hash_tree_root(get_genesis_shard_state(shard)),
)
```
## Shard state transition function
```python
def shard_state_transition(beacon_state: BeaconState,
shard_state: ShardState,
block: ShardBlock,
validate_state_root: bool=False) -> ShardState:
# Process slots (including those with no blocks) since block
process_shard_slots(shard_state, block.slot)
# Process block
process_shard_block(beacon_state, shard_state, block)
# Validate state root (`validate_state_root == True` in production)
if validate_state_root:
assert block.state_root == hash_tree_root(shard_state)
# Return post-state
return shard_state
```
```python
def process_shard_slots(shard_state: ShardState, slot: ShardSlot) -> None:
assert shard_state.slot <= slot
while shard_state.slot < slot:
process_shard_slot(shard_state)
# Process shard period on the start slot of the next shard period
if (shard_state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0:
process_shard_period(shard_state)
shard_state.slot += ShardSlot(1)
```
```python
def process_shard_slot(shard_state: ShardState) -> None:
# Cache state root
previous_state_root = hash_tree_root(shard_state)
if shard_state.latest_block_header.state_root == Bytes32():
shard_state.latest_block_header.state_root = previous_state_root
# Cache state root in history accumulator
depth = 0
while shard_state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_DEPTH:
shard_state.history_accumulator[depth] = previous_state_root
depth += 1
```
### Period processing
```python
def process_shard_period(shard_state: ShardState) -> None:
# Rotate committee deltas
shard_state.older_committee_positive_deltas = shard_state.newer_committee_positive_deltas
shard_state.older_committee_negative_deltas = shard_state.newer_committee_negative_deltas
shard_state.newer_committee_positive_deltas = [Gwei(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)]
shard_state.newer_committee_negative_deltas = [Gwei(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)]
```
### Block processing
```python
def process_shard_block(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None:
process_shard_block_header(beacon_state, shard_state, block)
process_shard_attestations(beacon_state, shard_state, block)
process_shard_block_body(beacon_state, shard_state, block)
```
#### Block header
```python
def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None:
# Verify the shard number
assert block.shard == shard_state.shard
# Verify the slot number
assert block.slot == shard_state.slot
# Verify the beacon chain root
epoch = compute_epoch_of_shard_slot(shard_state.slot)
assert epoch * SLOTS_PER_EPOCH == beacon_state.slot
beacon_block_header = BeaconBlockHeader(
slot=beacon_state.latest_block_header.slot,
parent_root=beacon_state.latest_block_header.parent_root,
state_root=beacon_state.latest_block_header.state_root,
body_root=beacon_state.latest_block_header.body_root,
)
if beacon_block_header.state_root == Bytes32():
beacon_block_header.state_root = hash_tree_root(beacon_state)
assert block.beacon_block_root == hash_tree_root(beacon_block_header)
# Verify the parent root
assert block.parent_root == hash_tree_root(shard_state.latest_block_header)
# Save current block as the new latest block
shard_state.latest_block_header = ShardBlockHeader(
shard=block.shard,
slot=block.slot,
beacon_block_root=block.beacon_block_root,
parent_root=block.parent_root,
# `state_root` is zeroed and overwritten in the next `process_shard_slot` call
body_root=hash_tree_root(block.body),
block_size_sum=block.block_size_sum,
aggregation_bits=block.aggregation_bits,
attestations=block.attestations,
# `signature` is zeroed
)
# Verify the sum of the block sizes since genesis
shard_state.block_size_sum += SHARD_HEADER_SIZE + len(block.body)
assert block.block_size_sum == shard_state.block_size_sum
# Verify proposer is not slashed
proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
proposer = beacon_state.validators[proposer_index]
assert not proposer.slashed
# Verify proposer signature
domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot))
assert bls.Verify(proposer.pubkey, compute_signing_root(block, domain), block.signature)
```
#### Attestations
```python
def process_shard_attestations(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None:
pubkeys = []
attestation_count = 0
shard_committee = get_shard_committee(beacon_state, shard_state.shard, block.slot)
for i, validator_index in enumerate(shard_committee):
if block.aggregation_bits[i]:
pubkeys.append(beacon_state.validators[validator_index].pubkey)
process_delta(beacon_state, shard_state, validator_index, get_base_reward(beacon_state, validator_index))
attestation_count += 1
# Verify there are no extraneous bits set beyond the shard committee
for i in range(len(shard_committee), 2 * MAX_PERIOD_COMMITTEE_SIZE):
assert block.aggregation_bits[i] == 0b0
# Verify attester aggregate signature
domain = get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot))
shard_attestation_data = ShardAttestationData(slot=shard_state.slot, parent_root=block.parent_root)
signing_root = compute_signing_root(shard_attestation_data, domain)
assert bls.FastAggregateVerify(pubkeys, signing_root, block.attestations)
# Proposer micro-reward
proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
reward = attestation_count * get_base_reward(beacon_state, proposer_index) // PROPOSER_REWARD_QUOTIENT
process_delta(beacon_state, shard_state, proposer_index, Gwei(reward))
```
#### Block body
```python
def process_shard_block_body(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None:
# Verify block body size is a multiple of the header size
assert len(block.body) % SHARD_HEADER_SIZE == 0
# Apply proposer block body fee
block_body_fee = shard_state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE
proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
process_delta(beacon_state, shard_state, proposer_index, Gwei(block_body_fee), positive=False) # Burn
process_delta(beacon_state, shard_state, proposer_index, Gwei(block_body_fee // PROPOSER_REWARD_QUOTIENT)) # Reward
# Calculate new block body price
block_size = SHARD_HEADER_SIZE + len(block.body)
QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT
if block_size > SHARD_BLOCK_SIZE_TARGET:
price_delta = Gwei(shard_state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)
# The maximum block body price caps the amount burnt on fees within a shard period
MAX_BLOCK_BODY_PRICE = MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH
shard_state.block_body_price = Gwei(min(MAX_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta))
else:
price_delta = Gwei(shard_state.block_body_price * (SHARD_BLOCK_SIZE_TARGET - block_size) // QUOTIENT)
shard_state.block_body_price = Gwei(max(MIN_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta))
```
## Shard fork choice rule
The fork choice rule for any shard is LMD GHOST using the shard attestations of the shard committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis it is rooted in the block referenced in the most recent accepted crosslink (i.e. `beacon_state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_block_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot.)
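A highly simplified, non-normative sketch of that rule is given below. It assumes a hypothetical `ShardStore` that tracks shard blocks by root and a single `latest_votes` map folding together the shard and crosslink-committee attestations; none of these structures are defined by this document, and ties between equally weighted children are broken arbitrarily.

```python
from dataclasses import dataclass, field
from typing import Dict

Root = bytes


@dataclass
class ShardBlockSummary:
    parent_root: Root
    slot: int


@dataclass
class ShardStore:
    blocks: Dict[Root, ShardBlockSummary] = field(default_factory=dict)
    latest_votes: Dict[int, Root] = field(default_factory=dict)  # validator index -> latest head vote


def get_shard_head(store: ShardStore, crosslink_block_root: Root) -> Root:
    """Greedy LMD-GHOST walk, rooted at the most recently crosslinked shard block."""
    def supports(vote: Root, root: Root) -> bool:
        # Does the chain that `vote` points to include `root`?
        while True:
            if vote == root:
                return True
            if vote not in store.blocks:
                return False
            vote = store.blocks[vote].parent_root

    def weight(root: Root) -> int:
        return sum(1 for vote in store.latest_votes.values() if supports(vote, root))

    head = crosslink_block_root
    while True:
        children = [r for r, b in store.blocks.items() if b.parent_root == head]
        if not children:
            return head
        # Descend into the child subtree with the most latest-message support.
        head = max(children, key=weight)
```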


@@ -261,7 +261,7 @@ The following values are (non-configurable) constants used throughout the specif
## Containers
The following types are [SimpleSerialize (SSZ)](../simple-serialize.md) containers.
The following types are [SimpleSerialize (SSZ)](../../ssz/simple-serialize.md) containers.
*Note*: The definitions are ordered topologically to facilitate execution of the spec.
@@ -579,7 +579,7 @@ def bytes_to_int(data: bytes) -> uint64:
#### `hash_tree_root`
`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../simple-serialize.md#merkleization).
`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../../ssz/simple-serialize.md#merkleization).
#### BLS Signatures


@@ -40,7 +40,7 @@ The initial deployment phases of Ethereum 2.0 are implemented without consensus
### `deposit` function
The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32`. The first three arguments populate a [`DepositData`](./0_beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformatted calldata.
The deposit contract has a public `deposit` function to make deposits. It takes as arguments `pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96], deposit_data_root: bytes32`. The first three arguments populate a [`DepositData`](./beacon-chain.md#depositdata) object, and `deposit_data_root` is the expected `DepositData` root as a protection against malformatted calldata.
#### Deposit amount


@@ -34,7 +34,7 @@
## Introduction
This document is the beacon chain fork choice spec, part of Ethereum 2.0 Phase 0. It assumes the [beacon chain state transition function spec](./0_beacon-chain.md).
This document is the beacon chain fork choice spec, part of Ethereum 2.0 Phase 0. It assumes the [beacon chain state transition function spec](./beacon-chain.md).
## Fork choice
@@ -48,7 +48,7 @@ The head block root associated with a `store` is defined as `get_head(store)`. A
1) **Leap seconds**: Slots will last `SECONDS_PER_SLOT + 1` or `SECONDS_PER_SLOT - 1` seconds around leap seconds. This is automatically handled by [UNIX time](https://en.wikipedia.org/wiki/Unix_time).
2) **Honest clocks**: Honest nodes are assumed to have clocks synchronized within `SECONDS_PER_SLOT` seconds of each other.
3) **Eth1 data**: The large `ETH1_FOLLOW_DISTANCE` specified in the [honest validator document](../validator/0_beacon-chain-validator.md) should ensure that `state.latest_eth1_data` of the canonical Ethereum 2.0 chain remains consistent with the canonical Ethereum 1.0 chain. If not, emergency manual intervention will be required.
3) **Eth1 data**: The large `ETH1_FOLLOW_DISTANCE` specified in the [honest validator document](./validator.md) should ensure that `state.latest_eth1_data` of the canonical Ethereum 2.0 chain remains consistent with the canonical Ethereum 1.0 chain. If not, emergency manual intervention will be required.
4) **Manual forks**: Manual forks may arbitrarily change the fork choice rule but are expected to be enacted at epoch transitions, with the fork details reflected in `state.fork`.
5) **Implementation**: The implementation found in this specification is constructed for ease of understanding rather than for optimization in computation, space, or any other resource. A number of optimized alternatives can be found [here](https://github.com/protolambda/lmd-ghost).


@@ -10,8 +10,7 @@ It consists of four main sections:
4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which Eth2 clients are being developed.
## Table of contents
<!-- cmd: doctoc --maxlevel=2 p2p-interface.md -->
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
@@ -82,6 +81,7 @@ It consists of four main sections:
- [Why must all clients use the same gossip topic instead of one negotiated between each peer pair?](#why-must-all-clients-use-the-same-gossip-topic-instead-of-one-negotiated-between-each-peer-pair)
- [Why are the topics strings and not hashes?](#why-are-the-topics-strings-and-not-hashes)
- [Why are we overriding the default libp2p pubsub `message-id`?](#why-are-we-overriding-the-default-libp2p-pubsub-message-id)
- [Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?](#why-is-there-maximum_gossip_clock_disparity-when-validating-slot-ranges-of-messages-in-gossip-subnets)
- [Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?](#why-are-there-attestation_subnet_count-attestation-subnets)
- [Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?](#why-are-attestations-limited-to-be-broadcast-on-gossip-channels-within-slots_per_epoch-slots)
- [Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s?](#why-are-aggregate-attestations-broadcast-to-the-global-topic-as-aggregateandproofs-rather-than-just-as-attestations)
@@ -105,6 +105,7 @@ It consists of four main sections:
- [libp2p implementations matrix](#libp2p-implementations-matrix)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
# Network fundamentals
@@ -186,6 +187,7 @@ This section outlines constants that are used in this spec.
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. |
| `MAXIMUM_GOSSIP_CLOCK_DISPARITY` | `500ms` | The maximum milliseconds of clock disparity assumed between honest nodes. |
## The gossip domain: gossipsub
@@ -223,15 +225,15 @@ where `base64` is the [URL-safe base64 alphabet](https://tools.ietf.org/html/rfc
The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic:
| Topic | Message Type |
|----------------------------------------|-------------------|
| beacon_block | SignedBeaconBlock |
| beacon_aggregate_and_proof | AggregateAndProof |
| beacon_attestation\* | Attestation |
| committee_index{subnet_id}\_beacon_attestation | Attestation |
| voluntary_exit | VoluntaryExit |
| proposer_slashing | ProposerSlashing |
| attester_slashing | AttesterSlashing |
| Topic | Message Type |
|------------------------------------------------|----------------------|
| beacon_block | SignedBeaconBlock |
| beacon_aggregate_and_proof | AggregateAndProof |
| beacon_attestation\* | Attestation |
| committee_index{subnet_id}\_beacon_attestation | Attestation |
| voluntary_exit | SignedVoluntaryExit |
| proposer_slashing | ProposerSlashing |
| attester_slashing | AttesterSlashing |
Clients MUST reject (fail validation) messages containing an incorrect type, or invalid payload.
@@ -243,11 +245,13 @@ When processing incoming gossip, clients MAY descore or disconnect peers who fai
There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `TopicName`s are:
- `beacon_block` - This topic is used solely for propagating new beacon blocks to all nodes on the networks. Blocks are sent in their entirety. Clients MUST validate the block proposer signature before forwarding it across the network.
- `beacon_block` - This topic is used solely for propagating new signed beacon blocks to all nodes on the networks. Signed blocks are sent in their entirety. The following validations MUST pass before forwarding the `signed_beacon_block` on the network
- The proposer signature, `signed_beacon_block.signature` is valid.
- The block is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `signed_beacon_block.message.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot).
- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network.
- The aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally).
- The block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation.
- `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (`aggregate_and_proof.aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate_and_proof.aggregate.data.slot`).
- `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate_and_proof.aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate_and_proof.aggregate.data.slot`.
- The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`.
- `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.slot, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
- The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.aggregator_index`.
@@ -255,7 +259,7 @@ There are two primary global topics used to propagate beacon blocks and aggregat
Additional global topics are used to propagate lower frequency validator messages. Their `TopicName`s are:
- `voluntary_exit` - This topic is used solely for propagating voluntary validator exits to proposers on the network. Voluntary exits are sent in their entirety. Clients who receive a voluntary exit on this topic MUST validate the conditions within `process_voluntary_exit` before forwarding it across the network.
- `voluntary_exit` - This topic is used solely for propagating signed voluntary validator exits to proposers on the network. Signed voluntary exits are sent in their entirety. Clients who receive a signed voluntary exit on this topic MUST validate the conditions within `process_voluntary_exit` before forwarding it across the network.
- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. Clients who receive a proposer slashing on this topic MUST validate the conditions within `process_proposer_slashing` before forwarding it across the network.
- `attester_slashing` - This topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network.
@@ -267,7 +271,7 @@ Attestation subnets are used to propagate unaggregated attestations to subsectio
- The attestation's committee index (`attestation.data.index`) is for the correct subnet.
- The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`).
- The block being voted for (`attestation.data.beacon_block_root`) passes validation.
- `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (`attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot`).
- `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (within a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot`.
- The signature of `attestation` is valid.
#### Interop
@@ -394,12 +398,12 @@ Here, `result` represents the 1-byte response code.
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time:
- `ssz`: the contents are [SSZ-encoded](../simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Bytes32`'s.
- `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Bytes32`'s.
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet.
#### SSZ-encoding strategy (with or without Snappy)
The [SimpleSerialize (SSZ) specification](../simple-serialize.md) outlines how objects are SSZ-encoded. If the Snappy variant is selected, we feed the serialized form to the Snappy compressor on encoding. The inverse happens on decoding.
The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded. If the Snappy variant is selected, we feed the serialized form to the Snappy compressor on encoding. The inverse happens on decoding.
**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST prefix all encoded and compressed (if applicable) payloads with an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
@@ -765,6 +769,14 @@ Some examples of where messages could be duplicated:
* Attestation aggregation strategies where clients partially aggregate attestations and propagate them. Partial aggregates could be duplicated
* Clients re-publishing seen messages
### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?
For some gossip channels (e.g. those for Attestations and BeaconBlocks), there are designated ranges of slots during which particular messages can be sent, limiting messages gossiped to those that can be reasonably used in the consensus at the current time/slot. This is to reduce optionality in DoS attacks.
`MAXIMUM_GOSSIP_CLOCK_DISPARITY` provides some leeway in validating slot ranges to prevent the gossip network from becoming overly brittle with respect to clock disparity. For minimum and maximum allowable slot broadcast times, `MAXIMUM_GOSSIP_CLOCK_DISPARITY` MUST be subtracted and added respectively, marginally extending the valid range. Although messages can at times be eagerly gossiped to the network, the node's fork choice prevents integration of these messages into the actual consensus until the _actual local start_ of the designated slot.
The value of this constant is currently a placeholder and will be tuned based on data observed in testnets.
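As a rough, non-normative illustration of the widened check for an attestation, the slot range above can be translated into wall-clock bounds; the constant values (`SECONDS_PER_SLOT = 12`) and the helper below are assumptions for the example, not part of this specification.

```python
SECONDS_PER_SLOT = 12  # assumed value for illustration
ATTESTATION_PROPAGATION_SLOT_RANGE = 32
MAXIMUM_GOSSIP_CLOCK_DISPARITY = 0.5  # seconds


def attestation_slot_in_propagation_range(attestation_slot: int, genesis_time: int, now: float) -> bool:
    # Earliest acceptable receipt time: the start of the attestation's slot,
    # minus the allowed clock disparity (the sender's clock may run ahead of ours).
    earliest = genesis_time + attestation_slot * SECONDS_PER_SLOT - MAXIMUM_GOSSIP_CLOCK_DISPARITY
    # Latest acceptable receipt time: the end of the propagation range,
    # plus the allowed clock disparity (the sender's clock may run behind ours).
    latest = (genesis_time
              + (attestation_slot + ATTESTATION_PROPAGATION_SLOT_RANGE + 1) * SECONDS_PER_SLOT
              + MAXIMUM_GOSSIP_CLOCK_DISPARITY)
    return earliest <= now <= latest
```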
### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?
Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. The exact grouping will be dependent on more involved network tests. This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet). The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` until network tests indicate otherwise.


@@ -1,6 +1,6 @@
# Ethereum 2.0 Phase 0 -- Honest Validator
**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
## Table of contents
@@ -75,7 +75,7 @@ A validator is an entity that participates in the consensus of the Ethereum 2.0
## Prerequisites
All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](../core/0_beacon-chain.md) and [Phase 0 -- Deposit Contract](../core/0_deposit-contract.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout.
All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Deposit Contract](./deposit-contract.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout.
## Constants
@@ -110,11 +110,11 @@ The validator constructs their `withdrawal_credentials` via the following:
### Submit deposit
In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 proof-of-work chain. Deposits are made to the [deposit contract](../core/0_deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`.
In Phase 0, all incoming validator deposits originate from the Ethereum 1.0 proof-of-work chain. Deposits are made to the [deposit contract](./deposit-contract.md) located at `DEPOSIT_CONTRACT_ADDRESS`.
To submit a deposit:
- Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object.
- Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](./beacon-chain.md#depositdata) SSZ object.
- Let `amount` be the amount in Gwei to be deposited by the validator where `amount >= MIN_DEPOSIT_AMOUNT`.
- Set `deposit_data.pubkey` to validator's `pubkey`.
- Set `deposit_data.withdrawal_credentials` to `withdrawal_credentials`.
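A hedged sketch of the packing steps above, assuming the Phase 0 pyspec definitions of `DepositData`, `Gwei`, and `MIN_DEPOSIT_AMOUNT` are in scope; the remaining fields, such as the deposit signature, are omitted here:

```python
def build_deposit_data(pubkey, withdrawal_credentials, amount_gwei):
    # Sketch only: pack the validator's initialization parameters into DepositData.
    assert amount_gwei >= MIN_DEPOSIT_AMOUNT
    return DepositData(
        pubkey=pubkey,
        withdrawal_credentials=withdrawal_credentials,
        amount=Gwei(amount_gwei),
        # signature is set in a later step, not shown in this sketch
    )
```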
@ -132,13 +132,13 @@ Deposits cannot be processed into the beacon chain until the Eth1 block in which
### Validator index
Once a validator has been processed and added to the beacon state's `validators`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](../core/0_beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally.
Once a validator has been processed and added to the beacon state's `validators`, the validator's `validator_index` is defined by the index into the registry at which the [`ValidatorRecord`](./beacon-chain.md#validator) contains the `pubkey` specified in the validator's deposit. A validator's `validator_index` is guaranteed to not change from the time of initial deposit until the validator exits and fully withdraws. This `validator_index` is used throughout the specification to dictate validator roles and responsibilities at any point and should be stored locally.
### Activation
In normal operation, the validator is quickly activated, at which point the validator is added to the shuffling and begins validation after an additional `MAX_SEED_LOOKAHEAD` epochs (25.6 minutes).
The function [`is_active_validator`](../core/0_beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows:
The function [`is_active_validator`](./beacon-chain.md#is_active_validator) can be used to check if a validator is active during a given epoch. Usage is as follows:
```python
def check_if_validator_active(state: BeaconState, validator_index: ValidatorIndex) -> bool:
    validator = state.validators[validator_index]
    return is_active_validator(validator, get_current_epoch(state))
```
@ -209,7 +209,7 @@ A validator has two primary responsibilities to the beacon chain: [proposing blo
### Block proposal
A validator is expected to propose a [`SignedBeaconBlock`](../core/0_beacon-chain.md#signedbeaconblock) at the beginning of any slot during which `is_proposer(state, validator_index)` returns `True`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator creates, signs, and broadcasts a `block` that is a child of `parent` that satisfies a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
A validator is expected to propose a [`SignedBeaconBlock`](./beacon-chain.md#signedbeaconblock) at the beginning of any slot during which `is_proposer(state, validator_index)` returns `True`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator creates, signs, and broadcasts a `block` that is a child of `parent` that satisfies a valid [beacon chain state transition](./beacon-chain.md#beacon-chain-state-transition-function).
There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312,500 validators = 10 million ETH, that's once per ~6 weeks).
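The arithmetic behind the "~6 weeks" figure, assuming 12-second slots:

```python
SECONDS_PER_SLOT = 12
active_validators = 312_500
seconds_between_proposals = active_validators * SECONDS_PER_SLOT       # 3,750,000 s
weeks_between_proposals = seconds_between_proposals / (7 * 24 * 3600)  # ~6.2 weeks
```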
@ -283,40 +283,44 @@ def is_candidate_block(block: Eth1Block, period_start: uint64) -> bool:
```python
def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data:
period_start = voting_period_start_time(state)
# `eth1_chain` abstractly represents all blocks in the eth1 chain.
# `eth1_chain` abstractly represents all blocks in the eth1 chain sorted by ascending block height
votes_to_consider = [get_eth1_data(block) for block in eth1_chain if
is_candidate_block(block, period_start)]
# Valid votes already cast during this period
valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider]
# Default vote on latest eth1 block data in the period range unless eth1 chain is not live
default_vote = votes_to_consider[-1] if any(votes_to_consider) else state.eth1_data
return max(
valid_votes,
key=lambda v: (valid_votes.count(v), -valid_votes.index(v)), # Tiebreak by smallest distance
default=get_eth1_data(ETH1_FOLLOW_DISTANCE),
default=default_vote
)
```
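To illustrate the tiebreak (not part of the spec): when counts are equal, the vote appearing earliest in `valid_votes` wins, since its smaller index yields the larger `-index` component of the key:

```python
valid_votes = ["A", "B", "A", "B"]
# Both "A" and "B" have count 2; "A" has index 0 and "B" has index 1,
# so the key (2, 0) for "A" beats (2, -1) for "B".
winner = max(valid_votes, key=lambda v: (valid_votes.count(v), -valid_votes.index(v)))
assert winner == "A"
```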
##### Proposer slashings
Up to `MAX_PROPOSER_SLASHINGS`, [`ProposerSlashing`](../core/0_beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](../core/0_beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included.
Up to `MAX_PROPOSER_SLASHINGS`, [`ProposerSlashing`](./beacon-chain.md#proposerslashing) objects can be included in the `block`. The proposer slashings must satisfy the verification conditions found in [proposer slashings processing](./beacon-chain.md#proposer-slashings). The validator receives a small "whistleblower" reward for each proposer slashing found and included.
##### Attester slashings
Up to `MAX_ATTESTER_SLASHINGS`, [`AttesterSlashing`](../core/0_beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [attester slashings processing](../core/0_beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included.
Up to `MAX_ATTESTER_SLASHINGS`, [`AttesterSlashing`](./beacon-chain.md#attesterslashing) objects can be included in the `block`. The attester slashings must satisfy the verification conditions found in [attester slashings processing](./beacon-chain.md#attester-slashings). The validator receives a small "whistleblower" reward for each attester slashing found and included.
##### Attestations
Up to `MAX_ATTESTATIONS`, aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](../core/0_beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain.
Up to `MAX_ATTESTATIONS`, aggregate attestations can be included in the `block`. The attestations added must satisfy the verification conditions found in [attestation processing](./beacon-chain.md#attestations). To maximize profit, the validator should attempt to gather aggregate attestations that include singular attestations from the largest number of validators whose signatures from the same epoch have not previously been added on chain.
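This packing is an instance of maximum coverage; a simple, non-normative greedy heuristic is to repeatedly pick the aggregate that adds the most not-yet-included validator signatures:

```python
def greedy_select_attestations(candidates, already_on_chain, max_attestations):
    # `candidates` is a list of (aggregate, validator_index_set) pairs; `already_on_chain`
    # is the set of validator indices whose signatures are already included on chain.
    covered = set(already_on_chain)
    selected = []
    while len(selected) < max_attestations:
        best = max(candidates, key=lambda c: len(c[1] - covered), default=None)
        if best is None or not (best[1] - covered):
            break  # nothing left that adds new signatures
        selected.append(best[0])
        covered |= best[1]
    return selected
```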
##### Deposits
If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](../core/0_beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1 deposit contract](../core/0_deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](../core/0_beacon-chain.md#deposits).
If there are any unprocessed deposits for the existing `state.eth1_data` (i.e. `state.eth1_data.deposit_count > state.eth1_deposit_index`), then pending deposits _must_ be added to the block. The expected number of deposits is exactly `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)`. These [`deposits`](./beacon-chain.md#deposit) are constructed from the `Deposit` logs from the [Eth1 deposit contract](./deposit-contract.md) and must be processed in sequential order. The deposits included in the `block` must satisfy the verification conditions found in [deposits processing](./beacon-chain.md#deposits).
The `proof` for each deposit must be constructed against the deposit root contained in `state.eth1_data` rather than the deposit root at the time the deposit was initially logged from the 1.0 chain. This entails storing a full deposit merkle tree locally and computing updated proofs against the `eth1_data.deposit_root` as needed. See [`minimal_merkle.py`](https://github.com/ethereum/research/blob/master/spec_pythonizer/utils/merkle_minimal.py) for a sample implementation.
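For intuition, a minimal and unoptimized sketch of deriving a branch for the deposit at `index` from a locally stored list of deposit leaves; this is an illustration under assumed helpers (sha256 pairing, cascading zero hashes), not the referenced `merkle_minimal.py` implementation:

```python
from hashlib import sha256

DEPOSIT_CONTRACT_TREE_DEPTH = 32

def hash_pair(a: bytes, b: bytes) -> bytes:
    return sha256(a + b).digest()

def deposit_merkle_branch(leaves, index, depth=DEPOSIT_CONTRACT_TREE_DEPTH):
    # Walk up the tree, padding each layer with the zero hash of that level
    # and collecting the sibling of the target node at every level.
    layer = list(leaves)
    zero_hash = b"\x00" * 32
    branch = []
    for _ in range(depth):
        if len(layer) % 2 == 1:
            layer.append(zero_hash)
        branch.append(layer[index ^ 1])
        layer = [hash_pair(layer[i], layer[i + 1]) for i in range(0, len(layer), 2)]
        zero_hash = hash_pair(zero_hash, zero_hash)
        index //= 2
    return branch
```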
##### Voluntary exits
Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](../core/0_beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](../core/0_beacon-chain.md#voluntary-exits).
Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](./beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](./beacon-chain.md#voluntary-exits).
#### Packaging into a `SignedBeaconBlock`
@ -356,7 +360,7 @@ A validator should create and broadcast the `attestation` to the associated atte
#### Attestation data
First, the validator should construct `attestation_data`, an [`AttestationData`](../core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot.
First, the validator should construct `attestation_data`, an [`AttestationData`](./beacon-chain.md#attestationdata) object based upon the state at the assigned slot.
- Let `head_block` be the result of running the fork choice during the assigned slot.
- Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`.
@ -382,7 +386,7 @@ Set `attestation_data.beacon_block_root = hash_tree_root(head_block)`.
#### Construct attestation
Next, the validator creates `attestation`, an [`Attestation`](../core/0_beacon-chain.md#attestation) object.
Next, the validator creates `attestation`, an [`Attestation`](./beacon-chain.md#attestation) object.
##### Data
@ -491,7 +495,7 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th
### Proposer slashing
To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same epoch.
To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](./beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same epoch.
*In Phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings.*
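A minimal local-protection sketch under assumed storage (persistence and crash safety omitted): record, before broadcasting, which epoch a block was signed in and refuse to sign a second, distinct block for that epoch:

```python
signed_block_roots_by_epoch = {}  # epoch -> signing root of the block already signed

def safe_to_sign_block(epoch: int, block_signing_root: bytes) -> bool:
    previously_signed = signed_block_roots_by_epoch.get(epoch)
    if previously_signed is not None and previously_signed != block_signing_root:
        return False  # would produce two distinct blocks within the same epoch
    signed_block_roots_by_epoch[epoch] = block_signing_root
    return True
```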
@ -504,7 +508,7 @@ If the software crashes at some point within this routine, then when the validat
### Attester slashing
To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](../core/0_beacon-chain.md#attestationdata) objects, i.e. two attestations that satisfy [`is_slashable_attestation_data`](../core/0_beacon-chain.md#is_slashable_attestation_data).
To avoid "attester slashings", a validator must not sign two conflicting [`AttestationData`](./beacon-chain.md#attestationdata) objects, i.e. two attestations that satisfy [`is_slashable_attestation_data`](./beacon-chain.md#is_slashable_attestation_data).
Specifically, when signing an `Attestation`, a validator should perform the following steps in the following order:
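As a hedged, non-normative illustration, the conditions those steps guard against (double votes and surround votes, mirroring `is_slashable_attestation_data`) can be checked locally along these lines:

```python
signed_attestation_epochs = []  # (source_epoch, target_epoch) pairs already signed

def safe_to_sign_attestation(source_epoch: int, target_epoch: int) -> bool:
    # Conservative sketch: refuses any second signature for an already-signed target epoch.
    for prev_source, prev_target in signed_attestation_epochs:
        if target_epoch == prev_target:
            return False  # double vote
        if (source_epoch < prev_source and prev_target < target_epoch) or \
           (prev_source < source_epoch and target_epoch < prev_target):
            return False  # surround vote, in either direction
    signed_attestation_epochs.append((source_epoch, target_epoch))
    return True
```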

View File

@ -533,7 +533,7 @@ def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: Indexe
all_pubkeys = []
all_signing_roots = []
attestation = indexed_attestation.attestation
domain=get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
aggregation_bits = attestation.aggregation_bits
assert len(aggregation_bits) == len(indexed_attestation.committee)
for i, custody_bits in enumerate(attestation.custody_bits_blocks):
@ -676,8 +676,10 @@ def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTr
prev_gasprice = shard_state.gasprice
pubkeys = [state.validators[proposer].pubkey for proposer in proposers]
signing_roots = [compute_signing_root(header,
get_domain(state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(header.slot))) for header in headers]
signing_roots = [
compute_signing_root(header, get_domain(state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(header.slot)))
for header in headers
]
# Verify combined proposer signature
assert bls.AggregateVerify(zip(pubkeys, signing_roots), signature=transition.proposer_signature_aggregate)
@ -751,7 +753,7 @@ def process_crosslinks(state: BeaconState,
block_body: BeaconBlockBody,
attestations: Sequence[Attestation]) -> Set[Tuple[Shard, Root]]:
winners: Set[Tuple[Shard, Root]] = set()
for shard in range(get_active_shard_count(state)):
for shard in map(Shard, range(get_active_shard_count(state))):
# All attestations in the block for this shard
shard_attestations = [
attestation for attestation in attestations
@ -859,7 +861,7 @@ def process_light_client_signatures(state: BeaconState, block_body: BeaconBlockB
slot = get_previous_slot(state.slot)
signing_root = compute_signing_root(get_block_root_at_slot(state, slot),
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot)))
get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot)))
return bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature)
```

View File

@ -40,7 +40,7 @@
## Introduction
This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [Phase 0](0_beacon-chain.md) specification.
This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [Phase 0](../phase0/beacon-chain.md) specification.
## Constants
@ -345,7 +345,8 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
malefactor = state.validators[custody_slashing.malefactor_index]
whistleblower = state.validators[custody_slashing.whistleblower_index]
domain = get_domain(state, DOMAIN_CUSTODY_BIT_SLASHING, get_current_epoch(state))
assert bls_verify(whistleblower.pubkey, hash_tree_root(custody_slashing), signed_custody_slashing.signature, domain)
signing_root = compute_signing_root(custody_slashing, domain)
assert bls.Verify(whistleblower.pubkey, signing_root, signed_custody_slashing.signature)
# Verify that the whistleblower is slashable
assert is_slashable_validator(whistleblower, get_current_epoch(state))
# Verify that the claimed malefactor is slashable

View File

@ -49,7 +49,7 @@ We define the following Python custom types for type hinting and readability:
### `LightClientUpdate`
```python
class LightClientUpdate(container):
class LightClientUpdate(Container):
# Shard block root (and authenticating signature data)
shard_block_root: Root
fork_version: Version

View File

@ -114,8 +114,8 @@ def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
custody_challenge_index=0,
# exposed_derived_secrets will fully default to zeroes
)
epoch = get_current_epoch(post)
next_epoch = Epoch(epoch + 1)
post.current_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, epoch))
post.next_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, epoch + 1))
post.next_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, next_epoch))
return post
```

View File

@ -239,7 +239,7 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi
Let `A` be an object derived from another object `B` by replacing some of the (possibly nested) values of `B` by their `hash_tree_root`. We say `A` is a "summary" of `B`, and that `B` is an "expansion" of `A`. Notice `hash_tree_root(A) == hash_tree_root(B)`.
We similarly define "summary types" and "expansion types". For example, [`BeaconBlock`](./core/0_beacon-chain.md#beaconblock) is an expansion type of [`BeaconBlockHeader`](./core/0_beacon-chain.md#beaconblockheader). Notice that objects expand to at most one object of a given expansion type. For example, `BeaconBlockHeader` objects uniquely expand to `BeaconBlock` objects.
We similarly define "summary types" and "expansion types". For example, [`BeaconBlock`](../specs/phase0/beacon-chain.md#beaconblock) is an expansion type of [`BeaconBlockHeader`](../specs/phase0/beacon-chain.md#beaconblockheader). Notice that objects expand to at most one object of a given expansion type. For example, `BeaconBlockHeader` objects uniquely expand to `BeaconBlock` objects.
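As a concrete, hedged illustration, assuming the Phase 0 pyspec containers and `hash_tree_root` are in scope: a `BeaconBlockHeader` built from a `BeaconBlock` by replacing `body` with its root Merkleizes to the same value as the block itself:

```python
def summarize_block(block: BeaconBlock) -> BeaconBlockHeader:
    header = BeaconBlockHeader(
        slot=block.slot,
        parent_root=block.parent_root,
        state_root=block.state_root,
        body_root=hash_tree_root(block.body),  # nested value replaced by its root
    )
    # The summary and its expansion share the same hash_tree_root.
    assert hash_tree_root(header) == hash_tree_root(block)
    return header
```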
## Implementations

View File

@ -1,3 +0,0 @@
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -1,3 +0,0 @@
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -1,4 +0,0 @@
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -1,3 +0,0 @@
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -1,4 +0,0 @@
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -1,4 +0,0 @@
eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -1,3 +0,0 @@
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec

View File

@ -1,7 +1,7 @@
# Eth2 config helpers
`preset_loader`: A util to load constants-presets with.
See [Constants-presets documentation](../../configs/constants_presets/README.md).
`preset_loader`: A util to load config-presets with.
See [Configs documentation](../../../configs/README.md).
Usage:

View File

@ -4,7 +4,7 @@
A util to quickly write new test suite generators with.
See [Generators documentation](../../test_generators/README.md) for integration details.
See [Generators documentation](../../generators/README.md) for integration details.
Options:

View File

@ -63,4 +63,4 @@ The pyspec is not a replacement.
## License
Same as the spec itself; see [LICENSE](../../LICENSE) file in the specs repository root.
Same as the spec itself; see [LICENSE](../../../LICENSE) file in the specs repository root.

View File

@ -33,6 +33,6 @@ def pytest_addoption(parser):
@fixture(autouse=True)
def config(request):
config_name = request.config.getoption("--config")
apply_config.load_presets('../../configs/', config_name)
apply_config.load_presets('../../../configs/', config_name)
# now that the presets are loaded, reload the specs to apply them
reload_specs()

Some files were not shown because too many files have changed in this diff.