mirror of
https://github.com/status-im/eth2.0-specs.git
synced 2025-02-21 14:58:12 +00:00
merge
This commit is contained in:
commit
4955c40a7b
@ -52,7 +52,7 @@ jobs:
|
||||
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||
paths:
|
||||
- ~/specs-repo
|
||||
install_test:
|
||||
install_env:
|
||||
docker:
|
||||
- image: circleci/python:3.6
|
||||
working_directory: ~/specs-repo
|
||||
@ -60,13 +60,13 @@ jobs:
|
||||
- restore_cache:
|
||||
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||
- restore_cached_venv:
|
||||
venv_name: v1-pyspec
|
||||
venv_name: v1-pyspec-03
|
||||
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}'
|
||||
- run:
|
||||
name: Install pyspec requirements
|
||||
command: make install_test
|
||||
command: make install_test && make install_lint
|
||||
- save_cached_venv:
|
||||
venv_name: v1-pyspec
|
||||
venv_name: v1-pyspec-03
|
||||
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}'
|
||||
venv_path: ./test_libs/pyspec/venv
|
||||
test:
|
||||
@ -77,7 +77,7 @@ jobs:
|
||||
- restore_cache:
|
||||
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||
- restore_cached_venv:
|
||||
venv_name: v1-pyspec
|
||||
venv_name: v1-pyspec-03
|
||||
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}'
|
||||
- run:
|
||||
name: Run py-tests
|
||||
@ -92,20 +92,22 @@ jobs:
|
||||
- restore_cache:
|
||||
key: v1-specs-repo-{{ .Branch }}-{{ .Revision }}
|
||||
- restore_cached_venv:
|
||||
venv_name: v1-pyspec
|
||||
venv_name: v1-pyspec-03
|
||||
reqs_checksum: '{{ checksum "test_libs/pyspec/requirements.txt" }}-{{ checksum "test_libs/pyspec/requirements-testing.txt" }}'
|
||||
- run:
|
||||
name: Run linter
|
||||
command: make install_lint && make pyspec && make lint
|
||||
command: make lint
|
||||
workflows:
|
||||
version: 2.1
|
||||
test_spec:
|
||||
jobs:
|
||||
- checkout_specs
|
||||
- lint
|
||||
- install_test:
|
||||
- install_env:
|
||||
requires:
|
||||
- checkout_specs
|
||||
- test:
|
||||
requires:
|
||||
- install_test
|
||||
- install_env
|
||||
- lint:
|
||||
requires:
|
||||
- test
|
||||
|
2
Makefile
2
Makefile
@ -42,7 +42,7 @@ citest: $(PY_SPEC_ALL_TARGETS)
|
||||
install_lint:
|
||||
cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install flake8==3.5.0
|
||||
|
||||
lint:
|
||||
lint: $(PY_SPEC_ALL_TARGETS)
|
||||
cd $(PY_SPEC_DIR); . venv/bin/activate; \
|
||||
flake8 --max-line-length=120 ./eth2spec;
|
||||
|
||||
|
@ -5,6 +5,7 @@ import function_puller
|
||||
def build_phase0_spec(sourcefile, outfile):
|
||||
code_lines = []
|
||||
code_lines.append("""
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
|
@ -70,10 +70,10 @@ def get_spec(file_name: str) -> List[str]:
|
||||
code_lines.append(' ' + type_line)
|
||||
code_lines.append('\n')
|
||||
for (ssz_type_name, _) in type_defs:
|
||||
code_lines.append(f' global_vars["{ssz_type_name}"] = {ssz_type_name},')
|
||||
code_lines.append(f' global_vars["{ssz_type_name}"] = {ssz_type_name}')
|
||||
code_lines.append(' global_vars["ssz_types"] = [')
|
||||
for (ssz_type_name, _) in type_defs:
|
||||
code_lines.append(f' {ssz_type_name},')
|
||||
code_lines.append(f' "{ssz_type_name}",')
|
||||
code_lines.append(' ]')
|
||||
code_lines.append('\n')
|
||||
code_lines.append('def get_ssz_type_by_name(name: str) -> Container:')
|
||||
|
@ -1,6 +1,8 @@
|
||||
# BLS signature verification
|
||||
|
||||
**Warning: This document is pending academic review and should not yet be considered secure.**
|
||||
**Notice**: This document is a placeholder to facilitate the emergence of cross-client testnets. Substantive changes are postponed until [BLS standardisation](https://github.com/pairingwg/bls_standard) is finalized.
|
||||
|
||||
**Warning**: The constructions in this document should not be considered secure. In particular, the `hash_to_G2` function is known to be unsecure.
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
|
@ -51,6 +51,7 @@
|
||||
- [`hash`](#hash)
|
||||
- [`hash_tree_root`](#hash_tree_root)
|
||||
- [`signing_root`](#signing_root)
|
||||
- [`bls_domain`](#bls_domain)
|
||||
- [`slot_to_epoch`](#slot_to_epoch)
|
||||
- [`get_previous_epoch`](#get_previous_epoch)
|
||||
- [`get_current_epoch`](#get_current_epoch)
|
||||
@ -98,8 +99,7 @@
|
||||
- [Genesis state](#genesis-state)
|
||||
- [Genesis block](#genesis-block)
|
||||
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
|
||||
- [State caching](#state-caching)
|
||||
- [Per-epoch processing](#per-epoch-processing)
|
||||
- [Epoch processing](#epoch-processing)
|
||||
- [Helper functions](#helper-functions-1)
|
||||
- [Justification and finalization](#justification-and-finalization)
|
||||
- [Crosslinks](#crosslinks)
|
||||
@ -107,8 +107,7 @@
|
||||
- [Registry updates](#registry-updates)
|
||||
- [Slashings](#slashings)
|
||||
- [Final updates](#final-updates)
|
||||
- [Per-slot processing](#per-slot-processing)
|
||||
- [Per-block processing](#per-block-processing)
|
||||
- [Block processing](#block-processing)
|
||||
- [Block header](#block-header)
|
||||
- [RANDAO](#randao)
|
||||
- [Eth1 data](#eth1-data)
|
||||
@ -119,7 +118,6 @@
|
||||
- [Deposits](#deposits)
|
||||
- [Voluntary exits](#voluntary-exits)
|
||||
- [Transfers](#transfers)
|
||||
- [State root verification](#state-root-verification)
|
||||
|
||||
<!-- /TOC -->
|
||||
|
||||
@ -283,8 +281,9 @@ class Fork(Container):
|
||||
class Crosslink(Container):
|
||||
# Shard number
|
||||
shard: uint64
|
||||
# Epoch number
|
||||
epoch: uint64
|
||||
# Crosslinking data from epochs [start....end-1]
|
||||
start_epoch: uint64
|
||||
end_epoch: uint64
|
||||
# Root of the previous crosslink
|
||||
parent_root: Bytes32
|
||||
# Root of the crosslinked shard data since the previous crosslink
|
||||
@ -367,6 +366,7 @@ class BeaconBlockHeader(Container):
|
||||
body_root: Bytes32
|
||||
signature: Bytes96
|
||||
```
|
||||
|
||||
#### `Validator`
|
||||
|
||||
```python
|
||||
@ -560,9 +560,7 @@ class BeaconState(Container):
|
||||
latest_block_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT]
|
||||
latest_state_roots: Vector[Bytes32, SLOTS_PER_HISTORICAL_ROOT]
|
||||
latest_active_index_roots: Vector[Bytes32, LATEST_ACTIVE_INDEX_ROOTS_LENGTH]
|
||||
# Balances slashed at every withdrawal period
|
||||
latest_slashed_balances: Vector[uint64, LATEST_SLASHED_EXIT_LENGTH]
|
||||
# `latest_block_header.state_root == ZERO_HASH` temporarily
|
||||
latest_block_header: BeaconBlockHeader
|
||||
historical_roots: List[Bytes32]
|
||||
|
||||
@ -612,6 +610,16 @@ The `hash` function is SHA256.
|
||||
|
||||
`def signing_root(object: Container) -> Bytes32` is a function defined in the [SimpleSerialize spec](../simple-serialize.md#self-signed-containers) to compute signing messages.
|
||||
|
||||
### `bls_domain`
|
||||
|
||||
```python
|
||||
def bls_domain(domain_type: int, fork_version: bytes=b'\x00\x00\x00\x00') -> int:
|
||||
"""
|
||||
Return the bls domain given by the ``domain_type`` and optional 4 byte ``fork_version`` (defaults to zero).
|
||||
"""
|
||||
return bytes_to_int(int_to_bytes(domain_type, length=4) + fork_version)
|
||||
```
|
||||
|
||||
### `slot_to_epoch`
|
||||
|
||||
```python
|
||||
@ -631,7 +639,7 @@ def get_previous_epoch(state: BeaconState) -> Epoch:
|
||||
Return the current epoch if it's genesis epoch.
|
||||
"""
|
||||
current_epoch = get_current_epoch(state)
|
||||
return (current_epoch - 1) if current_epoch > GENESIS_EPOCH else current_epoch
|
||||
return GENESIS_EPOCH if current_epoch == GENESIS_EPOCH else current_epoch - 1
|
||||
```
|
||||
|
||||
### `get_current_epoch`
|
||||
@ -871,7 +879,7 @@ def get_shuffled_index(index: ValidatorIndex, index_count: int, seed: Bytes32) -
|
||||
# See the 'generalized domain' algorithm on page 3
|
||||
for round in range(SHUFFLE_ROUND_COUNT):
|
||||
pivot = bytes_to_int(hash(seed + int_to_bytes(round, length=1))[0:8]) % index_count
|
||||
flip = (pivot - index) % index_count
|
||||
flip = (pivot + index_count - index) % index_count
|
||||
position = max(index, flip)
|
||||
source = hash(seed + int_to_bytes(round, length=1) + int_to_bytes(position // 256, length=4))
|
||||
byte = source[(position % 256) // 8]
|
||||
@ -951,7 +959,7 @@ def get_domain(state: BeaconState,
|
||||
"""
|
||||
epoch = get_current_epoch(state) if message_epoch is None else message_epoch
|
||||
fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version
|
||||
return bytes_to_int(fork_version + int_to_bytes(domain_type, length=4))
|
||||
return bls_domain(domain_type, fork_version)
|
||||
```
|
||||
|
||||
### `get_bitfield_bit`
|
||||
@ -1173,7 +1181,11 @@ Let `genesis_state = get_genesis_beacon_state(genesis_deposits, eth2genesis.gene
|
||||
|
||||
```python
|
||||
def get_genesis_beacon_state(deposits: List[Deposit], genesis_time: int, genesis_eth1_data: Eth1Data) -> BeaconState:
|
||||
state = BeaconState(genesis_time=genesis_time, latest_eth1_data=genesis_eth1_data)
|
||||
state = BeaconState(
|
||||
genesis_time=genesis_time,
|
||||
latest_eth1_data=genesis_eth1_data,
|
||||
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
|
||||
)
|
||||
|
||||
# Process genesis deposits
|
||||
for deposit in deposits:
|
||||
@ -1199,50 +1211,61 @@ Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.
|
||||
|
||||
## Beacon chain state transition function
|
||||
|
||||
We now define the state transition function. At a high level, the state transition is made up of four parts:
|
||||
|
||||
1. State caching, which happens at the start of every slot.
|
||||
2. The per-epoch transitions, which happens at the start of the first slot of every epoch.
|
||||
3. The per-slot transitions, which happens at every slot.
|
||||
4. The per-block transitions, which happens at every block.
|
||||
|
||||
Transition section notes:
|
||||
* The state caching caches the state root of the previous slot and updates block and state roots records.
|
||||
* The per-epoch transitions focus on the [validator](#dfn-validator) registry, including adjusting balances and activating and exiting [validators](#dfn-validator), as well as processing crosslinks and managing block justification/finalization.
|
||||
* The per-slot transitions focus on the slot counter.
|
||||
* The per-block transitions generally focus on verifying aggregate signatures and saving temporary records relating to the per-block activity in the `BeaconState`.
|
||||
|
||||
Beacon blocks that trigger unhandled Python exceptions (e.g. out-of-range list accesses) and failed `assert`s during the state transition are considered invalid.
|
||||
|
||||
*Note*: If there are skipped slots between a block and its parent block, run the steps in the [state-root](#state-caching), [per-epoch](#per-epoch-processing), and [per-slot](#per-slot-processing) sections once for each skipped slot and then once for the slot containing the new block.
|
||||
|
||||
### State caching
|
||||
|
||||
At every `slot > GENESIS_SLOT` run the following function:
|
||||
The post-state corresponding to a pre-state `state` and a block `block` is defined as `state_transition(state, block)`. State transitions that trigger an unhandled excpetion (e.g. a failed `assert` or an out-of-range list access) are considered invalid.
|
||||
|
||||
```python
|
||||
def cache_state(state: BeaconState) -> None:
|
||||
# Cache latest known state root (for previous slot)
|
||||
latest_state_root = hash_tree_root(state)
|
||||
state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_state_root
|
||||
|
||||
# Store latest known state root (for previous slot) in latest_block_header if it is empty
|
||||
if state.latest_block_header.state_root == ZERO_HASH:
|
||||
state.latest_block_header.state_root = latest_state_root
|
||||
|
||||
# Cache latest known block root (for previous slot)
|
||||
latest_block_root = signing_root(state.latest_block_header)
|
||||
state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = latest_block_root
|
||||
def state_transition(state: BeaconState, block: BeaconBlock, validate_state_root: bool=False) -> BeaconState:
|
||||
# Process slots (including those with no blocks) since block
|
||||
process_slots(state, block.slot)
|
||||
# Process block
|
||||
process_block(state, block)
|
||||
# Validate state root (`validate_state_root == True` in production)
|
||||
if validate_state_root:
|
||||
assert block.state_root == hash_tree_root(state)
|
||||
# Return post-state
|
||||
return state
|
||||
```
|
||||
|
||||
### Per-epoch processing
|
||||
```python
|
||||
def process_slots(state: BeaconState, slot: Slot) -> None:
|
||||
assert state.slot < slot
|
||||
while state.slot < slot:
|
||||
process_slot(state)
|
||||
# Process epoch on the first slot of the next epoch
|
||||
if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
|
||||
process_epoch(state)
|
||||
state.slot += 1
|
||||
```
|
||||
|
||||
The steps below happen when `state.slot > GENESIS_SLOT and (state.slot + 1) % SLOTS_PER_EPOCH == 0`.
|
||||
```python
|
||||
def process_slot(state: BeaconState) -> None:
|
||||
# Cache state root
|
||||
previous_state_root = hash_tree_root(state)
|
||||
state.latest_state_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_state_root
|
||||
|
||||
# Cache latest block header state root
|
||||
if state.latest_block_header.state_root == ZERO_HASH:
|
||||
state.latest_block_header.state_root = previous_state_root
|
||||
|
||||
# Cache block root
|
||||
previous_block_root = signing_root(state.latest_block_header)
|
||||
state.latest_block_roots[state.slot % SLOTS_PER_HISTORICAL_ROOT] = previous_block_root
|
||||
```
|
||||
|
||||
### Epoch processing
|
||||
|
||||
```python
|
||||
def process_epoch(state: BeaconState) -> None:
|
||||
process_justification_and_finalization(state)
|
||||
process_crosslinks(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_slashings(state)
|
||||
process_final_updates(state)
|
||||
```
|
||||
|
||||
#### Helper functions
|
||||
|
||||
We define epoch transition helper functions:
|
||||
|
||||
```python
|
||||
def get_total_active_balance(state: BeaconState) -> Gwei:
|
||||
return get_total_balance(state, get_active_validator_indices(state, get_current_epoch(state)))
|
||||
@ -1303,8 +1326,6 @@ def get_winning_crosslink_and_attesting_indices(state: BeaconState,
|
||||
|
||||
#### Justification and finalization
|
||||
|
||||
Run the following function:
|
||||
|
||||
```python
|
||||
def process_justification_and_finalization(state: BeaconState) -> None:
|
||||
if get_current_epoch(state) <= GENESIS_EPOCH + 1:
|
||||
@ -1337,27 +1358,25 @@ def process_justification_and_finalization(state: BeaconState) -> None:
|
||||
# Process finalizations
|
||||
bitfield = state.justification_bitfield
|
||||
# The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
|
||||
if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch == current_epoch - 3:
|
||||
if (bitfield >> 1) % 8 == 0b111 and old_previous_justified_epoch + 3 == current_epoch:
|
||||
state.finalized_epoch = old_previous_justified_epoch
|
||||
state.finalized_root = get_block_root(state, state.finalized_epoch)
|
||||
# The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
|
||||
if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch == current_epoch - 2:
|
||||
if (bitfield >> 1) % 4 == 0b11 and old_previous_justified_epoch + 2 == current_epoch:
|
||||
state.finalized_epoch = old_previous_justified_epoch
|
||||
state.finalized_root = get_block_root(state, state.finalized_epoch)
|
||||
# The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
|
||||
if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch == current_epoch - 2:
|
||||
if (bitfield >> 0) % 8 == 0b111 and old_current_justified_epoch + 2 == current_epoch:
|
||||
state.finalized_epoch = old_current_justified_epoch
|
||||
state.finalized_root = get_block_root(state, state.finalized_epoch)
|
||||
# The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
|
||||
if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch == current_epoch - 1:
|
||||
if (bitfield >> 0) % 4 == 0b11 and old_current_justified_epoch + 1 == current_epoch:
|
||||
state.finalized_epoch = old_current_justified_epoch
|
||||
state.finalized_root = get_block_root(state, state.finalized_epoch)
|
||||
```
|
||||
|
||||
#### Crosslinks
|
||||
|
||||
Run the following function:
|
||||
|
||||
```python
|
||||
def process_crosslinks(state: BeaconState) -> None:
|
||||
state.previous_crosslinks = [c for c in state.current_crosslinks]
|
||||
@ -1372,8 +1391,6 @@ def process_crosslinks(state: BeaconState) -> None:
|
||||
|
||||
#### Rewards and penalties
|
||||
|
||||
First, we define additional helpers:
|
||||
|
||||
```python
|
||||
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
|
||||
adjusted_quotient = integer_squareroot(get_total_active_balance(state)) // BASE_REWARD_QUOTIENT
|
||||
@ -1449,8 +1466,6 @@ def get_crosslink_deltas(state: BeaconState) -> Tuple[List[Gwei], List[Gwei]]:
|
||||
return rewards, penalties
|
||||
```
|
||||
|
||||
Run the following function:
|
||||
|
||||
```python
|
||||
def process_rewards_and_penalties(state: BeaconState) -> None:
|
||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||
@ -1465,8 +1480,6 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
|
||||
|
||||
#### Registry updates
|
||||
|
||||
Run the following function:
|
||||
|
||||
```python
|
||||
def process_registry_updates(state: BeaconState) -> None:
|
||||
# Process activation eligibility and ejections
|
||||
@ -1495,8 +1508,6 @@ def process_registry_updates(state: BeaconState) -> None:
|
||||
|
||||
#### Slashings
|
||||
|
||||
Run the following function:
|
||||
|
||||
```python
|
||||
def process_slashings(state: BeaconState) -> None:
|
||||
current_epoch = get_current_epoch(state)
|
||||
@ -1519,8 +1530,6 @@ def process_slashings(state: BeaconState) -> None:
|
||||
|
||||
#### Final updates
|
||||
|
||||
Run the following function:
|
||||
|
||||
```python
|
||||
def process_final_updates(state: BeaconState) -> None:
|
||||
current_epoch = get_current_epoch(state)
|
||||
@ -1559,19 +1568,16 @@ def process_final_updates(state: BeaconState) -> None:
|
||||
state.current_epoch_attestations = []
|
||||
```
|
||||
|
||||
### Per-slot processing
|
||||
|
||||
At every `slot > GENESIS_SLOT` run the following function:
|
||||
### Block processing
|
||||
|
||||
```python
|
||||
def advance_slot(state: BeaconState) -> None:
|
||||
state.slot += 1
|
||||
def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||
process_block_header(state, block)
|
||||
process_randao(state, block.body)
|
||||
process_eth1_data(state, block.body)
|
||||
process_operations(state, block.body)
|
||||
```
|
||||
|
||||
### Per-block processing
|
||||
|
||||
For every `block` except the genesis block, run `process_block_header(state, block)`, `process_randao(state, block)` and `process_eth1_data(state, block)`.
|
||||
|
||||
#### Block header
|
||||
|
||||
```python
|
||||
@ -1596,44 +1602,57 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
|
||||
#### RANDAO
|
||||
|
||||
```python
|
||||
def process_randao(state: BeaconState, block: BeaconBlock) -> None:
|
||||
def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
proposer = state.validator_registry[get_beacon_proposer_index(state)]
|
||||
# Verify that the provided randao value is valid
|
||||
assert bls_verify(
|
||||
proposer.pubkey,
|
||||
hash_tree_root(get_current_epoch(state)),
|
||||
block.body.randao_reveal,
|
||||
body.randao_reveal,
|
||||
get_domain(state, DOMAIN_RANDAO),
|
||||
)
|
||||
# Mix it in
|
||||
state.latest_randao_mixes[get_current_epoch(state) % LATEST_RANDAO_MIXES_LENGTH] = (
|
||||
xor(get_randao_mix(state, get_current_epoch(state)),
|
||||
hash(block.body.randao_reveal))
|
||||
hash(body.randao_reveal))
|
||||
)
|
||||
```
|
||||
|
||||
#### Eth1 data
|
||||
|
||||
```python
|
||||
def process_eth1_data(state: BeaconState, block: BeaconBlock) -> None:
|
||||
state.eth1_data_votes.append(block.body.eth1_data)
|
||||
if state.eth1_data_votes.count(block.body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD:
|
||||
state.latest_eth1_data = block.body.eth1_data
|
||||
def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
state.eth1_data_votes.append(body.eth1_data)
|
||||
if state.eth1_data_votes.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD:
|
||||
state.latest_eth1_data = body.eth1_data
|
||||
```
|
||||
|
||||
#### Operations
|
||||
|
||||
*Note*: All functions in this section mutate `state`.
|
||||
```python
|
||||
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
# Verify that outstanding deposits are processed up to the maximum number of deposits
|
||||
assert len(body.deposits) == min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)
|
||||
# Verify that there are no duplicate transfers
|
||||
assert len(body.transfers) == len(set(body.transfers))
|
||||
|
||||
for operations, max_operations, function in (
|
||||
(body.proposer_slashings, MAX_PROPOSER_SLASHINGS, process_proposer_slashing),
|
||||
(body.attester_slashings, MAX_ATTESTER_SLASHINGS, process_attester_slashing),
|
||||
(body.attestations, MAX_ATTESTATIONS, process_attestation),
|
||||
(body.deposits, MAX_DEPOSITS, process_deposit),
|
||||
(body.voluntary_exits, MAX_VOLUNTARY_EXITS, process_voluntary_exit),
|
||||
(body.transfers, MAX_TRANSFERS, process_transfer),
|
||||
):
|
||||
assert len(operations) <= max_operations
|
||||
for operation in operations:
|
||||
function(state, operation)
|
||||
```
|
||||
|
||||
##### Proposer slashings
|
||||
|
||||
Verify that `len(block.body.proposer_slashings) <= MAX_PROPOSER_SLASHINGS`.
|
||||
|
||||
For each `proposer_slashing` in `block.body.proposer_slashings`, run the following function:
|
||||
|
||||
```python
|
||||
def process_proposer_slashing(state: BeaconState,
|
||||
proposer_slashing: ProposerSlashing) -> None:
|
||||
def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None:
|
||||
"""
|
||||
Process ``ProposerSlashing`` operation.
|
||||
"""
|
||||
@ -1654,13 +1673,8 @@ def process_proposer_slashing(state: BeaconState,
|
||||
|
||||
##### Attester slashings
|
||||
|
||||
Verify that `len(block.body.attester_slashings) <= MAX_ATTESTER_SLASHINGS`.
|
||||
|
||||
For each `attester_slashing` in `block.body.attester_slashings`, run the following function:
|
||||
|
||||
```python
|
||||
def process_attester_slashing(state: BeaconState,
|
||||
attester_slashing: AttesterSlashing) -> None:
|
||||
def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
|
||||
"""
|
||||
Process ``AttesterSlashing`` operation.
|
||||
"""
|
||||
@ -1673,7 +1687,7 @@ def process_attester_slashing(state: BeaconState,
|
||||
slashed_any = False
|
||||
attesting_indices_1 = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
|
||||
attesting_indices_2 = attestation_2.custody_bit_0_indices + attestation_2.custody_bit_1_indices
|
||||
for index in set(attesting_indices_1).intersection(attesting_indices_2):
|
||||
for index in sorted(set(attesting_indices_1).intersection(attesting_indices_2)):
|
||||
if is_slashable_validator(state.validator_registry[index], get_current_epoch(state)):
|
||||
slash_validator(state, index)
|
||||
slashed_any = True
|
||||
@ -1682,10 +1696,6 @@ def process_attester_slashing(state: BeaconState,
|
||||
|
||||
##### Attestations
|
||||
|
||||
Verify that `len(block.body.attestations) <= MAX_ATTESTATIONS`.
|
||||
|
||||
For each `attestation` in `block.body.attestations`, run the following function:
|
||||
|
||||
```python
|
||||
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
"""
|
||||
@ -1714,7 +1724,8 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
|
||||
# Check FFG data, crosslink data, and signature
|
||||
assert ffg_data == (data.source_epoch, data.source_root, data.target_epoch)
|
||||
assert data.crosslink.epoch == min(data.target_epoch, parent_crosslink.epoch + MAX_EPOCHS_PER_CROSSLINK)
|
||||
assert data.crosslink.start_epoch == parent_crosslink.end_epoch
|
||||
assert data.crosslink.end_epoch == min(data.target_epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)
|
||||
assert data.crosslink.parent_root == hash_tree_root(parent_crosslink)
|
||||
assert data.crosslink.data_root == ZERO_HASH # [to be removed in phase 1]
|
||||
validate_indexed_attestation(state, convert_to_indexed(state, attestation))
|
||||
@ -1722,10 +1733,6 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
|
||||
##### Deposits
|
||||
|
||||
Verify that `len(block.body.deposits) == min(MAX_DEPOSITS, state.latest_eth1_data.deposit_count - state.deposit_index)`.
|
||||
|
||||
For each `deposit` in `block.body.deposits`, run the following function:
|
||||
|
||||
```python
|
||||
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
"""
|
||||
@ -1749,8 +1756,9 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
validator_pubkeys = [v.pubkey for v in state.validator_registry]
|
||||
if pubkey not in validator_pubkeys:
|
||||
# Verify the deposit signature (proof of possession)
|
||||
# Note: deposits are valid across forks, hence the deposit domain is retrieved directly from `bls_domain`
|
||||
if not bls_verify(
|
||||
pubkey, signing_root(deposit.data), deposit.data.signature, get_domain(state, DOMAIN_DEPOSIT)
|
||||
pubkey, signing_root(deposit.data), deposit.data.signature, bls_domain(DOMAIN_DEPOSIT)
|
||||
):
|
||||
return
|
||||
|
||||
@ -1773,10 +1781,6 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
|
||||
##### Voluntary exits
|
||||
|
||||
Verify that `len(block.body.voluntary_exits) <= MAX_VOLUNTARY_EXITS`.
|
||||
|
||||
For each `exit` in `block.body.voluntary_exits`, run the following function:
|
||||
|
||||
```python
|
||||
def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None:
|
||||
"""
|
||||
@ -1800,10 +1804,6 @@ def process_voluntary_exit(state: BeaconState, exit: VoluntaryExit) -> None:
|
||||
|
||||
##### Transfers
|
||||
|
||||
Verify that `len(block.body.transfers) <= MAX_TRANSFERS` and that all transfers are distinct.
|
||||
|
||||
For each `transfer` in `block.body.transfers`, run the following function:
|
||||
|
||||
```python
|
||||
def process_transfer(state: BeaconState, transfer: Transfer) -> None:
|
||||
"""
|
||||
@ -1834,12 +1834,3 @@ def process_transfer(state: BeaconState, transfer: Transfer) -> None:
|
||||
assert not (0 < state.balances[transfer.sender] < MIN_DEPOSIT_AMOUNT)
|
||||
assert not (0 < state.balances[transfer.recipient] < MIN_DEPOSIT_AMOUNT)
|
||||
```
|
||||
|
||||
#### State root verification
|
||||
|
||||
Verify the block's `state_root` by running the following function:
|
||||
|
||||
```python
|
||||
def verify_block_state_root(state: BeaconState, block: BeaconBlock) -> None:
|
||||
assert block.state_root == hash_tree_root(state)
|
||||
```
|
||||
|
@ -35,9 +35,9 @@
|
||||
- [`empty`](#empty)
|
||||
- [`get_crosslink_chunk_count`](#get_crosslink_chunk_count)
|
||||
- [`get_custody_chunk_bit`](#get_custody_chunk_bit)
|
||||
- [`get_chunk_bits_root`](#get_chunk_bits_root)
|
||||
- [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period)
|
||||
- [`get_validators_custody_reveal_period`](#get_validators_custody_reveal_period)
|
||||
- [`get_chunk_bits_root`](#get_chunk_bits_root)
|
||||
- [`replace_empty_or_append`](#replace_empty_or_append)
|
||||
- [Per-block processing](#per-block-processing)
|
||||
- [Operations](#operations)
|
||||
@ -104,6 +104,8 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
|
||||
|
||||
### Reward and penalty quotients
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE` | `2**1` (= 2) |
|
||||
|
||||
### Signature domains
|
||||
@ -146,7 +148,7 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
|
||||
'challenge_index': 'uint64',
|
||||
'challenger_index': ValidatorIndex,
|
||||
'responder_index': ValidatorIndex,
|
||||
'deadline': Epoch,
|
||||
'inclusion_epoch': Epoch,
|
||||
'data_root': Hash,
|
||||
'depth': 'uint64',
|
||||
'chunk_index': 'uint64',
|
||||
@ -160,7 +162,7 @@ This document details the beacon chain additions and changes in Phase 1 of Ether
|
||||
'challenge_index': 'uint64',
|
||||
'challenger_index': ValidatorIndex,
|
||||
'responder_index': ValidatorIndex,
|
||||
'deadline': Epoch,
|
||||
'inclusion_epoch': Epoch,
|
||||
'data_root': Hash,
|
||||
'chunk_count': 'uint64',
|
||||
'chunk_bits_merkle_root': Hash,
|
||||
@ -262,12 +264,10 @@ The `empty` function accepts and SSZ type as input and returns an object of that
|
||||
### `get_crosslink_chunk_count`
|
||||
|
||||
```python
|
||||
def get_custody_chunk_count(attestation: Attestation) -> int:
|
||||
crosslink_start_epoch = attestation.data.latest_crosslink.epoch
|
||||
crosslink_end_epoch = slot_to_epoch(attestation.data.slot)
|
||||
crosslink_crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, end_epoch - start_epoch)
|
||||
def get_custody_chunk_count(crosslink: Crosslink) -> int:
|
||||
crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, crosslink.end_epoch - crosslink.start_epoch)
|
||||
chunks_per_epoch = 2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK
|
||||
return crosslink_crosslink_length * chunks_per_epoch
|
||||
return crosslink_length * chunks_per_epoch
|
||||
```
|
||||
|
||||
### `get_custody_chunk_bit`
|
||||
@ -299,7 +299,7 @@ def get_randao_epoch_for_custody_period(period: int, validator_index: ValidatorI
|
||||
|
||||
### `get_validators_custody_reveal_period`
|
||||
|
||||
```python
|
||||
```python
|
||||
def get_validators_custody_reveal_period(state: BeaconState,
|
||||
validator_index: ValidatorIndex,
|
||||
epoch: Epoch=None) -> int:
|
||||
@ -380,7 +380,7 @@ def process_custody_key_reveal(state: BeaconState,
|
||||
increase_balance(state, proposer_index, base_reward(state, index) // MINOR_REWARD_QUOTIENT)
|
||||
```
|
||||
|
||||
##### Early derived secret reveals
|
||||
#### Early derived secret reveals
|
||||
|
||||
Verify that `len(block.body.early_derived_secret_reveals) <= MAX_EARLY_DERIVED_SECRET_REVEALS`.
|
||||
|
||||
@ -478,14 +478,14 @@ def process_chunk_challenge(state: BeaconState,
|
||||
record.chunk_index != challenge.chunk_index
|
||||
)
|
||||
# Verify depth
|
||||
depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation)))
|
||||
depth = math.log2(next_power_of_two(get_custody_chunk_count(challenge.attestation.data.crosslink)))
|
||||
assert challenge.chunk_index < 2**depth
|
||||
# Add new chunk challenge record
|
||||
new_record = CustodyChunkChallengeRecord(
|
||||
challenge_index=state.custody_challenge_index,
|
||||
challenger_index=get_beacon_proposer_index(state),
|
||||
responder_index=challenge.responder_index
|
||||
deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE,
|
||||
inclusion_epoch=get_current_epoch(state),
|
||||
data_root=challenge.attestation.data.crosslink.data_root,
|
||||
depth=depth,
|
||||
chunk_index=challenge.chunk_index,
|
||||
@ -528,10 +528,9 @@ def process_bit_challenge(state: BeaconState,
|
||||
attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bitfield)
|
||||
assert challenge.responder_index in attesters
|
||||
|
||||
# A validator can be the challenger or responder for at most one challenge at a time
|
||||
# A validator can be the challenger for at most one challenge at a time
|
||||
for record in state.custody_bit_challenge_records:
|
||||
assert record.challenger_index != challenge.challenger_index
|
||||
assert record.responder_index != challenge.responder_index
|
||||
|
||||
# Verify the responder is a valid custody key
|
||||
epoch_to_sign = get_randao_epoch_for_custody_period(
|
||||
@ -553,7 +552,7 @@ def process_bit_challenge(state: BeaconState,
|
||||
)
|
||||
|
||||
# Verify the chunk count
|
||||
chunk_count = get_custody_chunk_count(challenge.attestation)
|
||||
chunk_count = get_custody_chunk_count(challenge.attestation.data.crosslink)
|
||||
assert verify_bitfield(challenge.chunk_bits, chunk_count)
|
||||
# Verify the first bit of the hash of the chunk bits does not equal the custody bit
|
||||
custody_bit = get_bitfield_bit(attestation.custody_bitfield, attesters.index(responder_index))
|
||||
@ -563,7 +562,7 @@ def process_bit_challenge(state: BeaconState,
|
||||
challenge_index=state.custody_challenge_index,
|
||||
challenger_index=challenge.challenger_index,
|
||||
responder_index=challenge.responder_index,
|
||||
deadline=get_current_epoch(state) + CUSTODY_RESPONSE_DEADLINE,
|
||||
inclusion_epoch=get_current_epoch(state),
|
||||
data_root=challenge.attestation.data.crosslink.data_root,
|
||||
chunk_count=chunk_count,
|
||||
chunk_bits_merkle_root=merkle_root(pad_to_power_of_2((challenge.chunk_bits))),
|
||||
@ -604,6 +603,8 @@ def process_chunk_challenge_response(state: BeaconState,
|
||||
assert response.chunk_index == challenge.chunk_index
|
||||
# Verify bit challenge data is null
|
||||
assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == ZERO_HASH
|
||||
# Verify minimum delay
|
||||
assert get_current_epoch(state) >= challenge.inclusion_epoch + ACTIVATION_EXIT_DELAY
|
||||
# Verify the chunk matches the crosslink data root
|
||||
assert verify_merkle_branch(
|
||||
leaf=hash_tree_root(response.chunk),
|
||||
@ -626,6 +627,9 @@ def process_bit_challenge_response(state: BeaconState,
|
||||
challenge: CustodyBitChallengeRecord) -> None:
|
||||
# Verify chunk index
|
||||
assert response.chunk_index < challenge.chunk_count
|
||||
# Verify responder has not been slashed
|
||||
responder = state.validator_registry[challenge.responder_index]
|
||||
assert not responder.slashed
|
||||
# Verify the chunk matches the crosslink data root
|
||||
assert verify_merkle_branch(
|
||||
leaf=hash_tree_root(response.chunk),
|
||||
@ -671,13 +675,13 @@ Run `process_challenge_deadlines(state)` immediately after `process_reveal_deadl
|
||||
```python
|
||||
def process_challenge_deadlines(state: BeaconState) -> None:
|
||||
for challenge in state.custody_chunk_challenge_records:
|
||||
if get_current_epoch(state) > challenge.deadline:
|
||||
if get_current_epoch(state) > challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
|
||||
slash_validator(state, challenge.responder_index, challenge.challenger_index)
|
||||
records = state.custody_chunk_challenge_records
|
||||
records[records.index(challenge)] = CustodyChunkChallengeRecord()
|
||||
|
||||
for challenge in state.custody_bit_challenge_records:
|
||||
if get_current_epoch(state) > challenge.deadline:
|
||||
if get_current_epoch(state) > challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
|
||||
slash_validator(state, challenge.responder_index, challenge.challenger_index)
|
||||
records = state.custody_bit_challenge_records
|
||||
records[records.index(challenge)] = CustodyBitChallengeRecord()
|
||||
@ -688,26 +692,13 @@ Append this to `process_final_updates(state)`:
|
||||
```python
|
||||
# Clean up exposed RANDAO key reveals
|
||||
state.exposed_derived_secrets[current_epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] = []
|
||||
```
|
||||
|
||||
In `process_penalties_and_exits`, change the definition of `eligible` to the following (note that it is not a pure function because `state` is declared in the surrounding scope):
|
||||
|
||||
```python
|
||||
def eligible(state: BeaconState, index: ValidatorIndex) -> bool:
|
||||
validator = state.validator_registry[index]
|
||||
# Cannot exit if there are still open chunk challenges
|
||||
if len([record for record in state.custody_chunk_challenge_records if record.responder_index == index]) > 0:
|
||||
return False
|
||||
# Cannot exit if there are still open bit challenges
|
||||
if len([record for record in state.custody_bit_challenge_records if record.responder_index == index]) > 0:
|
||||
return False
|
||||
# Cannot exit if you have not revealed all of your custody keys
|
||||
elif validator.next_custody_reveal_period <= get_validators_custody_reveal_period(state, index, validator.exit_epoch):
|
||||
return False
|
||||
# Cannot exit if you already have
|
||||
elif validator.withdrawable_epoch < FAR_FUTURE_EPOCH:
|
||||
return False
|
||||
# Return minimum time
|
||||
else:
|
||||
return current_epoch >= validator.exit_epoch + MIN_VALIDATOR_WITHDRAWAL_EPOCHS
|
||||
# Reset withdrawable epochs if challenge records are empty
|
||||
records = state.custody_chunk_challenge_records + state.bit_challenge_records
|
||||
validator_indices_in_records = set(
|
||||
[record.challenger_index for record in records] + [record.responder_index for record in records]
|
||||
)
|
||||
for index, validator in enumerate(state.validator_registry):
|
||||
if index not in validator_indices_in_records:
|
||||
if validator.exit_epoch != FAR_FUTURE_EPOCH and validator.withdrawable_epoch == FAR_FUTURE_EPOCH:
|
||||
validator.withdrawable_epoch = validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
```
|
||||
|
@ -215,7 +215,7 @@ def get_shard_header(block: ShardBlock) -> ShardBlockHeader:
|
||||
def verify_shard_attestation_signature(state: BeaconState,
|
||||
attestation: ShardAttestation) -> None:
|
||||
data = attestation.data
|
||||
persistent_committee = get_persistent_committee(state, data.crosslink.shard, data.slot)
|
||||
persistent_committee = get_persistent_committee(state, data.shard, data.slot)
|
||||
assert verify_bitfield(attestation.aggregation_bitfield, len(persistent_committee))
|
||||
pubkeys = []
|
||||
for i, index in enumerate(persistent_committee):
|
||||
@ -225,7 +225,7 @@ def verify_shard_attestation_signature(state: BeaconState,
|
||||
pubkeys.append(validator.pubkey)
|
||||
assert bls_verify(
|
||||
pubkey=bls_aggregate_pubkeys(pubkeys),
|
||||
message_hash=data.crosslink.shard_block_root,
|
||||
message_hash=data.shard_block_root,
|
||||
signature=attestation.aggregate_signature,
|
||||
domain=get_domain(state, slot_to_epoch(data.slot), DOMAIN_SHARD_ATTESTER)
|
||||
)
|
||||
@ -280,22 +280,22 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
|
||||
return True
|
||||
|
||||
# Check slot number
|
||||
assert block.slot >= PHASE_1_GENESIS_SLOT
|
||||
assert candidate.slot >= PHASE_1_GENESIS_SLOT
|
||||
assert unix_time >= beacon_state.genesis_time + (block.slot - GENESIS_SLOT) * SECONDS_PER_SLOT
|
||||
|
||||
# Check shard number
|
||||
assert block.shard <= SHARD_COUNT
|
||||
assert candidate.shard <= SHARD_COUNT
|
||||
|
||||
# Check beacon block
|
||||
beacon_block = beacon_blocks[block.slot]
|
||||
assert block.beacon_block_root == signing_root(beacon_block)
|
||||
assert beacon_block.slot <= block.slot:
|
||||
beacon_block = beacon_blocks[candidate.slot]
|
||||
assert candidate.beacon_block_root == signing_root(beacon_block)
|
||||
assert beacon_block.slot <= candidate.slot:
|
||||
|
||||
# Check state root
|
||||
assert block.state_root == ZERO_HASH # [to be removed in phase 2]
|
||||
assert candidate.state_root == ZERO_HASH # [to be removed in phase 2]
|
||||
|
||||
# Check parent block
|
||||
if block.slot == PHASE_1_GENESIS_SLOT:
|
||||
if candidate.slot == PHASE_1_GENESIS_SLOT:
|
||||
assert candidate.parent_root == ZERO_HASH
|
||||
else:
|
||||
parent_block = next(
|
||||
@ -303,26 +303,26 @@ def is_valid_shard_block(beacon_blocks: List[BeaconBlock],
|
||||
signing_root(block) == candidate.parent_root
|
||||
, None)
|
||||
assert parent_block != None
|
||||
assert parent_block.shard == block.shard
|
||||
assert parent_block.slot < block.slot
|
||||
assert parent_block.shard == candidate.shard
|
||||
assert parent_block.slot < candidate.slot
|
||||
assert signing_root(beacon_blocks[parent_block.slot]) == parent_block.beacon_chain_root
|
||||
|
||||
# Check attestations
|
||||
assert len(block.attestations) <= MAX_SHARD_ATTESTIONS
|
||||
for _, attestation in enumerate(block.attestations):
|
||||
assert max(GENESIS_SHARD_SLOT, block.slot - SLOTS_PER_EPOCH) <= attestation.data.slot
|
||||
assert attestation.data.slot <= block.slot - MIN_ATTESTATION_INCLUSION_DELAY
|
||||
assert attestation.data.crosslink.shard == block.shard
|
||||
assert len(candidate.attestations) <= MAX_SHARD_ATTESTIONS
|
||||
for _, attestation in enumerate(candidate.attestations):
|
||||
assert max(GENESIS_SHARD_SLOT, candidate.slot - SLOTS_PER_EPOCH) <= attestation.data.slot
|
||||
assert attestation.data.slot <= candidate.slot - MIN_ATTESTATION_INCLUSION_DELAY
|
||||
assert attestation.data.crosslink.shard == candidate.shard
|
||||
verify_shard_attestation_signature(beacon_state, attestation)
|
||||
|
||||
# Check signature
|
||||
proposer_index = get_shard_proposer_index(beacon_state, block.shard, block.slot)
|
||||
proposer_index = get_shard_proposer_index(beacon_state, candidate.shard, candidate.slot)
|
||||
assert proposer_index is not None
|
||||
assert bls_verify(
|
||||
pubkey=validators[proposer_index].pubkey,
|
||||
message_hash=signing_root(block),
|
||||
signature=block.signature,
|
||||
domain=get_domain(beacon_state, slot_to_epoch(block.slot), DOMAIN_SHARD_PROPOSER)
|
||||
signature=candidate.signature,
|
||||
domain=get_domain(beacon_state, slot_to_epoch(candidate.slot), DOMAIN_SHARD_PROPOSER)
|
||||
)
|
||||
|
||||
return True
|
||||
@ -339,18 +339,18 @@ Let:
|
||||
```python
|
||||
def is_valid_shard_attestation(valid_shard_blocks: List[ShardBlock],
|
||||
beacon_state: BeaconState,
|
||||
candidate: Attestation) -> bool:
|
||||
candidate: ShardAttestation) -> bool:
|
||||
# Check shard block
|
||||
shard_block = next(
|
||||
block for block in valid_shard_blocks if
|
||||
signing_root(block) == candidate.attestation.data.crosslink.shard_block_root
|
||||
signing_root(block) == candidate.data.shard_block_root
|
||||
, None)
|
||||
assert shard_block != None
|
||||
assert shard_block.slot == attestation.data.slot
|
||||
assert shard_block.shard == attestation.data.crosslink.shard
|
||||
assert shard_block.slot == candidate.data.slot
|
||||
assert shard_block.shard == candidate.data.shard
|
||||
|
||||
# Check signature
|
||||
verify_shard_attestation_signature(beacon_state, attestation)
|
||||
verify_shard_attestation_signature(beacon_state, candidate)
|
||||
|
||||
return True
|
||||
```
|
||||
|
158
specs/networking/libp2p-standardization.md
Normal file
158
specs/networking/libp2p-standardization.md
Normal file
@ -0,0 +1,158 @@
|
||||
ETH 2.0 Networking Spec - Libp2p standard protocols
|
||||
===
|
||||
|
||||
# Abstract
|
||||
|
||||
Ethereum 2.0 clients plan to use the libp2p protocol networking stack for
|
||||
mainnet release. This document aims to standardize the libp2p client protocols,
|
||||
configuration and messaging formats.
|
||||
|
||||
# Libp2p Components
|
||||
|
||||
## Transport
|
||||
|
||||
This section details the libp2p transport layer that underlies the
|
||||
[protocols](#protocols) that are listed in this document.
|
||||
|
||||
Libp2p allows composition of multiple transports. Eth2.0 clients should support
|
||||
TCP/IP and optionally websockets. Websockets are useful for implementations
|
||||
running in the browser and therefore native clients would ideally support these implementations
|
||||
by supporting websockets.
|
||||
|
||||
An ideal libp2p transport would therefore support both TCP/IP and websockets.
|
||||
|
||||
*Note: There is active development in libp2p to facilitate the
|
||||
[QUIC](https://github.com/libp2p/go-libp2p-quic-transport) transport, which may
|
||||
be adopted in the future*
|
||||
|
||||
### Encryption
|
||||
|
||||
Libp2p currently offers [Secio](https://github.com/libp2p/specs/pull/106) which
|
||||
can upgrade a transport which will then encrypt all future communication. Secio
|
||||
generates a symmetric ephemeral key which peers use to encrypt their
|
||||
communication. It can support a range of ciphers and currently supports key
|
||||
derivation for elliptic curve-based public keys.
|
||||
|
||||
Current defaults are:
|
||||
- Key agreement: `ECDH-P256` (also supports `ECDH-P384`)
|
||||
- Cipher: `AES-128` (also supports `AES-256`, `TwofishCTR`)
|
||||
- Digests: `SHA256` (also supports `SHA512`)
|
||||
|
||||
*Note: Secio is being deprecated in favour of [TLS
|
||||
1.3](https://github.com/libp2p/specs/blob/master/tls/tls.md). It is our
|
||||
intention to transition to use TLS 1.3 for encryption between nodes, rather
|
||||
than Secio.*
|
||||
|
||||
|
||||
## Protocols
|
||||
|
||||
This section lists the necessary libp2p protocols required by Ethereum 2.0
|
||||
running a libp2p network stack.
|
||||
|
||||
## Multistream-select
|
||||
|
||||
#### Protocol id: `/multistream/1.0.0`
|
||||
|
||||
Clients running libp2p should support the
|
||||
[multistream-select](https://github.com/multiformats/multistream-select/)
|
||||
protocol which allows clients to negotiate libp2p protocols and establish streams
|
||||
per protocol.
|
||||
|
||||
## Multiplexing
|
||||
|
||||
Libp2p allows clients to compose multiple multiplexing methods. Clients should
|
||||
support [mplex](https://github.com/libp2p/specs/tree/master/mplex) and
|
||||
optionally [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md)
|
||||
(these can be composed).
|
||||
|
||||
**Mplex protocol id: `/mplex/6.7.0`**
|
||||
|
||||
**Yamux protocol id: `/yamux/1.0.0`**
|
||||
|
||||
## Gossipsub
|
||||
|
||||
#### Protocol id: `/eth/serenity/gossipsub/1.0.0`
|
||||
|
||||
*Note: Parameters listed here are subject to a large-scale network feasibility
|
||||
study*
|
||||
|
||||
The [Gossipsub](https://github.com/libp2p/specs/tree/master/pubsub/gossipsub)
|
||||
protocol is used for block and attestation propagation across the
|
||||
network.
|
||||
|
||||
### Configuration Parameters
|
||||
|
||||
Gossipsub has a number of internal configuration parameters which directly
|
||||
affect the network performance. Clients can implement independently, however
|
||||
we aim to standardize these across clients to optimize the gossip network for
|
||||
propagation times and message duplication. Current network-related defaults are:
|
||||
|
||||
```
|
||||
(
|
||||
// The target number of peers in the overlay mesh network (D in the libp2p specs).
|
||||
mesh_size: 6
|
||||
// The minimum number of peers in the mesh network before adding more (D_lo in the libp2p specs).
|
||||
mesh_lo: 4
|
||||
// The maximum number of peers in the mesh network before removing some (D_high in the libp2p specs).
|
||||
mesh_high: 12
|
||||
// The number of peers to gossip to during a heartbeat (D_lazy in the libp2p specs).
|
||||
gossip_lazy: 6 // defaults to `mesh_size`
|
||||
// Time to live for fanout peers (seconds).
|
||||
fanout_ttl: 60
|
||||
// The number of heartbeats to gossip about.
|
||||
gossip_history: 3
|
||||
// Time between each heartbeat (seconds).
|
||||
heartbeat_interval: 1
|
||||
)
|
||||
```
|
||||
|
||||
### Topics
|
||||
|
||||
*The Go and Js implementations use string topics - This is likely to be
|
||||
updated to topic hashes in later versions - https://github.com/libp2p/rust-libp2p/issues/473*
|
||||
|
||||
For Eth2.0 clients, topics are sent as `SHA2-256` hashes of the topic string.
|
||||
|
||||
There are two main topics used to propagate attestations and beacon blocks to
|
||||
all nodes on the network.
|
||||
|
||||
- The `beacon_block` topic - This topic is used solely for propagating new
|
||||
beacon blocks to all nodes on the networks.
|
||||
- The `beacon_attestation` topic - This topic is used to propagate
|
||||
aggregated attestations to subscribing nodes (typically block proposers) to
|
||||
be included into future blocks. Attestations are aggregated in their
|
||||
respective subnets before publishing on this topic.
|
||||
|
||||
Shards are grouped into their own subnets (defined by a shard topic). The
|
||||
number of shard subnets is defined via `SHARD_SUBNET_COUNT` and the shard
|
||||
`shard_number % SHARD_SUBNET_COUNT` is assigned to the topic:
|
||||
`shard{shard_number % SHARD_SUBNET_COUNT}_attestation`.
|
||||
|
||||
### Messages
|
||||
|
||||
*Note: The message format here is Eth2.0-specific*
|
||||
|
||||
Each Gossipsub
|
||||
[Message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24)
|
||||
has a maximum size of 512KB (estimated from expected largest uncompressed block
|
||||
size).
|
||||
|
||||
The `data` field of a Gossipsub `Message` is an SSZ-encoded object. For the `beacon_block` topic,
|
||||
this is a `beacon_block`. For the `beacon_attestation` topic, this is
|
||||
an `attestation`.
|
||||
|
||||
## Eth-2 RPC
|
||||
|
||||
#### Protocol Id: `/eth/serenity/beacon/rpc/1`
|
||||
|
||||
The [RPC Interface](./rpc-interface.md) is specified in this repository.
|
||||
|
||||
## Discovery
|
||||
|
||||
Discovery Version 5
|
||||
([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md))
|
||||
will be used for discovery. This protocol uses a UDP transport and specifies
|
||||
its own encryption, ip-discovery and topic advertisement. Therefore, it has no
|
||||
need to establish streams through `multistream-select`; rather, it acts
|
||||
as a standalone implementation that feeds discovered peers/topics (ENR-records) as
|
||||
`multiaddrs` into the libp2p service.
|
@ -41,7 +41,7 @@ Remote method calls are wrapped in a "request" structure:
|
||||
(
|
||||
id: uint64
|
||||
method_id: uint16
|
||||
body: Request
|
||||
body: (message_body...)
|
||||
)
|
||||
```
|
||||
|
||||
@ -55,15 +55,7 @@ and their corresponding responses are wrapped in a "response" structure:
|
||||
)
|
||||
```
|
||||
|
||||
If an error occurs, a variant of the response structure is returned:
|
||||
|
||||
```
|
||||
(
|
||||
id: uint64
|
||||
response_code: uint16
|
||||
result: bytes
|
||||
)
|
||||
```
|
||||
A union type is used to determine the contents of the `body` field in the request structure. Each "body" entry in the RPC calls below corresponds to one subtype in the `body` type union.
|
||||
|
||||
The details of the RPC-Over-`libp2p` protocol are similar to [JSON-RPC 2.0](https://www.jsonrpc.org/specification). Specifically:
|
||||
|
||||
|
@ -20,6 +20,8 @@
|
||||
- [Process deposit](#process-deposit)
|
||||
- [Validator index](#validator-index)
|
||||
- [Activation](#activation)
|
||||
- [Validator assignments](#validator-assignments)
|
||||
- [Lookahead](#lookahead)
|
||||
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
|
||||
- [Block proposal](#block-proposal)
|
||||
- [Block header](#block-header)
|
||||
@ -45,8 +47,6 @@
|
||||
- [Aggregation bitfield](#aggregation-bitfield)
|
||||
- [Custody bitfield](#custody-bitfield)
|
||||
- [Aggregate signature](#aggregate-signature)
|
||||
- [Validator assignments](#validator-assignments)
|
||||
- [Lookahead](#lookahead)
|
||||
- [How to avoid slashing](#how-to-avoid-slashing)
|
||||
- [Proposer slashing](#proposer-slashing)
|
||||
- [Attester slashing](#attester-slashing)
|
||||
@ -98,7 +98,7 @@ To submit a deposit:
|
||||
* Pack the validator's [initialization parameters](#initialization) into `deposit_data`, a [`DepositData`](../core/0_beacon-chain.md#depositdata) SSZ object.
|
||||
* Let `amount` be the amount in Gwei to be deposited by the validator where `MIN_DEPOSIT_AMOUNT <= amount <= MAX_EFFECTIVE_BALANCE`.
|
||||
* Set `deposit_data.amount = amount`.
|
||||
* Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=DOMAIN_DEPOSIT`.
|
||||
* Let `signature` be the result of `bls_sign` of the `signing_root(deposit_data)` with `domain=bls_domain(DOMAIN_DEPOSIT)`. (Deposits are valid regardless of fork version, `bls_domain` will default to zeroes there).
|
||||
* Send a transaction on the Ethereum 1.0 chain to `DEPOSIT_CONTRACT_ADDRESS` executing `def deposit(pubkey: bytes[48], withdrawal_credentials: bytes[32], signature: bytes[96])` along with a deposit of `amount` Gwei.
|
||||
|
||||
*Note*: Deposits made for the same `pubkey` are treated as for the same validator. A singular `Validator` will be added to `state.validator_registry` with each additional deposit amount added to the validator's balance. A validator can only be activated when total deposits for the validator pubkey meet or exceed `MAX_EFFECTIVE_BALANCE`.
|
||||
@ -127,13 +127,62 @@ Once a validator is activated, the validator is assigned [responsibilities](#bea
|
||||
|
||||
*Note*: There is a maximum validator churn per finalized epoch so the delay until activation is variable depending upon finality, total active validator balance, and the number of validators in the queue to be activated.
|
||||
|
||||
## Validator assignments
|
||||
|
||||
A validator can get committee assignments for a given epoch using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `epoch <= next_epoch`.
|
||||
|
||||
```python
|
||||
def get_committee_assignment(
|
||||
state: BeaconState,
|
||||
epoch: Epoch,
|
||||
validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]:
|
||||
"""
|
||||
Return the committee assignment in the ``epoch`` for ``validator_index``.
|
||||
``assignment`` returned is a tuple of the following form:
|
||||
* ``assignment[0]`` is the list of validators in the committee
|
||||
* ``assignment[1]`` is the shard to which the committee is assigned
|
||||
* ``assignment[2]`` is the slot at which the committee is assigned
|
||||
"""
|
||||
next_epoch = get_current_epoch(state) + 1
|
||||
assert epoch <= next_epoch
|
||||
|
||||
committees_per_slot = get_epoch_committee_count(state, epoch) // SLOTS_PER_EPOCH
|
||||
epoch_start_slot = get_epoch_start_slot(epoch)
|
||||
for slot in range(epoch_start_slot, epoch_start_slot + SLOTS_PER_EPOCH)
|
||||
offset = committees_per_slot * (slot % SLOTS_PER_EPOCH)
|
||||
slot_start_shard = (get_epoch_start_shard(state, epoch) + offset) % SHARD_COUNT
|
||||
for i in range(committees_per_slot):
|
||||
shard = (slot_start_shard + i) % SHARD_COUNT
|
||||
committee = get_crosslink_committee(state, epoch, shard)
|
||||
if validator_index in committee:
|
||||
return committee, shard, slot
|
||||
```
|
||||
|
||||
A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch.
|
||||
|
||||
```python
|
||||
def is_proposer(state: BeaconState,
|
||||
validator_index: ValidatorIndex) -> bool:
|
||||
return get_beacon_proposer_index(state) == validator_index
|
||||
```
|
||||
|
||||
*Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot.
|
||||
|
||||
### Lookahead
|
||||
|
||||
The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must be checked during the epoch in question.
|
||||
|
||||
`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting at which future slot they will have to attest and also which shard they should begin syncing (in Phase 1+).
|
||||
|
||||
Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
|
||||
|
||||
## Beacon chain responsibilities
|
||||
|
||||
A validator has two primary responsibilities to the beacon chain: [proposing blocks](#block-proposal) and [creating attestations](#attestations-1). Proposals happen infrequently, whereas attestations should be created once per epoch.
|
||||
|
||||
### Block proposal
|
||||
|
||||
A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `get_beacon_proposer_index(state)` returns the validator's `validator_index`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator is to create, sign, and broadcast a `block` that is a child of `parent` and that executes a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
|
||||
A validator is expected to propose a [`BeaconBlock`](../core/0_beacon-chain.md#beaconblock) at the beginning of any slot during which `is_proposer(state, validator_index)` returns `True`. To propose, the validator selects the `BeaconBlock`, `parent`, that in their view of the fork choice is the head of the chain during `slot - 1`. The validator creates, signs, and broadcasts a `block` that is a child of `parent` that satisfies a valid [beacon chain state transition](../core/0_beacon-chain.md#beacon-chain-state-transition-function).
|
||||
|
||||
There is one proposer per slot, so if there are N active validators any individual validator will on average be assigned to propose once per N slots (e.g. at 312500 validators = 10 million ETH, that's once per ~3 weeks).
|
||||
|
||||
@ -229,7 +278,7 @@ Up to `MAX_VOLUNTARY_EXITS` [`VoluntaryExit`](../core/0_beacon-chain.md#voluntar
|
||||
|
||||
### Attestations
|
||||
|
||||
A validator is expected to create, sign, and broadcast an attestation during each epoch. The slot during which the validator performs this role is any slot at which `get_crosslink_committees_at_slot(state, slot)` contains a committee that contains `validator_index`.
|
||||
A validator is expected to create, sign, and broadcast an attestation during each epoch. The committee, assigned shard, and assigned slot for which the validator performs this role during an epoch is defined by `get_committee_assignment(state, epoch, validator_index)`.
|
||||
|
||||
A validator should create and broadcast the attestation halfway through the `slot` during which the validator is assigned ― that is, `SECONDS_PER_SLOT * 0.5` seconds after the start of `slot`.
|
||||
|
||||
@ -238,7 +287,7 @@ A validator should create and broadcast the attestation halfway through the `slo
|
||||
First the validator should construct `attestation_data`, an [`AttestationData`](../core/0_beacon-chain.md#attestationdata) object based upon the state at the assigned slot.
|
||||
|
||||
* Let `head_block` be the result of running the fork choice during the assigned slot.
|
||||
* Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot.
|
||||
* Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`.
|
||||
|
||||
##### LMD GHOST vote
|
||||
|
||||
@ -257,10 +306,12 @@ Set `attestation_data.beacon_block_root = signing_root(head_block)`.
|
||||
|
||||
##### Crosslink vote
|
||||
|
||||
Construct `attestation_data.crosslink` via the following
|
||||
Construct `attestation_data.crosslink` via the following.
|
||||
|
||||
* Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee defined by `get_crosslink_committees_at_slot`.
|
||||
* Set `attestation_data.crosslink.epoch = min(attestation_data.target_epoch, head_state.current_crosslinks[shard].epoch + MAX_EPOCHS_PER_CROSSLINK)`.
|
||||
* Set `attestation_data.crosslink.shard = shard` where `shard` is the shard associated with the validator's committee.
|
||||
* Let `parent_crosslink = head_state.current_crosslinks[shard]`.
|
||||
* Set `attestation_data.crosslink.start_epoch = parent_crosslink.end_epoch`.
|
||||
* Set `attestation_data.crosslink.end_epoch = min(attestation_data.target_epoch, parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK)`.
|
||||
* Set `attestation_data.crosslink.parent_root = hash_tree_root(head_state.current_crosslinks[shard])`.
|
||||
* Set `attestation_data.crosslink.data_root = ZERO_HASH`. *Note*: This is a stub for Phase 0.
|
||||
|
||||
@ -310,67 +361,6 @@ signed_attestation_data = bls_sign(
|
||||
)
|
||||
```
|
||||
|
||||
## Validator assignments
|
||||
|
||||
A validator can get the current, previous, and next epoch committee assignments using the following helper via `get_committee_assignment(state, epoch, validator_index)` where `previous_epoch <= epoch <= next_epoch`.
|
||||
|
||||
```python
|
||||
def get_committee_assignment(
|
||||
state: BeaconState,
|
||||
epoch: Epoch,
|
||||
validator_index: ValidatorIndex) -> Tuple[List[ValidatorIndex], Shard, Slot]:
|
||||
"""
|
||||
Return the committee assignment in the ``epoch`` for ``validator_index``.
|
||||
``assignment`` returned is a tuple of the following form:
|
||||
* ``assignment[0]`` is the list of validators in the committee
|
||||
* ``assignment[1]`` is the shard to which the committee is assigned
|
||||
* ``assignment[2]`` is the slot at which the committee is assigned
|
||||
"""
|
||||
previous_epoch = get_previous_epoch(state)
|
||||
next_epoch = get_current_epoch(state) + 1
|
||||
assert previous_epoch <= epoch <= next_epoch
|
||||
|
||||
epoch_start_slot = get_epoch_start_slot(epoch)
|
||||
for slot in range(epoch_start_slot, epoch_start_slot + SLOTS_PER_EPOCH):
|
||||
crosslink_committees = get_crosslink_committees_at_slot(
|
||||
state,
|
||||
slot,
|
||||
)
|
||||
selected_committees = [
|
||||
committee # Tuple[List[ValidatorIndex], Shard]
|
||||
for committee in crosslink_committees
|
||||
if validator_index in committee[0]
|
||||
]
|
||||
if len(selected_committees) > 0:
|
||||
validators = selected_committees[0][0]
|
||||
shard = selected_committees[0][1]
|
||||
|
||||
assignment = (validators, shard, slot)
|
||||
return assignment
|
||||
```
|
||||
|
||||
A validator can use the following function to see if they are supposed to propose during their assigned committee slot. This function can only be run during the slot in question. Proposer selection is only stable within the context of the current epoch.
|
||||
|
||||
```python
|
||||
def is_proposer_at_slot(state: BeaconState,
|
||||
slot: Slot,
|
||||
validator_index: ValidatorIndex) -> bool:
|
||||
assert state.slot == slot
|
||||
|
||||
return get_beacon_proposer_index(state) == validator_index
|
||||
```
|
||||
|
||||
*Note*: To see if a validator is assigned to proposer during the slot, the validator must run an empty slot transition from the previous state to the current slot.
|
||||
|
||||
|
||||
### Lookahead
|
||||
|
||||
The beacon chain shufflings are designed to provide a minimum of 1 epoch lookahead on the validator's upcoming committee assignments for attesting dictated by the shuffling and slot. Note that this lookahead does not apply to proposing, which must checked during the slot in question.
|
||||
|
||||
`get_committee_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments which involves noting at which future slot one will have to attest and also which shard one should begin syncing (in Phase 1+).
|
||||
|
||||
Specifically, a validator should call `get_committee_assignment(state, next_epoch, validator_index)` when checking for next epoch assignments.
|
||||
|
||||
## How to avoid slashing
|
||||
|
||||
"Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed -- [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed.
|
||||
|
@ -1,3 +1,3 @@
|
||||
py-ecc==1.6.0
|
||||
py-ecc==1.7.0
|
||||
eth-utils==1.4.1
|
||||
../../test_libs/gen_helpers
|
||||
|
@ -5,7 +5,7 @@ from eth_utils import (
|
||||
from gen_base import gen_suite, gen_typing
|
||||
from preset_loader import loader
|
||||
from eth2spec.debug.encode import encode
|
||||
from eth2spec.utils.minimal_ssz import signing_root
|
||||
from eth2spec.utils.ssz.ssz_impl import signing_root
|
||||
from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof
|
||||
|
||||
from typing import List, Tuple
|
||||
|
@ -2,7 +2,7 @@ from random import Random
|
||||
|
||||
from eth2spec.debug import random_value, encode
|
||||
from eth2spec.phase0 import spec
|
||||
from eth2spec.utils.minimal_ssz import (
|
||||
from eth2spec.utils.ssz.ssz_impl import (
|
||||
hash_tree_root,
|
||||
signing_root,
|
||||
serialize,
|
||||
|
@ -1,28 +1,35 @@
|
||||
from eth2spec.utils.minimal_ssz import hash_tree_root
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
from eth2spec.utils.ssz.ssz_typing import *
|
||||
|
||||
|
||||
def decode(json, typ):
|
||||
if isinstance(typ, str) and typ[:4] == 'uint':
|
||||
return json
|
||||
elif typ == 'bool':
|
||||
assert json in (True, False)
|
||||
return json
|
||||
elif isinstance(typ, list):
|
||||
return [decode(element, typ[0]) for element in json]
|
||||
elif isinstance(typ, str) and typ[:4] == 'byte':
|
||||
return bytes.fromhex(json[2:])
|
||||
elif hasattr(typ, 'fields'):
|
||||
def decode(data, typ):
|
||||
if is_uint_type(typ):
|
||||
return data
|
||||
elif is_bool_type(typ):
|
||||
assert data in (True, False)
|
||||
return data
|
||||
elif is_list_type(typ):
|
||||
elem_typ = read_list_elem_type(typ)
|
||||
return [decode(element, elem_typ) for element in data]
|
||||
elif is_vector_type(typ):
|
||||
elem_typ = read_vector_elem_type(typ)
|
||||
return Vector(decode(element, elem_typ) for element in data)
|
||||
elif is_bytes_type(typ):
|
||||
return bytes.fromhex(data[2:])
|
||||
elif is_bytesn_type(typ):
|
||||
return BytesN(bytes.fromhex(data[2:]))
|
||||
elif is_container_type(typ):
|
||||
temp = {}
|
||||
for field, subtype in typ.fields.items():
|
||||
temp[field] = decode(json[field], subtype)
|
||||
if field + "_hash_tree_root" in json:
|
||||
assert(json[field + "_hash_tree_root"][2:] ==
|
||||
for field, subtype in typ.get_fields():
|
||||
temp[field] = decode(data[field], subtype)
|
||||
if field + "_hash_tree_root" in data:
|
||||
assert(data[field + "_hash_tree_root"][2:] ==
|
||||
hash_tree_root(temp[field], subtype).hex())
|
||||
ret = typ(**temp)
|
||||
if "hash_tree_root" in json:
|
||||
assert(json["hash_tree_root"][2:] ==
|
||||
if "hash_tree_root" in data:
|
||||
assert(data["hash_tree_root"][2:] ==
|
||||
hash_tree_root(ret, typ).hex())
|
||||
return ret
|
||||
else:
|
||||
print(json, typ)
|
||||
print(data, typ)
|
||||
raise Exception("Type not recognized")
|
||||
|
@ -1,24 +1,30 @@
|
||||
from eth2spec.utils.minimal_ssz import hash_tree_root
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
from eth2spec.utils.ssz.ssz_typing import *
|
||||
|
||||
|
||||
def encode(value, typ, include_hash_tree_roots=False):
|
||||
if isinstance(typ, str) and typ[:4] == 'uint':
|
||||
if typ[4:] == '128' or typ[4:] == '256':
|
||||
if is_uint_type(typ):
|
||||
if hasattr(typ, '__supertype__'):
|
||||
typ = typ.__supertype__
|
||||
# Larger uints are boxed and the class declares their byte length
|
||||
if issubclass(typ, uint) and typ.byte_len > 8:
|
||||
return str(value)
|
||||
return value
|
||||
elif typ == 'bool':
|
||||
elif is_bool_type(typ):
|
||||
assert value in (True, False)
|
||||
return value
|
||||
elif isinstance(typ, list):
|
||||
return [encode(element, typ[0], include_hash_tree_roots) for element in value]
|
||||
elif isinstance(typ, str) and typ[:4] == 'byte':
|
||||
elif is_list_type(typ) or is_vector_type(typ):
|
||||
elem_typ = read_elem_type(typ)
|
||||
return [encode(element, elem_typ, include_hash_tree_roots) for element in value]
|
||||
elif isinstance(typ, type) and issubclass(typ, bytes): # both bytes and BytesN
|
||||
return '0x' + value.hex()
|
||||
elif hasattr(typ, 'fields'):
|
||||
elif is_container_type(typ):
|
||||
ret = {}
|
||||
for field, subtype in typ.fields.items():
|
||||
ret[field] = encode(getattr(value, field), subtype, include_hash_tree_roots)
|
||||
for field, subtype in typ.get_fields():
|
||||
field_value = getattr(value, field)
|
||||
ret[field] = encode(field_value, subtype, include_hash_tree_roots)
|
||||
if include_hash_tree_roots:
|
||||
ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(getattr(value, field), subtype).hex()
|
||||
ret[field + "_hash_tree_root"] = '0x' + hash_tree_root(field_value, subtype).hex()
|
||||
if include_hash_tree_roots:
|
||||
ret["hash_tree_root"] = '0x' + hash_tree_root(value, typ).hex()
|
||||
return ret
|
||||
|
@ -2,10 +2,11 @@ from random import Random
|
||||
from typing import Any
|
||||
from enum import Enum
|
||||
|
||||
from eth2spec.utils.ssz.ssz_typing import *
|
||||
from eth2spec.utils.ssz.ssz_impl import is_basic_type
|
||||
|
||||
UINT_SIZES = [8, 16, 32, 64, 128, 256]
|
||||
|
||||
basic_types = ["uint%d" % v for v in UINT_SIZES] + ['bool', 'byte']
|
||||
# in bytes
|
||||
UINT_SIZES = [1, 2, 4, 8, 16, 32]
|
||||
|
||||
random_mode_names = ["random", "zero", "max", "nil", "one", "lengthy"]
|
||||
|
||||
@ -49,60 +50,61 @@ def get_random_ssz_object(rng: Random,
|
||||
"""
|
||||
if chaos:
|
||||
mode = rng.choice(list(RandomizationMode))
|
||||
if isinstance(typ, str):
|
||||
# Bytes array
|
||||
if typ == 'bytes':
|
||||
if mode == RandomizationMode.mode_nil_count:
|
||||
return b''
|
||||
if mode == RandomizationMode.mode_max_count:
|
||||
return get_random_bytes_list(rng, max_bytes_length)
|
||||
if mode == RandomizationMode.mode_one_count:
|
||||
return get_random_bytes_list(rng, 1)
|
||||
if mode == RandomizationMode.mode_zero:
|
||||
return b'\x00'
|
||||
if mode == RandomizationMode.mode_max:
|
||||
return b'\xff'
|
||||
return get_random_bytes_list(rng, rng.randint(0, max_bytes_length))
|
||||
elif typ[:5] == 'bytes' and len(typ) > 5:
|
||||
length = int(typ[5:])
|
||||
# Sanity, don't generate absurdly big random values
|
||||
# If a client is aiming to performance-test, they should create a benchmark suite.
|
||||
assert length <= max_bytes_length
|
||||
if mode == RandomizationMode.mode_zero:
|
||||
return b'\x00' * length
|
||||
if mode == RandomizationMode.mode_max:
|
||||
return b'\xff' * length
|
||||
return get_random_bytes_list(rng, length)
|
||||
# Basic types
|
||||
else:
|
||||
if mode == RandomizationMode.mode_zero:
|
||||
return get_min_basic_value(typ)
|
||||
if mode == RandomizationMode.mode_max:
|
||||
return get_max_basic_value(typ)
|
||||
return get_random_basic_value(rng, typ)
|
||||
# Bytes array
|
||||
if is_bytes_type(typ):
|
||||
if mode == RandomizationMode.mode_nil_count:
|
||||
return b''
|
||||
if mode == RandomizationMode.mode_max_count:
|
||||
return get_random_bytes_list(rng, max_bytes_length)
|
||||
if mode == RandomizationMode.mode_one_count:
|
||||
return get_random_bytes_list(rng, 1)
|
||||
if mode == RandomizationMode.mode_zero:
|
||||
return b'\x00'
|
||||
if mode == RandomizationMode.mode_max:
|
||||
return b'\xff'
|
||||
return get_random_bytes_list(rng, rng.randint(0, max_bytes_length))
|
||||
elif is_bytesn_type(typ):
|
||||
length = typ.length
|
||||
# Sanity, don't generate absurdly big random values
|
||||
# If a client is aiming to performance-test, they should create a benchmark suite.
|
||||
assert length <= max_bytes_length
|
||||
if mode == RandomizationMode.mode_zero:
|
||||
return b'\x00' * length
|
||||
if mode == RandomizationMode.mode_max:
|
||||
return b'\xff' * length
|
||||
return get_random_bytes_list(rng, length)
|
||||
# Basic types
|
||||
elif is_basic_type(typ):
|
||||
if mode == RandomizationMode.mode_zero:
|
||||
return get_min_basic_value(typ)
|
||||
if mode == RandomizationMode.mode_max:
|
||||
return get_max_basic_value(typ)
|
||||
return get_random_basic_value(rng, typ)
|
||||
# Vector:
|
||||
elif isinstance(typ, list) and len(typ) == 2:
|
||||
elif is_vector_type(typ):
|
||||
elem_typ = read_vector_elem_type(typ)
|
||||
return [
|
||||
get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos)
|
||||
for _ in range(typ[1])
|
||||
get_random_ssz_object(rng, elem_typ, max_bytes_length, max_list_length, mode, chaos)
|
||||
for _ in range(typ.length)
|
||||
]
|
||||
# List:
|
||||
elif isinstance(typ, list) and len(typ) == 1:
|
||||
elif is_list_type(typ):
|
||||
elem_typ = read_list_elem_type(typ)
|
||||
length = rng.randint(0, max_list_length)
|
||||
if mode == RandomizationMode.mode_one_count:
|
||||
length = 1
|
||||
if mode == RandomizationMode.mode_max_count:
|
||||
length = max_list_length
|
||||
return [
|
||||
get_random_ssz_object(rng, typ[0], max_bytes_length, max_list_length, mode, chaos)
|
||||
get_random_ssz_object(rng, elem_typ, max_bytes_length, max_list_length, mode, chaos)
|
||||
for _ in range(length)
|
||||
]
|
||||
# Container:
|
||||
elif hasattr(typ, 'fields'):
|
||||
elif is_container_type(typ):
|
||||
return typ(**{
|
||||
field:
|
||||
get_random_ssz_object(rng, subtype, max_bytes_length, max_list_length, mode, chaos)
|
||||
for field, subtype in typ.fields.items()
|
||||
for field, subtype in typ.get_fields()
|
||||
})
|
||||
else:
|
||||
print(typ)
|
||||
@ -113,40 +115,34 @@ def get_random_bytes_list(rng: Random, length: int) -> bytes:
|
||||
return bytes(rng.getrandbits(8) for _ in range(length))
|
||||
|
||||
|
||||
def get_random_basic_value(rng: Random, typ: str) -> Any:
|
||||
if typ == 'bool':
|
||||
def get_random_basic_value(rng: Random, typ) -> Any:
|
||||
if is_bool_type(typ):
|
||||
return rng.choice((True, False))
|
||||
if typ[:4] == 'uint':
|
||||
size = int(typ[4:])
|
||||
if is_uint_type(typ):
|
||||
size = uint_byte_size(typ)
|
||||
assert size in UINT_SIZES
|
||||
return rng.randint(0, 2**size - 1)
|
||||
if typ == 'byte':
|
||||
return rng.randint(0, 8)
|
||||
return rng.randint(0, 256**size - 1)
|
||||
else:
|
||||
raise ValueError("Not a basic type")
|
||||
|
||||
|
||||
def get_min_basic_value(typ: str) -> Any:
|
||||
if typ == 'bool':
|
||||
def get_min_basic_value(typ) -> Any:
|
||||
if is_bool_type(typ):
|
||||
return False
|
||||
if typ[:4] == 'uint':
|
||||
size = int(typ[4:])
|
||||
if is_uint_type(typ):
|
||||
size = uint_byte_size(typ)
|
||||
assert size in UINT_SIZES
|
||||
return 0
|
||||
if typ == 'byte':
|
||||
return 0x00
|
||||
else:
|
||||
raise ValueError("Not a basic type")
|
||||
|
||||
|
||||
def get_max_basic_value(typ: str) -> Any:
|
||||
if typ == 'bool':
|
||||
def get_max_basic_value(typ) -> Any:
|
||||
if is_bool_type(typ):
|
||||
return True
|
||||
if typ[:4] == 'uint':
|
||||
size = int(typ[4:])
|
||||
if is_uint_type(typ):
|
||||
size = uint_byte_size(typ)
|
||||
assert size in UINT_SIZES
|
||||
return 2**size - 1
|
||||
if typ == 'byte':
|
||||
return 0xff
|
||||
return 256**size - 1
|
||||
else:
|
||||
raise ValueError("Not a basic type")
|
||||
|
@ -1,112 +0,0 @@
|
||||
from . import spec
|
||||
|
||||
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
List
|
||||
)
|
||||
|
||||
from .spec import (
|
||||
BeaconState,
|
||||
BeaconBlock,
|
||||
Slot,
|
||||
)
|
||||
|
||||
|
||||
def expected_deposit_count(state: BeaconState) -> int:
|
||||
return min(
|
||||
spec.MAX_DEPOSITS,
|
||||
state.latest_eth1_data.deposit_count - state.deposit_index
|
||||
)
|
||||
|
||||
|
||||
def process_operation_type(state: BeaconState,
|
||||
operations: List[Any],
|
||||
max_operations: int,
|
||||
tx_fn: Callable[[BeaconState, Any], None]) -> None:
|
||||
assert len(operations) <= max_operations
|
||||
for operation in operations:
|
||||
tx_fn(state, operation)
|
||||
|
||||
|
||||
def process_operations(state: BeaconState, block: BeaconBlock) -> None:
|
||||
process_operation_type(
|
||||
state,
|
||||
block.body.proposer_slashings,
|
||||
spec.MAX_PROPOSER_SLASHINGS,
|
||||
spec.process_proposer_slashing,
|
||||
)
|
||||
|
||||
process_operation_type(
|
||||
state,
|
||||
block.body.attester_slashings,
|
||||
spec.MAX_ATTESTER_SLASHINGS,
|
||||
spec.process_attester_slashing,
|
||||
)
|
||||
|
||||
process_operation_type(
|
||||
state,
|
||||
block.body.attestations,
|
||||
spec.MAX_ATTESTATIONS,
|
||||
spec.process_attestation,
|
||||
)
|
||||
|
||||
assert len(block.body.deposits) == expected_deposit_count(state)
|
||||
process_operation_type(
|
||||
state,
|
||||
block.body.deposits,
|
||||
spec.MAX_DEPOSITS,
|
||||
spec.process_deposit,
|
||||
)
|
||||
|
||||
process_operation_type(
|
||||
state,
|
||||
block.body.voluntary_exits,
|
||||
spec.MAX_VOLUNTARY_EXITS,
|
||||
spec.process_voluntary_exit,
|
||||
)
|
||||
|
||||
assert len(block.body.transfers) == len(set(block.body.transfers))
|
||||
process_operation_type(
|
||||
state,
|
||||
block.body.transfers,
|
||||
spec.MAX_TRANSFERS,
|
||||
spec.process_transfer,
|
||||
)
|
||||
|
||||
|
||||
def process_block(state: BeaconState,
|
||||
block: BeaconBlock,
|
||||
verify_state_root: bool=False) -> None:
|
||||
spec.process_block_header(state, block)
|
||||
spec.process_randao(state, block)
|
||||
spec.process_eth1_data(state, block)
|
||||
|
||||
process_operations(state, block)
|
||||
if verify_state_root:
|
||||
spec.verify_block_state_root(state, block)
|
||||
|
||||
|
||||
def process_epoch_transition(state: BeaconState) -> None:
|
||||
spec.process_justification_and_finalization(state)
|
||||
spec.process_crosslinks(state)
|
||||
spec.process_rewards_and_penalties(state)
|
||||
spec.process_registry_updates(state)
|
||||
spec.process_slashings(state)
|
||||
spec.process_final_updates(state)
|
||||
|
||||
|
||||
def state_transition_to(state: BeaconState, up_to: Slot) -> BeaconState:
|
||||
while state.slot < up_to:
|
||||
spec.cache_state(state)
|
||||
if (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0:
|
||||
process_epoch_transition(state)
|
||||
spec.advance_slot(state)
|
||||
|
||||
|
||||
def state_transition(state: BeaconState,
|
||||
block: BeaconBlock,
|
||||
verify_state_root: bool=False) -> BeaconState:
|
||||
state_transition_to(state, block.slot)
|
||||
process_block(state, block, verify_state_root)
|
@ -9,4 +9,4 @@ def bls_verify_multiple(pubkeys, message_hashes, signature, domain):
|
||||
|
||||
|
||||
def bls_aggregate_pubkeys(pubkeys):
|
||||
return b'\x42' * 96
|
||||
return b'\x42' * 48
|
||||
|
@ -34,10 +34,13 @@ def get_merkle_proof(tree, item_index):
|
||||
|
||||
def next_power_of_two(v: int) -> int:
|
||||
"""
|
||||
Get the next power of 2. (for 64 bit range ints)
|
||||
Get the next power of 2. (for 64 bit range ints).
|
||||
0 is a special case, to have non-empty defaults.
|
||||
Examples:
|
||||
0 -> 0, 1 -> 1, 2 -> 2, 3 -> 4, 32 -> 32, 33 -> 64
|
||||
0 -> 1, 1 -> 1, 2 -> 2, 3 -> 4, 32 -> 32, 33 -> 64
|
||||
"""
|
||||
if v == 0:
|
||||
return 1
|
||||
# effectively fill the bitstring (1 less, do not want to with ones, then increment for next power of 2.
|
||||
v -= 1
|
||||
v |= v >> (1 << 0)
|
||||
|
@ -8,13 +8,13 @@ BYTES_PER_LENGTH_OFFSET = 4
|
||||
|
||||
|
||||
def is_basic_type(typ):
|
||||
return is_uint(typ) or typ == bool
|
||||
return is_uint_type(typ) or is_bool_type(typ)
|
||||
|
||||
|
||||
def serialize_basic(value, typ):
|
||||
if is_uint(typ):
|
||||
if is_uint_type(typ):
|
||||
return value.to_bytes(uint_byte_size(typ), 'little')
|
||||
if issubclass(typ, bool):
|
||||
if is_bool_type(typ):
|
||||
if value:
|
||||
return b'\x01'
|
||||
else:
|
||||
@ -24,11 +24,11 @@ def serialize_basic(value, typ):
|
||||
def is_fixed_size(typ):
|
||||
if is_basic_type(typ):
|
||||
return True
|
||||
elif is_list_type(typ):
|
||||
elif is_list_kind(typ):
|
||||
return False
|
||||
elif is_vector_type(typ):
|
||||
return is_fixed_size(read_vector_elem_typ(typ))
|
||||
elif is_container_typ(typ):
|
||||
elif is_vector_kind(typ):
|
||||
return is_fixed_size(read_vector_elem_type(typ))
|
||||
elif is_container_type(typ):
|
||||
return all(is_fixed_size(t) for t in typ.get_field_types())
|
||||
else:
|
||||
raise Exception("Type not supported: {}".format(typ))
|
||||
@ -38,9 +38,9 @@ def is_fixed_size(typ):
|
||||
def serialize(obj, typ=None):
|
||||
if is_basic_type(typ):
|
||||
return serialize_basic(obj, typ)
|
||||
elif is_list_type(typ) or is_vector_type(typ):
|
||||
return encode_series(list(obj), [read_elem_typ(typ)]*len(obj))
|
||||
elif is_container_typ(typ):
|
||||
elif is_list_kind(typ) or is_vector_kind(typ):
|
||||
return encode_series(obj, [read_elem_type(typ)]*len(obj))
|
||||
elif is_container_type(typ):
|
||||
return encode_series(obj.get_field_values(), typ.get_field_types())
|
||||
else:
|
||||
raise Exception("Type not supported: {}".format(typ))
|
||||
@ -103,24 +103,25 @@ def mix_in_length(root, length):
|
||||
def hash_tree_root(obj, typ=None):
|
||||
if is_basic_type(typ):
|
||||
return merkleize_chunks(chunkify(serialize_basic(obj, typ)))
|
||||
elif is_list_type(typ) or is_vector_type(typ):
|
||||
subtype = read_elem_typ(typ)
|
||||
elif is_list_kind(typ) or is_vector_kind(typ):
|
||||
subtype = read_elem_type(typ)
|
||||
if is_basic_type(subtype):
|
||||
leaves = chunkify(pack(obj, subtype))
|
||||
else:
|
||||
leaves = [hash_tree_root(elem, subtype) for elem in obj]
|
||||
leaf_root = merkleize_chunks(leaves)
|
||||
return mix_in_length(leaf_root, len(obj)) if is_list_type(typ) else leaf_root
|
||||
elif is_container_typ(typ):
|
||||
leaves = [hash_tree_root(elem, subtyp) for elem, subtyp in obj.get_fields()]
|
||||
return mix_in_length(leaf_root, len(obj)) if is_list_kind(typ) else leaf_root
|
||||
elif is_container_type(typ):
|
||||
leaves = [hash_tree_root(field_value, field_typ) for field_value, field_typ in obj.get_typed_values()]
|
||||
return merkleize_chunks(chunkify(b''.join(leaves)))
|
||||
else:
|
||||
raise Exception("Type not supported: obj {} type {}".format(obj, typ))
|
||||
|
||||
|
||||
@infer_input_type
|
||||
def signing_root(obj, typ=None):
|
||||
assert is_container_typ(typ)
|
||||
leaves = [hash_tree_root(elem, subtyp) for elem, subtyp in obj.get_fields()[:-1]]
|
||||
def signing_root(obj, typ):
|
||||
assert is_container_type(typ)
|
||||
# ignore last field
|
||||
leaves = [hash_tree_root(field_value, field_typ) for field_value, field_typ in obj.get_typed_values()[:-1]]
|
||||
return merkleize_chunks(chunkify(b''.join(leaves)))
|
||||
|
||||
|
@ -53,12 +53,12 @@ def is_bottom_layer_type(typ):
|
||||
)
|
||||
|
||||
@infer_input_type
|
||||
def get_fields(obj, typ=None):
|
||||
def get_typed_values(obj, typ=None):
|
||||
if is_container_typ(typ):
|
||||
return obj.get_fields()
|
||||
return obj.get_typed_values()
|
||||
elif is_list_type(typ) or is_vector_type(typ):
|
||||
subtype = read_elem_typ(typ)
|
||||
return zip([subtype] * len(obj), obj)
|
||||
elem_type = read_elem_typ(typ)
|
||||
return zip(obj, [elem_type] * len(obj))
|
||||
else:
|
||||
raise Exception("Invalid type")
|
||||
|
||||
@ -74,8 +74,8 @@ def ssz_all(obj, typ=None, root=1):
|
||||
data = serialize_basic(obj, typ) if is_basic_type(typ) else pack(obj, read_elem_typ(typ))
|
||||
return {**o, **merkle_tree_of_chunks(chunkify(data), base)}
|
||||
else:
|
||||
fields = get_fields(obj, typ=typ)
|
||||
fields = get_typed_values(obj, typ=typ)
|
||||
sub_base = base * next_power_of_two(len(fields))
|
||||
for i, (elem, subtype) in enumerate(fields):
|
||||
o = {**o, **ssz_all(elem, typ=subtype, root=sub_base+i)}
|
||||
for i, (elem, elem_type) in enumerate(fields):
|
||||
o = {**o, **ssz_all(elem, typ=elem_type, root=sub_base+i)}
|
||||
return {**o, **filter(sub_base, len(fields))}
|
||||
|
@ -1,6 +1,7 @@
|
||||
from typing import List, Iterable, Type, NewType, TypeVar
|
||||
from typing import Union
|
||||
from inspect import isclass
|
||||
from typing import List, Iterable, TypeVar, Type, NewType
|
||||
from typing import Union
|
||||
from typing_inspect import get_origin
|
||||
|
||||
T = TypeVar('T')
|
||||
L = TypeVar('L')
|
||||
@ -64,20 +65,27 @@ class uint256(uint):
|
||||
return super().__new__(cls, value)
|
||||
|
||||
|
||||
def is_uint(typ):
|
||||
def is_uint_type(typ):
|
||||
# All integers are uint in the scope of the spec here.
|
||||
# Since we default to uint64. Bounds can be checked elsewhere.
|
||||
return (isinstance(typ, int.__class__) and issubclass(typ, int)) or typ == uint64
|
||||
# However, some are wrapped in a NewType
|
||||
if hasattr(typ, '__supertype__'):
|
||||
# get the type that the NewType is wrapping
|
||||
typ = typ.__supertype__
|
||||
|
||||
return isinstance(typ, type) and issubclass(typ, int) and not issubclass(typ, bool)
|
||||
|
||||
|
||||
def uint_byte_size(typ):
|
||||
if isinstance(typ, int.__class__) and issubclass(typ, uint):
|
||||
return typ.byte_len
|
||||
elif typ in (int, uint64):
|
||||
# Default to uint64
|
||||
return 8
|
||||
else:
|
||||
raise TypeError("Type %s is not an uint (or int-default uint64) type" % typ)
|
||||
if hasattr(typ, '__supertype__'):
|
||||
typ = typ.__supertype__
|
||||
if isinstance(typ, type):
|
||||
if issubclass(typ, uint):
|
||||
return typ.byte_len
|
||||
elif issubclass(typ, int):
|
||||
# Default to uint64
|
||||
return 8
|
||||
raise TypeError("Type %s is not an uint (or int-default uint64) type" % typ)
|
||||
|
||||
|
||||
# SSZ Container base class
|
||||
@ -89,7 +97,7 @@ class Container(object):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
cls = self.__class__
|
||||
for f, t in cls.get_fields().items():
|
||||
for f, t in cls.get_fields():
|
||||
if f not in kwargs:
|
||||
setattr(self, f, get_zero_value(t))
|
||||
else:
|
||||
@ -114,13 +122,28 @@ class Container(object):
|
||||
def __repr__(self):
|
||||
return repr({field: getattr(self, field) for field in self.get_field_names()})
|
||||
|
||||
def __str__(self):
|
||||
output = []
|
||||
for field in self.get_field_names():
|
||||
output.append(f'{field}: {getattr(self, field)}')
|
||||
return "\n".join(output)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.hash_tree_root() == other.hash_tree_root()
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.hash_tree_root())
|
||||
|
||||
@classmethod
|
||||
def get_fields_dict(cls):
|
||||
return dict(cls.__annotations__)
|
||||
|
||||
@classmethod
|
||||
def get_fields(cls):
|
||||
return dict(cls.__annotations__).items()
|
||||
return list(dict(cls.__annotations__).items())
|
||||
|
||||
def get_typed_values(self):
|
||||
return list(zip(self.get_field_values(), self.get_field_types()))
|
||||
|
||||
@classmethod
|
||||
def get_field_names(cls):
|
||||
@ -137,6 +160,9 @@ class Container(object):
|
||||
|
||||
|
||||
def _is_vector_instance_of(a, b):
|
||||
# Other must not be a BytesN
|
||||
if issubclass(b, bytes):
|
||||
return False
|
||||
if not hasattr(b, 'elem_type') or not hasattr(b, 'length'):
|
||||
# Vector (b) is not an instance of Vector[X, Y] (a)
|
||||
return False
|
||||
@ -149,6 +175,9 @@ def _is_vector_instance_of(a, b):
|
||||
|
||||
|
||||
def _is_equal_vector_type(a, b):
|
||||
# Other must not be a BytesN
|
||||
if issubclass(b, bytes):
|
||||
return False
|
||||
if not hasattr(a, 'elem_type') or not hasattr(a, 'length'):
|
||||
if not hasattr(b, 'elem_type') or not hasattr(b, 'length'):
|
||||
# Vector == Vector
|
||||
@ -189,6 +218,9 @@ class VectorMeta(type):
|
||||
def __ne__(self, other):
|
||||
return not _is_equal_vector_type(self, other)
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.__class__)
|
||||
|
||||
|
||||
class Vector(metaclass=VectorMeta):
|
||||
|
||||
@ -238,8 +270,14 @@ class Vector(metaclass=VectorMeta):
|
||||
def __len__(self):
|
||||
return len(self.items)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.hash_tree_root() == other.hash_tree_root()
|
||||
|
||||
|
||||
def _is_bytes_n_instance_of(a, b):
|
||||
# Other has to be a Bytes derivative class to be a BytesN
|
||||
if not issubclass(b, bytes):
|
||||
return False
|
||||
if not hasattr(b, 'length'):
|
||||
# BytesN (b) is not an instance of BytesN[X] (a)
|
||||
return False
|
||||
@ -252,6 +290,9 @@ def _is_bytes_n_instance_of(a, b):
|
||||
|
||||
|
||||
def _is_equal_bytes_n_type(a, b):
|
||||
# Other has to be a Bytes derivative class to be a BytesN
|
||||
if not issubclass(b, bytes):
|
||||
return False
|
||||
if not hasattr(a, 'length'):
|
||||
if not hasattr(b, 'length'):
|
||||
# BytesN == BytesN
|
||||
@ -270,7 +311,7 @@ class BytesNMeta(type):
|
||||
out = type.__new__(cls, class_name, parents, attrs)
|
||||
if 'length' in attrs:
|
||||
setattr(out, 'length', attrs['length'])
|
||||
out._name = 'Vector'
|
||||
out._name = 'BytesN'
|
||||
out.elem_type = byte
|
||||
return out
|
||||
|
||||
@ -289,6 +330,9 @@ class BytesNMeta(type):
|
||||
def __ne__(self, other):
|
||||
return not _is_equal_bytes_n_type(self, other)
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.__class__)
|
||||
|
||||
|
||||
def parse_bytes(val):
|
||||
if val is None:
|
||||
@ -321,7 +365,7 @@ class BytesN(bytes, metaclass=BytesNMeta):
|
||||
else:
|
||||
bytesval = b'\x00' * cls.length
|
||||
if len(bytesval) != cls.length:
|
||||
raise TypeError("bytesN[%d] cannot be initialized with value of %d bytes" % (cls.length, len(bytesval)))
|
||||
raise TypeError("BytesN[%d] cannot be initialized with value of %d bytes" % (cls.length, len(bytesval)))
|
||||
return super().__new__(cls, bytesval)
|
||||
|
||||
def serialize(self):
|
||||
@ -335,29 +379,31 @@ class BytesN(bytes, metaclass=BytesNMeta):
|
||||
|
||||
# SSZ Defaults
|
||||
# -----------------------------
|
||||
|
||||
def get_zero_value(typ):
|
||||
if is_uint(typ):
|
||||
return 0
|
||||
if issubclass(typ, bool):
|
||||
return False
|
||||
if issubclass(typ, list):
|
||||
return []
|
||||
if issubclass(typ, Vector):
|
||||
return typ()
|
||||
if issubclass(typ, BytesN):
|
||||
return typ()
|
||||
if issubclass(typ, bytes):
|
||||
return b''
|
||||
if issubclass(typ, Container):
|
||||
return typ(**{f: get_zero_value(t) for f, t in typ.get_fields()}),
|
||||
|
||||
result = None
|
||||
if is_uint_type(typ):
|
||||
result = 0
|
||||
elif is_list_type(typ):
|
||||
result = []
|
||||
elif issubclass(typ, bool):
|
||||
result = False
|
||||
elif issubclass(typ, Vector):
|
||||
result = typ()
|
||||
elif issubclass(typ, BytesN):
|
||||
result = typ()
|
||||
elif issubclass(typ, bytes):
|
||||
result = b''
|
||||
elif issubclass(typ, Container):
|
||||
result = typ(**{f: get_zero_value(t) for f, t in typ.get_fields()})
|
||||
else:
|
||||
return Exception("Type not supported: {}".format(typ))
|
||||
return result
|
||||
|
||||
# Type helpers
|
||||
# -----------------------------
|
||||
|
||||
def infer_type(obj):
|
||||
if is_uint(obj.__class__):
|
||||
if is_uint_type(obj.__class__):
|
||||
return obj.__class__
|
||||
elif isinstance(obj, int):
|
||||
return uint64
|
||||
@ -380,29 +426,75 @@ def infer_input_type(fn):
|
||||
return infer_helper
|
||||
|
||||
|
||||
def is_bool_type(typ):
|
||||
if hasattr(typ, '__supertype__'):
|
||||
typ = typ.__supertype__
|
||||
return isinstance(typ, type) and issubclass(typ, bool)
|
||||
|
||||
|
||||
def is_list_type(typ):
|
||||
return (hasattr(typ, '_name') and typ._name == 'List') or typ == bytes
|
||||
"""
|
||||
Checks if the given type is a list.
|
||||
"""
|
||||
return get_origin(typ) is List or get_origin(typ) is list
|
||||
|
||||
|
||||
def is_bytes_type(typ):
|
||||
# Do not accept subclasses of bytes here, to avoid confusion with BytesN
|
||||
return typ == bytes
|
||||
|
||||
|
||||
def is_list_kind(typ):
|
||||
"""
|
||||
Checks if the given type is a kind of list. Can be bytes.
|
||||
"""
|
||||
return is_list_type(typ) or is_bytes_type(typ)
|
||||
|
||||
|
||||
def is_vector_type(typ):
|
||||
return isinstance(typ, int.__class__) and issubclass(typ, Vector)
|
||||
"""
|
||||
Checks if the given type is a vector.
|
||||
"""
|
||||
return isinstance(typ, type) and issubclass(typ, Vector)
|
||||
|
||||
def is_container_typ(typ):
|
||||
return isinstance(typ, int.__class__) and issubclass(typ, Container)
|
||||
|
||||
def read_list_elem_typ(list_typ: Type[List[T]]) -> T:
|
||||
def is_bytesn_type(typ):
|
||||
return isinstance(typ, type) and issubclass(typ, BytesN)
|
||||
|
||||
|
||||
def is_vector_kind(typ):
|
||||
"""
|
||||
Checks if the given type is a kind of vector. Can be BytesN.
|
||||
"""
|
||||
return is_vector_type(typ) or is_bytesn_type(typ)
|
||||
|
||||
|
||||
def is_container_type(typ):
|
||||
return isinstance(typ, type) and issubclass(typ, Container)
|
||||
|
||||
|
||||
T = TypeVar('T')
|
||||
L = TypeVar('L')
|
||||
|
||||
|
||||
def read_list_elem_type(list_typ: Type[List[T]]) -> T:
|
||||
if list_typ.__args__ is None or len(list_typ.__args__) != 1:
|
||||
raise TypeError("Supplied list-type is invalid, no element type found.")
|
||||
return list_typ.__args__[0]
|
||||
|
||||
def read_vector_elem_typ(vector_typ: Type[Vector[T, L]]) -> T:
|
||||
|
||||
def read_vector_elem_type(vector_typ: Type[Vector[T, L]]) -> T:
|
||||
return vector_typ.elem_type
|
||||
|
||||
def read_elem_typ(typ):
|
||||
|
||||
def read_elem_type(typ):
|
||||
if typ == bytes:
|
||||
return byte
|
||||
elif is_list_type(typ):
|
||||
return read_list_elem_typ(typ)
|
||||
return read_list_elem_type(typ)
|
||||
elif is_vector_type(typ):
|
||||
return read_vector_elem_typ(typ)
|
||||
return read_vector_elem_type(typ)
|
||||
elif issubclass(typ, bytes):
|
||||
return byte
|
||||
else:
|
||||
raise TypeError("Unexpected type: {}".format(typ))
|
||||
|
@ -2,3 +2,4 @@ eth-utils>=1.3.0,<2
|
||||
eth-typing>=2.1.0,<3.0.0
|
||||
pycryptodome==3.7.3
|
||||
py_ecc>=1.6.0
|
||||
typing_inspect==0.4.0
|
||||
|
@ -3,13 +3,11 @@ import pytest
|
||||
|
||||
import eth2spec.phase0.spec as spec
|
||||
|
||||
from eth2spec.phase0.state_transition import (
|
||||
state_transition,
|
||||
)
|
||||
from eth2spec.phase0.spec import (
|
||||
get_current_epoch,
|
||||
process_attestation,
|
||||
slot_to_epoch,
|
||||
state_transition,
|
||||
)
|
||||
from tests.helpers import (
|
||||
build_empty_block_for_next_slot,
|
||||
@ -66,6 +64,22 @@ def test_success_prevous_epoch(state):
|
||||
return pre_state, attestation, post_state
|
||||
|
||||
|
||||
def test_success_since_max_epochs_per_crosslink(state):
|
||||
for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2):
|
||||
next_epoch(state)
|
||||
|
||||
attestation = get_valid_attestation(state)
|
||||
data = attestation.data
|
||||
assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK
|
||||
|
||||
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
||||
next_slot(state)
|
||||
|
||||
pre_state, post_state = run_attestation_processing(state, attestation)
|
||||
|
||||
return pre_state, attestation, post_state
|
||||
|
||||
|
||||
def test_before_inclusion_delay(state):
|
||||
attestation = get_valid_attestation(state)
|
||||
# do not increment slot to allow for inclusion delay
|
||||
@ -126,7 +140,33 @@ def test_bad_previous_crosslink(state):
|
||||
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
||||
next_slot(state)
|
||||
|
||||
state.current_crosslinks[attestation.data.crosslink.shard].epoch += 10
|
||||
attestation.data.crosslink.parent_root = b'\x27' * 32
|
||||
|
||||
pre_state, post_state = run_attestation_processing(state, attestation, False)
|
||||
|
||||
return pre_state, attestation, post_state
|
||||
|
||||
|
||||
def test_bad_crosslink_start_epoch(state):
|
||||
next_epoch(state)
|
||||
attestation = get_valid_attestation(state)
|
||||
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
||||
next_slot(state)
|
||||
|
||||
attestation.data.crosslink.start_epoch += 1
|
||||
|
||||
pre_state, post_state = run_attestation_processing(state, attestation, False)
|
||||
|
||||
return pre_state, attestation, post_state
|
||||
|
||||
|
||||
def test_bad_crosslink_end_epoch(state):
|
||||
next_epoch(state)
|
||||
attestation = get_valid_attestation(state)
|
||||
for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
|
||||
next_slot(state)
|
||||
|
||||
attestation.data.crosslink.end_epoch += 1
|
||||
|
||||
pre_state, post_state = run_attestation_processing(state, attestation, False)
|
||||
|
||||
|
@ -4,11 +4,11 @@ import pytest
|
||||
|
||||
from eth2spec.phase0.spec import (
|
||||
get_beacon_proposer_index,
|
||||
cache_state,
|
||||
advance_slot,
|
||||
process_slot,
|
||||
process_block_header,
|
||||
)
|
||||
from tests.helpers import (
|
||||
advance_slot,
|
||||
build_empty_block_for_next_slot,
|
||||
next_slot,
|
||||
)
|
||||
@ -18,7 +18,7 @@ pytestmark = pytest.mark.header
|
||||
|
||||
|
||||
def prepare_state_for_header_processing(state):
|
||||
cache_state(state)
|
||||
process_slot(state)
|
||||
advance_slot(state)
|
||||
|
||||
|
||||
|
@ -3,13 +3,11 @@ import pytest
|
||||
|
||||
import eth2spec.phase0.spec as spec
|
||||
|
||||
from eth2spec.phase0.state_transition import (
|
||||
state_transition,
|
||||
)
|
||||
from eth2spec.phase0.spec import (
|
||||
cache_state,
|
||||
process_slot,
|
||||
get_crosslink_deltas,
|
||||
process_crosslinks,
|
||||
state_transition,
|
||||
)
|
||||
from tests.helpers import (
|
||||
add_attestation_to_state,
|
||||
@ -35,7 +33,7 @@ def run_process_crosslinks(state, valid=True):
|
||||
state_transition(state, block)
|
||||
|
||||
# cache state before epoch transition
|
||||
cache_state(state)
|
||||
process_slot(state)
|
||||
|
||||
post_state = deepcopy(state)
|
||||
process_crosslinks(post_state)
|
||||
|
@ -2,11 +2,8 @@ from copy import deepcopy
|
||||
|
||||
from py_ecc import bls
|
||||
|
||||
from eth2spec.phase0.state_transition import (
|
||||
state_transition,
|
||||
)
|
||||
import eth2spec.phase0.spec as spec
|
||||
from eth2spec.utils.minimal_ssz import signing_root
|
||||
from eth2spec.utils.ssz.ssz_impl import signing_root
|
||||
from eth2spec.phase0.spec import (
|
||||
# constants
|
||||
ZERO_HASH,
|
||||
@ -27,6 +24,7 @@ from eth2spec.phase0.spec import (
|
||||
VoluntaryExit,
|
||||
# functions
|
||||
convert_to_indexed,
|
||||
bls_domain,
|
||||
get_active_validator_indices,
|
||||
get_attesting_indices,
|
||||
get_block_root,
|
||||
@ -40,6 +38,7 @@ from eth2spec.phase0.spec import (
|
||||
get_shard_delta,
|
||||
hash_tree_root,
|
||||
slot_to_epoch,
|
||||
state_transition,
|
||||
verify_merkle_branch,
|
||||
hash,
|
||||
)
|
||||
@ -55,6 +54,10 @@ pubkeys = [bls.privtopub(privkey) for privkey in privkeys]
|
||||
pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
|
||||
|
||||
|
||||
def advance_slot(state) -> None:
|
||||
state.slot += 1
|
||||
|
||||
|
||||
def get_balance(state, index):
|
||||
return state.balances[index]
|
||||
|
||||
@ -142,10 +145,7 @@ def build_deposit_data(state, pubkey, privkey, amount):
|
||||
signature = bls.sign(
|
||||
message_hash=signing_root(deposit_data),
|
||||
privkey=privkey,
|
||||
domain=get_domain(
|
||||
state,
|
||||
spec.DOMAIN_DEPOSIT,
|
||||
)
|
||||
domain=bls_domain(spec.DOMAIN_DEPOSIT),
|
||||
)
|
||||
deposit_data.signature = signature
|
||||
return deposit_data
|
||||
@ -175,6 +175,7 @@ def build_attestation_data(state, slot, shard):
|
||||
justified_block_root = state.current_justified_root
|
||||
|
||||
crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks
|
||||
parent_crosslink = crosslinks[shard]
|
||||
return AttestationData(
|
||||
beacon_block_root=block_root,
|
||||
source_epoch=justified_epoch,
|
||||
@ -183,9 +184,10 @@ def build_attestation_data(state, slot, shard):
|
||||
target_root=epoch_boundary_root,
|
||||
crosslink=Crosslink(
|
||||
shard=shard,
|
||||
epoch=min(slot_to_epoch(slot), crosslinks[shard].epoch + MAX_EPOCHS_PER_CROSSLINK),
|
||||
start_epoch=parent_crosslink.end_epoch,
|
||||
end_epoch=min(slot_to_epoch(slot), parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK),
|
||||
data_root=spec.ZERO_HASH,
|
||||
parent_root=hash_tree_root(crosslinks[shard]),
|
||||
parent_root=hash_tree_root(parent_crosslink),
|
||||
),
|
||||
)
|
||||
|
||||
|
@ -4,9 +4,6 @@ import pytest
|
||||
|
||||
import eth2spec.phase0.spec as spec
|
||||
|
||||
from eth2spec.phase0.state_transition import (
|
||||
state_transition,
|
||||
)
|
||||
from .helpers import (
|
||||
build_empty_block_for_next_slot,
|
||||
fill_aggregate_attestation,
|
||||
@ -67,7 +64,7 @@ def next_epoch_with_attestations(state,
|
||||
fill_aggregate_attestation(post_state, prev_attestation)
|
||||
block.body.attestations.append(prev_attestation)
|
||||
|
||||
state_transition(post_state, block)
|
||||
spec.state_transition(post_state, block)
|
||||
blocks.append(block)
|
||||
|
||||
return state, blocks, post_state
|
||||
|
@ -5,7 +5,7 @@ import pytest
|
||||
from py_ecc import bls
|
||||
import eth2spec.phase0.spec as spec
|
||||
|
||||
from eth2spec.utils.minimal_ssz import signing_root
|
||||
from eth2spec.utils.ssz.ssz_impl import signing_root
|
||||
from eth2spec.phase0.spec import (
|
||||
# constants
|
||||
ZERO_HASH,
|
||||
@ -20,13 +20,10 @@ from eth2spec.phase0.spec import (
|
||||
get_block_root_at_slot,
|
||||
get_current_epoch,
|
||||
get_domain,
|
||||
advance_slot,
|
||||
cache_state,
|
||||
process_slot,
|
||||
verify_merkle_branch,
|
||||
hash,
|
||||
)
|
||||
from eth2spec.phase0.state_transition import (
|
||||
state_transition,
|
||||
hash,
|
||||
)
|
||||
from eth2spec.utils.merkle_minimal import (
|
||||
calc_merkle_tree_from_leaves,
|
||||
@ -34,6 +31,7 @@ from eth2spec.utils.merkle_minimal import (
|
||||
get_merkle_root,
|
||||
)
|
||||
from .helpers import (
|
||||
advance_slot,
|
||||
get_balance,
|
||||
build_deposit_data,
|
||||
build_empty_block_for_next_slot,
|
||||
@ -54,7 +52,7 @@ pytestmark = pytest.mark.sanity
|
||||
|
||||
def test_slot_transition(state):
|
||||
test_state = deepcopy(state)
|
||||
cache_state(test_state)
|
||||
process_slot(test_state)
|
||||
advance_slot(test_state)
|
||||
assert test_state.slot == state.slot + 1
|
||||
assert get_state_root(test_state, state.slot) == state.hash_tree_root()
|
||||
|
Loading…
x
Reference in New Issue
Block a user