Merge branch 'dev' into fix-store-justified-checkpoint

commit 6ffc735642
@@ -11,7 +11,7 @@ This repository hosts the current Eth2 specifications. Discussions about design

[![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec)

-Core specifications for Eth2 clients be found in [specs](specs/). These are divided into features.
+Core specifications for Eth2 clients can be found in [specs](specs/). These are divided into features.
Features are researched and developed in parallel, and then consolidated into sequential upgrades when ready.

The current features are:
@@ -1,22 +1,24 @@
# Mainnet preset - Sharding

-# Beacon-chain
-# ---------------------------------------------------------------
# Misc
+# ---------------------------------------------------------------
# 2**10 (= 1,024)
MAX_SHARDS: 1024
-# 2**6 = 64
+# 2**6 (= 64)
INITIAL_ACTIVE_SHARDS: 64
# 2**3 (= 8)
-GASPRICE_ADJUSTMENT_COEFFICIENT: 8
+SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT: 8
# 2**4 (= 16)
MAX_SHARD_PROPOSER_SLASHINGS: 16
-# Shard block configs
-# ---------------------------------------------------------------
+#
MAX_SHARD_HEADERS_PER_SHARD: 4
# 2**8 (= 256)
SHARD_STATE_MEMORY_SLOTS: 256
+# 2**40 (= 1,099,511,627,776)
+BLOB_BUILDER_REGISTRY_LIMIT: 1099511627776

+# Shard blob samples
+# ---------------------------------------------------------------
# 2**11 (= 2,048)
MAX_SAMPLES_PER_BLOCK: 2048
# 2**10 (= 1,024)

@@ -25,6 +27,6 @@ TARGET_SAMPLES_PER_BLOCK: 1024
# Gwei values
# ---------------------------------------------------------------
# 2**33 (= 8,589,934,592) Gwei
-MAX_GASPRICE: 8589934592
+MAX_SAMPLE_PRICE: 8589934592
# 2**3 (= 8) Gwei
-MIN_GASPRICE: 8
+MIN_SAMPLE_PRICE: 8
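For context on how the renamed constants interact, below is a hedged sketch that mirrors the sharding spec's former `compute_updated_gasprice` with the new names; the normative `compute_updated_sample_price` lives in the sharding beacon-chain spec and may differ in detail.

```python
def compute_updated_sample_price(prev_price: Gwei, samples_length: uint64, adjustment_quotient: uint64) -> Gwei:
    # adjustment_quotient is derived from SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT by the caller.
    if samples_length > TARGET_SAMPLES_PER_BLOCK:
        delta = max(1, prev_price * (samples_length - TARGET_SAMPLES_PER_BLOCK)
                    // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient)
        return min(prev_price + delta, MAX_SAMPLE_PRICE)  # price is capped at MAX_SAMPLE_PRICE
    else:
        delta = max(1, prev_price * (TARGET_SAMPLES_PER_BLOCK - samples_length)
                    // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient)
        return max(prev_price, MIN_SAMPLE_PRICE + delta) - delta  # never drops below MIN_SAMPLE_PRICE
```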
@@ -1,6 +1,6 @@
# Minimal preset - Sharding

-# Beacon-chain
+# Misc
# ---------------------------------------------------------------
# Misc
# [customized] reduced for testing

@@ -8,15 +8,18 @@ MAX_SHARDS: 8
# [customized] reduced for testing
INITIAL_ACTIVE_SHARDS: 2
# 2**3 (= 8)
-GASPRICE_ADJUSTMENT_COEFFICIENT: 8
+SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT: 8
# [customized] reduced for testing
MAX_SHARD_PROPOSER_SLASHINGS: 4
-# Shard block configs
-# ---------------------------------------------------------------
+#
MAX_SHARD_HEADERS_PER_SHARD: 4
# 2**8 (= 256)
SHARD_STATE_MEMORY_SLOTS: 256
+# 2**40 (= 1,099,511,627,776)
+BLOB_BUILDER_REGISTRY_LIMIT: 1099511627776

+# Shard blob samples
+# ---------------------------------------------------------------
# 2**11 (= 2,048)
MAX_SAMPLES_PER_BLOCK: 2048
# 2**10 (= 1,024)

@@ -25,6 +28,6 @@ TARGET_SAMPLES_PER_BLOCK: 1024
# Gwei values
# ---------------------------------------------------------------
# 2**33 (= 8,589,934,592) Gwei
-MAX_GASPRICE: 8589934592
+MAX_SAMPLE_PRICE: 8589934592
# 2**3 (= 8) Gwei
-MIN_GASPRICE: 8
+MIN_SAMPLE_PRICE: 8
setup.py

@@ -56,7 +56,7 @@ def floorlog2(x: int) -> uint64:


OPTIMIZED_BLS_AGGREGATE_PUBKEYS = '''
-def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
+def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
    return bls.AggregatePKs(pubkeys)
'''

@@ -447,7 +447,7 @@ class AltairSpecBuilder(Phase0SpecBuilder):
    @classmethod
    def imports(cls, preset_name: str) -> str:
        return super().imports(preset_name) + '\n' + f'''
-from typing import NewType, Union
+from typing import NewType, Union as PyUnion

from eth2spec.phase0 import {preset_name} as phase0
from eth2spec.utils.ssz.ssz_typing import Path

@@ -463,7 +463,7 @@ GeneralizedIndex = NewType('GeneralizedIndex', int)
    @classmethod
    def sundry_functions(cls) -> str:
        return super().sundry_functions() + '\n\n' + '''
-def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex:
+def get_generalized_index(ssz_class: Any, *path: Sequence[PyUnion[int, SSZVariableName]]) -> GeneralizedIndex:
    ssz_path = Path(ssz_class)
    for item in path:
        ssz_path = ssz_path / item

@@ -480,21 +480,21 @@ def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariable

    @classmethod
    def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
-        if "eth2_aggregate_pubkeys" in functions:
-            functions["eth2_aggregate_pubkeys"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()
+        if "eth_aggregate_pubkeys" in functions:
+            functions["eth_aggregate_pubkeys"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()
        return super().implement_optimizations(functions)


#
# MergeSpecBuilder
#
-class MergeSpecBuilder(Phase0SpecBuilder):
+class MergeSpecBuilder(AltairSpecBuilder):
    fork: str = MERGE

    @classmethod
    def imports(cls, preset_name: str):
        return super().imports(preset_name) + f'''
from typing import Protocol
-from eth2spec.phase0 import {preset_name} as phase0
+from eth2spec.altair import {preset_name} as altair
from eth2spec.utils.ssz.ssz_typing import Bytes20, ByteList, ByteVector, uint256, Union
'''

@@ -509,7 +509,7 @@ ExecutionState = Any


def get_pow_block(hash: Bytes32) -> PowBlock:
-    return PowBlock(block_hash=hash, is_valid=True, is_processed=True,
+    return PowBlock(block_hash=hash, parent_hash=Bytes32(), is_valid=True, is_processed=True,
                    total_difficulty=uint256(0), difficulty=uint256(0))


@@ -844,19 +844,15 @@ class PySpecCommand(Command):
        if len(self.md_doc_paths) == 0:
            print("no paths were specified, using default markdown file paths for pyspec"
                  " build (spec fork: %s)" % self.spec_fork)
-            if self.spec_fork == PHASE0:
+            if self.spec_fork in (PHASE0, ALTAIR, MERGE):
                self.md_doc_paths = """
                    specs/phase0/beacon-chain.md
                    specs/phase0/fork-choice.md
                    specs/phase0/validator.md
                    specs/phase0/weak-subjectivity.md
                """
-            elif self.spec_fork == ALTAIR:
-                self.md_doc_paths = """
-                    specs/phase0/beacon-chain.md
-                    specs/phase0/fork-choice.md
-                    specs/phase0/validator.md
-                    specs/phase0/weak-subjectivity.md
+            if self.spec_fork in (ALTAIR, MERGE):
+                self.md_doc_paths += """
                    specs/altair/beacon-chain.md
                    specs/altair/bls.md
                    specs/altair/fork.md

@@ -864,18 +860,14 @@ class PySpecCommand(Command):
                    specs/altair/p2p-interface.md
                    specs/altair/sync-protocol.md
                """
-            elif self.spec_fork == MERGE:
-                self.md_doc_paths = """
-                    specs/phase0/beacon-chain.md
-                    specs/phase0/fork-choice.md
-                    specs/phase0/validator.md
-                    specs/phase0/weak-subjectivity.md
+            if self.spec_fork == MERGE:
+                self.md_doc_paths += """
                    specs/merge/beacon-chain.md
                    specs/merge/fork.md
                    specs/merge/fork-choice.md
                    specs/merge/validator.md
                """
-            else:
+            if len(self.md_doc_paths) == 0:
                raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)

        self.parsed_md_doc_paths = self.md_doc_paths.split()

@@ -1024,7 +1016,7 @@ setup(
        "py_ecc==5.2.0",
        "milagro_bls_binding==1.6.3",
        "dataclasses==0.6",
-        "remerkleable==0.1.21",
+        "remerkleable==0.1.22",
        RUAMEL_YAML_VERSION,
        "lru-dict==1.1.6",
        MARKO_VERSION,
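The reworked branches in the command above accumulate the markdown list across forks instead of repeating it per fork. A condensed, stand-alone illustration of the pattern (not part of setup.py itself):

```python
# Illustration only: later forks extend the earlier forks' markdown list.
PHASE0, ALTAIR, MERGE = "phase0", "altair", "merge"
spec_fork = MERGE
md_doc_paths = ""
if spec_fork in (PHASE0, ALTAIR, MERGE):
    md_doc_paths += "specs/phase0/beacon-chain.md specs/phase0/fork-choice.md "
if spec_fork in (ALTAIR, MERGE):
    md_doc_paths += "specs/altair/beacon-chain.md specs/altair/bls.md "
if spec_fork == MERGE:
    md_doc_paths += "specs/merge/beacon-chain.md specs/merge/fork-choice.md "
parsed_md_doc_paths = md_doc_paths.split()
print(parsed_md_doc_paths)
```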
@@ -287,7 +287,7 @@ def get_next_sync_committee(state: BeaconState) -> SyncCommittee:
    """
    indices = get_next_sync_committee_indices(state)
    pubkeys = [state.validators[index].pubkey for index in indices]
-    aggregate_pubkey = eth2_aggregate_pubkeys(pubkeys)
+    aggregate_pubkey = eth_aggregate_pubkeys(pubkeys)
    return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
```

@@ -544,7 +544,7 @@ def process_sync_aggregate(state: BeaconState, sync_aggregate: SyncAggregate) ->
    previous_slot = max(state.slot, Slot(1)) - Slot(1)
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
    signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
-    assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
+    assert eth_fast_aggregate_verify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)

    # Compute participant and proposer rewards
    total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
@@ -9,8 +9,8 @@
- [Introduction](#introduction)
- [Constants](#constants)
- [Extensions](#extensions)
-- [`eth2_aggregate_pubkeys`](#eth2_aggregate_pubkeys)
+- [`eth_aggregate_pubkeys`](#eth_aggregate_pubkeys)
-- [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
+- [`eth_fast_aggregate_verify`](#eth_fast_aggregate_verify)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

@@ -29,14 +29,14 @@ Knowledge of the [phase 0 specification](../phase0/beacon-chain.md) is assumed,

## Extensions

-### `eth2_aggregate_pubkeys`
+### `eth_aggregate_pubkeys`

An additional function `AggregatePKs` is defined to extend the
[IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04)
spec referenced in the phase 0 document.

```python
-def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
+def eth_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
    """
    Return the aggregate public key for the public keys in ``pubkeys``.

@@ -46,16 +46,19 @@ def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
    Refer to the BLS signature draft standard for more information.
    """
    assert len(pubkeys) > 0
+    # Ensure that the given inputs are valid pubkeys
+    assert all(bls.KeyValidate(pubkey) for pubkey in pubkeys)

    result = copy(pubkeys[0])
    for pubkey in pubkeys[1:]:
        result += pubkey
    return result
```

-### `eth2_fast_aggregate_verify`
+### `eth_fast_aggregate_verify`

```python
-def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
+def eth_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
    """
    Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty.
    """
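The hunk ends before the wrapper's body. A hedged sketch of how such a wrapper could look, assuming the `G2_POINT_AT_INFINITY` constant from this document's constants section:

```python
def eth_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
    # Sketch: an empty participant set is only acceptable together with the infinity signature.
    if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
        return True
    return bls.FastAggregateVerify(pubkeys, message, signature)
```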
@@ -139,6 +139,8 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64

- _[IGNORE]_ The contribution's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `contribution.slot == current_slot`.
- _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
+- _[REJECT]_ The contribution has participants --
+  that is, `any(contribution.aggregation_bits)`.
- _[REJECT]_ `contribution_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_sync_committee_aggregator(contribution_and_proof.selection_proof)` returns `True`.
- _[REJECT]_ The aggregator's validator index is in the declared subcommittee of the current sync committee --
  i.e. `state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)`.
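For reference, a hedged sketch of how a client might express the checks listed above, using names from the Altair spec (the `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance is elided):

```python
def validate_contribution_and_proof_gossip(state: BeaconState,
                                           contribution_and_proof: ContributionAndProof,
                                           current_slot: Slot) -> None:
    # Sketch only: each assert mirrors one bullet above.
    contribution = contribution_and_proof.contribution
    assert contribution.slot == current_slot  # [IGNORE], modulo clock disparity
    assert contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT  # [REJECT]
    assert any(contribution.aggregation_bits)  # [REJECT] the contribution has participants
    assert is_sync_committee_aggregator(contribution_and_proof.selection_proof)  # [REJECT]
    aggregator_pubkey = state.validators[contribution_and_proof.aggregator_index].pubkey
    assert aggregator_pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)  # [REJECT]
```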
@@ -354,7 +354,7 @@ def is_sync_committee_aggregator(signature: BLSSignature) -> bool:

If a validator is selected to aggregate the `SyncCommitteeMessage`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`.

-Given all of the (valid) collected `sync_committee_messages: Set[SyncCommitteeMessage]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields:
+Collect all of the (valid) `sync_committee_messages: Set[SyncCommitteeMessage]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator. If `len(sync_committee_messages) > 0`, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields:

###### Slot
@@ -1,7 +1,5 @@
# Ethereum 2.0 The Merge

-**Warning**: This document is currently based on [Phase 0](../phase0/beacon-chain.md) and will be rebased on [Altair](../altair/beacon-chain.md).
-
**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

@@ -14,6 +12,8 @@
- [Custom types](#custom-types)
- [Constants](#constants)
- [Execution](#execution)
+- [Configuration](#configuration)
+- [Genesis testing settings](#genesis-testing-settings)
- [Containers](#containers)
- [Extended containers](#extended-containers)
- [`BeaconBlockBody`](#beaconblockbody)

@@ -33,6 +33,7 @@
- [`on_payload`](#on_payload)
- [Block processing](#block-processing)
- [Execution payload processing](#execution-payload-processing)
+- [`is_valid_gas_limit`](#is_valid_gas_limit)
- [`process_execution_payload`](#process_execution_payload)
- [Testing](#testing)

@@ -61,6 +62,19 @@ This patch adds transaction execution to the beacon chain as part of the Merge f
| `MAX_BYTES_PER_OPAQUE_TRANSACTION` | `uint64(2**20)` (= 1,048,576) |
| `MAX_TRANSACTIONS_PER_PAYLOAD` | `uint64(2**14)` (= 16,384) |
| `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) |
+| `GAS_LIMIT_DENOMINATOR` | `uint64(2**10)` (= 1,024) |
+| `MIN_GAS_LIMIT` | `uint64(5000)` (= 5,000) |
+
+## Configuration
+
+### Genesis testing settings
+
+*Note*: These configuration settings do not apply to the mainnet and are utilized only by pure Merge testing.
+
+| Name | Value |
+| - | - |
+| `GENESIS_GAS_LIMIT` | `uint64(30000000)` (= 30,000,000) |
+| `GENESIS_BASE_FEE_PER_GAS` | `Bytes32('0x00ca9a3b00000000000000000000000000000000000000000000000000000000')` (= 1,000,000,000) |

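A quick check of the `GENESIS_BASE_FEE_PER_GAS` value: its bytes are the 32-byte little-endian encoding of 1,000,000,000 (the `ExecutionPayload` note below records that `base_fee_per_gas` is serialized little-endian):

```python
# 1,000,000,000 encoded as 32 little-endian bytes matches the Bytes32 literal above.
assert (1_000_000_000).to_bytes(32, "little").hex() == "00ca9a3b" + "00" * 28
```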
## Containers

@@ -69,7 +83,17 @@ This patch adds transaction execution to the beacon chain as part of the Merge f
#### `BeaconBlockBody`

```python
-class BeaconBlockBody(phase0.BeaconBlockBody):
+class BeaconBlockBody(Container):
+    randao_reveal: BLSSignature
+    eth1_data: Eth1Data  # Eth1 data vote
+    graffiti: Bytes32  # Arbitrary data
+    # Operations
+    proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
+    attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
+    attestations: List[Attestation, MAX_ATTESTATIONS]
+    deposits: List[Deposit, MAX_DEPOSITS]
+    voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
+    sync_aggregate: SyncAggregate
    # Execution
    execution_payload: ExecutionPayload  # [New in Merge]
```

@@ -77,7 +101,41 @@ class BeaconBlockBody(phase0.BeaconBlockBody):
#### `BeaconState`

```python
-class BeaconState(phase0.BeaconState):
+class BeaconState(Container):
+    # Versioning
+    genesis_time: uint64
+    genesis_validators_root: Root
+    slot: Slot
+    fork: Fork
+    # History
+    latest_block_header: BeaconBlockHeader
+    block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+    state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
+    historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
+    # Eth1
+    eth1_data: Eth1Data
+    eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
+    eth1_deposit_index: uint64
+    # Registry
+    validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
+    balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
+    # Randomness
+    randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
+    # Slashings
+    slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
+    # Participation
+    previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
+    current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
+    # Finality
+    justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH]  # Bit set for every recent justified epoch
+    previous_justified_checkpoint: Checkpoint
+    current_justified_checkpoint: Checkpoint
+    finalized_checkpoint: Checkpoint
+    # Inactivity
+    inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
+    # Sync
+    current_sync_committee: SyncCommittee
+    next_sync_committee: SyncCommittee
    # Execution
    latest_execution_payload_header: ExecutionPayloadHeader  # [New in Merge]
```

@@ -86,6 +144,8 @@ class BeaconState(phase0.BeaconState):

#### `ExecutionPayload`

+*Note*: The `base_fee_per_gas` field is serialized in little-endian.
+
```python
class ExecutionPayload(Container):
    # Execution block header fields

@@ -99,6 +159,7 @@ class ExecutionPayload(Container):
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
+    base_fee_per_gas: Bytes32  # base fee introduced in EIP-1559, little-endian serialized
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]

@@ -119,6 +180,7 @@ class ExecutionPayloadHeader(Container):
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
+    base_fee_per_gas: Bytes32
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions_root: Root

@@ -190,23 +252,48 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)
+    process_sync_aggregate(state, block.body.sync_aggregate)
    if is_execution_enabled(state, block.body):
        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)  # [New in Merge]
```

### Execution payload processing

+#### `is_valid_gas_limit`
+
+```python
+def is_valid_gas_limit(payload: ExecutionPayload, parent: ExecutionPayloadHeader) -> bool:
+    parent_gas_limit = parent.gas_limit
+
+    # Check if the payload used too much gas
+    if payload.gas_used > payload.gas_limit:
+        return False
+
+    # Check if the payload changed the gas limit too much
+    if payload.gas_limit >= parent_gas_limit + parent_gas_limit // GAS_LIMIT_DENOMINATOR:
+        return False
+    if payload.gas_limit <= parent_gas_limit - parent_gas_limit // GAS_LIMIT_DENOMINATOR:
+        return False
+
+    # Check if the gas limit is at least the minimum gas limit
+    if payload.gas_limit < MIN_GAS_LIMIT:
+        return False
+
+    return True
+```
+
#### `process_execution_payload`

*Note:* This function depends on `process_randao` function call as it retrieves the most recent randao mix from the `state`. Implementations that are considering parallel processing of execution payload with respect to beacon chain state transition function should work around this dependency.

```python
def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
-    # Verify consistency of the parent hash, block number and random
+    # Verify consistency of the parent hash, block number, random, base fee per gas and gas limit
    if is_merge_complete(state):
        assert payload.parent_hash == state.latest_execution_payload_header.block_hash
        assert payload.block_number == state.latest_execution_payload_header.block_number + uint64(1)
        assert payload.random == get_randao_mix(state, get_current_epoch(state))
+        assert is_valid_gas_limit(payload, state.latest_execution_payload_header)
    # Verify timestamp
    assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
    # Verify the execution payload is valid

@@ -223,6 +310,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
        gas_limit=payload.gas_limit,
        gas_used=payload.gas_used,
        timestamp=payload.timestamp,
+        base_fee_per_gas=payload.base_fee_per_gas,
        block_hash=payload.block_hash,
        transactions_root=hash_tree_root(payload.transactions),
    )

@@ -232,7 +320,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Merge testing only.

-*Note*: The function `initialize_beacon_state_from_eth1` is modified to use `MERGE_FORK_VERSION` and initialize `latest_execution_payload_header`.
+*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `MERGE_FORK_VERSION` as the current fork version, (2) utilizing the Merge `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) initializing `latest_execution_payload_header`.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,

@@ -269,10 +357,17 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

+    # Fill in sync committees
+    # Note: A duplicate committee is assigned for the current and next committee at genesis
+    state.current_sync_committee = get_next_sync_committee(state)
+    state.next_sync_committee = get_next_sync_committee(state)
+
    # [New in Merge] Initialize the execution payload header (with block number set to 0)
    state.latest_execution_payload_header.block_hash = eth1_block_hash
    state.latest_execution_payload_header.timestamp = eth1_timestamp
    state.latest_execution_payload_header.random = eth1_block_hash
+    state.latest_execution_payload_header.gas_limit = GENESIS_GAS_LIMIT
+    state.latest_execution_payload_header.base_fee_per_gas = GENESIS_BASE_FEE_PER_GAS

    return state
```
@@ -82,6 +82,7 @@ class TransitionStore(object):
@dataclass
class PowBlock(object):
    block_hash: Hash32
+    parent_hash: Hash32
    is_processed: boolean
    is_valid: boolean
    total_difficulty: uint256

@@ -99,9 +100,10 @@ Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that given t
Used by fork-choice handler, `on_block`.

```python
-def is_valid_terminal_pow_block(transition_store: TransitionStore, block: PowBlock) -> bool:
+def is_valid_terminal_pow_block(transition_store: TransitionStore, block: PowBlock, parent: PowBlock) -> bool:
    is_total_difficulty_reached = block.total_difficulty >= transition_store.transition_total_difficulty
-    return block.is_valid and is_total_difficulty_reached
+    is_parent_total_difficulty_valid = parent.total_difficulty < transition_store.transition_total_difficulty
+    return block.is_valid and is_total_difficulty_reached and is_parent_total_difficulty_valid
```

## Updated fork-choice handlers

@@ -130,8 +132,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock, transition_store: Tr
    if (transition_store is not None) and is_merge_block(pre_state, block):
        # Delay consideration of block until PoW block is processed by the PoW node
        pow_block = get_pow_block(block.body.execution_payload.parent_hash)
+        pow_parent = get_pow_block(pow_block.parent_hash)
        assert pow_block.is_processed
-        assert is_valid_terminal_pow_block(transition_store, pow_block)
+        assert is_valid_terminal_pow_block(transition_store, pow_block, pow_parent)

    # Check the block is valid and compute the post-state
    state = pre_state.copy()
@@ -43,15 +43,18 @@ Note that for the pure Merge networks, we don't apply `upgrade_to_merge` since i

### Upgrading the state

-If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, an irregular state change is made to upgrade to Merge.
+As with the Phase0-to-Altair upgrade, the `state_transition` is modified to upgrade the `BeaconState`.
+The `BeaconState` upgrade runs as part of `process_slots`; slots with missing block proposals do not affect the upgrade time.
+
+If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, an irregular state change is made to upgrade to Merge.
The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `MERGE_FORK_EPOCH * SLOTS_PER_EPOCH`.
-Care must be taken when transitioning through the fork boundary as implementations will need a modified [state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) that deviates from the Phase 0 document.
-In particular, the outer `state_transition` function defined in the Phase 0 document will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`.
+When multiple upgrades are scheduled for the same epoch (common for test-networks),
+all the upgrades run in sequence before resuming the regular state transition.

```python
-def upgrade_to_merge(pre: phase0.BeaconState) -> BeaconState:
-    epoch = phase0.get_current_epoch(pre)
+def upgrade_to_merge(pre: altair.BeaconState) -> BeaconState:
+    epoch = altair.get_current_epoch(pre)
    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,

@@ -78,14 +81,19 @@ def upgrade_to_merge(pre: phase0.BeaconState) -> BeaconState:
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
-        # Attestations
-        previous_epoch_attestations=pre.previous_epoch_attestations,
-        current_epoch_attestations=pre.current_epoch_attestations,
+        # Participation
+        previous_epoch_participation=pre.previous_epoch_participation,
+        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
+        # Inactivity
+        inactivity_scores=pre.inactivity_scores,
+        # Sync
+        current_sync_committee=pre.current_sync_committee,
+        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=ExecutionPayloadHeader(),
    )
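To make the timing above concrete, a hedged sketch of a driver that advances slots across the fork boundary, assuming phase0/Altair-style helpers (illustrative only; the normative logic lives inside `process_slots`):

```python
def advance_across_merge_fork(state: BeaconState, target_slot: Slot) -> BeaconState:
    # Illustration of the timing rules above, not the normative process_slots.
    while state.slot < target_slot:
        process_slots(state, Slot(state.slot + 1))  # per-slot (and per-epoch) processing
        # Once the inner loop has set state.slot to the first slot of MERGE_FORK_EPOCH,
        # scheduled upgrades run in sequence before regular processing resumes.
        if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH:
            state = upgrade_to_merge(state)
    return state
```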
@@ -0,0 +1,131 @@
+# Ethereum Merge networking specification
+
+This document contains the networking specification for Ethereum 2.0 clients added during the Merge deployment.
+
+The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. This document should be viewed as additive to the documents from [Phase 0](../phase0/p2p-interface.md) and from [Altair](../altair/p2p-interface.md)
+and will be referred to as the "Phase 0 document" and "Altair document" respectively, hereafter.
+Readers should understand the Phase 0 and Altair documents and use them as a basis to understand the changes outlined in this document.
+
+## Table of contents
+
+<!-- TOC -->
+<!-- START doctoc generated TOC please keep comment here to allow auto update -->
+<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
+
+- [Warning](#warning)
+- [Modifications in the Merge](#modifications-in-the-merge)
+- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
+- [Topics and messages](#topics-and-messages)
+- [Global topics](#global-topics)
+- [`beacon_block`](#beacon_block)
+- [Transitioning the gossip](#transitioning-the-gossip)
+- [The Req/Resp domain](#the-reqresp-domain)
+- [Messages](#messages)
+- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
+- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
+
+<!-- END doctoc generated TOC please keep comment here to allow auto update -->
+<!-- /TOC -->
+
+## Warning
+
+This document is currently illustrative for early Merge testnets and some parts are subject to change.
+Refer to the note in the [validator guide](./validator.md) for further details.
+
+# Modifications in the Merge
+
+## The gossip domain: gossipsub
+
+Some gossip meshes are upgraded in the Merge to support upgraded types.
+
+### Topics and messages
+
+Topics follow the same specification as in prior upgrades.
+All topics remain stable except the beacon block topic which is updated with the modified type.
+
+The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 and Altair documents.
+
+The derivation of the `message-id` remains stable.
+
+The new topics along with the type of the `data` field of a gossipsub message are given in this table:
+
+| Name | Message Type |
+| - | - |
+| `beacon_block` | `SignedBeaconBlock` (modified) |
+
+Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
+
+#### Global topics
+
+The Merge changes the type of the global beacon block topic.
+
+##### `beacon_block`
+
+The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in the Merge.
+Specifically, this type changes with the addition of `execution_payload` to the inner `BeaconBlockBody`.
+See the Merge [state transition document](./beacon-chain.md#beaconblockbody) for further details.
+
+In addition to the gossip validations for this topic from prior specifications,
+the following validations MUST pass before forwarding the `signed_beacon_block` on the network.
+Alias `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`.
+- If the merge is complete with respect to the head state -- i.e. `is_merge_complete(state)` --
+  then validate the following:
+  - _[REJECT]_ The block's execution payload must be non-empty --
+    i.e. `execution_payload != ExecutionPayload()`
+- If the execution is enabled for the block -- i.e. `is_execution_enabled(state, block.body)`
+  then validate the following:
+  - _[REJECT]_ The block's execution payload timestamp is correct with respect to the slot
+    -- i.e. `execution_payload.timestamp == compute_time_at_slot(state, block.slot)`.
+  - _[REJECT]_ Gas used is less than the gas limit --
+    i.e. `execution_payload.gas_used <= execution_payload.gas_limit`.
+  - _[REJECT]_ The execution payload block hash is not equal to the parent hash --
+    i.e. `execution_payload.block_hash != execution_payload.parent_hash`.
+  - _[REJECT]_ The execution payload transaction list data is within expected size limits,
+    the data MUST NOT be larger than the SSZ list-limit,
+    and a client MAY be more strict.
+
+*Note*: Additional [gossip validations](https://github.com/ethereum/devp2p/blob/master/caps/eth.md#block-encoding-and-validity)
+(see block "data validity" conditions) that rely more heavily on execution-layer state and logic are currently under consideration.
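A hedged sketch of the added `beacon_block` checks as a hypothetical helper (not part of the spec; assumes the head `state` and the aliases defined above):

```python
def validate_merge_beacon_block_gossip(state: BeaconState, signed_beacon_block: SignedBeaconBlock) -> None:
    # Sketch only: each assert mirrors one [REJECT] condition above.
    block = signed_beacon_block.message
    execution_payload = block.body.execution_payload
    if is_merge_complete(state):
        assert execution_payload != ExecutionPayload()  # non-empty payload once the merge is complete
    if is_execution_enabled(state, block.body):
        assert execution_payload.timestamp == compute_time_at_slot(state, block.slot)
        assert execution_payload.gas_used <= execution_payload.gas_limit
        assert execution_payload.block_hash != execution_payload.parent_hash
```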
+### Transitioning the gossip
+
+See gossip transition details found in the [Altair document](../altair/p2p) for
+details on how to handle transitioning gossip topics for the Merge.
+
+## The Req/Resp domain
+
+### Messages
+
+#### BeaconBlocksByRange v2
+
+**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
+
+Request and Response remain unchanged.
+The Merge fork-digest is introduced to the `context` enum to specify the Merge block type.
+
+Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[0]: # (eth2spec: skip)
+
+| `fork_version` | Chunk SSZ type |
+| ------------------------ | -------------------------- |
+| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
+| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
+| `MERGE_FORK_VERSION` | `merge.SignedBeaconBlock` |
+
+#### BeaconBlocksByRoot v2
+
+**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
+
+Request and Response remain unchanged.
+The Merge fork-digest is introduced to the `context` enum to specify the Merge block type.
+
+Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
+
+[1]: # (eth2spec: skip)
+
+| `fork_version` | Chunk SSZ type |
+| ------------------------ | -------------------------- |
+| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
+| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
+| `MERGE_FORK_VERSION` | `merge.SignedBeaconBlock` |
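A small hypothetical helper illustrating the per-`fork_version` dispatch that the two tables describe (not part of the spec):

```python
def chunk_ssz_type_for(fork_version: Version):
    # Hypothetical: map the fork version behind `context` to the chunk SSZ type.
    return {
        GENESIS_FORK_VERSION: phase0.SignedBeaconBlock,
        ALTAIR_FORK_VERSION: altair.SignedBeaconBlock,
        MERGE_FORK_VERSION: merge.SignedBeaconBlock,
    }[fork_version]
```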
@@ -1,7 +1,5 @@
# Ethereum 2.0 The Merge

-**Warning:** This document is currently based on [Phase 0](../phase0/validator.md) but will be rebased to [Altair](../altair/validator.md) once the latter is shipped.
-
**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

@@ -19,7 +17,6 @@
- [Block proposal](#block-proposal)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
- [Execution Payload](#execution-payload)
-- [`get_pow_chain_head`](#get_pow_chain_head)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

@@ -30,9 +27,11 @@ This document represents the changes to be made in the code of an "honest valida

## Prerequisites

-This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden.
+This document is an extension of the [Altair -- Honest Validator](../altair/validator.md) guide.
+All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.

-All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [The Merge](./beacon-chain.md) are requisite for this document and used throughout. Please see related Beacon Chain doc before continuing and use them as a reference throughout.
+All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [The Merge](./beacon-chain.md) are requisite for this document and used throughout.
+Please see related Beacon Chain doc before continuing and use them as a reference throughout.

## Protocols

@@ -63,13 +62,19 @@ All validator responsibilities remain unchanged other than those noted below. Na

##### Execution Payload

-###### `get_pow_chain_head`
-
-Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of the PoW chain. The body of the function is implementation specific.
-
-* Set `block.body.execution_payload = get_execution_payload(state, transition_store, execution_engine)` where:
+* Set `block.body.execution_payload = get_execution_payload(state, transition_store, execution_engine, pow_chain)` where:

```python
+def get_pow_block_at_total_difficulty(total_difficulty: uint256, pow_chain: Sequence[PowBlock]) -> Optional[PowBlock]:
+    # `pow_chain` abstractly represents all blocks in the PoW chain
+    for block in pow_chain:
+        parent = get_pow_block(block.parent_hash)
+        if block.total_difficulty >= total_difficulty and parent.total_difficulty < total_difficulty:
+            return block
+
+    return None
+
+
def compute_randao_mix(state: BeaconState, randao_reveal: BLSSignature) -> Bytes32:
    epoch = get_current_epoch(state)
    return xor(get_randao_mix(state, epoch), hash(randao_reveal))

@@ -87,15 +92,16 @@ def produce_execution_payload(state: BeaconState,
def get_execution_payload(state: BeaconState,
                          transition_store: TransitionStore,
                          randao_reveal: BLSSignature,
-                         execution_engine: ExecutionEngine) -> ExecutionPayload:
+                         execution_engine: ExecutionEngine,
+                         pow_chain: Sequence[PowBlock]) -> ExecutionPayload:
    if not is_merge_complete(state):
-        pow_block = get_pow_chain_head()
-        if not is_valid_terminal_pow_block(transition_store, pow_block):
+        terminal_pow_block = get_pow_block_at_total_difficulty(transition_store.transition_total_difficulty, pow_chain)
+        if terminal_pow_block is None:
            # Pre-merge, empty payload
            return ExecutionPayload()
        else:
            # Signify merge via producing on top of the last PoW block
-            return produce_execution_payload(state, pow_block.block_hash, randao_reveal, execution_engine)
+            return produce_execution_payload(state, terminal_pow_block.block_hash, randao_reveal, execution_engine)

    # Post-merge, normal payload
    parent_hash = state.latest_execution_payload_header.block_hash
@@ -647,6 +647,7 @@ The [IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irt
- `def Aggregate(signatures: Sequence[BLSSignature]) -> BLSSignature`
- `def FastAggregateVerify(pubkeys: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
- `def AggregateVerify(pubkeys: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool`
+- `def KeyValidate(pubkey: BLSPubkey) -> bool`

The above functions are accessed through the `bls` module, e.g. `bls.Verify`.
@ -9,14 +9,18 @@
|
||||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||||
|
|
||||||
- [Introduction](#introduction)
|
- [Introduction](#introduction)
|
||||||
|
- [Glossary](#glossary)
|
||||||
- [Custom types](#custom-types)
|
- [Custom types](#custom-types)
|
||||||
- [Constants](#constants)
|
- [Constants](#constants)
|
||||||
- [Misc](#misc)
|
- [Misc](#misc)
|
||||||
- [Domain types](#domain-types)
|
- [Domain types](#domain-types)
|
||||||
- [Shard Work Status](#shard-work-status)
|
- [Shard Work Status](#shard-work-status)
|
||||||
- [Preset](#preset)
|
|
||||||
- [Misc](#misc-1)
|
- [Misc](#misc-1)
|
||||||
- [Shard block samples](#shard-block-samples)
|
- [Participation flag indices](#participation-flag-indices)
|
||||||
|
- [Incentivization weights](#incentivization-weights)
|
||||||
|
- [Preset](#preset)
|
||||||
|
- [Misc](#misc-2)
|
||||||
|
- [Shard blob samples](#shard-blob-samples)
|
||||||
- [Precomputed size verification points](#precomputed-size-verification-points)
|
- [Precomputed size verification points](#precomputed-size-verification-points)
|
||||||
- [Gwei values](#gwei-values)
|
- [Gwei values](#gwei-values)
|
||||||
- [Configuration](#configuration)
|
- [Configuration](#configuration)
|
||||||
|
@ -25,26 +29,29 @@
|
||||||
- [`BeaconBlockBody`](#beaconblockbody)
|
- [`BeaconBlockBody`](#beaconblockbody)
|
||||||
- [`BeaconState`](#beaconstate)
|
- [`BeaconState`](#beaconstate)
|
||||||
- [New containers](#new-containers)
|
- [New containers](#new-containers)
|
||||||
|
- [`Builder`](#builder)
|
||||||
- [`DataCommitment`](#datacommitment)
|
- [`DataCommitment`](#datacommitment)
|
||||||
|
- [`AttestedDataCommitment`](#attesteddatacommitment)
|
||||||
|
- [ShardBlobBody](#shardblobbody)
|
||||||
- [`ShardBlobBodySummary`](#shardblobbodysummary)
|
- [`ShardBlobBodySummary`](#shardblobbodysummary)
|
||||||
|
- [`ShardBlob`](#shardblob)
|
||||||
- [`ShardBlobHeader`](#shardblobheader)
|
- [`ShardBlobHeader`](#shardblobheader)
|
||||||
|
- [`SignedShardBlob`](#signedshardblob)
|
||||||
- [`SignedShardBlobHeader`](#signedshardblobheader)
|
- [`SignedShardBlobHeader`](#signedshardblobheader)
|
||||||
- [`PendingShardHeader`](#pendingshardheader)
|
- [`PendingShardHeader`](#pendingshardheader)
|
||||||
- [`ShardBlobReference`](#shardblobreference)
|
- [`ShardBlobReference`](#shardblobreference)
|
||||||
- [`SignedShardBlobReference`](#signedshardblobreference)
|
|
||||||
- [`ShardProposerSlashing`](#shardproposerslashing)
|
- [`ShardProposerSlashing`](#shardproposerslashing)
|
||||||
- [`ShardWork`](#shardwork)
|
- [`ShardWork`](#shardwork)
|
||||||
- [Helper functions](#helper-functions)
|
- [Helper functions](#helper-functions)
|
||||||
- [Misc](#misc-2)
|
- [Misc](#misc-3)
|
||||||
- [`next_power_of_two`](#next_power_of_two)
|
- [`next_power_of_two`](#next_power_of_two)
|
||||||
- [`compute_previous_slot`](#compute_previous_slot)
|
- [`compute_previous_slot`](#compute_previous_slot)
|
||||||
- [`compute_updated_gasprice`](#compute_updated_gasprice)
|
- [`compute_updated_sample_price`](#compute_updated_sample_price)
|
||||||
- [`compute_committee_source_epoch`](#compute_committee_source_epoch)
|
- [`compute_committee_source_epoch`](#compute_committee_source_epoch)
|
||||||
|
- [`batch_apply_participation_flag`](#batch_apply_participation_flag)
|
||||||
- [Beacon state accessors](#beacon-state-accessors)
|
- [Beacon state accessors](#beacon-state-accessors)
|
||||||
- [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot)
|
- [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot)
|
||||||
- [`get_active_shard_count`](#get_active_shard_count)
|
- [`get_active_shard_count`](#get_active_shard_count)
|
||||||
- [`get_shard_committee`](#get_shard_committee)
|
|
||||||
- [`compute_proposer_index`](#compute_proposer_index)
|
|
||||||
- [`get_shard_proposer_index`](#get_shard_proposer_index)
|
- [`get_shard_proposer_index`](#get_shard_proposer_index)
|
||||||
- [`get_start_shard`](#get_start_shard)
|
- [`get_start_shard`](#get_start_shard)
|
||||||
- [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
|
- [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
|
||||||
|
@ -56,9 +63,7 @@
|
||||||
- [`process_shard_proposer_slashing`](#process_shard_proposer_slashing)
|
- [`process_shard_proposer_slashing`](#process_shard_proposer_slashing)
|
||||||
- [Epoch transition](#epoch-transition)
|
- [Epoch transition](#epoch-transition)
|
||||||
- [`process_pending_shard_confirmations`](#process_pending_shard_confirmations)
|
- [`process_pending_shard_confirmations`](#process_pending_shard_confirmations)
|
||||||
- [`charge_confirmed_shard_fees`](#charge_confirmed_shard_fees)
|
|
||||||
- [`reset_pending_shard_work`](#reset_pending_shard_work)
|
- [`reset_pending_shard_work`](#reset_pending_shard_work)
|
||||||
- [`process_shard_epoch_increment`](#process_shard_epoch_increment)
|
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
<!-- /TOC -->
|
<!-- /TOC -->
|
||||||
|
@ -70,6 +75,12 @@ This document describes the extensions made to the Phase 0 design of The Beacon
|
||||||
based on the ideas [here](https://hackmd.io/G-Iy5jqyT7CXWEz8Ssos8g) and more broadly [here](https://arxiv.org/abs/1809.09044),
|
based on the ideas [here](https://hackmd.io/G-Iy5jqyT7CXWEz8Ssos8g) and more broadly [here](https://arxiv.org/abs/1809.09044),
|
||||||
using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design.
|
using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design.
|
||||||
|
|
||||||
|
### Glossary

- **Data**: A list of KZG points, the form into which a byte string is translated before being committed to.
- **Blob**: Data with commitments and meta-data, like a flattened bundle of L2 transactions.
- **Builder**: Independent actor that builds blobs and bids for proposal slots via fee-paying blob-headers, responsible for availability.
- **Shard proposer**: Validator taking bids from blob builders for the shard data opportunity, co-signing with the builder to propose the blob.
||||||
|
|
||||||
## Custom types
|
## Custom types
|
||||||
|
|
||||||
|
@ -80,6 +91,7 @@ We define the following Python custom types for type hinting and readability:
|
||||||
| `Shard` | `uint64` | A shard number |
|
| `Shard` | `uint64` | A shard number |
|
||||||
| `BLSCommitment` | `Bytes48` | A G1 curve point |
|
| `BLSCommitment` | `Bytes48` | A G1 curve point |
|
||||||
| `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` |
|
| `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` |
|
||||||
|
| `BuilderIndex` | `uint64` | Builder registry index |
|
||||||
|
|
||||||
## Constants
|
## Constants
|
||||||
|
|
||||||
|
@ -98,8 +110,7 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
|
|
||||||
| Name | Value |
|
| Name | Value |
|
||||||
| - | - |
|
| - | - |
|
||||||
| `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` |
|
| `DOMAIN_SHARD_BLOB` | `DomainType('0x80000000')` |
|
||||||
| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` |
|
|
||||||
|
|
||||||
### Shard Work Status
|
### Shard Work Status
|
||||||
|
|
||||||
|
@ -109,6 +120,30 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
| `SHARD_WORK_CONFIRMED` | `1` | Confirmed, reduced to just the commitment |
|
| `SHARD_WORK_CONFIRMED` | `1` | Confirmed, reduced to just the commitment |
|
||||||
| `SHARD_WORK_PENDING` | `2` | Pending, a list of competing headers |
|
| `SHARD_WORK_PENDING` | `2` | Pending, a list of competing headers |
|
||||||
|
|
||||||
|
### Misc
|
||||||
|
|
||||||
|
TODO: `PARTICIPATION_FLAG_WEIGHTS` backwards-compatibility is difficult, depends on usage.
|
||||||
|
|
||||||
|
| Name | Value |
|
||||||
|
| - | - |
|
||||||
|
| `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT, TIMELY_SHARD_WEIGHT]` |
|
||||||
|
|
||||||
|
### Participation flag indices
|
||||||
|
|
||||||
|
| Name | Value |
|
||||||
|
| - | - |
|
||||||
|
| `TIMELY_SHARD_FLAG_INDEX` | `3` |
|
||||||
|
|
||||||
|
### Incentivization weights
|
||||||
|
|
||||||
|
TODO: determine weight for shard attestations
|
||||||
|
|
||||||
|
| Name | Value |
|
||||||
|
| - | - |
|
||||||
|
| `TIMELY_SHARD_WEIGHT` | `uint64(8)` |
|
||||||
|
|
||||||
|
TODO: `WEIGHT_DENOMINATOR` needs to be adjusted, but this breaks a lot of Altair code.
|
||||||
|
|
||||||
## Preset
|
## Preset
|
||||||
|
|
||||||
### Misc
|
### Misc
|
||||||
|
@ -116,17 +151,19 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
| Name | Value | Notes |
|
| Name | Value | Notes |
|
||||||
| - | - | - |
|
| - | - | - |
|
||||||
| `MAX_SHARDS` | `uint64(2**10)` (= 1,024) | Theoretical max shard count (used to determine data structure sizes) |
|
| `MAX_SHARDS` | `uint64(2**10)` (= 1,024) | Theoretical max shard count (used to determine data structure sizes) |
|
||||||
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* |
|
| `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count |
|
||||||
|
| `SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Sample price may decrease/increase by at most exp(1 / this value) *per epoch* |
|
||||||
| `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block |
|
| `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block |
|
||||||
| `MAX_SHARD_HEADERS_PER_SHARD` | `4` | |
|
| `MAX_SHARD_HEADERS_PER_SHARD` | `4` | |
|
||||||
| `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state |
|
| `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state |
|
||||||
|
| `BLOB_BUILDER_REGISTRY_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | Maximum number of blob builders in the registry |
|
||||||
|
|
||||||
### Shard block samples
|
### Shard blob samples
|
||||||
|
|
||||||
| Name | Value | Notes |
|
| Name | Value | Notes |
|
||||||
| - | - | - |
|
| - | - | - |
|
||||||
| `MAX_SAMPLES_PER_BLOCK` | `uint64(2**11)` (= 2,048) | 248 * 2,048 = 507,904 bytes |
|
| `MAX_SAMPLES_PER_BLOB` | `uint64(2**11)` (= 2,048) | 248 * 2,048 = 507,904 bytes |
|
||||||
| `TARGET_SAMPLES_PER_BLOCK` | `uint64(2**10)` (= 1,024) | 248 * 1,024 = 253,952 bytes |
|
| `TARGET_SAMPLES_PER_BLOB` | `uint64(2**10)` (= 1,024) | 248 * 1,024 = 253,952 bytes |
|
||||||
|
|
||||||
### Precomputed size verification points
|
### Precomputed size verification points
|
||||||
|
|
||||||
|
@ -134,20 +171,19 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
| - | - |
|
| - | - |
|
||||||
| `G1_SETUP` | Type `List[G1]`. The G1-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. |
|
| `G1_SETUP` | Type `List[G1]`. The G1-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. |
|
||||||
| `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]` |
|
| `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]` |
|
||||||
| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // int(MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE), MODULUS)` |
|
| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // int(MAX_SAMPLES_PER_BLOB * POINTS_PER_SAMPLE), MODULUS)` |
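`ROOT_OF_UNITY` is expected to generate a multiplicative subgroup of order `MAX_SAMPLES_PER_BLOB * POINTS_PER_SAMPLE` in the scalar field. A minimal sanity-check sketch (the concrete constants are defined elsewhere in the spec and are not repeated here; the helper name is illustrative):

```python
def is_root_of_unity_of_order(root: int, order: int, modulus: int) -> bool:
    # For the power-of-two orders used here, `root` has exact order `order` iff
    # root**order == 1 and root**(order // 2) != 1 (mod modulus).
    return pow(root, order, modulus) == 1 and pow(root, order // 2, modulus) != 1
```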
|
||||||
|
|
||||||
### Gwei values
|
### Gwei values
|
||||||
|
|
||||||
| Name | Value | Unit | Description |
|
| Name | Value | Unit | Description |
|
||||||
| - | - | - | - |
|
| - | - | - | - |
|
||||||
| `MAX_GASPRICE` | `Gwei(2**33)` (= 8,589,934,592) | Gwei | Max gasprice charged for a TARGET-sized shard block |
|
| `MAX_SAMPLE_PRICE` | `Gwei(2**33)` (= 8,589,934,592) | Gwei | Max sample price charged for a TARGET-sized shard blob |
|
||||||
| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | Min gasprice charged for a TARGET-sized shard block |
|
| `MIN_SAMPLE_PRICE` | `Gwei(2**3)` (= 8) | Gwei | Min sample price charged for a TARGET-sized shard blob |
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
|
||||||
| Name | Value | Notes |
|
Note: Some preset variables may become run-time configurable for testnets, but default to a preset while the spec is unstable.
|
||||||
| - | - | - |
|
E.g. `INITIAL_ACTIVE_SHARDS`, `MAX_SAMPLES_PER_BLOB` and `TARGET_SAMPLES_PER_BLOB`.
|
||||||
| `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count |
|
|
||||||
|
|
||||||
## Updated containers
|
## Updated containers
|
||||||
|
|
||||||
|
@ -164,8 +200,8 @@ class AttestationData(Container):
|
||||||
# FFG vote
|
# FFG vote
|
||||||
source: Checkpoint
|
source: Checkpoint
|
||||||
target: Checkpoint
|
target: Checkpoint
|
||||||
# Shard header root
|
# Hash-tree-root of ShardBlob
|
||||||
shard_header_root: Root # [New in Sharding]
|
shard_blob_root: Root # [New in Sharding]
|
||||||
```
|
```
|
||||||
|
|
||||||
### `BeaconBlockBody`
|
### `BeaconBlockBody`
|
||||||
|
@ -179,21 +215,25 @@ class BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body]
|
||||||
### `BeaconState`
|
### `BeaconState`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class BeaconState(merge.BeaconState): # [extends The Merge state]
|
class BeaconState(merge.BeaconState):
|
||||||
# [Updated fields] (Warning: this changes with Altair, Sharding will rebase to use participation-flags)
|
# Blob builder registry.
|
||||||
previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
|
blob_builders: List[Builder, BLOB_BUILDER_REGISTRY_LIMIT]
|
||||||
current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
|
blob_builder_balances: List[Gwei, BLOB_BUILDER_REGISTRY_LIMIT]
|
||||||
# [New fields]
|
|
||||||
# A ring buffer of the latest slots, with information per active shard.
|
# A ring buffer of the latest slots, with information per active shard.
|
||||||
shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS]
|
shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS]
|
||||||
shard_gasprice: uint64
|
shard_sample_price: uint64
|
||||||
current_epoch_start_shard: Shard
|
|
||||||
```
|
```
|
||||||
|
|
||||||
## New containers
|
## New containers
|
||||||
|
|
||||||
The shard data itself is network-layer only, and can be found in the [P2P specification](./p2p-interface.md).
|
### `Builder`
|
||||||
The beacon chain registers just the commitments of the shard data.
|
|
||||||
|
```python
|
||||||
|
class Builder(Container):
|
||||||
|
pubkey: BLSPubkey
|
||||||
|
# TODO: fields for either an expiry mechanism (refunding execution account with remaining balance)
|
||||||
|
# and/or a builder-transaction mechanism.
|
||||||
|
```
|
||||||
|
|
||||||
### `DataCommitment`
|
### `DataCommitment`
|
||||||
|
|
||||||
|
@ -202,41 +242,117 @@ class DataCommitment(Container):
|
||||||
# KZG10 commitment to the data
|
# KZG10 commitment to the data
|
||||||
point: BLSCommitment
|
point: BLSCommitment
|
||||||
# Length of the data in samples
|
# Length of the data in samples
|
||||||
length: uint64
|
samples_count: uint64
|
||||||
|
```
|
||||||
|
|
||||||
|
### `AttestedDataCommitment`
|
||||||
|
|
||||||
|
```python
|
||||||
|
class AttestedDataCommitment(Container):
|
||||||
|
# KZG10 commitment to the data, and length
|
||||||
|
commitment: DataCommitment
|
||||||
|
# hash_tree_root of the ShardBlobHeader (stored so that attestations can be checked against it)
|
||||||
|
root: Root
|
||||||
|
# The proposer who included the shard-header
|
||||||
|
includer_index: ValidatorIndex
|
||||||
|
```
|
||||||
|
|
||||||
|
### ShardBlobBody
|
||||||
|
|
||||||
|
Unsigned shard data, bundled by a shard-builder.
|
||||||
|
Must be unique: signing different bodies as shard proposer for the same `(slot, shard)` is slashable.
|
||||||
|
|
||||||
|
```python
|
||||||
|
class ShardBlobBody(Container):
|
||||||
|
# The actual data commitment
|
||||||
|
commitment: DataCommitment
|
||||||
|
# Proof that the degree < commitment.samples_count * POINTS_PER_SAMPLE
|
||||||
|
degree_proof: BLSCommitment
|
||||||
|
# The actual data. Should match the commitment and degree proof.
|
||||||
|
data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOB]
|
||||||
|
# Latest block root of the Beacon Chain, before shard_blob.slot
|
||||||
|
beacon_block_root: Root
|
||||||
|
# fee payment fields (EIP 1559 like)
|
||||||
|
# TODO: express in MWei instead?
|
||||||
|
max_priority_fee_per_sample: Gwei
|
||||||
|
max_fee_per_sample: Gwei
|
||||||
```
|
```
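The two fee fields feed the EIP-1559-like accounting applied later in `process_shard_header`: the base fee per sample is burned, and anything left under the builder's caps goes to the shard proposer as a priority fee. A rough numeric sketch, with all values assumed purely for illustration:

```python
# Illustrative values only; real values come from the blob body and the beacon state.
samples = 1024                   # commitment.samples_count
shard_sample_price = 8           # state.shard_sample_price, in Gwei per sample
max_fee_per_sample = 12          # builder's total cap, from the blob body
max_priority_fee_per_sample = 2  # builder's tip cap, from the blob body

max_fee = max_fee_per_sample * samples                     # 12,288 Gwei: builder balance must cover this
base_fee = shard_sample_price * samples                     # 8,192 Gwei: burned
priority_fee = min(max_fee - base_fee,
                   max_priority_fee_per_sample * samples)   # 2,048 Gwei: paid to the shard proposer
builder_charge = base_fee + priority_fee                    # 10,240 Gwei deducted from the builder
```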
|
||||||
|
|
||||||
### `ShardBlobBodySummary`
|
### `ShardBlobBodySummary`
|
||||||
|
|
||||||
|
Summary version of the `ShardBlobBody`, omitting the data payload, while preserving the data-commitments.
|
||||||
|
|
||||||
|
The commitments are not further collapsed to a single hash,
|
||||||
|
to avoid an extra network roundtrip between proposer and builder, to include the header on-chain more quickly.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class ShardBlobBodySummary(Container):
|
class ShardBlobBodySummary(Container):
|
||||||
# The actual data commitment
|
# The actual data commitment
|
||||||
commitment: DataCommitment
|
commitment: DataCommitment
|
||||||
# Proof that the degree < commitment.length
|
# Proof that the degree < commitment.samples_count * POINTS_PER_SAMPLE
|
||||||
degree_proof: BLSCommitment
|
degree_proof: BLSCommitment
|
||||||
# Hash-tree-root as summary of the data field
|
# Hash-tree-root as summary of the data field
|
||||||
data_root: Root
|
data_root: Root
|
||||||
# Latest block root of the Beacon Chain, before shard_blob.slot
|
# Latest block root of the Beacon Chain, before shard_blob.slot
|
||||||
beacon_block_root: Root
|
beacon_block_root: Root
|
||||||
|
# fee payment fields (EIP 1559 like)
|
||||||
|
# TODO: express in MWei instead?
|
||||||
|
max_priority_fee_per_sample: Gwei
|
||||||
|
max_fee_per_sample: Gwei
|
||||||
|
```
|
||||||
|
|
||||||
|
### `ShardBlob`
|
||||||
|
|
||||||
|
`ShardBlobBody` wrapped with the header data that is unique to the shard blob proposal.
|
||||||
|
|
||||||
|
```python
|
||||||
|
class ShardBlob(Container):
|
||||||
|
slot: Slot
|
||||||
|
shard: Shard
|
||||||
|
# Builder of the data, pays data-fee to proposer
|
||||||
|
builder_index: BuilderIndex
|
||||||
|
# Proposer of the shard-blob
|
||||||
|
proposer_index: ValidatorIndex
|
||||||
|
# Blob contents
|
||||||
|
body: ShardBlobBody
|
||||||
```
|
```
|
||||||
|
|
||||||
### `ShardBlobHeader`
|
### `ShardBlobHeader`
|
||||||
|
|
||||||
|
Header version of `ShardBlob`.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class ShardBlobHeader(Container):
|
class ShardBlobHeader(Container):
|
||||||
# Slot and shard that this header is intended for
|
|
||||||
slot: Slot
|
slot: Slot
|
||||||
shard: Shard
|
shard: Shard
|
||||||
# SSZ-summary of ShardBlobBody
|
# Builder of the data, pays data-fee to proposer
|
||||||
body_summary: ShardBlobBodySummary
|
builder_index: BuilderIndex
|
||||||
# Proposer of the shard-blob
|
# Proposer of the shard-blob
|
||||||
proposer_index: ValidatorIndex
|
proposer_index: ValidatorIndex
|
||||||
|
# Blob contents, without the full data
|
||||||
|
body_summary: ShardBlobBodySummary
|
||||||
|
```
|
||||||
|
|
||||||
|
### `SignedShardBlob`
|
||||||
|
|
||||||
|
Full blob data, signed by the shard builder (ensuring fee payment) and shard proposer (ensuring a single proposal).
|
||||||
|
|
||||||
|
```python
|
||||||
|
class SignedShardBlob(Container):
|
||||||
|
message: ShardBlob
|
||||||
|
signature: BLSSignature
|
||||||
```
|
```
|
||||||
|
|
||||||
### `SignedShardBlobHeader`
|
### `SignedShardBlobHeader`
|
||||||
|
|
||||||
|
Header of the blob, the signature is equally applicable to `SignedShardBlob`.
|
||||||
|
Shard proposers can accept `SignedShardBlobHeader` as a data-transaction by co-signing the header.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class SignedShardBlobHeader(Container):
|
class SignedShardBlobHeader(Container):
|
||||||
message: ShardBlobHeader
|
message: ShardBlobHeader
|
||||||
|
# Signature by builder.
|
||||||
|
# Once accepted by the proposer, the signature is the aggregate of both.
|
||||||
signature: BLSSignature
|
signature: BLSSignature
|
||||||
```
|
```
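A sketch of the intended two-step signing flow; the helper names and key handling below are illustrative only and not part of the spec:

```python
def builder_sign_header(header: ShardBlobHeader, builder_privkey: int, state: BeaconState) -> SignedShardBlobHeader:
    # The builder signs the header as a fee-paying bid for the (slot, shard) opportunity.
    signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_BLOB))
    return SignedShardBlobHeader(message=header, signature=bls.Sign(builder_privkey, signing_root))


def proposer_accept_bid(bid: SignedShardBlobHeader, proposer_privkey: int, state: BeaconState) -> SignedShardBlobHeader:
    # The proposer co-signs; the resulting signature is the aggregate of builder and proposer
    # signatures, matching the FastAggregateVerify check in process_shard_header.
    signing_root = compute_signing_root(bid.message, get_domain(state, DOMAIN_SHARD_BLOB))
    proposer_signature = bls.Sign(proposer_privkey, signing_root)
    return SignedShardBlobHeader(
        message=bid.message,
        signature=bls.Aggregate([bid.signature, proposer_signature]),
    )
```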
|
||||||
|
|
||||||
|
@ -244,10 +360,8 @@ class SignedShardBlobHeader(Container):
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class PendingShardHeader(Container):
|
class PendingShardHeader(Container):
|
||||||
# KZG10 commitment to the data
|
# The commitment that is attested
|
||||||
commitment: DataCommitment
|
attested: AttestedDataCommitment
|
||||||
# hash_tree_root of the ShardHeader (stored so that attestations can be checked against it)
|
|
||||||
root: Root
|
|
||||||
# Who voted for the header
|
# Who voted for the header
|
||||||
votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
|
votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
|
||||||
# Sum of effective balances of votes
|
# Sum of effective balances of votes
|
||||||
|
@ -258,41 +372,43 @@ class PendingShardHeader(Container):
|
||||||
|
|
||||||
### `ShardBlobReference`
|
### `ShardBlobReference`
|
||||||
|
|
||||||
|
Reference version of `ShardBlobHeader`, substituting the body for just a hash-tree-root.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class ShardBlobReference(Container):
|
class ShardBlobReference(Container):
|
||||||
# Slot and shard that this reference is intended for
|
|
||||||
slot: Slot
|
slot: Slot
|
||||||
shard: Shard
|
shard: Shard
|
||||||
# Hash-tree-root of ShardBlobBody
|
# Builder of the data
|
||||||
body_root: Root
|
builder_index: BuilderIndex
|
||||||
# Proposer of the shard-blob
|
# Proposer of the shard-blob
|
||||||
proposer_index: ValidatorIndex
|
proposer_index: ValidatorIndex
|
||||||
```
|
# Blob hash-tree-root for slashing reference
|
||||||
|
body_root: Root
|
||||||
### `SignedShardBlobReference`
|
|
||||||
|
|
||||||
```python
|
|
||||||
class SignedShardBlobReference(Container):
|
|
||||||
message: ShardBlobReference
|
|
||||||
signature: BLSSignature
|
|
||||||
```
|
```
|
||||||
|
|
||||||
### `ShardProposerSlashing`
|
### `ShardProposerSlashing`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class ShardProposerSlashing(Container):
|
class ShardProposerSlashing(Container):
|
||||||
signed_reference_1: SignedShardBlobReference
|
slot: Slot
|
||||||
signed_reference_2: SignedShardBlobReference
|
shard: Shard
|
||||||
|
proposer_index: ValidatorIndex
|
||||||
|
builder_index_1: BuilderIndex
|
||||||
|
builder_index_2: BuilderIndex
|
||||||
|
body_root_1: Root
|
||||||
|
body_root_2: Root
|
||||||
|
signature_1: BLSSignature
|
||||||
|
signature_2: BLSSignature
|
||||||
```
|
```
|
||||||
|
|
||||||
### `ShardWork`
|
### `ShardWork`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class ShardWork(Container):
|
class ShardWork(Container):
|
||||||
# Upon confirmation the data is reduced to just the header.
|
# Upon confirmation the data is reduced to just the commitment.
|
||||||
status: Union[ # See Shard Work Status enum
|
status: Union[ # See Shard Work Status enum
|
||||||
None, # SHARD_WORK_UNCONFIRMED
|
None, # SHARD_WORK_UNCONFIRMED
|
||||||
DataCommitment, # SHARD_WORK_CONFIRMED
|
AttestedDataCommitment, # SHARD_WORK_CONFIRMED
|
||||||
List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # SHARD_WORK_PENDING
|
List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # SHARD_WORK_PENDING
|
||||||
]
|
]
|
||||||
```
|
```
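For readers less familiar with the SSZ `Union` pattern, a small illustrative sketch of how a consumer would branch on `ShardWork.status` (the helper name is hypothetical):

```python
def describe_shard_work(work: ShardWork) -> str:
    # Selector values correspond to the Shard Work Status table above.
    if work.status.selector == SHARD_WORK_UNCONFIRMED:
        return "unconfirmed"
    if work.status.selector == SHARD_WORK_CONFIRMED:
        attested = work.status.value  # AttestedDataCommitment
        return f"confirmed ({attested.commitment.samples_count} samples)"
    headers = work.status.value  # List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD]
    return f"pending ({len(headers)} competing headers)"
```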
|
||||||
|
@ -318,18 +434,17 @@ def compute_previous_slot(slot: Slot) -> Slot:
|
||||||
return Slot(0)
|
return Slot(0)
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `compute_updated_gasprice`
|
#### `compute_updated_sample_price`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64, adjustment_quotient: uint64) -> Gwei:
|
def compute_updated_sample_price(prev_price: Gwei, samples_length: uint64, active_shards: uint64) -> Gwei:
|
||||||
if shard_block_length > TARGET_SAMPLES_PER_BLOCK:
|
adjustment_quotient = active_shards * SLOTS_PER_EPOCH * SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT
|
||||||
delta = max(1, prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOCK)
|
if samples_length > TARGET_SAMPLES_PER_BLOB:
|
||||||
// TARGET_SAMPLES_PER_BLOCK // adjustment_quotient)
|
delta = max(1, prev_price * (samples_length - TARGET_SAMPLES_PER_BLOB) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
|
||||||
return min(prev_gasprice + delta, MAX_GASPRICE)
|
return min(prev_price + delta, MAX_SAMPLE_PRICE)
|
||||||
else:
|
else:
|
||||||
delta = max(1, prev_gasprice * (TARGET_SAMPLES_PER_BLOCK - shard_block_length)
|
delta = max(1, prev_price * (TARGET_SAMPLES_PER_BLOB - samples_length) // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)
|
||||||
// TARGET_SAMPLES_PER_BLOCK // adjustment_quotient)
|
return max(prev_price, MIN_SAMPLE_PRICE + delta) - delta
|
||||||
return max(prev_gasprice, MIN_GASPRICE + delta) - delta
|
|
||||||
```
|
```
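The `adjustment_quotient` ties the price drift to the epoch: with one maximally full blob on every shard for a whole epoch, the sample price rises by at most roughly `exp(1 / SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT)` (and falls symmetrically for empty blobs). A quick numeric check under assumed mainnet-preset values:

```python
import math

# Assumed preset/config values for illustration (see the Misc and Shard blob samples tables).
SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT = 8
SLOTS_PER_EPOCH = 32
ACTIVE_SHARDS = 64
TARGET_SAMPLES_PER_BLOB = 1024
MAX_SAMPLES_PER_BLOB = 2048

adjustment_quotient = ACTIVE_SHARDS * SLOTS_PER_EPOCH * SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT
price = 1_000_000_000  # Gwei, arbitrary starting point
for _ in range(ACTIVE_SHARDS * SLOTS_PER_EPOCH):  # one maximally full blob per shard per slot
    excess = MAX_SAMPLES_PER_BLOB - TARGET_SAMPLES_PER_BLOB
    price += max(1, price * excess // TARGET_SAMPLES_PER_BLOB // adjustment_quotient)

print(price / 1_000_000_000)                              # ~1.13x after one epoch
print(math.exp(1 / SAMPLE_PRICE_ADJUSTMENT_COEFFICIENT))  # ~1.1331, the approximate upper bound
```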
|
||||||
|
|
||||||
#### `compute_committee_source_epoch`
|
#### `compute_committee_source_epoch`
|
||||||
|
@ -345,6 +460,20 @@ def compute_committee_source_epoch(epoch: Epoch, period: uint64) -> Epoch:
|
||||||
return source_epoch
|
return source_epoch
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### `batch_apply_participation_flag`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def batch_apply_participation_flag(state: BeaconState, bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE],
|
||||||
|
epoch: Epoch, full_committee: Sequence[ValidatorIndex], flag_index: int):
|
||||||
|
if epoch == get_current_epoch(state):
|
||||||
|
epoch_participation = state.current_epoch_participation
|
||||||
|
else:
|
||||||
|
epoch_participation = state.previous_epoch_participation
|
||||||
|
for bit, index in zip(bits, full_committee):
|
||||||
|
if bit:
|
||||||
|
epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
|
||||||
|
```
|
||||||
|
|
||||||
### Beacon state accessors
|
### Beacon state accessors
|
||||||
|
|
||||||
#### Updated `get_committee_count_per_slot`
|
#### Updated `get_committee_count_per_slot`
|
||||||
|
@ -371,52 +500,6 @@ def get_active_shard_count(state: BeaconState, epoch: Epoch) -> uint64:
|
||||||
return INITIAL_ACTIVE_SHARDS
|
return INITIAL_ACTIVE_SHARDS
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `get_shard_committee`
|
|
||||||
|
|
||||||
```python
|
|
||||||
def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
|
|
||||||
"""
|
|
||||||
Return the shard committee of the given ``epoch`` of the given ``shard``.
|
|
||||||
"""
|
|
||||||
source_epoch = compute_committee_source_epoch(epoch, SHARD_COMMITTEE_PERIOD)
|
|
||||||
active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
|
|
||||||
seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE)
|
|
||||||
return compute_committee(
|
|
||||||
indices=active_validator_indices,
|
|
||||||
seed=seed,
|
|
||||||
index=shard,
|
|
||||||
count=get_active_shard_count(beacon_state, epoch),
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `compute_proposer_index`
|
|
||||||
|
|
||||||
Updated version to get a proposer index that will only allow proposers with a certain minimum balance,
|
|
||||||
ensuring that the balance is always sufficient to cover gas costs.
|
|
||||||
|
|
||||||
```python
|
|
||||||
def compute_proposer_index(beacon_state: BeaconState,
|
|
||||||
indices: Sequence[ValidatorIndex],
|
|
||||||
seed: Bytes32,
|
|
||||||
min_effective_balance: Gwei = Gwei(0)) -> ValidatorIndex:
|
|
||||||
"""
|
|
||||||
Return from ``indices`` a random index sampled by effective balance.
|
|
||||||
"""
|
|
||||||
assert len(indices) > 0
|
|
||||||
MAX_RANDOM_BYTE = 2**8 - 1
|
|
||||||
i = uint64(0)
|
|
||||||
total = uint64(len(indices))
|
|
||||||
while True:
|
|
||||||
candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
|
|
||||||
random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
|
||||||
effective_balance = beacon_state.validators[candidate_index].effective_balance
|
|
||||||
if effective_balance <= min_effective_balance:
|
|
||||||
continue
|
|
||||||
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
|
|
||||||
return candidate_index
|
|
||||||
i += 1
|
|
||||||
```
|
|
||||||
|
|
||||||
#### `get_shard_proposer_index`
|
#### `get_shard_proposer_index`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -425,19 +508,9 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard
|
||||||
Return the proposer's index of shard block at ``slot``.
|
Return the proposer's index of shard block at ``slot``.
|
||||||
"""
|
"""
|
||||||
epoch = compute_epoch_at_slot(slot)
|
epoch = compute_epoch_at_slot(slot)
|
||||||
committee = get_shard_committee(beacon_state, epoch, shard)
|
seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_BLOB) + uint_to_bytes(slot) + uint_to_bytes(shard))
|
||||||
seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER) + uint_to_bytes(slot))
|
indices = get_active_validator_indices(beacon_state, epoch)
|
||||||
|
return compute_proposer_index(beacon_state, indices, seed)
|
||||||
# Proposer must have sufficient balance to pay for worst case fee burn
|
|
||||||
EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = (
|
|
||||||
EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT
|
|
||||||
* HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT
|
|
||||||
)
|
|
||||||
min_effective_balance = (
|
|
||||||
beacon_state.shard_gasprice * MAX_SAMPLES_PER_BLOCK // TARGET_SAMPLES_PER_BLOCK
|
|
||||||
+ EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION
|
|
||||||
)
|
|
||||||
return compute_proposer_index(beacon_state, committee, seed, min_effective_balance)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `get_start_shard`
|
#### `get_start_shard`
|
||||||
|
@ -447,22 +520,10 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard:
|
||||||
"""
|
"""
|
||||||
Return the start shard at ``slot``.
|
Return the start shard at ``slot``.
|
||||||
"""
|
"""
|
||||||
current_epoch_start_slot = compute_start_slot_at_epoch(get_current_epoch(state))
|
epoch = compute_epoch_at_slot(slot)
|
||||||
shard = state.current_epoch_start_shard
|
committee_count = get_committee_count_per_slot(state, epoch)
|
||||||
if slot > current_epoch_start_slot:
|
active_shard_count = get_active_shard_count(state, epoch)
|
||||||
# Current epoch or the next epoch lookahead
|
return committee_count * slot % active_shard_count
|
||||||
for _slot in range(current_epoch_start_slot, slot):
|
|
||||||
committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot)))
|
|
||||||
active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot)))
|
|
||||||
shard = (shard + committee_count) % active_shard_count
|
|
||||||
elif slot < current_epoch_start_slot:
|
|
||||||
# Previous epoch
|
|
||||||
for _slot in list(range(slot, current_epoch_start_slot))[::-1]:
|
|
||||||
committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot)))
|
|
||||||
active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot)))
|
|
||||||
# Ensure positive
|
|
||||||
shard = (shard + active_shard_count - committee_count) % active_shard_count
|
|
||||||
return Shard(shard)
|
|
||||||
```
|
```
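A worked example of the simplified rotation above, with illustrative (non-normative) numbers:

```python
# With 4 committees per slot and 64 active shards (illustrative values),
# the start shard advances by 4 every slot and wraps around at 64.
committee_count, active_shard_count = 4, 64
assert committee_count * 17 % active_shard_count == 4   # slot 17 starts at shard 4
assert committee_count * 16 % active_shard_count == 0   # slot 16 wraps back to shard 0
```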
|
||||||
|
|
||||||
#### `compute_shard_from_committee_index`
|
#### `compute_shard_from_committee_index`
|
||||||
|
@ -494,9 +555,9 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||||
process_randao(state, block.body)
|
process_randao(state, block.body)
|
||||||
process_eth1_data(state, block.body)
|
process_eth1_data(state, block.body)
|
||||||
process_operations(state, block.body) # [Modified in Sharding]
|
process_operations(state, block.body) # [Modified in Sharding]
|
||||||
# Pre-merge, skip execution payload processing
|
process_sync_aggregate(state, block.body.sync_aggregate)
|
||||||
if is_execution_enabled(state, block):
|
# is_execution_enabled is omitted, execution is enabled by default.
|
||||||
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [New in Merge]
|
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)
|
||||||
```
|
```
|
||||||
|
|
||||||
#### Operations
|
#### Operations
|
||||||
|
@ -514,45 +575,67 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||||
for_ops(body.attester_slashings, process_attester_slashing)
|
for_ops(body.attester_slashings, process_attester_slashing)
|
||||||
# New shard proposer slashing processing
|
# New shard proposer slashing processing
|
||||||
for_ops(body.shard_proposer_slashings, process_shard_proposer_slashing)
|
for_ops(body.shard_proposer_slashings, process_shard_proposer_slashing)
|
||||||
# Limit is dynamic based on active shard count
|
|
||||||
|
# Limit is dynamic: based on active shard count
|
||||||
assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state))
|
assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state))
|
||||||
for_ops(body.shard_headers, process_shard_header)
|
for_ops(body.shard_headers, process_shard_header)
|
||||||
|
|
||||||
# New attestation processing
|
# New attestation processing
|
||||||
for_ops(body.attestations, process_attestation)
|
for_ops(body.attestations, process_attestation)
|
||||||
for_ops(body.deposits, process_deposit)
|
for_ops(body.deposits, process_deposit)
|
||||||
for_ops(body.voluntary_exits, process_voluntary_exit)
|
for_ops(body.voluntary_exits, process_voluntary_exit)
|
||||||
|
|
||||||
|
# TODO: to avoid parallel shards racing, and avoid inclusion-order problems,
|
||||||
|
# update the fee price per slot, instead of per header.
|
||||||
|
# state.shard_sample_price = compute_updated_sample_price(state.shard_sample_price, ?, shard_count)
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Extended Attestation processing
|
##### Extended Attestation processing
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||||
phase0.process_attestation(state, attestation)
|
altair.process_attestation(state, attestation)
|
||||||
update_pending_shard_work(state, attestation)
|
process_attested_shard_work(state, attestation)
|
||||||
```
|
```
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> None:
|
def process_attested_shard_work(state: BeaconState, attestation: Attestation) -> None:
|
||||||
attestation_shard = compute_shard_from_committee_index(
|
attestation_shard = compute_shard_from_committee_index(
|
||||||
state,
|
state,
|
||||||
attestation.data.slot,
|
attestation.data.slot,
|
||||||
attestation.data.index,
|
attestation.data.index,
|
||||||
)
|
)
|
||||||
|
full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
|
||||||
|
|
||||||
buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS
|
buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS
|
||||||
committee_work = state.shard_buffer[buffer_index][attestation_shard]
|
committee_work = state.shard_buffer[buffer_index][attestation_shard]
|
||||||
|
|
||||||
# Skip attestation vote accounting if the header is not pending
|
# Skip attestation vote accounting if the header is not pending
|
||||||
if committee_work.status.selector != SHARD_WORK_PENDING:
|
if committee_work.status.selector != SHARD_WORK_PENDING:
|
||||||
# TODO In Altair: set participation bit flag, if attestation matches winning header.
|
# If the data was already confirmed, check if this matches, to apply the flag to the attesters.
|
||||||
|
if committee_work.status.selector == SHARD_WORK_CONFIRMED:
|
||||||
|
attested: AttestedDataCommitment = committee_work.status.value
|
||||||
|
if attested.root == attestation.data.shard_blob_root:
|
||||||
|
batch_apply_participation_flag(state, attestation.aggregation_bits,
|
||||||
|
attestation.data.target.epoch,
|
||||||
|
full_committee, TIMELY_SHARD_FLAG_INDEX)
|
||||||
return
|
return
|
||||||
|
|
||||||
current_headers: Sequence[PendingShardHeader] = committee_work.status.value
|
current_headers: Sequence[PendingShardHeader] = committee_work.status.value
|
||||||
|
|
||||||
# Find the corresponding header, abort if it cannot be found
|
# Find the corresponding header, abort if it cannot be found
|
||||||
header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root)
|
header_index = len(current_headers)
|
||||||
|
for i, header in enumerate(current_headers):
|
||||||
|
if attestation.data.shard_blob_root == header.attested.root:
|
||||||
|
header_index = i
|
||||||
|
break
|
||||||
|
|
||||||
|
# Attestations for an unknown header do not count towards shard confirmations, but can otherwise be valid.
|
||||||
|
if header_index == len(current_headers):
|
||||||
|
# Note: Attestations may be re-included if headers are included late.
|
||||||
|
return
|
||||||
|
|
||||||
pending_header: PendingShardHeader = current_headers[header_index]
|
pending_header: PendingShardHeader = current_headers[header_index]
|
||||||
full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
|
|
||||||
|
|
||||||
# The weight may be outdated if it is not the initial weight, and from a previous epoch
|
# The weight may be outdated if it is not the initial weight, and from a previous epoch
|
||||||
if pending_header.weight != 0 and compute_epoch_at_slot(pending_header.update_slot) < get_current_epoch(state):
|
if pending_header.weight != 0 and compute_epoch_at_slot(pending_header.update_slot) < get_current_epoch(state):
|
||||||
|
@ -573,8 +656,11 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N
|
||||||
|
|
||||||
# Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting
|
# Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting
|
||||||
if pending_header.weight * 3 >= full_committee_balance * 2:
|
if pending_header.weight * 3 >= full_committee_balance * 2:
|
||||||
# TODO In Altair: set participation bit flag for voters of this early winning header
|
# participants of the winning header are remembered with participation flags
|
||||||
if pending_header.commitment == DataCommitment():
|
batch_apply_participation_flag(state, pending_header.votes, attestation.data.target.epoch,
|
||||||
|
full_committee, TIMELY_SHARD_FLAG_INDEX)
|
||||||
|
|
||||||
|
if pending_header.attested.commitment == DataCommitment():
|
||||||
# The committee voted to not confirm anything
|
# The committee voted to not confirm anything
|
||||||
state.shard_buffer[buffer_index][attestation_shard].status.change(
|
state.shard_buffer[buffer_index][attestation_shard].status.change(
|
||||||
selector=SHARD_WORK_UNCONFIRMED,
|
selector=SHARD_WORK_UNCONFIRMED,
|
||||||
|
@ -583,7 +669,7 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N
|
||||||
else:
|
else:
|
||||||
state.shard_buffer[buffer_index][attestation_shard].status.change(
|
state.shard_buffer[buffer_index][attestation_shard].status.change(
|
||||||
selector=SHARD_WORK_CONFIRMED,
|
selector=SHARD_WORK_CONFIRMED,
|
||||||
value=pending_header.commitment,
|
value=pending_header.attested,
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -591,49 +677,89 @@ def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> N
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None:
|
def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None:
|
||||||
header = signed_header.message
|
header: ShardBlobHeader = signed_header.message
|
||||||
|
slot = header.slot
|
||||||
|
shard = header.shard
|
||||||
|
|
||||||
# Verify the header is not 0, and not from the future.
|
# Verify the header is not 0, and not from the future.
|
||||||
assert Slot(0) < header.slot <= state.slot
|
assert Slot(0) < slot <= state.slot
|
||||||
header_epoch = compute_epoch_at_slot(header.slot)
|
header_epoch = compute_epoch_at_slot(slot)
|
||||||
# Verify that the header is within the processing time window
|
# Verify that the header is within the processing time window
|
||||||
assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)]
|
assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)]
|
||||||
# Verify that the shard is active
|
# Verify that the shard is valid
|
||||||
assert header.shard < get_active_shard_count(state, header_epoch)
|
shard_count = get_active_shard_count(state, header_epoch)
|
||||||
|
assert shard < shard_count
|
||||||
|
# Verify that a committee is able to attest this (slot, shard)
|
||||||
|
start_shard = get_start_shard(state, slot)
|
||||||
|
committee_index = (shard_count + shard - start_shard) % shard_count
|
||||||
|
committees_per_slot = get_committee_count_per_slot(state, header_epoch)
|
||||||
|
assert committee_index <= committees_per_slot
|
||||||
|
|
||||||
# Verify that the block root matches,
|
# Verify that the block root matches,
|
||||||
# to ensure the header will only be included in this specific Beacon Chain sub-tree.
|
# to ensure the header will only be included in this specific Beacon Chain sub-tree.
|
||||||
assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1)
|
assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, slot - 1)
|
||||||
|
|
||||||
# Check that this data is still pending
|
# Check that this data is still pending
|
||||||
committee_work = state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard]
|
committee_work = state.shard_buffer[slot % SHARD_STATE_MEMORY_SLOTS][shard]
|
||||||
assert committee_work.status.selector == SHARD_WORK_PENDING
|
assert committee_work.status.selector == SHARD_WORK_PENDING
|
||||||
|
|
||||||
# Check that this header is not yet in the pending list
|
# Check that this header is not yet in the pending list
|
||||||
current_headers: List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] = committee_work.status.value
|
current_headers: List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] = committee_work.status.value
|
||||||
header_root = hash_tree_root(header)
|
header_root = hash_tree_root(header)
|
||||||
assert header_root not in [pending_header.root for pending_header in current_headers]
|
assert header_root not in [pending_header.attested.root for pending_header in current_headers]
|
||||||
|
|
||||||
# Verify proposer
|
# Verify proposer matches
|
||||||
assert header.proposer_index == get_shard_proposer_index(state, header.slot, header.shard)
|
assert header.proposer_index == get_shard_proposer_index(state, slot, shard)
|
||||||
# Verify signature
|
|
||||||
signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_PROPOSER))
|
# Verify builder and proposer aggregate signature
|
||||||
assert bls.Verify(state.validators[header.proposer_index].pubkey, signing_root, signed_header.signature)
|
blob_signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_BLOB))
|
||||||
|
builder_pubkey = state.blob_builders[header.builder_index].pubkey
|
||||||
|
proposer_pubkey = state.validators[header.proposer_index].pubkey
|
||||||
|
assert bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_header.signature)
|
||||||
|
|
||||||
# Verify the length by verifying the degree.
|
# Verify the length by verifying the degree.
|
||||||
body_summary = header.body_summary
|
body_summary = header.body_summary
|
||||||
if body_summary.commitment.length == 0:
|
points_count = body_summary.commitment.samples_count * POINTS_PER_SAMPLE
|
||||||
|
if points_count == 0:
|
||||||
assert body_summary.degree_proof == G1_SETUP[0]
|
assert body_summary.degree_proof == G1_SETUP[0]
|
||||||
assert (
|
assert (
|
||||||
bls.Pairing(body_summary.degree_proof, G2_SETUP[0])
|
bls.Pairing(body_summary.degree_proof, G2_SETUP[0])
|
||||||
== bls.Pairing(body_summary.commitment.point, G2_SETUP[-body_summary.commitment.length])
|
== bls.Pairing(body_summary.commitment.point, G2_SETUP[-points_count])
|
||||||
)
|
)
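# Why the pairing check works: it asserts
#   e(degree_proof, [1]_2) == e(commitment.point, [s^(len(G2_SETUP) - points_count)]_2),
# i.e. degree_proof is a commitment to B(s) * s^(len(G2_SETUP) - points_count). Such a proof can
# only be computed from the G1 trusted setup when deg(B) < points_count, which is exactly the
# length bound being enforced.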
|
||||||
|
|
||||||
|
# Charge EIP 1559 fee, builder pays for opportunity, and is responsible for later availability,
|
||||||
|
# or fail to publish at their own expense.
|
||||||
|
samples = body_summary.commitment.samples_count
|
||||||
|
# TODO: overflows, need bigger int type
|
||||||
|
max_fee = body_summary.max_fee_per_sample * samples
|
||||||
|
|
||||||
|
# Builder must have sufficient balance, even if max_fee is not completely utilized
|
||||||
|
assert state.blob_builder_balances[header.builder_index] >= max_fee
|
||||||
|
|
||||||
|
base_fee = state.shard_sample_price * samples
|
||||||
|
# Base fee must be paid
|
||||||
|
assert max_fee >= base_fee
|
||||||
|
|
||||||
|
# Remaining fee goes towards proposer for prioritizing, up to a maximum
|
||||||
|
max_priority_fee = body_summary.max_priority_fee_per_sample * samples
|
||||||
|
priority_fee = min(max_fee - base_fee, max_priority_fee)
|
||||||
|
|
||||||
|
# Burn base fee, take priority fee
|
||||||
|
# priority_fee <= max_fee - base_fee, thus priority_fee + base_fee <= max_fee, thus sufficient balance.
|
||||||
|
state.blob_builder_balances[header.builder_index] -= base_fee + priority_fee
|
||||||
|
# Pay out priority fee
|
||||||
|
increase_balance(state, header.proposer_index, priority_fee)
|
||||||
|
|
||||||
# Initialize the pending header
|
# Initialize the pending header
|
||||||
index = compute_committee_index_from_shard(state, header.slot, header.shard)
|
index = compute_committee_index_from_shard(state, slot, shard)
|
||||||
committee_length = len(get_beacon_committee(state, header.slot, index))
|
committee_length = len(get_beacon_committee(state, slot, index))
|
||||||
initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length)
|
initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length)
|
||||||
pending_header = PendingShardHeader(
|
pending_header = PendingShardHeader(
|
||||||
|
attested=AttestedDataCommitment(
|
||||||
commitment=body_summary.commitment,
|
commitment=body_summary.commitment,
|
||||||
root=header_root,
|
root=header_root,
|
||||||
|
includer_index=get_beacon_proposer_index(state),
|
||||||
|
)
|
||||||
votes=initial_votes,
|
votes=initial_votes,
|
||||||
weight=0,
|
weight=0,
|
||||||
update_slot=state.slot,
|
update_slot=state.slot,
|
||||||
|
@ -652,27 +778,36 @@ The goal is to ensure that a proof can only be constructed if `deg(B) < l` (ther
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None:
|
def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None:
|
||||||
reference_1 = proposer_slashing.signed_reference_1.message
|
slot = proposer_slashing.slot
|
||||||
reference_2 = proposer_slashing.signed_reference_2.message
|
shard = proposer_slashing.shard
|
||||||
|
proposer_index = proposer_slashing.proposer_index
|
||||||
|
|
||||||
# Verify header slots match
|
reference_1 = ShardBlobReference(slot=slot, shard=shard,
|
||||||
assert reference_1.slot == reference_2.slot
|
proposer_index=proposer_index,
|
||||||
# Verify header shards match
|
builder_index=proposer_slashing.builder_index_1,
|
||||||
assert reference_1.shard == reference_2.shard
|
body_root=proposer_slashing.body_root_1)
|
||||||
# Verify header proposer indices match
|
reference_2 = ShardBlobReference(slot=slot, shard=shard,
|
||||||
assert reference_1.proposer_index == reference_2.proposer_index
|
proposer_index=proposer_index,
|
||||||
# Verify the headers are different (i.e. different body)
|
builder_index=proposer_slashing.builder_index_2,
|
||||||
|
body_root=proposer_slashing.body_root_2)
|
||||||
|
|
||||||
|
# Verify the signed messages are different
|
||||||
assert reference_1 != reference_2
|
assert reference_1 != reference_2
|
||||||
# Verify the proposer is slashable
|
|
||||||
proposer = state.validators[reference_1.proposer_index]
|
|
||||||
assert is_slashable_validator(proposer, get_current_epoch(state))
|
|
||||||
# Verify signatures
|
|
||||||
for signed_header in (proposer_slashing.signed_reference_1, proposer_slashing.signed_reference_2):
|
|
||||||
domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_at_slot(signed_header.message.slot))
|
|
||||||
signing_root = compute_signing_root(signed_header.message, domain)
|
|
||||||
assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature)
|
|
||||||
|
|
||||||
slash_validator(state, reference_1.proposer_index)
|
# Verify the proposer is slashable
|
||||||
|
proposer = state.validators[proposer_index]
|
||||||
|
assert is_slashable_validator(proposer, get_current_epoch(state))
|
||||||
|
|
||||||
|
# The builders are not slashed, the proposer co-signed with them
|
||||||
|
builder_pubkey_1 = state.blob_builders[proposer_slashing.builder_index_1].pubkey
|
||||||
|
builder_pubkey_2 = state.blob_builders[proposer_slashing.builder_index_2].pubkey
|
||||||
|
domain = get_domain(state, DOMAIN_SHARD_BLOB, compute_epoch_at_slot(slot))
|
||||||
|
signing_root_1 = compute_signing_root(reference_1, domain)
|
||||||
|
signing_root_2 = compute_signing_root(reference_2, domain)
|
||||||
|
assert bls.FastAggregateVerify([builder_pubkey_1, proposer.pubkey], signing_root_1, proposer_slashing.signature_1)
|
||||||
|
assert bls.FastAggregateVerify([builder_pubkey_2, proposer.pubkey], signing_root_2, proposer_slashing.signature_2)
|
||||||
|
|
||||||
|
slash_validator(state, proposer_index)
|
||||||
```
|
```
|
||||||
|
|
||||||
### Epoch transition
|
### Epoch transition
|
||||||
|
@ -681,26 +816,23 @@ This epoch transition overrides the Merge epoch transition:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_epoch(state: BeaconState) -> None:
|
def process_epoch(state: BeaconState) -> None:
|
||||||
# Sharding
|
# Sharding pre-processing
|
||||||
process_pending_shard_confirmations(state)
|
process_pending_shard_confirmations(state)
|
||||||
charge_confirmed_shard_fees(state)
|
|
||||||
reset_pending_shard_work(state)
|
reset_pending_shard_work(state)
|
||||||
|
|
||||||
# Phase0
|
# Base functionality
|
||||||
process_justification_and_finalization(state)
|
process_justification_and_finalization(state)
|
||||||
process_rewards_and_penalties(state)
|
process_inactivity_updates(state)
|
||||||
|
process_rewards_and_penalties(state) # Note: modified, see new TIMELY_SHARD_FLAG_INDEX
|
||||||
process_registry_updates(state)
|
process_registry_updates(state)
|
||||||
process_slashings(state)
|
process_slashings(state)
|
||||||
|
|
||||||
# Final updates
|
|
||||||
process_eth1_data_reset(state)
|
process_eth1_data_reset(state)
|
||||||
process_effective_balance_updates(state)
|
process_effective_balance_updates(state)
|
||||||
process_slashings_reset(state)
|
process_slashings_reset(state)
|
||||||
process_randao_mixes_reset(state)
|
process_randao_mixes_reset(state)
|
||||||
process_historical_roots_update(state)
|
process_historical_roots_update(state)
|
||||||
process_participation_record_updates(state)
|
process_participation_flag_updates(state)
|
||||||
|
process_sync_committee_updates(state)
|
||||||
process_shard_epoch_increment(state)
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `process_pending_shard_confirmations`
|
#### `process_pending_shard_confirmations`
|
||||||
|
@ -722,46 +854,10 @@ def process_pending_shard_confirmations(state: BeaconState) -> None:
|
||||||
committee_work = state.shard_buffer[buffer_index][shard_index]
|
committee_work = state.shard_buffer[buffer_index][shard_index]
|
||||||
if committee_work.status.selector == SHARD_WORK_PENDING:
|
if committee_work.status.selector == SHARD_WORK_PENDING:
|
||||||
winning_header = max(committee_work.status.value, key=lambda header: header.weight)
|
winning_header = max(committee_work.status.value, key=lambda header: header.weight)
|
||||||
# TODO In Altair: set participation bit flag of voters for winning header
|
if winning_header.attested.commitment == DataCommitment():
|
||||||
if winning_header.commitment == DataCommitment():
|
|
||||||
committee_work.status.change(selector=SHARD_WORK_UNCONFIRMED, value=None)
|
committee_work.status.change(selector=SHARD_WORK_UNCONFIRMED, value=None)
|
||||||
else:
|
else:
|
||||||
committee_work.status.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.commitment)
|
committee_work.status.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.attested)
|
||||||
```
|
|
||||||
|
|
||||||
#### `charge_confirmed_shard_fees`
|
|
||||||
|
|
||||||
```python
|
|
||||||
def charge_confirmed_shard_fees(state: BeaconState) -> None:
|
|
||||||
new_gasprice = state.shard_gasprice
|
|
||||||
previous_epoch = get_previous_epoch(state)
|
|
||||||
previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
|
|
||||||
adjustment_quotient = (
|
|
||||||
get_active_shard_count(state, previous_epoch)
|
|
||||||
* SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT
|
|
||||||
)
|
|
||||||
# Iterate through confirmed shard-headers
|
|
||||||
for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
|
|
||||||
buffer_index = slot % SHARD_STATE_MEMORY_SLOTS
|
|
||||||
for shard_index in range(len(state.shard_buffer[buffer_index])):
|
|
||||||
committee_work = state.shard_buffer[buffer_index][shard_index]
|
|
||||||
if committee_work.status.selector == SHARD_WORK_CONFIRMED:
|
|
||||||
commitment: DataCommitment = committee_work.status.value
|
|
||||||
# Charge EIP 1559 fee
|
|
||||||
proposer = get_shard_proposer_index(state, slot, Shard(shard_index))
|
|
||||||
fee = (
|
|
||||||
(state.shard_gasprice * commitment.length)
|
|
||||||
// TARGET_SAMPLES_PER_BLOCK
|
|
||||||
)
|
|
||||||
decrease_balance(state, proposer, fee)
|
|
||||||
|
|
||||||
# Track updated gas price
|
|
||||||
new_gasprice = compute_updated_gasprice(
|
|
||||||
new_gasprice,
|
|
||||||
commitment.length,
|
|
||||||
adjustment_quotient,
|
|
||||||
)
|
|
||||||
state.shard_gasprice = new_gasprice
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `reset_pending_shard_work`
|
#### `reset_pending_shard_work`
|
||||||
|
@ -789,8 +885,7 @@ def reset_pending_shard_work(state: BeaconState) -> None:
|
||||||
selector=SHARD_WORK_PENDING,
|
selector=SHARD_WORK_PENDING,
|
||||||
value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD](
|
value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD](
|
||||||
PendingShardHeader(
|
PendingShardHeader(
|
||||||
commitment=DataCommitment(),
|
attested=AttestedDataCommitment(),
|
||||||
root=Root(),
|
|
||||||
votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
|
votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
|
||||||
weight=0,
|
weight=0,
|
||||||
update_slot=slot,
|
update_slot=slot,
|
||||||
|
@ -799,11 +894,3 @@ def reset_pending_shard_work(state: BeaconState) -> None:
|
||||||
)
|
)
|
||||||
# a shard without committee available defaults to SHARD_WORK_UNCONFIRMED.
|
# A shard without an available committee defaults to SHARD_WORK_UNCONFIRMED.
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `process_shard_epoch_increment`
|
|
||||||
|
|
||||||
```python
|
|
||||||
def process_shard_epoch_increment(state: BeaconState) -> None:
|
|
||||||
# Update current_epoch_start_shard
|
|
||||||
state.current_epoch_start_shard = get_start_shard(state, Slot(state.slot + 1))
|
|
||||||
```
|
|
||||||
|
|
|
@ -11,16 +11,13 @@
|
||||||
- [Introduction](#introduction)
|
- [Introduction](#introduction)
|
||||||
- [Constants](#constants)
|
- [Constants](#constants)
|
||||||
- [Misc](#misc)
|
- [Misc](#misc)
|
||||||
- [New containers](#new-containers)
|
|
||||||
- [ShardBlobBody](#shardblobbody)
|
|
||||||
- [ShardBlob](#shardblob)
|
|
||||||
- [SignedShardBlob](#signedshardblob)
|
|
||||||
- [Gossip domain](#gossip-domain)
|
- [Gossip domain](#gossip-domain)
|
||||||
- [Topics and messages](#topics-and-messages)
|
- [Topics and messages](#topics-and-messages)
|
||||||
- [Shard blob subnets](#shard-blob-subnets)
|
- [Shard blob subnets](#shard-blob-subnets)
|
||||||
- [`shard_blob_{subnet_id}`](#shard_blob_subnet_id)
|
- [`shard_blob_{subnet_id}`](#shard_blob_subnet_id)
|
||||||
- [Global topics](#global-topics)
|
- [Global topics](#global-topics)
|
||||||
- [`shard_header`](#shard_header)
|
- [`shard_blob_header`](#shard_blob_header)
|
||||||
|
- [`shard_blob_tx`](#shard_blob_tx)
|
||||||
- [`shard_proposer_slashing`](#shard_proposer_slashing)
|
- [`shard_proposer_slashing`](#shard_proposer_slashing)
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
@ -29,8 +26,7 @@
|
||||||
|
|
||||||
## Introduction
|
## Introduction
|
||||||
|
|
||||||
The specification of these changes continues in the same format as the [Phase0](../phase0/p2p-interface.md) and
|
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as prerequisites.
|
||||||
[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite.
|
|
||||||
The adjustments and additions for Shards are outlined in this document.
|
The adjustments and additions for Shards are outlined in this document.
|
||||||
|
|
||||||
## Constants
|
## Constants
|
||||||
|
@ -40,47 +36,9 @@ The adjustments and additions for Shards are outlined in this document.
|
||||||
| Name | Value | Description |
|
| Name | Value | Description |
|
||||||
| ---- | ----- | ----------- |
|
| ---- | ----- | ----------- |
|
||||||
| `SHARD_BLOB_SUBNET_COUNT` | `64` | The number of `shard_blob_{subnet_id}` subnets used in the gossipsub protocol. |
|
| `SHARD_BLOB_SUBNET_COUNT` | `64` | The number of `shard_blob_{subnet_id}` subnets used in the gossipsub protocol. |
|
||||||
|
| `SHARD_TX_PROPAGATION_GRACE_SLOTS` | `4` | The number of slots for a late transaction to propagate |
|
||||||
|
| `SHARD_TX_PROPAGATION_BUFFER_SLOTS` | `8` | The number of slots for an early transaction to propagate |
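One plausible reading of these two constants is as a gossip acceptance window around the blob's slot (the normative conditions live in the topic validation rules; this is only an assumed sketch with a hypothetical helper name):

```python
def is_within_tx_propagation_window(blob_slot: Slot, current_slot: Slot) -> bool:
    # Early: a transaction may circulate up to BUFFER slots before its target slot.
    # Late: it may keep circulating up to GRACE slots after its target slot.
    return (blob_slot <= current_slot + SHARD_TX_PROPAGATION_BUFFER_SLOTS
            and current_slot <= blob_slot + SHARD_TX_PROPAGATION_GRACE_SLOTS)
```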
|
||||||
|
|
||||||
## New containers
|
|
||||||
|
|
||||||
### ShardBlobBody
|
|
||||||
|
|
||||||
```python
|
|
||||||
class ShardBlobBody(Container):
|
|
||||||
# The actual data commitment
|
|
||||||
commitment: DataCommitment
|
|
||||||
# Proof that the degree < commitment.length
|
|
||||||
degree_proof: BLSCommitment
|
|
||||||
# The actual data. Should match the commitment and degree proof.
|
|
||||||
data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK]
|
|
||||||
# Latest block root of the Beacon Chain, before shard_blob.slot
|
|
||||||
beacon_block_root: Root
|
|
||||||
```
|
|
||||||
|
|
||||||
The user MUST always verify the commitments in the `body` are valid for the `data` in the `body`.
|
|
||||||
|
|
||||||
### ShardBlob
|
|
||||||
|
|
||||||
```python
|
|
||||||
class ShardBlob(Container):
|
|
||||||
# Slot and shard that this blob is intended for
|
|
||||||
slot: Slot
|
|
||||||
shard: Shard
|
|
||||||
# Shard data with related commitments and beacon anchor
|
|
||||||
body: ShardBlobBody
|
|
||||||
# Proposer of the shard-blob
|
|
||||||
proposer_index: ValidatorIndex
|
|
||||||
```
|
|
||||||
|
|
||||||
This is the expanded form of the `ShardBlobHeader` type.
|
|
||||||
|
|
||||||
### SignedShardBlob
|
|
||||||
|
|
||||||
```python
|
|
||||||
class SignedShardBlob(Container):
|
|
||||||
message: ShardBlob
|
|
||||||
signature: BLSSignature
|
|
||||||
```
|
|
||||||
|
|
||||||
## Gossip domain
|
## Gossip domain
|
||||||
|
|
||||||
|
@ -89,20 +47,21 @@ class SignedShardBlob(Container):
|
||||||
Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
|
Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
|
||||||
|
|
||||||
| Name | Message Type |
|
| Name | Message Type |
|
||||||
|----------------------------------|---------------------------|
|
|---------------------------------|--------------------------|
|
||||||
| `shard_blob_{subnet_id}` | `SignedShardBlob` |
|
| `shard_blob_{subnet_id}` | `SignedShardBlob` |
|
||||||
| `shard_header` | `SignedShardBlobHeader` |
|
| `shard_blob_header` | `SignedShardBlobHeader` |
|
||||||
|
| `shard_blob_tx` | `SignedShardBlobHeader` |
|
||||||
| `shard_proposer_slashing` | `ShardProposerSlashing` |
|
| `shard_proposer_slashing` | `ShardProposerSlashing` |
|
||||||
|
|
||||||
The [DAS network specification](./das-p2p.md) defines additional topics.
|
The [DAS network specification](./das-p2p.md) defines additional topics.
|
||||||
|
|
||||||
#### Shard blob subnets
|
#### Shard blob subnets
|
||||||
|
|
||||||
Shard blob subnets are used to propagate shard blobs to subsections of the network.
|
Shard blob subnets are used by builders to make their blobs available after selection by shard proposers.
|
||||||
|
|
||||||
##### `shard_blob_{subnet_id}`
|
##### `shard_blob_{subnet_id}`
|
||||||
|
|
||||||
Shard block data, in the form of a `SignedShardBlob`, is published to the `shard_blob_{subnet_id}` subnets.
|
Shard blob data, in the form of a `SignedShardBlob`, is published to the `shard_blob_{subnet_id}` subnets.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard) -> uint64:
|
def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard) -> uint64:
|
||||||
|
@ -118,51 +77,94 @@ def compute_subnet_for_shard_blob(state: BeaconState, slot: Slot, shard: Shard)
|
||||||
return uint64((committees_since_epoch_start + committee_index) % SHARD_BLOB_SUBNET_COUNT)
|
return uint64((committees_since_epoch_start + committee_index) % SHARD_BLOB_SUBNET_COUNT)
|
||||||
```
|
```
|
||||||
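As a small orientation sketch (not spec text), a builder publishing a blob could derive the short topic name from the function above; the full gossip topic additionally carries the fork digest and encoding suffix as defined in the Phase0 networking spec. `state` and `blob` are assumed to be in scope:

```python
# Illustrative usage of compute_subnet_for_shard_blob
subnet_id = compute_subnet_for_shard_blob(state, blob.slot, blob.shard)
topic_name = f"shard_blob_{subnet_id}"  # e.g. "shard_blob_17"
```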
|
|
||||||
The following validations MUST pass before forwarding the `signed_blob` (with inner `message` as `blob`) on the horizontal subnet or creating samples for it.
|
The following validations MUST pass before forwarding the `signed_blob`,
|
||||||
- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
|
on the horizontal subnet or creating samples for it. Alias `blob = signed_blob.message`.
|
||||||
i.e. validate that `blob.slot <= current_slot`
|
|
||||||
(a client MAY queue future blobs for processing at the appropriate slot).
|
- _[IGNORE]_ The `blob` is published no more than 1 slot ahead of time (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
|
||||||
- _[IGNORE]_ The `blob` is new enough to still be processed --
|
i.e. validate that `blob.slot <= current_slot + 1`
|
||||||
|
(a client MAY queue future blobs for propagation at the appropriate slot).
|
||||||
|
- _[IGNORE]_ The `blob` is new enough to still be processed --
|
||||||
i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)`
|
i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)`
|
||||||
- _[REJECT]_ The shard should have a committee at slot --
|
- _[REJECT]_ The shard blob is for an active shard --
|
||||||
|
i.e. `blob.shard < get_active_shard_count(state, compute_epoch_at_slot(blob.slot))`
|
||||||
|
- _[REJECT]_ The `blob.shard` MUST have a committee at the `blob.slot` --
|
||||||
i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard)` doesn't raise an error
|
i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard)` doesn't raise an error
|
||||||
- _[REJECT]_ The shard blob is for the correct subnet --
|
- _[REJECT]_ The shard blob is for the correct subnet --
|
||||||
i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id`
|
i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id`
|
||||||
- _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination.
|
- _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination.
|
||||||
- _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large.
|
- _[REJECT]_ The blob is not too large -- the data MUST NOT be larger than the SSZ list-limit, and a client MAY apply stricter bounds.
|
||||||
- _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256-bit range is valid.
|
- _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256-bit range is valid.
|
||||||
- _[REJECT]_ The proposer signature, `signed_blob.signature`, is valid with respect to the `proposer_index` pubkey.
|
- _[REJECT]_ The blob builder defined by `blob.builder_index` exists and has sufficient balance to back the fee payment.
|
||||||
- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot
|
- _[REJECT]_ The blob signature, `signed_blob.signature`, is valid for the aggregate of proposer and builder --
|
||||||
|
i.e. `bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob.signature)`.
|
||||||
|
- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's `slot` and `shard`,
|
||||||
in the context of the current shuffling (defined by `blob.body.beacon_block_root`/`slot`).
|
in the context of the current shuffling (defined by `blob.body.beacon_block_root`/`slot`).
|
||||||
If the `proposer_index` cannot immediately be verified against the expected shuffling,
|
If the `proposer_index` cannot immediately be verified against the expected shuffling,
|
||||||
the block MAY be queued for later processing while proposers for the blob's branch are calculated --
|
the blob MAY be queued for later processing while proposers for the blob's branch are calculated --
|
||||||
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||||
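A minimal sketch of the aggregate-signature condition above. It assumes the spec helpers (`get_domain`, `compute_signing_root`, `compute_epoch_at_slot`, `bls`) are in scope, that the beacon state exposes a builder registry (`state.blob_builders` is a hypothetical accessor), that the blob body carries a `builder_index`, and that a `DOMAIN_SHARD_BLOB` domain is defined in the sharding beacon-chain spec:

```python
def verify_signed_blob_signature(state: BeaconState, signed_blob: SignedShardBlob) -> bool:
    blob = signed_blob.message
    # Hypothetical registry/field names; the exact container layout is defined in the beacon-chain spec.
    builder_pubkey = state.blob_builders[blob.body.builder_index].pubkey
    proposer_pubkey = state.validators[blob.proposer_index].pubkey
    domain = get_domain(state, DOMAIN_SHARD_BLOB, compute_epoch_at_slot(blob.slot))
    signing_root = compute_signing_root(blob, domain)
    # Both the builder and the proposer must have signed the blob.
    return bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], signing_root, signed_blob.signature)
```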
|
|
||||||
#### Global topics
|
#### Global topics
|
||||||
|
|
||||||
There are two additional global topics for Sharding, one is used to propagate shard blob headers (`shard_header`) to
|
There are three additional global topics for Sharding.
|
||||||
all nodes on the network. Another one is used to propagate validator message (`shard_proposer_slashing`).
|
|
||||||
|
|
||||||
##### `shard_header`
|
- `shard_blob_header`: co-signed headers to be included on-chain and to serve as a signal to the builder to publish full data.
|
||||||
|
- `shard_blob_tx`: builder-signed headers, also known as "data transaction".
|
||||||
|
- `shard_proposer_slashing`: slashings of duplicate shard proposals.
|
||||||
|
|
||||||
Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_header` subnet.
|
##### `shard_blob_header`
|
||||||
|
|
||||||
The following validations MUST pass before forwarding the `signed_shard_blob_header` (with inner `message` as `header`) on the network.
|
Shard header data, in the form of a `SignedShardBlobHeader`, is published to the global `shard_blob_header` subnet.
|
||||||
- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
|
Shard blob headers select shard blob bids made by builders
|
||||||
i.e. validate that `header.slot <= current_slot`
|
and should be timely to ensure builders can publish the full shard blob before subsequent attestations.
|
||||||
(a client MAY queue future headers for processing at the appropriate slot).
|
|
||||||
- _[IGNORE]_ The `header` is new enough to still be processed --
|
The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`.
|
||||||
|
|
||||||
|
- _[IGNORE]_ The `header` is published no more than 1 slot ahead of time (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
|
||||||
|
i.e. validate that `header.slot <= current_slot + 1`
|
||||||
|
(a client MAY queue future headers for propagation at the appropriate slot).
|
||||||
|
- _[IGNORE]_ The header is new enough to still be processed --
|
||||||
i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)`
|
i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)`
|
||||||
|
- _[REJECT]_ The shard header is for an active shard --
|
||||||
|
i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))`
|
||||||
|
- _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` --
|
||||||
|
i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error.
|
||||||
- _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination.
|
- _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination.
|
||||||
- _[REJECT]_ The shard should have a committee at slot --
|
- _[REJECT]_ The blob builder defined by `blob.builder_index` exists and has sufficient balance to back the fee payment.
|
||||||
i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error
|
- _[REJECT]_ The header signature, `signed_blob_header.signature`, is valid for the aggregate of proposer and builder --
|
||||||
- _[REJECT]_ The proposer signature, `signed_shard_blob_header.signature`, is valid with respect to the `proposer_index` pubkey.
|
i.e. `bls.FastAggregateVerify([builder_pubkey, proposer_pubkey], blob_signing_root, signed_blob_header.signature)`.
|
||||||
- _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot
|
- _[REJECT]_ The header is proposed by the expected `proposer_index` for the blob's `header.slot` and `header.shard`
|
||||||
in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`).
|
in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`).
|
||||||
If the `proposer_index` cannot immediately be verified against the expected shuffling,
|
If the `proposer_index` cannot immediately be verified against the expected shuffling,
|
||||||
the block MAY be queued for later processing while proposers for the block's branch are calculated --
|
the blob MAY be queued for later processing while proposers for the blob's branch are calculated --
|
||||||
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||||
|
|
||||||
|
##### `shard_blob_tx`
|
||||||
|
|
||||||
|
Shard data-transactions, in the form of a `SignedShardBlobHeader`, are published to the global `shard_blob_tx` subnet.
|
||||||
|
These shard blob headers are signed solely by the blob-builder.
|
||||||
|
|
||||||
|
The following validations MUST pass before forwarding the `signed_blob_header` on the network. Alias `header = signed_blob_header.message`.
|
||||||
|
|
||||||
|
- _[IGNORE]_ The header is not propagating more than `SHARD_TX_PROPAGATION_BUFFER_SLOTS` slots ahead of time --
|
||||||
|
i.e. validate that `header.slot <= current_slot + SHARD_TX_PROPAGATION_BUFFER_SLOTS`.
|
||||||
|
- _[IGNORE]_ The header is not propagating later than `SHARD_TX_PROPAGATION_GRACE_SLOTS` slots too late --
|
||||||
|
i.e. validate that `header.slot + SHARD_TX_PROPAGATION_GRACE_SLOTS >= current_slot`
|
||||||
|
- _[REJECT]_ The shard header is for an active shard --
|
||||||
|
i.e. `header.shard < get_active_shard_count(state, compute_epoch_at_slot(header.slot))`
|
||||||
|
- _[REJECT]_ The `header.shard` MUST have a committee at the `header.slot` --
|
||||||
|
i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error.
|
||||||
|
- _[IGNORE]_ The header is not stale -- i.e. the corresponding shard proposer has not already selected a header for `(header.slot, header.shard)`.
|
||||||
|
- _[IGNORE]_ The header is the first header with valid signature received for the `(header.builder_index, header.slot, header.shard)` combination.
|
||||||
|
- _[REJECT]_ The blob builder, defined by `header.builder_index`, exists and has sufficient balance to back the fee payment.
|
||||||
|
- _[IGNORE]_ The header fee SHOULD be higher than that of previously seen headers for `(header.slot, header.shard)`, from any builder.
|
||||||
|
Propagating nodes MAY increase fee increments in case of spam.
|
||||||
|
- _[REJECT]_ The header signature, `signed_blob_header.signature`, is valid for ONLY the builder --
|
||||||
|
i.e. `bls.Verify(builder_pubkey, blob_signing_root, signed_blob_header.signature)`. The signature is not an aggregate with the proposer.
|
||||||
|
- _[REJECT]_ The header is designated for proposal by the expected `proposer_index` for the blob's `header.slot` and `header.shard`
|
||||||
|
in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`).
|
||||||
|
If the `proposer_index` cannot immediately be verified against the expected shuffling,
|
||||||
|
the blob MAY be queued for later processing while proposers for the blob's branch are calculated --
|
||||||
|
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||||
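For contrast with the co-signed header topic, here is a sketch of the builder-only signature check and the fee-based propagation filter described above. It reuses the hypothetical `state.blob_builders` registry and `DOMAIN_SHARD_BLOB` domain from the earlier sketch; the `max_fee` field name and the in-memory `best_fee_seen` cache are assumptions, not spec definitions:

```python
best_fee_seen: Dict[Tuple[Slot, Shard], Gwei] = {}  # highest fee observed per (slot, shard)


def accept_shard_blob_tx(state: BeaconState, signed_blob_header: SignedShardBlobHeader) -> bool:
    header = signed_blob_header.message
    # [REJECT] Builder-only signature: NOT an aggregate with the proposer.
    builder_pubkey = state.blob_builders[header.builder_index].pubkey
    domain = get_domain(state, DOMAIN_SHARD_BLOB, compute_epoch_at_slot(header.slot))
    signing_root = compute_signing_root(header, domain)
    if not bls.Verify(builder_pubkey, signing_root, signed_blob_header.signature):
        return False
    # [IGNORE] Only propagate headers that improve on the best fee seen for this (slot, shard).
    key = (header.slot, header.shard)
    if header.max_fee <= best_fee_seen.get(key, Gwei(0)):
        return False
    best_fee_seen[key] = header.max_fee
    return True
```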
|
|
||||||
##### `shard_proposer_slashing`
|
##### `shard_proposer_slashing`
|
||||||
|
|
||||||
|
@ -170,6 +172,6 @@ Shard proposer slashings, in the form of `ShardProposerSlashing`, are published
|
||||||
|
|
||||||
The following validations MUST pass before forwarding the `shard_proposer_slashing` on to the network.
|
The following validations MUST pass before forwarding the `shard_proposer_slashing` on to the network.
|
||||||
- _[IGNORE]_ The shard proposer slashing is the first valid shard proposer slashing received
|
- _[IGNORE]_ The shard proposer slashing is the first valid shard proposer slashing received
|
||||||
for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`.
|
for the proposer with index `proposer_slashing.proposer_index`.
|
||||||
The `slot` and `shard` are ignored; there are no per-shard slashings.
|
The `proposer_slashing.slot` and `proposer_slashing.shard` are ignored; there are no repeated or per-shard slashings.
|
||||||
- _[REJECT]_ All of the conditions within `process_shard_proposer_slashing` pass validation.
|
- _[REJECT]_ All of the conditions within `process_shard_proposer_slashing` pass validation.
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
1.1.0-beta.1
|
1.1.0-beta.2
|
|
@ -26,6 +26,7 @@ from eth2spec.test.context import (
|
||||||
with_presets,
|
with_presets,
|
||||||
spec_state_test,
|
spec_state_test,
|
||||||
always_bls,
|
always_bls,
|
||||||
|
spec_test,
|
||||||
)
|
)
|
||||||
from eth2spec.utils.hash_function import hash
|
from eth2spec.utils.hash_function import hash
|
||||||
|
|
||||||
|
@ -112,6 +113,47 @@ def test_invalid_signature_missing_participant(spec, state):
|
||||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||||
|
|
||||||
|
|
||||||
|
@with_altair_and_later
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_invalid_signature_no_participants(spec, state):
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
# No participants is an allowed case, but needs a specific signature, not the full-zeroed signature.
|
||||||
|
block.body.sync_aggregate = spec.SyncAggregate(
|
||||||
|
sync_committee_bits=[False] * len(block.body.sync_aggregate.sync_committee_bits),
|
||||||
|
sync_committee_signature=b'\x00' * 96
|
||||||
|
)
|
||||||
|
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||||
|
|
||||||
|
# No-participants, with valid signature, is tested in test_sync_committee_rewards_empty_participants already.
|
||||||
|
|
||||||
|
|
||||||
|
@with_altair_and_later
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_invalid_signature_infinite_signature_with_all_participants(spec, state):
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
# Include all participants, try the special-case signature for no-participants
|
||||||
|
block.body.sync_aggregate = spec.SyncAggregate(
|
||||||
|
sync_committee_bits=[True] * len(block.body.sync_aggregate.sync_committee_bits),
|
||||||
|
sync_committee_signature=spec.G2_POINT_AT_INFINITY
|
||||||
|
)
|
||||||
|
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||||
|
|
||||||
|
|
||||||
|
@with_altair_and_later
|
||||||
|
@spec_state_test
|
||||||
|
@always_bls
|
||||||
|
def test_invalid_signature_infinite_signature_with_single_participant(spec, state):
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
# Try to include a single participant with the special-case signature for no-participants.
|
||||||
|
block.body.sync_aggregate = spec.SyncAggregate(
|
||||||
|
sync_committee_bits=[True] + ([False] * (len(block.body.sync_aggregate.sync_committee_bits) - 1)),
|
||||||
|
sync_committee_signature=spec.G2_POINT_AT_INFINITY
|
||||||
|
)
|
||||||
|
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||||
|
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@always_bls
|
@always_bls
|
||||||
|
@ -534,6 +576,7 @@ def test_random_all_but_one_participating_with_duplicates(spec, state):
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MAINNET], reason="to create duplicate committee")
|
@with_presets([MAINNET], reason="to create duplicate committee")
|
||||||
|
@spec_test
|
||||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold)
|
@with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold)
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_random_misc_balances_and_half_participation_with_duplicates(spec, state):
|
def test_random_misc_balances_and_half_participation_with_duplicates(spec, state):
|
||||||
|
@ -596,6 +639,7 @@ def test_random_all_but_one_participating_without_duplicates(spec, state):
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||||
|
@spec_test
|
||||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold)
|
@with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold)
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_random_misc_balances_and_half_participation_without_duplicates(spec, state):
|
def test_random_misc_balances_and_half_participation_without_duplicates(spec, state):
|
||||||
|
|
|
@ -3,10 +3,15 @@ from random import Random
|
||||||
from eth2spec.test.context import spec_state_test, with_altair_and_later
|
from eth2spec.test.context import spec_state_test, with_altair_and_later
|
||||||
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores, zero_inactivity_scores
|
from eth2spec.test.helpers.inactivity_scores import randomize_inactivity_scores, zero_inactivity_scores
|
||||||
from eth2spec.test.helpers.state import (
|
from eth2spec.test.helpers.state import (
|
||||||
|
next_epoch,
|
||||||
next_epoch_via_block,
|
next_epoch_via_block,
|
||||||
set_full_participation,
|
set_full_participation,
|
||||||
set_empty_participation,
|
set_empty_participation,
|
||||||
)
|
)
|
||||||
|
from eth2spec.test.helpers.voluntary_exits import (
|
||||||
|
exit_validators,
|
||||||
|
get_exited_validators
|
||||||
|
)
|
||||||
from eth2spec.test.helpers.epoch_processing import (
|
from eth2spec.test.helpers.epoch_processing import (
|
||||||
run_epoch_processing_with
|
run_epoch_processing_with
|
||||||
)
|
)
|
||||||
|
@ -195,17 +200,24 @@ def test_random_inactivity_scores_full_participation_leaking(spec, state):
|
||||||
assert spec.is_in_inactivity_leak(state)
|
assert spec.is_in_inactivity_leak(state)
|
||||||
|
|
||||||
|
|
||||||
def slash_some_validators(spec, state, rng=Random(40404040)):
|
def slash_some_validators_for_inactivity_scores_test(spec, state, rng=Random(40404040)):
|
||||||
|
# ``run_inactivity_scores_test`` runs at the next epoch from `state`.
|
||||||
|
# We retrieve the proposer of this future state to avoid
|
||||||
|
# accidentally slashing that validator
|
||||||
|
future_state = state.copy()
|
||||||
|
next_epoch_via_block(spec, future_state)
|
||||||
|
|
||||||
|
proposer_index = spec.get_beacon_proposer_index(future_state)
|
||||||
# Slash ~1/4 of validators
|
# Slash ~1/4 of validators
|
||||||
for validator_index in range(len(state.validators)):
|
for validator_index in range(len(state.validators)):
|
||||||
if rng.choice(range(4)) == 0:
|
if rng.choice(range(4)) == 0 and validator_index != proposer_index:
|
||||||
spec.slash_validator(state, validator_index)
|
spec.slash_validator(state, validator_index)
|
||||||
|
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_some_slashed_zero_scores_full_participation(spec, state):
|
def test_some_slashed_zero_scores_full_participation(spec, state):
|
||||||
slash_some_validators(spec, state, rng=Random(33429))
|
slash_some_validators_for_inactivity_scores_test(spec, state, rng=Random(33429))
|
||||||
yield from run_inactivity_scores_test(
|
yield from run_inactivity_scores_test(
|
||||||
spec, state,
|
spec, state,
|
||||||
set_full_participation, zero_inactivity_scores,
|
set_full_participation, zero_inactivity_scores,
|
||||||
|
@ -218,7 +230,7 @@ def test_some_slashed_zero_scores_full_participation(spec, state):
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@leaking()
|
@leaking()
|
||||||
def test_some_slashed_zero_scores_full_participation_leaking(spec, state):
|
def test_some_slashed_zero_scores_full_participation_leaking(spec, state):
|
||||||
slash_some_validators(spec, state, rng=Random(33221))
|
slash_some_validators_for_inactivity_scores_test(spec, state, rng=Random(332243))
|
||||||
yield from run_inactivity_scores_test(
|
yield from run_inactivity_scores_test(
|
||||||
spec, state,
|
spec, state,
|
||||||
set_full_participation, zero_inactivity_scores,
|
set_full_participation, zero_inactivity_scores,
|
||||||
|
@ -239,7 +251,7 @@ def test_some_slashed_zero_scores_full_participation_leaking(spec, state):
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_some_slashed_full_random(spec, state):
|
def test_some_slashed_full_random(spec, state):
|
||||||
rng = Random(1010222)
|
rng = Random(1010222)
|
||||||
slash_some_validators(spec, state, rng=rng)
|
slash_some_validators_for_inactivity_scores_test(spec, state, rng=rng)
|
||||||
yield from run_inactivity_scores_test(
|
yield from run_inactivity_scores_test(
|
||||||
spec, state,
|
spec, state,
|
||||||
randomize_attestation_participation, randomize_inactivity_scores, rng=rng,
|
randomize_attestation_participation, randomize_inactivity_scores, rng=rng,
|
||||||
|
@ -251,7 +263,7 @@ def test_some_slashed_full_random(spec, state):
|
||||||
@leaking()
|
@leaking()
|
||||||
def test_some_slashed_full_random_leaking(spec, state):
|
def test_some_slashed_full_random_leaking(spec, state):
|
||||||
rng = Random(1102233)
|
rng = Random(1102233)
|
||||||
slash_some_validators(spec, state, rng=rng)
|
slash_some_validators_for_inactivity_scores_test(spec, state, rng=rng)
|
||||||
yield from run_inactivity_scores_test(
|
yield from run_inactivity_scores_test(
|
||||||
spec, state,
|
spec, state,
|
||||||
randomize_previous_epoch_participation, randomize_inactivity_scores, rng=rng,
|
randomize_previous_epoch_participation, randomize_inactivity_scores, rng=rng,
|
||||||
|
@ -259,3 +271,53 @@ def test_some_slashed_full_random_leaking(spec, state):
|
||||||
|
|
||||||
# Check still in leak
|
# Check still in leak
|
||||||
assert spec.is_in_inactivity_leak(state)
|
assert spec.is_in_inactivity_leak(state)
|
||||||
|
|
||||||
|
|
||||||
|
@with_altair_and_later
|
||||||
|
@spec_state_test
|
||||||
|
@leaking()
|
||||||
|
def test_some_exited_full_random_leaking(spec, state):
|
||||||
|
rng = Random(1102233)
|
||||||
|
|
||||||
|
exit_count = 3
|
||||||
|
|
||||||
|
# randomize ahead of time to check exited validators do not have
|
||||||
|
# mutations applied to their inactivity scores
|
||||||
|
randomize_inactivity_scores(spec, state, rng=rng)
|
||||||
|
|
||||||
|
assert not any(get_exited_validators(spec, state))
|
||||||
|
exited_indices = exit_validators(spec, state, exit_count, rng=rng)
|
||||||
|
assert not any(get_exited_validators(spec, state))
|
||||||
|
|
||||||
|
# advance the state to effect the exits
|
||||||
|
target_epoch = max(state.validators[index].exit_epoch for index in exited_indices)
|
||||||
|
# validators that have exited in the previous epoch or earlier will not
|
||||||
|
# have their inactivity scores modified; the test advances the state past this point
|
||||||
|
# to confirm this invariant:
|
||||||
|
previous_epoch = spec.get_previous_epoch(state)
|
||||||
|
for _ in range(target_epoch - previous_epoch):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
assert len(get_exited_validators(spec, state)) == exit_count
|
||||||
|
|
||||||
|
previous_scores = state.inactivity_scores.copy()
|
||||||
|
|
||||||
|
yield from run_inactivity_scores_test(
|
||||||
|
spec, state,
|
||||||
|
randomize_previous_epoch_participation, rng=rng,
|
||||||
|
)
|
||||||
|
|
||||||
|
# ensure exited validators have their score "frozen" at exit
|
||||||
|
# but otherwise there was a change
|
||||||
|
some_changed = False
|
||||||
|
for index in range(len(state.validators)):
|
||||||
|
if index in exited_indices:
|
||||||
|
assert previous_scores[index] == state.inactivity_scores[index]
|
||||||
|
else:
|
||||||
|
previous_score = previous_scores[index]
|
||||||
|
current_score = state.inactivity_scores[index]
|
||||||
|
if previous_score != current_score:
|
||||||
|
some_changed = True
|
||||||
|
assert some_changed
|
||||||
|
|
||||||
|
# Check still in leak
|
||||||
|
assert spec.is_in_inactivity_leak(state)
|
||||||
|
|
|
@ -347,10 +347,6 @@ def with_phases(phases, other_phases=None):
|
||||||
preset_name = kw.pop('preset')
|
preset_name = kw.pop('preset')
|
||||||
targets = spec_targets[preset_name]
|
targets = spec_targets[preset_name]
|
||||||
|
|
||||||
# TODO: test state is dependent on phase0 but is immediately transitioned to later phases.
|
|
||||||
# A new state-creation helper for later phases may be in place, and then tests can run without phase0
|
|
||||||
available_phases.add(PHASE0)
|
|
||||||
|
|
||||||
# Populate all phases for multi-phase tests
|
# Populate all phases for multi-phase tests
|
||||||
phase_dir = {}
|
phase_dir = {}
|
||||||
if PHASE0 in available_phases:
|
if PHASE0 in available_phases:
|
||||||
|
@ -433,23 +429,15 @@ def with_config_overrides(config_overrides):
|
||||||
|
|
||||||
|
|
||||||
def is_post_altair(spec):
|
def is_post_altair(spec):
|
||||||
if spec.fork == MERGE: # TODO: remove parallel Altair-Merge condition after rebase.
|
return spec.fork not in FORKS_BEFORE_ALTAIR
|
||||||
return False
|
|
||||||
if spec.fork in FORKS_BEFORE_ALTAIR:
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
def is_post_merge(spec):
|
def is_post_merge(spec):
|
||||||
if spec.fork == ALTAIR: # TODO: remove parallel Altair-Merge condition after rebase.
|
return spec.fork not in FORKS_BEFORE_MERGE
|
||||||
return False
|
|
||||||
if spec.fork in FORKS_BEFORE_MERGE:
|
|
||||||
return False
|
|
||||||
return True
|
|
||||||
|
|
||||||
|
|
||||||
with_altair_and_later = with_phases([ALTAIR]) # TODO: include Merge, but not until Merge work is rebased.
|
with_altair_and_later = with_phases([ALTAIR, MERGE])
|
||||||
with_merge_and_later = with_phases([MERGE])
|
with_merge_and_later = with_phases([MERGE]) # TODO: include sharding when spec stabilizes.
|
||||||
|
|
||||||
|
|
||||||
def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
|
def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
|
||||||
|
|
|
@ -217,30 +217,13 @@ def next_slots_with_attestations(spec,
|
||||||
post_state = state.copy()
|
post_state = state.copy()
|
||||||
signed_blocks = []
|
signed_blocks = []
|
||||||
for _ in range(slot_count):
|
for _ in range(slot_count):
|
||||||
block = build_empty_block_for_next_slot(spec, post_state)
|
signed_block = state_transition_with_full_block(
|
||||||
if fill_cur_epoch and post_state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
|
|
||||||
slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
|
|
||||||
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)):
|
|
||||||
attestations = _get_valid_attestation_at_slot(
|
|
||||||
post_state,
|
|
||||||
spec,
|
spec,
|
||||||
slot_to_attest,
|
|
||||||
participation_fn=participation_fn
|
|
||||||
)
|
|
||||||
for attestation in attestations:
|
|
||||||
block.body.attestations.append(attestation)
|
|
||||||
if fill_prev_epoch:
|
|
||||||
slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
|
|
||||||
attestations = _get_valid_attestation_at_slot(
|
|
||||||
post_state,
|
post_state,
|
||||||
spec,
|
fill_cur_epoch,
|
||||||
slot_to_attest,
|
fill_prev_epoch,
|
||||||
participation_fn=participation_fn
|
participation_fn,
|
||||||
)
|
)
|
||||||
for attestation in attestations:
|
|
||||||
block.body.attestations.append(attestation)
|
|
||||||
|
|
||||||
signed_block = state_transition_and_sign_block(spec, post_state, block)
|
|
||||||
signed_blocks.append(signed_block)
|
signed_blocks.append(signed_block)
|
||||||
|
|
||||||
return state, signed_blocks, post_state
|
return state, signed_blocks, post_state
|
||||||
|
@ -249,7 +232,8 @@ def next_slots_with_attestations(spec,
|
||||||
def next_epoch_with_attestations(spec,
|
def next_epoch_with_attestations(spec,
|
||||||
state,
|
state,
|
||||||
fill_cur_epoch,
|
fill_cur_epoch,
|
||||||
fill_prev_epoch):
|
fill_prev_epoch,
|
||||||
|
participation_fn=None):
|
||||||
assert state.slot % spec.SLOTS_PER_EPOCH == 0
|
assert state.slot % spec.SLOTS_PER_EPOCH == 0
|
||||||
|
|
||||||
return next_slots_with_attestations(
|
return next_slots_with_attestations(
|
||||||
|
@ -258,9 +242,76 @@ def next_epoch_with_attestations(spec,
|
||||||
spec.SLOTS_PER_EPOCH,
|
spec.SLOTS_PER_EPOCH,
|
||||||
fill_cur_epoch,
|
fill_cur_epoch,
|
||||||
fill_prev_epoch,
|
fill_prev_epoch,
|
||||||
|
participation_fn,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def state_transition_with_full_block(spec, state, fill_cur_epoch, fill_prev_epoch, participation_fn=None):
|
||||||
|
"""
|
||||||
|
Build and apply a block with attestations at the calculated `slot_to_attest` of the current epoch and/or previous epoch.
|
||||||
|
"""
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
if fill_cur_epoch and state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY:
|
||||||
|
slot_to_attest = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
|
||||||
|
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)):
|
||||||
|
attestations = _get_valid_attestation_at_slot(
|
||||||
|
state,
|
||||||
|
spec,
|
||||||
|
slot_to_attest,
|
||||||
|
participation_fn=participation_fn
|
||||||
|
)
|
||||||
|
for attestation in attestations:
|
||||||
|
block.body.attestations.append(attestation)
|
||||||
|
if fill_prev_epoch:
|
||||||
|
slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1
|
||||||
|
attestations = _get_valid_attestation_at_slot(
|
||||||
|
state,
|
||||||
|
spec,
|
||||||
|
slot_to_attest,
|
||||||
|
participation_fn=participation_fn
|
||||||
|
)
|
||||||
|
for attestation in attestations:
|
||||||
|
block.body.attestations.append(attestation)
|
||||||
|
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
return signed_block
|
||||||
|
|
||||||
|
|
||||||
|
def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, fill_prev_epoch):
|
||||||
|
"""
|
||||||
|
Build and apply a block with attestations at all valid slots of the current epoch and/or previous epoch.
|
||||||
|
"""
|
||||||
|
# Build a block with previous attestations
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
attestations = []
|
||||||
|
|
||||||
|
if fill_cur_epoch:
|
||||||
|
# current epoch
|
||||||
|
slots = state.slot % spec.SLOTS_PER_EPOCH
|
||||||
|
for slot_offset in range(slots):
|
||||||
|
target_slot = state.slot - slot_offset
|
||||||
|
attestations += _get_valid_attestation_at_slot(
|
||||||
|
state,
|
||||||
|
spec,
|
||||||
|
target_slot,
|
||||||
|
)
|
||||||
|
|
||||||
|
if fill_prev_epoch:
|
||||||
|
# attest previous epoch
|
||||||
|
slots = spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH
|
||||||
|
for slot_offset in range(1, slots):
|
||||||
|
target_slot = state.slot - (state.slot % spec.SLOTS_PER_EPOCH) - slot_offset
|
||||||
|
attestations += _get_valid_attestation_at_slot(
|
||||||
|
state,
|
||||||
|
spec,
|
||||||
|
target_slot,
|
||||||
|
)
|
||||||
|
|
||||||
|
block.body.attestations = attestations
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
return signed_block
|
||||||
|
|
||||||
|
|
||||||
def prepare_state_with_attestations(spec, state, participation_fn=None):
|
def prepare_state_with_attestations(spec, state, participation_fn=None):
|
||||||
"""
|
"""
|
||||||
Prepare state with attestations according to the ``participation_fn``.
|
Prepare state with attestations according to the ``participation_fn``.
|
||||||
|
|
|
@ -30,6 +30,7 @@ def get_process_calls(spec):
|
||||||
# Merge
|
# Merge
|
||||||
'process_application_payload':
|
'process_application_payload':
|
||||||
lambda state, block: spec.process_application_payload(state, block.body),
|
lambda state, block: spec.process_application_payload(state, block.body),
|
||||||
|
# TODO: add sharding processing functions when spec stabilizes.
|
||||||
# Custody Game
|
# Custody Game
|
||||||
'process_custody_game_operations':
|
'process_custody_game_operations':
|
||||||
lambda state, block: spec.process_custody_game_operations(state, block.body),
|
lambda state, block: spec.process_custody_game_operations(state, block.body),
|
||||||
|
|
|
@ -18,12 +18,9 @@ DAS = SpecForkName('das')
|
||||||
ALL_PHASES = (PHASE0, ALTAIR, MERGE)
|
ALL_PHASES = (PHASE0, ALTAIR, MERGE)
|
||||||
# The forks that output to the test vectors.
|
# The forks that output to the test vectors.
|
||||||
TESTGEN_FORKS = (PHASE0, ALTAIR, MERGE)
|
TESTGEN_FORKS = (PHASE0, ALTAIR, MERGE)
|
||||||
# TODO: everything runs in parallel to Altair.
|
|
||||||
# After features are rebased on the Altair fork, this can be reduced to just PHASE0.
|
|
||||||
FORKS_BEFORE_ALTAIR = (PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS)
|
|
||||||
|
|
||||||
# TODO: when rebasing Merge onto Altair, add ALTAIR to this tuple.
|
FORKS_BEFORE_ALTAIR = (PHASE0,)
|
||||||
FORKS_BEFORE_MERGE = (PHASE0,)
|
FORKS_BEFORE_MERGE = (PHASE0, ALTAIR)
|
||||||
|
|
||||||
#
|
#
|
||||||
# Config
|
# Config
|
||||||
|
|
|
@ -28,7 +28,7 @@ def get_process_calls(spec):
|
||||||
'process_participation_record_updates'
|
'process_participation_record_updates'
|
||||||
),
|
),
|
||||||
'process_sync_committee_updates', # altair
|
'process_sync_committee_updates', # altair
|
||||||
'process_shard_epoch_increment' # sharding
|
# TODO: add sharding processing functions when spec stabilizes.
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -20,6 +20,7 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
|
||||||
gas_limit=latest.gas_limit, # retain same limit
|
gas_limit=latest.gas_limit, # retain same limit
|
||||||
gas_used=0, # empty block, 0 gas
|
gas_used=0, # empty block, 0 gas
|
||||||
timestamp=timestamp,
|
timestamp=timestamp,
|
||||||
|
base_fee_per_gas=latest.base_fee_per_gas, # retain same base_fee
|
||||||
block_hash=spec.Hash32(),
|
block_hash=spec.Hash32(),
|
||||||
transactions=empty_txs,
|
transactions=empty_txs,
|
||||||
)
|
)
|
||||||
|
@ -41,6 +42,7 @@ def get_execution_payload_header(spec, execution_payload):
|
||||||
gas_limit=execution_payload.gas_limit,
|
gas_limit=execution_payload.gas_limit,
|
||||||
gas_used=execution_payload.gas_used,
|
gas_used=execution_payload.gas_used,
|
||||||
timestamp=execution_payload.timestamp,
|
timestamp=execution_payload.timestamp,
|
||||||
|
base_fee_per_gas=execution_payload.base_fee_per_gas,
|
||||||
block_hash=execution_payload.block_hash,
|
block_hash=execution_payload.block_hash,
|
||||||
transactions_root=spec.hash_tree_root(execution_payload.transactions)
|
transactions_root=spec.hash_tree_root(execution_payload.transactions)
|
||||||
)
|
)
|
||||||
|
|
|
@ -1,4 +1,8 @@
|
||||||
from eth_utils import encode_hex
|
from eth_utils import encode_hex
|
||||||
|
from eth2spec.test.helpers.attestations import (
|
||||||
|
next_epoch_with_attestations,
|
||||||
|
next_slots_with_attestations,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_anchor_root(spec, state):
|
def get_anchor_root(spec, state):
|
||||||
|
@ -18,23 +22,20 @@ def add_block_to_store(spec, store, signed_block):
|
||||||
spec.on_block(store, signed_block)
|
spec.on_block(store, signed_block)
|
||||||
|
|
||||||
|
|
||||||
def tick_and_run_on_block(spec, store, signed_block, test_steps=None):
|
def tick_and_add_block(spec, store, signed_block, test_steps, valid=True, allow_invalid_attestations=False):
|
||||||
if test_steps is None:
|
|
||||||
test_steps = []
|
|
||||||
|
|
||||||
pre_state = store.block_states[signed_block.message.parent_root]
|
pre_state = store.block_states[signed_block.message.parent_root]
|
||||||
block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
|
block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
|
||||||
|
|
||||||
if store.time < block_time:
|
if store.time < block_time:
|
||||||
on_tick_and_append_step(spec, store, block_time, test_steps)
|
on_tick_and_append_step(spec, store, block_time, test_steps)
|
||||||
|
|
||||||
yield from run_on_block(spec, store, signed_block, test_steps)
|
post_state = yield from add_block(
|
||||||
|
spec, store, signed_block, test_steps, valid=valid, allow_invalid_attestations=allow_invalid_attestations)
|
||||||
|
|
||||||
|
return post_state
|
||||||
|
|
||||||
|
|
||||||
def tick_and_run_on_attestation(spec, store, attestation, test_steps=None):
|
def tick_and_run_on_attestation(spec, store, attestation, test_steps):
|
||||||
if test_steps is None:
|
|
||||||
test_steps = []
|
|
||||||
|
|
||||||
parent_block = store.blocks[attestation.data.beacon_block_root]
|
parent_block = store.blocks[attestation.data.beacon_block_root]
|
||||||
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
|
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
|
||||||
block_time = pre_state.genesis_time + parent_block.slot * spec.config.SECONDS_PER_SLOT
|
block_time = pre_state.genesis_time + parent_block.slot * spec.config.SECONDS_PER_SLOT
|
||||||
|
@ -49,6 +50,37 @@ def tick_and_run_on_attestation(spec, store, attestation, test_steps=None):
|
||||||
test_steps.append({'attestation': get_attestation_file_name(attestation)})
|
test_steps.append({'attestation': get_attestation_file_name(attestation)})
|
||||||
|
|
||||||
|
|
||||||
|
def add_attestation(spec, store, attestation, test_steps, valid=True):
|
||||||
|
yield get_attestation_file_name(attestation), attestation
|
||||||
|
|
||||||
|
if not valid:
|
||||||
|
try:
|
||||||
|
run_on_attestation(spec, store, attestation, valid=True)
|
||||||
|
except AssertionError:
|
||||||
|
test_steps.append({
|
||||||
|
'attestation': get_attestation_file_name(attestation),
|
||||||
|
'valid': False,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
assert False
|
||||||
|
|
||||||
|
run_on_attestation(spec, store, attestation, valid=True)
|
||||||
|
test_steps.append({'attestation': get_attestation_file_name(attestation)})
|
||||||
|
|
||||||
|
|
||||||
|
def run_on_attestation(spec, store, attestation, valid=True):
|
||||||
|
if not valid:
|
||||||
|
try:
|
||||||
|
spec.on_attestation(store, attestation)
|
||||||
|
except AssertionError:
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
assert False
|
||||||
|
|
||||||
|
spec.on_attestation(store, attestation)
|
||||||
|
|
||||||
|
|
||||||
def get_genesis_forkchoice_store(spec, genesis_state):
|
def get_genesis_forkchoice_store(spec, genesis_state):
|
||||||
store, _ = get_genesis_forkchoice_store_and_block(spec, genesis_state)
|
store, _ = get_genesis_forkchoice_store_and_block(spec, genesis_state)
|
||||||
return store
|
return store
|
||||||
|
@ -73,25 +105,53 @@ def on_tick_and_append_step(spec, store, time, test_steps):
|
||||||
test_steps.append({'tick': int(time)})
|
test_steps.append({'tick': int(time)})
|
||||||
|
|
||||||
|
|
||||||
def run_on_block(spec, store, signed_block, test_steps, valid=True):
|
def run_on_block(spec, store, signed_block, valid=True):
|
||||||
if not valid:
|
if not valid:
|
||||||
try:
|
try:
|
||||||
spec.on_block(store, signed_block)
|
spec.on_block(store, signed_block)
|
||||||
|
|
||||||
except AssertionError:
|
except AssertionError:
|
||||||
return
|
return
|
||||||
else:
|
else:
|
||||||
assert False
|
assert False
|
||||||
|
|
||||||
spec.on_block(store, signed_block)
|
spec.on_block(store, signed_block)
|
||||||
|
assert store.blocks[signed_block.message.hash_tree_root()] == signed_block.message
|
||||||
|
|
||||||
|
|
||||||
|
def add_block(spec, store, signed_block, test_steps, valid=True, allow_invalid_attestations=False):
|
||||||
|
"""
|
||||||
|
Run on_block and on_attestation
|
||||||
|
"""
|
||||||
yield get_block_file_name(signed_block), signed_block
|
yield get_block_file_name(signed_block), signed_block
|
||||||
|
|
||||||
|
if not valid:
|
||||||
|
try:
|
||||||
|
run_on_block(spec, store, signed_block, valid=True)
|
||||||
|
except AssertionError:
|
||||||
|
test_steps.append({
|
||||||
|
'block': get_block_file_name(signed_block),
|
||||||
|
'valid': False,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
assert False
|
||||||
|
|
||||||
|
run_on_block(spec, store, signed_block, valid=True)
|
||||||
test_steps.append({'block': get_block_file_name(signed_block)})
|
test_steps.append({'block': get_block_file_name(signed_block)})
|
||||||
|
|
||||||
# An on_block step implies receiving block's attestations
|
# An on_block step implies receiving block's attestations
|
||||||
|
try:
|
||||||
for attestation in signed_block.message.body.attestations:
|
for attestation in signed_block.message.body.attestations:
|
||||||
spec.on_attestation(store, attestation)
|
run_on_attestation(spec, store, attestation, valid=True)
|
||||||
|
except AssertionError:
|
||||||
|
if allow_invalid_attestations:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
assert store.blocks[signed_block.message.hash_tree_root()] == signed_block.message
|
block_root = signed_block.message.hash_tree_root()
|
||||||
|
assert store.blocks[block_root] == signed_block.message
|
||||||
|
assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root
|
||||||
test_steps.append({
|
test_steps.append({
|
||||||
'checks': {
|
'checks': {
|
||||||
'time': int(store.time),
|
'time': int(store.time),
|
||||||
|
@ -102,6 +162,8 @@ def run_on_block(spec, store, signed_block, test_steps, valid=True):
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
|
return store.block_states[signed_block.message.hash_tree_root()]
|
||||||
|
|
||||||
|
|
||||||
def get_formatted_head_output(spec, store):
|
def get_formatted_head_output(spec, store):
|
||||||
head = spec.get_head(store)
|
head = spec.get_head(store)
|
||||||
|
@ -110,3 +172,49 @@ def get_formatted_head_output(spec, store):
|
||||||
'slot': int(slot),
|
'slot': int(slot),
|
||||||
'root': encode_hex(head),
|
'root': encode_hex(head),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def apply_next_epoch_with_attestations(spec,
|
||||||
|
state,
|
||||||
|
store,
|
||||||
|
fill_cur_epoch,
|
||||||
|
fill_prev_epoch,
|
||||||
|
participation_fn=None,
|
||||||
|
test_steps=None):
|
||||||
|
if test_steps is None:
|
||||||
|
test_steps = []
|
||||||
|
|
||||||
|
_, new_signed_blocks, post_state = next_epoch_with_attestations(
|
||||||
|
spec, state, fill_cur_epoch, fill_prev_epoch, participation_fn=participation_fn)
|
||||||
|
for signed_block in new_signed_blocks:
|
||||||
|
block = signed_block.message
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
block_root = block.hash_tree_root()
|
||||||
|
assert store.blocks[block_root] == block
|
||||||
|
last_signed_block = signed_block
|
||||||
|
|
||||||
|
assert store.block_states[block_root].hash_tree_root() == post_state.hash_tree_root()
|
||||||
|
|
||||||
|
return post_state, store, last_signed_block
|
||||||
|
|
||||||
|
|
||||||
|
def apply_next_slots_with_attestations(spec,
|
||||||
|
state,
|
||||||
|
store,
|
||||||
|
slots,
|
||||||
|
fill_cur_epoch,
|
||||||
|
fill_prev_epoch,
|
||||||
|
test_steps,
|
||||||
|
participation_fn=None):
|
||||||
|
_, new_signed_blocks, post_state = next_slots_with_attestations(
|
||||||
|
spec, state, slots, fill_cur_epoch, fill_prev_epoch, participation_fn=participation_fn)
|
||||||
|
for signed_block in new_signed_blocks:
|
||||||
|
block = signed_block.message
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
block_root = block.hash_tree_root()
|
||||||
|
assert store.blocks[block_root] == block
|
||||||
|
last_signed_block = signed_block
|
||||||
|
|
||||||
|
assert store.block_states[block_root].hash_tree_root() == post_state.hash_tree_root()
|
||||||
|
|
||||||
|
return post_state, store, last_signed_block
|
||||||
|
|
|
@ -1,7 +1,6 @@
|
||||||
from eth2spec.test.helpers.constants import (
|
from eth2spec.test.helpers.constants import (
|
||||||
ALTAIR,
|
ALTAIR, MERGE,
|
||||||
FORKS_BEFORE_ALTAIR,
|
FORKS_BEFORE_ALTAIR, FORKS_BEFORE_MERGE,
|
||||||
MERGE,
|
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.keys import pubkeys
|
from eth2spec.test.helpers.keys import pubkeys
|
||||||
|
|
||||||
|
@ -25,11 +24,13 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||||
deposit_root = b'\x42' * 32
|
deposit_root = b'\x42' * 32
|
||||||
|
|
||||||
eth1_block_hash = b'\xda' * 32
|
eth1_block_hash = b'\xda' * 32
|
||||||
|
previous_version = spec.config.GENESIS_FORK_VERSION
|
||||||
current_version = spec.config.GENESIS_FORK_VERSION
|
current_version = spec.config.GENESIS_FORK_VERSION
|
||||||
|
|
||||||
if spec.fork == ALTAIR:
|
if spec.fork == ALTAIR:
|
||||||
current_version = spec.config.ALTAIR_FORK_VERSION
|
current_version = spec.config.ALTAIR_FORK_VERSION
|
||||||
elif spec.fork == MERGE:
|
elif spec.fork == MERGE:
|
||||||
|
previous_version = spec.config.ALTAIR_FORK_VERSION
|
||||||
current_version = spec.config.MERGE_FORK_VERSION
|
current_version = spec.config.MERGE_FORK_VERSION
|
||||||
|
|
||||||
state = spec.BeaconState(
|
state = spec.BeaconState(
|
||||||
|
@ -41,7 +42,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||||
block_hash=eth1_block_hash,
|
block_hash=eth1_block_hash,
|
||||||
),
|
),
|
||||||
fork=spec.Fork(
|
fork=spec.Fork(
|
||||||
previous_version=spec.config.GENESIS_FORK_VERSION,
|
previous_version=previous_version,
|
||||||
current_version=current_version,
|
current_version=current_version,
|
||||||
epoch=spec.GENESIS_EPOCH,
|
epoch=spec.GENESIS_EPOCH,
|
||||||
),
|
),
|
||||||
|
@ -73,4 +74,11 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||||
state.current_sync_committee = spec.get_next_sync_committee(state)
|
state.current_sync_committee = spec.get_next_sync_committee(state)
|
||||||
state.next_sync_committee = spec.get_next_sync_committee(state)
|
state.next_sync_committee = spec.get_next_sync_committee(state)
|
||||||
|
|
||||||
|
if spec.fork not in FORKS_BEFORE_MERGE:
|
||||||
|
# Initialize the execution payload header (with block number and genesis time set to 0)
|
||||||
|
state.latest_execution_payload_header.block_hash = eth1_block_hash
|
||||||
|
state.latest_execution_payload_header.random = eth1_block_hash
|
||||||
|
state.latest_execution_payload_header.gas_limit = spec.GENESIS_GAS_LIMIT
|
||||||
|
state.latest_execution_payload_header.base_fee_per_gas = spec.GENESIS_BASE_FEE_PER_GAS
|
||||||
|
|
||||||
return state
|
return state
|
||||||
|
|
|
@ -4,9 +4,6 @@ MERGE_FORK_TEST_META_TAGS = {
|
||||||
|
|
||||||
|
|
||||||
def run_fork_test(post_spec, pre_state):
|
def run_fork_test(post_spec, pre_state):
|
||||||
# Clean up state to be more realistic
|
|
||||||
pre_state.current_epoch_attestations = []
|
|
||||||
|
|
||||||
yield 'pre', pre_state
|
yield 'pre', pre_state
|
||||||
|
|
||||||
post_state = post_spec.upgrade_to_merge(pre_state)
|
post_state = post_spec.upgrade_to_merge(pre_state)
|
||||||
|
@ -24,10 +21,14 @@ def run_fork_test(post_spec, pre_state):
|
||||||
'randao_mixes',
|
'randao_mixes',
|
||||||
# Slashings
|
# Slashings
|
||||||
'slashings',
|
'slashings',
|
||||||
# Attestations
|
# Participation
|
||||||
'previous_epoch_attestations', 'current_epoch_attestations',
|
'previous_epoch_participation', 'current_epoch_participation',
|
||||||
# Finality
|
# Finality
|
||||||
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
|
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
|
||||||
|
# Inactivity
|
||||||
|
'inactivity_scores',
|
||||||
|
# Sync
|
||||||
|
'current_sync_committee', 'next_sync_committee'
|
||||||
]
|
]
|
||||||
for field in stable_fields:
|
for field in stable_fields:
|
||||||
assert getattr(pre_state, field) == getattr(post_state, field)
|
assert getattr(pre_state, field) == getattr(post_state, field)
|
||||||
|
|
|
@ -1,3 +1,4 @@
|
||||||
|
from random import Random
|
||||||
from eth2spec.utils import bls
|
from eth2spec.utils import bls
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
from eth2spec.test.helpers.keys import privkeys
|
||||||
|
|
||||||
|
@ -23,3 +24,21 @@ def sign_voluntary_exit(spec, state, voluntary_exit, privkey):
|
||||||
message=voluntary_exit,
|
message=voluntary_exit,
|
||||||
signature=bls.Sign(privkey, signing_root)
|
signature=bls.Sign(privkey, signing_root)
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Helpers for applying effects of a voluntary exit
|
||||||
|
#
|
||||||
|
def get_exited_validators(spec, state):
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
return [index for (index, validator) in enumerate(state.validators) if validator.exit_epoch <= current_epoch]
|
||||||
|
|
||||||
|
|
||||||
|
def exit_validators(spec, state, validator_count, rng=None):
|
||||||
|
if rng is None:
|
||||||
|
rng = Random(1337)
|
||||||
|
|
||||||
|
indices = rng.sample(range(len(state.validators)), validator_count)
|
||||||
|
for index in indices:
|
||||||
|
spec.initiate_validator_exit(state, index)
|
||||||
|
return indices
|
||||||
|
|
|
@ -7,7 +7,7 @@ from eth2spec.test.context import (
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.constants import (
    PHASE0, MERGE,
    ALTAIR, MERGE,
    MINIMAL,
)
from eth2spec.test.helpers.state import (

@ -20,7 +20,7 @@ from eth2spec.test.helpers.merge.fork import (
)


@with_phases(phases=[PHASE0], other_phases=[MERGE])
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
@spec_test
@with_state
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)

@ -28,7 +28,7 @@ def test_fork_base_state(spec, phases, state):
    yield from run_fork_test(phases[MERGE], state)


@with_phases(phases=[PHASE0], other_phases=[MERGE])
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
@spec_test
@with_state
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)

@ -37,7 +37,7 @@ def test_fork_next_epoch(spec, phases, state):
    yield from run_fork_test(phases[MERGE], state)


@with_phases(phases=[PHASE0], other_phases=[MERGE])
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
@spec_test
@with_state
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)

@ -46,7 +46,7 @@ def test_fork_next_epoch_with_block(spec, phases, state):
    yield from run_fork_test(phases[MERGE], state)


@with_phases(phases=[PHASE0], other_phases=[MERGE])
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
@spec_test
@with_state
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)

@ -56,7 +56,7 @@ def test_fork_many_next_epoch(spec, phases, state):
    yield from run_fork_test(phases[MERGE], state)


@with_phases(phases=[PHASE0], other_phases=[MERGE])
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)

@ -64,7 +64,7 @@ def test_fork_random_low_balances(spec, phases, state):
    yield from run_fork_test(phases[MERGE], state)


@with_phases(phases=[PHASE0], other_phases=[MERGE])
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@spec_test
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)

@ -72,7 +72,7 @@ def test_fork_random_misc_balances(spec, phases, state):
    yield from run_fork_test(phases[MERGE], state)


@with_phases(phases=[PHASE0], other_phases=[MERGE])
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
@with_presets([MINIMAL],
              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@ -9,20 +9,17 @@ from eth2spec.test.context import (
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.constants import (
    PHASE0, MERGE,
    ALTAIR, MERGE,
    MINIMAL,
)
from eth2spec.test.helpers.merge.fork import (
    MERGE_FORK_TEST_META_TAGS,
    run_fork_test,
)
from eth2spec.test.helpers.random import (
    randomize_state,
    randomize_attestation_participation,
)
from eth2spec.test.helpers.random import randomize_state


@with_phases(phases=[PHASE0], other_phases=[MERGE])
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
@spec_test
@with_state
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
@ -31,7 +28,7 @@ def test_merge_fork_random_0(spec, phases, state):
|
||||||
yield from run_fork_test(phases[MERGE], state)
|
yield from run_fork_test(phases[MERGE], state)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(phases=[PHASE0], other_phases=[MERGE])
|
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
|
||||||
@spec_test
|
@spec_test
|
||||||
@with_state
|
@with_state
|
||||||
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
||||||
|
@ -40,7 +37,7 @@ def test_merge_fork_random_1(spec, phases, state):
|
||||||
yield from run_fork_test(phases[MERGE], state)
|
yield from run_fork_test(phases[MERGE], state)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(phases=[PHASE0], other_phases=[MERGE])
|
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
|
||||||
@spec_test
|
@spec_test
|
||||||
@with_state
|
@with_state
|
||||||
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
||||||
|
@ -49,7 +46,7 @@ def test_merge_fork_random_2(spec, phases, state):
|
||||||
yield from run_fork_test(phases[MERGE], state)
|
yield from run_fork_test(phases[MERGE], state)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(phases=[PHASE0], other_phases=[MERGE])
|
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
|
||||||
@spec_test
|
@spec_test
|
||||||
@with_state
|
@with_state
|
||||||
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
||||||
|
@ -58,40 +55,7 @@ def test_merge_fork_random_3(spec, phases, state):
|
||||||
yield from run_fork_test(phases[MERGE], state)
|
yield from run_fork_test(phases[MERGE], state)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(phases=[PHASE0], other_phases=[MERGE])
|
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
|
||||||
@spec_test
|
|
||||||
@with_state
|
|
||||||
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
|
||||||
def test_merge_fork_random_duplicate_attestations(spec, phases, state):
|
|
||||||
randomize_state(spec, state, rng=Random(1111))
|
|
||||||
# Note: `run_fork_test` empties `current_epoch_attestations`
|
|
||||||
state.previous_epoch_attestations = state.previous_epoch_attestations + state.previous_epoch_attestations
|
|
||||||
yield from run_fork_test(phases[MERGE], state)
|
|
||||||
|
|
||||||
|
|
||||||
@with_phases(phases=[PHASE0], other_phases=[MERGE])
|
|
||||||
@spec_test
|
|
||||||
@with_state
|
|
||||||
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
|
||||||
def test_merge_fork_random_mismatched_attestations(spec, phases, state):
|
|
||||||
# Create a random state
|
|
||||||
randomize_state(spec, state, rng=Random(2222))
|
|
||||||
|
|
||||||
# Now make two copies
|
|
||||||
state_0 = state.copy()
|
|
||||||
state_1 = state.copy()
|
|
||||||
|
|
||||||
# Randomize attestation participation of both
|
|
||||||
randomize_attestation_participation(spec, state_0, rng=Random(3333))
|
|
||||||
randomize_attestation_participation(spec, state_1, rng=Random(4444))
|
|
||||||
|
|
||||||
# Note: `run_fork_test` empties `current_epoch_attestations`
|
|
||||||
# Use pending attestations from both random states in a single state for testing
|
|
||||||
state_0.previous_epoch_attestations = state_0.previous_epoch_attestations + state_1.previous_epoch_attestations
|
|
||||||
yield from run_fork_test(phases[MERGE], state_0)
|
|
||||||
|
|
||||||
|
|
||||||
@with_phases(phases=[PHASE0], other_phases=[MERGE])
|
|
||||||
@spec_test
|
@spec_test
|
||||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||||
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
||||||
|
@ -100,7 +64,7 @@ def test_merge_fork_random_low_balances(spec, phases, state):
|
||||||
yield from run_fork_test(phases[MERGE], state)
|
yield from run_fork_test(phases[MERGE], state)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(phases=[PHASE0], other_phases=[MERGE])
|
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
|
||||||
@spec_test
|
@spec_test
|
||||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||||
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
@with_meta_tags(MERGE_FORK_TEST_META_TAGS)
|
||||||
|
@ -109,7 +73,7 @@ def test_merge_fork_random_misc_balances(spec, phases, state):
|
||||||
yield from run_fork_test(phases[MERGE], state)
|
yield from run_fork_test(phases[MERGE], state)
|
||||||
|
|
||||||
|
|
||||||
@with_phases(phases=[PHASE0], other_phases=[MERGE])
|
@with_phases(phases=[ALTAIR], other_phases=[MERGE])
|
||||||
@with_presets([MINIMAL],
|
@with_presets([MINIMAL],
|
||||||
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
|
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
|
||||||
@spec_test
|
@spec_test
|
||||||
@ -306,7 +306,7 @@ def test_att1_empty_indices(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

    attester_slashing.attestation_1.attesting_indices = []
    attester_slashing.attestation_1.signature = spec.bls.Z2_SIGNATURE
    attester_slashing.attestation_1.signature = spec.bls.G2_POINT_AT_INFINITY

    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)

@ -318,7 +318,7 @@ def test_att2_empty_indices(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)

    attester_slashing.attestation_2.attesting_indices = []
    attester_slashing.attestation_2.signature = spec.bls.Z2_SIGNATURE
    attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY

    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)

@ -330,10 +330,10 @@ def test_all_empty_indices(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)

    attester_slashing.attestation_1.attesting_indices = []
    attester_slashing.attestation_1.signature = spec.bls.Z2_SIGNATURE
    attester_slashing.attestation_1.signature = spec.bls.G2_POINT_AT_INFINITY

    attester_slashing.attestation_2.attesting_indices = []
    attester_slashing.attestation_2.signature = spec.bls.Z2_SIGNATURE
    attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY

    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
@ -11,12 +11,12 @@ from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.fork_choice import (
    tick_and_run_on_attestation,
    tick_and_run_on_block,
    tick_and_add_block,
    get_anchor_root,
    get_genesis_forkchoice_store_and_block,
    get_formatted_head_output,
    on_tick_and_append_step,
    run_on_block,
    add_block,
)
from eth2spec.test.helpers.state import (
    next_epoch,
@ -68,12 +68,12 @@ def test_chain_no_attestations(spec, state):
|
||||||
# On receiving a block of `GENESIS_SLOT + 1` slot
|
# On receiving a block of `GENESIS_SLOT + 1` slot
|
||||||
block_1 = build_empty_block_for_next_slot(spec, state)
|
block_1 = build_empty_block_for_next_slot(spec, state)
|
||||||
signed_block_1 = state_transition_and_sign_block(spec, state, block_1)
|
signed_block_1 = state_transition_and_sign_block(spec, state, block_1)
|
||||||
yield from tick_and_run_on_block(spec, store, signed_block_1, test_steps)
|
yield from tick_and_add_block(spec, store, signed_block_1, test_steps)
|
||||||
|
|
||||||
# On receiving a block of next epoch
|
# On receiving a block of next epoch
|
||||||
block_2 = build_empty_block_for_next_slot(spec, state)
|
block_2 = build_empty_block_for_next_slot(spec, state)
|
||||||
signed_block_2 = state_transition_and_sign_block(spec, state, block_2)
|
signed_block_2 = state_transition_and_sign_block(spec, state, block_2)
|
||||||
yield from tick_and_run_on_block(spec, store, signed_block_2, test_steps)
|
yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
|
||||||
|
|
||||||
assert spec.get_head(store) == spec.hash_tree_root(block_2)
|
assert spec.get_head(store) == spec.hash_tree_root(block_2)
|
||||||
test_steps.append({
|
test_steps.append({
|
||||||
|
@ -107,14 +107,14 @@ def test_split_tie_breaker_no_attestations(spec, state):
|
||||||
block_1_state = genesis_state.copy()
|
block_1_state = genesis_state.copy()
|
||||||
block_1 = build_empty_block_for_next_slot(spec, block_1_state)
|
block_1 = build_empty_block_for_next_slot(spec, block_1_state)
|
||||||
signed_block_1 = state_transition_and_sign_block(spec, block_1_state, block_1)
|
signed_block_1 = state_transition_and_sign_block(spec, block_1_state, block_1)
|
||||||
yield from tick_and_run_on_block(spec, store, signed_block_1, test_steps)
|
yield from tick_and_add_block(spec, store, signed_block_1, test_steps)
|
||||||
|
|
||||||
# additional block at slot 1
|
# additional block at slot 1
|
||||||
block_2_state = genesis_state.copy()
|
block_2_state = genesis_state.copy()
|
||||||
block_2 = build_empty_block_for_next_slot(spec, block_2_state)
|
block_2 = build_empty_block_for_next_slot(spec, block_2_state)
|
||||||
block_2.body.graffiti = b'\x42' * 32
|
block_2.body.graffiti = b'\x42' * 32
|
||||||
signed_block_2 = state_transition_and_sign_block(spec, block_2_state, block_2)
|
signed_block_2 = state_transition_and_sign_block(spec, block_2_state, block_2)
|
||||||
yield from tick_and_run_on_block(spec, store, signed_block_2, test_steps)
|
yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
|
||||||
|
|
||||||
highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2))
|
highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2))
|
||||||
assert spec.get_head(store) == highest_root
|
assert spec.get_head(store) == highest_root
|
||||||
|
@ -150,14 +150,14 @@ def test_shorter_chain_but_heavier_weight(spec, state):
|
||||||
for _ in range(3):
|
for _ in range(3):
|
||||||
long_block = build_empty_block_for_next_slot(spec, long_state)
|
long_block = build_empty_block_for_next_slot(spec, long_state)
|
||||||
signed_long_block = state_transition_and_sign_block(spec, long_state, long_block)
|
signed_long_block = state_transition_and_sign_block(spec, long_state, long_block)
|
||||||
yield from tick_and_run_on_block(spec, store, signed_long_block, test_steps)
|
yield from tick_and_add_block(spec, store, signed_long_block, test_steps)
|
||||||
|
|
||||||
# build short tree
|
# build short tree
|
||||||
short_state = genesis_state.copy()
|
short_state = genesis_state.copy()
|
||||||
short_block = build_empty_block_for_next_slot(spec, short_state)
|
short_block = build_empty_block_for_next_slot(spec, short_state)
|
||||||
short_block.body.graffiti = b'\x42' * 32
|
short_block.body.graffiti = b'\x42' * 32
|
||||||
signed_short_block = state_transition_and_sign_block(spec, short_state, short_block)
|
signed_short_block = state_transition_and_sign_block(spec, short_state, short_block)
|
||||||
yield from tick_and_run_on_block(spec, store, signed_short_block, test_steps)
|
yield from tick_and_add_block(spec, store, signed_short_block, test_steps)
|
||||||
|
|
||||||
short_attestation = get_valid_attestation(spec, short_state, short_block.slot, signed=True)
|
short_attestation = get_valid_attestation(spec, short_state, short_block.slot, signed=True)
|
||||||
yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps)
|
yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps)
|
||||||
|
@ -200,7 +200,7 @@ def test_filtered_block_tree(spec, state):
|
||||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
for signed_block in signed_blocks:
|
for signed_block in signed_blocks:
|
||||||
yield from run_on_block(spec, store, signed_block, test_steps)
|
yield from add_block(spec, store, signed_block, test_steps)
|
||||||
|
|
||||||
assert store.justified_checkpoint == state.current_justified_checkpoint
|
assert store.justified_checkpoint == state.current_justified_checkpoint
|
||||||
|
|
||||||
|
@ -247,7 +247,7 @@ def test_filtered_block_tree(spec, state):
|
||||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
|
||||||
# include rogue block and associated attestations in the store
|
# include rogue block and associated attestations in the store
|
||||||
yield from run_on_block(spec, store, signed_rogue_block, test_steps)
|
yield from add_block(spec, store, signed_rogue_block, test_steps)
|
||||||
|
|
||||||
for attestation in attestations:
|
for attestation in attestations:
|
||||||
yield from tick_and_run_on_attestation(spec, store, attestation, test_steps)
|
yield from tick_and_run_on_attestation(spec, store, attestation, test_steps)
|
||||||
|
|
|
@ -0,0 +1,689 @@
import random
|
||||||
|
|
||||||
|
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||||
|
from eth2spec.test.context import MINIMAL, spec_state_test, with_all_phases, with_presets
|
||||||
|
from eth2spec.test.helpers.attestations import (
|
||||||
|
next_epoch_with_attestations,
|
||||||
|
next_slots_with_attestations,
|
||||||
|
state_transition_with_full_block,
|
||||||
|
state_transition_with_full_attestations_block,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.block import (
|
||||||
|
build_empty_block_for_next_slot,
|
||||||
|
build_empty_block,
|
||||||
|
transition_unsigned_block,
|
||||||
|
sign_block,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.fork_choice import (
|
||||||
|
get_genesis_forkchoice_store_and_block,
|
||||||
|
on_tick_and_append_step,
|
||||||
|
add_block,
|
||||||
|
tick_and_add_block,
|
||||||
|
apply_next_epoch_with_attestations,
|
||||||
|
apply_next_slots_with_attestations,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.state import (
|
||||||
|
next_epoch,
|
||||||
|
next_slots,
|
||||||
|
state_transition_and_sign_block,
|
||||||
|
transition_to,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
rng = random.Random(2020)
|
||||||
|
|
||||||
|
|
||||||
|
def _drop_random_one_third(_slot, _index, indices):
|
||||||
|
committee_len = len(indices)
|
||||||
|
assert committee_len >= 3
|
||||||
|
filter_len = committee_len // 3
|
||||||
|
participant_count = committee_len - filter_len
|
||||||
|
return rng.sample(indices, participant_count)
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_basic(spec, state):
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# On receiving a block of `GENESIS_SLOT + 1` slot
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||||
|
|
||||||
|
# On receiving a block of next epoch
|
||||||
|
store.time = current_time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
||||||
|
block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
# TODO: add tests for justified_root and finalized_root
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@with_presets([MINIMAL], reason="too slow")
|
||||||
|
def test_on_block_checkpoints(spec, state):
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# Run for 1 epoch with full attestations
|
||||||
|
next_epoch(spec, state)
|
||||||
|
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||||
|
|
||||||
|
state, store, last_signed_block = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, False, test_steps=test_steps)
|
||||||
|
last_block_root = hash_tree_root(last_signed_block.message)
|
||||||
|
assert spec.get_head(store) == last_block_root
|
||||||
|
|
||||||
|
# Forward 1 epoch
|
||||||
|
next_epoch(spec, state)
|
||||||
|
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||||
|
|
||||||
|
# Mock the finalized_checkpoint and build a block on it
|
||||||
|
fin_state = store.block_states[last_block_root].copy()
|
||||||
|
fin_state.finalized_checkpoint = store.block_states[last_block_root].current_justified_checkpoint.copy()
|
||||||
|
|
||||||
|
block = build_empty_block_for_next_slot(spec, fin_state)
|
||||||
|
signed_block = state_transition_and_sign_block(spec, fin_state.copy(), block)
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_on_block_future_block(spec, state):
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# Do NOT tick time to `GENESIS_SLOT + 1` slot
|
||||||
|
# Fail receiving block of `GENESIS_SLOT + 1` slot
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
yield from add_block(spec, store, signed_block, test_steps, valid=False)
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_on_block_bad_parent_root(spec, state):
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# Fail receiving block of `GENESIS_SLOT + 1` slot
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
transition_unsigned_block(spec, state, block)
|
||||||
|
block.state_root = state.hash_tree_root()
|
||||||
|
|
||||||
|
block.parent_root = b'\x45' * 32
|
||||||
|
|
||||||
|
signed_block = sign_block(spec, state, block)
|
||||||
|
|
||||||
|
yield from add_block(spec, store, signed_block, test_steps, valid=False)
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@with_presets([MINIMAL], reason="too slow")
|
||||||
|
def test_on_block_before_finalized(spec, state):
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# Fork
|
||||||
|
another_state = state.copy()
|
||||||
|
|
||||||
|
# Create a finalized chain
|
||||||
|
for _ in range(4):
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, False, test_steps=test_steps)
|
||||||
|
assert store.finalized_checkpoint.epoch == 2
|
||||||
|
|
||||||
|
# Fail receiving block of `GENESIS_SLOT + 1` slot
|
||||||
|
block = build_empty_block_for_next_slot(spec, another_state)
|
||||||
|
block.body.graffiti = b'\x12' * 32
|
||||||
|
signed_block = state_transition_and_sign_block(spec, another_state, block)
|
||||||
|
assert signed_block.message.hash_tree_root() not in store.blocks
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False)
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@with_presets([MINIMAL], reason="too slow")
|
||||||
|
def test_on_block_finalized_skip_slots(spec, state):
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# Create a finalized chain
|
||||||
|
for _ in range(4):
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, False, test_steps=test_steps)
|
||||||
|
assert store.finalized_checkpoint.epoch == 2
|
||||||
|
|
||||||
|
# Another chain
|
||||||
|
another_state = store.block_states[store.finalized_checkpoint.root].copy()
|
||||||
|
# Build block that includes the skipped slots up to finality in chain
|
||||||
|
block = build_empty_block(spec,
|
||||||
|
another_state,
|
||||||
|
spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2)
|
||||||
|
block.body.graffiti = b'\x12' * 32
|
||||||
|
signed_block = state_transition_and_sign_block(spec, another_state, block)
|
||||||
|
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@with_presets([MINIMAL], reason="too slow")
|
||||||
|
def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
transition_unsigned_block(spec, state, block)
|
||||||
|
block.state_root = state.hash_tree_root()
|
||||||
|
store = spec.get_forkchoice_store(state, block)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', block
|
||||||
|
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
pre_finalized_checkpoint_epoch = store.finalized_checkpoint.epoch
|
||||||
|
|
||||||
|
# Finalized
|
||||||
|
for _ in range(3):
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, False, test_steps=test_steps)
|
||||||
|
assert store.finalized_checkpoint.epoch == pre_finalized_checkpoint_epoch + 1
|
||||||
|
|
||||||
|
# Now build a block at later slot than finalized epoch
|
||||||
|
# Includes finalized block in chain, but not at appropriate skip slot
|
||||||
|
pre_state = store.block_states[block.hash_tree_root()].copy()
|
||||||
|
block = build_empty_block(spec,
|
||||||
|
state=pre_state,
|
||||||
|
slot=spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2)
|
||||||
|
block.body.graffiti = b'\x12' * 32
|
||||||
|
signed_block = sign_block(spec, pre_state, block)
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False)
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
@with_presets([MINIMAL], reason="mainnet config requires too many pre-generated public/private keys")
|
||||||
|
def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state):
|
||||||
|
"""
|
||||||
|
Test `should_update_justified_checkpoint`:
|
||||||
|
compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED
|
||||||
|
"""
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# Skip epoch 0 & 1
|
||||||
|
for _ in range(2):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
# Fill epoch 2
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, False, test_steps=test_steps)
|
||||||
|
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
|
||||||
|
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
|
||||||
|
# Skip epoch 3 & 4
|
||||||
|
for _ in range(2):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
# Epoch 5: Attest current epoch
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, False, participation_fn=_drop_random_one_third, test_steps=test_steps)
|
||||||
|
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
|
||||||
|
assert state.current_justified_checkpoint.epoch == 2
|
||||||
|
assert store.justified_checkpoint.epoch == 2
|
||||||
|
assert state.current_justified_checkpoint == store.justified_checkpoint
|
||||||
|
|
||||||
|
# Skip epoch 6
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
pre_state = state.copy()
|
||||||
|
|
||||||
|
# Build a block to justify epoch 5
|
||||||
|
signed_block = state_transition_with_full_block(spec, state, True, True)
|
||||||
|
assert state.finalized_checkpoint.epoch == 0
|
||||||
|
assert state.current_justified_checkpoint.epoch == 5
|
||||||
|
assert state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch
|
||||||
|
assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
|
||||||
|
# Run on_block
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
# Ensure justified_checkpoint has been changed but finality is unchanged
|
||||||
|
assert store.justified_checkpoint.epoch == 5
|
||||||
|
assert store.justified_checkpoint == state.current_justified_checkpoint
|
||||||
|
assert store.finalized_checkpoint.epoch == pre_state.finalized_checkpoint.epoch == 0
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch")
|
||||||
|
@spec_state_test
|
||||||
|
def test_on_block_outside_safe_slots_but_finality(spec, state):
|
||||||
|
"""
|
||||||
|
Test `should_update_justified_checkpoint` case
|
||||||
|
- compute_slots_since_epoch_start(get_current_slot(store)) > SAFE_SLOTS_TO_UPDATE_JUSTIFIED
|
||||||
|
- new_justified_checkpoint and store.justified_checkpoint.root are NOT conflicting
|
||||||
|
|
||||||
|
Thus should_update_justified_checkpoint returns True.
|
||||||
|
|
||||||
|
Part of this script is similar to `test_new_justified_is_later_than_store_justified`.
|
||||||
|
"""
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# Skip epoch 0
|
||||||
|
next_epoch(spec, state)
|
||||||
|
# Fill epoch 1 to 3, attest current epoch
|
||||||
|
for _ in range(3):
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, False, test_steps=test_steps)
|
||||||
|
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
|
||||||
|
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||||
|
|
||||||
|
# Skip epoch 4-6
|
||||||
|
for _ in range(3):
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
# epoch 7
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, True, test_steps=test_steps)
|
||||||
|
assert state.finalized_checkpoint.epoch == 2
|
||||||
|
assert state.current_justified_checkpoint.epoch == 7
|
||||||
|
|
||||||
|
# epoch 8, attest the first 5 blocks
|
||||||
|
state, store, _ = yield from apply_next_slots_with_attestations(
|
||||||
|
spec, state, store, 5, True, True, test_steps)
|
||||||
|
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
|
||||||
|
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7
|
||||||
|
|
||||||
|
# Propose a block at epoch 9, 5th slot
|
||||||
|
next_epoch(spec, state)
|
||||||
|
next_slots(spec, state, 4)
|
||||||
|
signed_block = state_transition_with_full_attestations_block(spec, state, True, True)
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
|
||||||
|
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7
|
||||||
|
|
||||||
|
# Propose an empty block at epoch 10, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot
|
||||||
|
# This block would trigger justification and finality updates on store
|
||||||
|
next_epoch(spec, state)
|
||||||
|
next_slots(spec, state, 4)
|
||||||
|
block = build_empty_block_for_next_slot(spec, state)
|
||||||
|
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||||
|
assert state.finalized_checkpoint.epoch == 7
|
||||||
|
assert state.current_justified_checkpoint.epoch == 8
|
||||||
|
# Step time past safe slots and run on_block
|
||||||
|
if store.time < spec.compute_time_at_slot(state, signed_block.message.slot):
|
||||||
|
time = store.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
|
||||||
|
on_tick_and_append_step(spec, store, time, test_steps)
|
||||||
|
assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
|
||||||
|
yield from add_block(spec, store, signed_block, test_steps)
|
||||||
|
|
||||||
|
# Ensure both the justified and the finalized checkpoints have been updated
|
||||||
|
assert store.finalized_checkpoint.epoch == 7
|
||||||
|
assert store.finalized_checkpoint == state.finalized_checkpoint
|
||||||
|
assert store.justified_checkpoint.epoch == 8
|
||||||
|
assert store.justified_checkpoint == state.current_justified_checkpoint
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch")
|
||||||
|
@spec_state_test
|
||||||
|
def test_new_justified_is_later_than_store_justified(spec, state):
|
||||||
|
"""
|
||||||
|
J: Justified
|
||||||
|
F: Finalized
|
||||||
|
fork_1_state (forked from genesis):
|
||||||
|
epoch
|
||||||
|
[0] <- [1] <- [2] <- [3] <- [4]
|
||||||
|
F J
|
||||||
|
|
||||||
|
fork_2_state (forked from fork_1_state's epoch 2):
|
||||||
|
epoch
|
||||||
|
└──── [3] <- [4] <- [5] <- [6]
|
||||||
|
F J
|
||||||
|
|
||||||
|
fork_3_state (forked from genesis):
|
||||||
|
[0] <- [1] <- [2] <- [3] <- [4] <- [5]
|
||||||
|
F J
|
||||||
|
"""
|
||||||
|
# The 1st fork, from genesis
|
||||||
|
fork_1_state = state.copy()
|
||||||
|
# The 3rd fork, from genesis
|
||||||
|
fork_3_state = state.copy()
|
||||||
|
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# ----- Process fork_1_state
|
||||||
|
# Skip epoch 0
|
||||||
|
next_epoch(spec, fork_1_state)
|
||||||
|
# Fill epoch 1 with previous epoch attestations
|
||||||
|
fork_1_state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, fork_1_state, store, False, True, test_steps=test_steps)
|
||||||
|
|
||||||
|
# Fork `fork_2_state` at the start of epoch 2
|
||||||
|
fork_2_state = fork_1_state.copy()
|
||||||
|
assert spec.get_current_epoch(fork_2_state) == 2
|
||||||
|
|
||||||
|
# Skip epoch 2
|
||||||
|
next_epoch(spec, fork_1_state)
|
||||||
|
# Fill epoch 3 & 4 with previous epoch attestations
|
||||||
|
for _ in range(2):
|
||||||
|
fork_1_state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, fork_1_state, store, False, True, test_steps=test_steps)
|
||||||
|
|
||||||
|
assert fork_1_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
|
||||||
|
assert fork_1_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||||
|
assert store.justified_checkpoint == fork_1_state.current_justified_checkpoint
|
||||||
|
|
||||||
|
# ------ fork_2_state: Create a chain to set store.best_justified_checkpoint
|
||||||
|
# NOTE: The goal is to make `store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch`
|
||||||
|
all_blocks = []
|
||||||
|
|
||||||
|
# Propose an empty block at epoch 2, 1st slot
|
||||||
|
block = build_empty_block_for_next_slot(spec, fork_2_state)
|
||||||
|
signed_block = state_transition_and_sign_block(spec, fork_2_state, block)
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
assert fork_2_state.current_justified_checkpoint.epoch == 0
|
||||||
|
|
||||||
|
# Skip to epoch 4
|
||||||
|
for _ in range(2):
|
||||||
|
next_epoch(spec, fork_2_state)
|
||||||
|
assert fork_2_state.current_justified_checkpoint.epoch == 0
|
||||||
|
|
||||||
|
# Propose a block at epoch 4, 5th slot
|
||||||
|
# Propose a block at epoch 5, 5th slot
|
||||||
|
for _ in range(2):
|
||||||
|
next_epoch(spec, fork_2_state)
|
||||||
|
next_slots(spec, fork_2_state, 4)
|
||||||
|
signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True)
|
||||||
|
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||||
|
assert fork_2_state.current_justified_checkpoint.epoch == 0
|
||||||
|
|
||||||
|
# Propose a block at epoch 6, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot
|
||||||
|
next_epoch(spec, fork_2_state)
|
||||||
|
next_slots(spec, fork_2_state, spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2)
|
||||||
|
signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True)
|
||||||
|
assert fork_2_state.finalized_checkpoint.epoch == 0
|
||||||
|
assert fork_2_state.current_justified_checkpoint.epoch == 5
|
||||||
|
# Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED
|
||||||
|
spec.on_tick(store, store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT)
|
||||||
|
assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
|
||||||
|
# Run on_block
|
||||||
|
yield from add_block(spec, store, signed_block, test_steps)
|
||||||
|
assert store.finalized_checkpoint.epoch == 0
|
||||||
|
assert store.justified_checkpoint.epoch == 3
|
||||||
|
assert store.best_justified_checkpoint.epoch == 5
|
||||||
|
|
||||||
|
# ------ fork_3_state: Create another chain to test the
|
||||||
|
# "Update justified if new justified is later than store justified" case
|
||||||
|
all_blocks = []
|
||||||
|
for _ in range(3):
|
||||||
|
next_epoch(spec, fork_3_state)
|
||||||
|
|
||||||
|
# epoch 3
|
||||||
|
_, signed_blocks, fork_3_state = next_epoch_with_attestations(spec, fork_3_state, True, True)
|
||||||
|
all_blocks += signed_blocks
|
||||||
|
assert fork_3_state.finalized_checkpoint.epoch == 0
|
||||||
|
|
||||||
|
# epoch 4, attest the first 5 blocks
|
||||||
|
_, blocks, fork_3_state = next_slots_with_attestations(spec, fork_3_state, 5, True, True)
|
||||||
|
all_blocks += blocks.copy()
|
||||||
|
assert fork_3_state.finalized_checkpoint.epoch == 0
|
||||||
|
|
||||||
|
# Propose a block at epoch 5, 5th slot
|
||||||
|
next_epoch(spec, fork_3_state)
|
||||||
|
next_slots(spec, fork_3_state, 4)
|
||||||
|
signed_block = state_transition_with_full_block(spec, fork_3_state, True, True)
|
||||||
|
all_blocks.append(signed_block.copy())
|
||||||
|
assert fork_3_state.finalized_checkpoint.epoch == 0
|
||||||
|
|
||||||
|
# Propose a block at epoch 6, 5th slot
|
||||||
|
next_epoch(spec, fork_3_state)
|
||||||
|
next_slots(spec, fork_3_state, 4)
|
||||||
|
signed_block = state_transition_with_full_block(spec, fork_3_state, True, True)
|
||||||
|
all_blocks.append(signed_block.copy())
|
||||||
|
assert fork_3_state.finalized_checkpoint.epoch == 3
|
||||||
|
assert fork_3_state.current_justified_checkpoint.epoch == 4
|
||||||
|
|
||||||
|
# FIXME: pending on the `on_block`, `on_attestation` fix
|
||||||
|
# # Apply blocks of `fork_3_state` to `store`
|
||||||
|
# for block in all_blocks:
|
||||||
|
# if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot):
|
||||||
|
# spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT)
|
||||||
|
# # valid_attestations=False because the attestations are outdated (older than previous epoch)
|
||||||
|
# yield from add_block(spec, store, block, test_steps, allow_invalid_attestations=False)
|
||||||
|
|
||||||
|
# assert store.finalized_checkpoint == fork_3_state.finalized_checkpoint
|
||||||
|
# assert (store.justified_checkpoint
|
||||||
|
# == fork_3_state.current_justified_checkpoint
|
||||||
|
# != store.best_justified_checkpoint)
|
||||||
|
# assert (store.best_justified_checkpoint
|
||||||
|
# == fork_2_state.current_justified_checkpoint)
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state):
|
||||||
|
"""
|
||||||
|
J: Justified
|
||||||
|
F: Finalized
|
||||||
|
state (forked from genesis):
|
||||||
|
epoch
|
||||||
|
[0] <- [1] <- [2] <- [3] <- [4] <- [5]
|
||||||
|
F J
|
||||||
|
|
||||||
|
another_state (forked from epoch 0):
|
||||||
|
└──── [1] <- [2] <- [3] <- [4] <- [5]
|
||||||
|
F J
|
||||||
|
"""
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# ----- Process state
|
||||||
|
# Goal: make `store.finalized_checkpoint.epoch == 0` and `store.justified_checkpoint.epoch == 3`
|
||||||
|
# Skip epoch 0
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
# Forking another_state
|
||||||
|
another_state = state.copy()
|
||||||
|
|
||||||
|
# Fill epoch 1 with previous epoch attestations
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, False, True, test_steps=test_steps)
|
||||||
|
# Skip epoch 2
|
||||||
|
next_epoch(spec, state)
|
||||||
|
# Fill epoch 3 & 4 with previous epoch attestations
|
||||||
|
for _ in range(2):
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, False, True, test_steps=test_steps)
|
||||||
|
|
||||||
|
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0
|
||||||
|
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||||
|
assert store.justified_checkpoint == state.current_justified_checkpoint
|
||||||
|
|
||||||
|
# Create another chain
|
||||||
|
# Goal: make `another_state.finalized_checkpoint.epoch == 2` and `another_state.justified_checkpoint.epoch == 3`
|
||||||
|
all_blocks = []
|
||||||
|
# Fill epoch 1 & 2 with previous + current epoch attestations
|
||||||
|
for _ in range(3):
|
||||||
|
_, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True)
|
||||||
|
all_blocks += signed_blocks
|
||||||
|
|
||||||
|
assert another_state.finalized_checkpoint.epoch == 2
|
||||||
|
assert another_state.current_justified_checkpoint.epoch == 3
|
||||||
|
assert state.finalized_checkpoint != another_state.finalized_checkpoint
|
||||||
|
assert state.current_justified_checkpoint != another_state.current_justified_checkpoint
|
||||||
|
|
||||||
|
# pre_store_justified_checkpoint_root = store.justified_checkpoint.root
|
||||||
|
|
||||||
|
# FIXME: pending on the `on_block`, `on_attestation` fix
|
||||||
|
# # Apply blocks of `another_state` to `store`
|
||||||
|
# for block in all_blocks:
|
||||||
|
# # NOTE: Do not call `on_tick` here
|
||||||
|
# yield from add_block(spec, store, block, test_steps, allow_invalid_attestations=True)
|
||||||
|
|
||||||
|
# finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||||
|
# ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot)
|
||||||
|
# assert ancestor_at_finalized_slot != store.finalized_checkpoint.root
|
||||||
|
|
||||||
|
# assert store.finalized_checkpoint == another_state.finalized_checkpoint
|
||||||
|
# assert store.justified_checkpoint == another_state.current_justified_checkpoint
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
||||||
|
|
||||||
|
|
||||||
|
@with_all_phases
|
||||||
|
@spec_state_test
|
||||||
|
def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state):
|
||||||
|
"""
|
||||||
|
J: Justified
|
||||||
|
F: Finalized
|
||||||
|
state:
|
||||||
|
epoch
|
||||||
|
[0] <- [1] <- [2] <- [3] <- [4] <- [5]
|
||||||
|
F J
|
||||||
|
|
||||||
|
another_state (forked from state at epoch 3):
|
||||||
|
└──── [4] <- [5]
|
||||||
|
F J
|
||||||
|
"""
|
||||||
|
test_steps = []
|
||||||
|
# Initialization
|
||||||
|
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||||
|
yield 'anchor_state', state
|
||||||
|
yield 'anchor_block', anchor_block
|
||||||
|
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||||
|
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||||
|
assert store.time == current_time
|
||||||
|
|
||||||
|
# Process state
|
||||||
|
next_epoch(spec, state)
|
||||||
|
spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
|
||||||
|
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, False, True, test_steps=test_steps)
|
||||||
|
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, True, False, test_steps=test_steps)
|
||||||
|
next_epoch(spec, state)
|
||||||
|
|
||||||
|
for _ in range(2):
|
||||||
|
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||||
|
spec, state, store, False, True, test_steps=test_steps)
|
||||||
|
|
||||||
|
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
|
||||||
|
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
|
||||||
|
assert store.justified_checkpoint == state.current_justified_checkpoint
|
||||||
|
|
||||||
|
# Create another chain
|
||||||
|
# Forking from epoch 3
|
||||||
|
all_blocks = []
|
||||||
|
slot = spec.compute_start_slot_at_epoch(3)
|
||||||
|
block_root = spec.get_block_root_at_slot(state, slot)
|
||||||
|
another_state = store.block_states[block_root].copy()
|
||||||
|
for _ in range(2):
|
||||||
|
_, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, True)
|
||||||
|
all_blocks += signed_blocks
|
||||||
|
|
||||||
|
assert another_state.finalized_checkpoint.epoch == 3
|
||||||
|
assert another_state.current_justified_checkpoint.epoch == 4
|
||||||
|
|
||||||
|
pre_store_justified_checkpoint_root = store.justified_checkpoint.root
|
||||||
|
for block in all_blocks:
|
||||||
|
# FIXME: Once `on_block` and `on_attestation` logic is fixed,
|
||||||
|
# fix test case and remove allow_invalid_attestations flag
|
||||||
|
yield from tick_and_add_block(spec, store, block, test_steps, allow_invalid_attestations=True)
|
||||||
|
|
||||||
|
finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||||
|
ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot)
|
||||||
|
assert ancestor_at_finalized_slot == store.finalized_checkpoint.root
|
||||||
|
|
||||||
|
assert store.finalized_checkpoint == another_state.finalized_checkpoint
|
||||||
|
assert store.justified_checkpoint != another_state.current_justified_checkpoint
|
||||||
|
|
||||||
|
yield 'steps', test_steps
|
|
@ -1,239 +1,47 @@
|
||||||
from copy import deepcopy
|
from copy import deepcopy
|
||||||
|
|
||||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||||
|
from eth2spec.test.context import (
|
||||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
spec_state_test,
|
||||||
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
|
with_all_phases,
|
||||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block, transition_unsigned_block, \
|
|
||||||
build_empty_block
|
|
||||||
from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store
|
|
||||||
from eth2spec.test.helpers.state import next_epoch, state_transition_and_sign_block, transition_to
|
|
||||||
|
|
||||||
|
|
||||||
def run_on_block(spec, store, signed_block, valid=True):
    if not valid:
        try:
            spec.on_block(store, signed_block)
        except AssertionError:
            return
        else:
            assert False

    spec.on_block(store, signed_block)
    assert store.blocks[hash_tree_root(signed_block.message)] == signed_block.message


def apply_next_epoch_with_attestations(spec, state, store):
    _, new_signed_blocks, post_state = next_epoch_with_attestations(spec, state, True, False)
    for signed_block in new_signed_blocks:
        block = signed_block.message
        block_root = hash_tree_root(block)
        store.blocks[block_root] = block
        store.block_states[block_root] = post_state
        last_signed_block = signed_block
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
    return post_state, store, last_signed_block
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
|
||||||
@spec_state_test
|
|
||||||
def test_basic(spec, state):
|
|
||||||
# Initialization
|
|
||||||
store = get_genesis_forkchoice_store(spec, state)
|
|
||||||
time = 100
|
|
||||||
spec.on_tick(store, time)
|
|
||||||
assert store.time == time
|
|
||||||
|
|
||||||
# On receiving a block of `GENESIS_SLOT + 1` slot
|
|
||||||
block = build_empty_block_for_next_slot(spec, state)
|
|
||||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
|
||||||
run_on_block(spec, store, signed_block)
|
|
||||||
|
|
||||||
# On receiving a block of next epoch
|
|
||||||
store.time = time + spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
|
|
||||||
block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
|
|
||||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
|
||||||
|
|
||||||
run_on_block(spec, store, signed_block)
|
|
||||||
|
|
||||||
# TODO: add tests for justified_root and finalized_root
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
|
||||||
@spec_state_test
|
|
||||||
def test_on_block_checkpoints(spec, state):
|
|
||||||
# Initialization
|
|
||||||
store = get_genesis_forkchoice_store(spec, state)
|
|
||||||
time = 100
|
|
||||||
spec.on_tick(store, time)
|
|
||||||
|
|
||||||
next_epoch(spec, state)
|
|
||||||
spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
|
|
||||||
state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store)
|
|
||||||
next_epoch(spec, state)
|
|
||||||
spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
|
|
||||||
last_block_root = hash_tree_root(last_signed_block.message)
|
|
||||||
|
|
||||||
# Mock the finalized_checkpoint
|
|
||||||
fin_state = store.block_states[last_block_root]
|
|
||||||
fin_state.finalized_checkpoint = (
|
|
||||||
store.block_states[last_block_root].current_justified_checkpoint
|
|
||||||
)
|
)
|
||||||
|
from eth2spec.test.helpers.block import (
|
||||||
block = build_empty_block_for_next_slot(spec, fin_state)
|
build_empty_block_for_next_slot,
|
||||||
signed_block = state_transition_and_sign_block(spec, deepcopy(fin_state), block)
|
|
||||||
run_on_block(spec, store, signed_block)
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
|
||||||
@spec_state_test
|
|
||||||
def test_on_block_future_block(spec, state):
|
|
||||||
# Initialization
|
|
||||||
store = get_genesis_forkchoice_store(spec, state)
|
|
||||||
|
|
||||||
# do not tick time
|
|
||||||
|
|
||||||
# Fail receiving block of `GENESIS_SLOT + 1` slot
|
|
||||||
block = build_empty_block_for_next_slot(spec, state)
|
|
||||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
|
||||||
run_on_block(spec, store, signed_block, False)
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
|
|
||||||
@spec_state_test
|
|
||||||
def test_on_block_bad_parent_root(spec, state):
|
|
||||||
# Initialization
|
|
||||||
store = get_genesis_forkchoice_store(spec, state)
|
|
||||||
time = 100
|
|
||||||
spec.on_tick(store, time)
|
|
||||||
|
|
||||||
# Fail receiving block of `GENESIS_SLOT + 1` slot
|
|
||||||
block = build_empty_block_for_next_slot(spec, state)
|
|
||||||
transition_unsigned_block(spec, state, block)
|
|
||||||
block.state_root = state.hash_tree_root()
|
|
||||||
|
|
||||||
block.parent_root = b'\x45' * 32
|
|
||||||
|
|
||||||
signed_block = sign_block(spec, state, block)
|
|
||||||
|
|
||||||
run_on_block(spec, store, signed_block, False)
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases
@spec_state_test
def test_on_block_before_finalized(spec, state):
    # Initialization
    store = get_genesis_forkchoice_store(spec, state)
    time = 100
    spec.on_tick(store, time)

    store.finalized_checkpoint = spec.Checkpoint(
        epoch=store.finalized_checkpoint.epoch + 2,
        root=store.finalized_checkpoint.root
    )
    # Fail receiving block of `GENESIS_SLOT + 1` slot
    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)
    run_on_block(spec, store, signed_block, False)
from eth2spec.test.helpers.fork_choice import (
    get_genesis_forkchoice_store,
    run_on_block,
    apply_next_epoch_with_attestations,

@with_all_phases
@spec_state_test
def test_on_block_finalized_skip_slots(spec, state):
    # Initialization
    store = get_genesis_forkchoice_store(spec, state)
    time = 100
    spec.on_tick(store, time)

    store.finalized_checkpoint = spec.Checkpoint(
        epoch=store.finalized_checkpoint.epoch + 2,
        root=store.finalized_checkpoint.root
    )
    # Build block that includes the skipped slots up to finality in chain
    block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2)
    signed_block = state_transition_and_sign_block(spec, state, block)
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
    run_on_block(spec, store, signed_block)
from eth2spec.test.helpers.state import (
    next_epoch,
    state_transition_and_sign_block,

@with_all_phases
@spec_state_test
def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
    # Initialization
    transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
    block = build_empty_block_for_next_slot(spec, state)
    transition_unsigned_block(spec, state, block)
    block.state_root = state.hash_tree_root()
    store = spec.get_forkchoice_store(state, block)
    store.finalized_checkpoint = spec.Checkpoint(
        epoch=store.finalized_checkpoint.epoch + 2,
        root=store.finalized_checkpoint.root
    )

    # First transition through the epoch to ensure no skipped slots
    state, store, _ = apply_next_epoch_with_attestations(spec, state, store)

    # Now build a block at later slot than finalized epoch
    # Includes finalized block in chain, but not at appropriate skip slot
    block = build_empty_block(spec, state, spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2)
    signed_block = state_transition_and_sign_block(spec, state, block)
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
    run_on_block(spec, store, signed_block, False)

@with_all_phases
@spec_state_test
def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state):
    # Initialization
    store = get_genesis_forkchoice_store(spec, state)
    time = 0
    spec.on_tick(store, time)

    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
    state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store)
    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
    last_block_root = hash_tree_root(last_signed_block.message)

    # Mock the justified checkpoint
    just_state = store.block_states[last_block_root]
    new_justified = spec.Checkpoint(
        epoch=just_state.current_justified_checkpoint.epoch + 1,
        root=b'\x77' * 32,
    )
    just_state.current_justified_checkpoint = new_justified

    block = build_empty_block_for_next_slot(spec, just_state)
    signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block)
    assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
    run_on_block(spec, store, signed_block)

    assert store.justified_checkpoint == new_justified

@with_all_phases
@spec_state_test
def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
    """
    NOTE: test_new_justified_is_later_than_store_justified also tests best_justified_checkpoint
    """
    # Initialization
    store = get_genesis_forkchoice_store(spec, state)
    time = 0
    spec.on_tick(store, time)

    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
    spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)
    state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store)
    state, store, last_signed_block = yield from apply_next_epoch_with_attestations(
        spec, state, store, True, False)
    last_block_root = hash_tree_root(last_signed_block.message)

    # Mock fictitious justified checkpoint in store
    # NOTE: Mock fictitious justified checkpoint in store
    store.justified_checkpoint = spec.Checkpoint(
        epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
        root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
    )

    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
    spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)

    # Create new higher justified checkpoint not in branch of store's justified checkpoint
    just_block = build_empty_block_for_next_slot(spec, state)
@ -243,11 +51,13 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
    spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT)
    assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED

    previously_finalized = store.finalized_checkpoint
    previously_justified = store.justified_checkpoint

    # Add a series of new blocks with "better" justifications
    best_justified_checkpoint = spec.Checkpoint(epoch=0)
    for i in range(3, 0, -1):
        # Mutate store
        just_state = store.block_states[last_block_root]
        new_justified = spec.Checkpoint(
            epoch=previously_justified.epoch + i,
@ -261,63 +71,17 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
        block = build_empty_block_for_next_slot(spec, just_state)
        signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block)

        # NOTE: Mock store so that the modified state could be accessed
        parent_block = store.blocks[last_block_root].copy()
        parent_block.state_root = just_state.hash_tree_root()
        store.blocks[block.parent_root] = parent_block
        store.block_states[block.parent_root] = just_state.copy()
        assert block.parent_root in store.blocks.keys()
        assert block.parent_root in store.block_states.keys()

        run_on_block(spec, store, signed_block)

    assert store.finalized_checkpoint == previously_finalized
    assert store.justified_checkpoint == previously_justified
    # ensure the best from the series was stored
    assert store.best_justified_checkpoint == best_justified_checkpoint

@with_all_phases
@spec_state_test
def test_on_block_outside_safe_slots_but_finality(spec, state):
    # Initialization
    store = get_genesis_forkchoice_store(spec, state)
    time = 100
    spec.on_tick(store, time)

    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)
    state, store, last_signed_block = apply_next_epoch_with_attestations(spec, state, store)
    last_block_root = hash_tree_root(last_signed_block.message)

    # Mock fictitious justified checkpoint in store
    store.justified_checkpoint = spec.Checkpoint(
        epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot),
        root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000")
    )

    next_epoch(spec, state)
    spec.on_tick(store, store.time + state.slot * spec.config.SECONDS_PER_SLOT)

    # Create new higher justified checkpoint not in branch of store's justified checkpoint
    just_block = build_empty_block_for_next_slot(spec, state)
    store.blocks[just_block.hash_tree_root()] = just_block

    # Step time past safe slots
    spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT)
    assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED

    # Mock justified and finalized update in state
    just_fin_state = store.block_states[last_block_root]
    new_justified = spec.Checkpoint(
        epoch=spec.compute_epoch_at_slot(just_block.slot) + 1,
        root=just_block.hash_tree_root(),
    )
    assert new_justified.epoch > store.justified_checkpoint.epoch
    new_finalized = spec.Checkpoint(
        epoch=spec.compute_epoch_at_slot(just_block.slot),
        root=just_block.parent_root,
    )
    assert new_finalized.epoch > store.finalized_checkpoint.epoch
    just_fin_state.current_justified_checkpoint = new_justified
    just_fin_state.finalized_checkpoint = new_finalized

    # Build and add block that includes the new justified/finalized info
    block = build_empty_block_for_next_slot(spec, just_fin_state)
    signed_block = state_transition_and_sign_block(spec, deepcopy(just_fin_state), block)

    run_on_block(spec, store, signed_block)

    assert store.finalized_checkpoint == new_finalized
    assert store.justified_checkpoint == new_justified
@ -10,9 +10,8 @@ bls = py_ecc_bls

STUB_SIGNATURE = b'\x11' * 96
STUB_PUBKEY = b'\x22' * 48
Z1_PUBKEY = b'\xc0' + b'\x00' * 47
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
STUB_COORDINATES = _signature_to_G2(Z2_SIGNATURE)
G2_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 95
STUB_COORDINATES = _signature_to_G2(G2_POINT_AT_INFINITY)


def use_milagro():
@ -95,6 +94,12 @@ def signature_to_G2(signature):

@only_with_bls(alt_return=STUB_PUBKEY)
def AggregatePKs(pubkeys):
    if bls == py_ecc_bls:
        assert all(bls.KeyValidate(pubkey) for pubkey in pubkeys)
    elif bls == milagro_bls:
        # milagro_bls._AggregatePKs checks KeyValidate internally
        pass

    return bls._AggregatePKs(list(pubkeys))

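For context, a minimal usage sketch of this wrapper, assuming the `eth2spec` package is installed and the py_ecc backend is selected; the private keys below are arbitrary illustrative values:

```python
from eth2spec.utils import bls

bls.use_py_ecc()  # select the py_ecc backend, as the generators below do

# Arbitrary illustrative private keys; any valid BLS secret scalars work.
privkeys = [1, 2, 3]
pubkeys = [bls.SkToPk(privkey) for privkey in privkeys]

# Under py_ecc, AggregatePKs first KeyValidate-checks every input pubkey,
# then delegates to the backend's _AggregatePKs.
aggregate_pubkey = bls.AggregatePKs(pubkeys)
assert len(aggregate_pubkey) == 48  # compressed G1 point
```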
@ -7,6 +7,8 @@ The BLS test suite runner has the following handlers:

- [`aggregate_verify`](./aggregate_verify.md)
- [`aggregate`](./aggregate.md)
- [`eth_aggregate_pubkeys`](./eth_aggregate_pubkeys.md)
- [`eth_fast_aggregate_verify`](./eth_fast_aggregate_verify.md)
- [`fast_aggregate_verify`](./fast_aggregate_verify.md)
- [`sign`](./sign.md)
- [`verify`](./verify.md)
@ -14,6 +14,8 @@ output: BLS Signature -- expected output, single BLS signature or empty.
- `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`.
- No output value if the input is invalid.

All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.

## Condition

The `aggregate` handler should aggregate the signatures in the `input`, and the result should match the expected `output`.
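As a rough illustration of that condition, the expected `output` can be reproduced with the repository's own BLS wrapper. This is a sketch only; the private keys and messages are arbitrary:

```python
from eth2spec.utils import bls

bls.use_py_ecc()

# Two signatures over arbitrary 32-byte messages (illustrative values only).
signatures = [
    bls.Sign(1, b'\x12' * 32),
    bls.Sign(2, b'\x34' * 32),
]

# The handler's expected `output` is simply the aggregate of the `input`
# signatures, hex-encoded with a `0x` prefix in data.yaml.
aggregate = bls.Aggregate(signatures)
assert len(aggregate) == 96  # compressed G2 point
```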
@ -8,10 +8,17 @@ The test data is declared in a `data.yaml` file:

```yaml
input:
  pubkeys: List[bytes48] -- the pubkeys
  pubkeys: List[BLS Pubkey] -- the pubkeys
  messages: List[bytes32] -- the messages
  signature: bytes96 -- the signature to verify against pubkeys and messages
  signature: BLS Signature -- the signature to verify against pubkeys and messages
output: bool -- VALID or INVALID
output: bool -- true (VALID) or false (INVALID)
```

- `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`.
- `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`.

All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.

## Condition

The `aggregate_verify` handler should verify the signature with pubkeys and messages in the `input`, and the result should match the expected `output`.
@ -0,0 +1,19 @@
# Test format: Ethereum-customized BLS pubkey aggregation

A BLS pubkey aggregation combines a series of pubkeys into a single pubkey.

## Test case format

The test data is declared in a `data.yaml` file:

```yaml
input: List[BLS Pubkey] -- list of input BLS pubkeys
output: BLS Pubkey -- expected output, single BLS pubkey or empty.
```

- `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`.
- No output value if the input is invalid.

## Condition

The `eth_aggregate_pubkeys` handler should aggregate the pubkeys in the `input`, and the result should match the expected `output`.
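A minimal sketch of how a consumer might check one of these cases, assuming the `data.yaml` is loaded with PyYAML and `eth_aggregate_pubkeys` is the client implementation under test; the loader and function names here are illustrative, not part of the repository:

```python
import yaml


def check_eth_aggregate_pubkeys_case(path, eth_aggregate_pubkeys):
    """Check one eth_aggregate_pubkeys test case against a client implementation."""
    with open(path) as f:
        case = yaml.safe_load(f)
    # Inputs are 0x-prefixed hex strings of compressed G1 points.
    pubkeys = [bytes.fromhex(p[2:]) for p in case['input']]
    if case.get('output') is None:
        # No output value: the input is invalid and must be rejected.
        try:
            eth_aggregate_pubkeys(pubkeys)
        except Exception:
            return
        raise AssertionError('invalid input was not rejected')
    expected = bytes.fromhex(case['output'][2:])
    assert eth_aggregate_pubkeys(pubkeys) == expected
```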
@ -0,0 +1,24 @@
# Test format: Ethereum-customized BLS fast aggregate verify

Verify the signature against the given pubkeys and one message.

## Test case format

The test data is declared in a `data.yaml` file:

```yaml
input:
  pubkeys: List[BLS Pubkey] -- list of input BLS pubkeys
  message: bytes32 -- the message
  signature: BLS Signature -- the signature to verify against pubkeys and message
output: bool -- true (VALID) or false (INVALID)
```

- `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`.
- `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`.

All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.

## Condition

The `eth_fast_aggregate_verify` handler should verify the signature with pubkeys and message in the `input`, and the result should match the expected `output`.
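The only behavioural difference from the plain `fast_aggregate_verify` handler is the empty-pubkeys case. A sketch of the Altair helper these vectors exercise, using the constant and wrapper names from this repository:

```python
from eth2spec.utils import bls

G2_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 95


def eth_fast_aggregate_verify(pubkeys, message, signature):
    # Unlike FastAggregateVerify, an empty pubkey list with the G2 point at
    # infinity as signature is explicitly VALID; everything else defers to
    # the standard check.
    if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
        return True
    return bls.FastAggregateVerify(pubkeys, message, signature)
```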
@ -1,4 +1,4 @@
# Test format: BLS sign message
# Test format: BLS fast aggregate verify

Verify the signature against the given pubkeys and one message.

@ -8,10 +8,17 @@ The test data is declared in a `data.yaml` file:

```yaml
input:
  pubkeys: List[bytes48] -- the pubkey
  pubkeys: List[BLS Pubkey] -- list of input BLS pubkeys
  message: bytes32 -- the message
  signature: bytes96 -- the signature to verify against pubkeys and message
  signature: BLS Signature -- the signature to verify against pubkeys and message
output: bool -- VALID or INVALID
output: bool -- true (VALID) or false (INVALID)
```

- `BLS Pubkey` here is encoded as a string: hexadecimal encoding of 48 bytes (96 nibbles), prefixed with `0x`.
- `BLS Signature` here is encoded as a string: hexadecimal encoding of 96 bytes (192 nibbles), prefixed with `0x`.

All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.

## Condition

The `fast_aggregate_verify` handler should verify the signature with pubkeys and message in the `input`, and the result should match the expected `output`.
@ -28,7 +28,11 @@ The steps to execute in sequence. There may be multiple items of the following types:
The parameter that is required for executing `on_tick(store, time)`.

```yaml
{ tick: int } -- to execute `on_tick(store, time)`
{
    tick: int -- to execute `on_tick(store, time)`.
    valid: bool -- optional, default to `true`.
                   If it's `false`, this execution step is expected to be invalid.
}
```

After this step, the `store` object may have been updated.
@ -38,7 +42,12 @@ After this step, the `store` object may have been updated.
The parameter that is required for executing `on_attestation(store, attestation)`.

```yaml
{ attestation: string } -- the name of the `attestation_<32-byte-root>.ssz_snappy` file. To execute `on_attestation(store, attestation)` with the given attestation.
{
    attestation: string -- the name of the `attestation_<32-byte-root>.ssz_snappy` file.
                           To execute `on_attestation(store, attestation)` with the given attestation.
    valid: bool -- optional, default to `true`.
                   If it's `false`, this execution step is expected to be invalid.
}
```
The file is located in the same folder (see below).

@ -49,7 +58,12 @@ After this step, the `store` object may have been updated.
The parameter that is required for executing `on_block(store, block)`.

```yaml
{ block: string } -- the name of the `block_<32-byte-root>.ssz_snappy` file. To execute `on_block(store, block)` with the given attestation.
{
    block: string -- the name of the `block_<32-byte-root>.ssz_snappy` file.
                     To execute `on_block(store, block)` with the given block.
    valid: bool -- optional, default to `true`.
                   If it's `false`, this execution step is expected to be invalid.
}
```
The file is located in the same folder (see below).

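Putting the three step types together, a test runner might consume a case roughly as follows. This is a sketch only: the snappy/SSZ loading helper, the exact file naming, and the assumption that an invalid step surfaces as an `AssertionError` are illustrative, not part of the format definition:

```python
import yaml
import snappy  # python-snappy, assumed available


def load_ssz_snappy(path, ssz_type):
    # Hypothetical helper: decompress and deserialize an .ssz_snappy file.
    with open(path, 'rb') as f:
        return ssz_type.decode_bytes(snappy.uncompress(f.read()))


def run_steps(spec, store, case_dir):
    with open(f'{case_dir}/steps.yaml') as f:
        steps = yaml.safe_load(f)
    for step in steps:
        valid = step.get('valid', True)  # `valid` defaults to true
        try:
            if 'tick' in step:
                spec.on_tick(store, step['tick'])
            elif 'attestation' in step:
                attestation = load_ssz_snappy(
                    f"{case_dir}/{step['attestation']}.ssz_snappy", spec.Attestation)
                spec.on_attestation(store, attestation)
            elif 'block' in step:
                block = load_ssz_snappy(
                    f"{case_dir}/{step['block']}.ssz_snappy", spec.SignedBeaconBlock)
                spec.on_block(store, block)
        except AssertionError:
            assert not valid  # an invalid step is expected to be rejected
        else:
            assert valid
```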
@ -12,8 +12,10 @@ from eth_utils import (
import milagro_bls_binding as milagro_bls

from eth2spec.utils import bls
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.test.helpers.typing import SpecForkName
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.altair import spec


def to_bytes(i):
@ -51,9 +53,12 @@ PRIVKEYS = [
]
PUBKEYS = [bls.SkToPk(privkey) for privkey in PRIVKEYS]

Z1_PUBKEY = b'\xc0' + b'\x00' * 47
NO_SIGNATURE = b'\x00' * 96
Z2_SIGNATURE = b'\xc0' + b'\x00' * 95
ZERO_PUBKEY = b'\x00' * 48
G1_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 47
ZERO_SIGNATURE = b'\x00' * 96
G2_POINT_AT_INFINITY = b'\xc0' + b'\x00' * 95

ZERO_PRIVKEY = 0
ZERO_PRIVKEY_BYTES = b'\x00' * 32

@ -146,13 +151,13 @@ def case02_verify():
    }

    # Invalid pubkey and signature with the point at infinity
    assert not bls.Verify(Z1_PUBKEY, SAMPLE_MESSAGE, Z2_SIGNATURE)
    assert not bls.Verify(G1_POINT_AT_INFINITY, SAMPLE_MESSAGE, G2_POINT_AT_INFINITY)
    assert not milagro_bls.Verify(Z1_PUBKEY, SAMPLE_MESSAGE, Z2_SIGNATURE)
    assert not milagro_bls.Verify(G1_POINT_AT_INFINITY, SAMPLE_MESSAGE, G2_POINT_AT_INFINITY)
    yield f'verify_infinity_pubkey_and_infinity_signature', {
        'input': {
            'pubkey': encode_hex(Z1_PUBKEY),
            'pubkey': encode_hex(G1_POINT_AT_INFINITY),
            'message': encode_hex(SAMPLE_MESSAGE),
            'signature': encode_hex(Z2_SIGNATURE),
            'signature': encode_hex(G2_POINT_AT_INFINITY),
        },
        'output': False,
    }
@ -178,10 +183,10 @@ def case03_aggregate():
    }

    # Valid to aggregate G2 point at infinity
    aggregate_sig = bls.Aggregate([Z2_SIGNATURE])
    aggregate_sig = bls.Aggregate([G2_POINT_AT_INFINITY])
    assert aggregate_sig == milagro_bls.Aggregate([Z2_SIGNATURE]) == Z2_SIGNATURE
    assert aggregate_sig == milagro_bls.Aggregate([G2_POINT_AT_INFINITY]) == G2_POINT_AT_INFINITY
    yield f'aggregate_infinity_signature', {
        'input': [encode_hex(Z2_SIGNATURE)],
        'input': [encode_hex(G2_POINT_AT_INFINITY)],
        'output': encode_hex(aggregate_sig),
    }

@ -237,32 +242,32 @@ def case04_fast_aggregate_verify():
    }

    # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE
    assert not bls.FastAggregateVerify([], message, Z2_SIGNATURE)
    assert not bls.FastAggregateVerify([], message, G2_POINT_AT_INFINITY)
    assert not milagro_bls.FastAggregateVerify([], message, Z2_SIGNATURE)
    assert not milagro_bls.FastAggregateVerify([], message, G2_POINT_AT_INFINITY)
    yield f'fast_aggregate_verify_na_pubkeys_and_infinity_signature', {
        'input': {
            'pubkeys': [],
            'message': encode_hex(message),
            'signature': encode_hex(Z2_SIGNATURE),
            'signature': encode_hex(G2_POINT_AT_INFINITY),
        },
        'output': False,
    }

    # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00...
    assert not bls.FastAggregateVerify([], message, NO_SIGNATURE)
    assert not bls.FastAggregateVerify([], message, ZERO_SIGNATURE)
    assert not milagro_bls.FastAggregateVerify([], message, NO_SIGNATURE)
    assert not milagro_bls.FastAggregateVerify([], message, ZERO_SIGNATURE)
    yield f'fast_aggregate_verify_na_pubkeys_and_na_signature', {
    yield f'fast_aggregate_verify_na_pubkeys_and_zero_signature', {
        'input': {
            'pubkeys': [],
            'message': encode_hex(message),
            'signature': encode_hex(NO_SIGNATURE),
            'signature': encode_hex(ZERO_SIGNATURE),
        },
        'output': False,
    }

    # Invalid pubkeys and signature -- pubkeys contains point at infinity
    pubkeys = PUBKEYS.copy()
    pubkeys_with_infinity = pubkeys + [Z1_PUBKEY]
    pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY]
    signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS]
    aggregate_signature = bls.Aggregate(signatures)
    assert not bls.FastAggregateVerify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature)
@ -317,31 +322,31 @@ def case05_aggregate_verify():
    }

    # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == Z1_SIGNATURE
    assert not bls.AggregateVerify([], [], Z2_SIGNATURE)
    assert not bls.AggregateVerify([], [], G2_POINT_AT_INFINITY)
    assert not milagro_bls.AggregateVerify([], [], Z2_SIGNATURE)
    assert not milagro_bls.AggregateVerify([], [], G2_POINT_AT_INFINITY)
    yield f'aggregate_verify_na_pubkeys_and_infinity_signature', {
        'input': {
            'pubkeys': [],
            'messages': [],
            'signature': encode_hex(Z2_SIGNATURE),
            'signature': encode_hex(G2_POINT_AT_INFINITY),
        },
        'output': False,
    }

    # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00...
    assert not bls.AggregateVerify([], [], NO_SIGNATURE)
    assert not bls.AggregateVerify([], [], ZERO_SIGNATURE)
    assert not milagro_bls.AggregateVerify([], [], NO_SIGNATURE)
    assert not milagro_bls.AggregateVerify([], [], ZERO_SIGNATURE)
    yield f'aggregate_verify_na_pubkeys_and_na_signature', {
    yield f'aggregate_verify_na_pubkeys_and_zero_signature', {
        'input': {
            'pubkeys': [],
            'messages': [],
            'signature': encode_hex(NO_SIGNATURE),
            'signature': encode_hex(ZERO_SIGNATURE),
        },
        'output': False,
    }

    # Invalid pubkeys and signature -- pubkeys contains point at infinity
    pubkeys_with_infinity = pubkeys + [Z1_PUBKEY]
    pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY]
    messages_with_sample = messages + [SAMPLE_MESSAGE]
    assert not bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature)
    assert not milagro_bls.AggregateVerify(pubkeys_with_infinity, messages_with_sample, aggregate_signature)
@ -355,7 +360,150 @@ def case05_aggregate_verify():
    }

def create_provider(handler_name: str,
def case06_eth_aggregate_pubkeys():
    for pubkey in PUBKEYS:
        encoded_pubkey = encode_hex(pubkey)
        aggregate_pubkey = spec.eth_aggregate_pubkeys([pubkey])
        # Should be unchanged
        assert aggregate_pubkey == milagro_bls._AggregatePKs([pubkey]) == pubkey
        # Valid pubkey
        yield f'eth_aggregate_pubkeys_valid_{(hash(bytes(encoded_pubkey, "utf-8"))[:8]).hex()}', {
            'input': [encode_hex(pubkey)],
            'output': encode_hex(aggregate_pubkey),
        }

    # Valid pubkeys
    aggregate_pubkey = spec.eth_aggregate_pubkeys(PUBKEYS)
    assert aggregate_pubkey == milagro_bls._AggregatePKs(PUBKEYS)
    yield f'eth_aggregate_pubkeys_valid_pubkeys', {
        'input': [encode_hex(pubkey) for pubkey in PUBKEYS],
        'output': encode_hex(aggregate_pubkey),
    }

    # Invalid pubkeys -- len(pubkeys) == 0
    expect_exception(spec.eth_aggregate_pubkeys, [])
    expect_exception(milagro_bls._AggregatePKs, [])
    yield f'eth_aggregate_pubkeys_empty_list', {
        'input': [],
        'output': None,
    }

    # Invalid pubkeys -- [ZERO_PUBKEY]
    expect_exception(spec.eth_aggregate_pubkeys, [ZERO_PUBKEY])
    expect_exception(milagro_bls._AggregatePKs, [ZERO_PUBKEY])
    yield f'eth_aggregate_pubkeys_zero_pubkey', {
        'input': [encode_hex(ZERO_PUBKEY)],
        'output': None,
    }

    # Invalid pubkeys -- G1 point at infinity
    expect_exception(spec.eth_aggregate_pubkeys, [G1_POINT_AT_INFINITY])
    expect_exception(milagro_bls._AggregatePKs, [G1_POINT_AT_INFINITY])
    yield f'eth_aggregate_pubkeys_infinity_pubkey', {
        'input': [encode_hex(G1_POINT_AT_INFINITY)],
        'output': None,
    }

    # Invalid pubkeys -- b'\x40\x00\x00\x00....\x00' pubkey
    x40_pubkey = b'\x40' + b'\00' * 47
    expect_exception(spec.eth_aggregate_pubkeys, [x40_pubkey])
    expect_exception(milagro_bls._AggregatePKs, [x40_pubkey])
    yield f'eth_aggregate_pubkeys_x40_pubkey', {
        'input': [encode_hex(x40_pubkey)],
        'output': None,
    }


def case07_eth_fast_aggregate_verify():
    """
    Similar to `case04_fast_aggregate_verify` except for the empty case
    """
    for i, message in enumerate(MESSAGES):
        privkeys = PRIVKEYS[:i + 1]
        sigs = [bls.Sign(privkey, message) for privkey in privkeys]
        aggregate_signature = bls.Aggregate(sigs)
        pubkeys = [bls.SkToPk(privkey) for privkey in privkeys]
        pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys]

        # Valid signature
        identifier = f'{pubkeys_serial}_{encode_hex(message)}'
        assert spec.eth_fast_aggregate_verify(pubkeys, message, aggregate_signature)
        yield f'eth_fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
            'input': {
                'pubkeys': pubkeys_serial,
                'message': encode_hex(message),
                'signature': encode_hex(aggregate_signature),
            },
            'output': True,
        }

        # Invalid signature -- extra pubkey
        pubkeys_extra = pubkeys + [bls.SkToPk(PRIVKEYS[-1])]
        pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra]
        identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}'
        assert not spec.eth_fast_aggregate_verify(pubkeys_extra, message, aggregate_signature)
        yield f'eth_fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
            'input': {
                'pubkeys': pubkeys_extra_serial,
                'message': encode_hex(message),
                'signature': encode_hex(aggregate_signature),
            },
            'output': False,
        }

        # Invalid signature -- tampered with signature
        tampered_signature = aggregate_signature[:-4] + b'\xff\xff\xff\xff'
        identifier = f'{pubkeys_serial}_{encode_hex(message)}'
        assert not spec.eth_fast_aggregate_verify(pubkeys, message, tampered_signature)
        yield f'eth_fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
            'input': {
                'pubkeys': pubkeys_serial,
                'message': encode_hex(message),
                'signature': encode_hex(tampered_signature),
            },
            'output': False,
        }

    # NOTE: Unlike `FastAggregateVerify`, len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY is VALID
    assert spec.eth_fast_aggregate_verify([], message, G2_POINT_AT_INFINITY)
    yield f'eth_fast_aggregate_verify_na_pubkeys_and_infinity_signature', {
        'input': {
            'pubkeys': [],
            'message': encode_hex(message),
            'signature': encode_hex(G2_POINT_AT_INFINITY),
        },
        'output': True,
    }

    # Invalid pubkeys and signature -- len(pubkeys) == 0 and signature == 0x00...
    assert not spec.eth_fast_aggregate_verify([], message, ZERO_SIGNATURE)
    yield f'eth_fast_aggregate_verify_na_pubkeys_and_zero_signature', {
        'input': {
            'pubkeys': [],
            'message': encode_hex(message),
            'signature': encode_hex(ZERO_SIGNATURE),
        },
        'output': False,
    }

    # Invalid pubkeys and signature -- pubkeys contains point at infinity
    pubkeys = PUBKEYS.copy()
    pubkeys_with_infinity = pubkeys + [G1_POINT_AT_INFINITY]
    signatures = [bls.Sign(privkey, SAMPLE_MESSAGE) for privkey in PRIVKEYS]
    aggregate_signature = bls.Aggregate(signatures)
    assert not spec.eth_fast_aggregate_verify(pubkeys_with_infinity, SAMPLE_MESSAGE, aggregate_signature)
    yield f'eth_fast_aggregate_verify_infinity_pubkey', {
        'input': {
            'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity],
            'message': encode_hex(SAMPLE_MESSAGE),
            'signature': encode_hex(aggregate_signature),
        },
        'output': False,
    }


def create_provider(fork_name: SpecForkName,
                    handler_name: str,
                    test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider:

    def prepare_fn() -> None:
@ -368,7 +516,7 @@ def create_provider(handler_name: str,
        print(data)
        (case_name, case_content) = data
        yield gen_typing.TestCase(
            fork_name=PHASE0,
            fork_name=fork_name,
            preset_name='general',
            runner_name='bls',
            handler_name=handler_name,
@ -383,9 +531,13 @@ def create_provider(handler_name: str,
if __name__ == "__main__":
    bls.use_py_ecc()  # Py-ecc is chosen instead of Milagro, since the code is better understood to be correct.
    gen_runner.run_generator("bls", [
        create_provider('sign', case01_sign),
        create_provider('verify', case02_verify),
        create_provider('aggregate', case03_aggregate),
        create_provider('fast_aggregate_verify', case04_fast_aggregate_verify),
        create_provider('aggregate_verify', case05_aggregate_verify),
        # PHASE0
        create_provider(PHASE0, 'sign', case01_sign),
        create_provider(PHASE0, 'verify', case02_verify),
        create_provider(PHASE0, 'aggregate', case03_aggregate),
        create_provider(PHASE0, 'fast_aggregate_verify', case04_fast_aggregate_verify),
        create_provider(PHASE0, 'aggregate_verify', case05_aggregate_verify),
        # ALTAIR
        create_provider(ALTAIR, 'eth_aggregate_pubkeys', case06_eth_aggregate_pubkeys),
        create_provider(ALTAIR, 'eth_fast_aggregate_verify', case07_eth_fast_aggregate_verify),
    ])

@ -5,6 +5,7 @@ from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
if __name__ == "__main__":
    phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [
        'get_head',
        'on_block',
    ]}
    # No additional Altair specific finality tests, yet.
    altair_mods = phase_0_mods