Merge branch 'dev' into lc-gindexname

commit d9e53cbaf5
@@ -94,6 +94,13 @@ MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 8
# ---------------------------------------------------------------
# 40%
PROPOSER_SCORE_BOOST: 40
# 20%
REORG_HEAD_WEIGHT_THRESHOLD: 20
# 160%
REORG_PARENT_WEIGHT_THRESHOLD: 160
# `2` epochs
REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2


# Deposit contract
# ---------------------------------------------------------------
@@ -94,6 +94,12 @@ MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 4
# ---------------------------------------------------------------
# 40%
PROPOSER_SCORE_BOOST: 40
# 20%
REORG_HEAD_WEIGHT_THRESHOLD: 20
# 160%
REORG_PARENT_WEIGHT_THRESHOLD: 160
# `2` epochs
REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2


# Deposit contract
@@ -8,3 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096
MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6
# `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 17
@@ -8,3 +8,5 @@ FIELD_ELEMENTS_PER_BLOB: 4096
MAX_BLOB_COMMITMENTS_PER_BLOCK: 16
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6
# [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9
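The two preset values above follow directly from the formula in the comments. A quick, self-contained check of that arithmetic; the generalized index `27` for `blob_kzg_commitments` inside `BeaconBlockBody` is an assumption used for illustration (it matches the `floorlog2(...) = 4` term in the comments), not a value defined in this diff:

```python
import math

def floorlog2(x: int) -> int:
    return int(math.floor(math.log2(x)))

def ceillog2(x: int) -> int:
    return int(math.ceil(math.log2(x)))

# Assumed gindex of `blob_kzg_commitments` inside `BeaconBlockBody`
# (12 fields padded to 16 leaves, field number 11 -> 16 + 11 = 27).
BLOB_KZG_COMMITMENTS_GINDEX = 27

for max_commitments, expected in ((4096, 17), (16, 9)):  # mainnet, minimal
    depth = floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(max_commitments)
    assert depth == expected
```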
@@ -68,8 +68,7 @@ def objects_to_spec(preset_name: str,
        if k in [
            "ceillog2",
            "floorlog2",
            "compute_merkle_proof_for_block_body",
            "compute_merkle_proof_for_state",
            "compute_merkle_proof",
        ]:
            del spec_object.functions[k]

@@ -111,8 +110,9 @@ def objects_to_spec(preset_name: str,
        return out

    # Merge all constant objects
    hardcoded_ssz_dep_constants = reduce(lambda obj, builder: {**obj, **builder.hardcoded_ssz_dep_constants()}, builders, {})
    hardcoded_custom_type_dep_constants = reduce(lambda obj, builder: {**obj, **builder.hardcoded_custom_type_dep_constants(spec_object)}, builders, {})
    hardcoded_func_dep_presets = reduce(lambda obj, builder: {**obj, **builder.hardcoded_func_dep_presets(spec_object)}, builders, {})
    # Concatenate all strings
    imports = reduce(lambda txt, builder: (txt + "\n\n" + builder.imports(preset_name) ).strip("\n"), builders, "")
    preparations = reduce(lambda txt, builder: (txt + "\n\n" + builder.preparations() ).strip("\n"), builders, "")

@@ -126,6 +126,7 @@ def objects_to_spec(preset_name: str,
    ssz_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, hardcoded_ssz_dep_constants[x]), hardcoded_ssz_dep_constants))
    ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), hardcoded_ssz_dep_constants))
    custom_type_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, hardcoded_custom_type_dep_constants[x]), hardcoded_custom_type_dep_constants))
    func_dep_presets_verification = '\n'.join(map(lambda x: 'assert %s == %s # noqa: E501' % (x, spec_object.func_dep_presets[x]), hardcoded_func_dep_presets))
    spec_strs = [
        imports,
        preparations,

@@ -147,6 +148,7 @@ def objects_to_spec(preset_name: str,
        # Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are
        # the same as the spec definition.
        ssz_dep_constants_verification,
        func_dep_presets_verification,
    ]
    return "\n\n\n".join([str.strip("\n") for str in spec_strs if str]) + "\n"

@@ -223,6 +225,7 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
    preset_vars = combine_dicts(spec0.preset_vars, spec1.preset_vars)
    config_vars = combine_dicts(spec0.config_vars, spec1.config_vars)
    ssz_dep_constants = combine_dicts(spec0.ssz_dep_constants, spec1.ssz_dep_constants)
    func_dep_presets = combine_dicts(spec0.func_dep_presets, spec1.func_dep_presets)
    ssz_objects = combine_ssz_objects(spec0.ssz_objects, spec1.ssz_objects, custom_types)
    dataclasses = combine_dicts(spec0.dataclasses, spec1.dataclasses)
    return SpecObject(

@@ -233,6 +236,7 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
        preset_vars=preset_vars,
        config_vars=config_vars,
        ssz_dep_constants=ssz_dep_constants,
        func_dep_presets=func_dep_presets,
        ssz_objects=ssz_objects,
        dataclasses=dataclasses,
    )
@@ -27,16 +27,16 @@ GeneralizedIndex = NewType('GeneralizedIndex', int)
    @classmethod
    def sundry_functions(cls) -> str:
        return '''
def get_generalized_index(ssz_class: Any, *path: Sequence[PyUnion[int, SSZVariableName]]) -> GeneralizedIndex:
def get_generalized_index(ssz_class: Any, *path: PyUnion[int, SSZVariableName]) -> GeneralizedIndex:
    ssz_path = Path(ssz_class)
    for item in path:
        ssz_path = ssz_path / item
    return GeneralizedIndex(ssz_path.gindex())


def compute_merkle_proof_for_state(state: BeaconState,
                                   index: GeneralizedIndex) -> Sequence[Bytes32]:
    return build_proof(state.get_backing(), index)'''
def compute_merkle_proof(object: SSZObject,
                         index: GeneralizedIndex) -> Sequence[Bytes32]:
    return build_proof(object.get_backing(), index)'''


    @classmethod
@@ -47,6 +47,10 @@ class BaseSpecBuilder(ABC):
        """
        return {}

    @classmethod
    def hardcoded_func_dep_presets(cls, spec_object) -> Dict[str, str]:
        return {}

    @classmethod
    def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
        return functions
@@ -27,7 +27,12 @@ def get_execution_state(_execution_state_root: Bytes32) -> ExecutionState:


def get_pow_chain_head() -> PowBlock:
    pass"""
    pass


def validator_is_connected(validator_index: ValidatorIndex) -> bool:
    # pylint: disable=unused-argument
    return True"""


    @classmethod
    def execution_engine_cls(cls) -> str:
@@ -13,15 +13,6 @@ class CapellaSpecBuilder(BaseSpecBuilder):
from eth2spec.bellatrix import {preset_name} as bellatrix
'''


    @classmethod
    def sundry_functions(cls) -> str:
        return '''
def compute_merkle_proof_for_block_body(body: BeaconBlockBody,
                                        index: GeneralizedIndex) -> Sequence[Bytes32]:
    return build_proof(body.get_backing(), index)'''


    @classmethod
    def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
        return {
@@ -1,3 +1,4 @@
from typing import Dict
from .base import BaseSpecBuilder
from ..constants import DENEB

@@ -23,7 +24,8 @@ T = TypeVar('T') # For generic function
        return '''
def retrieve_blobs_and_proofs(beacon_block_root: Root) -> Tuple[Sequence[Blob], Sequence[KZGProof]]:
    # pylint: disable=unused-argument
    return [], []'''
    return [], []
'''

    @classmethod
    def execution_engine_cls(cls) -> str:

@@ -63,9 +65,16 @@ EXECUTION_ENGINE = NoopExecutionEngine()"""

    @classmethod
    def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
    def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
        return {
            'BYTES_PER_FIELD_ELEMENT': spec_object.constant_vars['BYTES_PER_FIELD_ELEMENT'].value,
            'FIELD_ELEMENTS_PER_BLOB': spec_object.preset_vars['FIELD_ELEMENTS_PER_BLOB'].value,
            'MAX_BLOBS_PER_BLOCK': spec_object.preset_vars['MAX_BLOBS_PER_BLOCK'].value,
            'MAX_BLOB_COMMITMENTS_PER_BLOCK': spec_object.preset_vars['MAX_BLOB_COMMITMENTS_PER_BLOCK'].value,
        }

    @classmethod
    def hardcoded_func_dep_presets(cls, spec_object) -> Dict[str, str]:
        return {
            'KZG_COMMITMENT_INCLUSION_PROOF_DEPTH': spec_object.preset_vars['KZG_COMMITMENT_INCLUSION_PROOF_DEPTH'].value,
        }
@@ -22,6 +22,7 @@ class SpecObject(NamedTuple):
    preset_vars: Dict[str, VariableDefinition]
    config_vars: Dict[str, VariableDefinition]
    ssz_dep_constants: Dict[str, str] # the constants that depend on ssz_objects
    func_dep_presets: Dict[str, str] # the constants that depend on functions
    ssz_objects: Dict[str, str]
    dataclasses: Dict[str, str]
setup.py
@@ -162,6 +162,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
    preset_vars: Dict[str, VariableDefinition] = {}
    config_vars: Dict[str, VariableDefinition] = {}
    ssz_dep_constants: Dict[str, str] = {}
    func_dep_presets: Dict[str, str] = {}
    ssz_objects: Dict[str, str] = {}
    dataclasses: Dict[str, str] = {}
    custom_types: Dict[str, str] = {}

@@ -214,6 +215,16 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr

    value_cell = cells[1]
    value = value_cell.children[0].children

    description = None
    if len(cells) >= 3:
        description_cell = cells[2]
        if len(description_cell.children) > 0:
            description = description_cell.children[0].children
            if isinstance(description, list):
                # marko parses `**X**` as a list containing a X
                description = description[0].children

    if isinstance(value, list):
        # marko parses `**X**` as a list containing a X
        value = value[0].children

@@ -228,6 +239,9 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
        ssz_dep_constants[name] = value
        continue

    if description is not None and description.startswith("<!-- predefined -->"):
        func_dep_presets[name] = value

    value_def = _parse_value(name, value)
    if name in preset:
        preset_vars[name] = VariableDefinition(value_def.type_name, preset[name], value_def.comment, None)

@@ -256,6 +270,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
        preset_vars=preset_vars,
        config_vars=config_vars,
        ssz_dep_constants=ssz_dep_constants,
        func_dep_presets=func_dep_presets,
        ssz_objects=ssz_objects,
        dataclasses=dataclasses,
    )
@@ -10,7 +10,7 @@

- [Introduction](#introduction)
- [Helper functions](#helper-functions)
  - [`compute_merkle_proof_for_state`](#compute_merkle_proof_for_state)
  - [`compute_merkle_proof`](#compute_merkle_proof)
  - [`block_to_light_client_header`](#block_to_light_client_header)
- [Deriving light client data](#deriving-light-client-data)
  - [`create_light_client_bootstrap`](#create_light_client_bootstrap)
@@ -27,11 +27,13 @@ This document provides helper functions to enable full nodes to serve light clie

## Helper functions

### `compute_merkle_proof_for_state`
### `compute_merkle_proof`

This function returns the Merkle proof of the given SSZ object `object` at generalized index `index`.

```python
def compute_merkle_proof_for_state(state: BeaconState,
                                   index: GeneralizedIndex) -> Sequence[Bytes32]:
def compute_merkle_proof(object: SSZObject,
                         index: GeneralizedIndex) -> Sequence[Bytes32]:
    ...
```
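For orientation, the branch returned by `compute_merkle_proof` contains `floorlog2(index)` sibling hashes. A small, self-contained check using the well-known Altair generalized indices; the concrete values 105/54/55 are assumptions for illustration and are not defined in this diff:

```python
import math

# Assumed Altair generalized indices (for illustration only).
FINALIZED_ROOT_GINDEX = 105
CURRENT_SYNC_COMMITTEE_GINDEX = 54
NEXT_SYNC_COMMITTEE_GINDEX = 55

def expected_branch_length(gindex: int) -> int:
    # A Merkle proof for generalized index `gindex` carries floorlog2(gindex) sibling hashes.
    return int(math.floor(math.log2(gindex)))

assert expected_branch_length(FINALIZED_ROOT_GINDEX) == 6
assert expected_branch_length(CURRENT_SYNC_COMMITTEE_GINDEX) == 5
assert expected_branch_length(NEXT_SYNC_COMMITTEE_GINDEX) == 5
```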
@@ -73,7 +75,7 @@ def create_light_client_bootstrap(state: BeaconState,
    return LightClientBootstrap(
        header=block_to_light_client_header(block),
        current_sync_committee=state.current_sync_committee,
        current_sync_committee_branch=compute_merkle_proof_for_state(state, CURRENT_SYNC_COMMITTEE_GINDEX),
        current_sync_committee_branch=compute_merkle_proof(state, CURRENT_SYNC_COMMITTEE_GINDEX),
    )
```
@@ -120,8 +122,7 @@ def create_light_client_update(state: BeaconState,
    # `next_sync_committee` is only useful if the message is signed by the current sync committee
    if update_attested_period == update_signature_period:
        update.next_sync_committee = attested_state.next_sync_committee
        update.next_sync_committee_branch = compute_merkle_proof_for_state(
            attested_state, NEXT_SYNC_COMMITTEE_GINDEX)
        update.next_sync_committee_branch = compute_merkle_proof(attested_state, NEXT_SYNC_COMMITTEE_GINDEX)

    # Indicate finality whenever possible
    if finalized_block is not None:

@@ -130,8 +131,7 @@ def create_light_client_update(state: BeaconState,
            assert hash_tree_root(update.finalized_header.beacon) == attested_state.finalized_checkpoint.root
        else:
            assert attested_state.finalized_checkpoint.root == Bytes32()
        update.finality_branch = compute_merkle_proof_for_state(
            attested_state, FINALIZED_ROOT_GINDEX)
        update.finality_branch = compute_merkle_proof(attested_state, FINALIZED_ROOT_GINDEX)

    update.sync_aggregate = block.message.body.sync_aggregate
    update.signature_slot = block.message.slot
@@ -11,6 +11,7 @@
  - [`ExecutionEngine`](#executionengine)
    - [`notify_forkchoice_updated`](#notify_forkchoice_updated)
      - [`safe_block_hash`](#safe_block_hash)
      - [`should_override_forkchoice_update`](#should_override_forkchoice_update)
- [Helpers](#helpers)
  - [`PayloadAttributes`](#payloadattributes)
  - [`PowBlock`](#powblock)
@@ -76,6 +77,86 @@ As per EIP-3675, before a post-transition block is finalized, `notify_forkchoice
The `safe_block_hash` parameter MUST be set to the return value of the
[`get_safe_execution_payload_hash(store: Store)`](../../fork_choice/safe-block.md#get_safe_execution_payload_hash) function.

##### `should_override_forkchoice_update`

If proposer boost re-orgs are implemented and enabled (see `get_proposer_head`), then additional care
must be taken to ensure that the proposer is able to build an execution payload.

If a beacon node knows it will propose the next block then it SHOULD NOT call
`notify_forkchoice_updated` if it detects the current head to be weak and potentially capable of
being re-orged. Complete information for evaluating `get_proposer_head` _will not_ be available
immediately after the receipt of a new block, so an approximation of those conditions should be
used when deciding whether to send or suppress a fork choice notification. The exact conditions
used may be implementation-specific; a suggested implementation is below.

Let `validator_is_connected(validator_index: ValidatorIndex) -> bool` be a function that indicates
whether the validator with `validator_index` is connected to the node (e.g. has sent an unexpired
proposer preparation message).

```python
def should_override_forkchoice_update(store: Store, head_root: Root) -> bool:
    head_block = store.blocks[head_root]
    parent_root = head_block.parent_root
    parent_block = store.blocks[parent_root]
    current_slot = get_current_slot(store)
    proposal_slot = head_block.slot + Slot(1)

    # Only re-org the head_block if it arrived later than the attestation deadline.
    head_late = is_head_late(store, head_root)

    # Shuffling stable.
    shuffling_stable = is_shuffling_stable(proposal_slot)

    # FFG information of the new head_block will be competitive with the current head.
    ffg_competitive = is_ffg_competitive(store, head_root, parent_root)

    # Do not re-org if the chain is not finalizing with acceptable frequency.
    finalization_ok = is_finalization_ok(store, proposal_slot)

    # Only suppress the fork choice update if we are confident that we will propose the next block.
    parent_state_advanced = store.block_states[parent_root].copy()
    process_slots(parent_state_advanced, proposal_slot)
    proposer_index = get_beacon_proposer_index(parent_state_advanced)
    proposing_reorg_slot = validator_is_connected(proposer_index)

    # Single slot re-org.
    parent_slot_ok = parent_block.slot + 1 == head_block.slot
    proposing_on_time = is_proposing_on_time(store)

    # Note that this condition is different from `get_proposer_head`
    current_time_ok = (head_block.slot == current_slot
                       or (proposal_slot == current_slot and proposing_on_time))
    single_slot_reorg = parent_slot_ok and current_time_ok

    # Check the head weight only if the attestations from the head slot have already been applied.
    # Implementations may want to do this in different ways, e.g. by advancing
    # `store.time` early, or by counting queued attestations during the head block's slot.
    if current_slot > head_block.slot:
        head_weak = is_head_weak(store, head_root)
        parent_strong = is_parent_strong(store, parent_root)
    else:
        head_weak = True
        parent_strong = True

    return all([head_late, shuffling_stable, ffg_competitive, finalization_ok,
                proposing_reorg_slot, single_slot_reorg,
                head_weak, parent_strong])
```

*Note*: The ordering of conditions is a suggestion only. Implementations are free to
optimize by re-ordering the conditions from least to most expensive and by returning early if
any of the early conditions are `False`.

In case `should_override_forkchoice_update` returns `True`, a node SHOULD instead call
`notify_forkchoice_updated` with parameters appropriate for building upon the parent block. Care
must be taken to compute the correct `payload_attributes`, as they may change depending on the slot
of the block to be proposed (due to withdrawals).

If `should_override_forkchoice_update` returns `True` but `get_proposer_head` later chooses the
canonical head rather than its parent, then this is a misprediction that will cause the node
to construct a payload with less notice. The result of `get_proposer_head` MUST be preferred over
the result of `should_override_forkchoice_update` (when proposer reorgs are enabled).
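As a hypothetical illustration of the paragraph above (not spec code), a client enabling proposer re-orgs might pick the root it forwards to the execution engine like this:

```python
def forkchoice_update_target(store: Store, head_root: Root) -> Root:
    # Sketch only: choose which block the execution engine should treat as head.
    if should_override_forkchoice_update(store, head_root):
        # Prepare to build on the parent of the (weak, late) head instead.
        return store.blocks[head_root].parent_root
    return head_root
```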
## Helpers

### `PayloadAttributes`
@@ -191,11 +272,15 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    # Add new state for this block to the store
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    # Add block timeliness to the store
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
    is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
    is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
    store.block_timeliness[hash_tree_root(block)] = is_timely

    # Add proposer score boost if the block is timely and not conflicting with an existing block
    is_first_block = store.proposer_boost_root == Root()
    if get_current_slot(store) == block.slot and is_before_attesting_interval and is_first_block:
    if is_timely and is_first_block:
        store.proposer_boost_root = hash_tree_root(block)

    # Update checkpoints in store if necessary
@@ -103,11 +103,15 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    # Add new state for this block to the store
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    # Add block timeliness to the store
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
    is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
    is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
    store.block_timeliness[hash_tree_root(block)] = is_timely

    # Add proposer score boost if the block is timely and not conflicting with an existing block
    is_first_block = store.proposer_boost_root == Root()
    if get_current_slot(store) == block.slot and is_before_attesting_interval and is_first_block:
    if is_timely and is_first_block:
        store.proposer_boost_root = hash_tree_root(block)

    # Update checkpoints in store if necessary
@@ -10,7 +10,6 @@

- [Introduction](#introduction)
- [Helper functions](#helper-functions)
  - [`compute_merkle_proof_for_block_body`](#compute_merkle_proof_for_block_body)
  - [Modified `block_to_light_client_header`](#modified-block_to_light_client_header)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@@ -22,14 +21,6 @@ This upgrade adds information about the execution payload to light client data a

## Helper functions

### `compute_merkle_proof_for_block_body`

```python
def compute_merkle_proof_for_block_body(body: BeaconBlockBody,
                                        index: GeneralizedIndex) -> Sequence[Bytes32]:
    ...
```

### Modified `block_to_light_client_header`

```python
@@ -55,7 +46,7 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
            transactions_root=hash_tree_root(payload.transactions),
            withdrawals_root=hash_tree_root(payload.withdrawals),
        )
        execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX)
        execution_branch = compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_INDEX)
    else:
        # Note that during fork transitions, `finalized_header` may still point to earlier forks.
        # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
@@ -11,7 +11,6 @@
- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
  - [Domain types](#domain-types)
  - [Blob](#blob)
- [Preset](#preset)
  - [Execution](#execution)
@@ -67,12 +66,6 @@ Deneb is a consensus-layer upgrade containing a number of features. Including:

## Constants

### Domain types

| Name | Value |
| - | - |
| `DOMAIN_BLOB_SIDECAR` | `DomainType('0x0B000000')` |

### Blob

| Name | Value |
@@ -107,11 +107,15 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    # Add new state for this block to the store
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    # Add block timeliness to the store
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
    is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
    is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
    store.block_timeliness[hash_tree_root(block)] = is_timely

    # Add proposer score boost if the block is timely and not conflicting with an existing block
    is_first_block = store.proposer_boost_root == Root()
    if get_current_slot(store) == block.slot and is_before_attesting_interval and is_first_block:
    if is_timely and is_first_block:
        store.proposer_boost_root = hash_tree_root(block)

    # Update checkpoints in store if necessary
@@ -52,7 +52,7 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
            execution_header.blob_gas_used = payload.blob_gas_used
            execution_header.excess_blob_gas = payload.excess_blob_gas

        execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX)
        execution_branch = compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_INDEX)
    else:
        # Note that during fork transitions, `finalized_header` may still point to earlier forks.
        # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
@@ -11,13 +11,14 @@ The specification of these changes continues in the same format as the network s
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Modifications in Deneb](#modifications-in-deneb)
  - [Constant](#constant)
  - [Preset](#preset)
  - [Configuration](#configuration)
  - [Containers](#containers)
    - [`BlobSidecar`](#blobsidecar)
    - [`SignedBlobSidecar`](#signedblobsidecar)
    - [`BlobIdentifier`](#blobidentifier)
    - [Helpers](#helpers)
      - [`verify_blob_sidecar_signature`](#verify_blob_sidecar_signature)
      - [`verify_blob_sidecar_inclusion_proof`](#verify_blob_sidecar_inclusion_proof)
  - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
    - [Topics and messages](#topics-and-messages)
      - [Global topics](#global-topics)
@@ -41,6 +42,18 @@ The specification of these changes continues in the same format as the network s

## Modifications in Deneb

### Constant

*[New in Deneb:EIP4844]*

### Preset

*[New in Deneb:EIP4844]*

| Name | Value | Description |
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
| `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` | `uint64(floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK))` (= 17) | <!-- predefined --> Merkle proof depth for `blob_kzg_commitments` list item |

### Configuration

*[New in Deneb:EIP4844]*
@@ -60,24 +73,12 @@ The specification of these changes continues in the same format as the network s

```python
class BlobSidecar(Container):
    block_root: Root
    index: BlobIndex # Index of blob in block
    slot: Slot
    block_parent_root: Root # Proposer shuffling determinant
    proposer_index: ValidatorIndex
    blob: Blob
    kzg_commitment: KZGCommitment
    kzg_proof: KZGProof # Allows for quick verification of kzg_commitment
```

#### `SignedBlobSidecar`

*[New in Deneb:EIP4844]*

```python
class SignedBlobSidecar(Container):
    message: BlobSidecar
    signature: BLSSignature
    signed_block_header: SignedBeaconBlockHeader
    kzg_commitment_inclusion_proof: Vector[Bytes32, KZG_COMMITMENT_INCLUSION_PROOF_DEPTH]
```

#### `BlobIdentifier`
@@ -92,13 +93,18 @@ class BlobIdentifier(Container):

#### Helpers

##### `verify_blob_sidecar_signature`
##### `verify_blob_sidecar_inclusion_proof`

```python
def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool:
    proposer = state.validators[signed_blob_sidecar.message.proposer_index]
    signing_root = compute_signing_root(signed_blob_sidecar.message, get_domain(state, DOMAIN_BLOB_SIDECAR))
    return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature)
def verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool:
    gindex = get_subtree_index(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments', blob_sidecar.index))
    return is_valid_merkle_branch(
        leaf=blob_sidecar.kzg_commitment.hash_tree_root(),
        branch=blob_sidecar.kzg_commitment_inclusion_proof,
        depth=KZG_COMMITMENT_INCLUSION_PROOF_DEPTH,
        index=gindex,
        root=blob_sidecar.signed_block_header.message.body_root,
    )
```

### The gossip domain: gossipsub
@@ -123,7 +129,7 @@ The new topics along with the type of the `data` field of a gossipsub message ar

| Name | Message Type |
| - | - |
| `blob_sidecar_{subnet_id}` | `SignedBlobSidecar` [New in Deneb:EIP4844] |
| `blob_sidecar_{subnet_id}` | `BlobSidecar` [New in Deneb:EIP4844] |

##### Global topics
@@ -144,21 +150,23 @@ New validation:

*[New in Deneb:EIP4844]*

This topic is used to propagate signed blob sidecars, where each blob index maps to some `subnet_id`.
This topic is used to propagate blob sidecars, where each blob index maps to some `subnet_id`.

The following validations MUST pass before forwarding the `signed_blob_sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
The following validations MUST pass before forwarding the `blob_sidecar` on the network, assuming the alias `block_header = blob_sidecar.signed_block_header.message`:

- _[REJECT]_ The sidecar's index is consistent with `MAX_BLOBS_PER_BLOCK` -- i.e. `sidecar.index < MAX_BLOBS_PER_BLOCK`.
- _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_blob_sidecar(sidecar.index) == subnet_id`.
- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation.
- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`).
- _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block's parent -- i.e. `get_checkpoint_block(store, sidecar.block_parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`.
- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_blob_sidecar_signature`.
- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`.
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`).
- _[REJECT]_ The sidecar's index is consistent with `MAX_BLOBS_PER_BLOCK` -- i.e. `blob_sidecar.index < MAX_BLOBS_PER_BLOCK`.
- _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id`.
- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
- _[REJECT]_ The proposer signature of `blob_sidecar.signed_block_header` is valid with respect to the `block_header.proposer_index` pubkey.
- _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
- _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation.
- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `block_header.parent_root`).
- _[REJECT]_ The current finalized_checkpoint is an ancestor of the sidecar's block -- i.e. `get_checkpoint_block(store, block_header.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`.
- _[REJECT]_ The sidecar's inclusion proof is valid as verified by `verify_blob_sidecar_inclusion_proof(blob_sidecar)`.
- _[REJECT]_ The sidecar's blob is valid as verified by `verify_blob_kzg_proof(blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`.
- _[IGNORE]_ The sidecar is the first sidecar for the tuple `(block_header.slot, block_header.proposer_index, blob_sidecar.index)` with valid header signature, sidecar inclusion proof, and kzg proof.
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_header.parent_root`/`block_header.slot`).
  If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.

###### `beacon_aggregate_and_proof`
@@ -278,10 +286,7 @@ Requests sidecars by block root and index.
The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests.
It may be less in the case that the responding peer is missing blocks or sidecars.

The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer
may not be available beyond the initial distribution via gossip.

Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof_batch`.
Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted, has valid inclusion proof, and is correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof`.

No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
@@ -326,9 +331,7 @@ Response Content:

Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice.

The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip.

Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof_batch`.
Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted, has valid inclusion proof, and is correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof`.

`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window.
@@ -338,7 +341,7 @@ The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.

Let `blob_serve_range` be `[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`.
Clients MUST keep a record of signed blob sidecars seen on the epoch range `blob_serve_range`
Clients MUST keep a record of blob sidecars seen on the epoch range `blob_serve_range`
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of blobs on this range.
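A small, self-contained sketch of the `blob_serve_range` computation described above; the configuration values used are assumptions for illustration, not taken from this diff:

```python
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096  # assumed config value
DENEB_FORK_EPOCH = 269568                     # assumed fork epoch, for illustration only

def blob_serve_range(current_epoch: int) -> range:
    # Inclusive epoch range the node must be able to serve sidecars for.
    start = max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)
    return range(start, current_epoch + 1)

# Shortly after the fork the range is clamped to the fork epoch:
assert blob_serve_range(DENEB_FORK_EPOCH + 10).start == DENEB_FORK_EPOCH
# Later it becomes a sliding window of MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS epochs:
assert len(blob_serve_range(DENEB_FORK_EPOCH + 10_000)) == MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS + 1
```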
@@ -21,7 +21,7 @@
  - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
    - [ExecutionPayload](#executionpayload)
    - [Blob KZG commitments](#blob-kzg-commitments)
  - [Constructing the `SignedBlobSidecar`s](#constructing-the-signedblobsidecars)
  - [Constructing the `BlobSidecar`s](#constructing-the-blobsidecars)
    - [Sidecar](#sidecar)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

@@ -131,51 +131,48 @@ def prepare_execution_payload(state: BeaconState,
1. The execution payload is obtained from the execution engine as defined above using `payload_id`. The response also includes a `blobs_bundle` entry containing the corresponding `blobs`, `commitments`, and `proofs`.
2. Set `block.body.blob_kzg_commitments = commitments`.

#### Constructing the `SignedBlobSidecar`s
#### Constructing the `BlobSidecar`s

*[New in Deneb:EIP4844]*

To construct a `SignedBlobSidecar`, a `signed_blob_sidecar` is defined with the necessary context for block and sidecar proposal.
To construct a `BlobSidecar`, a `blob_sidecar` is defined with the necessary context for block and sidecar proposal.

##### Sidecar

Blobs associated with a block are packaged into sidecar objects for distribution to the network.
Blobs associated with a block are packaged into sidecar objects for distribution to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic.

Each `sidecar` is obtained from:

```python
def get_blob_sidecars(block: BeaconBlock,
def get_blob_sidecars(signed_block: SignedBeaconBlock,
                      blobs: Sequence[Blob],
                      blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]:
    block = signed_block.message
    block_header = BeaconBlockHeader(
        slot=block.slot,
        proposer_index=block.proposer_index,
        parent_root=block.parent_root,
        state_root=block.state_root,
        body_root=hash_tree_root(block.body),
    )
    signed_block_header = SignedBeaconBlockHeader(message=block_header, signature=signed_block.signature)
    return [
        BlobSidecar(
            block_root=hash_tree_root(block),
            index=index,
            slot=block.slot,
            block_parent_root=block.parent_root,
            blob=blob,
            kzg_commitment=block.body.blob_kzg_commitments[index],
            kzg_proof=blob_kzg_proofs[index],
            signed_block_header=signed_block_header,
            kzg_commitment_inclusion_proof=compute_merkle_proof(
                block.body,
                get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments', index),
            ),
        )
        for index, blob in enumerate(blobs)
    ]
```

Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic.

`signature` is obtained from:

```python
def get_blob_sidecar_signature(state: BeaconState,
                               sidecar: BlobSidecar,
                               privkey: int) -> BLSSignature:
    domain = get_domain(state, DOMAIN_BLOB_SIDECAR, compute_epoch_at_slot(sidecar.slot))
    signing_root = compute_signing_root(sidecar, domain)
    return bls.Sign(privkey, signing_root)
```

The `subnet_id` for the `signed_sidecar` is calculated with:
- Let `blob_index = signed_sidecar.message.index`.
The `subnet_id` for the `blob_sidecar` is calculated with:
- Let `blob_index = blob_sidecar.index`.
- Let `subnet_id = compute_subnet_for_blob_sidecar(blob_index)`.
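The subnet mapping in the bullets above can be illustrated with a small, self-contained sketch; the modulo rule and the `BLOB_SIDECAR_SUBNET_COUNT = 6` / `MAX_BLOBS_PER_BLOCK = 6` values are assumptions for illustration, not defined in this diff:

```python
BLOB_SIDECAR_SUBNET_COUNT = 6  # assumed configuration value
MAX_BLOBS_PER_BLOCK = 6        # assumed preset value

def compute_subnet_for_blob_sidecar(blob_index: int) -> int:
    # Assumed definition: each blob index maps to a subnet by simple modulo.
    return blob_index % BLOB_SIDECAR_SUBNET_COUNT

subnet_ids = [compute_subnet_for_blob_sidecar(i) for i in range(MAX_BLOBS_PER_BLOCK)]
assert subnet_ids == [0, 1, 2, 3, 4, 5]
```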
```python
@@ -18,6 +18,7 @@
  - [`get_current_slot`](#get_current_slot)
  - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start)
  - [`get_ancestor`](#get_ancestor)
  - [`calculate_committee_fraction`](#calculate_committee_fraction)
  - [`get_checkpoint_block`](#get_checkpoint_block)
  - [`get_weight`](#get_weight)
  - [`get_voting_source`](#get_voting_source)

@@ -26,6 +27,15 @@
  - [`get_head`](#get_head)
  - [`update_checkpoints`](#update_checkpoints)
  - [`update_unrealized_checkpoints`](#update_unrealized_checkpoints)
  - [Proposer head and reorg helpers](#proposer-head-and-reorg-helpers)
    - [`is_head_late`](#is_head_late)
    - [`is_shuffling_stable`](#is_shuffling_stable)
    - [`is_ffg_competitive`](#is_ffg_competitive)
    - [`is_finalization_ok`](#is_finalization_ok)
    - [`is_proposing_on_time`](#is_proposing_on_time)
    - [`is_head_weak`](#is_head_weak)
    - [`is_parent_strong`](#is_parent_strong)
    - [`get_proposer_head`](#get_proposer_head)
  - [Pull-up tip helpers](#pull-up-tip-helpers)
    - [`compute_pulled_up_tip`](#compute_pulled_up_tip)
  - [`on_tick` helpers](#on_tick-helpers)
@@ -76,11 +86,16 @@ Any of the above handlers that trigger an unhandled exception (e.g. a failed ass

### Configuration

| Name | Value |
| ---------------------- | ------------ |
| `PROPOSER_SCORE_BOOST` | `uint64(40)` |
| Name | Value |
| ------------------------------------- | ------------- |
| `PROPOSER_SCORE_BOOST` | `uint64(40)` |
| `REORG_HEAD_WEIGHT_THRESHOLD` | `uint64(20)` |
| `REORG_PARENT_WEIGHT_THRESHOLD` | `uint64(160)` |
| `REORG_MAX_EPOCHS_SINCE_FINALIZATION` | `Epoch(2)` |

- The proposer score boost is worth `PROPOSER_SCORE_BOOST` percentage of the committee's weight, i.e., for a slot with committee weight `committee_weight` the boost weight is equal to `(committee_weight * PROPOSER_SCORE_BOOST) // 100`.
- The proposer score boost and re-org weight threshold are percentage
  values that are measured with respect to the weight of a single committee. See
  `calculate_committee_fraction`.
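To make the percentages concrete, here is a small, self-contained calculation in the style of `calculate_committee_fraction`; the stake figure and `SLOTS_PER_EPOCH = 32` are illustrative assumptions, not values from this diff:

```python
SLOTS_PER_EPOCH = 32                       # assumed mainnet value
total_active_balance = 10_000_000 * 10**9  # hypothetical 10M ETH of active stake, in Gwei

committee_weight = total_active_balance // SLOTS_PER_EPOCH

def committee_fraction(percent: int) -> int:
    return (committee_weight * percent) // 100

proposer_boost = committee_fraction(40)           # PROPOSER_SCORE_BOOST
reorg_head_threshold = committee_fraction(20)     # REORG_HEAD_WEIGHT_THRESHOLD
reorg_parent_threshold = committee_fraction(160)  # REORG_PARENT_WEIGHT_THRESHOLD

# A late head is a re-org candidate only if its weight stays below ~20% of one
# committee while the parent holds more than ~160%.
assert reorg_head_threshold < proposer_boost < reorg_parent_threshold
```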
### Helpers
|
|||
equivocating_indices: Set[ValidatorIndex]
|
||||
blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
|
||||
block_states: Dict[Root, BeaconState] = field(default_factory=dict)
|
||||
block_timeliness: Dict[Root, boolean] = field(default_factory=dict)
|
||||
checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
|
||||
latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
|
||||
unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
|
||||
|
@ -191,6 +207,14 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
|
|||
return root
|
||||
```
|
||||
|
||||
#### `calculate_committee_fraction`
|
||||
|
||||
```python
|
||||
def calculate_committee_fraction(state: BeaconState, committee_percent: uint64) -> Gwei:
|
||||
committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
||||
return Gwei((committee_weight * committee_percent) // 100)
|
||||
```
|
||||
|
||||
#### `get_checkpoint_block`
|
||||
|
||||
```python
|
||||
|
@ -225,8 +249,7 @@ def get_weight(store: Store, root: Root) -> Gwei:
|
|||
proposer_score = Gwei(0)
|
||||
# Boost is applied if ``root`` is an ancestor of ``proposer_boost_root``
|
||||
if get_ancestor(store, store.proposer_boost_root, store.blocks[root].slot) == root:
|
||||
committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
|
||||
proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100
|
||||
proposer_score = calculate_committee_fraction(state, PROPOSER_SCORE_BOOST)
|
||||
return attestation_score + proposer_score
|
||||
```
|
||||
|
||||
|
@ -247,7 +270,6 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint:
|
|||
# The block is not from a prior epoch, therefore the voting source is not pulled up
|
||||
head_state = store.block_states[block_root]
|
||||
return head_state.current_justified_checkpoint
|
||||
|
||||
```
|
||||
|
||||
#### `filter_block_tree`
|
||||
|
@ -374,7 +396,113 @@ def update_unrealized_checkpoints(store: Store, unrealized_justified_checkpoint:
|
|||
if unrealized_finalized_checkpoint.epoch > store.unrealized_finalized_checkpoint.epoch:
|
||||
store.unrealized_finalized_checkpoint = unrealized_finalized_checkpoint
|
||||
```
|
||||
#### Proposer head and reorg helpers
|
||||
|
||||
_Implementing these helpers is optional_.
|
||||
|
||||
##### `is_head_late`
|
||||
```python
|
||||
def is_head_late(store: Store, head_root: Root) -> bool:
|
||||
return not store.block_timeliness[head_root]
|
||||
```
|
||||
|
||||
##### `is_shuffling_stable`
|
||||
```python
|
||||
def is_shuffling_stable(slot: Slot) -> bool:
|
||||
return slot % SLOTS_PER_EPOCH != 0
|
||||
```
|
||||
|
||||
##### `is_ffg_competitive`
|
||||
|
||||
```python
|
||||
def is_ffg_competitive(store: Store, head_root: Root, parent_root: Root) -> bool:
|
||||
return (store.unrealized_justifications[head_root] == store.unrealized_justifications[parent_root])
|
||||
```
|
||||
|
||||
##### `is_finalization_ok`
|
||||
|
||||
```python
|
||||
def is_finalization_ok(store: Store, slot: Slot) -> bool:
|
||||
epochs_since_finalization = compute_epoch_at_slot(slot) - store.finalized_checkpoint.epoch
|
||||
return epochs_since_finalization <= REORG_MAX_EPOCHS_SINCE_FINALIZATION
|
||||
```
|
||||
|
||||
##### `is_proposing_on_time`
|
||||
|
||||
```python
|
||||
def is_proposing_on_time(store: Store) -> bool:
|
||||
# Use half `SECONDS_PER_SLOT // INTERVALS_PER_SLOT` as the proposer reorg deadline
|
||||
time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
|
||||
proposer_reorg_cutoff = SECONDS_PER_SLOT // INTERVALS_PER_SLOT // 2
|
||||
return time_into_slot <= proposer_reorg_cutoff
|
||||
```
|
||||
|
||||
##### `is_head_weak`
|
||||
|
||||
```python
|
||||
def is_head_weak(store: Store, head_root: Root) -> bool:
|
||||
justified_state = store.checkpoint_states[store.justified_checkpoint]
|
||||
reorg_threshold = calculate_committee_fraction(justified_state, REORG_HEAD_WEIGHT_THRESHOLD)
|
||||
head_weight = get_weight(store, head_root)
|
||||
return head_weight < reorg_threshold
|
||||
```
|
||||
|
||||
##### `is_parent_strong`
|
||||
|
||||
```python
|
||||
def is_parent_strong(store: Store, parent_root: Root) -> bool:
|
||||
justified_state = store.checkpoint_states[store.justified_checkpoint]
|
||||
parent_threshold = calculate_committee_fraction(justified_state, REORG_PARENT_WEIGHT_THRESHOLD)
|
||||
parent_weight = get_weight(store, parent_root)
|
||||
return parent_weight > parent_threshold
|
||||
```
|
||||
|
||||
##### `get_proposer_head`
|
||||
|
||||
```python
|
||||
def get_proposer_head(store: Store, head_root: Root, slot: Slot) -> Root:
|
||||
head_block = store.blocks[head_root]
|
||||
parent_root = head_block.parent_root
|
||||
parent_block = store.blocks[parent_root]
|
||||
|
||||
# Only re-org the head block if it arrived later than the attestation deadline.
|
||||
head_late = is_head_late(store, head_root)
|
||||
|
||||
# Do not re-org on an epoch boundary where the proposer shuffling could change.
|
||||
shuffling_stable = is_shuffling_stable(slot)
|
||||
|
||||
# Ensure that the FFG information of the new head will be competitive with the current head.
|
||||
ffg_competitive = is_ffg_competitive(store, head_root, parent_root)
|
||||
|
||||
# Do not re-org if the chain is not finalizing with acceptable frequency.
|
||||
finalization_ok = is_finalization_ok(store, slot)
|
||||
|
||||
# Only re-org if we are proposing on-time.
|
||||
proposing_on_time = is_proposing_on_time(store)
|
||||
|
||||
# Only re-org a single slot at most.
|
||||
parent_slot_ok = parent_block.slot + 1 == head_block.slot
|
||||
current_time_ok = head_block.slot + 1 == slot
|
||||
single_slot_reorg = parent_slot_ok and current_time_ok
|
||||
|
||||
# Check that the head has few enough votes to be overpowered by our proposer boost.
|
||||
assert store.proposer_boost_root != head_root # ensure boost has worn off
|
||||
head_weak = is_head_weak(store, head_root)
|
||||
|
||||
# Check that the missing votes are assigned to the parent and not being hoarded.
|
||||
parent_strong = is_parent_strong(store, parent_root)
|
||||
|
||||
if all([head_late, shuffling_stable, ffg_competitive, finalization_ok,
|
||||
proposing_on_time, single_slot_reorg, head_weak, parent_strong]):
|
||||
# We can re-org the current head by building upon its parent block.
|
||||
return parent_root
|
||||
else:
|
||||
return head_root
|
||||
```
|
||||
|
||||
*Note*: The ordering of conditions is a suggestion only. Implementations are free to
|
||||
optimize by re-ordering the conditions from least to most expensive and by returning early if
|
||||
any of the early conditions are `False`.
|
||||
|
||||
#### Pull-up tip helpers
|
||||
|
||||
|
@@ -536,11 +664,15 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    # Add new state for this block to the store
    store.block_states[block_root] = state

    # Add proposer score boost if the block is timely
    # Add block timeliness to the store
    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
    is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
    is_timely = get_current_slot(store) == block.slot and is_before_attesting_interval
    store.block_timeliness[hash_tree_root(block)] = is_timely

    # Add proposer score boost if the block is timely and not conflicting with an existing block
    is_first_block = store.proposer_boost_root == Root()
    if get_current_slot(store) == block.slot and is_before_attesting_interval and is_first_block:
    if is_timely and is_first_block:
        store.proposer_boost_root = hash_tree_root(block)

    # Update checkpoints in store if necessary
@@ -274,15 +274,22 @@ A validator has two primary responsibilities to the beacon chain: [proposing blo
A validator is expected to propose a [`SignedBeaconBlock`](./beacon-chain.md#signedbeaconblock) at
the beginning of any `slot` during which `is_proposer(state, validator_index)` returns `True`.

To propose, the validator selects the `BeaconBlock`, `parent` which:
To propose, the validator selects a `BeaconBlock`, `parent` using this process:

1. In their view of fork choice is the head of the chain at the start of
   `slot`, after running `on_tick` and applying any queued attestations from `slot - 1`.
2. Is from a slot strictly less than the slot of the block about to be proposed,
   i.e. `parent.slot < slot`.
1. Compute fork choice's view of the head at the start of `slot`, after running
   `on_tick` and applying any queued attestations from `slot - 1`.
   Set `head_root = get_head(store)`.
2. Compute the _proposer head_, which is the head upon which the proposer SHOULD build in order to
   incentivise timely block propagation by other validators.
   Set `parent_root = get_proposer_head(store, head_root, slot)`.
   A proposer may set `parent_root == head_root` if proposer re-orgs are not implemented or have
   been disabled.
3. Let `parent` be the block with `parent_root`.
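A minimal sketch (not spec code) of the three-step parent selection described above, assuming the fork choice `store` and the helpers defined in the fork-choice spec:

```python
def select_proposal_parent(store: Store, slot: Slot) -> Root:
    # Step 1: fork choice's view of the head at the start of `slot`.
    head_root = get_head(store)
    # Step 2: the proposer head; may equal `head_root` if re-orgs are disabled.
    parent_root = get_proposer_head(store, head_root, slot)
    # Step 3: build the new block on top of `store.blocks[parent_root]`.
    return parent_root
```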
The validator creates, signs, and broadcasts a `block` that is a child of `parent`
that satisfies a valid [beacon chain state transition](./beacon-chain.md#beacon-chain-state-transition-function).
and satisfies a valid [beacon chain state transition](./beacon-chain.md#beacon-chain-state-transition-function).
Note that the parent's slot must be strictly less than the slot of the block about to be proposed,
i.e. `parent.slot < slot`.

There is one proposer per slot, so if there are N active validators any individual validator
will on average be assigned to propose once per N slots (e.g. at 312,500 validators = 10 million ETH, that's once per ~6 weeks).
@@ -167,7 +167,7 @@ def get_item_position(typ: SSZType, index_or_variable_name: Union[int, SSZVariab
```

```python
def get_generalized_index(typ: SSZType, path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex:
def get_generalized_index(typ: SSZType, *path: PyUnion[int, SSZVariableName]) -> GeneralizedIndex:
    """
    Converts a path (eg. `[7, "foo", 3]` for `x[7].foo[3]`, `[12, "bar", "__len__"]` for
    `len(x[12].bar)`) into the generalized index representing its position in the Merkle tree.
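With the updated signature, path elements are passed as separate (vararg) arguments. A hypothetical invocation within the generated pyspec environment; the value 105 is the well-known Altair `FINALIZED_ROOT_GINDEX`, assumed here purely for illustration:

```python
# Path elements passed as varargs rather than as a single sequence.
finalized_root_gindex = get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')
assert finalized_root_gindex == 105  # assumed Altair FINALIZED_ROOT_GINDEX
```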
@@ -1 +1 @@
1.4.0-beta.3
1.4.0-beta.4
@@ -10,8 +10,7 @@ from eth2spec.test.context import (
@spec_state_test
def test_current_sync_committee_merkle_proof(spec, state):
    yield "object", state
    current_sync_committee_branch = spec.compute_merkle_proof_for_state(
        state, spec.CURRENT_SYNC_COMMITTEE_GINDEX)
    current_sync_committee_branch = spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_GINDEX)
    yield "proof", {
        "leaf": "0x" + state.current_sync_committee.hash_tree_root().hex(),
        "leaf_index": spec.CURRENT_SYNC_COMMITTEE_GINDEX,

@@ -31,8 +30,7 @@ def test_current_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_next_sync_committee_merkle_proof(spec, state):
    yield "object", state
    next_sync_committee_branch = spec.compute_merkle_proof_for_state(
        state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
    next_sync_committee_branch = spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
    yield "proof", {
        "leaf": "0x" + state.next_sync_committee.hash_tree_root().hex(),
        "leaf_index": spec.NEXT_SYNC_COMMITTEE_GINDEX,

@@ -52,8 +50,7 @@ def test_next_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_finality_root_merkle_proof(spec, state):
    yield "object", state
    finality_branch = spec.compute_merkle_proof_for_state(
        state, spec.FINALIZED_ROOT_GINDEX)
    finality_branch = spec.compute_merkle_proof(state, spec.FINALIZED_ROOT_GINDEX)
    yield "proof", {
        "leaf": "0x" + state.finalized_checkpoint.root.hex(),
        "leaf_index": spec.FINALIZED_ROOT_GINDEX,
@ -0,0 +1,186 @@
|
|||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_bellatrix_and_later,
|
||||
with_presets,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
get_valid_attestation_at_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.fork_choice import (
|
||||
apply_next_epoch_with_attestations,
|
||||
apply_next_slots_with_attestations,
|
||||
get_genesis_forkchoice_store_and_block,
|
||||
on_tick_and_append_step,
|
||||
output_store_checks,
|
||||
tick_and_add_block,
|
||||
tick_and_run_on_attestation,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
next_epoch,
|
||||
next_slot,
|
||||
)
|
||||
|
||||
|
||||
@with_bellatrix_and_later
|
||||
@spec_state_test
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_should_override_forkchoice_update__false(spec, state):
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
# On receiving a block of `GENESIS_SLOT + 1` slot
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||
|
||||
# Proposer of next slot
|
||||
head_root = spec.get_head(store)
|
||||
|
||||
# Next slot
|
||||
next_slot(spec, state)
|
||||
slot = state.slot
|
||||
|
||||
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
|
||||
should_override = spec.should_override_forkchoice_update(store, head_root)
|
||||
assert not should_override
|
||||
|
||||
output_store_checks(spec, store, test_steps)
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'should_override_forkchoice_update': {
|
||||
'validator_is_connected': True,
|
||||
'result': should_override,
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_bellatrix_and_later
|
||||
@spec_state_test
|
||||
def test_should_override_forkchoice_update__true(spec, state):
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epochs 1 to 3
|
||||
for _ in range(3):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
|
||||
|
||||
# Make an empty block
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
|
||||
# Fill a slot (parent)
|
||||
state, store, signed_parent_block = yield from apply_next_slots_with_attestations(
|
||||
spec, state, store, 1, True, True, test_steps)
|
||||
|
||||
# Fill a slot with attestations to its parent
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
parent_block_slot = block.slot - 1
|
||||
block.body.attestations = get_valid_attestation_at_slot(
|
||||
state,
|
||||
spec,
|
||||
parent_block_slot,
|
||||
)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
# Make the head block late
|
||||
attesting_cutoff = spec.config.SECONDS_PER_SLOT // spec.INTERVALS_PER_SLOT
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + attesting_cutoff
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.get_current_slot(store) == block.slot
|
||||
|
||||
# Check conditions
|
||||
head_root = spec.get_head(store)
|
||||
head_block = store.blocks[head_root]
|
||||
parent_root = head_block.parent_root
|
||||
assert parent_root == signed_parent_block.message.hash_tree_root()
|
||||
parent_block = store.blocks[parent_root]
|
||||
|
||||
# Add attestations to the parent block
|
||||
temp_state = state.copy()
|
||||
next_slot(spec, temp_state)
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + 1
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
attestations = get_valid_attestation_at_slot(
|
||||
temp_state,
|
||||
spec,
|
||||
slot_to_attest=temp_state.slot - 1,
|
||||
beacon_block_root=parent_root,
|
||||
)
|
||||
current_slot = spec.get_current_slot(store)
|
||||
for attestation in attestations:
|
||||
yield from tick_and_run_on_attestation(spec, store, attestation, test_steps)
|
||||
|
||||
current_slot = spec.get_current_slot(store)
|
||||
proposal_slot = head_block.slot + 1
|
||||
|
||||
# The conditions in `get_proposer_head`
|
||||
assert spec.is_head_late(store, head_root)
|
||||
assert spec.is_shuffling_stable(proposal_slot)
|
||||
assert spec.is_ffg_competitive(store, head_root, parent_root)
|
||||
assert spec.is_finalization_ok(store, proposal_slot)
|
||||
|
||||
parent_state_advanced = store.block_states[parent_root].copy()
|
||||
spec.process_slots(parent_state_advanced, proposal_slot)
|
||||
proposer_index = spec.get_beacon_proposer_index(parent_state_advanced)
|
||||
assert spec.validator_is_connected(proposer_index)
|
||||
|
||||
# Single slot re-org.
|
||||
parent_slot_ok = parent_block.slot + 1 == head_block.slot
|
||||
proposing_on_time = spec.is_proposing_on_time(store)
|
||||
assert proposing_on_time
|
||||
assert parent_slot_ok and proposal_slot == current_slot and proposing_on_time
|
||||
|
||||
assert spec.is_head_weak(store, head_root)
|
||||
assert spec.is_parent_strong(store, parent_root)
|
||||
|
||||
should_override = spec.should_override_forkchoice_update(store, head_root)
|
||||
assert should_override
|
||||
|
||||
output_store_checks(spec, store, test_steps)
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'should_override_forkchoice_update': {
|
||||
'validator_is_connected': True,
|
||||
'result': should_override,
|
||||
},
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
|
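For orientation, the assertions above enumerate the conditions that `should_override_forkchoice_update` combines. A rough sketch of that combination follows (hedged; the canonical definition in the fork-choice spec differs in detail, e.g. in when the head-weight check is applied):

```python
# Rough sketch only: combines the predicates exercised by the test above. `spec` is the
# generated pyspec module, `store` a fork-choice Store, `head_root` the current head root.
def should_override_forkchoice_update_sketch(spec, store, head_root):
    head_block = store.blocks[head_root]
    parent_block = store.blocks[head_block.parent_root]
    proposal_slot = head_block.slot + 1

    # Would we (a connected validator) propose the block that performs the re-org?
    parent_state_advanced = store.block_states[head_block.parent_root].copy()
    spec.process_slots(parent_state_advanced, proposal_slot)
    proposer_index = spec.get_beacon_proposer_index(parent_state_advanced)

    return all([
        spec.is_head_late(store, head_root),
        spec.is_shuffling_stable(proposal_slot),
        spec.is_ffg_competitive(store, head_root, head_block.parent_root),
        spec.is_finalization_ok(store, proposal_slot),
        spec.validator_is_connected(proposer_index),
        parent_block.slot + 1 == head_block.slot,       # single-slot re-org
        proposal_slot == spec.get_current_slot(store),
        spec.is_proposing_on_time(store),
        spec.is_head_weak(store, head_root),
        spec.is_parent_strong(store, head_block.parent_root),
    ])
```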
@ -15,8 +15,7 @@ def test_execution_merkle_proof(spec, state):
|
|||
block = state_transition_with_full_block(spec, state, True, False)
|
||||
|
||||
yield "object", block.message.body
|
||||
execution_branch = spec.compute_merkle_proof_for_block_body(
|
||||
block.message.body, spec.EXECUTION_PAYLOAD_INDEX)
|
||||
execution_branch = spec.compute_merkle_proof(block.message.body, spec.EXECUTION_PAYLOAD_INDEX)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + block.message.body.execution_payload.hash_tree_root().hex(),
|
||||
"leaf_index": spec.EXECUTION_PAYLOAD_INDEX,
|
||||
|
|
|
@ -0,0 +1,46 @@
|
|||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_deneb_and_later,
|
||||
with_test_suite_name,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
sign_block
|
||||
)
|
||||
from eth2spec.test.helpers.execution_payload import (
|
||||
compute_el_block_hash,
|
||||
)
|
||||
from eth2spec.test.helpers.sharding import (
|
||||
get_sample_opaque_tx,
|
||||
)
|
||||
|
||||
|
||||
@with_test_suite_name("BeaconBlockBody")
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_blob_kzg_commitment_merkle_proof(spec, state):
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=1)
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
signed_block = sign_block(spec, state, block, proposer_index=0)
|
||||
blob_sidecars = spec.get_blob_sidecars(signed_block, blobs, proofs)
|
||||
blob_index = 0
|
||||
blob_sidecar = blob_sidecars[blob_index]
|
||||
|
||||
yield "object", block.body
|
||||
kzg_commitment_inclusion_proof = blob_sidecar.kzg_commitment_inclusion_proof
|
||||
gindex = spec.get_generalized_index(spec.BeaconBlockBody, 'blob_kzg_commitments', blob_index)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + blob_sidecar.kzg_commitment.hash_tree_root().hex(),
|
||||
"leaf_index": gindex,
|
||||
"branch": ['0x' + root.hex() for root in kzg_commitment_inclusion_proof]
|
||||
}
|
||||
assert spec.is_valid_merkle_branch(
|
||||
leaf=blob_sidecar.kzg_commitment.hash_tree_root(),
|
||||
branch=blob_sidecar.kzg_commitment_inclusion_proof,
|
||||
depth=spec.floorlog2(gindex),
|
||||
index=spec.get_subtree_index(gindex),
|
||||
root=blob_sidecar.signed_block_header.message.body_root,
|
||||
)
|
|
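The final assertion above relies on the phase0 Merkle-branch helper. For reference, a self-contained restatement (the spec's `hash` is SHA-256; the in-spec version operates on SSZ `Bytes32` values, plain `bytes` are used here):

```python
from hashlib import sha256
from typing import Sequence

def is_valid_merkle_branch(leaf: bytes, branch: Sequence[bytes], depth: int, index: int, root: bytes) -> bool:
    # Fold the branch into the leaf, choosing left/right order from the index bits.
    value = leaf
    for i in range(depth):
        if index // (2**i) % 2:
            value = sha256(branch[i] + value).digest()
        else:
            value = sha256(value + branch[i]).digest()
    return value == root
```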
@ -20,3 +20,6 @@ def test_networking(spec):
|
|||
assert spec.config.MAX_REQUEST_BLOB_SIDECARS == spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.MAX_BLOBS_PER_BLOCK
|
||||
# Start with the same size, but `BLOB_SIDECAR_SUBNET_COUNT` could potentially increase later.
|
||||
assert spec.config.BLOB_SIDECAR_SUBNET_COUNT == spec.MAX_BLOBS_PER_BLOCK
|
||||
for i in range(spec.MAX_BLOB_COMMITMENTS_PER_BLOCK):
|
||||
gindex = spec.get_generalized_index(spec.BeaconBlockBody, 'blob_kzg_commitments', i)
|
||||
assert spec.floorlog2(gindex) == spec.KZG_COMMITMENT_INCLUSION_PROOF_DEPTH
|
||||
|
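The loop above checks that the proof depth is identical for every commitment index. A hedged illustration of why one constant suffices (illustrative gindex and capacity values, not taken from the preset files):

```python
# Illustrative only: every leaf of a fixed-capacity SSZ list sits at the same depth, so
# floorlog2 of the element gindices is independent of the element index.
def floorlog2(x: int) -> int:
    assert x > 0
    return x.bit_length() - 1

FIELD_GINDEX = 27    # illustrative gindex of the commitments list within the block body
CAPACITY_LOG2 = 12   # illustrative ceillog2 of the list capacity
# The extra factor of 2 accounts for the list's length mix-in level.
element_gindices = [FIELD_GINDEX * 2 * 2**CAPACITY_LOG2 + i for i in range(2**CAPACITY_LOG2)]
assert len({floorlog2(g) for g in element_gindices}) == 1
```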
|
|
@ -1,5 +1,5 @@
|
|||
import random
|
||||
from eth2spec.test.context import (
|
||||
always_bls,
|
||||
spec_state_test,
|
||||
with_deneb_and_later,
|
||||
)
|
||||
|
@ -10,58 +10,59 @@ from eth2spec.test.helpers.sharding import (
|
|||
get_sample_opaque_tx,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot
|
||||
)
|
||||
from eth2spec.test.helpers.keys import (
|
||||
pubkey_to_privkey
|
||||
build_empty_block_for_next_slot,
|
||||
sign_block
|
||||
)
|
||||
|
||||
|
||||
def _get_sample_sidecars(spec, state, rng):
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
|
||||
# 2 txs, each with 2 blobs
|
||||
blob_count = 2
|
||||
opaque_tx_1, blobs_1, blob_kzg_commitments_1, proofs_1 = get_sample_opaque_tx(spec, blob_count=blob_count, rng=rng)
|
||||
opaque_tx_2, blobs_2, blob_kzg_commitments_2, proofs_2 = get_sample_opaque_tx(spec, blob_count=blob_count, rng=rng)
|
||||
assert opaque_tx_1 != opaque_tx_2
|
||||
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments_1 + blob_kzg_commitments_2
|
||||
block.body.execution_payload.transactions = [opaque_tx_1, opaque_tx_2]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
|
||||
blobs = blobs_1 + blobs_2
|
||||
proofs = proofs_1 + proofs_2
|
||||
signed_block = sign_block(spec, state, block, proposer_index=0)
|
||||
return spec.get_blob_sidecars(signed_block, blobs, proofs)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_blob_sidecar_signature(spec, state):
|
||||
"""
|
||||
Test `get_blob_sidecar_signature`
|
||||
"""
|
||||
blob_count = 4
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
def test_blob_sidecar_inclusion_proof_correct(spec, state):
|
||||
rng = random.Random(1234)
|
||||
blob_sidecars = _get_sample_sidecars(spec, state, rng)
|
||||
|
||||
blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs)
|
||||
proposer = state.validators[blob_sidecars[1].proposer_index]
|
||||
privkey = pubkey_to_privkey[proposer.pubkey]
|
||||
sidecar_signature = spec.get_blob_sidecar_signature(state,
|
||||
blob_sidecars[1],
|
||||
privkey)
|
||||
|
||||
signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature)
|
||||
|
||||
assert spec.verify_blob_sidecar_signature(state, signed_blob_sidecar)
|
||||
for blob_sidecar in blob_sidecars:
|
||||
assert spec.verify_blob_sidecar_inclusion_proof(blob_sidecar)
|
||||
|
||||
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_blob_sidecar_signature_incorrect(spec, state):
|
||||
"""
|
||||
Test `get_blob_sidecar_signature`
|
||||
"""
|
||||
blob_count = 4
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
|
||||
block.body.blob_kzg_commitments = blob_kzg_commitments
|
||||
block.body.execution_payload.transactions = [opaque_tx]
|
||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||
def test_blob_sidecar_inclusion_proof_incorrect_wrong_body(spec, state):
|
||||
rng = random.Random(1234)
|
||||
blob_sidecars = _get_sample_sidecars(spec, state, rng)
|
||||
|
||||
blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs)
|
||||
for blob_sidecar in blob_sidecars:
|
||||
block = blob_sidecar.signed_block_header.message
|
||||
block.body_root = spec.hash(block.body_root) # mutate body root to break proof
|
||||
assert not spec.verify_blob_sidecar_inclusion_proof(blob_sidecar)
|
||||
|
||||
sidecar_signature = spec.get_blob_sidecar_signature(state,
|
||||
blob_sidecars[1],
|
||||
123)
|
||||
|
||||
signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature)
|
||||
@with_deneb_and_later
|
||||
@spec_state_test
|
||||
def test_blob_sidecar_inclusion_proof_incorrect_wrong_proof(spec, state):
|
||||
rng = random.Random(1234)
|
||||
blob_sidecars = _get_sample_sidecars(spec, state, rng)
|
||||
|
||||
assert not spec.verify_blob_sidecar_signature(state, signed_blob_sidecar)
|
||||
for blob_sidecar in blob_sidecars:
|
||||
# wrong proof
|
||||
blob_sidecar.kzg_commitment_inclusion_proof = spec.compute_merkle_proof(spec.BeaconBlockBody(), 0)
|
||||
assert not spec.verify_blob_sidecar_inclusion_proof(blob_sidecar)
|
||||
|
|
|
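Grounded in the assertions of the `single_merkle_proof` test earlier in this diff, here is a hedged sketch of what `verify_blob_sidecar_inclusion_proof` checks (not necessarily the spec's exact text):

```python
# Hedged sketch: verify a sidecar's commitment against the body root carried in its
# signed block header, using the generalized index of that commitment within the body.
def verify_blob_sidecar_inclusion_proof_sketch(spec, blob_sidecar):
    gindex = spec.get_generalized_index(spec.BeaconBlockBody, 'blob_kzg_commitments', blob_sidecar.index)
    return spec.is_valid_merkle_branch(
        leaf=blob_sidecar.kzg_commitment.hash_tree_root(),
        branch=blob_sidecar.kzg_commitment_inclusion_proof,
        depth=spec.KZG_COMMITMENT_INCLUSION_PROOF_DEPTH,  # equals floorlog2(gindex) per the networking test
        index=spec.get_subtree_index(gindex),
        root=blob_sidecar.signed_block_header.message.body_root,
    )
```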
@ -51,19 +51,21 @@ def run_attestation_processing(spec, state, attestation, valid=True):
|
|||
yield 'post', state
|
||||
|
||||
|
||||
def build_attestation_data(spec, state, slot, index, shard=None):
|
||||
def build_attestation_data(spec, state, slot, index, beacon_block_root=None, shard=None):
|
||||
assert state.slot >= slot
|
||||
|
||||
if slot == state.slot:
|
||||
block_root = build_empty_block_for_next_slot(spec, state).parent_root
|
||||
if beacon_block_root is not None:
|
||||
pass
|
||||
elif slot == state.slot:
|
||||
beacon_block_root = build_empty_block_for_next_slot(spec, state).parent_root
|
||||
else:
|
||||
block_root = spec.get_block_root_at_slot(state, slot)
|
||||
beacon_block_root = spec.get_block_root_at_slot(state, slot)
|
||||
|
||||
current_epoch_start_slot = spec.compute_start_slot_at_epoch(spec.get_current_epoch(state))
|
||||
if slot < current_epoch_start_slot:
|
||||
epoch_boundary_root = spec.get_block_root(state, spec.get_previous_epoch(state))
|
||||
elif slot == current_epoch_start_slot:
|
||||
epoch_boundary_root = block_root
|
||||
epoch_boundary_root = beacon_block_root
|
||||
else:
|
||||
epoch_boundary_root = spec.get_block_root(state, spec.get_current_epoch(state))
|
||||
|
||||
|
@ -77,7 +79,7 @@ def build_attestation_data(spec, state, slot, index, shard=None):
|
|||
data = spec.AttestationData(
|
||||
slot=slot,
|
||||
index=index,
|
||||
beacon_block_root=block_root,
|
||||
beacon_block_root=beacon_block_root,
|
||||
source=spec.Checkpoint(epoch=source_epoch, root=source_root),
|
||||
target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root),
|
||||
)
|
||||
|
@ -91,6 +93,7 @@ def get_valid_attestation(spec,
|
|||
slot=None,
|
||||
index=None,
|
||||
filter_participant_set=None,
|
||||
beacon_block_root=None,
|
||||
signed=False):
|
||||
# If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed.
|
||||
# Thus strictly speaking invalid when no participant is added later.
|
||||
|
@ -99,9 +102,7 @@ def get_valid_attestation(spec,
|
|||
if index is None:
|
||||
index = 0
|
||||
|
||||
attestation_data = build_attestation_data(
|
||||
spec, state, slot=slot, index=index
|
||||
)
|
||||
attestation_data = build_attestation_data(spec, state, slot=slot, index=index, beacon_block_root=beacon_block_root)
|
||||
|
||||
beacon_committee = spec.get_beacon_committee(
|
||||
state,
|
||||
|
@ -195,7 +196,7 @@ def add_attestations_to_state(spec, state, attestations, slot):
|
|||
spec.process_attestation(state, attestation)
|
||||
|
||||
|
||||
def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None):
|
||||
def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None, beacon_block_root=None):
|
||||
committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
|
||||
for index in range(committees_per_slot):
|
||||
def participants_filter(comm):
|
||||
|
@ -210,7 +211,8 @@ def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=
|
|||
slot_to_attest,
|
||||
index=index,
|
||||
signed=True,
|
||||
filter_participant_set=participants_filter
|
||||
filter_participant_set=participants_filter,
|
||||
beacon_block_root=beacon_block_root,
|
||||
)
|
||||
|
||||
|
||||
|
|
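With the new `beacon_block_root` parameter threaded through these helpers, attestations voting for an arbitrary root can be produced directly; a usage sketch matching the fork-choice tests above (`parent_root` stands for whichever root the attestations should vote for):

```python
# Usage sketch: produce signed attestations for the previous slot that vote for
# `parent_root` rather than the block actually at that slot.
attestations = get_valid_attestation_at_slot(
    state,
    spec,
    slot_to_attest=state.slot - 1,
    beacon_block_root=parent_root,
)
```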
|
@ -92,14 +92,11 @@ def add_attestations(spec, store, attestations, test_steps, is_from_block=False)
|
|||
|
||||
|
||||
def tick_and_run_on_attestation(spec, store, attestation, test_steps, is_from_block=False):
|
||||
parent_block = store.blocks[attestation.data.beacon_block_root]
|
||||
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
|
||||
block_time = pre_state.genesis_time + parent_block.slot * spec.config.SECONDS_PER_SLOT
|
||||
next_epoch_time = block_time + spec.SLOTS_PER_EPOCH * spec.config.SECONDS_PER_SLOT
|
||||
|
||||
if store.time < next_epoch_time:
|
||||
spec.on_tick(store, next_epoch_time)
|
||||
test_steps.append({'tick': int(next_epoch_time)})
|
||||
# Make get_current_slot(store) >= attestation.data.slot + 1
|
||||
min_time_to_include = (attestation.data.slot + 1) * spec.config.SECONDS_PER_SLOT
|
||||
if store.time < min_time_to_include:
|
||||
spec.on_tick(store, min_time_to_include)
|
||||
test_steps.append({'tick': int(min_time_to_include)})
|
||||
|
||||
yield from add_attestation(spec, store, attestation, test_steps, is_from_block)
|
||||
|
||||
|
|
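A quick arithmetic check of the new tick target (illustrative values, assuming the mainnet `SECONDS_PER_SLOT` of 12):

```python
# Illustrative only: for an attestation made at slot 5, the store must be ticked to at
# least the start of slot 6 so that get_current_slot(store) >= attestation.data.slot + 1.
SECONDS_PER_SLOT = 12
attestation_slot = 5
min_time_to_include = (attestation_slot + 1) * SECONDS_PER_SLOT
assert min_time_to_include == 72  # first second of slot 6, relative to genesis
```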
|
@ -56,13 +56,11 @@ def create_update(spec,
|
|||
|
||||
if with_next:
|
||||
update.next_sync_committee = attested_state.next_sync_committee
|
||||
update.next_sync_committee_branch = spec.compute_merkle_proof_for_state(
|
||||
attested_state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
|
||||
update.next_sync_committee_branch = spec.compute_merkle_proof(attested_state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
|
||||
|
||||
if with_finality:
|
||||
update.finalized_header = spec.block_to_light_client_header(finalized_block)
|
||||
update.finality_branch = spec.compute_merkle_proof_for_state(
|
||||
attested_state, spec.FINALIZED_ROOT_GINDEX)
|
||||
update.finality_branch = spec.compute_merkle_proof(attested_state, spec.FINALIZED_ROOT_GINDEX)
|
||||
|
||||
update.sync_aggregate, update.signature_slot = get_sync_aggregate(
|
||||
spec, attested_state, num_participants)
|
||||
|
|
|
@ -0,0 +1,167 @@
|
|||
from eth_utils import encode_hex
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_altair_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
get_valid_attestation_at_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.fork_choice import (
|
||||
apply_next_epoch_with_attestations,
|
||||
apply_next_slots_with_attestations,
|
||||
get_genesis_forkchoice_store_and_block,
|
||||
on_tick_and_append_step,
|
||||
output_store_checks,
|
||||
tick_and_add_block,
|
||||
tick_and_run_on_attestation,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
next_slot,
|
||||
state_transition_and_sign_block,
|
||||
)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_basic_is_head_root(spec, state):
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
# On receiving a block at slot `GENESIS_SLOT + 1`
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
assert spec.get_head(store) == signed_block.message.hash_tree_root()
|
||||
|
||||
# Proposer of next slot
|
||||
head_root = spec.get_head(store)
|
||||
|
||||
# Proposing next slot
|
||||
next_slot(spec, state)
|
||||
slot = state.slot
|
||||
|
||||
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
proposer_head = spec.get_proposer_head(store, head_root, slot)
|
||||
assert proposer_head == head_root
|
||||
|
||||
output_store_checks(spec, store, test_steps)
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'get_proposer_head': encode_hex(proposer_head),
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_basic_is_parent_root(spec, state):
|
||||
test_steps = []
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
next_epoch(spec, state)
|
||||
on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
|
||||
|
||||
# Fill epochs 1 to 3
|
||||
for _ in range(3):
|
||||
state, store, _ = yield from apply_next_epoch_with_attestations(
|
||||
spec, state, store, True, True, test_steps=test_steps)
|
||||
|
||||
assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
|
||||
assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
|
||||
assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
|
||||
|
||||
# Make an empty block
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
|
||||
# Fill a slot (parent)
|
||||
state, store, signed_parent_block = yield from apply_next_slots_with_attestations(
|
||||
spec, state, store, 1, True, True, test_steps)
|
||||
|
||||
# Fill a slot with attestations to its parent
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
parent_block_slot = block.slot - 1
|
||||
block.body.attestations = get_valid_attestation_at_slot(
|
||||
state,
|
||||
spec,
|
||||
parent_block_slot,
|
||||
)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
# Make the head block late
|
||||
attesting_cutoff = spec.config.SECONDS_PER_SLOT // spec.INTERVALS_PER_SLOT
|
||||
current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + attesting_cutoff
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
assert store.time == current_time
|
||||
|
||||
yield from tick_and_add_block(spec, store, signed_block, test_steps)
|
||||
|
||||
# Check conditions
|
||||
head_root = spec.get_head(store)
|
||||
head_block = store.blocks[head_root]
|
||||
parent_root = head_block.parent_root
|
||||
assert parent_root == signed_parent_block.message.hash_tree_root()
|
||||
parent_block = store.blocks[parent_root]
|
||||
|
||||
# Proposing next slot
|
||||
next_slot(spec, state)
|
||||
slot = state.slot
|
||||
|
||||
# Add attestations to the parent block
|
||||
current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
|
||||
on_tick_and_append_step(spec, store, current_time, test_steps)
|
||||
attestations = get_valid_attestation_at_slot(
|
||||
state,
|
||||
spec,
|
||||
slot_to_attest=slot - 1,
|
||||
beacon_block_root=parent_root,
|
||||
)
|
||||
for attestation in attestations:
|
||||
yield from tick_and_run_on_attestation(spec, store, attestation, test_steps)
|
||||
|
||||
# The conditions in `get_proposer_head`
|
||||
assert spec.is_head_late(store, head_root)
|
||||
assert spec.is_shuffling_stable(slot)
|
||||
assert spec.is_ffg_competitive(store, head_root, parent_root)
|
||||
assert spec.is_finalization_ok(store, slot)
|
||||
assert spec.is_proposing_on_time(store)
|
||||
|
||||
parent_slot_ok = parent_block.slot + 1 == head_block.slot
|
||||
current_time_ok = head_block.slot + 1 == slot
|
||||
single_slot_reorg = parent_slot_ok and current_time_ok
|
||||
assert single_slot_reorg
|
||||
|
||||
assert spec.is_head_weak(store, head_root)
|
||||
assert spec.is_parent_strong(store, parent_root)
|
||||
|
||||
proposer_head = spec.get_proposer_head(store, head_root, state.slot)
|
||||
assert proposer_head == parent_root
|
||||
|
||||
output_store_checks(spec, store, test_steps)
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'get_proposer_head': encode_hex(proposer_head),
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
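Analogous to the earlier `should_override_forkchoice_update` sketch, the conditions asserted above combine into the proposer-head decision roughly as follows (hedged; not the spec's exact text):

```python
# Rough sketch: the proposer builds on the parent only when every re-org condition holds.
def get_proposer_head_sketch(spec, store, head_root, slot):
    head_block = store.blocks[head_root]
    parent_root = head_block.parent_root
    conditions = [
        spec.is_head_late(store, head_root),
        spec.is_shuffling_stable(slot),
        spec.is_ffg_competitive(store, head_root, parent_root),
        spec.is_finalization_ok(store, slot),
        spec.is_proposing_on_time(store),
        store.blocks[parent_root].slot + 1 == head_block.slot,  # single-slot re-org
        head_block.slot + 1 == slot,
        spec.is_head_weak(store, head_root),
        spec.is_parent_strong(store, parent_root),
    ]
    return parent_root if all(conditions) else head_root
```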
|
@ -142,7 +142,7 @@ Optional step for optimistic sync tests.
|
|||
}
|
||||
```
|
||||
|
||||
This step sets the [`payloadStatus`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#PayloadStatusV1)
|
||||
This step sets the [`payloadStatus`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#payloadstatusv1)
|
||||
value that the Execution Layer client mock returns in responses to the following Engine API calls:
|
||||
* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash`
|
||||
* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash`
|
||||
|
@ -159,7 +159,7 @@ The checks to verify the current status of `store`.
|
|||
checks: {<store_attribute>: value} -- the assertions.
|
||||
```
|
||||
|
||||
`<store_attibute>` is the field member or property of [`Store`](../../../specs/phase0/fork-choice.md#store) object that maintained by client implementation. Currently, the possible fields included:
|
||||
`<store_attribute>` is a field member or property of the [`Store`](../../../specs/phase0/fork-choice.md#store) object maintained by the client implementation. The fields include:
|
||||
|
||||
```yaml
|
||||
head: {
|
||||
|
@ -179,6 +179,16 @@ finalized_checkpoint: {
|
|||
proposer_boost_root: string -- Encoded 32-byte value from store.proposer_boost_root
|
||||
```
|
||||
|
||||
Additionally, these fields are included if the `get_proposer_head` and `should_override_forkchoice_update` features are implemented:
|
||||
|
||||
```yaml
|
||||
get_proposer_head: string -- Encoded 32-byte value from get_proposer_head(store)
|
||||
should_override_forkchoice_update: { -- [New in Bellatrix]
|
||||
validator_is_connected: bool, -- The mocking result of `validator_is_connected(proposer_index)` in this call
|
||||
result: bool, -- The result of `should_override_forkchoice_update(store, head_root)`, where `head_root` is the result of `get_head(store)`
|
||||
}
|
||||
```
|
||||
|
||||
For example:
|
||||
```yaml
|
||||
- checks:
|
||||
|
@ -187,6 +197,8 @@ For example:
|
|||
justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
|
||||
finalized_checkpoint: {epoch: 2, root: '0x40d32d6283ec11c53317a46808bc88f55657d93b95a1af920403187accf48f4f'}
|
||||
proposer_boost_root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'
|
||||
get_proposer_head: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'
|
||||
should_override_forkchoice_update: {validator_is_connected: false, result: false}
|
||||
```
|
||||
|
||||
*Note*: Each `checks` step may include one or multiple items. Each item has to be checked against the current store.
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
# Merkle proof tests
|
||||
|
||||
Handlers:
|
||||
- `single_merkle_proof`: see [Single leaf merkle proof test format](../light_client/single_merkle_proof.md)
|
|
@ -10,12 +10,14 @@ if __name__ == "__main__":
|
|||
'ex_ante',
|
||||
'reorg',
|
||||
'withholding',
|
||||
'get_proposer_head',
|
||||
]}
|
||||
|
||||
# For merge `on_merge_block` test kind added with `pow_block_N.ssz` files with several
|
||||
# PowBlock's which should be resolved by `get_pow_block(hash: Hash32) -> PowBlock` function
|
||||
_new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.fork_choice.test_' + key for key in [
|
||||
'on_merge_block',
|
||||
'should_override_forkchoice_update',
|
||||
]}
|
||||
bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods)
|
||||
capella_mods = bellatrix_mods # No additional Capella specific fork choice tests
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
# Merkle proof tests
|
||||
|
||||
The purpose of this test-generator is to provide test-vectors for validating the correct implementation of the Merkle proof verification.
|
||||
|
||||
Test-format documentation can be found [here](../../formats/merkle_proof/README.md).
|
|
@ -0,0 +1,14 @@
|
|||
from eth2spec.test.helpers.constants import DENEB
|
||||
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
deneb_mods = {key: 'eth2spec.test.deneb.merkle_proof.test_' + key for key in [
|
||||
'single_merkle_proof',
|
||||
]}
|
||||
|
||||
all_mods = {
|
||||
DENEB: deneb_mods,
|
||||
}
|
||||
|
||||
run_state_test_generators(runner_name="merkle_proof", all_mods=all_mods)
|
|
@ -0,0 +1,2 @@
|
|||
pytest>=4.4
|
||||
../../../[generator]
|