Merge branch 'dev' into 4844-remove-empty-container

Danny Ryan 2023-03-01 08:58:42 -07:00 committed by GitHub
commit 7538e9a4f3
30 changed files with 472 additions and 320 deletions

View File

@ -23,16 +23,18 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
# To check generator matching:
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \
$(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SPEC_DIR)/altair/**/*.md) \
$(wildcard $(SPEC_DIR)/bellatrix/*.md) \
$(wildcard $(SPEC_DIR)/capella/*.md) $(wildcard $(SPEC_DIR)/capella/**/*.md) \
$(wildcard $(SPEC_DIR)/custody/*.md) \
$(wildcard $(SPEC_DIR)/das/*.md) \
$(wildcard $(SPEC_DIR)/sharding/*.md) \
$(wildcard $(SPEC_DIR)/deneb/*.md) $(wildcard $(SPEC_DIR)/deneb/**/*.md) \
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
$(wildcard $(SPEC_DIR)/*/*/*.md) \
$(wildcard $(SPEC_DIR)/_features/*/*.md) \
$(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
$(wildcard $(SSZ_DIR)/*.md)
ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb
# The parameters for commands. Use `foreach` to avoid listing specs again.
COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S)
MYPY_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), -p eth2spec.$S)
COV_HTML_OUT=.htmlcov
COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT)
COV_INDEX_FILE=$(COV_HTML_OUT_DIR)/index.html
@ -63,15 +65,14 @@ partial_clean:
rm -f .coverage
rm -rf $(PY_SPEC_DIR)/.pytest_cache
rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache
rm -rf $(ETH2SPEC_MODULE_DIR)/phase0
rm -rf $(ETH2SPEC_MODULE_DIR)/altair
rm -rf $(ETH2SPEC_MODULE_DIR)/bellatrix
rm -rf $(ETH2SPEC_MODULE_DIR)/capella
rm -rf $(ETH2SPEC_MODULE_DIR)/deneb
rm -rf $(COV_HTML_OUT_DIR)
rm -rf $(TEST_REPORT_DIR)
rm -rf eth2spec.egg-info dist build
rm -rf build
rm -rf build;
@for spec_name in $(ALL_EXECUTABLE_SPECS) ; do \
echo $$spec_name; \
rm -rf $(ETH2SPEC_MODULE_DIR)/$$spec_name; \
done
clean: partial_clean
rm -rf venv
@ -105,12 +106,12 @@ install_test:
# Testing against `minimal` or `mainnet` config by default
test: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
python3 -m pytest -n 4 --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
# Testing against `minimal` or `mainnet` config by default
find_test: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
python3 -m pytest -k=$(K) --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
citest: pyspec
mkdir -p $(TEST_REPORT_DIR);
@ -119,7 +120,7 @@ ifdef fork
python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
else
. venv/bin/activate; cd $(PY_SPEC_DIR); \
python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
endif
@ -137,13 +138,11 @@ check_toc: $(MARKDOWN_FILES:=.toc)
codespell:
codespell . --skip "./.git,./venv,$(PY_SPEC_DIR)/.mypy_cache" -I .codespell-whitelist
# TODO: add future protocol upgrade patch packages to linting.
# NOTE: we use `pylint` just for catching unused arguments in spec code
lint: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
&& pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb
&& pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \
&& mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE)
lint_generators: pyspec
. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \

View File

@ -26,9 +26,9 @@ Features are researched and developed in parallel, and then consolidated into se
| - | - | - |
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))</li></ul><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
| Deneb (tentative) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/deneb/beacon-chain.md)</li><li>[Deneb fork](specs/deneb/fork.md)</li><li>[Polynomial commitments](specs/deneb/polynomial-commitments.md)</li><li>[Fork choice changes](specs/deneb/fork-choice.md)</li></ul><li>Additions</li><ul><li>[Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))</li></ul><ul><li>[Honest validator guide changes](specs/deneb/validator.md)</li><li>[P2P networking](specs/deneb/p2p-interface.md)</li></ul></ul> |
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/sharding/p2p-interface.md)</li></ul></ul> |
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |
| Data Availability Sampling (outdated) | <ul><li>Core</li><ul><li>[Core types and functions](specs/das/das-core.md)</li><li>[Fork choice changes](specs/das/fork-choice.md)</li></ul><li>Additions</li><ul><li>[P2P Networking](specs/das/p2p-interface.md)</li><li>[Sampling process](specs/das/sampling.md)</li></ul></ul> | <ul><li> Dependent on sharding</li><li>[Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)</li></ul> |
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/_features/sharding/p2p-interface.md)</li></ul></ul> |
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/_features/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |
| Data Availability Sampling (outdated) | <ul><li>Core</li><ul><li>[Core types and functions](specs/_features/das/das-core.md)</li><li>[Fork choice changes](specs/_features/das/fork-choice.md)</li></ul><li>Additions</li><ul><li>[P2P Networking](specs/_features/das/p2p-interface.md)</li><li>[Sampling process](specs/_features/das/sampling.md)</li></ul></ul> | <ul><li> Dependent on sharding</li><li>[Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)</li></ul> |
### Accompanying documents can be found in [specs](specs) and include:

View File

@ -1,4 +1,4 @@
# Mainnet preset - Phase0
# Mainnet preset - Deneb
# Misc
# ---------------------------------------------------------------

View File

@ -1,4 +1,4 @@
# Minimal preset - Phase0
# Minimal preset - Deneb
# Misc
# ---------------------------------------------------------------

View File

@ -653,9 +653,9 @@ T = TypeVar('T') # For generic function
@classmethod
def sundry_functions(cls) -> str:
return super().sundry_functions() + '\n\n' + '''
def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> PyUnion[BlobsSidecar, str]:
def retrieve_blobs_and_proofs(beacon_block_root: Root) -> PyUnion[Tuple[Blob, KZGProof], Tuple[str, str]]:
# pylint: disable=unused-argument
return "TEST"'''
return ("TEST", "TEST")'''
@classmethod
def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
@ -1169,7 +1169,7 @@ setup(
"pycryptodome==3.15.0",
"py_ecc==6.0.0",
"milagro_bls_binding==1.9.0",
"remerkleable==0.1.25",
"remerkleable==0.1.27",
"trie==2.0.2",
RUAMEL_YAML_VERSION,
"lru-dict==1.1.8",

View File

@ -36,11 +36,11 @@ docs are requisite for this document and used throughout. Please see the Custody
## Becoming a validator
Becoming a validator in Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#becoming-a-validator) for details.
Becoming a validator in Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../../phase0/validator.md#becoming-a-validator) for details.
## Beacon chain validator assignments
Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#validator-assignments) for details.
Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../../phase0/validator.md#validator-assignments) for details.
##### Custody slashings

View File

@ -143,7 +143,7 @@ If the node does not already have connected peers on the topic it needs to sampl
### Topics and messages
Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
| Name | Message Type |
|----------------------------------|---------------------------|
| `das_sample_{subnet_index}` | `DASSample` |
@ -192,7 +192,7 @@ This is to serve other peers that may have missed it.
To pull samples from nodes, in case of network instability when samples are unavailable, a new query method is added to the Req-Resp domain.
This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../phase0/p2p-interface.md).
This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../../phase0/p2p-interface.md).
Note that DAS networking uses a different protocol prefix: `/eth2/das/req`

View File

@ -39,7 +39,7 @@ The adjustments and additions for Shards are outlined in this document.
### Topics and messages
Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names and payload types are:
| Name | Message Type |
|---------------------------------|--------------------------|

View File

@ -33,7 +33,7 @@ This document represents the changes to be made in the code of an "honest valida
## Prerequisites
This document is an extension of the [Bellatrix -- Honest Validator](../bellatrix/validator.md) guide.
This document is an extension of the [Bellatrix -- Honest Validator](../../bellatrix/validator.md) guide.
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Sharding](./beacon-chain.md) are requisite for this document and used throughout.

View File

@ -242,7 +242,7 @@ class BeaconState(Container):
current_sync_committee: SyncCommittee
next_sync_committee: SyncCommittee
# Execution
latest_execution_payload_header: ExecutionPayloadHeader
latest_execution_payload_header: ExecutionPayloadHeader # [Modified in Capella]
# Withdrawals
next_withdrawal_index: WithdrawalIndex # [New in Capella]
next_withdrawal_validator_index: ValidatorIndex # [New in Capella]

View File

@ -11,6 +11,7 @@
- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
- [Domain types](#domain-types)
- [Blob](#blob)
- [Preset](#preset)
- [Execution](#execution)
@ -44,15 +45,22 @@ This upgrade adds blobs to the beacon chain as part of Deneb. This is an extensi
| Name | SSZ equivalent | Description |
| - | - | - |
| `VersionedHash` | `Bytes32` | |
| `BlobIndex` | `uint64` | |
## Constants
### Domain types
| Name | Value |
| - | - |
| `DOMAIN_BLOB_SIDECAR` | `DomainType('0x0B000000')` |
### Blob
| Name | Value |
| - | - |
| `BLOB_TX_TYPE` | `uint8(0x05)` |
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` |
## Preset
@ -108,11 +116,11 @@ class ExecutionPayload(Container):
timestamp: uint64
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
base_fee_per_gas: uint256
excess_data_gas: uint256 # [New in Deneb]
# Extra payload fields
block_hash: Hash32 # Hash of execution block
transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
excess_data_gas: uint256 # [New in Deneb]
```
#### `ExecutionPayloadHeader`
@ -132,11 +140,11 @@ class ExecutionPayloadHeader(Container):
timestamp: uint64
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
base_fee_per_gas: uint256
excess_data_gas: uint256 # [New in Deneb]
# Extra payload fields
block_hash: Hash32 # Hash of execution block
transactions_root: Root
withdrawals_root: Root
excess_data_gas: uint256 # [New in Deneb]
```
## Helper functions
@ -230,10 +238,10 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
timestamp=payload.timestamp,
extra_data=payload.extra_data,
base_fee_per_gas=payload.base_fee_per_gas,
excess_data_gas=payload.excess_data_gas, # [New in Deneb]
block_hash=payload.block_hash,
transactions_root=hash_tree_root(payload.transactions),
withdrawals_root=hash_tree_root(payload.withdrawals),
excess_data_gas=payload.excess_data_gas, # [New in Deneb]
)
```
@ -249,7 +257,7 @@ def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> N
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Deneb testing only.
The `BeaconState` initialization is unchanged, except for the use of the updated `deneb.BeaconBlockBody` type
when initializing the first body-root.
```python

View File

@ -7,9 +7,8 @@
- [Introduction](#introduction)
- [Containers](#containers)
- [`BlobsSidecar`](#blobssidecar)
- [Helpers](#helpers)
- [`validate_blobs_sidecar`](#validate_blobs_sidecar)
- [`validate_blobs`](#validate_blobs)
- [`is_data_available`](#is_data_available)
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
- [`on_block`](#on_block)
@ -23,54 +22,40 @@ This is the modification of the fork choice accompanying the Deneb upgrade.
## Containers
### `BlobsSidecar`
```python
class BlobsSidecar(Container):
beacon_block_root: Root
beacon_block_slot: Slot
blobs: List[Blob, MAX_BLOBS_PER_BLOCK]
kzg_aggregated_proof: KZGProof
```
## Helpers
#### `validate_blobs_sidecar`
#### `validate_blobs`
```python
def validate_blobs_sidecar(slot: Slot,
beacon_block_root: Root,
expected_kzg_commitments: Sequence[KZGCommitment],
blobs_sidecar: BlobsSidecar) -> None:
assert slot == blobs_sidecar.beacon_block_slot
assert beacon_block_root == blobs_sidecar.beacon_block_root
blobs = blobs_sidecar.blobs
kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof
def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment],
blobs: Sequence[Blob],
proofs: Sequence[KZGProof]) -> None:
assert len(expected_kzg_commitments) == len(blobs)
assert len(blobs) == len(proofs)
assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof)
assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs)
```
#### `is_data_available`
The implementation of `is_data_available` will become more sophisticated during later scaling upgrades.
Initially, verification requires every verifying actor to retrieve the matching `BlobsSidecar`,
and validate the sidecar with `validate_blobs_sidecar`.
Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `validate_blobs`.
The block MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `BlobsSidecar` has subsequently been pruned.
The block MUST NOT be considered valid until all valid `Blob`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `Blob`s have subsequently been pruned.
```python
def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
# `retrieve_blobs_sidecar` is implementation and context dependent, raises an exception if not available.
def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
# `retrieve_blobs_and_proofs` is implementation and context dependent
# It returns all the blobs for the given block root, and raises an exception if not available
# Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
sidecar = retrieve_blobs_sidecar(slot, beacon_block_root)
blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root)
# For testing, `retrieve_blobs_sidecar` returns "TEST".
# TODO: Remove it once we have a way to inject `BlobsSidecar` into tests.
if isinstance(sidecar, str):
# For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST").
# TODO: Remove it once we have a way to inject `BlobSidecar` into tests.
if isinstance(blobs, str) or isinstance(proofs, str):
return True
validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar)
validate_blobs(blob_kzg_commitments, blobs, proofs)
return True
```
@ -78,7 +63,7 @@ def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments:
### `on_block`
*Note*: The only modification is the addition of the verification of transition block conditions.
*Note*: The only modification is the addition of the blob data availability check.
```python
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
@ -102,7 +87,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
# [New in Deneb]
# Check if blob data is available
# If not, this block MAY be queued and subsequently considered when blob data becomes available
assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments)
assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments)
# Check the block is valid and compute the post-state
state = pre_state.copy()

View File

@ -64,8 +64,6 @@ Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since i
### Upgrading the state
Since the `deneb.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`.
```python
def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
epoch = capella.get_current_epoch(pre)
@ -82,10 +80,10 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
timestamp=pre.latest_execution_payload_header.timestamp,
extra_data=pre.latest_execution_payload_header.extra_data,
base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
excess_data_gas=uint256(0), # [New in Deneb]
block_hash=pre.latest_execution_payload_header.block_hash,
transactions_root=pre.latest_execution_payload_header.transactions_root,
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
excess_data_gas=uint256(0), # [New in Deneb]
)
post = BeaconState(
# Versioning

View File

@ -10,21 +10,23 @@ The specification of these changes continues in the same format as the network s
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Configuration](#configuration)
- [Containers](#containers)
- [`SignedBeaconBlockAndBlobsSidecar`](#signedbeaconblockandblobssidecar)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
- [`beacon_block`](#beacon_block)
- [`beacon_block_and_blobs_sidecar`](#beacon_block_and_blobs_sidecar)
- [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
- [Messages](#messages)
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
- [BeaconBlockAndBlobsSidecarByRoot v1](#beaconblockandblobssidecarbyroot-v1)
- [BlobsSidecarsByRange v1](#blobssidecarsbyrange-v1)
- [Configuration](#configuration)
- [Containers](#containers)
- [`BlobSidecar`](#blobsidecar)
- [`SignedBlobSidecar`](#signedblobsidecar)
- [`BlobIdentifier`](#blobidentifier)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
- [`beacon_block`](#beacon_block)
- [`blob_sidecar_{index}`](#blob_sidecar_index)
- [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
- [Messages](#messages)
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
- [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1)
- [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1)
- [Design decision rationale](#design-decision-rationale)
- [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)
@ -35,17 +37,40 @@ The specification of these changes continues in the same format as the network s
| Name | Value | Description |
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
| `MAX_REQUEST_BLOBS_SIDECARS` | `2**7` (= 128) | Maximum number of blobs sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blobs sidecars |
| `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request |
| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
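
For orientation, the two derived quantities in the table above work out roughly as follows. This is illustrative arithmetic only; `MAX_BLOBS_PER_BLOCK` and the slot timing are assumptions of this example, not values defined here.

```python
# Illustrative arithmetic only; MAX_BLOBS_PER_BLOCK and slot timing are assumed, not defined here.
MAX_REQUEST_BLOCKS_DENEB = 2**7                      # = 128
MAX_BLOBS_PER_BLOCK = 4                              # assumed preset value for this example
MAX_REQUEST_BLOB_SIDECARS = MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK  # = 512

MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 2**12        # = 4096 epochs
SLOTS_PER_EPOCH, SECONDS_PER_SLOT = 32, 12           # assumed mainnet timing
days = MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS * SLOTS_PER_EPOCH * SECONDS_PER_SLOT / 86400  # ~18.2 days
```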
## Containers
### `SignedBeaconBlockAndBlobsSidecar`
### `BlobSidecar`
```python
class SignedBeaconBlockAndBlobsSidecar(Container):
beacon_block: SignedBeaconBlock
blobs_sidecar: BlobsSidecar
class BlobSidecar(Container):
block_root: Root
index: BlobIndex # Index of blob in block
slot: Slot
block_parent_root: Root # Proposer shuffling determinant
proposer_index: ValidatorIndex
blob: Blob
kzg_commitment: KZGCommitment
kzg_proof: KZGProof # Allows for quick verification of kzg_commitment
```
### `SignedBlobSidecar`
```python
class SignedBlobSidecar(Container):
message: BlobSidecar
signature: BLSSignature
```
### `BlobIdentifier`
```python
class BlobIdentifier(Container):
block_root: Root
index: BlobIndex
```
## The gossip domain: gossipsub
@ -55,7 +80,8 @@ Some gossip meshes are upgraded in the fork of Deneb to support upgraded types.
### Topics and messages
Topics follow the same specification as in prior upgrades.
The `beacon_block` topic is deprecated and replaced by the `beacon_block_and_blobs_sidecar` topic. All other topics remain stable.
The `beacon_block` topic is modified to also support deneb blocks and new topics are added per table below. All other topics remain stable.
The specification around the creation, validation, and dissemination of messages has not changed from the Capella document unless explicitly noted here.
@ -65,34 +91,32 @@ The new topics along with the type of the `data` field of a gossipsub message ar
| Name | Message Type |
| - | - |
| `beacon_block_and_blobs_sidecar` | `SignedBeaconBlockAndBlobsSidecar` (new) |
| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
#### Global topics
Deneb introduces a new global topic for beacon block and blobs-sidecars.
Deneb introduces new global topics for blob sidecars.
##### `beacon_block`
This topic is deprecated and clients **MUST NOT** expose it in their topic set to any peer. Implementers do not need to do
anything beyond simply skipping its implementation; this is explicitly called out because it is a departure from previous versioning
of this topic.
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in deneb.
Refer to [the section below](#transitioning-the-gossip) for details on how to transition the gossip.
##### `blob_sidecar_{index}`
##### `beacon_block_and_blobs_sidecar`
This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
This topic is used to propagate new signed and coupled beacon blocks and blobs sidecars to all nodes on the networks.
The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
In addition to the gossip validations for the `beacon_block` topic from prior specifications, the following validations MUST pass before forwarding the `signed_beacon_block_and_blobs_sidecar` on the network.
Alias `signed_beacon_block = signed_beacon_block_and_blobs_sidecar.beacon_block`, `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`.
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list
-- i.e. `verify_kzg_commitments_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzg_commitments)`
- _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`.
- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation.
- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey.
- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`.
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`).
If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
Alias `sidecar = signed_beacon_block_and_blobs_sidecar.blobs_sidecar`.
- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
-- i.e. `sidecar.beacon_block_slot == block.slot`.
- _[REJECT]_ The KZG commitments in the block are valid against the provided blobs sidecar
-- i.e. `validate_blobs_sidecar(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)`
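
As a non-normative aid, the sketch below restates the `blob_sidecar_{index}` checks above as a single classification. The function, its parameters, and the string results are illustrative only and not part of the spec; the caller is assumed to have already evaluated each predicate.

```python
from typing import Optional

def classify_blob_sidecar(sidecar_index: int, topic_index: int,
                          sidecar_slot: int, current_slot: int, finalized_slot: int,
                          parent_seen: bool, parent_valid: bool,
                          signature_valid: bool, first_for_block_root_and_index: bool,
                          proposer_index_matches: Optional[bool]) -> str:
    # [REJECT]: the message is invalid; the sending peer may be penalized.
    if sidecar_index != topic_index or not signature_valid:
        return "REJECT"
    # [IGNORE]: do not forward, but do not penalize (may be queued for later).
    if sidecar_slot > current_slot or sidecar_slot <= finalized_slot:
        return "IGNORE"
    if not parent_seen or not first_for_block_root_and_index:
        return "IGNORE"
    if not parent_valid:
        return "REJECT"
    # Proposer not yet verifiable against the shuffling: queue / IGNORE rather than REJECT.
    if proposer_index_matches is None:
        return "IGNORE"
    if not proposer_index_matches:
        return "REJECT"
    return "ACCEPT"
```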
### Transitioning the gossip
@ -121,13 +145,12 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
#### BeaconBlocksByRoot v2
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
After `DENEB_FORK_EPOCH`, `BeaconBlocksByRootV2` is replaced by `BeaconBlockAndBlobsSidecarByRootV1`.
Clients MUST support requesting blocks by root for pre-fork-epoch blocks.
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[1]: # (eth2spec: skip)
@ -138,16 +161,29 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
#### BeaconBlockAndBlobsSidecarByRoot v1
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
**Protocol ID:** `/eth2/beacon_chain/req/beacon_block_and_blobs_sidecar_by_root/1/`
#### BlobSidecarsByRoot v1
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/`
New in deneb.
The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[1]: # (eth2spec: skip)
| `fork_version` | Chunk SSZ type |
|--------------------------|-------------------------------|
| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |
Request Content:
```
(
List[Root, MAX_REQUEST_BLOCKS]
List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS]
)
```
@ -155,29 +191,42 @@ Response Content:
```
(
List[SignedBeaconBlockAndBlobsSidecar, MAX_REQUEST_BLOCKS]
List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS]
)
```
Requests blocks by block root (= `hash_tree_root(SignedBeaconBlockAndBlobsSidecar.beacon_block.message)`).
The response is a list of `SignedBeaconBlockAndBlobsSidecar` whose length is less than or equal to the number of requests.
It may be less in the case that the responding peer is missing blocks and sidecars.
Requests sidecars by block root and index.
The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests.
It may be less in the case that the responding peer is missing blocks or sidecars.
No more than `MAX_REQUEST_BLOCKS` may be requested at a time.
The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer
may not be available beyond the initial distribution via gossip.
`BeaconBlockAndBlobsSidecarByRoot` is primarily used to recover recent blocks and sidecars (e.g. when receiving a block or attestation whose parent is unknown).
No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing).
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlockAndBlobsSidecar` payload.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
Clients MUST support requesting blocks and sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers SHOULD respond with error code `3: ResourceUnavailable`.
Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response.
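
A minimal sketch of the `minimum_request_epoch` rule above (illustrative, not spec code):

```python
def minimum_request_epoch(finalized_epoch: int, current_epoch: int,
                          min_epochs_for_blob_sidecars_requests: int,
                          deneb_fork_epoch: int) -> int:
    # Oldest epoch for which a peer is expected to serve blob sidecars by root.
    return max(finalized_epoch,
               current_epoch - min_epochs_for_blob_sidecars_requests,
               deneb_fork_epoch)
```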
Clients MUST respond with at least one block and sidecar, if they have it.
Clients MUST respond with at least one sidecar, if they have it.
Clients MAY limit the number of blocks and sidecars in the response.
#### BlobsSidecarsByRange v1
#### BlobSidecarsByRange v1
**Protocol ID:** `/eth2/beacon_chain/req/blobs_sidecars_by_range/1/`
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/`
New in deneb.
The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
[1]: # (eth2spec: skip)
| `fork_version` | Chunk SSZ type |
|--------------------------|-------------------------------|
| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |
Request Content:
```
@ -190,73 +239,70 @@ Request Content:
Response Content:
```
(
List[BlobsSidecar, MAX_REQUEST_BLOBS_SIDECARS]
List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS]
)
```
Requests blobs sidecars in the slot range `[start_slot, start_slot + count)`,
leading up to the current head block as selected by fork choice.
Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice.
The response is unsigned, i.e. `BlobsSidecarsByRange`, as the signature of the beacon block proposer
may not be available beyond the initial distribution via gossip.
The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip.
Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and
correct w.r.t. the expected KZG commitments through `validate_blobs_sidecar`.
Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `validate_blobs`.
`BlobsSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` window.
`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window.
The request MUST be encoded as an SSZ-container.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `BlobsSidecar` payload.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
Clients MUST keep a record of signed blobs sidecars seen on the epoch range
`[max(current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`
`[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of blobs on this range.
Peers that are unable to reply to blobs sidecars requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
Peers that are unable to reply to blob sidecar requests within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
Such peers that are unable to successfully reply to this range of requests MAY get descored
or disconnected at any time.
*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
to be fully compliant with `BlobsSidecarsByRange` requests.
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
to be fully compliant with `BlobSidecarsByRange` requests.
*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
participating in the networking immediately, other peers MAY
disconnect and/or temporarily ban such an un-synced or semi-synced client.
Clients MUST respond with at least the first blobs sidecar that exists in the range, if they have it,
and no more than `MAX_REQUEST_BLOBS_SIDECARS` sidecars.
Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS` sidecars.
The following blobs sidecars, where they exist, MUST be sent in consecutive order.
Clients MUST include all blob sidecars of each block from which they include blob sidecars.
Clients MAY limit the number of blobs sidecars in the response.
The following blob sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order.
Slots that do not contain known blobs MUST be skipped, mimicking the behaviour
of the `BlocksByRange` request. Only response chunks with known blobs should
therefore be sent.
The response MUST contain no more than `count` blobs sidecars.
Clients MAY limit the number of blob sidecars in the response.
Clients MUST respond with blobs sidecars from their view of the current fork choice
-- that is, blobs sidecars as included by blocks from the single chain defined by the current head.
The response MUST contain no more than `count * MAX_BLOBS_PER_BLOCK` blob sidecars.
Clients MUST respond with blob sidecars from their view of the current fork choice
-- that is, blob sidecars as included by blocks from the single chain defined by the current head.
Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake.
Clients MUST respond with blobs sidecars that are consistent from a single chain within the context of the request.
Clients MUST respond with blob sidecars that are consistent from a single chain within the context of the request.
After the initial blobs sidecar, clients MAY stop in the process of responding
if their fork choice changes the view of the chain in the context of the request.
After the initial blob sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request.
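
The following sketch illustrates how a serving client might assemble a `BlobSidecarsByRange` response under the ordering and size rules above. The `sidecars_by_slot` map and the loop are illustrative only, not spec code.

```python
def build_by_range_response(sidecars_by_slot: dict, start_slot: int, count: int,
                            max_blobs_per_block: int) -> list:
    # `sidecars_by_slot` maps slot -> list of BlobSidecar on the canonical chain;
    # slots with no known blobs are absent from the map and are therefore skipped.
    response = []
    for slot in range(start_slot, start_slot + count):
        for sidecar in sorted(sidecars_by_slot.get(slot, []), key=lambda s: s.index):
            response.append(sidecar)  # consecutive (slot, index) order
    assert len(response) <= count * max_blobs_per_block
    return response
```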
# Design decision rationale
## Design decision rationale
## Why are blobs relayed as a sidecar, separate from beacon blocks?
### Why are blobs relayed as a sidecar, separate from beacon blocks?
This "sidecar" design provides forward compatibility for further data increases by black-boxing `is_data_available()`:
with full sharding `is_data_available()` can be replaced by data-availability-sampling (DAS)
thus avoiding all blobs being downloaded by all beacon nodes on the network.
Such a sharding design may introduce an updated `BlobsSidecar` to identify the shard,
Such a sharding design may introduce an updated `BlobSidecar` to identify the shard,
but does not affect the `BeaconBlock` structure.

View File

@ -25,11 +25,10 @@
- [`bytes_to_kzg_commitment`](#bytes_to_kzg_commitment)
- [`bytes_to_kzg_proof`](#bytes_to_kzg_proof)
- [`blob_to_polynomial`](#blob_to_polynomial)
- [`compute_challenges`](#compute_challenges)
- [`compute_challenge`](#compute_challenge)
- [`bls_modular_inverse`](#bls_modular_inverse)
- [`div`](#div)
- [`g1_lincomb`](#g1_lincomb)
- [`poly_lincomb`](#poly_lincomb)
- [`compute_powers`](#compute_powers)
- [Polynomials](#polynomials)
- [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form)
@ -37,11 +36,13 @@
- [`blob_to_kzg_commitment`](#blob_to_kzg_commitment)
- [`verify_kzg_proof`](#verify_kzg_proof)
- [`verify_kzg_proof_impl`](#verify_kzg_proof_impl)
- [`verify_kzg_proof_batch`](#verify_kzg_proof_batch)
- [`compute_kzg_proof`](#compute_kzg_proof)
- [`compute_quotient_eval_within_domain`](#compute_quotient_eval_within_domain)
- [`compute_kzg_proof_impl`](#compute_kzg_proof_impl)
- [`compute_aggregated_poly_and_commitment`](#compute_aggregated_poly_and_commitment)
- [`compute_aggregate_kzg_proof`](#compute_aggregate_kzg_proof)
- [`verify_aggregate_kzg_proof`](#verify_aggregate_kzg_proof)
- [`compute_blob_kzg_proof`](#compute_blob_kzg_proof)
- [`verify_blob_kzg_proof`](#verify_blob_kzg_proof)
- [`verify_blob_kzg_proof_batch`](#verify_blob_kzg_proof_batch)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -83,6 +84,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
| - | - |
| `FIELD_ELEMENTS_PER_BLOB` | `uint64(4096)` |
| `FIAT_SHAMIR_PROTOCOL_DOMAIN` | `b'FSBLOBVERIFY_V1_'` |
| `RANDOM_CHALLENGE_KZG_BATCH_DOMAIN` | `b'RCKZGBATCH___V1_'` |
### Crypto
@ -222,44 +224,24 @@ def blob_to_polynomial(blob: Blob) -> Polynomial:
return polynomial
```
#### `compute_challenges`
#### `compute_challenge`
```python
def compute_challenges(polynomials: Sequence[Polynomial],
commitments: Sequence[KZGCommitment]) -> Tuple[Sequence[BLSFieldElement], BLSFieldElement]:
def compute_challenge(blob: Blob,
commitment: KZGCommitment) -> BLSFieldElement:
"""
Return the Fiat-Shamir challenges required by the rest of the protocol.
The Fiat-Shamir logic works as per the following pseudocode:
hashed_data = hash(DOMAIN_SEPARATOR, polynomials, commitments)
r = hash(hashed_data, 0)
r_powers = [1, r, r**2, r**3, ...]
eval_challenge = hash(hashed_data, 1)
Then return `r_powers` and `eval_challenge` after converting them to BLS field elements.
The resulting field elements are not uniform over the BLS field.
Return the Fiat-Shamir challenge required by the rest of the protocol.
"""
# Append the number of polynomials and the degree of each polynomial as a domain separator
num_polynomials = int.to_bytes(len(polynomials), 8, ENDIANNESS)
degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS)
data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + num_polynomials
# Append each polynomial which is composed by field elements
for poly in polynomials:
for field_element in poly:
data += int.to_bytes(field_element, BYTES_PER_FIELD_ELEMENT, ENDIANNESS)
# Append the degree of the polynomial as a domain separator
degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 16, ENDIANNESS)
data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly
# Append serialized G1 points
for commitment in commitments:
data += commitment
data += blob
data += commitment
# Transcript has been prepared: time to create the challenges
hashed_data = hash(data)
r = hash_to_bls_field(hashed_data + b'\x00')
r_powers = compute_powers(r, len(commitments))
eval_challenge = hash_to_bls_field(hashed_data + b'\x01')
return r_powers, eval_challenge
# Transcript has been prepared: time to create the challenge
return hash_to_bls_field(data)
```
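
For orientation, the transcript hashed by `compute_challenge` is a fixed-size byte string. The arithmetic below is illustrative only.

```python
# Illustrative arithmetic only: byte layout of the Fiat-Shamir transcript above.
DOMAIN_LEN = len(b'FSBLOBVERIFY_V1_')   # 16-byte domain separator
DEGREE_LEN = 16                         # degree_poly, serialized to 16 bytes above
BLOB_LEN = 4096 * 32                    # FIELD_ELEMENTS_PER_BLOB * BYTES_PER_FIELD_ELEMENT = 131072
COMMITMENT_LEN = 48                     # compressed G1 point
assert DOMAIN_LEN + DEGREE_LEN + BLOB_LEN + COMMITMENT_LEN == 131152
```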
#### `bls_modular_inverse`
@ -297,23 +279,6 @@ def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElemen
return KZGCommitment(bls.G1_to_bytes48(result))
```
#### `poly_lincomb`
```python
def poly_lincomb(polys: Sequence[Polynomial],
scalars: Sequence[BLSFieldElement]) -> Polynomial:
"""
Given a list of ``polynomials``, interpret it as a 2D matrix and compute the linear combination
of each column with `scalars`: return the resulting polynomials.
"""
assert len(polys) == len(scalars)
result = [0] * FIELD_ELEMENTS_PER_BLOB
for v, s in zip(polys, scalars):
for i, x in enumerate(v):
result[i] = (result[i] + int(s) * int(x)) % BLS_MODULUS
return Polynomial([BLSFieldElement(x) for x in result])
```
#### `compute_powers`
```python
@ -414,6 +379,53 @@ def verify_kzg_proof_impl(commitment: KZGCommitment,
])
```
#### `verify_kzg_proof_batch`
```python
def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment],
zs: Sequence[BLSFieldElement],
ys: Sequence[BLSFieldElement],
proofs: Sequence[KZGProof]) -> bool:
"""
Verify multiple KZG proofs efficiently.
"""
assert len(commitments) == len(zs) == len(ys) == len(proofs)
# Compute a random challenge. Note that it does not have to be computed from a hash,
# r just has to be random.
degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS)
num_commitments = int.to_bytes(len(commitments), 8, ENDIANNESS)
data = RANDOM_CHALLENGE_KZG_BATCH_DOMAIN + degree_poly + num_commitments
# Append all inputs to the transcript before we hash
for commitment, z, y, proof in zip(commitments, zs, ys, proofs):
data += commitment \
+ int.to_bytes(z, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \
+ int.to_bytes(y, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \
+ proof
r = hash_to_bls_field(data)
r_powers = compute_powers(r, len(commitments))
# Verify: e(sum r^i proof_i, [s]) ==
# e(sum r^i (commitment_i - [y_i]) + sum r^i z_i proof_i, [1])
proof_lincomb = g1_lincomb(proofs, r_powers)
proof_z_lincomb = g1_lincomb(
proofs,
[BLSFieldElement((int(z) * int(r_power)) % BLS_MODULUS) for z, r_power in zip(zs, r_powers)],
)
C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))
for commitment, y in zip(commitments, ys)]
C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys]
C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers)
return bls.pairing_check([
[bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2[1]))],
[bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2]
])
```
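
As a non-normative note on why the batched pairing check above suffices: each individual KZG check `e(proof_i, [s - z_i]_2) == e(commitment_i - [y_i]_1, [1]_2)` can be rewritten, using `[s - z_i]_2 = [s]_2 - z_i * [1]_2`, as `e(proof_i, [s]_2) == e(commitment_i - [y_i]_1 + z_i * proof_i, [1]_2)`. Combining these with random powers `r^i` yields the single check implemented here, which fails with overwhelming probability if any individual check fails:

```latex
e\Big(\sum_i r^i \pi_i,\ [s]_2\Big) \;=\; e\Big(\sum_i r^i (C_i - [y_i]_1) + \sum_i r^i z_i \pi_i,\ [1]_2\Big)
```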
#### `compute_kzg_proof`
```python
@ -427,92 +439,125 @@ def compute_kzg_proof(blob: Blob, z: Bytes32) -> KZGProof:
return compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z))
```
#### `compute_quotient_eval_within_domain`
```python
def compute_quotient_eval_within_domain(z: BLSFieldElement,
polynomial: Polynomial,
y: BLSFieldElement
) -> BLSFieldElement:
"""
Given `y == p(z)` for a polynomial `p(x)`, compute `q(z)`: the KZG quotient polynomial evaluated at `z` for the
special case where `z` is in `ROOTS_OF_UNITY`.
For more details, read https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html section "Dividing
when one of the points is zero". The code below computes q(x_m) for the roots of unity special case.
"""
roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)
result = 0
for i, omega_i in enumerate(roots_of_unity_brp):
if omega_i == z: # skip the evaluation point in the sum
continue
f_i = int(BLS_MODULUS) + int(polynomial[i]) - int(y) % BLS_MODULUS
numerator = f_i * int(omega_i) % BLS_MODULUS
denominator = int(z) * (int(BLS_MODULUS) + int(z) - int(omega_i)) % BLS_MODULUS
result += int(div(BLSFieldElement(numerator), BLSFieldElement(denominator)))
return BLSFieldElement(result % BLS_MODULUS)
```
#### `compute_kzg_proof_impl`
```python
def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof:
"""
Helper function for compute_kzg_proof() and compute_aggregate_kzg_proof().
Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`.
"""
roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)
# For all x_i, compute p(x_i) - p(z)
y = evaluate_polynomial_in_evaluation_form(polynomial, z)
polynomial_shifted = [BLSFieldElement((int(p) - int(y)) % BLS_MODULUS) for p in polynomial]
# Make sure we won't divide by zero during division
assert z not in ROOTS_OF_UNITY
# For all x_i, compute (x_i - z)
denominator_poly = [BLSFieldElement((int(x) - int(z)) % BLS_MODULUS)
for x in bit_reversal_permutation(ROOTS_OF_UNITY)]
for x in roots_of_unity_brp]
# Compute the quotient polynomial directly in evaluation form
quotient_polynomial = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_BLOB
for i, (a, b) in enumerate(zip(polynomial_shifted, denominator_poly)):
if b == 0:
# The denominator is zero hence `z` is a root of unity: we must handle it as a special case
quotient_polynomial[i] = compute_quotient_eval_within_domain(roots_of_unity_brp[i], polynomial, y)
else:
# Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z).
quotient_polynomial[i] = div(a, b)
# Calculate quotient polynomial by doing point-by-point division
quotient_polynomial = [div(a, b) for a, b in zip(polynomial_shifted, denominator_poly)]
return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial))
```
#### `compute_aggregated_poly_and_commitment`
#### `compute_blob_kzg_proof`
```python
def compute_aggregated_poly_and_commitment(
blobs: Sequence[Blob],
kzg_commitments: Sequence[KZGCommitment]) -> Tuple[Polynomial, KZGCommitment, BLSFieldElement]:
def compute_blob_kzg_proof(blob: Blob) -> KZGProof:
"""
Return (1) the aggregated polynomial, (2) the aggregated KZG commitment,
and (3) the polynomial evaluation random challenge.
This function should also work with blobs == [] and kzg_commitments == []
"""
assert len(blobs) == len(kzg_commitments)
# Convert blobs to polynomials
polynomials = [blob_to_polynomial(blob) for blob in blobs]
# Generate random linear combination and evaluation challenges
r_powers, evaluation_challenge = compute_challenges(polynomials, kzg_commitments)
# Create aggregated polynomial in evaluation form
aggregated_poly = poly_lincomb(polynomials, r_powers)
# Compute commitment to aggregated polynomial
aggregated_poly_commitment = KZGCommitment(g1_lincomb(kzg_commitments, r_powers))
return aggregated_poly, aggregated_poly_commitment, evaluation_challenge
```
#### `compute_aggregate_kzg_proof`
```python
def compute_aggregate_kzg_proof(blobs: Sequence[Blob]) -> KZGProof:
"""
Given a list of blobs, return the aggregated KZG proof that is used to verify them against their commitments.
Given a blob, return the KZG proof that is used to verify it against the commitment.
Public method.
"""
commitments = [blob_to_kzg_commitment(blob) for blob in blobs]
aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment(
blobs,
commitments
)
return compute_kzg_proof_impl(aggregated_poly, evaluation_challenge)
commitment = blob_to_kzg_commitment(blob)
polynomial = blob_to_polynomial(blob)
evaluation_challenge = compute_challenge(blob, commitment)
return compute_kzg_proof_impl(polynomial, evaluation_challenge)
```
#### `verify_aggregate_kzg_proof`
#### `verify_blob_kzg_proof`
```python
def verify_aggregate_kzg_proof(blobs: Sequence[Blob],
commitments_bytes: Sequence[Bytes48],
aggregated_proof_bytes: Bytes48) -> bool:
def verify_blob_kzg_proof(blob: Blob,
commitment_bytes: Bytes48,
proof_bytes: Bytes48) -> bool:
"""
Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments.
Given a blob and a KZG proof, verify that the blob data corresponds to the provided commitment.
Public method.
"""
commitments = [bytes_to_kzg_commitment(c) for c in commitments_bytes]
commitment = bytes_to_kzg_commitment(commitment_bytes)
aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment(
blobs,
commitments
)
polynomial = blob_to_polynomial(blob)
evaluation_challenge = compute_challenge(blob, commitment)
# Evaluate aggregated polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero)
y = evaluate_polynomial_in_evaluation_form(aggregated_poly, evaluation_challenge)
# Evaluate polynomial at `evaluation_challenge`
y = evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)
# Verify aggregated proof
aggregated_proof = bytes_to_kzg_proof(aggregated_proof_bytes)
return verify_kzg_proof_impl(aggregated_poly_commitment, evaluation_challenge, y, aggregated_proof)
# Verify proof
proof = bytes_to_kzg_proof(proof_bytes)
return verify_kzg_proof_impl(commitment, evaluation_challenge, y, proof)
```
#### `verify_blob_kzg_proof_batch`
```python
def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
commitments_bytes: Sequence[Bytes48],
proofs_bytes: Sequence[Bytes48]) -> bool:
"""
Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments.
Public method.
"""
assert len(blobs) == len(commitments_bytes) == len(proofs_bytes)
commitments, evaluation_challenges, ys, proofs = [], [], [], []
for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes):
commitment = bytes_to_kzg_commitment(commitment_bytes)
commitments.append(commitment)
polynomial = blob_to_polynomial(blob)
evaluation_challenge = compute_challenge(blob, commitment)
evaluation_challenges.append(evaluation_challenge)
ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge))
proofs.append(bytes_to_kzg_proof(proof_bytes))
return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs)
```
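
A round-trip usage sketch of the public methods above, using only functions defined in this document; the all-zero blob and its constructor call are placeholders for illustration.

```python
# Illustrative usage of the public KZG methods defined above.
blobs = [Blob(b'\x00' * (4096 * 32))]                              # placeholder blob data (all zeros)
commitments = [blob_to_kzg_commitment(blob) for blob in blobs]     # commit to each blob
proofs = [compute_blob_kzg_proof(blob) for blob in blobs]          # one proof per blob
assert verify_blob_kzg_proof_batch(blobs, commitments, proofs)     # batched verification succeeds
```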

View File

@ -16,8 +16,7 @@
- [Block and sidecar proposal](#block-and-sidecar-proposal)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
- [Blob KZG commitments](#blob-kzg-commitments)
- [Constructing the `SignedBeaconBlockAndBlobsSidecar`](#constructing-the-signedbeaconblockandblobssidecar)
- [Block](#block)
- [Constructing the `SignedBlobSidecar`s](#constructing-the-signedblobsidecars)
- [Sidecar](#sidecar)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@ -53,7 +52,6 @@ def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFi
## Beacon chain responsibilities
All validator responsibilities remain unchanged other than those noted below.
Namely, the blob handling and the addition of `SignedBeaconBlockAndBlobsSidecar`.
### Block and sidecar proposal
@ -79,29 +77,47 @@ def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload,
3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`.
#### Constructing the `SignedBeaconBlockAndBlobsSidecar`
To construct a `SignedBeaconBlockAndBlobsSidecar`, a `signed_beacon_block_and_blobs_sidecar` is defined with the necessary context for block and sidecar proposal.
#### Constructing the `SignedBlobSidecar`s
##### Block
Set `signed_beacon_block_and_blobs_sidecar.beacon_block = block` where `block` is obtained above.
To construct a `SignedBlobSidecar`, a `signed_blob_sidecar` is defined with the necessary context for block and sidecar proposal.
##### Sidecar
Coupled with block, the corresponding blobs are packaged into a sidecar object for distribution to the network.
Set `signed_beacon_block_and_blobs_sidecar.blobs_sidecar = sidecar` where `sidecar` is obtained from:
Blobs associated with a block are packaged into sidecar objects for distribution to the network.
Each `sidecar` is obtained from:
```python
def get_blobs_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> BlobsSidecar:
return BlobsSidecar(
beacon_block_root=hash_tree_root(block),
beacon_block_slot=block.slot,
blobs=blobs,
kzg_aggregated_proof=compute_aggregate_kzg_proof(blobs),
)
def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobSidecar]:
return [
BlobSidecar(
block_root=hash_tree_root(block),
index=index,
slot=block.slot,
block_parent_root=block.parent_root,
blob=blob,
kzg_commitment=block.body.blob_kzg_commitments[index],
kzg_proof=compute_blob_kzg_proof(blob),
)
for index, blob in enumerate(blobs)
]
```
This `signed_beacon_block_and_blobs_sidecar` is then published to the global `beacon_block_and_blobs_sidecar` topic.
Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index.
`signature` is obtained from:
```python
def get_blob_sidecar_signature(state: BeaconState,
sidecar: BlobSidecar,
privkey: int) -> BLSSignature:
domain = get_domain(state, DOMAIN_BLOB_SIDECAR, compute_epoch_at_slot(sidecar.slot))
signing_root = compute_signing_root(sidecar, domain)
return bls.Sign(privkey, signing_root)
```
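
For illustration, a proposer implementation might combine the two helpers above roughly as follows; `get_signed_blob_sidecars` is a hypothetical wrapper, not part of the spec.

```python
def get_signed_blob_sidecars(state: BeaconState,
                             block: BeaconBlock,
                             blobs: Sequence[Blob],
                             privkey: int) -> Sequence[SignedBlobSidecar]:
    # Hypothetical helper: build every sidecar for the block, then sign each one
    # for publication on its `blob_sidecar_{index}` topic.
    return [
        SignedBlobSidecar(
            message=sidecar,
            signature=get_blob_sidecar_signature(state, sidecar, privkey),
        )
        for sidecar in get_blob_sidecars(block, blobs)
    ]
```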
After publishing, the peers on the network may request the sidecar through sync-requests, or a local user may be interested.
The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable,
to ensure the data-availability of these blobs throughout the network.

View File

@ -18,7 +18,7 @@
- [`get_current_slot`](#get_current_slot)
- [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start)
- [`get_ancestor`](#get_ancestor)
- [`get_latest_attesting_balance`](#get_latest_attesting_balance)
- [`get_weight`](#get_weight)
- [`filter_block_tree`](#filter_block_tree)
- [`get_filtered_block_tree`](#get_filtered_block_tree)
- [`get_head`](#get_head)
@ -174,10 +174,10 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
return root
```
#### `get_latest_attesting_balance`
#### `get_weight`
```python
def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
def get_weight(store: Store, root: Root) -> Gwei:
state = store.checkpoint_states[store.justified_checkpoint]
active_indices = get_active_validator_indices(state, get_current_epoch(state))
attestation_score = Gwei(sum(
@ -194,13 +194,9 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
proposer_score = Gwei(0)
# Boost is applied if ``root`` is an ancestor of ``proposer_boost_root``
if get_ancestor(store, store.proposer_boost_root, store.blocks[root].slot) == root:
num_validators = len(get_active_validator_indices(state, get_current_epoch(state)))
avg_balance = get_total_active_balance(state) // num_validators
committee_size = num_validators // SLOTS_PER_EPOCH
committee_weight = committee_size * avg_balance
committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH
proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100
return attestation_score + proposer_score
```
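As a quick sanity check (with made-up round numbers, not spec values), the one-line `committee_weight` above is the same quantity the removed four-line computation produced, up to integer-division rounding:
```python
# Illustrative numbers only.
SLOTS_PER_EPOCH = 32
num_validators = 500_000
total_active_balance = num_validators * 32_000_000_000  # 32 ETH in Gwei per validator

# Removed computation: per-slot committee size times average balance.
avg_balance = total_active_balance // num_validators
committee_size = num_validators // SLOTS_PER_EPOCH
old_committee_weight = committee_size * avg_balance

# New computation: total active balance spread over the slots of an epoch.
new_committee_weight = total_active_balance // SLOTS_PER_EPOCH

# With these numbers the two agree exactly; in general they can differ
# slightly due to integer-division rounding.
assert old_committee_weight == new_committee_weight
```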
#### `filter_block_tree`
@ -273,7 +269,7 @@ def get_head(store: Store) -> Root:
return head
# Sort by latest attesting balance with ties broken lexicographically
# Ties broken by favoring block with lexicographically higher root
head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root))
head = max(children, key=lambda root: (get_weight(store, root), root))
```
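For intuition, a tiny standalone sketch (with invented weights and roots) of how the `(get_weight(store, root), root)` tuple key favors the lexicographically higher root when weights tie:
```python
# Invented example: two children share the maximum weight.
weights = {"0xaa": 100, "0xbb": 100, "0xcc": 90}
# The tuple key compares weight first, then the root itself.
head = max(weights, key=lambda root: (weights[root], root))
assert head == "0xbb"  # tied with "0xaa" on weight; higher root wins
```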
#### `should_update_justified_checkpoint`
View File
@ -1 +1 @@
1.3.0-rc.2
1.3.0-rc.3
View File
@ -37,7 +37,7 @@ from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
@with_capella_and_later
@spec_state_test
def test_success_bls_change(spec, state):
def test_bls_change(spec, state):
index = 0
signed_address_change = get_signed_address_change(spec, state, validator_index=index)
pre_credentials = state.validators[index].withdrawal_credentials
@ -60,7 +60,46 @@ def test_success_bls_change(spec, state):
@with_capella_and_later
@spec_state_test
def test_success_exit_and_bls_change(spec, state):
def test_deposit_and_bls_change(spec, state):
initial_registry_len = len(state.validators)
initial_balances_len = len(state.balances)
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
signed_address_change = get_signed_address_change(
spec, state,
validator_index=validator_index,
withdrawal_pubkey=deposit.data.pubkey, # Deposit helper defaults to using the pubkey as the withdrawal credential
)
deposit_credentials = deposit.data.withdrawal_credentials
assert deposit_credentials[:1] == spec.BLS_WITHDRAWAL_PREFIX
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
block.body.deposits.append(deposit)
block.body.bls_to_execution_changes.append(signed_address_change)
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
yield 'post', state
assert len(state.validators) == initial_registry_len + 1
assert len(state.balances) == initial_balances_len + 1
validator_credentials = state.validators[validator_index].withdrawal_credentials
assert deposit_credentials != validator_credentials
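# Expected ETH1 withdrawal credentials layout: 1-byte prefix, 11 zero bytes, 20-byte execution address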
assert validator_credentials[:1] == spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
assert validator_credentials[1:12] == b'\x00' * 11
assert validator_credentials[12:] == signed_address_change.message.to_execution_address
@with_capella_and_later
@spec_state_test
def test_exit_and_bls_change(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
View File
@ -16,7 +16,7 @@ from eth2spec.test.helpers.sharding import (
)
def _run_validate_blobs_sidecar_test(spec, state, blob_count):
def _run_validate_blobs(spec, state, blob_count):
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
@ -24,30 +24,32 @@ def _run_validate_blobs_sidecar_test(spec, state, blob_count):
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
state_transition_and_sign_block(spec, state, block)
blobs_sidecar = spec.get_blobs_sidecar(block, blobs)
expected_commitments = [spec.blob_to_kzg_commitment(blobs[i]) for i in range(blob_count)]
spec.validate_blobs_sidecar(block.slot, block.hash_tree_root(), expected_commitments, blobs_sidecar)
# Also test the proof generation in `get_blob_sidecars`
blob_sidecars = spec.get_blob_sidecars(block, blobs)
blobs = [sidecar.blob for sidecar in blob_sidecars]
kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars]
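# Each blob must verify against its corresponding commitment and the proof generated by `get_blob_sidecars` above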
spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_sidecar_zero_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=0)
def test_validate_blobs_zero_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=0)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_sidecar_one_blob(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=1)
def test_validate_blobs_one_blob(spec, state):
_run_validate_blobs(spec, state, blob_count=1)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_sidecar_two_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=2)
def test_validate_blobs_two_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=2)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_sidecar_max_blobs(spec, state):
_run_validate_blobs_sidecar_test(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)
def test_validate_blobs_max_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)
View File
@ -87,3 +87,23 @@ def test_barycentric_within_domain(spec, state):
# The two evaluations should agree, and p(z) should also be the i-th "coefficient" of the polynomial in
# evaluation form
assert p_z_coeff == p_z_eval == poly_eval[i]
@with_deneb_and_later
@spec_state_test
def test_compute_kzg_proof_within_domain(spec, state):
"""
Create and verify KZG proof that p(z) == y
where z is in the domain of our KZG scheme (i.e. a relevant root of unity).
"""
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
polynomial = spec.blob_to_polynomial(blob)
roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY)
for i, z in enumerate(roots_of_unity_brp):
proof = spec.compute_kzg_proof_impl(polynomial, z)
y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z)
assert spec.verify_kzg_proof_impl(commitment, z, y, proof)
View File
@ -29,14 +29,12 @@ def run_fork_test(post_spec, pre_state):
'inactivity_scores',
# Sync
'current_sync_committee', 'next_sync_committee',
# Execution
'latest_execution_payload_header',
]
for field in stable_fields:
assert getattr(pre_state, field) == getattr(post_state, field)
# Modified fields
modified_fields = ['fork']
modified_fields = ['fork', 'latest_execution_payload_header']
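# `latest_execution_payload_header` is expected to change across the upgrade, since the header container is extended in the new fork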
for field in modified_fields:
assert getattr(pre_state, field) != getattr(post_state, field)
View File
@ -177,7 +177,7 @@ def get_opt_head_block_root(spec, mega_store):
return head
# Sort by latest attesting balance with ties broken lexicographically
# Ties broken by favoring block with lexicographically higher root
head = max(children, key=lambda root: (spec.get_latest_attesting_balance(store, root), root))
head = max(children, key=lambda root: (spec.get_weight(store, root), root))
def is_invalidated(mega_store, block_root):
View File
@ -729,14 +729,14 @@ def test_proposer_boost(spec, state):
on_tick_and_append_step(spec, store, time, test_steps)
yield from add_block(spec, store, signed_block, test_steps)
assert store.proposer_boost_root == spec.hash_tree_root(block)
assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0
assert spec.get_weight(store, spec.hash_tree_root(block)) > 0
# Ensure that boost is removed after slot is over
time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT +
spec.config.SECONDS_PER_SLOT)
on_tick_and_append_step(spec, store, time, test_steps)
assert store.proposer_boost_root == spec.Root()
assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0
assert spec.get_weight(store, spec.hash_tree_root(block)) == 0
next_slots(spec, state, 3)
block = build_empty_block_for_next_slot(spec, state)
@ -747,14 +747,14 @@ def test_proposer_boost(spec, state):
on_tick_and_append_step(spec, store, time, test_steps)
yield from add_block(spec, store, signed_block, test_steps)
assert store.proposer_boost_root == spec.hash_tree_root(block)
assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0
assert spec.get_weight(store, spec.hash_tree_root(block)) > 0
# Ensure that boost is removed after slot is over
time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT +
spec.config.SECONDS_PER_SLOT)
on_tick_and_append_step(spec, store, time, test_steps)
assert store.proposer_boost_root == spec.Root()
assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0
assert spec.get_weight(store, spec.hash_tree_root(block)) == 0
test_steps.append({
'checks': {