Merge branch 'dev' into pr3308
commit a044c0c805

@@ -0,0 +1,24 @@
name: Publish docs
on:
  push:
    branches:
      - master
permissions:
  contents: write
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Build docs
        run: make copy_docs
      - uses: actions/setup-python@v4
        with:
          python-version: 3.x
      - uses: actions/cache@v2
        with:
          key: ${{ github.ref }}
          path: .cache
      - run: pip install -e .[docs]
      - run: mkdocs gh-deploy --force

@@ -35,3 +35,11 @@ tests/core/pyspec/eth2spec/test_results.xml
 # TOC tool outputs temporary files
 *.tmp
+
+# docs reader build
+docs/specs
+docs/sync
+docs/ssz
+docs/fork_choice
+docs/README.md
+site

Makefile | 25

@@ -19,6 +19,11 @@ GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/.)))
 # Map this list of generator paths to "gen_{generator name}" entries
 GENERATOR_TARGETS = $(patsubst $(GENERATOR_DIR)/%/, gen_%, $(GENERATORS))
 GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS))
 
+# Documents
+DOCS_DIR = ./docs
+SSZ_DIR = ./ssz
+SYNC_DIR = ./sync
+FORK_CHOICE_DIR = ./fork_choice
 
 # To check generator matching:
 #$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])

@@ -214,3 +219,23 @@ detect_generator_incomplete: $(TEST_VECTOR_DIR)
 
 detect_generator_error_log: $(TEST_VECTOR_DIR)
 	[ -f $(GENERATOR_ERROR_LOG_FILE) ] && echo "[ERROR] $(GENERATOR_ERROR_LOG_FILE) file exists" || echo "[PASSED] error log file does not exist"
+
+
+# For docs reader
+install_docs:
+	python3 -m venv venv; . venv/bin/activate; python3 -m pip install -e .[docs];
+
+copy_docs:
+	cp -r $(SPEC_DIR) $(DOCS_DIR);
+	cp -r $(SYNC_DIR) $(DOCS_DIR);
+	cp -r $(SSZ_DIR) $(DOCS_DIR);
+	cp -r $(FORK_CHOICE_DIR) $(DOCS_DIR);
+	cp $(CURRENT_DIR)/README.md $(DOCS_DIR)/README.md
+
+build_docs: copy_docs
+	. venv/bin/activate;
+	mkdocs build
+
+serve_docs:
+	. venv/bin/activate;
+	mkdocs serve

@@ -65,6 +65,10 @@ Documentation on the different components used during spec writing can be found
 * [YAML Test Generators](tests/generators/README.md)
 * [Executable Python Spec, with Py-tests](tests/core/pyspec/README.md)
 
+## Online viewer of the latest release (latest `master` branch)
+
+[Ethereum Consensus Specs](https://ethereum.github.io/consensus-specs/)
+
 ## Consensus spec tests
 
 Conformance tests built from the executable python spec are available in the [Ethereum Proof-of-Stake Consensus Spec Tests](https://github.com/ethereum/consensus-spec-tests) repo. Compressed tarballs are available in [releases](https://github.com/ethereum/consensus-spec-tests/releases).

@@ -0,0 +1,5 @@
nav:
  - Home:
    - README.md
  - specs
  - ...

@@ -0,0 +1,163 @@
# How to add a new feature proposal in consensus-specs

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
## Table of Contents

- [A. Make it executable for linter checks](#a-make-it-executable-for-linter-checks)
  - [1. Create a folder under `./specs/_features`](#1-create-a-folder-under-specs_features)
  - [2. Choose the "previous fork" to extend: usually, use the scheduled or the latest mainnet fork version.](#2-choose-the-previous-fork-to-extend-usually-use-the-scheduled-or-the-latest-mainnet-fork-version)
  - [3. Write down your proposed `beacon-chain.md` change](#3-write-down-your-proposed-beacon-chainmd-change)
  - [4. Add `fork.md`](#4-add-forkmd)
  - [5. Make it executable](#5-make-it-executable)
- [B: Make it executable for pytest and test generator](#b-make-it-executable-for-pytest-and-test-generator)
  - [1. Add `light-client/*` docs if you updated the content of `BeaconBlock`](#1-add-light-client-docs-if-you-updated-the-content-of-beaconblock)
  - [2. Add the mainnet and minimal presets and update the configs](#2-add-the-mainnet-and-minimal-presets-and-update-the-configs)
  - [3. Update `context.py`](#3-update-contextpy)
  - [4. Update `constants.py`](#4-update-constantspy)
  - [5. Update `genesis.py`:](#5-update-genesispy)
  - [6. To add fork transition tests, update fork_transition.py](#6-to-add-fork-transition-tests-update-fork_transitionpy)
  - [7. Update CI configurations](#7-update-ci-configurations)
- [Others](#others)
  - [Bonus](#bonus)
  - [Need help?](#need-help)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## A. Make it executable for linter checks

### 1. Create a folder under `./specs/_features`

For example, if it's an `EIP-9999` CL spec, you can create a `./specs/_features/eip9999` folder.

### 2. Choose the "previous fork" to extend: usually, use the scheduled or the latest mainnet fork version.

For example, if the latest fork is Capella, use `./specs/capella` content as your "previous fork".

### 3. Write down your proposed `beacon-chain.md` change
- You can either use the [Beacon Chain Spec Template](./templates/beacon-chain-template.md) or make a copy of the latest fork's content and then edit it.
- Tips:
  - We use the [`doctoc`](https://www.npmjs.com/package/doctoc) tool to generate the table of contents.
    ```
    cd consensus-specs
    doctoc specs
    ```
  - The differences between "Constants", "Configurations", and "Presets":
    - Constants: values that should never change.
    - Configurations: settings that we may change for different networks.
    - Presets: settings that we may change for testing.
  - Readability and simplicity are more important than efficiency and optimization.
    - Use simple Python rather than fancy Python dark magic.

### 4. Add `fork.md`
You can refer to the previous fork's `fork.md` file.
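
For orientation, a feature's `fork.md` typically defines the fork's version/epoch configuration and an `upgrade_to_<feature>` function that rewrites the previous fork's `BeaconState` into the new state type. The sketch below is illustrative only -- `EIP9999_FORK_VERSION` and `upgrade_to_eip9999` are placeholder names, and the previous fork's `fork.md` is the authoritative shape to copy:

```python
def upgrade_to_eip9999(pre: capella.BeaconState) -> BeaconState:
    epoch = capella.get_current_epoch(pre)
    post = BeaconState(
        # ... copy all unchanged fields from `pre` ...
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=EIP9999_FORK_VERSION,  # placeholder constant defined in this fork.md
            epoch=epoch,
        ),
        # ... initialize any fields the feature adds ...
    )
    return post
```
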
### 5. Make it executable
- Update [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py) with the new feature name.
- Update [`setup.py`](https://github.com/ethereum/consensus-specs/blob/dev/setup.py):
  - Add a new `SpecBuilder` with the new feature name constant, e.g., `EIP9999SpecBuilder`.
  - Add the new `SpecBuilder` to the `spec_builders` list.
  - Add the paths of the new markdown files to the `finalize_options` function. (See the sketch below.)
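
For orientation, the `setup.py` pattern tends to look roughly like the sketch below. The base class, hooks, and registration details vary between releases, so treat the names here as approximations and copy the latest fork's builder as the real starting point:

```python
class EIP9999SpecBuilder(CapellaSpecBuilder):  # extend the previous fork's builder
    fork: str = EIP9999

    @classmethod
    def imports(cls, preset_name: str):
        # Import the previous fork so the new spec can extend it.
        return super().imports(preset_name) + f'''
from eth2spec.capella import {preset_name} as capella
'''

# Then register EIP9999SpecBuilder alongside the existing builders in
# `spec_builders`, and add the new markdown files (e.g.
# specs/_features/eip9999/beacon-chain.md) in `finalize_options`.
```
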
## B: Make it executable for pytest and test generator

### 1. Add `light-client/*` docs if you updated the content of `BeaconBlock`
- You can refer to the previous fork's `light-client/*` files.
- Add the paths of the new markdown files to `setup.py`'s `finalize_options` function.

### 2. Add the mainnet and minimal presets and update the configs
- Add presets: `presets/mainnet/<new-feature-name>.yaml` and `presets/minimal/<new-feature-name>.yaml`
- Update configs: `configs/mainnet.yaml` and `configs/minimal.yaml`

### 3. Update [`context.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py)
- Update `spec_targets` by adding `<NEW_FEATURE>`:

```python
from eth2spec.eip9999 import mainnet as spec_eip9999_mainnet, minimal as spec_eip9999_minimal

...

spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
    MINIMAL: {
        ...
        EIP9999: spec_eip9999_minimal,
    },
    MAINNET: {
        ...
        EIP9999: spec_eip9999_mainnet
    },
}
```

### 4. Update [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py)
- Add `<NEW_FEATURE>` to `ALL_PHASES` and `TESTGEN_FORKS`. (See the sketch below.)
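
For orientation, these are plain tuples of fork-name constants in `constants.py`, so the change is just appending the new feature. The membership shown below is illustrative rather than a copy of the current file:

```python
EIP9999 = SpecForkName('eip9999')  # the new feature name constant

ALL_PHASES = (
    PHASE0, ALTAIR, BELLATRIX, CAPELLA,
    EIP9999,  # [New]
)
TESTGEN_FORKS = (
    PHASE0, ALTAIR, BELLATRIX, CAPELLA,
    EIP9999,  # [New]
)
```
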
### 5. Update [`genesis.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/genesis.py):

We use `create_genesis_state` to create the default `state` in tests.

- Update `create_genesis_state` by adding the `fork_version` setting:

```python
def create_genesis_state(spec, validator_balances, activation_threshold):
    ...
    if spec.fork == ALTAIR:
        current_version = spec.config.ALTAIR_FORK_VERSION
    ...
    elif spec.fork == EIP9999:
        # Add the previous fork version of the given fork
        previous_version = spec.config.<PREVIOUS_FORK_VERSION>
        current_version = spec.config.EIP9999_FORK_VERSION
```

- If the given feature changes `BeaconState` fields, you have to set the initial values by adding:

```python
def create_genesis_state(spec, validator_balances, activation_threshold):
    ...
    if is_post_eip9999(spec):
        state.<NEW_FIELD> = <value>

    return state
```

- If the given feature changes `ExecutionPayload` fields, you have to set the initial values by updating the `get_sample_genesis_execution_payload_header` helper. (See the sketch below.)
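
For that last point, the helper builds a sample header, so a new payload field becomes one more assignment guarded by the feature check. The sketch below is illustrative; `<NEW_HEADER_FIELD>` and `<value>` are placeholders and the helper's exact signature should be taken from `genesis.py`:

```python
def get_sample_genesis_execution_payload_header(spec, eth1_block_hash=None):
    ...
    payload_header = spec.ExecutionPayloadHeader(
        block_hash=eth1_block_hash,
        # ... existing sample fields ...
    )
    if is_post_eip9999(spec):
        payload_header.<NEW_HEADER_FIELD> = <value>
    return payload_header
```
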
### 6. To add fork transition tests, update [fork_transition.py](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py)

```python
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None):
    ...

    if post_spec.fork == ALTAIR:
        state = post_spec.upgrade_to_altair(state)
    ...
    elif post_spec.fork == EIP9999:
        state = post_spec.upgrade_to_eip9999(state)

    ...

    if post_spec.fork == ALTAIR:
        assert state.fork.previous_version == post_spec.config.GENESIS_FORK_VERSION
        assert state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION
    ...
    elif post_spec.fork == EIP9999:
        assert state.fork.previous_version == post_spec.config.<PREVIOUS_FORK_VERSION>
        assert state.fork.current_version == post_spec.config.EIP9999_FORK_VERSION

    ...
```

### 7. Update CI configurations
- Update the [GitHub Actions config](https://github.com/ethereum/consensus-specs/blob/dev/.github/workflows/run-tests.yml)
  - Update the `pyspec-tests.strategy.matrix.version` list by adding the new feature to it
- Update the [CircleCI config](https://github.com/ethereum/consensus-specs/blob/dev/.circleci/config.yml)
  - Add a new job to `workflows.test_spec.jobs`

## Others

### Bonus
- Add `validator.md` if honest validator behavior changes with the new feature.

### Need help?
You can tag spec elves for cleaning up your PR. 🧚

@@ -0,0 +1,84 @@
# `beacon-chain.md` Template

# <FORK_NAME> -- The Beacon Chain

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

## Notation

## Custom types

## Constants

### [CATEGORY OF CONSTANTS]

| Name | Value |
| - | - |
| `<CONSTANT_NAME>` | `<VALUE>` |

## Preset

### [CATEGORY OF PRESETS]

| Name | Value |
| - | - |
| `<PRESET_FIELD_NAME>` | `<VALUE>` |

## Configuration

### [CATEGORY OF CONFIGURATIONS]

| Name | Value |
| - | - |
| `<CONFIGURATION_FIELD_NAME>` | `<VALUE>` |

## Containers

### [CATEGORY OF CONTAINERS]

#### `CONTAINER_NAME`

```python
class CONTAINER_NAME(Container):
    FIELD_NAME: SSZ_TYPE
```
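
For example, a filled-in container entry might look like the following (a purely hypothetical type, shown only to illustrate how the placeholders are replaced):

```python
class PendingThing(Container):  # hypothetical example
    owner_index: ValidatorIndex
    amount: Gwei
```
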
## Helper functions

### [CATEGORY OF HELPERS]

```python
<PYTHON HELPER FUNCTION>
```

### Epoch processing

### Block processing

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure <FORK_NAME> testing only.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    ...
```

@@ -0,0 +1,5 @@
nav:
  - 'Index': index.md
  - 'Altair': specs/altair/light-client/sync-protocol
  - 'Capella': specs/capella/light-client/sync-protocol
  - 'Deneb': specs/deneb/light-client/sync-protocol

@@ -0,0 +1 @@
# Light client specifications

@@ -0,0 +1,34 @@
/* Reference: https://zenn.dev/mebiusbox/articles/81d977a72cee01 */

[data-md-color-scheme=default] {
  --md-default-fg-color--light: #222 !important;
}
[data-md-color-scheme=slate] {
  --md-default-fg-color--light: #fefefe !important;
  --md-typeset-a-color: #fc0 !important;
}

.md-typeset pre {
  color: #f8f8f2;
}
.md-typeset .highlighttable {
  margin-left: -20px;
  margin-right: -20px;
  border-radius: 0;
}
.md-typeset .highlighttable > * {
  --md-code-bg-color: #222 !important;
  --md-code-fg-color: #fefefe !important;
}
.md-typeset .highlighttable .linenos .linenodiv pre span {
  background-color: #222 !important;
  color: #fefefe !important;
}
.md-typeset .highlighttable .md-clipboard:before,
.md-typeset .highlighttable .md-clipboard:after {
  color: rgba(240,240,240,.8);
}
.md-typeset .highlighttable .md-clipboard:hover:before,
.md-typeset .highlighttable .md-clipboard:hover:after {
  color: rgba(102,217,224,1);
}

@@ -0,0 +1,7 @@
nav:
  - ...
  - Fork Choice -- Core:
    - phase0: specs/phase0/fork-choice
    - bellatrix: specs/bellatrix/fork-choice
    - capella: specs/capella/fork-choice
    - deneb: specs/deneb/fork-choice

@@ -0,0 +1,40 @@
site_name: Ethereum Consensus Specs
site_url: https://ethereum.github.io/consensus-specs/
repo_name: ethereum/consensus-specs
theme:
  name: material
  palette:
    - scheme: default
      primary: black
      toggle:
        icon: material/brightness-7
        name: Switch to dark mode
    - scheme: slate
      primary: black
      toggle:
        icon: material/brightness-4
        name: Switch to light mode
  features:
    - navigation.tabs
    - search
markdown_extensions:
  - toc:
      permalink: true
  - pymdownx.superfences
  - pymdownx.highlight:
      use_pygments: true
      noclasses: true
      pygments_style: monokai
      linenums: true
      anchor_linenums: true
  - mdx_truly_sane_lists:
      nested_indent: 4
plugins:
  - search
  - awesome-pages
extra_css:
  - stylesheets/extra.css
extra:
  social:
    - icon: fontawesome/brands/github
      link: https://github.com/ethereum/consensus-specs

setup.py | 1

@@ -1181,6 +1181,7 @@ setup(
         "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
         "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==2.15.3"],
         "generator": ["python-snappy==0.6.1", "filelock"],
+        "docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"]
     },
     install_requires=[
         "eth-utils>=2.0.0,<3",

@@ -0,0 +1,4 @@
nav:
  - phase0
  - ...
  - _features

@@ -0,0 +1,72 @@
# EIP-4788 -- The Beacon Chain

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Containers](#containers)
  - [Extended Containers](#extended-containers)
    - [`ExecutionPayload`](#executionpayload)
    - [`ExecutionPayloadHeader`](#executionpayloadheader)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

TODO

## Containers

### Extended Containers

#### `ExecutionPayload`

```python
class ExecutionPayload(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress  # 'beneficiary' in the yellow paper
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32  # 'difficulty' in the yellow paper
    block_number: uint64  # 'number' in the yellow paper
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
    withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
    parent_beacon_block_root: Root  # [New in EIP-4788]
```

#### `ExecutionPayloadHeader`

```python
class ExecutionPayloadHeader(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32
    block_number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions_root: Root
    withdrawals_root: Root
    parent_beacon_block_root: Root  # [New in EIP-4788]
```
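
For context on the `[New in EIP-4788]` fields (an illustrative sketch, not normative text of this document): wherever the state's `latest_execution_payload_header` is built from a payload, the new field would simply be carried over next to the existing roots, along the lines of:

```python
state.latest_execution_payload_header = ExecutionPayloadHeader(
    parent_hash=payload.parent_hash,
    # ... other fields copied as in Capella ...
    transactions_root=hash_tree_root(payload.transactions),
    withdrawals_root=hash_tree_root(payload.withdrawals),
    parent_beacon_block_root=payload.parent_beacon_block_root,  # [New in EIP-4788]
)
```
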
@@ -0,0 +1,103 @@
# EIP-4788 -- Honest Validator

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Helpers](#helpers)
- [Protocols](#protocols)
  - [`ExecutionEngine`](#executionengine)
    - [`get_payload`](#get_payload)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block proposal](#block-proposal)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
      - [ExecutionPayload](#executionpayload)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document represents the changes to be made in the code of an "honest validator" to implement the EIP-4788 feature.

## Prerequisites

This document is an extension of the [Capella -- Honest Validator](../capella/validator.md) guide.
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.

All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Capella](../capella/beacon-chain.md) are requisite for this document and used throughout.
Please see related Beacon Chain doc before continuing and use them as a reference throughout.

## Helpers

## Protocols

### `ExecutionEngine`

#### `get_payload`

`get_payload` returns the upgraded EIP-4788 `ExecutionPayload` type.

## Beacon chain responsibilities

All validator responsibilities remain unchanged other than those noted below.

### Block proposal

#### Constructing the `BeaconBlockBody`

##### ExecutionPayload

`ExecutionPayload`s are constructed as they were in Capella, except that the parent beacon block root is also supplied.

*Note*: In this section, `state` is the state of the slot for the block proposal _without_ the block yet applied.
That is, `state` is the `previous_state` processed through any empty slots up to the assigned slot using `process_slots(previous_state, slot)`.

*Note*: The only change made to `prepare_execution_payload` is to add the parent beacon block root as an additional
parameter to the `PayloadAttributes`.

```python
def prepare_execution_payload(state: BeaconState,
                              pow_chain: Dict[Hash32, PowBlock],
                              safe_block_hash: Hash32,
                              finalized_block_hash: Hash32,
                              suggested_fee_recipient: ExecutionAddress,
                              execution_engine: ExecutionEngine) -> Optional[PayloadId]:
    if not is_merge_transition_complete(state):
        is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32()
        is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
        if is_terminal_block_hash_set and not is_activation_epoch_reached:
            # Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed
            return None

        terminal_pow_block = get_terminal_pow_block(pow_chain)
        if terminal_pow_block is None:
            # Pre-merge, no prepare payload call is needed
            return None
        # Signify merge via producing on top of the terminal PoW block
        parent_hash = terminal_pow_block.block_hash
    else:
        # Post-merge, normal payload
        parent_hash = state.latest_execution_payload_header.block_hash

    # Set the forkchoice head and initiate the payload build process
    payload_attributes = PayloadAttributes(
        timestamp=compute_timestamp_at_slot(state, state.slot),
        prev_randao=get_randao_mix(state, get_current_epoch(state)),
        suggested_fee_recipient=suggested_fee_recipient,
        withdrawals=get_expected_withdrawals(state),
        parent_beacon_block_root=hash_tree_root(state.latest_block_header),  # [New in EIP-4788]
    )
    return execution_engine.notify_forkchoice_updated(
        head_block_hash=parent_hash,
        safe_block_hash=safe_block_hash,
        finalized_block_hash=finalized_block_hash,
        payload_attributes=payload_attributes,
    )
```

@@ -33,7 +33,7 @@
 This is the beacon chain specification of in-protocol deposits processing mechanism.
 This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110).
 
-*Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development.
+*Note:* This specification is built upon [Capella](../../capella/beacon-chain.md) and is under active development.
 
 ## Constants
 

@@ -0,0 +1,65 @@
# EIP-6914 -- The Beacon Chain

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Preset](#preset)
  - [Time parameters](#time-parameters)
- [Helper functions](#helper-functions)
  - [Predicates](#predicates)
    - [`is_reusable_validator`](#is_reusable_validator)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
  - [Block processing](#block-processing)
    - [Modified `get_index_for_new_validator`](#modified-get_index_for_new_validator)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This is the beacon chain specification to assign new deposits to existing validator records. Refers to [EIP-6914](https://github.com/ethereum/EIPs/pull/6914).

*Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development.

## Preset

### Time parameters

| Name | Value | Unit | Duration |
| - | - | - | - |
| `SAFE_EPOCHS_TO_REUSE_INDEX` | `uint64(2**16)` (= 65,536) | epochs | ~0.8 year |
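
To sanity-check the "~0.8 year" figure, assuming mainnet timing of 12-second slots and 32-slot epochs:

```python
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32
SAFE_EPOCHS_TO_REUSE_INDEX = 2**16

seconds = SAFE_EPOCHS_TO_REUSE_INDEX * SLOTS_PER_EPOCH * SECONDS_PER_SLOT  # 25_165_824
print(seconds / (24 * 3600))           # ≈ 291.3 days
print(seconds / (365.25 * 24 * 3600))  # ≈ 0.80 years
```
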
## Helper functions

### Predicates

#### `is_reusable_validator`

```python
def is_reusable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool:
    """
    Check if ``validator`` index can be re-assigned to a new deposit.
    """
    return (
        epoch > validator.withdrawable_epoch + SAFE_EPOCHS_TO_REUSE_INDEX
        and balance == 0
    )
```

## Beacon chain state transition function

### Block processing

#### Modified `get_index_for_new_validator`

```python
def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex:
    for index, validator in enumerate(state.validators):
        if is_reusable_validator(validator, state.balances[index], get_current_epoch(state)):
            return ValidatorIndex(index)
    return ValidatorIndex(len(state.validators))
```
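
For illustration (a hypothetical walk-through, not spec text): combined with Altair's `set_or_append_list`-based `apply_deposit`, a new deposit now overwrites the first long-withdrawn, zero-balance record instead of always growing the registry.

```python
# Assume validator 7 is the first record that has had zero balance and has been
# withdrawable for more than SAFE_EPOCHS_TO_REUSE_INDEX epochs.
index = get_index_for_new_validator(state)
assert index == ValidatorIndex(7)        # reused slot...
assert index != len(state.validators)    # ...rather than a fresh append
```
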
@@ -47,7 +47,7 @@ Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interfa
 | `shard_column_{subnet_id}` | `SignedShardSample` |
 | `builder_block_bid` | `BuilderBlockBid` |
 
-The [DAS network specification](./das-p2p.md) defines additional topics.
+The [DAS network specification](../das/das-core.md) defines additional topics.
 
 #### Builder block bid
 

@@ -30,6 +30,8 @@
   - [Misc](#misc-1)
     - [`add_flag`](#add_flag)
     - [`has_flag`](#has_flag)
+    - [`get_index_for_new_validator`](#get_index_for_new_validator)
+    - [`set_or_append_list`](#set_or_append_list)
   - [Beacon state accessors](#beacon-state-accessors)
     - [`get_next_sync_committee_indices`](#get_next_sync_committee_indices)
     - [`get_next_sync_committee`](#get_next_sync_committee)

@@ -248,6 +250,23 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
     return flags & flag == flag
 ```
 
+#### `get_index_for_new_validator`
+
+```python
+def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex:
+    return ValidatorIndex(len(state.validators))
+```
+
+#### `set_or_append_list`
+
+```python
+def set_or_append_list(list: List, index: ValidatorIndex, value: Any) -> None:
+    if index == len(list):
+        list.append(value)
+    else:
+        list[index] = value
+```
+
 ### Beacon state accessors
 
 #### `get_next_sync_committee_indices`
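
For reference, the semantics of the new `set_or_append_list` helper are easiest to see on a plain Python list standing in for the SSZ `List` type (an illustrative example, not part of the diff):

```python
balances = [5, 6, 7]
set_or_append_list(balances, ValidatorIndex(3), 9)  # index == len(list): append
assert balances == [5, 6, 7, 9]
set_or_append_list(balances, ValidatorIndex(1), 0)  # index < len(list): overwrite in place
assert balances == [5, 0, 7, 9]
```
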
@@ -511,12 +530,14 @@ def apply_deposit(state: BeaconState,
     signing_root = compute_signing_root(deposit_message, domain)
     # Initialize validator if the deposit signature is valid
     if bls.Verify(pubkey, signing_root, signature):
-        state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount))
-        state.balances.append(amount)
+        index = get_index_for_new_validator(state)
+        validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount)
+        set_or_append_list(state.validators, index, validator)
+        set_or_append_list(state.balances, index, amount)
         # [New in Altair]
-        state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
-        state.current_epoch_participation.append(ParticipationFlags(0b0000_0000))
-        state.inactivity_scores.append(uint64(0))
+        set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
+        set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
+        set_or_append_list(state.inactivity_scores, index, uint64(0))
     else:
         # Increase balance by deposit amount
         index = ValidatorIndex(validator_pubkeys.index(pubkey))

@@ -13,7 +13,7 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery
 <!-- START doctoc generated TOC please keep comment here to allow auto update -->
 <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
 
 - [Warning](#warning)
 - [Modifications in Altair](#modifications-in-altair)
 - [MetaData](#metadata)
 - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)

@@ -43,9 +43,9 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery
 This document is currently illustrative for early Altair testnets and some parts are subject to change.
 Refer to the note in the [validator guide](./validator.md) for further details.
 
-# Modifications in Altair
+## Modifications in Altair
 
-## MetaData
+### MetaData
 
 The `MetaData` stored locally by clients is updated with an additional field to communicate the sync committee subnet subscriptions:
 

@@ -62,12 +62,12 @@ Where
 - `seq_number` and `attnets` have the same meaning defined in the Phase 0 document.
 - `syncnets` is a `Bitvector` representing the node's sync committee subnet subscriptions. This field should mirror the data in the node's ENR as outlined in the [validator guide](./validator.md#sync-committee-subnet-stability).
 
-## The gossip domain: gossipsub
+### The gossip domain: gossipsub
 
 Gossip meshes are added in Altair to support the consensus activities of the sync committees.
 Validators use an aggregation scheme to balance the processing and networking load across all of the relevant actors.
 
-### Topics and messages
+#### Topics and messages
 
 Topics follow the same specification as in the Phase 0 document.
 New topics are added in Altair to support the sync committees and the beacon block topic is updated with the modified type.

@@ -103,11 +103,11 @@ Definitions of these new types can be found in the [Altair validator guide](./va
 
 Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
 
-#### Global topics
+##### Global topics
 
 Altair changes the type of the global beacon block topic and adds one global topic to propagate partially aggregated sync committee messages to all potential proposers of beacon blocks.
 
-##### `beacon_block`
+###### `beacon_block`
 
 The existing specification for this topic does not change from the Phase 0 document,
 but the type of the payload does change to the (modified) `SignedBeaconBlock`.

@@ -115,7 +115,7 @@ This type changes due to the inclusion of the inner `BeaconBlockBody` that is mo
 
 See the [state transition document](./beacon-chain.md#beaconblockbody) for Altair for further details.
 
-##### `sync_committee_contribution_and_proof`
+###### `sync_committee_contribution_and_proof`
 
 This topic is used to propagate partially aggregated sync committee messages to be included in future blocks.
 

@@ -152,11 +152,11 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64
 - _[REJECT]_ The aggregator signature, `signed_contribution_and_proof.signature`, is valid.
 - _[REJECT]_ The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey derived from the participation info in `aggregation_bits` for the subcommittee specified by the `contribution.subcommittee_index`.
 
-#### Sync committee subnets
+##### Sync committee subnets
 
 Sync committee subnets are used to propagate unaggregated sync committee messages to subsections of the network.
 
-##### `sync_committee_{subnet_id}`
+###### `sync_committee_{subnet_id}`
 
 The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee messages to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic.
 

@@ -170,7 +170,7 @@ The following validations MUST pass before forwarding the `sync_committee_messag
 Note this validation is _per topic_ so that for a given `slot`, multiple messages could be forwarded with the same `validator_index` as long as the `subnet_id`s are distinct.
 - _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`.
 
-#### Sync committees and aggregation
+##### Sync committees and aggregation
 
 The aggregation scheme closely follows the design of the attestation aggregation scheme.
 Sync committee messages are broadcast into "subnets" defined by a topic.

@@ -182,7 +182,7 @@ Unaggregated messages (along with metadata) are sent as `SyncCommitteeMessage`s
 
 Aggregated sync committee messages are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic.
 
-### Transitioning the gossip
+#### Transitioning the gossip
 
 With any fork, the fork version, and thus the `ForkDigestValue`, change.
 Message types are unique per topic, and so for a smooth transition a node must temporarily subscribe to both the old and new topics.

@@ -205,9 +205,9 @@ Post-fork:
 E.g. an attestation on the both the old and new topic is ignored like any duplicate.
 - Two epochs after the fork, pre-fork topics SHOULD be unsubscribed from. This is well after the configured `seen_ttl`.
 
-## The Req/Resp domain
+### The Req/Resp domain
 
-### Req-Resp interaction
+#### Req-Resp interaction
 
 An additional `<context-bytes>` field is introduced to the `response_chunk` as defined in the Phase 0 document:
 

@@ -221,7 +221,7 @@ On a non-zero `<result>` with `ErrorMessage` payload, the `<context-bytes>` is a
 In Altair and later forks, `<context-bytes>` functions as a short meta-data,
 defined per req-resp method, and can parametrize the payload decoder.
 
-#### `ForkDigest`-context
+##### `ForkDigest`-context
 
 Starting with Altair, and in future forks, SSZ type definitions may change.
 For this common case, we define the `ForkDigest`-context:

@@ -229,9 +229,9 @@ For this common case, we define the `ForkDigest`-context:
 A fixed-width 4 byte `<context-bytes>`, set to the `ForkDigest` matching the chunk:
 `compute_fork_digest(fork_version, genesis_validators_root)`.
 
-### Messages
+#### Messages
 
-#### BeaconBlocksByRange v2
+##### BeaconBlocksByRange v2
 
 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
 

@@ -246,7 +246,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 | `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
 | `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
 
-#### BeaconBlocksByRoot v2
+##### BeaconBlocksByRoot v2
 
 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
 

@@ -261,7 +261,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 | `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
 | `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
 
-#### GetMetaData v2
+##### GetMetaData v2
 
 **Protocol ID:** `/eth2/beacon_chain/req/metadata/2/`
 

@@ -279,7 +279,7 @@ Requests the MetaData of a peer, using the new `MetaData` definition given above
 that is extended from phase 0 in Altair. Other conditions for the `GetMetaData`
 protocol are unchanged from the phase 0 p2p networking document.
 
-### Transitioning from v1 to v2
+#### Transitioning from v1 to v2
 
 In advance of the fork, implementations can opt in to both run the v1 and v2 for a smooth transition.
 This is non-breaking, and is recommended as soon as the fork specification is stable.

@@ -291,7 +291,7 @@ The v1 method MAY be unregistered at the fork boundary.
 In the event of a request on v1 for an Altair specific payload,
 the responder MUST return the **InvalidRequest** response code.
 
-## The discovery domain: discv5
+### The discovery domain: discv5
 
 The `attnets` key of the ENR is used as defined in the Phase 0 document.
 

@ -13,23 +13,23 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas
|
||||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||||
|
|
||||||
- [Warning](#warning)
|
- [Warning](#warning)
|
||||||
- [Modifications in Bellatrix](#modifications-in-bellatrix)
|
- [Modifications in Bellatrix](#modifications-in-bellatrix)
|
||||||
- [Configuration](#configuration)
|
- [Configuration](#configuration)
|
||||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||||
- [Topics and messages](#topics-and-messages)
|
- [Topics and messages](#topics-and-messages)
|
||||||
- [Global topics](#global-topics)
|
- [Global topics](#global-topics)
|
||||||
- [`beacon_block`](#beacon_block)
|
- [`beacon_block`](#beacon_block)
|
||||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||||
- [The Req/Resp domain](#the-reqresp-domain)
|
- [The Req/Resp domain](#the-reqresp-domain)
|
||||||
- [Messages](#messages)
|
- [Messages](#messages)
|
||||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||||
- [Design decision rationale](#design-decision-rationale)
|
- [Design decision rationale](#design-decision-rationale)
|
||||||
- [Gossipsub](#gossipsub)
|
- [Gossipsub](#gossipsub)
|
||||||
- [Why was the max gossip message size increased at Bellatrix?](#why-was-the-max-gossip-message-size-increased-at-bellatrix)
|
- [Why was the max gossip message size increased at Bellatrix?](#why-was-the-max-gossip-message-size-increased-at-bellatrix)
|
||||||
- [Req/Resp](#reqresp)
|
- [Req/Resp](#reqresp)
|
||||||
- [Why was the max chunk response size increased at Bellatrix?](#why-was-the-max-chunk-response-size-increased-at-bellatrix)
|
- [Why was the max chunk response size increased at Bellatrix?](#why-was-the-max-chunk-response-size-increased-at-bellatrix)
|
||||||
- [Why allow invalid payloads on the P2P network?](#why-allow-invalid-payloads-on-the-p2p-network)
|
- [Why allow invalid payloads on the P2P network?](#why-allow-invalid-payloads-on-the-p2p-network)
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
<!-- /TOC -->
|
<!-- /TOC -->
|
||||||
|
@ -39,9 +39,9 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas
|
||||||
This document is currently illustrative for early Bellatrix testnets and some parts are subject to change.
|
This document is currently illustrative for early Bellatrix testnets and some parts are subject to change.
|
||||||
Refer to the note in the [validator guide](./validator.md) for further details.
|
Refer to the note in the [validator guide](./validator.md) for further details.
|
||||||
|
|
||||||
# Modifications in Bellatrix
|
## Modifications in Bellatrix
|
||||||
|
|
||||||
## Configuration
|
### Configuration
|
||||||
|
|
||||||
This section outlines modifications constants that are used in this spec.
|
This section outlines modifications constants that are used in this spec.
|
||||||
|
|
||||||
|
@ -50,11 +50,11 @@ This section outlines modifications constants that are used in this spec.
|
||||||
| `GOSSIP_MAX_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed gossip messages starting at Bellatrix upgrade. |
|
| `GOSSIP_MAX_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed gossip messages starting at Bellatrix upgrade. |
|
||||||
| `MAX_CHUNK_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses starting at Bellatrix upgrade. |
|
| `MAX_CHUNK_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses starting at Bellatrix upgrade. |
|
||||||
|
|
||||||
## The gossip domain: gossipsub
|
### The gossip domain: gossipsub
|
||||||
|
|
||||||
Some gossip meshes are upgraded in Bellatrix to support upgraded types.
|
Some gossip meshes are upgraded in Bellatrix to support upgraded types.
|
||||||
|
|
||||||
### Topics and messages
|
#### Topics and messages
|
||||||
|
|
||||||
Topics follow the same specification as in prior upgrades.
|
Topics follow the same specification as in prior upgrades.
|
||||||
All topics remain stable except the beacon block topic which is updated with the modified type.
|
All topics remain stable except the beacon block topic which is updated with the modified type.
|
||||||
|
@ -76,11 +76,11 @@ The new topics along with the type of the `data` field of a gossipsub message ar
|
||||||
|
|
||||||
Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
|
Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
|
||||||
|
|
||||||
#### Global topics
|
##### Global topics
|
||||||
|
|
||||||
Bellatrix changes the type of the global beacon block topic.
|
Bellatrix changes the type of the global beacon block topic.
|
||||||
|
|
||||||
##### `beacon_block`
|
###### `beacon_block`
|
||||||
|
|
||||||
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Bellatrix.
|
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Bellatrix.
|
||||||
Specifically, this type changes with the addition of `execution_payload` to the inner `BeaconBlockBody`.
|
Specifically, this type changes with the addition of `execution_payload` to the inner `BeaconBlockBody`.
|
||||||
|
@ -107,12 +107,12 @@ Alias `block = signed_beacon_block.message`, `execution_payload = block.body.exe
|
||||||
The following gossip validation from prior specifications MUST NOT be applied if the execution is enabled for the block -- i.e. `is_execution_enabled(state, block.body)`:
|
The following gossip validation from prior specifications MUST NOT be applied if the execution is enabled for the block -- i.e. `is_execution_enabled(state, block.body)`:
|
||||||
- [REJECT] The block's parent (defined by `block.parent_root`) passes validation.
|
- [REJECT] The block's parent (defined by `block.parent_root`) passes validation.
|
||||||
|
|
||||||
### Transitioning the gossip
|
#### Transitioning the gossip
|
||||||
|
|
||||||
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
||||||
details on how to handle transitioning gossip topics.
|
details on how to handle transitioning gossip topics.
|
||||||
|
|
||||||
## The Req/Resp domain
|
### The Req/Resp domain
|
||||||
|
|
||||||
Non-faulty, [optimistic](/sync/optimistic.md) nodes may send blocks which
|
Non-faulty, [optimistic](/sync/optimistic.md) nodes may send blocks which
|
||||||
result in an INVALID response from an execution engine. To prevent network
|
result in an INVALID response from an execution engine. To prevent network
|
||||||
|
@ -122,9 +122,9 @@ down-scored or disconnected. Transmission of a block which is invalid due to
|
||||||
any consensus layer rules (i.e., *not* execution layer rules) MAY result in
|
any consensus layer rules (i.e., *not* execution layer rules) MAY result in
|
||||||
down-scoring or disconnection.
|
down-scoring or disconnection.
|
||||||
|
|
||||||
### Messages
|
#### Messages
|
||||||
|
|
||||||
#### BeaconBlocksByRange v2
|
##### BeaconBlocksByRange v2
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||||
|
|
||||||
|
@ -146,7 +146,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||||
|
|
||||||
#### BeaconBlocksByRoot v2
|
##### BeaconBlocksByRoot v2
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||||
|
|
||||||
|
@ -165,9 +165,9 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||||
|
|
||||||
# Design decision rationale
|
# Design decision rationale
|
||||||
|
|
||||||
## Gossipsub
|
### Gossipsub
|
||||||
|
|
||||||
### Why was the max gossip message size increased at Bellatrix?
|
#### Why was the max gossip message size increased at Bellatrix?
|
||||||
|
|
||||||
With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic
|
With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic
|
||||||
field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in
|
field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in
|
||||||
|
@ -190,9 +190,9 @@ order of 128 KiB in the worst case and the current gas limit (~30M) bounds max b
|
||||||
than 2 MiB today, this marginal difference in theoretical bounds will have zero
|
than 2 MiB today, this marginal difference in theoretical bounds will have zero
|
||||||
impact on network functionality and security.
|
impact on network functionality and security.
|
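As a rough sanity check of the size bound cited above, a calldata-dominated payload at the quoted ~30M gas limit stays under 2 MiB; the per-byte gas cost used below is an assumption for illustration, not a normative constant.

```python
# Back-of-the-envelope check of the bound discussed above.
GAS_LIMIT = 30_000_000          # roughly the gas limit cited above (~30M)
CALLDATA_GAS_PER_BYTE = 16      # approximate cost of a non-zero calldata byte (assumption)

max_transactions_bytes = GAS_LIMIT // CALLDATA_GAS_PER_BYTE
print(f"{max_transactions_bytes / (1 << 20):.2f} MiB")  # ~1.79 MiB, i.e. under 2 MiB
```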
||||||
|
|
||||||
## Req/Resp
|
### Req/Resp
|
||||||
|
|
||||||
### Why was the max chunk response size increased at Bellatrix?
|
#### Why was the max chunk response size increased at Bellatrix?
|
||||||
|
|
||||||
Similar to the discussion about the maximum gossip size increase, the
|
Similar to the discussion about the maximum gossip size increase, the
|
||||||
`ExecutionPayload` type can cause `BeaconBlock`s to exceed the 1 MiB bounds put
|
`ExecutionPayload` type can cause `BeaconBlock`s to exceed the 1 MiB bounds put
|
||||||
|
@ -204,7 +204,7 @@ valid block sizes in the range of gas limits expected in the medium term.
|
||||||
As with both gossip and req/rsp maximum values, type-specific limits should
|
As with both gossip and req/rsp maximum values, type-specific limits should
|
||||||
always be simultaneously respected.
|
always be simultaneously respected.
|
||||||
|
|
||||||
### Why allow invalid payloads on the P2P network?
|
#### Why allow invalid payloads on the P2P network?
|
||||||
|
|
||||||
The specification allows blocks with invalid execution payloads to propagate across
|
The specification allows blocks with invalid execution payloads to propagate across
|
||||||
gossip and via RPC calls. The reasoning for this is as follows:
|
gossip and via RPC calls. The reasoning for this is as follows:
|
||||||
|
|
|
@ -4,7 +4,7 @@ This document contains the networking specification for Capella.
|
||||||
|
|
||||||
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.
|
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.
|
||||||
|
|
||||||
## Table of contents
|
### Table of contents
|
||||||
|
|
||||||
<!-- TOC -->
|
<!-- TOC -->
|
||||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
@ -26,13 +26,13 @@ The specification of these changes continues in the same format as the network s
|
||||||
<!-- /TOC -->
|
<!-- /TOC -->
|
||||||
|
|
||||||
|
|
||||||
# Modifications in Capella
|
## Modifications in Capella
|
||||||
|
|
||||||
## The gossip domain: gossipsub
|
### The gossip domain: gossipsub
|
||||||
|
|
||||||
A new topic is added to support the gossip of withdrawal credential change messages, and an existing topic is upgraded for updated types in Capella.
|
A new topic is added to support the gossip of withdrawal credential change messages, and an existing topic is upgraded for updated types in Capella.
|
||||||
|
|
||||||
### Topics and messages
|
#### Topics and messages
|
||||||
|
|
||||||
Topics follow the same specification as in prior upgrades. All existing topics remain stable except the beacon block topic which is updated with the modified type.
|
Topics follow the same specification as in prior upgrades. All existing topics remain stable except the beacon block topic which is updated with the modified type.
|
||||||
|
|
||||||
|
@ -45,17 +45,17 @@ The new topics along with the type of the `data` field of a gossipsub message ar
|
||||||
|
|
||||||
Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
|
Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
|
||||||
|
|
||||||
#### Global topics
|
##### Global topics
|
||||||
|
|
||||||
Capella changes the type of the global beacon block topic and adds one global topic to propagate withdrawal credential change messages to all potential proposers of beacon blocks.
|
Capella changes the type of the global beacon block topic and adds one global topic to propagate withdrawal credential change messages to all potential proposers of beacon blocks.
|
||||||
|
|
||||||
##### `beacon_block`
|
###### `beacon_block`
|
||||||
|
|
||||||
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Capella.
|
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Capella.
|
||||||
Specifically, this type changes with the addition of `bls_to_execution_changes` to the inner `BeaconBlockBody`.
|
Specifically, this type changes with the addition of `bls_to_execution_changes` to the inner `BeaconBlockBody`.
|
||||||
See Capella [state transition document](./beacon-chain.md#beaconblockbody) for further details.
|
See Capella [state transition document](./beacon-chain.md#beaconblockbody) for further details.
|
||||||
|
|
||||||
##### `bls_to_execution_change`
|
###### `bls_to_execution_change`
|
||||||
|
|
||||||
This topic is used to propagate signed BLS to execution change messages to be included in future blocks.
|
This topic is used to propagate signed BLS to execution change messages to be included in future blocks.
|
||||||
|
|
||||||
|
@ -67,16 +67,16 @@ The following validations MUST pass before forwarding the `signed_bls_to_executi
|
||||||
for the validator with index `signed_bls_to_execution_change.message.validator_index`.
|
for the validator with index `signed_bls_to_execution_change.message.validator_index`.
|
||||||
- _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass validation.
|
- _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass validation.
|
||||||
|
|
||||||
### Transitioning the gossip
|
#### Transitioning the gossip
|
||||||
|
|
||||||
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
||||||
details on how to handle transitioning gossip topics for Capella.
|
details on how to handle transitioning gossip topics for Capella.
|
||||||
|
|
||||||
## The Req/Resp domain
|
### The Req/Resp domain
|
||||||
|
|
||||||
### Messages
|
#### Messages
|
||||||
|
|
||||||
#### BeaconBlocksByRange v2
|
##### BeaconBlocksByRange v2
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||||
|
|
||||||
|
@ -93,7 +93,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||||
|
|
||||||
#### BeaconBlocksByRoot v2
|
##### BeaconBlocksByRoot v2
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||||
|
|
||||||
|
|
|
@ -10,32 +10,35 @@ The specification of these changes continues in the same format as the network s
|
||||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||||
|
|
||||||
- [Configuration](#configuration)
|
- [Modifications in Deneb](#modifications-in-deneb)
|
||||||
- [Containers](#containers)
|
- [Configuration](#configuration)
|
||||||
- [`BlobSidecar`](#blobsidecar)
|
- [Containers](#containers)
|
||||||
- [`SignedBlobSidecar`](#signedblobsidecar)
|
- [`BlobSidecar`](#blobsidecar)
|
||||||
- [`BlobIdentifier`](#blobidentifier)
|
- [`SignedBlobSidecar`](#signedblobsidecar)
|
||||||
- [Helpers](#helpers)
|
- [`BlobIdentifier`](#blobidentifier)
|
||||||
- [`verify_blob_sidecar_signature`](#verify_blob_sidecar_signature)
|
- [Helpers](#helpers)
|
||||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
- [`verify_blob_sidecar_signature`](#verify_blob_sidecar_signature)
|
||||||
- [Topics and messages](#topics-and-messages)
|
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||||
- [Global topics](#global-topics)
|
- [Topics and messages](#topics-and-messages)
|
||||||
- [`beacon_block`](#beacon_block)
|
- [Global topics](#global-topics)
|
||||||
- [`blob_sidecar_{index}`](#blob_sidecar_index)
|
- [`beacon_block`](#beacon_block)
|
||||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
- [`blob_sidecar_{index}`](#blob_sidecar_index)
|
||||||
- [The Req/Resp domain](#the-reqresp-domain)
|
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||||
- [Messages](#messages)
|
- [The Req/Resp domain](#the-reqresp-domain)
|
||||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
- [Messages](#messages)
|
||||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||||
- [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1)
|
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||||
- [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1)
|
- [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1)
|
||||||
|
- [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1)
|
||||||
- [Design decision rationale](#design-decision-rationale)
|
- [Design decision rationale](#design-decision-rationale)
|
||||||
- [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)
|
- [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
<!-- /TOC -->
|
<!-- /TOC -->
|
||||||
|
|
||||||
## Configuration
|
## Modifications in Deneb
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
| Name | Value | Description |
|
| Name | Value | Description |
|
||||||
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
|
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
|
||||||
|
@ -43,9 +46,9 @@ The specification of these changes continues in the same format as the network s
|
||||||
| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
|
| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
|
||||||
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
|
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
|
||||||
|
|
||||||
## Containers
|
### Containers
|
||||||
|
|
||||||
### `BlobSidecar`
|
#### `BlobSidecar`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class BlobSidecar(Container):
|
class BlobSidecar(Container):
|
||||||
|
@ -59,7 +62,7 @@ class BlobSidecar(Container):
|
||||||
kzg_proof: KZGProof # Allows for quick verification of kzg_commitment
|
kzg_proof: KZGProof # Allows for quick verification of kzg_commitment
|
||||||
```
|
```
|
||||||
|
|
||||||
### `SignedBlobSidecar`
|
#### `SignedBlobSidecar`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class SignedBlobSidecar(Container):
|
class SignedBlobSidecar(Container):
|
||||||
|
@ -67,7 +70,7 @@ class SignedBlobSidecar(Container):
|
||||||
signature: BLSSignature
|
signature: BLSSignature
|
||||||
```
|
```
|
||||||
|
|
||||||
### `BlobIdentifier`
|
#### `BlobIdentifier`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class BlobIdentifier(Container):
|
class BlobIdentifier(Container):
|
||||||
|
@ -75,9 +78,9 @@ class BlobIdentifier(Container):
|
||||||
index: BlobIndex
|
index: BlobIndex
|
||||||
```
|
```
|
||||||
|
|
||||||
### Helpers
|
#### Helpers
|
||||||
|
|
||||||
#### `verify_blob_sidecar_signature`
|
##### `verify_blob_sidecar_signature`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool:
|
def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool:
|
||||||
|
@ -86,11 +89,11 @@ def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: Signe
|
||||||
return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature)
|
return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature)
|
||||||
```
|
```
|
||||||
|
|
||||||
## The gossip domain: gossipsub
|
### The gossip domain: gossipsub
|
||||||
|
|
||||||
Some gossip meshes are upgraded in the fork of Deneb to support upgraded types.
|
Some gossip meshes are upgraded in the fork of Deneb to support upgraded types.
|
||||||
|
|
||||||
### Topics and messages
|
#### Topics and messages
|
||||||
|
|
||||||
Topics follow the same specification as in prior upgrades.
|
Topics follow the same specification as in prior upgrades.
|
||||||
|
|
||||||
|
@ -106,15 +109,15 @@ The new topics along with the type of the `data` field of a gossipsub message ar
|
||||||
| - | - |
|
| - | - |
|
||||||
| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
|
| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
|
||||||
|
|
||||||
#### Global topics
|
##### Global topics
|
||||||
|
|
||||||
Deneb introduces new global topics for blob sidecars.
|
Deneb introduces new global topics for blob sidecars.
|
||||||
|
|
||||||
##### `beacon_block`
|
###### `beacon_block`
|
||||||
|
|
||||||
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Deneb.
|
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Deneb.
|
||||||
|
|
||||||
##### `blob_sidecar_{index}`
|
###### `blob_sidecar_{index}`
|
||||||
|
|
||||||
This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
|
This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
|
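For illustration, the per-index topic names could be enumerated as below; the fork digest and the `MAX_BLOBS_PER_BLOCK` value are placeholders, not normative.

```python
# Illustrative sketch: one gossip topic per blob sidecar index.
MAX_BLOBS_PER_BLOCK = 6           # placeholder value for illustration
FORK_DIGEST = "6a95a1a9"          # placeholder ForkDigestValue (lowercase hex, no 0x)

blob_sidecar_topics = [
    f"/eth2/{FORK_DIGEST}/blob_sidecar_{index}/ssz_snappy"
    for index in range(MAX_BLOBS_PER_BLOCK)
]
print(blob_sidecar_topics[0])  # /eth2/6a95a1a9/blob_sidecar_0/ssz_snappy
```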
||||||
|
|
||||||
|
@ -132,16 +135,16 @@ The following validations MUST pass before forwarding the `signed_blob_sidecar`
|
||||||
If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||||
|
|
||||||
|
|
||||||
### Transitioning the gossip
|
#### Transitioning the gossip
|
||||||
|
|
||||||
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
||||||
details on how to handle transitioning gossip topics for this upgrade.
|
details on how to handle transitioning gossip topics for this upgrade.
|
||||||
|
|
||||||
## The Req/Resp domain
|
### The Req/Resp domain
|
||||||
|
|
||||||
### Messages
|
#### Messages
|
||||||
|
|
||||||
#### BeaconBlocksByRange v2
|
##### BeaconBlocksByRange v2
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||||
|
|
||||||
|
@ -161,7 +164,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||||
|
|
||||||
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
|
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
|
||||||
|
|
||||||
#### BeaconBlocksByRoot v2
|
##### BeaconBlocksByRoot v2
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||||
|
|
||||||
|
@ -179,7 +182,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||||
|
|
||||||
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
|
No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
|
||||||
|
|
||||||
#### BlobSidecarsByRoot v1
|
##### BlobSidecarsByRoot v1
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/`
|
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/`
|
||||||
|
|
||||||
|
@ -228,7 +231,7 @@ Clients MUST support requesting sidecars since `minimum_request_epoch`, where `m
|
||||||
Clients MUST respond with at least one sidecar, if they have it.
|
Clients MUST respond with at least one sidecar, if they have it.
|
||||||
Clients MAY limit the number of blocks and sidecars in the response.
|
Clients MAY limit the number of blocks and sidecars in the response.
|
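For orientation, one plausible shape of the `minimum_request_epoch` computation referenced above is sketched below; the diff truncates the normative definition, so treat the exact formula here as an assumption.

```python
# Assumption-laden sketch of a lower bound for servable blob-sidecar epochs.
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 2**12  # from the configuration table above

def sketch_minimum_request_epoch(finalized_epoch: int, current_epoch: int, deneb_fork_epoch: int) -> int:
    return max(
        finalized_epoch,
        current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
        deneb_fork_epoch,
    )
```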
||||||
|
|
||||||
#### BlobSidecarsByRange v1
|
##### BlobSidecarsByRange v1
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/`
|
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/`
|
||||||
|
|
||||||
|
|
|
@ -11,8 +11,8 @@
|
||||||
- [Configuration](#configuration)
|
- [Configuration](#configuration)
|
||||||
- [Helpers](#helpers)
|
- [Helpers](#helpers)
|
||||||
- [`LatestMessage`](#latestmessage)
|
- [`LatestMessage`](#latestmessage)
|
||||||
- [`is_previous_epoch_justified`](#is_previous_epoch_justified)
|
|
||||||
- [`Store`](#store)
|
- [`Store`](#store)
|
||||||
|
- [`is_previous_epoch_justified`](#is_previous_epoch_justified)
|
||||||
- [`get_forkchoice_store`](#get_forkchoice_store)
|
- [`get_forkchoice_store`](#get_forkchoice_store)
|
||||||
- [`get_slots_since_genesis`](#get_slots_since_genesis)
|
- [`get_slots_since_genesis`](#get_slots_since_genesis)
|
||||||
- [`get_current_slot`](#get_current_slot)
|
- [`get_current_slot`](#get_current_slot)
|
||||||
|
@ -93,17 +93,6 @@ class LatestMessage(object):
|
||||||
root: Root
|
root: Root
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
### `is_previous_epoch_justified`
|
|
||||||
|
|
||||||
```python
|
|
||||||
def is_previous_epoch_justified(store: Store) -> bool:
|
|
||||||
current_slot = get_current_slot(store)
|
|
||||||
current_epoch = compute_epoch_at_slot(current_slot)
|
|
||||||
return store.justified_checkpoint.epoch + 1 == current_epoch
|
|
||||||
```
|
|
||||||
|
|
||||||
|
|
||||||
#### `Store`
|
#### `Store`
|
||||||
|
|
||||||
The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below:
|
The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below:
|
||||||
|
@ -131,6 +120,15 @@ class Store(object):
|
||||||
unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
|
unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### `is_previous_epoch_justified`
|
||||||
|
|
||||||
|
```python
|
||||||
|
def is_previous_epoch_justified(store: Store) -> bool:
|
||||||
|
current_slot = get_current_slot(store)
|
||||||
|
current_epoch = compute_epoch_at_slot(current_slot)
|
||||||
|
return store.justified_checkpoint.epoch + 1 == current_epoch
|
||||||
|
```
|
||||||
|
|
||||||
#### `get_forkchoice_store`
|
#### `get_forkchoice_store`
|
||||||
|
|
||||||
The provided anchor-state will be regarded as a trusted state, to not roll back beyond.
|
The provided anchor-state will be regarded as a trusted state, to not roll back beyond.
|
||||||
|
|
|
@ -111,11 +111,11 @@ It consists of four main sections:
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
<!-- /TOC -->
|
<!-- /TOC -->
|
||||||
|
|
||||||
# Network fundamentals
|
## Network fundamentals
|
||||||
|
|
||||||
This section outlines the specification for the networking stack in Ethereum consensus-layer clients.
|
This section outlines the specification for the networking stack in Ethereum consensus-layer clients.
|
||||||
|
|
||||||
## Transport
|
### Transport
|
||||||
|
|
||||||
Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently),
|
Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently),
|
||||||
we hereby define a profile for basic interoperability.
|
we hereby define a profile for basic interoperability.
|
||||||
|
@ -133,14 +133,14 @@ All listening endpoints must be publicly dialable, and thus not rely on libp2p c
|
||||||
Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.),
|
Nodes operating behind a NAT, or otherwise undialable by default (e.g. container runtime, firewall, etc.),
|
||||||
MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint.
|
MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint.
|
||||||
|
|
||||||
## Encryption and identification
|
### Encryption and identification
|
||||||
|
|
||||||
The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure
|
The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure
|
||||||
channel handshake with `secp256k1` identities will be used for encryption.
|
channel handshake with `secp256k1` identities will be used for encryption.
|
||||||
|
|
||||||
As specified in the libp2p specification, clients MUST support the `XX` handshake pattern.
|
As specified in the libp2p specification, clients MUST support the `XX` handshake pattern.
|
||||||
|
|
||||||
## Protocol Negotiation
|
### Protocol Negotiation
|
||||||
|
|
||||||
Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers.
|
Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers.
|
||||||
|
|
||||||
|
@ -148,7 +148,7 @@ Clients MUST support [multistream-select 1.0](https://github.com/multiformats/mu
|
||||||
and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when the spec solidifies.
|
and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when the spec solidifies.
|
||||||
Once all clients have implementations for multiselect 2.0, multistream-select 1.0 MAY be phased out.
|
Once all clients have implementations for multiselect 2.0, multistream-select 1.0 MAY be phased out.
|
||||||
|
|
||||||
## Multiplexing
|
### Multiplexing
|
||||||
|
|
||||||
During connection bootstrapping, libp2p dynamically negotiates a mutually supported multiplexing method to conduct parallel conversations.
|
During connection bootstrapping, libp2p dynamically negotiates a mutually supported multiplexing method to conduct parallel conversations.
|
||||||
This applies to transports that are natively incapable of multiplexing (e.g. TCP, WebSockets, WebRTC),
|
This applies to transports that are natively incapable of multiplexing (e.g. TCP, WebSockets, WebRTC),
|
||||||
|
@ -163,9 +163,9 @@ and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md).
|
||||||
If both are supported by the client, yamux MUST take precedence during negotiation.
|
If both are supported by the client, yamux MUST take precedence during negotiation.
|
||||||
See the [Rationale](#design-decision-rationale) section below for tradeoffs.
|
See the [Rationale](#design-decision-rationale) section below for tradeoffs.
|
||||||
|
|
||||||
# Consensus-layer network interaction domains
|
## Consensus-layer network interaction domains
|
||||||
|
|
||||||
## Configuration
|
### Configuration
|
||||||
|
|
||||||
This section outlines constants that are used in this spec.
|
This section outlines constants that are used in this spec.
|
||||||
|
|
||||||
|
@ -182,7 +182,7 @@ This section outlines constants that are used in this spec.
|
||||||
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
|
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
|
||||||
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
|
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
|
||||||
|
|
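To illustrate how the two 4-byte domains above might isolate message-ids of valid versus invalid snappy payloads, here is a hedged sketch; the hashing, truncation, and the use of the `python-snappy` package are assumptions for illustration rather than the normative message-id function.

```python
import hashlib

import snappy  # python-snappy, assumed available for illustration

MESSAGE_DOMAIN_INVALID_SNAPPY = bytes.fromhex("00000000")
MESSAGE_DOMAIN_VALID_SNAPPY = bytes.fromhex("01000000")

def sketch_message_id(raw_message_data: bytes) -> bytes:
    """Domain-separate message-ids of valid vs. invalid snappy payloads (illustrative)."""
    try:
        preimage = MESSAGE_DOMAIN_VALID_SNAPPY + snappy.decompress(raw_message_data)
    except Exception:
        preimage = MESSAGE_DOMAIN_INVALID_SNAPPY + raw_message_data
    return hashlib.sha256(preimage).digest()[:20]
```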
||||||
## MetaData
|
### MetaData
|
||||||
|
|
||||||
Clients MUST locally store the following `MetaData`:
|
Clients MUST locally store the following `MetaData`:
|
||||||
|
|
||||||
|
@ -203,7 +203,7 @@ Where
|
||||||
is entirely independent of the ENR sequence number,
|
is entirely independent of the ENR sequence number,
|
||||||
and will in most cases be out of sync with the ENR sequence number.
|
and will in most cases be out of sync with the ENR sequence number.
|
||||||
|
|
||||||
## The gossip domain: gossipsub
|
### The gossip domain: gossipsub
|
||||||
|
|
||||||
Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol
|
Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol
|
||||||
including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) extension.
|
including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) extension.
|
||||||
|
@ -229,7 +229,7 @@ The following gossipsub [parameters](https://github.com/libp2p/specs/blob/master
|
||||||
for peer scoring and other attack mitigations.
|
for peer scoring and other attack mitigations.
|
||||||
These are currently under investigation and will be spec'd and released to mainnet when they are ready.
|
These are currently under investigation and will be spec'd and released to mainnet when they are ready.
|
||||||
|
|
||||||
### Topics and messages
|
#### Topics and messages
|
||||||
|
|
||||||
Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages).
|
Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages).
|
||||||
Topic strings have form: `/eth2/ForkDigestValue/Name/Encoding`.
|
Topic strings have form: `/eth2/ForkDigestValue/Name/Encoding`.
|
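A minimal sketch of assembling a topic string in that form (the digest below is a placeholder; `ssz_snappy` is the gossip encoding used throughout this document):

```python
def gossip_topic(fork_digest: bytes, name: str, encoding: str = "ssz_snappy") -> str:
    """Sketch: build /eth2/ForkDigestValue/Name/Encoding from its parts."""
    return f"/eth2/{fork_digest.hex()}/{name}/{encoding}"

print(gossip_topic(bytes.fromhex("b5303f2a"), "beacon_block"))
# /eth2/b5303f2a/beacon_block/ssz_snappy
```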
||||||
|
@ -289,7 +289,7 @@ We utilize `ACCEPT`, `REJECT`, and `IGNORE`. For each gossipsub topic, there are
|
||||||
If all validations pass, return `ACCEPT`.
|
If all validations pass, return `ACCEPT`.
|
||||||
If one or more validations fail while processing the items in order, return either `REJECT` or `IGNORE` as specified in the prefix of the particular condition.
|
If one or more validations fail while processing the items in order, return either `REJECT` or `IGNORE` as specified in the prefix of the particular condition.
|
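The in-order evaluation described above could look roughly like this in client code; the enum and the `(failure_result, predicate)` pairing are illustrative assumptions, not spec types.

```python
from enum import Enum
from typing import Callable, Sequence, Tuple

class ValidationResult(Enum):
    ACCEPT = "ACCEPT"
    REJECT = "REJECT"
    IGNORE = "IGNORE"

def run_gossip_validations(
    checks: Sequence[Tuple[ValidationResult, Callable[[], bool]]],
) -> ValidationResult:
    """Evaluate checks in order; the first failing check decides REJECT vs. IGNORE."""
    for failure_result, predicate in checks:
        if not predicate():
            return failure_result
    return ValidationResult.ACCEPT
```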
||||||
|
|
||||||
#### Global topics
|
##### Global topics
|
||||||
|
|
||||||
There are two primary global topics used to propagate beacon blocks (`beacon_block`)
|
There are two primary global topics used to propagate beacon blocks (`beacon_block`)
|
||||||
and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network.
|
and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network.
|
||||||
|
@ -297,7 +297,7 @@ and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the ne
|
||||||
There are three additional global topics that are used to propagate lower frequency validator messages
|
There are three additional global topics that are used to propagate lower frequency validator messages
|
||||||
(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`).
|
(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`).
|
||||||
|
|
||||||
##### `beacon_block`
|
###### `beacon_block`
|
||||||
|
|
||||||
The `beacon_block` topic is used solely for propagating new signed beacon blocks to all nodes on the network.
|
The `beacon_block` topic is used solely for propagating new signed beacon blocks to all nodes on the network.
|
||||||
Signed blocks are sent in their entirety.
|
Signed blocks are sent in their entirety.
|
||||||
|
@ -325,7 +325,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
|
||||||
the block MAY be queued for later processing while proposers for the block's branch are calculated --
|
the block MAY be queued for later processing while proposers for the block's branch are calculated --
|
||||||
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||||
|
|
||||||
##### `beacon_aggregate_and_proof`
|
###### `beacon_aggregate_and_proof`
|
||||||
|
|
||||||
The `beacon_aggregate_and_proof` topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s)
|
The `beacon_aggregate_and_proof` topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s)
|
||||||
to subscribing nodes (typically validators) to be included in future blocks.
|
to subscribing nodes (typically validators) to be included in future blocks.
|
||||||
|
@ -360,7 +360,7 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
|
||||||
== store.finalized_checkpoint.root`
|
== store.finalized_checkpoint.root`
|
||||||
|
|
||||||
|
|
||||||
##### `voluntary_exit`
|
###### `voluntary_exit`
|
||||||
|
|
||||||
The `voluntary_exit` topic is used solely for propagating signed voluntary validator exits to proposers on the network.
|
The `voluntary_exit` topic is used solely for propagating signed voluntary validator exits to proposers on the network.
|
||||||
Signed voluntary exits are sent in their entirety.
|
Signed voluntary exits are sent in their entirety.
|
||||||
|
@ -370,7 +370,7 @@ The following validations MUST pass before forwarding the `signed_voluntary_exit
|
||||||
for the validator with index `signed_voluntary_exit.message.validator_index`.
|
for the validator with index `signed_voluntary_exit.message.validator_index`.
|
||||||
- _[REJECT]_ All of the conditions within `process_voluntary_exit` pass validation.
|
- _[REJECT]_ All of the conditions within `process_voluntary_exit` pass validation.
|
||||||
|
|
||||||
##### `proposer_slashing`
|
###### `proposer_slashing`
|
||||||
|
|
||||||
The `proposer_slashing` topic is used solely for propagating proposer slashings to proposers on the network.
|
The `proposer_slashing` topic is used solely for propagating proposer slashings to proposers on the network.
|
||||||
Proposer slashings are sent in their entirety.
|
Proposer slashings are sent in their entirety.
|
||||||
|
@ -380,7 +380,7 @@ The following validations MUST pass before forwarding the `proposer_slashing` on
|
||||||
for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`.
|
for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`.
|
||||||
- _[REJECT]_ All of the conditions within `process_proposer_slashing` pass validation.
|
- _[REJECT]_ All of the conditions within `process_proposer_slashing` pass validation.
|
||||||
|
|
||||||
##### `attester_slashing`
|
###### `attester_slashing`
|
||||||
|
|
||||||
The `attester_slashing` topic is used solely for propagating attester slashings to proposers on the network.
|
The `attester_slashing` topic is used solely for propagating attester slashings to proposers on the network.
|
||||||
Attester slashings are sent in their entirety.
|
Attester slashings are sent in their entirety.
|
||||||
|
@ -392,11 +392,11 @@ Clients who receive an attester slashing on this topic MUST validate the conditi
|
||||||
verify if `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`).
|
verify if `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`).
|
||||||
- _[REJECT]_ All of the conditions within `process_attester_slashing` pass validation.
|
- _[REJECT]_ All of the conditions within `process_attester_slashing` pass validation.
|
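The novelty condition quoted above can be read as a simple set difference; the sketch below uses illustrative names.

```python
from typing import Set

def introduces_new_slashable_indices(
    attester_slashed_indices: Set[int],
    prior_seen_attester_slashed_indices: Set[int],
) -> bool:
    """Illustrative: forward only if at least one index was not covered by prior slashings."""
    return bool(attester_slashed_indices - prior_seen_attester_slashed_indices)
```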
||||||
|
|
||||||
#### Attestation subnets
|
##### Attestation subnets
|
||||||
|
|
||||||
Attestation subnets are used to propagate unaggregated attestations to subsections of the network.
|
Attestation subnets are used to propagate unaggregated attestations to subsections of the network.
|
||||||
|
|
||||||
##### `beacon_attestation_{subnet_id}`
|
###### `beacon_attestation_{subnet_id}`
|
||||||
|
|
||||||
The `beacon_attestation_{subnet_id}` topics are used to propagate unaggregated attestations
|
The `beacon_attestation_{subnet_id}` topics are used to propagate unaggregated attestations
|
||||||
to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`.
|
to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`.
|
||||||
|
@ -432,7 +432,7 @@ The following validations MUST pass before forwarding the `attestation` on the s
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#### Attestations and Aggregation
|
##### Attestations and Aggregation
|
||||||
|
|
||||||
Attestation broadcasting is grouped into subnets defined by a topic.
|
Attestation broadcasting is grouped into subnets defined by a topic.
|
||||||
The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`.
|
The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`.
|
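For illustration, a simplified mapping from an attesting committee to a subnet might look like the following; the modular formula and the constant values are assumptions for illustration, not the normative helper.

```python
# Simplified, illustrative mapping of a committee to one of the attestation subnets.
ATTESTATION_SUBNET_COUNT = 64   # placeholder value
SLOTS_PER_EPOCH = 32            # placeholder value

def sketch_subnet_for_attestation(committees_per_slot: int, slot: int, committee_index: int) -> int:
    slots_since_epoch_start = slot % SLOTS_PER_EPOCH
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
    return (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT
```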
||||||
|
@ -445,7 +445,7 @@ Unaggregated attestations are sent as `Attestation`s to the subnet topic,
|
||||||
|
|
||||||
Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s.
|
Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s.
|
||||||
|
|
||||||
### Encodings
|
#### Encodings
|
||||||
|
|
||||||
Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded.
|
Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded.
|
||||||
|
|
||||||
|
@ -461,9 +461,9 @@ so [basic snappy block compression](https://github.com/google/snappy/blob/master
|
||||||
Implementations MUST use a single encoding for gossip.
|
Implementations MUST use a single encoding for gossip.
|
||||||
Changing an encoding will require coordination between participating implementations.
|
Changing an encoding will require coordination between participating implementations.
|
||||||
|
|
||||||
## The Req/Resp domain
|
### The Req/Resp domain
|
||||||
|
|
||||||
### Protocol identification
|
#### Protocol identification
|
||||||
|
|
||||||
Each message type is segregated into its own libp2p protocol ID, which is a case-sensitive UTF-8 string of the form:
|
Each message type is segregated into its own libp2p protocol ID, which is a case-sensitive UTF-8 string of the form:
|
||||||
|
|
||||||
|
@ -485,7 +485,7 @@ With:
|
||||||
This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0`
|
This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0`
|
||||||
to handle the request type, version, and encoding negotiation before establishing the underlying streams.
|
to handle the request type, version, and encoding negotiation before establishing the underlying streams.
|
||||||
|
|
||||||
### Req/Resp interaction
|
#### Req/Resp interaction
|
||||||
|
|
||||||
We use ONE stream PER request/response interaction.
|
We use ONE stream PER request/response interaction.
|
||||||
Streams are closed when the interaction finishes, whether in success or in error.
|
Streams are closed when the interaction finishes, whether in success or in error.
|
||||||
|
@ -515,7 +515,7 @@ Regardless of these type specific bounds, a global maximum uncompressed byte siz
|
||||||
Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately.
|
Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately.
|
||||||
Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance.
|
Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance.
|
||||||
|
|
||||||
#### Requesting side
|
##### Requesting side
|
||||||
|
|
||||||
Once a new stream with the protocol ID for the request type has been negotiated, the full request message SHOULD be sent immediately.
|
Once a new stream with the protocol ID for the request type has been negotiated, the full request message SHOULD be sent immediately.
|
||||||
The request MUST be encoded according to the encoding strategy.
|
The request MUST be encoded according to the encoding strategy.
|
||||||
|
@ -537,7 +537,7 @@ A requester SHOULD read from the stream until either:
|
||||||
For requests consisting of a single valid `response_chunk`,
|
For requests consisting of a single valid `response_chunk`,
|
||||||
the requester SHOULD read the chunk fully, as defined by the `encoding-dependent-header`, before closing the stream.
|
the requester SHOULD read the chunk fully, as defined by the `encoding-dependent-header`, before closing the stream.
|
||||||
|
|
||||||
#### Responding side
|
##### Responding side
|
||||||
|
|
||||||
Once a new stream with the protocol ID for the request type has been negotiated,
|
Once a new stream with the protocol ID for the request type has been negotiated,
|
||||||
the responder SHOULD process the incoming request and MUST validate it before processing it.
|
the responder SHOULD process the incoming request and MUST validate it before processing it.
|
||||||
|
@ -588,7 +588,7 @@ The `ErrorMessage` schema is:
|
||||||
*Note*: By convention, the `error_message` is a sequence of bytes that MAY be interpreted as a UTF-8 string (for debugging purposes).
|
*Note*: By convention, the `error_message` is a sequence of bytes that MAY be interpreted as a UTF-8 string (for debugging purposes).
|
||||||
Clients MUST treat any byte sequence as valid.
|
Clients MUST treat any byte sequence as valid.
|
||||||
|
|
||||||
### Encoding strategies
|
#### Encoding strategies
|
||||||
|
|
||||||
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction.
|
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction.
|
||||||
Only one value is possible at this time:
|
Only one value is possible at this time:
|
||||||
|
@ -599,7 +599,7 @@ Only one value is possible at this time:
|
||||||
For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s.
|
For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s.
|
||||||
This encoding type MUST be supported by all clients.
|
This encoding type MUST be supported by all clients.
|
||||||
|
|
||||||
#### SSZ-snappy encoding strategy
|
##### SSZ-snappy encoding strategy
|
||||||
|
|
||||||
The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded.
|
The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded.
|
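As a rough sketch of the wire encoding: SSZ serialization itself is delegated to whatever SSZ library a client uses, the `python-snappy` calls below are an assumption about dependencies, and the exact snappy framing for req/resp chunks follows the spec text.

```python
import snappy  # python-snappy, assumed available; simple block compression shown for brevity

def encode_ssz_snappy(ssz_bytes: bytes) -> bytes:
    """Sketch: snappy-compress bytes that have already been SSZ-serialized."""
    return snappy.compress(ssz_bytes)

def decode_ssz_snappy(wire_bytes: bytes) -> bytes:
    """Sketch: reverse of the above; SSZ deserialization happens afterwards."""
    return snappy.decompress(wire_bytes)
```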
||||||
|
|
||||||
|
@ -646,9 +646,9 @@ constituents individually as `response_chunk`s. For example, the
|
||||||
`List[SignedBeaconBlock, ...]` response type sends zero or more `response_chunk`s.
|
`List[SignedBeaconBlock, ...]` response type sends zero or more `response_chunk`s.
|
||||||
Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload.
|
Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload.
|
||||||
|
|
||||||
### Messages
|
#### Messages
|
||||||
|
|
||||||
#### Status
|
##### Status
|
||||||
|
|
||||||
**Protocol ID:** ``/eth2/beacon_chain/req/status/1/``
|
**Protocol ID:** ``/eth2/beacon_chain/req/status/1/``
|
||||||
|
|
||||||
|
@ -694,7 +694,7 @@ SHOULD request beacon blocks from its counterparty via the `BeaconBlocksByRange`
|
||||||
the client might need to send `Status` request again to learn if the peer has a higher head.
|
the client might need to send `Status` request again to learn if the peer has a higher head.
|
||||||
Implementers are free to implement such behavior in their own way.
|
Implementers are free to implement such behavior in their own way.
|
||||||
|
|
||||||
#### Goodbye
|
##### Goodbye
|
||||||
|
|
||||||
**Protocol ID:** ``/eth2/beacon_chain/req/goodbye/1/``
|
**Protocol ID:** ``/eth2/beacon_chain/req/goodbye/1/``
|
||||||
|
|
||||||
|
@ -718,7 +718,7 @@ The request/response MUST be encoded as a single SSZ-field.
|
||||||
|
|
||||||
The response MUST consist of a single `response_chunk`.
|
The response MUST consist of a single `response_chunk`.
|
||||||
|
|
||||||
#### BeaconBlocksByRange
|
##### BeaconBlocksByRange
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/1/`
|
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/1/`
|
||||||
|
|
||||||
|
@ -795,7 +795,7 @@ In particular when `step == 1`, each `parent_root` MUST match the `hash_tree_roo
|
||||||
After the initial block, clients MAY stop in the process of responding
|
After the initial block, clients MAY stop in the process of responding
|
||||||
if their fork choice changes the view of the chain in the context of the request.
|
if their fork choice changes the view of the chain in the context of the request.
|
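The `parent_root`/`hash_tree_root` chain-linkage requirement for `step == 1` responses can be checked roughly as below; the `hash_tree_root` callable stands in for the client's SSZ library.

```python
from typing import Callable, Sequence

def blocks_form_a_chain(
    blocks: Sequence,                          # SignedBeaconBlock-like objects in ascending slot order
    hash_tree_root: Callable[[object], bytes],
) -> bool:
    """Sketch: each block's parent_root must equal the root of the preceding block."""
    for previous, current in zip(blocks, blocks[1:]):
        if current.message.parent_root != hash_tree_root(previous.message):
            return False
    return True
```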
||||||
|
|
||||||
#### BeaconBlocksByRoot
|
##### BeaconBlocksByRoot
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/1/`
|
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/1/`
|
||||||
|
|
||||||
|
@ -835,7 +835,7 @@ Clients MAY limit the number of blocks in the response.
|
||||||
|
|
||||||
`/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
|
`/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
|
||||||
|
|
||||||
#### Ping
|
##### Ping
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/ping/1/`
|
**Protocol ID:** `/eth2/beacon_chain/req/ping/1/`
|
||||||
|
|
||||||
|
@ -867,7 +867,7 @@ The request MUST be encoded as an SSZ-field.
|
||||||
|
|
||||||
The response MUST consist of a single `response_chunk`.
|
The response MUST consist of a single `response_chunk`.
|
||||||
|
|
||||||
#### GetMetaData
|
##### GetMetaData
|
||||||
|
|
||||||
**Protocol ID:** `/eth2/beacon_chain/req/metadata/1/`
|
**Protocol ID:** `/eth2/beacon_chain/req/metadata/1/`
|
||||||
|
|
||||||
|
@ -890,14 +890,14 @@ The response MUST be encoded as an SSZ-container.
|
||||||
|
|
||||||
The response MUST consist of a single `response_chunk`.
|
The response MUST consist of a single `response_chunk`.
|
||||||
|
|
||||||
## The discovery domain: discv5
|
### The discovery domain: discv5
|
||||||
|
|
||||||
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) (Protocol version v5.1) is used for peer discovery.
|
Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) (Protocol version v5.1) is used for peer discovery.
|
||||||
|
|
||||||
`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only.
|
`discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only.
|
||||||
`discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.
|
`discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context.
|
||||||
|
|
||||||
### Integration into libp2p stacks
|
#### Integration into libp2p stacks
|
||||||
|
|
||||||
`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor
|
`discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor
|
||||||
to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go)
|
to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go)
|
||||||
|
@ -908,7 +908,7 @@ and the outputs will be multiaddrs converted from the ENR records returned by th
|
||||||
|
|
||||||
This integration enables the libp2p stack to subsequently form connections and streams with discovered peers.
|
This integration enables the libp2p stack to subsequently form connections and streams with discovered peers.
|
||||||
|
|
||||||
### ENR structure
|
#### ENR structure
|
||||||
|
|
||||||
The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the following entries
|
The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the following entries
|
||||||
(exclusive of the sequence number and signature, which MUST be present in an ENR):
|
(exclusive of the sequence number and signature, which MUST be present in an ENR):
|
||||||
|
@ -923,7 +923,7 @@ The ENR MAY contain the following entries:
|
||||||
|
|
||||||
Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778).
|
Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778).
|
||||||
|
|
||||||
#### Attestation subnet bitfield
|
##### Attestation subnet bitfield
|
||||||
|
|
||||||
The ENR `attnets` entry signifies the attestation subnet bitfield with the following form
|
The ENR `attnets` entry signifies the attestation subnet bitfield with the following form
|
||||||
to more easily discover peers participating in particular attestation gossip subnets.
|
to more easily discover peers participating in particular attestation gossip subnets.
|
||||||
|
@ -936,7 +936,7 @@ If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the `a
|
||||||
|
|
||||||
If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely.
|
If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely.
|
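A sketch of the inclusion rule described above; the bitfield is modeled as a plain list of booleans here, whereas real clients use the SSZ `Bitvector` from `MetaData`.

```python
from typing import List, Optional

def enr_attnets_entry(metadata_attnets: List[bool]) -> Optional[bytes]:
    """Sketch: include the `attnets` ENR entry whenever any subnet bit is set.

    Returning None models leaving the entry out entirely, which is only
    permitted when every bit is zero.
    """
    if not any(metadata_attnets):
        return None
    packed = bytearray((len(metadata_attnets) + 7) // 8)
    for i, bit in enumerate(metadata_attnets):
        if bit:
            packed[i // 8] |= 1 << (i % 8)  # mirrors SSZ Bitvector bit ordering
    return bytes(packed)
```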
||||||
|
|
||||||
#### `eth2` field
|
##### `eth2` field
|
||||||
|
|
||||||
ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current fork digest, next fork version,
|
ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current fork digest, next fork version,
|
||||||
and next fork epoch to ensure connections are made with peers on the intended Ethereum network.
|
and next fork epoch to ensure connections are made with peers on the intended Ethereum network.
|
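The 16 bytes decompose as 4 bytes of fork digest, 4 bytes of next fork version, and 8 bytes of next fork epoch; the sketch below serializes such a value as a fixed-size concatenation. The little-endian epoch encoding mirrors SSZ `uint64`, and the helper itself is illustrative.

```python
def encode_eth2_enr_value(fork_digest: bytes, next_fork_version: bytes, next_fork_epoch: int) -> bytes:
    """Sketch: 4-byte fork digest + 4-byte next fork version + 8-byte next fork epoch = 16 bytes."""
    assert len(fork_digest) == 4 and len(next_fork_version) == 4
    return fork_digest + next_fork_version + next_fork_epoch.to_bytes(8, "little")
```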
||||||
|
@ -979,11 +979,11 @@ Clients MAY connect to peers with the same `fork_digest` but a different `next_f
|
||||||
Unless `ENRForkID` is manually updated to match prior to the earlier `next_fork_epoch` of the two clients,
|
Unless `ENRForkID` is manually updated to match prior to the earlier `next_fork_epoch` of the two clients,
|
||||||
these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.
|
these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.
|
||||||
|
|
||||||
# Design decision rationale
|
## Design decision rationale
|
||||||
|
|
||||||
## Transport
|
### Transport
|
||||||
|
|
||||||
### Why are we defining specific transports?
|
#### Why are we defining specific transports?
|
||||||
|
|
||||||
libp2p peers can listen on multiple transports concurrently, and these can change over time.
|
libp2p peers can listen on multiple transports concurrently, and these can change over time.
|
||||||
Multiaddrs encode not only the address but also the transport to be used to dial.
|
Multiaddrs encode not only the address but also the transport to be used to dial.
|
||||||
|
@ -992,7 +992,7 @@ Due to this dynamic nature, agreeing on specific transports like TCP, QUIC, or W
|
||||||
|
|
||||||
However, it is useful to define a minimum baseline for interoperability purposes.
|
However, it is useful to define a minimum baseline for interoperability purposes.
|
||||||
|
|
||||||
### Can clients support other transports/handshakes than the ones mandated by the spec?
|
#### Can clients support other transports/handshakes than the ones mandated by the spec?
|
||||||
|
|
||||||
Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC transports, if available in the language of choice.
|
Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC transports, if available in the language of choice.
|
||||||
While interoperability shall not be harmed by lack of such support, the advantages are desirable:
|
While interoperability shall not be harmed by lack of such support, the advantages are desirable:
|
||||||
|
@ -1007,7 +1007,7 @@ and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf-
|
||||||
The usage of one handshake procedure or the other shall be transparent to the application layer,
|
The usage of one handshake procedure or the other shall be transparent to the application layer,
|
||||||
once the libp2p Host/Node object has been configured appropriately.
|
once the libp2p Host/Node object has been configured appropriately.
|
||||||
|
|
||||||
### What are the advantages of using TCP/QUIC/Websockets?
|
#### What are the advantages of using TCP/QUIC/Websockets?
|
||||||
|
|
||||||
TCP is a reliable, ordered, full-duplex, congestion-controlled network protocol that powers much of the Internet as we know it today.
|
TCP is a reliable, ordered, full-duplex, congestion-controlled network protocol that powers much of the Internet as we know it today.
|
||||||
HTTP/1.1 and HTTP/2 run atop TCP.
|
HTTP/1.1 and HTTP/2 run atop TCP.
|
||||||
|
@ -1027,7 +1027,7 @@ and we may only become subject to standard IP-based firewall filtering—somethi
|
||||||
WebSockets and/or WebRTC transports are necessary for interaction with browsers,
|
WebSockets and/or WebRTC transports are necessary for interaction with browsers,
|
||||||
and will become increasingly important as we incorporate browser-based light clients into the Ethereum network.
|
and will become increasingly important as we incorporate browser-based light clients into the Ethereum network.
|
||||||
|
|
||||||
### Why do we not just support a single transport?
|
#### Why do we not just support a single transport?
|
||||||
|
|
||||||
Networks evolve.
|
Networks evolve.
|
||||||
Hardcoding design decisions leads to ossification, preventing the evolution of networks alongside the state of the art.
|
Hardcoding design decisions leads to ossification, preventing the evolution of networks alongside the state of the art.
|
||||||
|
@ -1039,7 +1039,7 @@ Clients can adopt new transports without breaking old ones, and the multi-transp
|
||||||
(e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS),
|
(e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS),
|
||||||
without the need for proxying or trust delegation to servers.
|
without the need for proxying or trust delegation to servers.
|
||||||
|
|
||||||
### Why are we not using QUIC from the start?
|
#### Why are we not using QUIC from the start?
|
||||||
|
|
||||||
The QUIC standard is still not finalized (at working draft 22 at the time of writing),
|
The QUIC standard is still not finalized (at working draft 22 at the time of writing),
|
||||||
and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations).
|
and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations).
|
||||||
|
@ -1052,9 +1052,9 @@ On the other hand, TLS 1.3 is the newest, simplified iteration of TLS.
|
||||||
Old, insecure, obsolete ciphers and algorithms have been removed, adopting Ed25519 as the sole ECDH key agreement function.
|
Old, insecure, obsolete ciphers and algorithms have been removed, adopting Ed25519 as the sole ECDH key agreement function.
|
||||||
Handshakes are faster, 1-RTT data is supported, and session resumption is a reality, amongst other features.
|
Handshakes are faster, 1-RTT data is supported, and session resumption is a reality, amongst other features.
|
||||||
|
|
||||||
## Multiplexing
|
### Multiplexing
|
||||||
|
|
||||||
### Why are we using mplex/yamux?
|
#### Why are we using mplex/yamux?
|
||||||
|
|
||||||
[Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer invented by Hashicorp that supports stream-level congestion control.
|
[Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer invented by Hashicorp that supports stream-level congestion control.
|
||||||
Implementations exist in a limited set of languages, and it’s not a trivial piece to develop.
|
Implementations exist in a limited set of languages, and it’s not a trivial piece to develop.
|
||||||
|
@ -1066,9 +1066,9 @@ It does not support stream-level congestion control and is subject to head-of-li
|
||||||
Overlay multiplexers are not necessary with QUIC since the protocol provides native multiplexing,
|
Overlay multiplexers are not necessary with QUIC since the protocol provides native multiplexing,
|
||||||
but they need to be layered atop TCP, WebSockets, and other transports that lack such support.
|
but they need to be layered atop TCP, WebSockets, and other transports that lack such support.
|
||||||
|
|
||||||
## Protocol Negotiation
|
### Protocol Negotiation
|
||||||
|
|
||||||
### When is multiselect 2.0 due and why do we plan to migrate to it?
|
#### When is multiselect 2.0 due and why do we plan to migrate to it?
|
||||||
|
|
||||||
multiselect 2.0 is currently being conceptualized.
|
multiselect 2.0 is currently being conceptualized.
|
||||||
The debate started [on this issue](https://github.com/libp2p/specs/pull/95),
|
The debate started [on this issue](https://github.com/libp2p/specs/pull/95),
|
||||||
|
@ -1084,7 +1084,7 @@ We plan to eventually migrate to multiselect 2.0 because it will:
|
||||||
3. Leverage *push data* mechanisms of underlying protocols to expedite negotiation.
|
3. Leverage *push data* mechanisms of underlying protocols to expedite negotiation.
|
||||||
4. Provide the building blocks for enhanced censorship resistance.
|
4. Provide the building blocks for enhanced censorship resistance.
|
||||||
|
|
||||||
-### What is the difference between connection-level and stream-level protocol negotiation?
+#### What is the difference between connection-level and stream-level protocol negotiation?

All libp2p connections must be authenticated, encrypted, and multiplexed.
Connections using network transports that do not natively support authentication/encryption and multiplexing (e.g. TCP) need to undergo protocol negotiation to agree on a mutually supported:
@ -1101,9 +1101,9 @@ When opening streams, peers pin a protocol to that stream, by conducting *stream
At present, multistream-select 1.0 is used for both types of negotiation,
but multiselect 2.0 will use dedicated mechanisms for the connection bootstrapping process and for stream protocol negotiation.

-## Encryption
+### Encryption

-### Why are we not supporting SecIO?
+#### Why are we not supporting SecIO?

SecIO has been the default encryption layer for libp2p for years.
It is used in IPFS and Filecoin. Although it will shortly be superseded, it has proven to work at scale.
@ -1114,7 +1114,7 @@ a mechanism that multiselect 2.0 will leverage to reduce round trips during conn

SecIO is not considered secure for the purposes of this spec.

-### Why are we using Noise?
+#### Why are we using Noise?

Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org):

@ -1129,7 +1129,7 @@ and are used in major cryptographic-centric projects like WireGuard, I2P, and Li
[Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) [studies](https://eprint.iacr.org/2019/436.pdf)
have assessed the stated security goals of several Noise handshakes with positive results.

-### Why are we using encryption at all?
+#### Why are we using encryption at all?

Transport-level encryption secures message exchange and provides properties that are useful for privacy, safety, and censorship resistance.
These properties are derived from the following security guarantees that apply to the entire communication between two peers:
@ -1146,9 +1146,9 @@ Note that transport-level encryption is not exclusive of application-level encry
Transport-level encryption secures the communication itself,
while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.).

-## Gossipsub
+### Gossipsub

-### Why are we using a pub/sub algorithm for block and attestation propagation?
+#### Why are we using a pub/sub algorithm for block and attestation propagation?

Pubsub is a technique to broadcast/disseminate data across a network rapidly.
Such data is packaged in fire-and-forget messages that do not require a response from every recipient.
@ -1156,18 +1156,18 @@ Peers subscribed to a topic participate in the propagation of messages in that t

The alternative is to maintain a fully connected mesh (all peers connected to each other 1:1), which scales poorly (O(n^2)).

-### Why are we using topics to segregate encodings, yet only support one encoding?
+#### Why are we using topics to segregate encodings, yet only support one encoding?

For future extensibility with almost zero overhead now (besides the extra bytes in the topic name).

-### How do we upgrade gossip channels (e.g. changes in encoding, compression)?
+#### How do we upgrade gossip channels (e.g. changes in encoding, compression)?

Changing gossipsub/broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, during a hard fork.

When a node is preparing for upcoming tasks (e.g. validator duty lookahead) on a gossipsub topic,
the node should join the topic of the future epoch in which the task is to occur in addition to listening to the topics for the current epoch.

-### Why must all clients use the same gossip topic instead of one negotiated between each peer pair?
+#### Why must all clients use the same gossip topic instead of one negotiated between each peer pair?

Supporting multiple topics/encodings would require the presence of relayers to translate between encodings
and topics so as to avoid network fragmentation where participants have diverging views on the gossiped state,
@ -1182,7 +1182,7 @@ but the price here is pretty high in terms of overhead -- both computational and

It is permitted for clients to publish data on alternative topics as long as they also publish on the network-wide mandatory topic.

-### Why are the topics strings and not hashes?
+#### Why are the topics strings and not hashes?

Topic names have a hierarchical structure.
In the future, gossipsub may support wildcard subscriptions
@ -1195,14 +1195,14 @@ since the domain is finite anyway, and calculating a digest's preimage would be
Furthermore, the topic names are shorter than their digest equivalents (assuming SHA-256 hash),
so hashing topics would bloat messages unnecessarily.

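To illustrate the hierarchical structure, here is a minimal sketch that builds topic names in the `/eth2/<fork_digest>/<name>/<encoding>` shape used for gossip topics; the helper names and the prefix-matching idea are illustrative, not part of the spec.

```python
# Minimal sketch: hierarchical gossip topic names and prefix-based matching.
def make_topic(fork_digest: bytes, name: str, encoding: str = "ssz_snappy") -> str:
    return f"/eth2/{fork_digest.hex()}/{name}/{encoding}"

def matches_prefix(topic: str, prefix: str) -> bool:
    # A wildcard-style subscription could match on a shared prefix,
    # e.g. every topic belonging to a given fork digest.
    return topic.startswith(prefix)

fork_digest = bytes.fromhex("b5303f2a")  # example digest, not a real network value
topic = make_topic(fork_digest, "beacon_block")
assert topic == "/eth2/b5303f2a/beacon_block/ssz_snappy"
assert matches_prefix(topic, "/eth2/b5303f2a/")
```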
-### Why are we using the `StrictNoSign` signature policy?
+#### Why are we using the `StrictNoSign` signature policy?

The policy omits the `from` (1), `seqno` (3), `signature` (5) and `key` (6) fields. These fields would:
- Expose origin of sender (`from`), type of sender (based on `seqno`)
- Add extra unused data to the gossip, since message IDs are based on `data`, not on the `from` and `seqno`.
- Introduce more message validation than necessary, e.g. no `signature`.

-### Why are we overriding the default libp2p pubsub `message-id`?
+#### Why are we overriding the default libp2p pubsub `message-id`?

For our current purposes, there is no need to address messages based on source peer, or track a message `seqno`.
By overriding the default `message-id` to use content-addressing we can filter unnecessary duplicates before hitting the application layer.
@ -1214,7 +1214,7 @@ Some examples of where messages could be duplicated:
Partial aggregates could be duplicated
* Clients re-publishing seen messages

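To make the content-addressing above concrete, here is a minimal sketch of a content-derived message ID: a plain SHA-256 over the payload, truncated to 20 bytes. The spec additionally mixes in a domain prefix and the snappy-decompressed payload; this sketch only shows the deduplication idea, and the helper names are illustrative.

```python
import hashlib

def message_id(message_data: bytes) -> bytes:
    # Content-addressed ID: identical payloads map to the same ID regardless of
    # which peer published them, so duplicates are dropped at the gossip layer.
    return hashlib.sha256(message_data).digest()[:20]

seen: set[bytes] = set()

def is_duplicate(message_data: bytes) -> bool:
    mid = message_id(message_data)
    if mid in seen:
        return True
    seen.add(mid)
    return False
```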
-### Why are these specific gossip parameters chosen?
+#### Why are these specific gossip parameters chosen?

- `D`, `D_low`, `D_high`, `D_lazy`: recommended defaults.
- `heartbeat_interval`: 0.7 seconds, recommended for the beacon chain in the [GossipSub evaluation report by Protocol Labs](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4).
@ -1233,7 +1233,7 @@ Some examples of where messages could be duplicated:
Attestation gossip validity is bounded by an epoch, so this is the safe max bound.

-### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?
+#### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets?

For some gossip channels (e.g. those for Attestations and BeaconBlocks),
there are designated ranges of slots during which particular messages can be sent,
@ -1247,14 +1247,14 @@ For minimum and maximum allowable slot broadcast times,
Although messages can at times be eagerly gossiped to the network,
the node's fork choice prevents integration of these messages into the actual consensus until the _actual local start_ of the designated slot.

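For intuition, a minimal sketch of such a validity-window check, with the clock-disparity tolerance applied at both boundaries. The constant values are mainnet-style assumptions and the helper names are illustrative.

```python
SECONDS_PER_SLOT = 12                      # assumed mainnet-style value
MAXIMUM_GOSSIP_CLOCK_DISPARITY_MS = 500    # assumed value for illustration

def slot_start(genesis_time: int, slot: int) -> float:
    return genesis_time + slot * SECONDS_PER_SLOT

def is_within_slot_window(now: float, genesis_time: int, slot: int, slots_valid: int) -> bool:
    # Accept a message for `slot` while local time is inside
    # [slot_start(slot), slot_start(slot + slots_valid)], widened on both sides
    # by the allowed clock disparity so peers with slightly skewed clocks agree.
    tolerance = MAXIMUM_GOSSIP_CLOCK_DISPARITY_MS / 1000
    earliest = slot_start(genesis_time, slot) - tolerance
    latest = slot_start(genesis_time, slot + slots_valid) + tolerance
    return earliest <= now <= latest
```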
-### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?
+#### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?

Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel.
The exact grouping will be dependent on more involved network tests.
This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.

-### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
+#### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?

Attestations can only be included on chain within an epoch's worth of slots so this is the natural cutoff.
There is no utility to the chain to broadcast attestations older than one epoch,
@ -1265,7 +1265,7 @@ In addition to this, relaying attestations requires validating the attestation i
Thus, validating arbitrarily old attestations would put additional requirements on which states need to be readily available to the node.
This would result in a higher resource burden and could serve as a DoS vector.

-### Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s?
+#### Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s?

The dominant strategy for an individual validator is to always broadcast an aggregate containing their own attestation
to the global channel to ensure that proposers see their attestation for inclusion.
@ -1275,19 +1275,19 @@ the gossiped aggregate ensures that this dominant strategy will not flood the gl
Also, an attacker can create any number of honest-looking aggregates and broadcast them to the global pubsub channel.
Thus without some sort of proof of selection as an aggregator, the global channel can trivially be spammed.

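For intuition, a minimal sketch of a hash-based selection check in the spirit of the aggregator selection proof from the honest-validator guidance: only roughly a fixed number of committee members pass, and the proof cannot be forged without the corresponding slot signature. The constant and helper names here are assumptions for illustration.

```python
import hashlib

TARGET_AGGREGATORS_PER_COMMITTEE = 16  # assumed target, per the honest validator guidance

def bytes_to_uint64(data: bytes) -> int:
    return int.from_bytes(data[:8], "little")

def is_aggregator(committee_size: int, slot_signature: bytes) -> bool:
    # A validator proves it was selected by presenting a signature over the slot;
    # hashing that signature gives an unbiasable pseudo-random value, and only
    # about TARGET_AGGREGATORS_PER_COMMITTEE members of the committee pass.
    modulo = max(1, committee_size // TARGET_AGGREGATORS_PER_COMMITTEE)
    return bytes_to_uint64(hashlib.sha256(slot_signature).digest()) % modulo == 0
```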
-### Why are we sending entire objects in the pubsub and not just hashes?
+#### Why are we sending entire objects in the pubsub and not just hashes?

Entire objects should be sent to get the greatest propagation speeds.
If only hashes are sent, then block and attestation propagation is dependent on recursive requests from each peer.
In a hash-only scenario, peers could receive hashes without knowing who to download the actual contents from.
Sending entire objects ensures that they get propagated through the entire network.

-### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc?
+#### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc?

The prohibition of unverified-block-gossiping extends to nodes that cannot verify a signature
due to not being fully synced to ensure that such (amplified) DOS attacks are not possible.

-### How are we going to discover peers in a gossipsub topic?
+#### How are we going to discover peers in a gossipsub topic?

In Phase 0, peers for attestation subnets will be found using the `attnets` entry in the ENR.

@ -1295,7 +1295,7 @@ Although this method will be sufficient for early upgrade of the beacon chain, w
ENRs should ultimately not be used for this purpose.
They are best suited to store identity, location, and capability information, rather than more volatile advertisements.

-### How should fork version be used in practice?
+#### How should fork version be used in practice?

Fork versions are to be manually updated (likely via incrementing) at each hard fork.
This is to provide native domain separation for signatures as well as to aid in identifying peers (via ENRs)
@ -1308,9 +1308,9 @@ In these cases, extra care should be taken to isolate fork versions (e.g. flip a
A node locally stores all previous and future planned fork versions along with each fork epoch.
This allows for handling sync and processing messages starting from past forks/epochs.

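As an illustration of how a fork version feeds into peer identification, a minimal sketch of deriving a 4-byte fork digest from a fork version and the genesis validators root. This mirrors the shape of the spec's `compute_fork_digest`; for this simple two-field container the flat SHA-256 over padded chunks coincides with the SSZ hash-tree-root of `ForkData`, but treat it as a sketch rather than a reference implementation.

```python
import hashlib

def compute_fork_digest(current_version: bytes, genesis_validators_root: bytes) -> bytes:
    # ForkData has two fields: a 4-byte version and a 32-byte root.
    # Hash the two 32-byte chunks together (version right-padded to a full chunk)
    # and keep the first 4 bytes as the digest.
    assert len(current_version) == 4 and len(genesis_validators_root) == 32
    chunk_version = current_version + b"\x00" * 28
    return hashlib.sha256(chunk_version + genesis_validators_root).digest()[:4]

# Different fork versions (or genesis roots) yield different digests,
# giving signature domain separation and cheap peer filtering via ENRs/topics.
d0 = compute_fork_digest(bytes.fromhex("00000000"), b"\x00" * 32)
d1 = compute_fork_digest(bytes.fromhex("01000000"), b"\x00" * 32)
assert d0 != d1
```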
-## Req/Resp
+### Req/Resp

-### Why segregate requests into dedicated protocol IDs?
+#### Why segregate requests into dedicated protocol IDs?

Requests are segregated by protocol ID to:

@ -1343,7 +1343,7 @@ Multiselect 2.0 will eventually remove this overhead by memoizing previously sel
Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol
so the additional overhead is not expected to significantly hinder this domain.

-### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?
+#### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?

We are using single-use streams where each stream is closed at the end of the message.
Thus, libp2p transparently handles message delimiting in the underlying stream.
@ -1361,7 +1361,7 @@ Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed wi
[Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length (unsigned here) ints.
Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte.

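To make the 1-bit-per-byte overhead concrete, a minimal sketch of encoding and decoding an unsigned protobuf-style varint; the worked example value comes from the protobuf documentation linked above.

```python
def encode_varint(value: int) -> bytes:
    # Each byte carries 7 payload bits; the high bit marks "more bytes follow".
    assert value >= 0
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

def decode_varint(data: bytes) -> tuple:
    # Returns (value, number of bytes consumed).
    value, shift = 0, 0
    for i, byte in enumerate(data):
        value |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return value, i + 1
        shift += 7
    raise ValueError("truncated varint")

assert encode_varint(300) == b"\xac\x02"
assert decode_varint(b"\xac\x02") == (300, 2)
```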
-### Why do we version protocol strings with ordinals instead of semver?
+#### Why do we version protocol strings with ordinals instead of semver?

Using semver for network protocols is confusing.
It is never clear what a change in a field, even if backwards compatible on deserialization, actually implies.
@ -1382,11 +1382,11 @@ because it's unclear if "backwards compatibility" and "breaking change" apply on

For this reason, we remove and replace semver with ordinals that require explicit agreement and do not mandate a specific policy for changes.

-### Why is it called Req/Resp and not RPC?
+#### Why is it called Req/Resp and not RPC?

Req/Resp is used to avoid confusion with JSON-RPC and similar user-client interaction mechanisms.

-### Why do we allow empty responses in block requests?
+#### Why do we allow empty responses in block requests?

When requesting blocks by range or root, it may happen that there are no blocks in the selected range or the responding node does not have the requested blocks.

@ -1413,7 +1413,7 @@ Failing to provide blocks that nodes "should" have is reason to trust a peer les
-- for example, if a particular peer gossips a block, it should have access to its parent.
If a request for the parent fails, it's indicative of poor peer quality since peers should validate blocks before gossiping them.

-### Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?
+#### Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?

When connecting, the `Status` message gives an idea about the sync status of a particular peer, but this changes over time.
By the time a subsequent `BeaconBlockByRange` request is processed, the information may be stale,
@ -1423,7 +1423,7 @@ To avoid this race condition, we allow the responding side to choose which branc
The requesting client then goes on to validate the blocks and incorporate them in their own database
-- because they follow the same rules, they should at this point arrive at the same canonical chain.

-### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?
+#### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?

Due to economic finality and weak subjectivity requirements of a proof-of-stake blockchain, for a new node to safely join the network
the node must provide a recent checkpoint found out-of-band. This checkpoint can be in the form of a `root` & `epoch` or it can be the entire
@ -1447,7 +1447,7 @@ MIN_EPOCHS_FOR_BLOCK_REQUESTS = (

Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` (~5 months).

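As a sanity check on the quoted figure, a small sketch evaluating the formula whose opening line appears in the hunk context above, with mainnet-style constants assumed here for illustration.

```python
# Assumed mainnet-style preset values, for illustration only.
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256   # epochs
CHURN_LIMIT_QUOTIENT = 2**16
MAX_SAFETY_DECAY = 100                      # percent

MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
)

# 33024 epochs at 6.4 minutes per epoch is roughly five months of history.
assert MIN_EPOCHS_FOR_BLOCK_REQUESTS == 33024
```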
-### Why must the proposer signature be checked when backfilling blocks in the database?
+#### Why must the proposer signature be checked when backfilling blocks in the database?

When backfilling blocks in a database from a known safe block/state (e.g. when starting from a weak subjectivity state),
the node not only must ensure the `BeaconBlock`s form a chain to the known safe block,
@ -1462,7 +1462,7 @@ Although in this particular use case this does not represent a decay in safety
would represent invalid historic data and could be unwittingly transmitted to
additional nodes.

-### What's the effect of empty slots on the sync algorithm?
+#### What's the effect of empty slots on the sync algorithm?

When syncing one can only tell that a slot has been skipped on a particular branch
by examining subsequent blocks and analyzing the graph formed by the parent root.
@ -1472,9 +1472,9 @@ For example, if a peer responds with blocks [2, 3] when asked for [2, 3, 4], cli
-- it merely means that the responding peer did not send it (they may not have it yet or may maliciously be trying to hide it)
and successive blocks will be needed to determine if there exists a block at slot 4 in this particular branch.

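For intuition, a minimal sketch of inferring skipped slots on a branch from the parent-root graph of the blocks that were actually received; the data shapes and helper names are illustrative.

```python
from typing import NamedTuple

class BlockSummary(NamedTuple):
    slot: int
    root: bytes
    parent_root: bytes

def skipped_slots_on_branch(blocks: list) -> set:
    # Walk child -> parent links: any gap between a block's slot and its
    # parent's slot means the slots in between were empty on this branch.
    by_root = {b.root: b for b in blocks}
    skipped = set()
    for block in blocks:
        parent = by_root.get(block.parent_root)
        if parent is not None:
            skipped.update(range(parent.slot + 1, block.slot))
    return skipped

chain = [
    BlockSummary(2, b"\x02", b"\x01"),
    BlockSummary(3, b"\x03", b"\x02"),
    BlockSummary(6, b"\x06", b"\x03"),  # slots 4 and 5 were skipped on this branch
]
assert skipped_slots_on_branch(chain) == {4, 5}
```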
-## Discovery
+### Discovery

-### Why are we using discv5 and not libp2p Kademlia DHT?
+#### Why are we using discv5 and not libp2p Kademlia DHT?

discv5 is a standalone protocol, running on UDP on a dedicated port, meant for peer and service discovery only.
discv5 supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are, or will be, requirements in this context.
@ -1490,7 +1490,7 @@ It should also help light clients of both networks find nodes with specific capa

discv5 is in the process of being audited.

-### What is the difference between an ENR and a multiaddr, and why are we using ENRs?
+#### What is the difference between an ENR and a multiaddr, and why are we using ENRs?

Ethereum Node Records are self-certified node records.
Nodes craft and disseminate ENRs for themselves, proving authorship via a cryptographic signature.
@ -1510,7 +1510,7 @@ discv5 uses ENRs and we will presumably need to:
2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR
(ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Ethereum execution-layer nodes).

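A minimal sketch of one direction of such a conversion, from denormalized ENR-style fields to a multiaddr string. The field names follow the usual ENR keys; the helper is illustrative and skips the ip6/udp variants.

```python
def enr_fields_to_multiaddr(fields: dict) -> str:
    # ENRs store address data as separate keys (e.g. "ip", "tcp"),
    # while a multiaddr packs them into one self-describing path.
    ip = fields.get("ip")
    tcp = fields.get("tcp")
    if ip is None or tcp is None:
        raise ValueError("ENR does not advertise an ip/tcp endpoint")
    return f"/ip4/{ip}/tcp/{tcp}"

assert enr_fields_to_multiaddr({"ip": "192.0.2.1", "tcp": 9000}) == "/ip4/192.0.2.1/tcp/9000"
```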
-### Why do we not form ENRs and find peers until genesis block/state is known?
+#### Why do we not form ENRs and find peers until genesis block/state is known?

Although client software might very well be running locally prior to the solidification of the beacon chain genesis state and block,
clients cannot form valid ENRs prior to this point.
@ -1521,9 +1521,9 @@ Once genesis data is known, we can then form ENRs and safely find peers.
When using a proof-of-work deposit contract for deposits, `fork_digest` will be known `GENESIS_DELAY` (7 days in mainnet configuration) before `genesis_time`,
providing ample time to find peers and form initial connections and gossip subnets prior to genesis.

-## Compression/Encoding
+### Compression/Encoding

-### Why are we using SSZ for encoding?
+#### Why are we using SSZ for encoding?

SSZ is used at the consensus layer, and all implementations should have support for SSZ-encoding/decoding,
requiring no further dependencies to be added to client implementations.
@ -1533,7 +1533,7 @@ The actual data in most protocols will be further compressed for efficiency.
SSZ has well-defined schemas for consensus objects (typically sent across the wire) reducing any serialization schema data that needs to be sent.
It also defines all the types required for this network specification.

-### Why are we compressing, and at which layers?
+#### Why are we compressing, and at which layers?

We compress on the wire to achieve smaller payloads per-message, which, in aggregate,
result in higher efficiency, better utilization of available bandwidth, and overall reduction in network-wide traffic overhead.
@ -1563,13 +1563,13 @@ This looks different depending on the interaction layer:
implementers are encouraged to encapsulate the encoding and compression logic behind
MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.

-### Why are we using Snappy for compression?
+#### Why are we using Snappy for compression?

Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks,
and can calculate the size of the uncompressed object without inflating it in memory.
This prevents DOS vectors where large uncompressed data is sent.

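To illustrate the DOS-resistance point, a sketch of reading the advertised uncompressed length before inflating anything. It assumes the raw snappy block format (whose preamble is the uncompressed length as a varint) and the `python-snappy` package for the round trip; the size cap and helper names are illustrative.

```python
import snappy  # python-snappy, assumed available for the round trip

MAX_UNCOMPRESSED_SIZE = 10 * 2**20  # illustrative cap

def uncompressed_length(block: bytes) -> int:
    # A raw snappy block starts with the uncompressed length as a varint,
    # so the size can be read without inflating the payload.
    value, shift = 0, 0
    for byte in block:
        value |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return value
        shift += 7
    raise ValueError("truncated length preamble")

payload = b"beacon block bytes " * 1000
block = snappy.compress(payload)
assert uncompressed_length(block) == len(payload)
# Reject before decompressing if the advertised size exceeds our bound.
if uncompressed_length(block) <= MAX_UNCOMPRESSED_SIZE:
    assert snappy.decompress(block) == payload
```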
-### Can I get access to unencrypted bytes on the wire for debugging purposes?
+#### Can I get access to unencrypted bytes on the wire for debugging purposes?

Yes, you can add loggers in your libp2p protocol handlers to log incoming and outgoing messages.
It is recommended to use programming design patterns to encapsulate the logging logic cleanly.
@ -1580,7 +1580,7 @@ you can use logging facilities in those frameworks/runtimes to enable message tr
For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md)
(which is essentially no-op encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire.

-### What are SSZ type size bounds?
+#### What are SSZ type size bounds?

The SSZ encoding outputs of each type have size bounds: each dynamic type, such as a list, has a "limit", which can be used to compute the maximum valid output size.
Note that for some more complex dynamic-length objects, element offsets (4 bytes each) may need to be included.
@ -1589,7 +1589,7 @@ Other types are static, they have a fixed size: no dynamic-length content is inv
For reference, the type bounds can be computed ahead of time, [as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e).
It is advisable to derive these lengths from the SSZ type definitions in use, to ensure that version changes do not cause out-of-sync type bounds.

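As a toy illustration of deriving bounds from a type description rather than hard-coding them, a sketch for a few simple SSZ shapes. The schema encoding here is ad hoc, not the pyspec's type system, and it only distinguishes fixed-size elements from (nested) lists.

```python
# Ad hoc schema tuples: ("uint", bits), ("vector", elem, length), ("list", elem, limit).
def max_encoded_size(schema) -> int:
    kind = schema[0]
    if kind == "uint":
        return schema[1] // 8
    if kind == "vector":                       # fixed length, fixed-size elements
        _, elem, length = schema
        return length * max_encoded_size(elem)
    if kind == "list":                         # dynamic length, bounded by its limit
        _, elem, limit = schema
        elem_size = max_encoded_size(elem)
        offsets = 4 * limit if elem[0] == "list" else 0  # offsets only for variable-size elements
        return limit * elem_size + offsets
    raise ValueError(f"unknown kind {kind}")

assert max_encoded_size(("uint", 64)) == 8
assert max_encoded_size(("vector", ("uint", 8), 32)) == 32          # e.g. a 32-byte root
assert max_encoded_size(("list", ("uint", 64), 1024)) == 8 * 1024   # bounded by the list limit
```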
-# libp2p implementations matrix
+## libp2p implementations matrix

This section will soon contain a matrix showing the maturity/state of the libp2p features required
by this spec across the languages in which clients are being developed.

@ -1 +1 @@
-1.3.0-rc.5
+1.3.0

@ -708,7 +708,7 @@ def case06_verify_blob_kzg_proof_batch():
    # Edge case: Invalid commitment, too few bytes
    commitments_invalid_tooFewBytes = commitments[:3] + [commitments[3][:-1]] + commitments[4:]
    expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooFewBytes)
-    yield 'verify_blob_kzg_proof_batch_case_too_few_bytes', {
+    yield 'verify_blob_kzg_proof_batch_case_commitment_too_few_bytes', {
        'input': {
            'blobs': encode_hex_list(VALID_BLOBS),
            'commitments': encode_hex_list(commitments_invalid_tooFewBytes),
@ -720,7 +720,7 @@ def case06_verify_blob_kzg_proof_batch():
    # Edge case: Invalid commitment, too many bytes
    commitments_invalid_tooManyBytes = commitments[:3] + [commitments[3] + b"\x00"] + commitments[4:]
    expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooManyBytes)
-    yield 'verify_blob_kzg_proof_batch_case_too_many_bytes', {
+    yield 'verify_blob_kzg_proof_batch_case_commitment_too_many_bytes', {
        'input': {
            'blobs': encode_hex_list(VALID_BLOBS),
            'commitments': encode_hex_list(commitments_invalid_tooManyBytes),