commit 7402712e4f

@@ -181,6 +181,32 @@ jobs:
          command: make citest fork=eip7002
      - store_test_results:
          path: tests/core/pyspec/test-reports
  test-whisk:
    docker:
      - image: circleci/python:3.9
    working_directory: ~/specs-repo
    steps:
      - restore_cache:
          key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
      - restore_pyspec_cached_venv
      - run:
          name: Run py-tests
          command: make citest fork=whisk
      - store_test_results:
          path: tests/core/pyspec/test-reports
  test-eip7594:
    docker:
      - image: circleci/python:3.9
    working_directory: ~/specs-repo
    steps:
      - restore_cache:
          key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
      - restore_pyspec_cached_venv
      - run:
          name: Run py-tests
          command: make citest fork=eip7594
      - store_test_results:
          path: tests/core/pyspec/test-reports
  table_of_contents:
    docker:
      - image: circleci/node:10.16.3
@@ -307,6 +333,12 @@ workflows:
      - test-eip7002:
          requires:
            - install_pyspec_test
      - test-whisk:
          requires:
            - install_pyspec_test
      - test-eip7594:
          requires:
            - install_pyspec_test
      - table_of_contents
      - codespell
      - lint:

@@ -0,0 +1,2 @@
**/venv
**/.venv

@@ -71,7 +71,7 @@ jobs:
    needs: [preclear,lint,codespell,table_of_contents]
    strategy:
      matrix:
        version: ["phase0", "altair", "bellatrix", "capella", "deneb", "eip6110", "eip7002"]
        version: ["phase0", "altair", "bellatrix", "capella", "deneb", "eip6110", "eip7002", "whisk", "eip7594"]
    steps:
      - name: Checkout this repo
        uses: actions/checkout@v3.2.0

@@ -111,4 +111,4 @@ jobs:
          ls -la ./
          rm -rf ./* || true
          rm -rf ./.??* || true
          ls -la ./
          ls -la ./

@@ -24,6 +24,7 @@ tests/core/pyspec/eth2spec/deneb/
tests/core/pyspec/eth2spec/eip6110/
tests/core/pyspec/eth2spec/eip7002/
tests/core/pyspec/eth2spec/whisk/
tests/core/pyspec/eth2spec/eip7594/

# coverage reports
.htmlcov

@@ -46,3 +47,6 @@ docs/ssz
docs/fork_choice
docs/README.md
site

# docker test results
testResults

Makefile

@@ -14,6 +14,7 @@ SOLIDITY_FILE_NAME = deposit_contract.json
DEPOSIT_CONTRACT_TESTER_DIR = ${SOLIDITY_DEPOSIT_CONTRACT_DIR}/web3_tester
CONFIGS_DIR = ./configs
TEST_PRESET_TYPE ?= minimal
NUMBER_OF_CORES=16
# Collect a list of generator names
GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/.)))
# Map this list of generator paths to "gen_{generator name}" entries

@@ -34,11 +35,11 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
                 $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
                 $(wildcard $(SSZ_DIR)/*.md)

ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb eip6110 whisk
ALL_EXECUTABLE_SPEC_NAMES = phase0 altair bellatrix capella deneb eip6110 eip7002 whisk
# The parameters for commands. Use `foreach` to avoid listing specs again.
COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S)
MYPY_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), -p eth2spec.$S)
COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPEC_NAMES), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPEC_NAMES), ./eth2spec/$S)
MYPY_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPEC_NAMES), -p eth2spec.$S)

COV_HTML_OUT=.htmlcov
COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT)

@@ -74,7 +75,7 @@ partial_clean:
	rm -rf $(TEST_REPORT_DIR)
	rm -rf eth2spec.egg-info dist build
	rm -rf build;
	@for spec_name in $(ALL_EXECUTABLE_SPECS) ; do \
	@for spec_name in $(ALL_EXECUTABLE_SPEC_NAMES) ; do \
		echo $$spec_name; \
		rm -rf $(ETH2SPEC_MODULE_DIR)/$$spec_name; \
	done

@@ -128,10 +129,10 @@ citest: pyspec
	mkdir -p $(TEST_REPORT_DIR);
ifdef fork
	. venv/bin/activate; cd $(PY_SPEC_DIR); \
	python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
	python3 -m pytest -n $(NUMBER_OF_CORES) --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec
else
	. venv/bin/activate; cd $(PY_SPEC_DIR); \
	python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
	python3 -m pytest -n $(NUMBER_OF_CORES) --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec
endif

@@ -154,3 +154,7 @@ BLOB_SIDECAR_SUBNET_COUNT: 6
WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256
# `Epoch(2)`
WHISK_PROPOSER_SELECTION_GAP: 2

# EIP7594
EIP7594_FORK_VERSION: 0x06000001
EIP7594_FORK_EPOCH: 18446744073709551615

@@ -153,3 +153,7 @@ BLOB_SIDECAR_SUBNET_COUNT: 6
# Whisk
WHISK_EPOCHS_PER_SHUFFLING_PHASE: 4
WHISK_PROPOSER_SELECTION_GAP: 1

# EIP7594
EIP7594_FORK_VERSION: 0x06000001
EIP7594_FORK_EPOCH: 18446744073709551615

@@ -0,0 +1,22 @@
# Rename the build stage from 'base' to 'builder' for clarification and code readability
FROM python:3.11.0-slim-bullseye as builder

ENV DEBIAN_FRONTEND=noninteractive \
    WORKDIR=/consensus-specs \
    PIP_UPGRADE_CMD="python -m pip install --upgrade pip" \
    INSTALL_CMD="apt install -y git build-essential"

RUN mkdir ${WORKDIR}
WORKDIR ${WORKDIR}

# Chain the commands together
RUN apt update && ${INSTALL_CMD} && ${PIP_UPGRADE_CMD} && rm -rf /var/lib/apt/lists/*

# Copy the current directory contents into the builder
COPY . .

# Inline installation commands
RUN make install_test && \
    make preinstallation && \
    make pyspec

@@ -0,0 +1,20 @@
## Docker related information

This Dockerfile sets up the dependencies required to run the consensus-spec tests. The Docker image can be built locally with:
- `docker build ./ -t $IMAGE_NAME -f ./docker/Dockerfile`

Handy commands:
- `docker run -it $IMAGE_NAME /bin/sh` will give you a shell inside the Docker container to manually run any tests
- `docker run $IMAGE_NAME make citest` will run the `make citest` command inside the Docker container

Manually running Docker containers is best left to advanced users; we recommend the script-based approach described below for most users.

The `scripts/build_run_docker_tests.sh` script covers most use cases. It allows the user to configure the fork (altair/bellatrix/capella/...), the `$IMAGE_NAME` (which specifies the container to use), the number of cores, the preset type (mainnet/minimal), and a test-all-forks flag. Ideally, this is the main way users interact with the spec tests, instead of running them locally with varying versions of dependencies.

E.g.:
- `./build_run_docker_tests.sh --p mainnet --n 16` will run the mainnet preset tests with 16 threads
- `./build_run_docker_tests.sh --a` will run all the tests across all the forks
- `./build_run_docker_tests.sh --f deneb --n 16` will only run deneb tests on 16 threads

Results are always placed in a folder called `./testResults`. The results are `.xml` files named for the fork they cover and the date/time at which they were run.
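
For example, a deneb run kicked off at 10:30 on 2024-01-01 would be written to `./testResults/test-results-deneb-20240101-10-30.xml` (an illustrative timestamp, following the naming used by `copy_test_results` in the script).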

@@ -11,13 +11,12 @@
  - [4. Add `fork.md`](#4-add-forkmd)
  - [5. Make it executable](#5-make-it-executable)
- [B: Make it executable for pytest and test generator](#b-make-it-executable-for-pytest-and-test-generator)
  - [1. Add `light-client/*` docs if you updated the content of `BeaconBlock`](#1-add-light-client-docs-if-you-updated-the-content-of-beaconblock)
  - [1. [Optional] Add `light-client/*` docs if you updated the content of `BeaconBlock`](#1-optional-add-light-client-docs-if-you-updated-the-content-of-beaconblock)
  - [2. Add the mainnet and minimal presets and update the configs](#2-add-the-mainnet-and-minimal-presets-and-update-the-configs)
  - [3. Update `context.py`](#3-update-contextpy)
  - [4. Update `constants.py`](#4-update-constantspy)
  - [5. Update `genesis.py`:](#5-update-genesispy)
  - [6. To add fork transition tests, update fork_transition.py](#6-to-add-fork-transition-tests-update-fork_transitionpy)
  - [7. Update CI configurations](#7-update-ci-configurations)
  - [6. Update CI configurations](#6-update-ci-configurations)
- [Others](#others)
  - [Bonus](#bonus)
  - [Need help?](#need-help)

@@ -58,6 +57,8 @@ You can refer to the previous fork's `fork.md` file.
- Update [`pysetup/constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/constants.py) with the new feature name, matching how it is defined in the pyspec `constants.py`.
- Update [`pysetup/spec_builders/__init__.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/spec_builders/__init__.py). Implement a new `<FEATURE_NAME>SpecBuilder` in `pysetup/spec_builders/<FEATURE_NAME>.py` with the new feature name, e.g., `EIP9999SpecBuilder`. Append it to the `spec_builders` list.
- Update [`pysetup/md_doc_paths.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/md_doc_paths.py): add the path of the new markdown files in the `get_md_doc_paths` function if needed.
- Update the `PREVIOUS_FORK_OF` setting in both [`test/helpers/constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/constants.py) and [`pysetup/md_doc_paths.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/md_doc_paths.py).
  - NOTE: since these two modules (the pyspec itself and the spec builder tool) must remain separate, the fork sequence has to be defined in both places.

## B: Make it executable for pytest and test generator

@@ -70,24 +71,7 @@ You can refer to the previous fork's `fork.md` file.
- Update configs: `configs/mainnet.yaml` and `configs/minimal.yaml`

### 3. Update [`context.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py)
- Update `spec_targets` by adding `<NEW_FEATURE>`:

```python
from eth2spec.eip9999 import mainnet as spec_eip9999_mainnet, minimal as spec_eip9999_minimal

...

spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
    MINIMAL: {
        ...
        EIP9999: spec_eip9999_minimal,
    },
    MAINNET: {
        ...
        EIP9999: spec_eip9999_mainnet
    },
}
```
- [Optional] Add a `with_<new-feature-name>_and_later` decorator for writing pytest cases, e.g., `with_capella_and_later`.
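
As an illustrative sketch, a pytest case gated on the new feature might then look as follows (`with_eip9999_and_later` is the hypothetical decorator added above; `spec_state_test` is the existing pyspec decorator):

```python
@with_eip9999_and_later
@spec_state_test
def test_my_new_feature(spec, state):
    # `spec` is the executable spec of EIP9999 or a later fork;
    # `state` is a genesis state built for that fork.
    ...
```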

### 4. Update [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py)
- Add `<NEW_FEATURE>` to `ALL_PHASES` and `TESTGEN_FORKS`, as sketched below.
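
A minimal sketch of the change, using the `EIP9999` placeholder from the rest of this guide (surrounding entries abbreviated):

```python
ALL_PHASES = (
    # ... existing forks and features ...
    PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
    EIP9999,  # new feature
)
TESTGEN_FORKS = (
    # ... existing testgen forks ...
    EIP9999,  # new feature
)
```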

@@ -96,20 +80,6 @@ spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {

We use `create_genesis_state` to create the default `state` in tests.

- Update `create_genesis_state` by adding the `fork_version` setting:

```python
def create_genesis_state(spec, validator_balances, activation_threshold):
    ...
    if spec.fork == ALTAIR:
        current_version = spec.config.ALTAIR_FORK_VERSION
    ...
    elif spec.fork == EIP9999:
        # Add the previous fork version of given fork
        previous_version = spec.config.<PREVIOUS_FORK_VERSION>
        current_version = spec.config.EIP9999_FORK_VERSION
```

- If the given feature changes `BeaconState` fields, you have to set the initial values by adding:

```python

@@ -123,32 +93,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):

- If the given feature changes `ExecutionPayload` fields, you have to set the initial values by updating the `get_sample_genesis_execution_payload_header` helper.

### 6. To add fork transition tests, update [fork_transition.py](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py)

```python
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None):
    ...

    if post_spec.fork == ALTAIR:
        state = post_spec.upgrade_to_altair(state)
    ...
    elif post_spec.fork == EIP9999:
        state = post_spec.upgrade_to_eip9999(state)

    ...

    if post_spec.fork == ALTAIR:
        assert state.fork.previous_version == post_spec.config.GENESIS_FORK_VERSION
        assert state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION
    ...
    elif post_spec.fork == EIP9999:
        assert state.fork.previous_version == post_spec.config.<PREVIOUS_FORK_VERSION>
        assert state.fork.current_version == post_spec.config.EIP9999_FORK_VERSION

    ...
```

### 7. Update CI configurations
### 6. Update CI configurations
- Update the [GitHub Actions config](https://github.com/ethereum/consensus-specs/blob/dev/.github/workflows/run-tests.yml)
  - Update the `pyspec-tests.strategy.matrix.version` list by adding the new feature to it
- Update the [CircleCI config](https://github.com/ethereum/consensus-specs/blob/dev/.circleci/config.yml)

@@ -0,0 +1,6 @@
# Mainnet preset - EIP7594

# Misc
# ---------------------------------------------------------------
# `uint64(2**6)` (= 64)
FIELD_ELEMENTS_PER_CELL: 64

@@ -0,0 +1,140 @@
{
  "vec_G": [
    "0xa44aae199242e24a4d00b8e5c96e318793eeeb2e154423ca6dcac20043387323dea3216a69bc13e9a3507ff842da544d",
    "0x8dff68d38281daa552c587d073e498d1ed311986967192cba052827b07f194a936809ea3de921511db45d15234754993",
    "0xad0ff210542fc069d065b53b2cd4228a0f097facebe089a83b989fd3344c53e440ded5da26bc6115299d9d464e1a9f28",
    "0xb638e703f852d2ac49595141f7688c52a95dcc0b00f82a8548b14d823ebffe8657557ed7dab6bee44b17d39d742f69aa",
    "0x9377c7771e07a1a9b9368796ce1a1b93d560d7726afde02627b424ee1dcddb3761ed49f2e1ae5279dca050935bd4a6dd",
    "0x8d1be282936392c0c61c94745cfb29da0f3334272c9b37fe43c8b869640eb1ea88c8aaf5ff797bd90daf3d6ebeb4efb3",
    "0xb3b55847d3bcf98b587c4441b0703939b5984bac91b00aabb5f3a45b38445405b61127bc6ee9f6b4b9e88c7a29c3aaa3",
    "0xafb61afb9f92c37ec21220c02edf14876d2a08eab8ad3c2bc1f3bfe5036abfd23a4a7216616fa1953e14340bf0acab37",
    "0xa94133ee96e00465fe5423e0ea52404e0f624ee8cc9d69b4cf94e7d73635dfa2087cd2d2596ac4a75504aac5ef6a02d4",
    "0xa4b93e670e7ee926ffb4ea4e07b87346e5d33c76520912f8a7937cdc3290a4c054586e175c39826b7fafbe777d14e4f4",
    "0xa57540f7c906d9e70ef90580967562f8c06398ac2d77612045dce0ea6fbc2fedcfdbeb3f6ad3bb717e1295d9539ede63",
    "0x8504de35cb15935580dab4ae521aede9a6764731a687d23ed213859545065fae9ba915f33f05c5417d91801d2e70098c",
    "0x976674e04ccfe0530db2a3723a36760949857f56b77df698d6b8c88a1152ca4ee2f0dad4fac413a34e0aaef9684fb547",
    "0xa7aee20d139d52043203c05ce128a7b17d0e37cfd92370aecc2a777339f03558bbe9cb4bae5c42557be50d6be381537c",
    "0xaea7520f557b33f6dbbf244f3c7e5784ce727ff18396dc7c200818333b0e946c1bd8c2e464646ca7b43a857e09b23bc5",
    "0xb15a797f9a56e5511f73a02cc6a94ca3e6b4a50e8166151cba087d1bc051486b1771054ab1a76cea9045c961b989dad3",
    "0x90458a1852cc7581b8dbf82e7ce16407542b30ca8d7b13ba86707a57e25996545cf2dc2ce03af35c4465129f1441dc2c",
    "0x8975b5a131cdcebb1a7716e766dd829acaf1bb49e245e765b968f67de81be679937485c84193eb466033fedff89f117f",
    "0x86a8d7b004b32f4a00b520b35701cd64df833827e37ff5ccff01f1f9ed72cd6a95cc6de5313085aa0f16d537413cf3f8",
    "0x881dbeff4ac65d1be0bb462480ecfe299003042c4190a5c68201981e08868e3f056f1e35a5117488db6633d567c9930e",
    "0xa70b2ea517b3c51cd02f6b6677d926ac648fa605330be36c7a475b4dfacbdad035c02d1124350fb67a6e9eef9db858b8",
    "0xaab1475f7a085a35253adf0a5c5c968f8a44e119e92f69ceff1fb46a62e161ac2c65358310a64128c8a87573646712f7",
    "0x94cafc40ecbd04ec8976ae399b820c3a0717dee37b2a8338724042cb7465a18ea29d76068b82bff1bc2147776a7f44c1",
    "0xb936bf0248d8df624185c9176d2edc9c6cbf7a4624c942f50011ae83ca2212ea2453c07cf8db96294bb010b18cfabc48",
    "0xaf0a2894d762325816d6a35485612eaa173b4fc31ff112b6e20dbab722692f58230b17c038212098fed7998eb2fa23a4",
    "0xa6caa65c5483318cb4e47fa186353c565517a03018a6eb04baf5aaa8844379b09ab998e568cfc2af976e44cd1cb15395",
    "0x924c94856f92e5212215636fe3ccc5de9e952293be6fe2369250d6aec62d481977b7d2815d72ccca28d0be18918c8d63",
    "0x91c764d72eb782e2957a39eca43107069d28dd2e33f39484c55201f685714269672c191ee42d27237bb9055612eca176",
    "0x8af8de9f36eac06547468021c040e020b284f517b8a4ef99f0909962c2fed60a0c7955f9763258dc4839dbaafe8f9365",
    "0x9864fc53cbf30454f8ce1d9094d70f4c974e841c7d69815d73eb1b5efa0b063b511cac62ded138e75a7a0440f6b332d4",
    "0x83cbf72e944cc0bd81fa18eda658c9c668f3f17c06b1936d7646aef0d7f5d35004709dbb04a811cade784bb5a89f96ad",
    "0x93c9e4b3a4f477723410f30134fe16184df889ef021aaafbd8c562929b90031fb22b1b4e806287703f12287fbb0e99af",
    "0x99fb0257c487a9801d7af55179f8eba20d90e673f57791948a35caf7dbdc08ee6069d2d1b9751c7d1b831918bdceb7db",
    "0xadc24c2c32ce6a7ae62fac5fcd87f5658071a01e86d18bd8366c50a6414caec5fcd067168b9359b2cdb07b70f7f21f11",
    "0xaaf509c0687bab09c61f36202e81c21f8ad01276dee9c8035457fd1bf550afc2eacdaa93a9a7b53c60120ac07594261e",
    "0xb30b3bfc59f53f15adaca94304eaf9df1c85ceb8f2f3831fc7472e9aab4ed62454655930ab7c924d744ae49919db7b9e",
    "0x887e2559ea7fe8012bff545cf77b51f149f18ea6dfba90e60aa4bca704aec4f79e628b73fcb9f55d38071cbca322853d",
    "0xb7fed350264b85c1c167f8763e6b4ef23bd65a1d611daa5e3ee8de5df399603f206f7b81cc061019bedc694243cc23b6",
    "0xa83210d7c9c7858b047a866f672e6cdec26d73fc4815f28636cca898ff349970163819869500769208e74bc5a484845a",
    "0xb08abbcda10f77e83b1f267c29ab192d4324d890834b2b44847a412a48cdb65e91a97c9a2fbc768034bceec55324a15f",
    "0xad67e686bd0159f8ed762916211b5b0884a40df62518b8035edb5b7091dec05ec0a28ed147f3d6b2ee6aaf52d94bff31",
    "0x8f324349647ccbaefb906d6790c314487b66a472ed1f8e02579b0658f2967185fe16227ad97802652941d23b5d2f67d1",
    "0x96f41b8f53b08fe484f9df8880ed95a5d239ac541c9bb4ebbf7351c36ab191a3be33982c6bbdd099610cd0d96406aece",
    "0xb1b79d46dd8a0dac9e9f555ce082cdf30f968263557dcccdeb54294f953f83f333c3af785c91e117de3ce70f79edcc66",
    "0x81cf46a6962ba9d4a4f5bf2e63828b3f11bc9f35b2d126f80d9c547f53cec1539f303f22b91750660af46a17fcdf92a7",
    "0xb7228f3497afba6c316d65eab6f3307bd88c01c49a881e649a67c89b88d5743ff74a8a7cb59e1b6e0f0ce32541c78dac",
    "0x8fb76e5fc58d3c7e20805e8ae8a9d2b9359470c1a8c90017744abcee7e86f09b53838d34b56b6c95ed8f3bd4a4d06022",
    "0x8ddfa7be366374d6fb55c6ab88c1a3b0b61edd87ef1069b492b38124e68a901da691702bef9ea3ad66019b59148d9285",
    "0xa137a4405d6ea2b9b6a124b7bd073bc57a5b62f6b7dc70f6ee1da1d6103da22e19368cc6c804853998901fb9a5508723",
    "0x86fc4a0481122463dea3fed7ba1671b41200edad47d1b16f90a0055e10ea46f1db64efe7c052aaded4e9ebcc00e811ee",
    "0xa21a5cf22c6e5d8c95a0cf4b0a28be314534bee6bf1b342551edfff8a594664f75a95531d176f54bc8a1b3780dd56a00",
    "0x9324572f9dbcbf9732eeb796089e902361e1d638fb83d4ad3bbd4b46bc169b23ce5e79ac066961ea6c096b5e219351eb",
    "0xb048c3ac9604adbf3aad2ecf66485cb1fe90c0d767f0fc6f050a8d1fc3ea5620a88e46e32e30154f2fdf0990dffb350d",
    "0x8a38fddb1a0a9de438aecf09cd0b2860a77491adfc2f47c485bd6e550d8f37e3accf0acd631743d855c830c20ffc4eae",
    "0xab0ba1ec519d872ef5f768c940135f26bd8586ae530c48e45e2a25229e8a740ba17c93b3dd6761ba6c81a1929878866a",
    "0x830b63ccc9713075ac618c64b870d8375d5bed64fd3701ec0caed47afe5ab3f567b3a1a981c302540ed0010c8aa48148",
    "0xacb93bff4d4640d5c25291fc93d159360828481c6339baac50aa861def7088afa5909b735d1d4a12e3e2a23e303b6399",
    "0xb398803308ffcd86e7b6df0ba952d95e7f417b73afed81e23eff09a4bd0a7ed1ab651beb206834d2c883ac4417f49032",
    "0x9756aa1c5173a38e831f5cadae27fb0ee8ed850e2a846718f0f5419cc90beb9518dc31e4e8fefe4a9a40e54917fe120b",
    "0xaeb4cbd4c463752a695e9c2d66188d015dd6220754130579c9bfa2d3b7c3c6c3fc7ec49fcf0009aba9bd5074dcb3f95e",
    "0xa1e3c0889f0657ddda6816c1e4e1e43e457a5a388f60cea410c048023ac227db4e3e6d2a7f0222f499a89137605210e3",
    "0xad96ad5fc3e43e68bc238e1267ccd81636e9e0ab035890185c4294705534a7bd25bb1c15a60786f35a829473d49781ea",
    "0xa36db550a04a4676ac760a32e3734f5f17f8b7b912d9c560e9c148a706a2492d8b5a146b4188c66e3f4d5272777ddd58",
    "0xaf47ec208a81bd7003cfccc1a1db8d2065f1c984f42abb430a903c9a643d1cc9fb981d55a01380bf7b74721275aaaa62",
    "0xa979361a25434641c217ef285c4c81974bc2fe3a856781beab30a883b95d1b93de1fc21872723737cc93e028c5d3d147",
    "0xb67ff15cc11b431c47fd1c136ea18c34224741c147eb584c6a3d253af826babe76dac4f7f7b847e7cd674730c3cf4956",
    "0xa1638a24170fda842334a68c3a3939ac24b1de7b124d184244405b26419ccf7a5ceb090a4f1755bc07a5fa6637165255",
    "0xb1ed9cf1516dca2a38b00694847809d8a172968b61a26d0615c5b2ab80363acda6a9af632fed703299d964a3736a7103",
    "0x99319462b880885aa5db0070f151e205bf8288bf993d434fc99081bffdc1528265d5e252e2666d0947fdeafa48625513",
    "0x8f5707ce471989512e497385171f9a5f462b0e987ffd8a696c602248155e9639b9597bbdd8b6cbd6685975136b52a40c",
    "0x87465b2c5dd27e13a0892c30e7e2ff6819489db9b53487265a23fe764b6b4eca3b2338de672e6ea4ab3f8736c9feef56",
    "0x89ddb3632add71b62e324fa6265600e809b29e4904d68c5fefd59a36f66cbd3741e03245aa4f015521d946e777d0c195",
    "0xa270e76ffa82fad0a4408aa2e45235dbbd18304eb470e51411ae4ddd16b142666bfe37d9510eea9e69ed04e799788e0c",
    "0x8983d57179a62eb563d3f7453672a5940b958a27df321bde6589056c1ea542c419e4116765a457c9b529b1014c3b3f68",
    "0xab405480f4d5001e4c43b52f095896a3c8e394bff02c14f57facbe539c04210b4b589903bd94d0ca58b78e8c82745a22",
    "0x82377e25d1f00987908d21ee2620a6653af77c72e038bb394c72d0b1d9b9a4930c6a2bb06ca091b8c4c19e62830268d6",
    "0xab94d4848d372c00e205c64a6c7386a4078cb1860989c99e0313776d0518b056f6608ea3b4d12f50e0a8678dbfa0c73c",
    "0x977ff883fc1217d4ef5220c74e06c3ce002cb691f191a1e31f46082fa2400236a5879d5dd4bd1d2421b991bb394c5e17",
    "0x95bac7596af12ba4c11226ecd0ed0828c98eb60c8f142477872b401e2d7af5f3b04204508cf40a88f29d2235125a1b65",
    "0x813e6c95f967f1371d0df1144bf73993947a6cd98e31f127db9239d69a8e97c1a41394890a2a2be85240c9b36ec74906",
    "0xb44194edd26a519267d4ca212540bbe114976f28be9082c77a308c1731159c8b0fabb25b590dc445053585df0e555797",
    "0xb7bf875591b4c4859154bbb9081fcb82b28fe87121fb866b598a5baad601478acbac0cb13d0cd14402368cee767b4231",
    "0xa7bce1268dd1ba7d2e3e24e9d3fd44d0f7664e658dc27e9bee4aff75d76ea920bc34f25d14fe96a02c96cbb6692b544c",
    "0x973194c2280380f42070200c9c729b3f651336379da482c5229ad321f29423bc6d1ccc1a5ced594806ce73b3ce437d12",
    "0x978b88b3a66934790fba6bd2fec74410e18fab319b6c8a828dc32c3c8ffc23014e26f6c42835b58440bad6201ba790a2",
    "0x8445283a55cd40ac99a710e4ebeca19b4042f89a9dbc0cb22cf62b6312edc7a4d4366efb169e1c0de8bacb8a1f2ff2ca",
    "0x85bfaa05173114a0f3a2276671846db99a8f858b279a868e236cd9d974f69171501198cfcdec3dca093e5439a88199be",
    "0xa3aab6d03e5c0cdd38096d7c97935913dbd24953e49eee341603ed434a067e1ac2270e6b74b45737ae1e79e9c248f15c",
    "0xaf36fb1566ffeb6f0673640853b6c576330bb32751454b83835f0f26f50cd5d5ebb6658f6b1e9eeb9dcdb879745c9c7d",
    "0xb216eb3d9d28c1ba93a57e82cc03469a9f40156266fcc96134a66da8a61aff3b78b783221fda5b23526fed2f91345418",
    "0xb74637cfe60f5e7c116ab4be75bcfdfb08ba29ecc7b2363f547a2236bc170346388dd9fbaa1670ce1e45d4c96069717b",
    "0x823a3cc16cfae5317b293fe905b8af7d7d2733c24f96cc66522aff2a376b5340dbcca8429f4082edb562da157c051c80",
    "0xadf3b83761df2ca910900775e5d65e29bfd274cbb0cdd9614115aceaaa019b0e38a3e3b11777fff99d2b3b8c22de490c",
    "0x8ef121f237356ed3dce22ec6e6b8a8085b71db20974483242d1280c18c51ba4f4438200cb4137e25f447e1a713f8894b",
    "0xaec4690276f929c9cd2fedef923e1d2324a6b5f273f5c938b9e971b93d0762f181013e2cef334bf3ba70f1795fafcf23",
    "0x91099cdfbe5ec822474b397366cba936c997bbe17169334bf94730c689b1e27943793f96e4825e0d96df577af77ad06f",
    "0x94ac37115fd458fb690177ac88e6fc7f11bafb231fdc20e2995fddab695494a4bc86b1fcf53f7259843749f55ae40b92",
    "0x832d99b9e3f910e8e19bee53dcf1ae0fcd7713e642cfebbdd891c59325bc50894a812ff53edbfbb38aca8cc5d97aea06",
    "0x96373b775b1eafe66113b1bddad0e4ae9ba26f2c32393a29a2fa3660979eac480748d05deda7a68cf44c64fa38c7a03d",
    "0xa0f960d2e4c4a6b75ded6207b686d3e943b675f5eaed6820d676889bd0625554753db4de8bc8d0c3cad475ee411e39b5",
    "0x97d86db51837301ebb10e4867a8d71ed6f82b152e6b9d4256d15e0807d7e461dbfceeeabfc2ab9d5bb5789f3d9c30779",
    "0x892bb178f0f2bdd2f6a027ba426396e610cd0803f6a1365ef6caf5508ddc5349f30f363e15cf19b2e700374b6d871830",
    "0xa1271b15e75da127dbb44e2765c879ec037479edcfe52a3b7b607c114509e03a057a6d685223c3f4a0fd9e734469378a",
    "0x8863d29a686a040514661be853c4cbdc28cbe7fe8c401aad01644f0a892ee4c4005148e40c2fdce642e690be9d8eef2f",
    "0xb567760e8dbf7a61ba5a77d4b07c4a879b580863894f3c4fd9d652cf1ca53b9a0aebd6d8f559c5665fdf5cab5b9242c9",
    "0x99bb4f6d41b33039c9443ba90203ca47eb6e79b126ec3e92e61495833d59c8464002cedc74bc33795d5a5e5d4772610d",
    "0x94cf97bf6f28e38b2e6f4cbc58a6fbe1f031ecd8a9cc66b62835698ea88e9fe6419a80b57ffa19bf77dc048e39c11f41",
    "0x8dc24197a96bbed35f779bd64cf9104975b68f310b82c2f03a587b522102cfecf061383108d7628e8b46359c06f41726",
    "0x86ed177c05f473eb8bad7f79238d911c11cc3c7378e24dd70aa83659888f4915f9e13e3563617d369e8217e1ba77c01f",
    "0x82b7176c8a6609cc286bb8f3f8d72a707aae630cb05510cba5a5cba711acd472d60beb2a413e45aef8265026d79fe576",
    "0x875085a20d7390d92385ff1e4c112195198f7763cebde5d13ffac243f0a96be7a2a57ab9ec105f99187bd13e00cbf2f9",
    "0xb14d2a2395677a2beb3b90bda389c67a7a4a8361ce353c8710a13aa59c08d2aea8d0197beb0db31b0e92fbde16bb9606",
    "0xb7f222ee1e25115ece1b098b1c0261b326dfc454380d07e99bf498bbd8aafb209da4b5ff64c6a481cdcafc5d205de296",
    "0x8bc66bbfb988913fd3b1c56d36ae3eb06b6219c8c278bdc8422d07e01e48e44239eca14255a43e1038f80322b2969156",
    "0x906d257ada831ab1db57a7511d10d33c43f84947a2cbb8e9401010c9de542edaaa39d2ce329c33fe1a99c0bd03779acf",
    "0x80373467a36d5e99aafde2875dc9caf6b1242bb4a285c6879f11d30ec4eaedea54327237eb02cf221d660ead62875948",
    "0x9081a5170a70333cd9d6bd638772c2012e9c095800d3cdaf77a7ca98a1413c109686b42b9fef681250eb93b715702d1d",
    "0x899427b7eca7c24e0760a6928f688ce91f7bc725b70c456c1ad7995effaac3edae2b41067e39cf8e2310a7201a4af55b",
    "0x8d5ea173aa180ed6940d9577898271a21faaddfaf5afbc46c66ac29039ab35946952217545f5e7b816873e97df6e294e",
    "0xa8af63310ce64f772410f18f29d60f9f1c5c49a858ed1971089673c1e0c8d85c8235617ea8bd919e542b238a63b1be07",
    "0xad591bb5842e0d6132c573ab747d391a698332637452bdd262b0a6ea2ca29b346c7405348593228769459f5e1f156a07",
    "0xb38395b34871fbc0c3a4d5e50c7e62a08ee58d2e19051ce269d2a56615f1f679e7eefe47e99ebe1e53a9bae9013c9de7",
    "0x87affdb63f3d5bd9f4e95da4dac365ba3f853be767b5c09c4fbc24162744566ab20544a61657374e4122f36a2cfcc8c2",
    "0x80cd960856a511cf957bf5bd4a4c3c6bc65c0fb5e785dc51560aa34ce56ddec6838f73e6bf257cfd273e05c7d719c098"
  ],
  "vec_H": [
    "0x8a135f527bcc420b9a4dae226c237012346c2356abbf20b60c26eb513ff0745153ff20dd94286d96fe679b1a22cbff5d",
    "0xa5c64c216db68e10b32ee5c8fd29b1a3dce6238273ec141ca7d8d8dcbdf7b992c4ddf576633cd4f828244527e05e3461",
    "0xab0a28fa68ad7d91c40b49e277e25ebdef5b689dbeae3be297161e44df940c02d2594e5d76b6be1547780d8ffc3cf9de",
    "0x8532adc9d2fac12f65261fd17a57f231f7246feb60babc9c7beaeb628c0e1ad207e17252d736a7965542c3d7ebeb7fc2"
  ],
  "H": "0xaeb2d25680cbf2be736d999a01d73472e2779229a8ee2a8701b5cea2a93898fdf2150d467247f23a7761f650d38bdf6f",
  "G_t": "0xa4e53147e355879fdb62f185ab7b8569925f356503a2ea67d4a13380f2a1bb82be57112893584834f1965cc8a4061d2f",
  "G_u": "0xa693bce513d30e072ef71b7dfd03966cba8b11b0af9dbc0585b92514175772a81d083d7ff48e0adf3e3bee88823db240",
  "G_sum": "0xa0181ccd048b494d5b35463e180408dc9c3325573f8639bf6bcd9447accfc093336158a0859fe3b3021ad141936da977",
  "H_sum": "0xa6dbebe99ca5ddf836d4d1fe64479de04d8370dea2c36c3409b83706d58ec58150eba667d1d60471299b494162fcb6c1"
}

File diff suppressed because it is too large

@@ -0,0 +1,6 @@
# Minimal preset - EIP7594

# Misc
# ---------------------------------------------------------------
# `uint64(2**6)` (= 64)
FIELD_ELEMENTS_PER_CELL: 64

@@ -0,0 +1,20 @@
{
  "vec_G": [
    "0xa44aae199242e24a4d00b8e5c96e318793eeeb2e154423ca6dcac20043387323dea3216a69bc13e9a3507ff842da544d",
    "0x8dff68d38281daa552c587d073e498d1ed311986967192cba052827b07f194a936809ea3de921511db45d15234754993",
    "0xad0ff210542fc069d065b53b2cd4228a0f097facebe089a83b989fd3344c53e440ded5da26bc6115299d9d464e1a9f28",
    "0xb638e703f852d2ac49595141f7688c52a95dcc0b00f82a8548b14d823ebffe8657557ed7dab6bee44b17d39d742f69aa"
  ],
  "vec_H": [
    "0x9377c7771e07a1a9b9368796ce1a1b93d560d7726afde02627b424ee1dcddb3761ed49f2e1ae5279dca050935bd4a6dd",
    "0x8d1be282936392c0c61c94745cfb29da0f3334272c9b37fe43c8b869640eb1ea88c8aaf5ff797bd90daf3d6ebeb4efb3",
    "0xb3b55847d3bcf98b587c4441b0703939b5984bac91b00aabb5f3a45b38445405b61127bc6ee9f6b4b9e88c7a29c3aaa3",
    "0xafb61afb9f92c37ec21220c02edf14876d2a08eab8ad3c2bc1f3bfe5036abfd23a4a7216616fa1953e14340bf0acab37"
  ],
  "H": "0xa94133ee96e00465fe5423e0ea52404e0f624ee8cc9d69b4cf94e7d73635dfa2087cd2d2596ac4a75504aac5ef6a02d4",
  "G_t": "0xa4b93e670e7ee926ffb4ea4e07b87346e5d33c76520912f8a7937cdc3290a4c054586e175c39826b7fafbe777d14e4f4",
  "G_u": "0xa57540f7c906d9e70ef90580967562f8c06398ac2d77612045dce0ea6fbc2fedcfdbeb3f6ad3bb717e1295d9539ede63",
  "G_sum": "0xa0d97028d7194094fe1c4f00189e360ae362eca4aa9dc8f92eabb8dcf0d93140a81953d4505cd7dc592504710d696ef9",
  "H_sum": "0xaf415fddfb82e7cbb91fae0c443425c51dbb68e05f0324bd2d79e40b923ecb4f806e96e9993eabadd1c39ac4e12e74bf"
}

File diff suppressed because it is too large

@@ -7,6 +7,7 @@ DENEB = 'deneb'
EIP6110 = 'eip6110'
EIP7002 = 'eip7002'
WHISK = 'whisk'
EIP7594 = 'eip7594'

@@ -22,8 +22,11 @@ def collect_prev_forks(fork: str) -> list[str]:
        forks.append(fork)


def is_byte_vector(value: str) -> bool:
    return value.startswith(('ByteVector'))
def requires_mypy_type_ignore(value: str) -> bool:
    return (
        value.startswith(('ByteVector'))
        or (value.startswith(('Vector')) and 'floorlog2' in value)
    )


def make_function_abstract(protocol_def: ProtocolDefinition, key: str):

@@ -41,7 +44,8 @@ def objects_to_spec(preset_name: str,
    new_type_definitions = (
        '\n\n'.join(
            [
                f"class {key}({value}):\n    pass\n" if not is_byte_vector(value) else f"class {key}({value}):  # type: ignore\n    pass\n"
                f"class {key}({value}):\n    pass\n" if not requires_mypy_type_ignore(value)
                else f"class {key}({value}):  # type: ignore\n    pass\n"
                for key, value in spec_object.custom_types.items()
            ]
        )

@@ -108,7 +112,7 @@ def objects_to_spec(preset_name: str,
        if vardef.comment is not None:
            out += f'  # {vardef.comment}'
        return out

    # Merge all constant objects
    hardcoded_ssz_dep_constants = reduce(lambda obj, builder: {**obj, **builder.hardcoded_ssz_dep_constants()}, builders, {})
    hardcoded_custom_type_dep_constants = reduce(lambda obj, builder: {**obj, **builder.hardcoded_custom_type_dep_constants(spec_object)}, builders, {})

@@ -131,12 +135,13 @@ def objects_to_spec(preset_name: str,
        imports,
        preparations,
        f"fork = \'{fork}\'\n",
        # The helper functions that some SSZ containers require. Need to be defined before `custom_type_dep_constants`
        CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS,
        # The constants that some SSZ containers require. Need to be defined before `new_type_definitions`
        custom_type_dep_constants,
        new_type_definitions,
        CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS,
        # The constants that some SSZ containers require. Need to be defined before `constants_spec`
        ssz_dep_constants,
        new_type_definitions,
        constant_vars_spec,
        preset_vars_spec,
        config_spec,

@@ -9,6 +9,7 @@ from .constants import (
    EIP6110,
    WHISK,
    EIP7002,
    EIP7594,
)

@@ -21,6 +22,7 @@ PREVIOUS_FORK_OF = {
    EIP6110: DENEB,
    WHISK: CAPELLA,
    EIP7002: CAPELLA,
    EIP7594: DENEB,
}

ALL_FORKS = list(PREVIOUS_FORK_OF.keys())

@@ -6,12 +6,13 @@ from .deneb import DenebSpecBuilder
from .eip6110 import EIP6110SpecBuilder
from .eip7002 import EIP7002SpecBuilder
from .whisk import WhiskSpecBuilder
from .eip7594 import EIP7594SpecBuilder


spec_builders = {
    builder.fork: builder
    for builder in (
        Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder,
        EIP6110SpecBuilder, EIP7002SpecBuilder, WhiskSpecBuilder,
        EIP6110SpecBuilder, EIP7002SpecBuilder, WhiskSpecBuilder, EIP7594SpecBuilder,
    )
}

@@ -42,9 +42,9 @@ def compute_merkle_proof(object: SSZObject,
    @classmethod
    def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
        return {
            'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',
            'CURRENT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(54)',
            'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',
            'FINALIZED_ROOT_GINDEX': 'GeneralizedIndex(105)',
            'CURRENT_SYNC_COMMITTEE_GINDEX': 'GeneralizedIndex(54)',
            'NEXT_SYNC_COMMITTEE_GINDEX': 'GeneralizedIndex(55)',
        }

    @classmethod

@@ -16,5 +16,5 @@ from eth2spec.bellatrix import {preset_name} as bellatrix
    @classmethod
    def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
        return {
            'EXECUTION_PAYLOAD_INDEX': 'GeneralizedIndex(25)',
            'EXECUTION_PAYLOAD_GINDEX': 'GeneralizedIndex(25)',
        }

@@ -0,0 +1,20 @@
from typing import Dict

from .base import BaseSpecBuilder
from ..constants import EIP7594


class EIP7594SpecBuilder(BaseSpecBuilder):
    fork: str = EIP7594

    @classmethod
    def imports(cls, preset_name: str):
        return f'''
from eth2spec.deneb import {preset_name} as deneb
'''

    @classmethod
    def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
        return {
            'FIELD_ELEMENTS_PER_CELL': spec_object.preset_vars['FIELD_ELEMENTS_PER_CELL'].value,
        }

@@ -1,3 +1,4 @@
from typing import Dict
from .base import BaseSpecBuilder
from ..constants import WHISK

@@ -9,6 +10,8 @@ class WhiskSpecBuilder(BaseSpecBuilder):
    def imports(cls, preset_name: str):
        return f'''
from eth2spec.capella import {preset_name} as capella
import curdleproofs
import json
'''

    @classmethod

@@ -17,4 +20,13 @@ from eth2spec.capella import {preset_name} as capella
        return {
            'WHISK_MAX_SHUFFLE_PROOF_SIZE': spec_object.preset_vars['WHISK_MAX_SHUFFLE_PROOF_SIZE'].value,
            'WHISK_MAX_OPENING_PROOF_SIZE': spec_object.preset_vars['WHISK_MAX_OPENING_PROOF_SIZE'].value,
            'WHISK_VALIDATORS_PER_SHUFFLE': spec_object.preset_vars['WHISK_VALIDATORS_PER_SHUFFLE'].value,
            'CURDLEPROOFS_N_BLINDERS': spec_object.preset_vars['CURDLEPROOFS_N_BLINDERS'].value,
        }

    @classmethod
    def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
        constants = {
            'EXECUTION_PAYLOAD_GINDEX': 'GeneralizedIndex(41)',
        }
        return {**super().hardcoded_ssz_dep_constants(), **constants}

@@ -1,3 +1,4 @@
pip>=23.1.2
wheel>=0.40.0
setuptools>=68.0.0
pylint>=3.0.0

@@ -0,0 +1,103 @@
#! /bin/sh

# Run 'consensus-specs' tests from a docker container instance.
# *Be sure to launch Docker before running this script.*
#
# It does the below:
#   1. Run pytest for consensus-specs in a container.
#   2. Copy and paste the coverage report.
#   3. Remove all exited containers that use the consensus-specs:<TAG> images.


# Set variables
ALL_EXECUTABLE_SPECS=("phase0" "altair" "bellatrix" "capella" "deneb" "eip6110" "whisk")
TEST_PRESET_TYPE=minimal
FORK_TO_TEST=phase0
NUMBER_OF_CORES=4
WORKDIR="//consensus-specs//tests//core//pyspec"
ETH2SPEC_FOLDER_NAME="eth2spec"
CONTAINER_NAME="consensus-specs-tests"
DATE=$(date +"%Y%m%d-%H-%M")
# Default flag values
version=$(git log --pretty=format:'%h' -n 1)
IMAGE_NAME="consensus-specs:$version"
number_of_core=4

# Display the available options
display_help() {
    echo "Run 'consensus-specs' tests from a container instance."
    echo "Be sure to launch Docker before running this script."
    echo
    echo "Syntax: build_run_test.sh [--v TAG | --n NUMBER_OF_CORE | --f FORK_TO_TEST | --p PRESET_TYPE | --a | --h HELP]"
    echo "  --f <fork>        Specify the fork to test"
    echo "  --i <image_name>  Specify the docker image to use"
    echo "  --n <number>      Specify the number of cores"
    echo "  --p <type>        Specify the test preset type"
    echo "  --a               Test all forks"
    echo "  --h               Display this help and exit"
}

# Stop and remove the 'consensus-specs-tests' container.
# If this container doesn't exist, an error message is printed
# (but the process is not stopped).
cleanup() {
    echo "Stop and remove the 'consensus-specs-tests' container."
    docker stop $CONTAINER_NAME || true && docker rm $CONTAINER_NAME || true
}

# Copy the results from the container to a local folder
copy_test_results() {
    local fork_name="$1"  # Store the first argument in a variable

    docker cp $CONTAINER_NAME:$WORKDIR/test-reports/test_results.xml ./testResults/test-results-$fork_name-$DATE.xml
}

# Check if the Docker image already exists
image_exists() {
    docker images --format '{{.Repository}}:{{.Tag}}' | grep -q "$1"
}

# Parse command line arguments
while [[ "$#" -gt 0 ]]; do
    case $1 in
        --f) FORK_TO_TEST="$2"; shift ;;
        --v) IMAGE_NAME="$2"; shift ;;
        --n) NUMBER_OF_CORES="$2"; shift ;;
        --p) TEST_PRESET_TYPE="$2"; shift ;;
        --a) FORK_TO_TEST="all" ;;
        --h) display_help; exit 0 ;;
        *) echo "Unknown parameter: $1"; display_help; exit 1 ;;
    esac
    shift
done

# Initialize a test result directory
mkdir -p ./testResults

# Only clean up the container after the user exits the console
trap cleanup SIGINT

# Build the Docker image if it doesn't exist
if ! image_exists "$IMAGE_NAME"; then
    echo "Image $IMAGE_NAME does not exist. Building Docker image..."
    docker build ../ -t $IMAGE_NAME -f ../docker/Dockerfile
else
    echo "Image $IMAGE_NAME already exists. Skipping build..."
fi

# Equivalent to `make citest` with the subsequent flags
if [ "$FORK_TO_TEST" == "all" ]; then
    for fork in "${ALL_EXECUTABLE_SPECS[@]}"; do
        docker run --name $CONTAINER_NAME $IMAGE_NAME \
            make citest fork=$fork TEST_PRESET_TYPE=$TEST_PRESET_TYPE NUMBER_OF_CORES=$NUMBER_OF_CORES
        copy_test_results $fork
    done
else
    docker run --name $CONTAINER_NAME $IMAGE_NAME \
        make citest fork=$FORK_TO_TEST TEST_PRESET_TYPE=$TEST_PRESET_TYPE NUMBER_OF_CORES=$NUMBER_OF_CORES
    copy_test_results $FORK_TO_TEST
fi

# Stop and remove the container
cleanup

setup.py

@@ -93,6 +93,8 @@ def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]:
    base = class_def.bases[0]
    if isinstance(base, ast.Name):
        parent_class = base.id
    elif isinstance(base, ast.Subscript):
        parent_class = base.value.id
    else:
        # NOTE: SSZ definition derives from earlier phase...
        # e.g. `phase0.SignedBeaconBlock`

@@ -112,10 +114,22 @@ def _load_kzg_trusted_setups(preset_name):

    with open(trusted_setups_file_path, 'r') as f:
        json_data = json.load(f)
        trusted_setup_G1_monomial = json_data['g1_monomial']
        trusted_setup_G1_lagrange = json_data['g1_lagrange']
        trusted_setup_G2_monomial = json_data['g2_monomial']

    return trusted_setup_G2_monomial, trusted_setup_G1_lagrange
    return trusted_setup_G1_monomial, trusted_setup_G1_lagrange, trusted_setup_G2_monomial


def _load_curdleproofs_crs(preset_name):
    """
    NOTE: File generated from https://github.com/asn-d6/curdleproofs/blob/8e8bf6d4191fb6a844002f75666fb7009716319b/tests/crs.rs#L53-L67
    """
    file_path = str(Path(__file__).parent) + '/presets/' + preset_name + '/trusted_setups/curdleproofs_crs.json'

    with open(file_path, 'r') as f:
        json_data = json.load(f)

    return json_data


ALL_KZG_SETUPS = {

@@ -123,6 +137,11 @@ ALL_KZG_SETUPS = {
    'mainnet': _load_kzg_trusted_setups('mainnet')
}

ALL_CURDLEPROOFS_CRS = {
    'minimal': _load_curdleproofs_crs('minimal'),
    'mainnet': _load_curdleproofs_crs('mainnet'),
}


def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:
    _, _, title = child._parse_info

@@ -136,7 +155,7 @@ def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:

def _parse_value(name: str, typed_value: str, type_hint: Optional[str] = None) -> VariableDefinition:
    comment = None
    if name == "BLS12_381_Q":
    if name in ("ROOT_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_REDUCED"):
        comment = "noqa: E501"

    typed_value = typed_value.strip()

@@ -151,9 +170,10 @@ def _parse_value(name: str, typed_value: str, type_hint: Optional[str] = None) -> VariableDefinition:

def _update_constant_vars_with_kzg_setups(constant_vars, preset_name):
    comment = "noqa: E501"
    kzg_setups = ALL_KZG_SETUPS[preset_name]
    constant_vars['KZG_SETUP_G2_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G2_MONOMIAL'].value, str(kzg_setups[0]), comment, None)
    constant_vars['KZG_SETUP_G1_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G1_MONOMIAL'].value, str(kzg_setups[0]), comment, None)
    constant_vars['KZG_SETUP_G1_LAGRANGE'] = VariableDefinition(constant_vars['KZG_SETUP_G1_LAGRANGE'].value, str(kzg_setups[1]), comment, None)
    constant_vars['KZG_SETUP_G2_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G2_MONOMIAL'].value, str(kzg_setups[2]), comment, None)


def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name=str) -> SpecObject:
    functions: Dict[str, str] = {}

@@ -262,6 +282,13 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name=str) -> SpecObject:
    if any('KZG_SETUP' in name for name in constant_vars):
        _update_constant_vars_with_kzg_setups(constant_vars, preset_name)

    if any('CURDLEPROOFS_CRS' in name for name in constant_vars):
        constant_vars['CURDLEPROOFS_CRS'] = VariableDefinition(
            None,
            'curdleproofs.CurdleproofsCrs.from_json(json.dumps(' + str(ALL_CURDLEPROOFS_CRS[str(preset_name)]).replace('0x', '') + '))',
            "noqa: E501", None
        )

    return SpecObject(
        functions=functions,
        protocols=protocols,

@@ -0,0 +1,125 @@
# EIP7594 -- Fork Logic

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Configuration](#configuration)
- [Helper functions](#helper-functions)
  - [Misc](#misc)
    - [Modified `compute_fork_version`](#modified-compute_fork_version)
- [Fork to EIP7594](#fork-to-eip7594)
  - [Fork trigger](#fork-trigger)
  - [Upgrading the state](#upgrading-the-state)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## Introduction

This document describes the process of the EIP7594 upgrade.

## Configuration

Warning: this configuration is not definitive.

| Name | Value |
| - | - |
| `EIP7594_FORK_VERSION` | `Version('0x05000000')` |
| `EIP7594_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |

## Helper functions

### Misc

#### Modified `compute_fork_version`

```python
def compute_fork_version(epoch: Epoch) -> Version:
    """
    Return the fork version at the given ``epoch``.
    """
    if epoch >= EIP7594_FORK_EPOCH:
        return EIP7594_FORK_VERSION
    if epoch >= DENEB_FORK_EPOCH:
        return DENEB_FORK_VERSION
    if epoch >= CAPELLA_FORK_EPOCH:
        return CAPELLA_FORK_VERSION
    if epoch >= BELLATRIX_FORK_EPOCH:
        return BELLATRIX_FORK_VERSION
    if epoch >= ALTAIR_FORK_EPOCH:
        return ALTAIR_FORK_VERSION
    return GENESIS_FORK_VERSION
```

## Fork to EIP7594

### Fork trigger

EIP7594 does not need a hard fork. This fork document exists only so that the new feature can be compiled in pyspec.

For now, we assume the condition will be triggered at epoch `EIP7594_FORK_EPOCH`.

Note that for pure EIP7594 networks, we don't apply `upgrade_to_eip7594` since they start with the EIP7594 version logic.

### Upgrading the state

If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP7594_FORK_EPOCH`,
an irregular state change is made to upgrade to EIP7594.

```python
def upgrade_to_eip7594(pre: deneb.BeaconState) -> BeaconState:
    epoch = deneb.get_current_epoch(pre)
    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=EIP7594_FORK_VERSION,  # [Modified in EIP7594]
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=pre.previous_epoch_participation,
        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=pre.inactivity_scores,
        # Sync
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=pre.latest_execution_payload_header,
        # Withdrawals
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
        # Deep history valid from Capella onwards
        historical_summaries=pre.historical_summaries,
    )

    return post
```

@@ -0,0 +1,525 @@
# Deneb -- Polynomial Commitments

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
- [Preset](#preset)
  - [Cells](#cells)
- [Helper functions](#helper-functions)
  - [Linear combinations](#linear-combinations)
    - [`g2_lincomb`](#g2_lincomb)
  - [FFTs](#ffts)
    - [`_fft_field`](#_fft_field)
    - [`fft_field`](#fft_field)
  - [Polynomials in coefficient form](#polynomials-in-coefficient-form)
    - [`polynomial_eval_to_coeff`](#polynomial_eval_to_coeff)
    - [`add_polynomialcoeff`](#add_polynomialcoeff)
    - [`neg_polynomialcoeff`](#neg_polynomialcoeff)
    - [`multiply_polynomialcoeff`](#multiply_polynomialcoeff)
    - [`divide_polynomialcoeff`](#divide_polynomialcoeff)
    - [`shift_polynomialcoeff`](#shift_polynomialcoeff)
    - [`interpolate_polynomialcoeff`](#interpolate_polynomialcoeff)
    - [`vanishing_polynomialcoeff`](#vanishing_polynomialcoeff)
    - [`evaluate_polynomialcoeff`](#evaluate_polynomialcoeff)
  - [KZG multiproofs](#kzg-multiproofs)
    - [`compute_kzg_proof_multi_impl`](#compute_kzg_proof_multi_impl)
    - [`verify_kzg_proof_multi_impl`](#verify_kzg_proof_multi_impl)
  - [Cell cosets](#cell-cosets)
    - [`coset_for_cell`](#coset_for_cell)
- [Cells](#cells-1)
  - [Cell computation](#cell-computation)
    - [`compute_cells_and_proofs`](#compute_cells_and_proofs)
    - [`compute_cells`](#compute_cells)
  - [Cell verification](#cell-verification)
    - [`verify_cell_proof`](#verify_cell_proof)
    - [`verify_cell_proof_batch`](#verify_cell_proof_batch)
- [Reconstruction](#reconstruction)
  - [`recover_polynomial`](#recover_polynomial)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document extends [polynomial-commitments.md](polynomial-commitments.md) with the functions required for data availability sampling (DAS). It is not part of the core Deneb spec but an extension that can be optionally implemented to allow nodes to reduce their load using DAS.

For any KZG library extended to support DAS, functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library.

Public functions MUST accept raw bytes as input and perform the required cryptographic normalization before invoking any internal functions.

## Custom types

| Name | SSZ equivalent | Description |
| - | - | - |
| `PolynomialCoeff` | `List[BLSFieldElement, 2 * FIELD_ELEMENTS_PER_BLOB]` | A polynomial in coefficient form |
| `Cell` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_CELL]` | The unit of blob data that can come with its own KZG proof |
| `CellID` | `uint64` | Cell identifier |

## Constants

| Name | Value | Notes |
| - | - | - |

## Preset

### Cells

Cells are the smallest unit of blob data that can come with their own KZG proofs. Samples can be constructed from one or several cells (e.g. an individual cell or line).

| Name | Value | Description |
| - | - | - |
| `FIELD_ELEMENTS_PER_CELL` | `uint64(64)` | Number of field elements in a cell |
| `BYTES_PER_CELL` | `FIELD_ELEMENTS_PER_CELL * BYTES_PER_FIELD_ELEMENT` | The number of bytes in a cell |
| `CELLS_PER_BLOB` | `((2 * FIELD_ELEMENTS_PER_BLOB) // FIELD_ELEMENTS_PER_CELL)` | The number of cells in a blob |
| `RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN` | `b'RCKZGCBATCH__V1_'` | |
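
As a worked example: with the mainnet value `FIELD_ELEMENTS_PER_BLOB = 4096` and `BYTES_PER_FIELD_ELEMENT = 32` (both from the base polynomial commitments spec), `CELLS_PER_BLOB = (2 * 4096) // 64 = 128`, and each cell holds `64 * 32 = 2048` bytes.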

## Helper functions

### Linear combinations

#### `g2_lincomb`

```python
def g2_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElement]) -> Bytes96:
    """
    BLS multiscalar multiplication in G2. This function can be optimized using Pippenger's algorithm and variants.
    """
    assert len(points) == len(scalars)
    result = bls.Z2()
    for x, a in zip(points, scalars):
        result = bls.add(result, bls.multiply(bls.bytes96_to_G2(x), a))
    return Bytes96(bls.G2_to_bytes96(result))
```

### FFTs

#### `_fft_field`

```python
def _fft_field(vals: Sequence[BLSFieldElement],
               roots_of_unity: Sequence[BLSFieldElement]) -> Sequence[BLSFieldElement]:
    if len(vals) == 1:
        return vals
    L = _fft_field(vals[::2], roots_of_unity[::2])
    R = _fft_field(vals[1::2], roots_of_unity[::2])
    o = [BLSFieldElement(0) for _ in vals]
    for i, (x, y) in enumerate(zip(L, R)):
        y_times_root = (int(y) * int(roots_of_unity[i])) % BLS_MODULUS
        o[i] = BLSFieldElement((int(x) + y_times_root) % BLS_MODULUS)
        o[i + len(L)] = BLSFieldElement((int(x) - y_times_root + BLS_MODULUS) % BLS_MODULUS)
    return o
```
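
To make the butterfly concrete, here is a standalone sketch (not spec code) of the same radix-2 recursion over the small prime field GF(17), where 4 is a primitive 4th root of unity, checked against naive evaluation at each root:

```python
# Standalone illustration of the `_fft_field` recursion over GF(17).
MODULUS = 17

def fft(vals, roots):
    if len(vals) == 1:
        return vals
    L = fft(vals[::2], roots[::2])
    R = fft(vals[1::2], roots[::2])
    out = [0] * len(vals)
    for i, (x, y) in enumerate(zip(L, R)):
        y_times_root = y * roots[i] % MODULUS
        out[i] = (x + y_times_root) % MODULUS
        out[i + len(L)] = (x - y_times_root) % MODULUS
    return out

roots = [pow(4, i, MODULUS) for i in range(4)]  # [1, 4, 16, 13]
coeffs = [1, 2, 3, 4]                           # f(x) = 1 + 2x + 3x^2 + 4x^3
# The FFT of the coefficient vector equals evaluating f at each root.
naive = [sum(c * pow(r, j, MODULUS) for j, c in enumerate(coeffs)) % MODULUS
         for r in roots]
assert fft(coeffs, roots) == naive  # both are [10, 7, 15, 6]
```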

#### `fft_field`

```python
def fft_field(vals: Sequence[BLSFieldElement],
              roots_of_unity: Sequence[BLSFieldElement],
              inv: bool=False) -> Sequence[BLSFieldElement]:
    if inv:
        # Inverse FFT
        invlen = pow(len(vals), BLS_MODULUS - 2, BLS_MODULUS)
        return [BLSFieldElement((int(x) * invlen) % BLS_MODULUS)
                for x in _fft_field(vals, list(roots_of_unity[0:1]) + list(roots_of_unity[:0:-1]))]
    else:
        # Regular FFT
        return _fft_field(vals, roots_of_unity)
```

### Polynomials in coefficient form

#### `polynomial_eval_to_coeff`

```python
def polynomial_eval_to_coeff(polynomial: Polynomial) -> PolynomialCoeff:
    """
    Interpolates a polynomial (given in evaluation form) to a polynomial in coefficient form.
    """
    roots_of_unity = compute_roots_of_unity(FIELD_ELEMENTS_PER_BLOB)
    polynomial_coeff = fft_field(bit_reversal_permutation(list(polynomial)), roots_of_unity, inv=True)

    return polynomial_coeff
```

#### `add_polynomialcoeff`

```python
def add_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff:
    """
    Sum the coefficient form polynomials ``a`` and ``b``.
    """
    a, b = (a, b) if len(a) >= len(b) else (b, a)
    return [(a[i] + (b[i] if i < len(b) else 0)) % BLS_MODULUS for i in range(len(a))]
```

#### `neg_polynomialcoeff`

```python
def neg_polynomialcoeff(a: PolynomialCoeff) -> PolynomialCoeff:
    """
    Negative of coefficient form polynomial ``a``
    """
    return [(BLS_MODULUS - x) % BLS_MODULUS for x in a]
```
#### `multiply_polynomialcoeff`
|
||||
|
||||
```python
|
||||
def multiply_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff:
|
||||
"""
|
||||
Multiplies the coefficient form polynomials ``a`` and ``b``
|
||||
"""
|
||||
r = [0]
|
||||
for power, coef in enumerate(a):
|
||||
summand = [0] * power + [int(coef) * int(x) % BLS_MODULUS for x in b]
|
||||
r = add_polynomialcoeff(r, summand)
|
||||
return r
|
||||
```

#### `divide_polynomialcoeff`

```python
def divide_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff:
    """
    Long polynomial division for two coefficient form polynomials ``a`` and ``b``.
    """
    a = [x for x in a]  # copy, since ``a`` is mutated below
    o = []
    apos = len(a) - 1
    bpos = len(b) - 1
    diff = apos - bpos
    while diff >= 0:
        quot = div(a[apos], b[bpos])
        o.insert(0, quot)
        for i in range(bpos, -1, -1):
            a[diff + i] = (int(a[diff + i]) - int(b[i]) * int(quot)) % BLS_MODULUS
        apos -= 1
        diff -= 1
    return [x % BLS_MODULUS for x in o]
```

#### `shift_polynomialcoeff`

```python
def shift_polynomialcoeff(polynomial_coeff: PolynomialCoeff, factor: BLSFieldElement) -> PolynomialCoeff:
    """
    Shift the evaluation of a polynomial in coefficient form by ``factor``.
    Multiplying coefficient ``i`` by ``factor**(-i)`` results in a new polynomial g(x) = f(x / factor).
    """
    factor_power = 1
    inv_factor = pow(int(factor), BLS_MODULUS - 2, BLS_MODULUS)
    o = []
    for p in polynomial_coeff:
        o.append(int(p) * factor_power % BLS_MODULUS)
        factor_power = factor_power * inv_factor % BLS_MODULUS
    return o
```

#### `interpolate_polynomialcoeff`

```python
def interpolate_polynomialcoeff(xs: Sequence[BLSFieldElement], ys: Sequence[BLSFieldElement]) -> PolynomialCoeff:
    """
    Lagrange interpolation: Finds the lowest degree polynomial that takes the value ``ys[i]`` at ``xs[i]``
    for all i.
    Outputs a coefficient form polynomial. Leading coefficients may be zero.
    """
    assert len(xs) == len(ys)
    r = [0]

    for i in range(len(xs)):
        summand = [ys[i]]
        for j in range(len(ys)):
            if j != i:
                weight_adjustment = bls_modular_inverse(int(xs[i]) - int(xs[j]))
                summand = multiply_polynomialcoeff(
                    summand, [(- int(weight_adjustment) * int(xs[j])) % BLS_MODULUS, weight_adjustment]
                )
        r = add_polynomialcoeff(r, summand)

    return r
```
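
A correct interpolant must reproduce every sample, and the construction above yields exactly `len(xs)` coefficients. A small property check illustrating both facts -- a sketch, not part of the spec, using `evaluate_polynomialcoeff` defined below:

```python
def check_interpolation(xs: Sequence[BLSFieldElement], ys: Sequence[BLSFieldElement]) -> None:
    polynomial_coeff = interpolate_polynomialcoeff(xs, ys)
    # Degree stays below the number of points, and every sample is matched
    assert len(polynomial_coeff) <= len(xs)
    for x, y in zip(xs, ys):
        assert evaluate_polynomialcoeff(polynomial_coeff, x) == y
```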

#### `vanishing_polynomialcoeff`

```python
def vanishing_polynomialcoeff(xs: Sequence[BLSFieldElement]) -> PolynomialCoeff:
    """
    Compute the vanishing polynomial on ``xs`` (in coefficient form).
    """
    p = [1]
    for x in xs:
        p = multiply_polynomialcoeff(p, [-int(x), 1])
    return p
```

#### `evaluate_polynomialcoeff`

```python
def evaluate_polynomialcoeff(polynomial_coeff: PolynomialCoeff, z: BLSFieldElement) -> BLSFieldElement:
    """
    Evaluate a coefficient form polynomial at ``z`` using Horner's method.
    """
    y = 0
    for coef in polynomial_coeff[::-1]:
        y = (int(y) * int(z) + int(coef)) % BLS_MODULUS
    return BLSFieldElement(y % BLS_MODULUS)
```

### KZG multiproofs

Extended KZG functions for multiproofs.

#### `compute_kzg_proof_multi_impl`

```python
def compute_kzg_proof_multi_impl(
        polynomial_coeff: PolynomialCoeff,
        zs: Sequence[BLSFieldElement]) -> Tuple[KZGProof, Sequence[BLSFieldElement]]:
    """
    Helper function that computes multi-evaluation KZG proofs.
    """

    # Compute the interpolation polynomial I(x) through the points (z_i, p(z_i))
    ys = [evaluate_polynomialcoeff(polynomial_coeff, z) for z in zs]
    interpolation_polynomial = interpolate_polynomialcoeff(zs, ys)
    polynomial_shifted = add_polynomialcoeff(polynomial_coeff, neg_polynomialcoeff(interpolation_polynomial))

    # Compute the vanishing polynomial Z(x) = prod(x - z_i) over all z_i
    denominator_poly = vanishing_polynomialcoeff(zs)

    # Compute the quotient polynomial q(x) = (p(x) - I(x)) / Z(x) directly in coefficient form
    quotient_polynomial = divide_polynomialcoeff(polynomial_shifted, denominator_poly)

    return KZGProof(g1_lincomb(KZG_SETUP_G1_MONOMIAL[:len(quotient_polynomial)], quotient_polynomial)), ys
```

#### `verify_kzg_proof_multi_impl`

```python
def verify_kzg_proof_multi_impl(commitment: KZGCommitment,
                                zs: Sequence[BLSFieldElement],
                                ys: Sequence[BLSFieldElement],
                                proof: KZGProof) -> bool:
    """
    Helper function that verifies a KZG multiproof.
    """

    assert len(zs) == len(ys)

    zero_poly = g2_lincomb(KZG_SETUP_G2_MONOMIAL[:len(zs) + 1], vanishing_polynomialcoeff(zs))
    interpolated_poly = g1_lincomb(KZG_SETUP_G1_MONOMIAL[:len(zs)], interpolate_polynomialcoeff(zs, ys))

    return bls.pairing_check([
        [bls.bytes48_to_G1(proof), bls.bytes96_to_G2(zero_poly)],
        [
            bls.add(bls.bytes48_to_G1(commitment), bls.neg(bls.bytes48_to_G1(interpolated_poly))),
            bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[0])),
        ],
    ])
```
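
The two helpers are designed to round-trip: a proof computed over a coset verifies against the monomial-basis commitment of the same polynomial. A sketch, assuming the Deneb helpers `blob_to_polynomial` and `g1_lincomb` are in scope and using `coset_for_cell` from the next section:

```python
def check_multiproof_roundtrip(blob: Blob, cell_id: int) -> None:
    polynomial = blob_to_polynomial(blob)
    polynomial_coeff = polynomial_eval_to_coeff(polynomial)
    # Commit to the polynomial in the monomial basis
    commitment = g1_lincomb(KZG_SETUP_G1_MONOMIAL[:len(polynomial_coeff)], polynomial_coeff)
    # Prove and then verify all evaluations on one cell's coset
    coset = coset_for_cell(cell_id)
    proof, ys = compute_kzg_proof_multi_impl(polynomial_coeff, coset)
    assert verify_kzg_proof_multi_impl(commitment, coset, ys, proof)
```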

### Cell cosets

#### `coset_for_cell`

```python
def coset_for_cell(cell_id: int) -> Cell:
    """
    Get the coset for a given ``cell_id``.
    """
    assert cell_id < CELLS_PER_BLOB
    roots_of_unity_brp = bit_reversal_permutation(
        compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB)
    )
    return Cell(roots_of_unity_brp[FIELD_ELEMENTS_PER_CELL * cell_id:FIELD_ELEMENTS_PER_CELL * (cell_id + 1)])
```

## Cells

### Cell computation

#### `compute_cells_and_proofs`

```python
def compute_cells_and_proofs(blob: Blob) -> Tuple[
        Vector[Cell, CELLS_PER_BLOB],
        Vector[KZGProof, CELLS_PER_BLOB]]:
    """
    Compute all the cell proofs for one blob. This is an inefficient O(n^2) algorithm;
    for a performant implementation, the FK20 algorithm, which runs in O(n log n),
    should be used instead.

    Public method.
    """
    polynomial = blob_to_polynomial(blob)
    polynomial_coeff = polynomial_eval_to_coeff(polynomial)

    cells = []
    proofs = []

    for i in range(CELLS_PER_BLOB):
        coset = coset_for_cell(i)
        proof, ys = compute_kzg_proof_multi_impl(polynomial_coeff, coset)
        cells.append(ys)
        proofs.append(proof)

    return cells, proofs
```

#### `compute_cells`

```python
def compute_cells(blob: Blob) -> Vector[Cell, CELLS_PER_BLOB]:
    """
    Compute the cell data for a blob (without computing the proofs).

    Public method.
    """
    polynomial = blob_to_polynomial(blob)
    polynomial_coeff = polynomial_eval_to_coeff(polynomial)

    extended_data = fft_field(polynomial_coeff + [0] * FIELD_ELEMENTS_PER_BLOB,
                              compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB))
    extended_data_rbo = bit_reversal_permutation(extended_data)
    return [extended_data_rbo[i * FIELD_ELEMENTS_PER_CELL:(i + 1) * FIELD_ELEMENTS_PER_CELL]
            for i in range(CELLS_PER_BLOB)]
```

### Cell verification

#### `verify_cell_proof`

```python
def verify_cell_proof(commitment: KZGCommitment,
                      cell_id: int,
                      cell: Cell,
                      proof: KZGProof) -> bool:
    """
    Check a cell proof.

    Public method.
    """
    coset = coset_for_cell(cell_id)

    return verify_kzg_proof_multi_impl(commitment, coset, cell, proof)
```

#### `verify_cell_proof_batch`

```python
def verify_cell_proof_batch(row_commitments: Sequence[KZGCommitment],
                            row_ids: Sequence[int],
                            column_ids: Sequence[int],
                            cells: Sequence[Cell],
                            proofs: Sequence[KZGProof]) -> bool:
    """
    Check multiple cell proofs. This function implements the naive algorithm of checking every cell
    individually; an efficient algorithm can be found here:
    https://ethresear.ch/t/a-universal-verification-equation-for-data-availability-sampling/13240

    This implementation does not require randomness, but for the algorithm that
    requires it, `RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN` should be used to compute
    the challenge value.

    Public method.
    """

    # Get commitments via row IDs
    commitments = [row_commitments[row_id] for row_id in row_ids]

    return all(
        verify_kzg_proof_multi_impl(commitment, coset_for_cell(column_id), cell, proof)
        for commitment, column_id, cell, proof in zip(commitments, column_ids, cells, proofs)
    )
```
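
For the batched algorithm referenced in the docstring, the verifier derives its random challenge by hashing the entire statement under the `RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN` separator. One plausible derivation -- a hypothetical helper, not specified here -- reusing the Deneb helpers `hash_to_bls_field`, `BYTES_PER_FIELD_ELEMENT` and `KZG_ENDIANNESS`:

```python
def compute_cell_batch_challenge(row_commitments: Sequence[KZGCommitment],
                                 row_ids: Sequence[int],
                                 column_ids: Sequence[int],
                                 cells: Sequence[Cell],
                                 proofs: Sequence[KZGProof]) -> BLSFieldElement:
    # Hypothetical helper: bind the challenge to every input of the batch claim
    data = RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN
    for commitment in row_commitments:
        data += commitment
    for row_id, column_id, cell, proof in zip(row_ids, column_ids, cells, proofs):
        data += int.to_bytes(row_id, 8, KZG_ENDIANNESS)
        data += int.to_bytes(column_id, 8, KZG_ENDIANNESS)
        for field_element in cell:
            data += int.to_bytes(int(field_element), BYTES_PER_FIELD_ELEMENT, KZG_ENDIANNESS)
        data += proof
    return hash_to_bls_field(data)
```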

## Reconstruction

### `recover_polynomial`

```python
def recover_polynomial(cell_ids: Sequence[CellID], cells: Sequence[Cell]) -> Polynomial:
    """
    Recover the polynomial from 2 * FIELD_ELEMENTS_PER_BLOB evaluations, half of which can be missing.

    This algorithm uses FFTs to recover cells faster than using a Lagrange interpolation implementation.
    However, a faster version thanks to Qi Zhou can be found here:
    https://github.com/ethereum/research/blob/51b530a53bd4147d123ab3e390a9d08605c2cdb8/polynomial_reconstruction/polynomial_reconstruction_danksharding.py

    Public method.
    """
    assert len(cell_ids) == len(cells)
    assert len(cells) >= CELLS_PER_BLOB // 2
    missing_cell_ids = [cell_id for cell_id in range(CELLS_PER_BLOB) if cell_id not in cell_ids]
    roots_of_unity_reduced = compute_roots_of_unity(CELLS_PER_BLOB)
    short_zero_poly = vanishing_polynomialcoeff([
        roots_of_unity_reduced[reverse_bits(cell_id, CELLS_PER_BLOB)]
        for cell_id in missing_cell_ids
    ])

    full_zero_poly = []
    for i in short_zero_poly:
        full_zero_poly.append(i)
        full_zero_poly.extend([0] * (FIELD_ELEMENTS_PER_CELL - 1))
    full_zero_poly = full_zero_poly + [0] * (2 * FIELD_ELEMENTS_PER_BLOB - len(full_zero_poly))

    zero_poly_eval = fft_field(full_zero_poly,
                               compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB))
    zero_poly_eval_brp = bit_reversal_permutation(zero_poly_eval)
    for cell_id in missing_cell_ids:
        start = cell_id * FIELD_ELEMENTS_PER_CELL
        end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
        assert zero_poly_eval_brp[start:end] == [0] * FIELD_ELEMENTS_PER_CELL
    for cell_id in cell_ids:
        start = cell_id * FIELD_ELEMENTS_PER_CELL
        end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
        assert all(a != 0 for a in zero_poly_eval_brp[start:end])

    extended_evaluation_rbo = [0] * (FIELD_ELEMENTS_PER_BLOB * 2)
    for cell_id, cell in zip(cell_ids, cells):
        start = cell_id * FIELD_ELEMENTS_PER_CELL
        end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
        extended_evaluation_rbo[start:end] = cell
    extended_evaluation = bit_reversal_permutation(extended_evaluation_rbo)

    extended_evaluation_times_zero = [BLSFieldElement(int(a) * int(b) % BLS_MODULUS)
                                      for a, b in zip(zero_poly_eval, extended_evaluation)]

    roots_of_unity_extended = compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB)

    extended_evaluations_fft = fft_field(extended_evaluation_times_zero, roots_of_unity_extended, inv=True)

    # Shift the polynomials onto a coset, where the zero polynomial has no roots
    shift_factor = BLSFieldElement(PRIMITIVE_ROOT_OF_UNITY)
    shift_inv = div(BLSFieldElement(1), shift_factor)

    shifted_extended_evaluation = shift_polynomialcoeff(extended_evaluations_fft, shift_factor)
    shifted_zero_poly = shift_polynomialcoeff(full_zero_poly, shift_factor)

    eval_shifted_extended_evaluation = fft_field(shifted_extended_evaluation, roots_of_unity_extended)
    eval_shifted_zero_poly = fft_field(shifted_zero_poly, roots_of_unity_extended)

    eval_shifted_reconstructed_poly = [
        div(a, b)
        for a, b in zip(eval_shifted_extended_evaluation, eval_shifted_zero_poly)
    ]

    shifted_reconstructed_poly = fft_field(eval_shifted_reconstructed_poly, roots_of_unity_extended, inv=True)

    reconstructed_poly = shift_polynomialcoeff(shifted_reconstructed_poly, shift_inv)

    reconstructed_data = bit_reversal_permutation(fft_field(reconstructed_poly, roots_of_unity_extended))

    for cell_id, cell in zip(cell_ids, cells):
        start = cell_id * FIELD_ELEMENTS_PER_CELL
        end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
        assert reconstructed_data[start:end] == cell

    return reconstructed_data
```
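
A usage sketch tying reconstruction back to `compute_cells`: drop any half of the cells and the remainder suffices to rebuild the full extended blob (assumes the helpers above in scope):

```python
def check_recovery(blob: Blob) -> None:
    cells = compute_cells(blob)
    # Keep only the even-indexed half of the cells
    cell_ids = [cell_id for cell_id in range(CELLS_PER_BLOB) if cell_id % 2 == 0]
    known_cells = [cells[cell_id] for cell_id in cell_ids]
    reconstructed_data = recover_polynomial(cell_ids, known_cells)
    # Every cell of the extended blob is reproduced, including the dropped ones
    for cell_id in range(CELLS_PER_BLOB):
        start = cell_id * FIELD_ELEMENTS_PER_CELL
        end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
        assert reconstructed_data[start:end] == cells[cell_id]
```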

@@ -58,6 +58,7 @@ This document specifies basic polynomial operations and KZG polynomial commitmen

 | `BLS_MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) |
+| `PRIMITIVE_ROOT_OF_UNITY` | `7` | Primitive root of unity of the BLS12_381 (inner) BLS_MODULUS |

 ### KZG Trusted setup

 | Name | Value |

@@ -103,7 +104,7 @@ def reverse_bit_order(n: int, order: int) -> int:

 ```python
 def list_to_reverse_bit_order(l: List[int]) -> List[int]:
     """
-    Convert a list between normal and reverse bit order. This operation is idempotent.
+    Convert a list between normal and reverse bit order. The permutation is an involution (inverts itself).
     """
     return [l[reverse_bit_order(i, len(l))] for i in range(len(l))]
 ```

@@ -34,7 +34,7 @@

 This document details the beacon chain additions and changes to support the Whisk SSLE.

-*Note:* This specification is built upon [Capella](../../capella/beacon-chain.md) and is under active development.
+*Note:* This specification is built upon [capella](../../capella/beacon-chain.md) and is under active development.

 ## Constants

@@ -89,14 +89,14 @@ def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
     TODO: Deneb will introduce this helper too. Should delete it once it's rebased to post-Deneb.
     """
     field_element = int.from_bytes(b, ENDIANNESS)
-    assert field_element < BLS_MODULUS
-    return BLSFieldElement(field_element)
+    return BLSFieldElement(field_element % BLS_MODULUS)
 ```

-| Name | Value |
-| ------------------ | ------------------------------------------------------------------------------- |
-| `BLS_G1_GENERATOR` | `bls.G1_to_bytes48(bls.G1)` |
-| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` |
+| Name | Value |
+| --------------------- | ------------------------------------------------------------------------------- |
+| `BLS_G1_GENERATOR` | `BLSG1Point('0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb')  # noqa: E501` |
+| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` |
+| `CURDLEPROOFS_CRS` | TBD |
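
Note on the `bytes_to_bls_field` change above: the helper is now total on arbitrary 32-byte input, reducing modulo the field order instead of rejecting values at or above `BLS_MODULUS`. A sketch of the observable difference:

```python
b = bytes([0xFF] * 32)  # encodes an integer far above BLS_MODULUS
# Previously this input failed the `assert field_element < BLS_MODULUS`;
# with the modular reduction it maps to a valid field element instead.
assert int(bytes_to_bls_field(b)) == int.from_bytes(b, ENDIANNESS) % BLS_MODULUS
```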

### Curdleproofs and opening proofs

@@ -105,14 +105,17 @@ Note that Curdleproofs (Whisk Shuffle Proofs), the tracker opening proofs and al

 ```python
 def IsValidWhiskShuffleProof(pre_shuffle_trackers: Sequence[WhiskTracker],
                              post_shuffle_trackers: Sequence[WhiskTracker],
-                             M: BLSG1Point,
                              shuffle_proof: WhiskShuffleProof) -> bool:
     """
     Verify `post_shuffle_trackers` is a permutation of `pre_shuffle_trackers`.
     Defined in https://github.com/nalinbhardwaj/curdleproofs.pie/blob/dev/curdleproofs/curdleproofs/whisk_interface.py.
     """
-    # pylint: disable=unused-argument
-    return True
+    return curdleproofs.IsValidWhiskShuffleProof(
+        CURDLEPROOFS_CRS,
+        pre_shuffle_trackers,
+        post_shuffle_trackers,
+        shuffle_proof,
+    )
 ```

 ```python

@@ -123,8 +126,7 @@ def IsValidWhiskOpeningProof(tracker: WhiskTracker,
     Verify knowledge of `k` such that `tracker.k_r_G == k * tracker.r_G` and `k_commitment == k * BLS_G1_GENERATOR`.
     Defined in https://github.com/nalinbhardwaj/curdleproofs.pie/blob/dev/curdleproofs/curdleproofs/whisk_interface.py.
     """
-    # pylint: disable=unused-argument
-    return True
+    return curdleproofs.IsValidWhiskOpeningProof(tracker, k_commitment, tracker_proof)
 ```

 ## Epoch processing

@@ -282,7 +284,7 @@ def process_block_header(state: BeaconState, block: BeaconBlock) -> None:

 #### `BeaconBlockBody`

 ```python
-class BeaconBlockBody(capella.BeaconBlockBody):
+class BeaconBlockBody(Container):
     randao_reveal: BLSSignature
     eth1_data: Eth1Data  # Eth1 data vote
     graffiti: Bytes32  # Arbitrary data

@@ -295,13 +297,11 @@ class BeaconBlockBody(capella.BeaconBlockBody):
     sync_aggregate: SyncAggregate
     # Execution
     execution_payload: ExecutionPayload
     # Capella operations
     bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
     # Whisk
     whisk_opening_proof: WhiskTrackerProof  # [New in Whisk]
     whisk_post_shuffle_trackers: Vector[WhiskTracker, WHISK_VALIDATORS_PER_SHUFFLE]  # [New in Whisk]
     whisk_shuffle_proof: WhiskShuffleProof  # [New in Whisk]
-    whisk_shuffle_proof_M_commitment: BLSG1Point  # [New in Whisk]
     whisk_registration_proof: WhiskTrackerProof  # [New in Whisk]
     whisk_tracker: WhiskTracker  # [New in Whisk]
     whisk_k_commitment: BLSG1Point  # k * BLS_G1_GENERATOR [New in Whisk]

@@ -328,7 +328,6 @@ def process_shuffled_trackers(state: BeaconState, body: BeaconBlockBody) -> None
     if shuffle_epoch + WHISK_PROPOSER_SELECTION_GAP + 1 >= WHISK_EPOCHS_PER_SHUFFLING_PHASE:
         # Require trackers set to zero during cooldown
         assert body.whisk_post_shuffle_trackers == Vector[WhiskTracker, WHISK_VALIDATORS_PER_SHUFFLE]()
-        assert body.whisk_shuffle_proof_M_commitment == BLSG1Point()
         assert body.whisk_shuffle_proof == WhiskShuffleProof()
     else:
         # Require shuffled trackers during shuffle

@@ -337,7 +336,6 @@ def process_shuffled_trackers(state: BeaconState, body: BeaconBlockBody) -> None
         assert IsValidWhiskShuffleProof(
             pre_shuffle_trackers,
             body.whisk_post_shuffle_trackers,
-            body.whisk_shuffle_proof_M_commitment,
             body.whisk_shuffle_proof,
         )
         # Shuffle candidate trackers

@@ -351,12 +349,9 @@ def is_k_commitment_unique(state: BeaconState, k_commitment: BLSG1Point) -> bool
 ```

 ```python
-def process_whisk(state: BeaconState, body: BeaconBlockBody) -> None:
-    process_shuffled_trackers(state, body)
-
-    # Overwrite all validator Whisk fields (first Whisk proposal) or just the permutation commitment (next proposals)
-    proposer = state.validators[get_beacon_proposer_index(state)]
-    if proposer.whisk_tracker.r_G == BLS_G1_GENERATOR:  # first Whisk proposal
+def process_whisk_registration(state: BeaconState, body: BeaconBlockBody) -> None:
+    proposer_index = get_beacon_proposer_index(state)
+    if state.whisk_trackers[proposer_index].r_G == BLS_G1_GENERATOR:  # first Whisk proposal
         assert body.whisk_tracker.r_G != BLS_G1_GENERATOR
         assert is_k_commitment_unique(state, body.whisk_k_commitment)
         assert IsValidWhiskOpeningProof(

@@ -364,26 +359,25 @@ def process_whisk(state: BeaconState, body: BeaconBlockBody) -> None:
             body.whisk_k_commitment,
             body.whisk_registration_proof,
         )
-        proposer.whisk_tracker = body.whisk_tracker
-        proposer.whisk_k_commitment = body.whisk_k_commitment
+        state.whisk_trackers[proposer_index] = body.whisk_tracker
+        state.whisk_k_commitments[proposer_index] = body.whisk_k_commitment
     else:  # next Whisk proposals
         assert body.whisk_registration_proof == WhiskTrackerProof()
         assert body.whisk_tracker == WhiskTracker()
         assert body.whisk_k_commitment == BLSG1Point()
-        assert body.whisk_shuffle_proof_M_commitment == BLSG1Point()
 ```

 ```python
 def process_block(state: BeaconState, block: BeaconBlock) -> None:
     process_block_header(state, block)
-    if is_execution_enabled(state, block.body):
-        process_withdrawals(state, block.body.execution_payload)
-        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)
+    process_withdrawals(state, block.body.execution_payload)
+    process_execution_payload(state, block.body, EXECUTION_ENGINE)
     process_randao(state, block.body)
     process_eth1_data(state, block.body)
     process_operations(state, block.body)
     process_sync_aggregate(state, block.body.sync_aggregate)
-    process_whisk(state, block.body)  # [New in Whisk]
+    process_shuffled_trackers(state, block.body)  # [New in Whisk]
+    process_whisk_registration(state, block.body)  # [New in Whisk]
 ```

 ### Deposits

@@ -75,7 +75,8 @@ def create_light_client_bootstrap(state: BeaconState,
     return LightClientBootstrap(
         header=block_to_light_client_header(block),
         current_sync_committee=state.current_sync_committee,
-        current_sync_committee_branch=compute_merkle_proof(state, CURRENT_SYNC_COMMITTEE_INDEX),
+        current_sync_committee_branch=CurrentSyncCommitteeBranch(
+            compute_merkle_proof(state, CURRENT_SYNC_COMMITTEE_GINDEX)),
     )
 ```

@@ -122,7 +123,8 @@ def create_light_client_update(state: BeaconState,
     # `next_sync_committee` is only useful if the message is signed by the current sync committee
     if update_attested_period == update_signature_period:
         update.next_sync_committee = attested_state.next_sync_committee
-        update.next_sync_committee_branch = compute_merkle_proof(attested_state, NEXT_SYNC_COMMITTEE_INDEX)
+        update.next_sync_committee_branch = NextSyncCommitteeBranch(
+            compute_merkle_proof(attested_state, NEXT_SYNC_COMMITTEE_GINDEX))

     # Indicate finality whenever possible
     if finalized_block is not None:

@@ -131,7 +133,8 @@ def create_light_client_update(state: BeaconState,
         assert hash_tree_root(update.finalized_header.beacon) == attested_state.finalized_checkpoint.root
     else:
         assert attested_state.finalized_checkpoint.root == Bytes32()
-        update.finality_branch = compute_merkle_proof(attested_state, FINALIZED_ROOT_INDEX)
+        update.finality_branch = FinalityBranch(
+            compute_merkle_proof(attested_state, FINALIZED_ROOT_GINDEX))

     update.sync_aggregate = block.message.body.sync_aggregate
     update.signature_slot = block.message.slot

@@ -158,7 +161,7 @@ def create_light_client_finality_update(update: LightClientUpdate) -> LightClien
     )
 ```

-Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes.
+Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes. If that `LightClientFinalityUpdate` does not have supermajority (> 2/3) sync committee participation, a second `LightClientFinalityUpdate` SHOULD be delivered for the same `finalized_header` once supermajority participation is obtained.
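
A helper that a server might use for the supermajority condition -- hypothetical name, shown only to pin down the "> 2/3" arithmetic on the `sync_aggregate` participation bits:

```python
def has_supermajority_participation(finality_update: LightClientFinalityUpdate) -> bool:
    sync_aggregate = finality_update.sync_aggregate
    num_active_participants = sum(sync_aggregate.sync_committee_bits)
    # Strictly more than two thirds of the sync committee signed
    return num_active_participants * 3 > len(sync_aggregate.sync_committee_bits) * 2
```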

### `create_light_client_optimistic_update`

@@ -59,7 +59,7 @@ New global topics are added to provide light clients with the latest updates.

 This topic is used to propagate the latest `LightClientFinalityUpdate` to light clients, allowing them to keep track of the latest `finalized_header`.

 The following validations MUST pass before forwarding the `finality_update` on the network.
-- _[IGNORE]_ The `finalized_header.beacon.slot` is greater than that of all previously forwarded `finality_update`s
+- _[IGNORE]_ The `finalized_header.beacon.slot` is greater than that of all previously forwarded `finality_update`s, or it matches the highest previously forwarded slot and also has a `sync_aggregate` indicating supermajority (> 2/3) sync committee participation while the previously forwarded `finality_update` for that slot did not indicate supermajority
 - _[IGNORE]_ The `finality_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `finality_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)

 For full nodes, the following validations MUST additionally pass before forwarding the `finality_update` on the network.

@@ -9,6 +9,7 @@

 <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

 - [Introduction](#introduction)
+- [Custom types](#custom-types)
 - [Constants](#constants)
 - [Preset](#preset)
   - [Misc](#misc)

@@ -56,13 +57,21 @@ Additional documents describe how the light client sync protocol can be used:
 - [Light client](./light-client.md)
 - [Networking](./p2p-interface.md)

+## Custom types
+
+| Name | SSZ equivalent | Description |
+| - | - | - |
+| `FinalityBranch` | `Vector[Bytes32, floorlog2(FINALIZED_ROOT_GINDEX)]` | Merkle branch of `finalized_checkpoint.root` within `BeaconState` |
+| `CurrentSyncCommitteeBranch` | `Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX)]` | Merkle branch of `current_sync_committee` within `BeaconState` |
+| `NextSyncCommitteeBranch` | `Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_GINDEX)]` | Merkle branch of `next_sync_committee` within `BeaconState` |
+
 ## Constants

 | Name | Value |
 | - | - |
-| `FINALIZED_ROOT_INDEX` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` (= 105) |
-| `CURRENT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'current_sync_committee')` (= 54) |
-| `NEXT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'next_sync_committee')` (= 55) |
+| `FINALIZED_ROOT_GINDEX` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` (= 105) |
+| `CURRENT_SYNC_COMMITTEE_GINDEX` | `get_generalized_index(BeaconState, 'current_sync_committee')` (= 54) |
+| `NEXT_SYNC_COMMITTEE_GINDEX` | `get_generalized_index(BeaconState, 'next_sync_committee')` (= 55) |

 ## Preset

@@ -93,7 +102,7 @@ class LightClientBootstrap(Container):
     header: LightClientHeader
     # Current sync committee corresponding to `header.beacon.state_root`
     current_sync_committee: SyncCommittee
-    current_sync_committee_branch: Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_INDEX)]
+    current_sync_committee_branch: CurrentSyncCommitteeBranch
 ```

 ### `LightClientUpdate`

@@ -104,10 +113,10 @@ class LightClientUpdate(Container):
     attested_header: LightClientHeader
     # Next sync committee corresponding to `attested_header.beacon.state_root`
     next_sync_committee: SyncCommittee
-    next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
+    next_sync_committee_branch: NextSyncCommitteeBranch
     # Finalized header corresponding to `attested_header.beacon.state_root`
     finalized_header: LightClientHeader
-    finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
+    finality_branch: FinalityBranch
     # Sync committee aggregate signature
     sync_aggregate: SyncAggregate
     # Slot at which the aggregate signature was created (untrusted)

@@ -122,7 +131,7 @@ class LightClientFinalityUpdate(Container):
     attested_header: LightClientHeader
     # Finalized header corresponding to `attested_header.beacon.state_root`
     finalized_header: LightClientHeader
-    finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
+    finality_branch: FinalityBranch
     # Sync committee aggregate signature
     sync_aggregate: SyncAggregate
     # Slot at which the aggregate signature was created (untrusted)

@@ -174,14 +183,14 @@ def is_valid_light_client_header(header: LightClientHeader) -> bool:

 ```python
 def is_sync_committee_update(update: LightClientUpdate) -> bool:
-    return update.next_sync_committee_branch != [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
+    return update.next_sync_committee_branch != NextSyncCommitteeBranch()
 ```

 ### `is_finality_update`

 ```python
 def is_finality_update(update: LightClientUpdate) -> bool:
-    return update.finality_branch != [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
+    return update.finality_branch != FinalityBranch()
 ```

 ### `is_better_update`

@@ -286,8 +295,8 @@ def initialize_light_client_store(trusted_block_root: Root,
     assert is_valid_merkle_branch(
         leaf=hash_tree_root(bootstrap.current_sync_committee),
         branch=bootstrap.current_sync_committee_branch,
-        depth=floorlog2(CURRENT_SYNC_COMMITTEE_INDEX),
-        index=get_subtree_index(CURRENT_SYNC_COMMITTEE_INDEX),
+        depth=floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX),
+        index=get_subtree_index(CURRENT_SYNC_COMMITTEE_GINDEX),
         root=bootstrap.header.beacon.state_root,
     )

@@ -358,8 +367,8 @@ def validate_light_client_update(store: LightClientStore,
     assert is_valid_merkle_branch(
         leaf=finalized_root,
         branch=update.finality_branch,
-        depth=floorlog2(FINALIZED_ROOT_INDEX),
-        index=get_subtree_index(FINALIZED_ROOT_INDEX),
+        depth=floorlog2(FINALIZED_ROOT_GINDEX),
+        index=get_subtree_index(FINALIZED_ROOT_GINDEX),
         root=update.attested_header.beacon.state_root,
     )

@@ -373,8 +382,8 @@ def validate_light_client_update(store: LightClientStore,
     assert is_valid_merkle_branch(
         leaf=hash_tree_root(update.next_sync_committee),
         branch=update.next_sync_committee_branch,
-        depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
-        index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
+        depth=floorlog2(NEXT_SYNC_COMMITTEE_GINDEX),
+        index=get_subtree_index(NEXT_SYNC_COMMITTEE_GINDEX),
         root=update.attested_header.beacon.state_root,
     )

@@ -493,7 +502,7 @@ def process_light_client_finality_update(store: LightClientStore,
     update = LightClientUpdate(
         attested_header=finality_update.attested_header,
         next_sync_committee=SyncCommittee(),
-        next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))],
+        next_sync_committee_branch=NextSyncCommitteeBranch(),
         finalized_header=finality_update.finalized_header,
         finality_branch=finality_update.finality_branch,
         sync_aggregate=finality_update.sync_aggregate,

@@ -512,9 +521,9 @@ def process_light_client_optimistic_update(store: LightClientStore,
     update = LightClientUpdate(
         attested_header=optimistic_update.attested_header,
         next_sync_committee=SyncCommittee(),
-        next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))],
+        next_sync_committee_branch=NextSyncCommitteeBranch(),
         finalized_header=LightClientHeader(),
-        finality_branch=[Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))],
+        finality_branch=FinalityBranch(),
         sync_aggregate=optimistic_update.sync_aggregate,
         signature_slot=optimistic_update.signature_slot,
     )

@@ -294,7 +294,7 @@ def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) ->

 ### Epoch processing

-*Note*: The function `process_historical_summaries_update` replaces `process_historical_roots_update` in Bellatrix.
+*Note*: The function `process_historical_summaries_update` replaces `process_historical_roots_update` in Capella.

 ```python
 def process_epoch(state: BeaconState) -> None:

@@ -46,14 +46,15 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
             transactions_root=hash_tree_root(payload.transactions),
             withdrawals_root=hash_tree_root(payload.withdrawals),
         )
-        execution_branch = compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_INDEX)
+        execution_branch = ExecutionBranch(
+            compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX))
     else:
         # Note that during fork transitions, `finalized_header` may still point to earlier forks.
         # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
         # it was not included in the corresponding light client data. To ensure compatibility
         # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data.
         execution_header = ExecutionPayloadHeader()
-        execution_branch = [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
+        execution_branch = ExecutionBranch()

     return LightClientHeader(
         beacon=BeaconBlockHeader(

@@ -9,6 +9,7 @@

 <!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

 - [Introduction](#introduction)
+- [Custom types](#custom-types)
 - [Constants](#constants)
 - [Containers](#containers)
   - [Modified `LightClientHeader`](#modified-lightclientheader)

@@ -27,11 +28,17 @@ Additional documents describes the impact of the upgrade on certain roles:
 - [Full node](./full-node.md)
 - [Networking](./p2p-interface.md)

+## Custom types
+
+| Name | SSZ equivalent | Description |
+| - | - | - |
+| `ExecutionBranch` | `Vector[Bytes32, floorlog2(EXECUTION_PAYLOAD_GINDEX)]` | Merkle branch of `execution_payload` within `BeaconBlockBody` |
+
 ## Constants

 | Name | Value |
 | - | - |
-| `EXECUTION_PAYLOAD_INDEX` | `get_generalized_index(BeaconBlockBody, 'execution_payload')` (= 25) |
+| `EXECUTION_PAYLOAD_GINDEX` | `get_generalized_index(BeaconBlockBody, 'execution_payload')` (= 25) |

 ## Containers

@@ -43,7 +50,7 @@ class LightClientHeader(Container):
     beacon: BeaconBlockHeader
     # Execution payload header corresponding to `beacon.body_root` (from Capella onward)
     execution: ExecutionPayloadHeader
-    execution_branch: Vector[Bytes32, floorlog2(EXECUTION_PAYLOAD_INDEX)]
+    execution_branch: ExecutionBranch
 ```

 ## Helper functions

@@ -69,14 +76,14 @@ def is_valid_light_client_header(header: LightClientHeader) -> bool:
     if epoch < CAPELLA_FORK_EPOCH:
         return (
             header.execution == ExecutionPayloadHeader()
-            and header.execution_branch == [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
+            and header.execution_branch == ExecutionBranch()
         )

     return is_valid_merkle_branch(
         leaf=get_lc_execution_root(header),
         branch=header.execution_branch,
-        depth=floorlog2(EXECUTION_PAYLOAD_INDEX),
-        index=get_subtree_index(EXECUTION_PAYLOAD_INDEX),
+        depth=floorlog2(EXECUTION_PAYLOAD_GINDEX),
+        index=get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
         root=header.beacon.body_root,
     )
 ```

@@ -52,14 +52,15 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
             execution_header.blob_gas_used = payload.blob_gas_used
             execution_header.excess_blob_gas = payload.excess_blob_gas

-        execution_branch = compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_INDEX)
+        execution_branch = ExecutionBranch(
+            compute_merkle_proof(block.message.body, EXECUTION_PAYLOAD_GINDEX))
     else:
         # Note that during fork transitions, `finalized_header` may still point to earlier forks.
         # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`),
         # it was not included in the corresponding light client data. To ensure compatibility
         # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data.
         execution_header = ExecutionPayloadHeader()
-        execution_branch = [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
+        execution_branch = ExecutionBranch()

     return LightClientHeader(
         beacon=BeaconBlockHeader(

@@ -74,14 +74,14 @@ def is_valid_light_client_header(header: LightClientHeader) -> bool:
     if epoch < CAPELLA_FORK_EPOCH:
         return (
             header.execution == ExecutionPayloadHeader()
-            and header.execution_branch == [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))]
+            and header.execution_branch == ExecutionBranch()
         )

     return is_valid_merkle_branch(
         leaf=get_lc_execution_root(header),
         branch=header.execution_branch,
-        depth=floorlog2(EXECUTION_PAYLOAD_INDEX),
-        index=get_subtree_index(EXECUTION_PAYLOAD_INDEX),
+        depth=floorlog2(EXECUTION_PAYLOAD_GINDEX),
+        index=get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
         root=header.beacon.body_root,
     )
 ```

@@ -175,7 +175,7 @@ The following validations MUST pass before forwarding the `blob_sidecar` on the
 - _[REJECT]_ The sidecar's index is consistent with `MAX_BLOBS_PER_BLOCK` -- i.e. `blob_sidecar.index < MAX_BLOBS_PER_BLOCK`.
 - _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id`.
 - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
-- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
+- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `block_header.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)`
 - _[REJECT]_ The proposer signature of `blob_sidecar.signed_block_header`, is valid with respect to the `block_header.proposer_index` pubkey.
 - _[IGNORE]_ The sidecar's block's parent (defined by `block_header.parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
 - _[REJECT]_ The sidecar's block's parent (defined by `block_header.parent_root`) passes validation.

@@ -78,7 +78,8 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
 | `BYTES_PER_BLOB` | `uint64(BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)` | The number of bytes in a blob |
 | `G1_POINT_AT_INFINITY` | `Bytes48(b'\xc0' + b'\x00' * 47)` | Serialized form of the point at infinity on the G1 group |
 | `KZG_ENDIANNESS` | `'big'` | The endianness of the field elements including blobs |
-| `PRIMITIVE_ROOT_OF_UNITY` | `7` | Primitive root of unity of the BLS12_381 (inner) BLS_MODULUS |
+| `PRIMITIVE_ROOT_OF_UNITY` | `7` | The primitive root of unity from which all roots of unity should be derived |

 ## Preset

@@ -95,8 +96,9 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
 | Name | Value |
 | - | - |
 | `KZG_SETUP_G2_LENGTH` | `65` |
-| `KZG_SETUP_G2_MONOMIAL` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]` |
+| `KZG_SETUP_G1_MONOMIAL` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]` |
 | `KZG_SETUP_G1_LAGRANGE` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]` |
+| `KZG_SETUP_G2_MONOMIAL` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]` |

 ## Helper functions

@@ -592,4 +594,3 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],

     return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs)
 ```

@@ -16,10 +16,12 @@
     - [`get_forkchoice_store`](#get_forkchoice_store)
     - [`get_slots_since_genesis`](#get_slots_since_genesis)
     - [`get_current_slot`](#get_current_slot)
+    - [`get_current_store_epoch`](#get_current_store_epoch)
     - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start)
     - [`get_ancestor`](#get_ancestor)
     - [`calculate_committee_fraction`](#calculate_committee_fraction)
     - [`get_checkpoint_block`](#get_checkpoint_block)
+    - [`get_proposer_score`](#get_proposer_score)
     - [`get_weight`](#get_weight)
     - [`get_voting_source`](#get_voting_source)
     - [`filter_block_tree`](#filter_block_tree)

@@ -140,8 +142,7 @@ class Store(object):

 ```python
 def is_previous_epoch_justified(store: Store) -> bool:
-    current_slot = get_current_slot(store)
-    current_epoch = compute_epoch_at_slot(current_slot)
+    current_epoch = get_current_store_epoch(store)
     return store.justified_checkpoint.epoch + 1 == current_epoch
 ```

@@ -190,6 +191,13 @@ def get_current_slot(store: Store) -> Slot:
     return Slot(GENESIS_SLOT + get_slots_since_genesis(store))
 ```

+#### `get_current_store_epoch`
+
+```python
+def get_current_store_epoch(store: Store) -> Epoch:
+    return compute_epoch_at_slot(get_current_slot(store))
+```
+
 #### `compute_slots_since_epoch_start`

 ```python

@@ -226,6 +234,15 @@ def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root:
     return get_ancestor(store, root, epoch_first_slot)
 ```

+#### `get_proposer_score`
+
+```python
+def get_proposer_score(store: Store) -> Gwei:
+    justified_checkpoint_state = store.checkpoint_states[store.justified_checkpoint]
+    committee_weight = get_total_active_balance(justified_checkpoint_state) // SLOTS_PER_EPOCH
+    return (committee_weight * PROPOSER_SCORE_BOOST) // 100
+```
+
 #### `get_weight`

 ```python

@@ -249,7 +266,7 @@ def get_weight(store: Store, root: Root) -> Gwei:
     proposer_score = Gwei(0)
     # Boost is applied if ``root`` is an ancestor of ``proposer_boost_root``
     if get_ancestor(store, store.proposer_boost_root, store.blocks[root].slot) == root:
-        proposer_score = calculate_committee_fraction(state, PROPOSER_SCORE_BOOST)
+        proposer_score = get_proposer_score(store)
     return attestation_score + proposer_score
 ```

@@ -261,7 +278,7 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint:
     Compute the voting source checkpoint in event that block with root ``block_root`` is the head block
     """
     block = store.blocks[block_root]
-    current_epoch = compute_epoch_at_slot(get_current_slot(store))
+    current_epoch = get_current_store_epoch(store)
     block_epoch = compute_epoch_at_slot(block.slot)
     if current_epoch > block_epoch:
         # The block is from a prior epoch, the voting source will be pulled-up

@@ -293,23 +310,17 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB
             return True
         return False

-    current_epoch = compute_epoch_at_slot(get_current_slot(store))
+    current_epoch = get_current_store_epoch(store)
     voting_source = get_voting_source(store, block_root)

-    # The voting source should be at the same height as the store's justified checkpoint
+    # The voting source should be either at the same height as the store's justified checkpoint or
+    # not more than two epochs ago
     correct_justified = (
         store.justified_checkpoint.epoch == GENESIS_EPOCH
         or voting_source.epoch == store.justified_checkpoint.epoch
+        or voting_source.epoch + 2 >= current_epoch
     )

-    # If the previous epoch is justified, the block should be pulled-up. In this case, check that unrealized
-    # justification is higher than the store and that the voting source is not more than two epochs ago
-    if not correct_justified and is_previous_epoch_justified(store):
-        correct_justified = (
-            store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch and
-            voting_source.epoch + 2 >= current_epoch
-        )
-
     finalized_checkpoint_block = get_checkpoint_block(
         store,
         block_root,

@@ -519,7 +530,7 @@ def compute_pulled_up_tip(store: Store, block_root: Root) -> None:

     # If the block is from a prior epoch, apply the realized values
     block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot)
-    current_epoch = compute_epoch_at_slot(get_current_slot(store))
+    current_epoch = get_current_store_epoch(store)
     if block_epoch < current_epoch:
         update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint)
 ```

@@ -556,7 +567,7 @@ def validate_target_epoch_against_current_time(store: Store, attestation: Attest
     target = attestation.data.target

     # Attestations must be from the current or previous epoch
-    current_epoch = compute_epoch_at_slot(get_current_slot(store))
+    current_epoch = get_current_store_epoch(store)
     # Use GENESIS_EPOCH for previous when genesis to avoid underflow
     previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
     # If attestation target is from a future epoch, delay consideration until the epoch arrives

@@ -653,7 +664,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
         block.parent_root,
         store.finalized_checkpoint.epoch,
     )
     assert store.finalized_checkpoint.root == finalized_checkpoint_block

     # Check the block is valid and compute the post-state
     state = pre_state.copy()

@@ -330,7 +330,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
   i.e. validate that `signed_beacon_block.message.slot <= current_slot`
   (a client MAY queue future blocks for processing at the appropriate slot).
 - _[IGNORE]_ The block is from a slot greater than the latest finalized slot --
-  i.e. validate that `signed_beacon_block.message.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
+  i.e. validate that `signed_beacon_block.message.slot > compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)`
   (a client MAY choose to validate and store such blocks for additional purposes -- e.g. slashing detection, archive nodes, etc).
 - _[IGNORE]_ The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`.
 - _[REJECT]_ The proposer signature, `signed_beacon_block.signature`, is valid with respect to the `proposer_index` pubkey.

@@ -355,17 +355,20 @@ to subscribing nodes (typically validators) to be included in future blocks.

 The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network.
 (We define the following for convenience -- `aggregate_and_proof = signed_aggregate_and_proof.message` and `aggregate = aggregate_and_proof.aggregate`)
+- _[REJECT]_ The committee index is within the expected range -- i.e. `aggregate.data.index < get_committee_count_per_slot(state, aggregate.data.target.epoch)`.
 - _[IGNORE]_ `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
   i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot`
   (a client MAY queue future aggregates for processing at the appropriate slot).
+- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch ==
+  compute_epoch_at_slot(aggregate.data.slot)`
+- _[REJECT]_ The number of aggregation bits matches the committee size -- i.e.
+  `len(aggregate.aggregation_bits) == len(get_beacon_committee(state, aggregate.data.slot, aggregate.data.index))`.
+- _[REJECT]_ The aggregate attestation has participants --
+  that is, `len(get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)) >= 1`.
 - _[IGNORE]_ A valid aggregate attestation defined by `hash_tree_root(aggregate.data)` whose `aggregation_bits` is a non-strict superset has _not_ already been seen.
   (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
 - _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator
   with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
-- _[REJECT]_ The attestation has participants --
-  that is, `len(get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)) >= 1`.
 - _[REJECT]_ `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot --
   i.e. `is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
 - _[REJECT]_ The aggregator's validator index is within the committee --

@@ -378,6 +381,8 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
   (via both gossip and non-gossip sources)
   (a client MAY queue aggregates for processing once block is retrieved).
 - _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
+- _[REJECT]_ The aggregate attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
+  `get_checkpoint_block(store, aggregate.data.beacon_block_root, aggregate.data.target.epoch) == aggregate.data.target.root`
 - _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `aggregate.data.beacon_block_root` -- i.e.
   `get_checkpoint_block(store, aggregate.data.beacon_block_root, finalized_checkpoint.epoch)
   == store.finalized_checkpoint.root`

@@ -425,7 +430,7 @@ The `beacon_attestation_{subnet_id}` topics are used to propagate unaggregated a
 to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`.

 The following validations MUST pass before forwarding the `attestation` on the subnet.
-- _[REJECT]_ The committee index is within the expected range -- i.e. `data.index < get_committee_count_per_slot(state, data.target.epoch)`.
+- _[REJECT]_ The committee index is within the expected range -- i.e. `attestation.data.index < get_committee_count_per_slot(state, attestation.data.target.epoch)`.
 - _[REJECT]_ The attestation is for the correct subnet --
   i.e. `compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index) == subnet_id`,
   where `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)`,

@@ -439,7 +444,7 @@ The following validations MUST pass before forwarding the `attestation` on the s
 - _[REJECT]_ The attestation is unaggregated --
   that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit]) == 1`, i.e. exactly 1 bit is set).
 - _[REJECT]_ The number of aggregation bits matches the committee size -- i.e.
-  `len(attestation.aggregation_bits) == len(get_beacon_committee(state, data.slot, data.index))`.
+  `len(attestation.aggregation_bits) == len(get_beacon_committee(state, attestation.data.slot, attestation.data.index))`.
 - _[IGNORE]_ There has been no other valid attestation seen on an attestation subnet
   that has an identical `attestation.data.target.epoch` and participating validator index.
 - _[REJECT]_ The signature of `attestation` is valid.

@@ -691,9 +696,9 @@ The fields are, as seen by the client at the time of sending the message:
 - `current_fork_version` is the fork version at the node's current epoch defined by the wall-clock time
   (not necessarily the epoch to which the node is synced)
 - `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
-- `finalized_root`: `state.finalized_checkpoint.root` for the state corresponding to the head block
+- `finalized_root`: `store.finalized_checkpoint.root` according to [fork choice](./fork-choice.md).
   (Note this defaults to `Root(b'\x00' * 32)` for the genesis finalized checkpoint).
-- `finalized_epoch`: `state.finalized_checkpoint.epoch` for the state corresponding to the head block.
+- `finalized_epoch`: `store.finalized_checkpoint.epoch` according to [fork choice](./fork-choice.md).
 - `head_root`: The `hash_tree_root` root of the current head block (`BeaconBlock`).
 - `head_slot`: The slot of the block corresponding to the `head_root`.

@@ -10,6 +10,7 @@
   - [Basic types](#basic-types)
   - [Composite types](#composite-types)
   - [Variable-size and fixed-size](#variable-size-and-fixed-size)
+  - [Byte](#byte)
   - [Aliases](#aliases)
   - [Default values](#default-values)
     - [`is_zero`](#is_zero)

@@ -25,6 +26,7 @@
 - [Merkleization](#merkleization)
 - [Summaries and expansions](#summaries-and-expansions)
 - [Implementations](#implementations)
+- [JSON mapping](#json-mapping)

 <!-- END doctoc generated TOC please keep comment here to allow auto update -->
 <!-- /TOC -->

@@ -41,6 +43,7 @@
 ### Basic types

 * `uintN`: `N`-bit unsigned integer (where `N in [8, 16, 32, 64, 128, 256]`)
+* `byte`: 8-bit opaque data container, equivalent in serialization and hashing to `uint8`
 * `boolean`: `True` or `False`

 ### Composite types

@@ -69,15 +72,20 @@

 We recursively define "variable-size" types to be lists, unions, `Bitlist` and all types that contain a variable-size type. All other types are said to be "fixed-size".

+### Byte
+
+Although the SSZ serialization of `byte` is equivalent to that of `uint8`, the former is used for opaque data while the latter is intended as a number.
+
 ### Aliases

 For convenience we alias:

 * `bit` to `boolean`
 * `byte` to `uint8` (this is a basic type)
 * `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type)
 * `ByteList[N]` to `List[byte, N]`

+Aliases are semantically equivalent to their underlying type and therefore share canonical representations both in SSZ and in related formats.
+
 ### Default values
 Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types.

@@ -256,3 +264,33 @@ We similarly define "summary types" and "expansion types". For example, [`Beacon
 ## Implementations

 See https://github.com/ethereum/eth2.0-specs/issues/2138 for a list of current known implementations.
+
+## JSON mapping
+
+The canonical JSON mapping assigns to each SSZ type a corresponding JSON encoding, enabling an SSZ schema to also define the JSON encoding.
+
+When decoding JSON data, all fields in the SSZ schema must be present with a value. Parsers may ignore additional JSON fields.
+
+| SSZ | JSON | Example |
+| --- | --- | --- |
+| `uintN` | string | `"0"` |
+| `byte` | hex-byte-string | `"0x00"` |
+| `boolean` | bool | `false` |
+| `Container` | object | `{ "field": ... }` |
+| `Vector[type, N]` | array | `[element, ...]` |
+| `Vector[byte, N]` | hex-byte-string | `"0x1122"` |
+| `Bitvector[N]` | hex-byte-string | `"0x1122"` |
+| `List[type, N]` | array | `[element, ...]` |
+| `List[byte, N]` | hex-byte-string | `"0x1122"` |
+| `Bitlist[N]` | hex-byte-string | `"0x1122"` |
+| `Union[type_0, type_1, ...]` | selector-object | `{ "selector": number, "data": type_N }` |
+
+Integers are encoded as strings to avoid loss of precision in 64-bit values.
+
+Aliases are encoded as their underlying type.
+
+`hex-byte-string` is a `0x`-prefixed hex encoding of byte data, as it would appear in an SSZ stream.
+
+`List` and `Vector` of `byte` (and aliases thereof) are encoded as `hex-byte-string`. `Bitlist` and `Bitvector` similarly map their SSZ-byte encodings to a `hex-byte-string`.
+
+`Union` is encoded as an object with a `selector` and `data` field, where the contents of `data` change according to the selector.
|
||||
|
|
|
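A hedged sketch of a few rows of this mapping (`encode_json` is an illustrative name, not a spec function; byte-ish kinds are assumed to already hold their SSZ byte encoding):

```python
def encode_json(value, t):
    kind = t["kind"]
    if kind == "uint":
        return str(value)                 # string keeps full 64-bit precision
    if kind == "boolean":
        return value                      # true / false
    if kind in ("byte_vector", "byte_list", "bitvector", "bitlist"):
        return "0x" + bytes(value).hex()  # hex-byte-string of the SSZ stream bytes
    if kind == "container":
        return {name: encode_json(value[name], f) for name, f in t["fields"].items()}
    if kind == "union":
        sel = value["selector"]
        return {"selector": sel, "data": encode_json(value["data"], t["options"][sel])}
    raise NotImplementedError(kind)

assert encode_json(255, {"kind": "uint", "bits": 64}) == "255"
assert encode_json(b"\x11\x22", {"kind": "byte_vector", "length": 2}) == "0x1122"
```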
@@ -1 +1 @@
1.4.0-beta.5
1.4.0-beta.6

@@ -10,17 +10,17 @@ from eth2spec.test.context import (
@spec_state_test
def test_current_sync_committee_merkle_proof(spec, state):
    yield "object", state
    current_sync_committee_branch = spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_INDEX)
    current_sync_committee_branch = spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_GINDEX)
    yield "proof", {
        "leaf": "0x" + state.current_sync_committee.hash_tree_root().hex(),
        "leaf_index": spec.CURRENT_SYNC_COMMITTEE_INDEX,
        "leaf_index": spec.CURRENT_SYNC_COMMITTEE_GINDEX,
        "branch": ['0x' + root.hex() for root in current_sync_committee_branch]
    }
    assert spec.is_valid_merkle_branch(
        leaf=state.current_sync_committee.hash_tree_root(),
        branch=current_sync_committee_branch,
        depth=spec.floorlog2(spec.CURRENT_SYNC_COMMITTEE_INDEX),
        index=spec.get_subtree_index(spec.CURRENT_SYNC_COMMITTEE_INDEX),
        depth=spec.floorlog2(spec.CURRENT_SYNC_COMMITTEE_GINDEX),
        index=spec.get_subtree_index(spec.CURRENT_SYNC_COMMITTEE_GINDEX),
        root=state.hash_tree_root(),
    )

@@ -30,17 +30,17 @@ def test_current_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_next_sync_committee_merkle_proof(spec, state):
    yield "object", state
    next_sync_committee_branch = spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
    next_sync_committee_branch = spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
    yield "proof", {
        "leaf": "0x" + state.next_sync_committee.hash_tree_root().hex(),
        "leaf_index": spec.NEXT_SYNC_COMMITTEE_INDEX,
        "leaf_index": spec.NEXT_SYNC_COMMITTEE_GINDEX,
        "branch": ['0x' + root.hex() for root in next_sync_committee_branch]
    }
    assert spec.is_valid_merkle_branch(
        leaf=state.next_sync_committee.hash_tree_root(),
        branch=next_sync_committee_branch,
        depth=spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX),
        index=spec.get_subtree_index(spec.NEXT_SYNC_COMMITTEE_INDEX),
        depth=spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_GINDEX),
        index=spec.get_subtree_index(spec.NEXT_SYNC_COMMITTEE_GINDEX),
        root=state.hash_tree_root(),
    )

@@ -50,17 +50,17 @@ def test_next_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_finality_root_merkle_proof(spec, state):
    yield "object", state
    finality_branch = spec.compute_merkle_proof(state, spec.FINALIZED_ROOT_INDEX)
    finality_branch = spec.compute_merkle_proof(state, spec.FINALIZED_ROOT_GINDEX)
    yield "proof", {
        "leaf": "0x" + state.finalized_checkpoint.root.hex(),
        "leaf_index": spec.FINALIZED_ROOT_INDEX,
        "leaf_index": spec.FINALIZED_ROOT_GINDEX,
        "branch": ['0x' + root.hex() for root in finality_branch]
    }

    assert spec.is_valid_merkle_branch(
        leaf=state.finalized_checkpoint.root,
        branch=finality_branch,
        depth=spec.floorlog2(spec.FINALIZED_ROOT_INDEX),
        index=spec.get_subtree_index(spec.FINALIZED_ROOT_INDEX),
        depth=spec.floorlog2(spec.FINALIZED_ROOT_GINDEX),
        index=spec.get_subtree_index(spec.FINALIZED_ROOT_GINDEX),
        root=state.hash_tree_root(),
    )
@@ -16,19 +16,22 @@ from eth2spec.test.helpers.attestations import (
    state_transition_with_full_block,
)
from eth2spec.test.helpers.constants import (
    PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
    ALTAIR, BELLATRIX, CAPELLA, DENEB,
    MINIMAL,
    ALL_PHASES,
)
from eth2spec.test.helpers.fork_transition import (
    do_fork,
)
from eth2spec.test.helpers.forks import (
    get_spec_for_fork_version,
    is_post_capella, is_post_deneb,
    is_post_fork,
)
from eth2spec.test.helpers.light_client import (
    compute_start_slot_at_next_sync_committee_period,
    get_sync_aggregate,
    upgrade_lc_bootstrap_to_new_spec,
    upgrade_lc_update_to_new_spec,
    upgrade_lc_store_to_new_spec,
)
from eth2spec.test.helpers.state import (
    next_slots,

@@ -36,107 +39,6 @@ from eth2spec.test.helpers.state import (
)


def get_spec_for_fork_version(spec, fork_version, phases):
    if phases is None:
        return spec
    for fork in [fork for fork in ALL_PHASES if is_post_fork(spec.fork, fork)]:
        if fork == PHASE0:
            fork_version_field = 'GENESIS_FORK_VERSION'
        else:
            fork_version_field = fork.upper() + '_FORK_VERSION'
        if fork_version == getattr(spec.config, fork_version_field):
            return phases[fork]
    raise ValueError("Unknown fork version %s" % fork_version)


def needs_upgrade_to_capella(d_spec, s_spec):
    return is_post_capella(s_spec) and not is_post_capella(d_spec)


def needs_upgrade_to_deneb(d_spec, s_spec):
    return is_post_deneb(s_spec) and not is_post_deneb(d_spec)


def check_lc_header_equal(d_spec, s_spec, data, upgraded):
    assert upgraded.beacon.slot == data.beacon.slot
    assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root()
    if is_post_capella(s_spec):
        if is_post_capella(d_spec):
            assert s_spec.get_lc_execution_root(upgraded) == d_spec.get_lc_execution_root(data)
        else:
            assert s_spec.get_lc_execution_root(upgraded) == s_spec.Root()


def check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded):
    check_lc_header_equal(d_spec, s_spec, data.header, upgraded.header)
    assert upgraded.current_sync_committee == data.current_sync_committee
    assert upgraded.current_sync_committee_branch == data.current_sync_committee_branch


def upgrade_lc_bootstrap_to_store(d_spec, s_spec, data):
    upgraded = data

    if needs_upgrade_to_capella(d_spec, s_spec):
        upgraded = s_spec.upgrade_lc_bootstrap_to_capella(upgraded)
        check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)

    if needs_upgrade_to_deneb(d_spec, s_spec):
        upgraded = s_spec.upgrade_lc_bootstrap_to_deneb(upgraded)
        check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)

    return upgraded


def check_lc_update_equal(d_spec, s_spec, data, upgraded):
    check_lc_header_equal(d_spec, s_spec, data.attested_header, upgraded.attested_header)
    assert upgraded.next_sync_committee == data.next_sync_committee
    assert upgraded.next_sync_committee_branch == data.next_sync_committee_branch
    check_lc_header_equal(d_spec, s_spec, data.finalized_header, upgraded.finalized_header)
    assert upgraded.sync_aggregate == data.sync_aggregate
    assert upgraded.signature_slot == data.signature_slot


def upgrade_lc_update_to_store(d_spec, s_spec, data):
    upgraded = data

    if needs_upgrade_to_capella(d_spec, s_spec):
        upgraded = s_spec.upgrade_lc_update_to_capella(upgraded)
        check_lc_update_equal(d_spec, s_spec, data, upgraded)

    if needs_upgrade_to_deneb(d_spec, s_spec):
        upgraded = s_spec.upgrade_lc_update_to_deneb(upgraded)
        check_lc_update_equal(d_spec, s_spec, data, upgraded)

    return upgraded


def check_lc_store_equal(d_spec, s_spec, data, upgraded):
    check_lc_header_equal(d_spec, s_spec, data.finalized_header, upgraded.finalized_header)
    assert upgraded.current_sync_committee == data.current_sync_committee
    assert upgraded.next_sync_committee == data.next_sync_committee
    if upgraded.best_valid_update is None:
        assert data.best_valid_update is None
    else:
        check_lc_update_equal(d_spec, s_spec, data.best_valid_update, upgraded.best_valid_update)
    check_lc_header_equal(d_spec, s_spec, data.optimistic_header, upgraded.optimistic_header)
    assert upgraded.previous_max_active_participants == data.previous_max_active_participants
    assert upgraded.current_max_active_participants == data.current_max_active_participants


def upgrade_lc_store_to_new_spec(d_spec, s_spec, data):
    upgraded = data

    if needs_upgrade_to_capella(d_spec, s_spec):
        upgraded = s_spec.upgrade_lc_store_to_capella(upgraded)
        check_lc_store_equal(d_spec, s_spec, data, upgraded)

    if needs_upgrade_to_deneb(d_spec, s_spec):
        upgraded = s_spec.upgrade_lc_store_to_deneb(upgraded)
        check_lc_store_equal(d_spec, s_spec, data, upgraded)

    return upgraded


class LightClientSyncTest(object):
    steps: List[Dict[str, Any]]
    genesis_validators_root: Any

@@ -175,7 +77,7 @@ def setup_test(spec, state, s_spec=None, phases=None):
    yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest)
    yield "bootstrap", data

    upgraded = upgrade_lc_bootstrap_to_store(d_spec, test.s_spec, data)
    upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data)
    test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded)
    store_fork_version = get_store_fork_version(test.s_spec)
    store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root)

@@ -248,10 +150,10 @@ def emit_update(test, spec, state, block, attested_state, attested_block, finali
    if not with_next:
        data.next_sync_committee = spec.SyncCommittee()
        data.next_sync_committee_branch = \
            [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
            [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_GINDEX))]
    current_slot = state.slot

    upgraded = upgrade_lc_update_to_store(d_spec, test.s_spec, data)
    upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data)
    test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root)

    yield get_update_file_name(d_spec, data), data

@@ -281,15 +183,6 @@ def emit_upgrade_store(test, new_s_spec, phases=None):
    })


def compute_start_slot_at_sync_committee_period(spec, sync_committee_period):
    return spec.compute_start_slot_at_epoch(sync_committee_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)


def compute_start_slot_at_next_sync_committee_period(spec, state):
    sync_committee_period = spec.compute_sync_committee_period_at_slot(state.slot)
    return compute_start_slot_at_sync_committee_period(spec, sync_committee_period + 1)


@with_light_client
@spec_state_test_with_matching_config
@with_presets([MINIMAL], reason="too slow")

@@ -628,7 +521,7 @@ def run_test_single_fork(spec, phases, state, fork):
    finalized_state = state.copy()
    attested_block = state_transition_with_full_block(spec, state, True, True)
    attested_state = state.copy()
    sync_aggregate, _ = get_sync_aggregate(spec, state)
    sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases)
    block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
    yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
    assert test.store.finalized_header.beacon.slot == finalized_state.slot

@@ -641,7 +534,7 @@ def run_test_single_fork(spec, phases, state, fork):
    transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch) - 4)
    attested_block = state_transition_with_full_block(spec, state, True, True)
    attested_state = state.copy()
    sync_aggregate, _ = get_sync_aggregate(spec, state)
    sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases)
    block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
    update = yield from emit_update(
        test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)

@@ -657,7 +550,7 @@ def run_test_single_fork(spec, phases, state, fork):
    # Final slot before fork, check that importing the pre-fork format still works
    attested_block = block.copy()
    attested_state = state.copy()
    sync_aggregate, _ = get_sync_aggregate(spec, state)
    sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases)
    block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
    yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
    assert test.store.finalized_header.beacon.slot == finalized_state.slot

@@ -668,7 +561,7 @@ def run_test_single_fork(spec, phases, state, fork):
    # Upgrade to post-fork spec, attested block is still before the fork
    attested_block = block.copy()
    attested_state = state.copy()
    sync_aggregate, _ = get_sync_aggregate(phases[fork], state)
    sync_aggregate, _ = get_sync_aggregate(phases[fork], state, phases=phases)
    state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate)
    spec = phases[fork]
    yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)

@@ -680,7 +573,7 @@ def run_test_single_fork(spec, phases, state, fork):
    # Another block after the fork, this time attested block is after the fork
    attested_block = block.copy()
    attested_state = state.copy()
    sync_aggregate, _ = get_sync_aggregate(spec, state)
    sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases)
    block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
    yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
    assert test.store.finalized_header.beacon.slot == finalized_state.slot

@@ -692,7 +585,7 @@ def run_test_single_fork(spec, phases, state, fork):
    transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_epoch + 1) - 2)
    attested_block = state_transition_with_full_block(spec, state, True, True)
    attested_state = state.copy()
    sync_aggregate, _ = get_sync_aggregate(spec, state)
    sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases)
    block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
    yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
    assert test.store.finalized_header.beacon.slot == finalized_state.slot

@@ -706,7 +599,7 @@ def run_test_single_fork(spec, phases, state, fork):
    _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
    attested_block = state_transition_with_full_block(spec, state, True, True)
    attested_state = state.copy()
    sync_aggregate, _ = get_sync_aggregate(spec, state)
    sync_aggregate, _ = get_sync_aggregate(spec, state, phases=phases)
    block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
    yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases)
    assert test.store.finalized_header.beacon.slot == finalized_state.slot
@@ -418,3 +418,37 @@ def test_transition_with_no_attestations_until_after_fork(state, fork_epoch, spe
    yield "blocks", blocks
    yield "post", state


@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
def test_non_empty_historical_roots(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
    """
    Test with a non-empty pre-state `state.historical_roots`.

    Since Capella froze `historical_roots`, the Capella spec no longer invokes `process_historical_roots_update`.
    Therefore, we need to pre-fill `historical_roots` with a non-empty value.
    """
    # fill in historical_roots with non-empty values
    pre_historical_roots = [b'\x56' * 32]
    state.historical_roots = pre_historical_roots

    transition_until_fork(spec, state, fork_epoch)
    # check pre state
    assert spec.get_current_epoch(state) < fork_epoch
    assert len(state.historical_roots) > 0

    yield "pre", state

    # irregular state transition to handle fork:
    blocks = []
    state, block = do_fork(state, spec, post_spec, fork_epoch)
    blocks.append(post_tag(block))

    # continue regular state transition with new spec into next epoch
    transition_to_next_epoch_and_append_blocks(post_spec, state, post_tag, blocks, only_last_block=True)

    yield "blocks", blocks
    yield "post", state

    assert len(state.historical_roots) > 0
    assert state.historical_roots == pre_historical_roots
@@ -15,16 +15,16 @@ def test_execution_merkle_proof(spec, state):
    block = state_transition_with_full_block(spec, state, True, False)

    yield "object", block.message.body
    execution_branch = spec.compute_merkle_proof(block.message.body, spec.EXECUTION_PAYLOAD_INDEX)
    execution_branch = spec.compute_merkle_proof(block.message.body, spec.EXECUTION_PAYLOAD_GINDEX)
    yield "proof", {
        "leaf": "0x" + block.message.body.execution_payload.hash_tree_root().hex(),
        "leaf_index": spec.EXECUTION_PAYLOAD_INDEX,
        "leaf_index": spec.EXECUTION_PAYLOAD_GINDEX,
        "branch": ['0x' + root.hex() for root in execution_branch]
    }
    assert spec.is_valid_merkle_branch(
        leaf=block.message.body.execution_payload.hash_tree_root(),
        branch=execution_branch,
        depth=spec.floorlog2(spec.EXECUTION_PAYLOAD_INDEX),
        index=spec.get_subtree_index(spec.EXECUTION_PAYLOAD_INDEX),
        depth=spec.floorlog2(spec.EXECUTION_PAYLOAD_GINDEX),
        index=spec.get_subtree_index(spec.EXECUTION_PAYLOAD_GINDEX),
        root=block.message.body.hash_tree_root(),
    )
@@ -1,6 +1,6 @@
from eth2spec.test import context
from eth2spec.test.helpers.constants import (
    ALL_PHASES,
    ALL_PHASES, ALLOWED_TEST_RUNNER_FORKS
)
from eth2spec.utils import bls as bls_utils

@@ -44,7 +44,7 @@ def pytest_addoption(parser):
        help="bls-default: make tests that are not dependent on BLS run without BLS"
    )
    parser.addoption(
        "--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro", "arkworks", "fastest"],
        "--bls-type", action="store", type=str, default="fastest", choices=["py_ecc", "milagro", "arkworks", "fastest"],
        help=(
            "bls-type: use specified BLS implementation;"
            "fastest: use milagro for signatures and arkworks for everything else (e.g. KZG)"

@@ -54,10 +54,10 @@ def pytest_addoption(parser):

def _validate_fork_name(forks):
    for fork in forks:
        if fork not in set(ALL_PHASES):
        if fork not in set(ALLOWED_TEST_RUNNER_FORKS):
            raise ValueError(
                f'The given --fork argument "{fork}" is not an available fork.'
                f' The available forks: {ALL_PHASES}'
                f' The available forks: {ALLOWED_TEST_RUNNER_FORKS}'
            )
@@ -3,34 +3,35 @@ from copy import deepcopy
from dataclasses import dataclass
import importlib

from eth2spec.phase0 import mainnet as spec_phase0_mainnet, minimal as spec_phase0_minimal
from eth2spec.altair import mainnet as spec_altair_mainnet, minimal as spec_altair_minimal
from eth2spec.bellatrix import mainnet as spec_bellatrix_mainnet, minimal as spec_bellatrix_minimal
from eth2spec.capella import mainnet as spec_capella_mainnet, minimal as spec_capella_minimal
from eth2spec.deneb import mainnet as spec_deneb_mainnet, minimal as spec_deneb_minimal
from eth2spec.eip6110 import mainnet as spec_eip6110_mainnet, minimal as spec_eip6110_minimal
from eth2spec.eip7002 import mainnet as spec_eip7002_mainnet, minimal as spec_eip7002_minimal
from eth2spec.utils import bls

from .exceptions import SkippedTest
from .helpers.constants import (
    PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
    EIP6110, EIP7002,
    MINIMAL, MAINNET,
    EIP6110, EIP7002, EIP7594,
    WHISK,
    MINIMAL,
    ALL_PHASES,
    ALL_FORK_UPGRADES,
    POST_FORK_OF,
    ALLOWED_TEST_RUNNER_FORKS,
    LIGHT_CLIENT_TESTING_FORKS,
)
from .helpers.forks import is_post_fork
from .helpers.typing import SpecForkName, PresetBaseName
from .helpers.genesis import create_genesis_state
from .helpers.typing import (
    Spec,
    SpecForks,
)
from .helpers.specs import (
    spec_targets,
)
from .utils import (
    vector_test,
    with_meta_tags,
)

from random import Random
from typing import Any, Callable, Sequence, TypedDict, Protocol, Dict
from typing import Any, Callable, Sequence, Dict

from lru import LRU

@@ -41,34 +42,6 @@ DEFAULT_TEST_PRESET = MINIMAL
DEFAULT_PYTEST_FORKS = ALL_PHASES


# TODO: currently phases are defined as python modules.
# It would be better if they would be more well-defined interfaces for stronger typing.

class Configuration(Protocol):
    PRESET_BASE: str


class Spec(Protocol):
    fork: str
    config: Configuration


class SpecPhase0(Spec):
    ...


class SpecAltair(Spec):
    ...


class SpecBellatrix(Spec):
    ...


class SpecCapella(Spec):
    ...


@dataclass(frozen=True)
class ForkMeta:
    pre_fork_name: str

@@ -76,35 +49,6 @@ class ForkMeta:
    fork_epoch: int


spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
    MINIMAL: {
        PHASE0: spec_phase0_minimal,
        ALTAIR: spec_altair_minimal,
        BELLATRIX: spec_bellatrix_minimal,
        CAPELLA: spec_capella_minimal,
        DENEB: spec_deneb_minimal,
        EIP6110: spec_eip6110_minimal,
        EIP7002: spec_eip7002_minimal,
    },
    MAINNET: {
        PHASE0: spec_phase0_mainnet,
        ALTAIR: spec_altair_mainnet,
        BELLATRIX: spec_bellatrix_mainnet,
        CAPELLA: spec_capella_mainnet,
        DENEB: spec_deneb_mainnet,
        EIP6110: spec_eip6110_mainnet,
        EIP7002: spec_eip7002_mainnet,
    },
}


class SpecForks(TypedDict, total=False):
    PHASE0: SpecPhase0
    ALTAIR: SpecAltair
    BELLATRIX: SpecBellatrix
    CAPELLA: SpecCapella


def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Callable[[Any], int],
                   spec: Spec, phases: SpecForks):
    balances = balances_fn(spec)
@@ -434,12 +378,12 @@ def with_all_phases(fn):
    return with_phases(ALL_PHASES)(fn)


def with_all_phases_from(earliest_phase):
def with_all_phases_from(earliest_phase, all_phases=ALL_PHASES):
    """
    A decorator factory for running a test with every phase at or after ``earliest_phase``
    """
    def decorator(fn):
        return with_phases([phase for phase in ALL_PHASES if is_post_fork(phase, earliest_phase)])(fn)
        return with_phases([phase for phase in all_phases if is_post_fork(phase, earliest_phase)])(fn)
    return decorator
@@ -525,7 +469,7 @@ def with_phases(phases, other_phases=None):
            # When running test generator, it sets specific `phase`
            phase = kw['phase']
            _phases = [phase]
            _other_phases = [ALL_FORK_UPGRADES[phase]]
            _other_phases = [POST_FORK_OF[phase]]
            ret = _run_test_case_with_phases(fn, _phases, _other_phases, kw, args, is_fork_transition=True)
        else:
            # When running pytest, go through `fork_metas` instead of using `phases`

@@ -565,6 +509,8 @@ with_capella_and_later = with_all_phases_from(CAPELLA)
with_deneb_and_later = with_all_phases_from(DENEB)
with_eip6110_and_later = with_all_phases_from(EIP6110)
with_eip7002_and_later = with_all_phases_from(EIP7002)
with_whisk_and_later = with_all_phases_from(WHISK, all_phases=ALLOWED_TEST_RUNNER_FORKS)
with_eip7594_and_later = with_all_phases_from(EIP7594, all_phases=ALLOWED_TEST_RUNNER_FORKS)


class quoted_str(str):
@@ -0,0 +1,97 @@
import random
from eth2spec.test.context import (
    spec_test,
    single_phase,
    with_eip7594_and_later,
)
from eth2spec.test.helpers.sharding import (
    get_sample_blob,
)
from eth2spec.utils.bls import BLS_MODULUS


@with_eip7594_and_later
@spec_test
@single_phase
def test_fft(spec):
    rng = random.Random(5566)

    roots_of_unity = spec.compute_roots_of_unity(spec.FIELD_ELEMENTS_PER_BLOB)

    poly_coeff = [rng.randint(0, BLS_MODULUS - 1) for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)]

    poly_eval = spec.fft_field(poly_coeff, roots_of_unity)
    poly_coeff_inversed = spec.fft_field(poly_eval, roots_of_unity, inv=True)

    assert len(poly_eval) == len(poly_coeff) == len(poly_coeff_inversed)
    assert poly_coeff_inversed == poly_coeff


@with_eip7594_and_later
@spec_test
@single_phase
def test_verify_cell_proof(spec):
    blob = get_sample_blob(spec)
    commitment = spec.blob_to_kzg_commitment(blob)
    cells, proofs = spec.compute_cells_and_proofs(blob)
    cell_id = 0
    assert spec.verify_cell_proof(commitment, cell_id, cells[cell_id], proofs[cell_id])
    cell_id = 1
    assert spec.verify_cell_proof(commitment, cell_id, cells[cell_id], proofs[cell_id])


@with_eip7594_and_later
@spec_test
@single_phase
def test_verify_cell_proof_batch(spec):
    blob = get_sample_blob(spec)
    commitment = spec.blob_to_kzg_commitment(blob)
    cells, proofs = spec.compute_cells_and_proofs(blob)

    assert spec.verify_cell_proof_batch(
        row_commitments=[commitment],
        row_ids=[0],
        column_ids=[0, 1],
        cells=cells[0:1],
        proofs=proofs,
    )


@with_eip7594_and_later
@spec_test
@single_phase
def test_recover_polynomial(spec):
    rng = random.Random(5566)

    # Number of samples we will be recovering from
    N_SAMPLES = spec.CELLS_PER_BLOB // 2

    # Get the data we will be working with
    blob = get_sample_blob(spec)
    # Get the data in evaluation form
    original_polynomial = spec.blob_to_polynomial(blob)

    # Extend data with Reed-Solomon and split the extended data in cells
    cells = spec.compute_cells(blob)

    # Compute the cells we will be recovering from
    cell_ids = []
    known_cells = []
    # First figure out just the indices of the cells
    # (randint is inclusive on both ends, so cap at CELLS_PER_BLOB - 1 to stay in range)
    for i in range(N_SAMPLES):
        j = rng.randint(0, spec.CELLS_PER_BLOB - 1)
        while j in cell_ids:
            j = rng.randint(0, spec.CELLS_PER_BLOB - 1)
        cell_ids.append(j)
    # Now the cells themselves
    known_cells = [cells[cell_id] for cell_id in cell_ids]

    # Recover the data
    recovered_data = spec.recover_polynomial(cell_ids, known_cells)

    # Check that the original data match the non-extended portion of the recovered data
    assert original_polynomial == recovered_data[:len(recovered_data) // 2]

    # Now flatten the cells and check that they match the entirety of the recovered data
    flattened_cells = [x for xs in cells for x in xs]
    assert flattened_cells == recovered_data
@@ -280,7 +280,7 @@ def state_transition_with_full_block(spec,
        )
        for attestation in attestations:
            block.body.attestations.append(attestation)
    if fill_prev_epoch:
    if fill_prev_epoch and state.slot >= spec.SLOTS_PER_EPOCH:
        slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1
        attestations = get_valid_attestation_at_slot(
            state,
@@ -1,9 +1,28 @@
from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.keys import privkeys
from eth2spec.test.helpers.forks import is_post_whisk, is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.keys import privkeys, whisk_ks_initial, whisk_ks_final
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from curdleproofs import (
    GenerateWhiskTrackerProof,
    WhiskTracker,
    GenerateWhiskShuffleProof,
)
from py_ecc.optimized_bls12_381.optimized_curve import G1, multiply
from py_ecc.typing import Optimized_Field, Optimized_Point3D
from py_ecc.bls.g2_primitives import (
    G1_to_pubkey as py_ecc_G1_to_bytes48,
    pubkey_to_G1 as py_ecc_bytes48_to_G1,
)
from eth2spec.test.helpers.whisk import (
    compute_whisk_tracker_and_commitment,
    is_first_proposal,
    resolve_known_tracker
)
from py_arkworks_bls12381 import Scalar

PointProjective = Optimized_Point3D[Optimized_Field]


def get_proposer_index_maybe(spec, state, slot, proposer_index=None):

@@ -24,10 +43,9 @@ def get_proposer_index_maybe(spec, state, slot, proposer_index=None):


@only_with_bls()
def apply_randao_reveal(spec, state, block, proposer_index=None):
def apply_randao_reveal(spec, state, block, proposer_index):
    assert state.slot <= block.slot

    proposer_index = get_proposer_index_maybe(spec, state, block.slot, proposer_index)
    privkey = privkeys[proposer_index]

    domain = spec.get_domain(state, spec.DOMAIN_RANDAO, spec.compute_epoch_at_slot(block.slot))

@@ -72,7 +90,7 @@ def apply_empty_block(spec, state, slot=None):
    return transition_unsigned_block(spec, state, block)


def build_empty_block(spec, state, slot=None):
def build_empty_block(spec, state, slot=None, proposer_index=None):
    """
    Build empty block for ``slot``, built upon the latest block header seen by ``state``.
    Slot must be greater than or equal to the current slot in ``state``.

@@ -87,13 +105,14 @@ def build_empty_block(spec, state, slot=None):
        spec.process_slots(state, slot)

    state, parent_block_root = get_state_and_beacon_parent_root_at_slot(spec, state, slot)
    proposer_index = get_beacon_proposer_to_build(spec, state, proposer_index)
    empty_block = spec.BeaconBlock()
    empty_block.slot = slot
    empty_block.proposer_index = spec.get_beacon_proposer_index(state)
    empty_block.proposer_index = proposer_index
    empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
    empty_block.parent_root = parent_block_root

    apply_randao_reveal(spec, state, empty_block)
    apply_randao_reveal(spec, state, empty_block, proposer_index)

    if is_post_altair(spec):
        empty_block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY

@@ -101,11 +120,99 @@ def build_empty_block(spec, state, slot=None):
    if is_post_bellatrix(spec):
        empty_block.body.execution_payload = build_empty_execution_payload(spec, state)

    if is_post_whisk(spec):
        # Whisk opening proof
        #######

        # Create valid whisk opening proof
        # TODO: Use k_initial or k_final to handle first and subsequent proposals
        k_initial = whisk_ks_initial(proposer_index)

        # Sanity check proposer is correct
        proposer_k_commitment = state.whisk_k_commitments[proposer_index]
        k_commitment = py_ecc_G1_to_bytes48(multiply(G1, int(k_initial)))
        if proposer_k_commitment != k_commitment:
            raise Exception("k proposer_index not eq proposer_k_commitment", proposer_k_commitment, k_commitment)

        proposer_tracker = state.whisk_proposer_trackers[state.slot % spec.WHISK_PROPOSER_TRACKERS_COUNT]
        if not is_whisk_proposer(proposer_tracker, k_initial):
            raise Exception("k proposer_index does not match proposer_tracker")

        empty_block.body.whisk_opening_proof = GenerateWhiskTrackerProof(proposer_tracker, Scalar(k_initial))

        # Whisk shuffle proof
        #######

        shuffle_indices = spec.get_shuffle_indices(empty_block.body.randao_reveal)
        pre_shuffle_trackers = [state.whisk_candidate_trackers[i] for i in shuffle_indices]

        post_trackers, shuffle_proof = GenerateWhiskShuffleProof(spec.CURDLEPROOFS_CRS, pre_shuffle_trackers)
        empty_block.body.whisk_post_shuffle_trackers = post_trackers
        empty_block.body.whisk_shuffle_proof = shuffle_proof

        # Whisk registration proof
        #######

        # Branching logic depending if first proposal or not
        if is_first_proposal(spec, state, proposer_index):
            # Register new tracker
            k_final = whisk_ks_final(proposer_index)
            # TODO: Actual logic should pick a random r, but may need to do something fancy to locate trackers quickly
            r = 2
            tracker, k_commitment = compute_whisk_tracker_and_commitment(k_final, r)
            empty_block.body.whisk_registration_proof = GenerateWhiskTrackerProof(tracker, Scalar(k_final))
            empty_block.body.whisk_tracker = tracker
            empty_block.body.whisk_k_commitment = k_commitment

        else:
            # Subsequent proposals, just leave empty
            empty_block.body.whisk_registration_proof = spec.WhiskTrackerProof()
            empty_block.body.whisk_tracker = spec.WhiskTracker()
            empty_block.body.whisk_k_commitment = spec.BLSG1Point()

    return empty_block


def build_empty_block_for_next_slot(spec, state):
    return build_empty_block(spec, state, state.slot + 1)


def is_whisk_proposer(tracker: WhiskTracker, k: int) -> bool:
    return py_ecc_G1_to_bytes48(multiply(py_ecc_bytes48_to_G1(tracker.r_G), k)) == tracker.k_r_G


def get_beacon_proposer_to_build(spec, state, proposer_index=None):
    if is_post_whisk(spec):
        if proposer_index is None:
            return find_whisk_proposer(spec, state)
        else:
            return proposer_index
    else:
        return spec.get_beacon_proposer_index(state)


def find_whisk_proposer(spec, state):
    proposer_tracker = state.whisk_proposer_trackers[state.slot % spec.WHISK_PROPOSER_TRACKERS_COUNT]

    # Check record of known trackers
    # During the first shuffling phase (epoch < WHISK_EPOCHS_PER_SHUFFLING_PHASE)
    # proposer trackers are those inserted on the genesis state, and have not gone
    # through any shuffling. We cache those initial trackers and use `resolve_known_tracker`
    # to check if the tracker is known, and skip the need to actually find the matching tracker
    proposer_index = resolve_known_tracker(proposer_tracker)
    if proposer_index is not None:
        return proposer_index
print("proposer_tracker", proposer_tracker)
|
||||
# # First attempt direct equality with trackers
|
||||
# for i, validator in enumerate(state.validators):
|
||||
# # # This is insanely slow
|
||||
# # if validator.whisk_tracker == proposer_tracker:
|
||||
# if True:
|
||||
# return i
|
||||
# # In Whisk where to get proposer from?
|
||||
# raise Exception("proposer_tracker not matched")
|
||||
raise Exception("proposer not known without heavy math")
|
||||
|
||||
|
||||
def build_empty_block_for_next_slot(spec, state, proposer_index=None):
    return build_empty_block(spec, state, state.slot + 1, proposer_index)


def get_state_and_beacon_parent_root_at_slot(spec, state, slot):
@@ -18,6 +18,8 @@ CUSTODY_GAME = SpecForkName('custody_game')
DAS = SpecForkName('das')
EIP6110 = SpecForkName('eip6110')
EIP7002 = SpecForkName('eip7002')
WHISK = SpecForkName('whisk')
EIP7594 = SpecForkName('eip7594')

#
# SpecFork settings

@@ -27,6 +29,8 @@ EIP7002 = SpecForkName('eip7002')
MAINNET_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA)
LATEST_FORK = MAINNET_FORKS[-1]
# The forks that pytest can run with.
# Note: when adding a new fork here, all tests from previous forks with decorator `with_X_and_later`
# will run on the new fork. To skip this behaviour, add the fork to `ALLOWED_TEST_RUNNER_FORKS`
ALL_PHASES = (
    # Formal forks
    *MAINNET_FORKS,

@@ -34,13 +38,32 @@ ALL_PHASES = (
    # Experimental patches
    EIP6110,
    EIP7002,
    EIP7594,
)
# The forks that have light client specs
LIGHT_CLIENT_TESTING_FORKS = (*[item for item in MAINNET_FORKS if item != PHASE0], DENEB)
# The forks that output to the test vectors.
TESTGEN_FORKS = (*MAINNET_FORKS, DENEB, EIP6110)
TESTGEN_FORKS = (*MAINNET_FORKS, DENEB, EIP6110, WHISK)
# Forks allowed in the test runner `--fork` flag, to fail fast in case of typos
ALLOWED_TEST_RUNNER_FORKS = (*ALL_PHASES, WHISK)

ALL_FORK_UPGRADES = {
# NOTE: the same definition as in `pysetup/md_doc_paths.py`
PREVIOUS_FORK_OF = {
    # post_fork_name: pre_fork_name
    PHASE0: None,
    ALTAIR: PHASE0,
    BELLATRIX: ALTAIR,
    CAPELLA: BELLATRIX,
    DENEB: CAPELLA,
    # Experimental patches
    EIP6110: DENEB,
    WHISK: CAPELLA,
    EIP7002: CAPELLA,
    EIP7594: DENEB,
}

# For fork transition tests
POST_FORK_OF = {
    # pre_fork_name: post_fork_name
    PHASE0: ALTAIR,
    ALTAIR: BELLATRIX,

@@ -48,15 +71,11 @@ ALL_FORK_UPGRADES = {
    CAPELLA: DENEB,
    DENEB: EIP6110,
}
ALL_PRE_POST_FORKS = ALL_FORK_UPGRADES.items()
AFTER_BELLATRIX_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key != PHASE0}
AFTER_BELLATRIX_PRE_POST_FORKS = AFTER_BELLATRIX_UPGRADES.items()
AFTER_CAPELLA_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items()
                          if key not in [PHASE0, ALTAIR]}
AFTER_CAPELLA_PRE_POST_FORKS = AFTER_CAPELLA_UPGRADES.items()
AFTER_DENEB_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items()
                        if key not in [PHASE0, ALTAIR, BELLATRIX]}
AFTER_DENEB_PRE_POST_FORKS = AFTER_DENEB_UPGRADES.items()

ALL_PRE_POST_FORKS = POST_FORK_OF.items()
DENEB_TRANSITION_UPGRADES_AND_AFTER = {key: value for key, value in POST_FORK_OF.items()
                                       if key not in [PHASE0, ALTAIR, BELLATRIX]}
AFTER_DENEB_PRE_POST_FORKS = DENEB_TRANSITION_UPGRADES_AND_AFTER.items()

#
# Config and Preset
@@ -3,7 +3,10 @@ from enum import Enum, auto
from eth2spec.test.helpers.attester_slashings import (
    get_valid_attester_slashing_by_indices,
)
from eth2spec.test.helpers.attestations import next_slots_with_attestations
from eth2spec.test.helpers.attestations import (
    next_slots_with_attestations,
    state_transition_with_full_block,
)
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
    build_empty_block,

@@ -11,12 +14,9 @@ from eth2spec.test.helpers.block import (
)
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
from eth2spec.test.helpers.constants import (
    ALTAIR,
    BELLATRIX,
    CAPELLA,
    DENEB,
    EIP6110,
    EIP7002,
    PHASE0,
    POST_FORK_OF,
    PREVIOUS_FORK_OF,
)
from eth2spec.test.helpers.deposits import (
    prepare_state_and_deposit,

@@ -24,6 +24,9 @@ from eth2spec.test.helpers.deposits import (
from eth2spec.test.helpers.proposer_slashings import (
    get_valid_proposer_slashing,
)
from eth2spec.test.helpers.forks import (
    get_next_fork_transition,
)
from eth2spec.test.helpers.state import (
    next_slot,
    state_transition_and_sign_block,
@@ -146,45 +149,37 @@ def state_transition_across_slots_with_ignoring_proposers(spec,
        next_slot(spec, state)


def get_upgrade_fn(spec, fork):
    # pylint: disable=unused-argument
    # NOTE: `spec` is used for the `eval` call
    assert fork in POST_FORK_OF.values()
    try:
        # TODO: make all upgrade_to_* function names consistent?
        fn = eval(f"spec.upgrade_to_{fork}")
        return fn
    except Exception:
        raise ValueError(f"Unknown fork: {fork}")


def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None):
    spec.process_slots(state, state.slot + 1)

    assert state.slot % spec.SLOTS_PER_EPOCH == 0
    assert spec.get_current_epoch(state) == fork_epoch

    if post_spec.fork == ALTAIR:
        state = post_spec.upgrade_to_altair(state)
    elif post_spec.fork == BELLATRIX:
        state = post_spec.upgrade_to_bellatrix(state)
    elif post_spec.fork == CAPELLA:
        state = post_spec.upgrade_to_capella(state)
    elif post_spec.fork == DENEB:
        state = post_spec.upgrade_to_deneb(state)
    elif post_spec.fork == EIP6110:
        state = post_spec.upgrade_to_eip6110(state)
    elif post_spec.fork == EIP7002:
        state = post_spec.upgrade_to_eip7002(state)
    state = get_upgrade_fn(post_spec, post_spec.fork)(state)

    assert state.fork.epoch == fork_epoch

    if post_spec.fork == ALTAIR:
        assert state.fork.previous_version == post_spec.config.GENESIS_FORK_VERSION
        assert state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION
    elif post_spec.fork == BELLATRIX:
        assert state.fork.previous_version == post_spec.config.ALTAIR_FORK_VERSION
        assert state.fork.current_version == post_spec.config.BELLATRIX_FORK_VERSION
    elif post_spec.fork == CAPELLA:
        assert state.fork.previous_version == post_spec.config.BELLATRIX_FORK_VERSION
        assert state.fork.current_version == post_spec.config.CAPELLA_FORK_VERSION
    elif post_spec.fork == DENEB:
        assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION
        assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION
    elif post_spec.fork == EIP6110:
        assert state.fork.previous_version == post_spec.config.DENEB_FORK_VERSION
        assert state.fork.current_version == post_spec.config.EIP6110_FORK_VERSION
    elif post_spec.fork == EIP7002:
        assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION
        assert state.fork.current_version == post_spec.config.EIP7002_FORK_VERSION
    previous_fork = PREVIOUS_FORK_OF[post_spec.fork]
    if previous_fork == PHASE0:
        previous_version = spec.config.GENESIS_FORK_VERSION
    else:
        previous_version = getattr(post_spec.config, f"{previous_fork.upper()}_FORK_VERSION")
    current_version = getattr(post_spec.config, f"{post_spec.fork.upper()}_FORK_VERSION")

    assert state.fork.previous_version == previous_version
    assert state.fork.current_version == current_version

    if with_block:
        return state, _state_transition_and_sign_block_at_slot(
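For intuition, here is what the generic lookup replaces the removed per-fork branches with; a self-contained, hedged illustration using a stub config rather than a real spec module (the fork names and `*_FORK_VERSION` naming pattern come from the constants file above, the version values are made up):

```python
# Illustrative only: mirrors the generic previous/current version computation.
class StubConfig:
    GENESIS_FORK_VERSION = "0x00"
    CAPELLA_FORK_VERSION = "0x03"
    DENEB_FORK_VERSION = "0x04"

PREVIOUS_FORK_OF = {"deneb": "capella", "altair": "phase0"}

def fork_versions(config, post_fork):
    previous_fork = PREVIOUS_FORK_OF[post_fork]
    if previous_fork == "phase0":
        previous_version = config.GENESIS_FORK_VERSION
    else:
        previous_version = getattr(config, f"{previous_fork.upper()}_FORK_VERSION")
    current_version = getattr(config, f"{post_fork.upper()}_FORK_VERSION")
    return previous_version, current_version

assert fork_versions(StubConfig, "deneb") == ("0x03", "0x04")
```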
@@ -207,6 +202,34 @@ def _transition_until_fork_minus_one(spec, state, fork_epoch):
    transition_to(spec, state, to_slot)


def transition_across_forks(spec, state, to_slot, phases=None, with_block=False, sync_aggregate=None):
    assert to_slot > state.slot
    state = state.copy()
    block = None
    to_epoch = spec.compute_epoch_at_slot(to_slot)
    while state.slot < to_slot:
        assert block is None
        epoch = spec.compute_epoch_at_slot(state.slot)
        post_spec, fork_epoch = get_next_fork_transition(spec, epoch, phases)
        if fork_epoch is None or to_epoch < fork_epoch:
            if with_block and (to_slot == state.slot + 1):
                transition_to(spec, state, to_slot - 1)
                block = state_transition_with_full_block(
                    spec, state, True, True,
                    sync_aggregate=sync_aggregate)
            else:
                transition_to(spec, state, to_slot)
        else:
            transition_until_fork(spec, state, fork_epoch)
            state, block = do_fork(
                state, spec, post_spec, fork_epoch,
                with_block=with_block and (to_slot == state.slot + 1),
                sync_aggregate=sync_aggregate,
            )
            spec = post_spec
    return spec, state, block


def transition_to_next_epoch_and_append_blocks(spec,
                                               state,
                                               post_tag,
@@ -1,25 +1,24 @@
from .constants import (
    PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
    EIP6110, EIP7002,
    EIP6110, EIP7002, WHISK,
    PREVIOUS_FORK_OF,
)


def is_post_fork(a, b):
    if a == EIP7002:
        return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP7002]
    if a == EIP6110:
        return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110]
    if a == DENEB:
        return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB]
    if a == CAPELLA:
        return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA]
    if a == BELLATRIX:
        return b in [PHASE0, ALTAIR, BELLATRIX]
    if a == ALTAIR:
        return b in [PHASE0, ALTAIR]
    if a == PHASE0:
        return b in [PHASE0]
    raise ValueError("Unknown fork name %s" % a)
def is_post_fork(a, b) -> bool:
    """
    Returns True if fork ``a`` is at or after fork ``b``, i.e. ``a == b`` or ``a`` descends from ``b``.
    """
    if a == b:
        return True

    prev_fork = PREVIOUS_FORK_OF[a]
    if prev_fork == b:
        return True
    elif prev_fork is None:
        return False
    else:
        return is_post_fork(prev_fork, b)


def is_post_altair(spec):
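A quick, self-contained sanity check of that recursion; this is illustrative only, walking a small hand-written slice of `PREVIOUS_FORK_OF` rather than the real constants:

```python
# Illustrative only: mirrors the recursive is_post_fork above.
PREVIOUS_FORK_OF = {"phase0": None, "altair": "phase0", "bellatrix": "altair"}

def is_post_fork(a, b) -> bool:
    if a == b:
        return True
    prev_fork = PREVIOUS_FORK_OF[a]
    if prev_fork == b:
        return True
    elif prev_fork is None:
        return False
    return is_post_fork(prev_fork, b)

assert is_post_fork("bellatrix", "phase0")      # walks bellatrix -> altair -> phase0
assert not is_post_fork("phase0", "bellatrix")  # phase0 has no previous fork
```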
@@ -44,3 +43,31 @@ def is_post_eip6110(spec):

def is_post_eip7002(spec):
    return is_post_fork(spec.fork, EIP7002)


def is_post_whisk(spec):
    return is_post_fork(spec.fork, WHISK)


def get_spec_for_fork_version(spec, fork_version, phases):
    if phases is None:
        return spec
    for fork in [fork for fork in phases if is_post_fork(spec.fork, fork)]:
        if fork == PHASE0:
            fork_version_field = 'GENESIS_FORK_VERSION'
        else:
            fork_version_field = fork.upper() + '_FORK_VERSION'
        if fork_version == getattr(spec.config, fork_version_field):
            return phases[fork]
    raise ValueError("Unknown fork version %s" % fork_version)


def get_next_fork_transition(spec, epoch, phases):
    if phases is None:
        return None, None
    for fork in [fork for fork in phases if PREVIOUS_FORK_OF[fork] == spec.fork]:
        assert fork != PHASE0  # PHASE0 does not have previous fork
        fork_epoch = getattr(phases[fork].config, fork.upper() + '_FORK_EPOCH')
        assert fork_epoch > epoch  # Forks through given epoch already applied
        return phases[fork], fork_epoch
    return None, None  # Already at latest fork
@@ -1,13 +1,15 @@
from eth2spec.test.helpers.constants import (
    ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110, EIP7002,
    PHASE0,
    PREVIOUS_FORK_OF,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_header_block_hash,
)
from eth2spec.test.helpers.forks import (
    is_post_altair, is_post_bellatrix, is_post_capella, is_post_eip6110, is_post_eip7002,
    is_post_altair, is_post_bellatrix, is_post_capella, is_post_eip6110, is_post_eip7002, is_post_whisk,
)
from eth2spec.test.helpers.keys import pubkeys
from eth2spec.test.helpers.whisk import compute_whisk_initial_tracker_cached, compute_whisk_initial_k_commitment_cached


def build_mock_validator(spec, i: int, balance: int):

@@ -76,23 +78,13 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
    previous_version = spec.config.GENESIS_FORK_VERSION
    current_version = spec.config.GENESIS_FORK_VERSION

    if spec.fork == ALTAIR:
        current_version = spec.config.ALTAIR_FORK_VERSION
    elif spec.fork == BELLATRIX:
        previous_version = spec.config.ALTAIR_FORK_VERSION
        current_version = spec.config.BELLATRIX_FORK_VERSION
    elif spec.fork == CAPELLA:
        previous_version = spec.config.BELLATRIX_FORK_VERSION
        current_version = spec.config.CAPELLA_FORK_VERSION
    elif spec.fork == DENEB:
        previous_version = spec.config.CAPELLA_FORK_VERSION
        current_version = spec.config.DENEB_FORK_VERSION
    elif spec.fork == EIP6110:
        previous_version = spec.config.DENEB_FORK_VERSION
        current_version = spec.config.EIP6110_FORK_VERSION
    elif spec.fork == EIP7002:
        previous_version = spec.config.CAPELLA_FORK_VERSION
        current_version = spec.config.EIP7002_FORK_VERSION
    if spec.fork != PHASE0:
        previous_fork = PREVIOUS_FORK_OF[spec.fork]
        if previous_fork == PHASE0:
            previous_version = spec.config.GENESIS_FORK_VERSION
        else:
            previous_version = getattr(spec.config, f"{previous_fork.upper()}_FORK_VERSION")
        current_version = getattr(spec.config, f"{spec.fork.upper()}_FORK_VERSION")

    state = spec.BeaconState(
        genesis_time=0,

@@ -145,4 +137,16 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
    if is_post_eip6110(spec):
        state.deposit_receipts_start_index = spec.UNSET_DEPOSIT_RECEIPTS_START_INDEX

    if is_post_whisk(spec):
        vc = len(state.validators)
        for i in range(vc):
            state.whisk_k_commitments.append(compute_whisk_initial_k_commitment_cached(i))
            state.whisk_trackers.append(compute_whisk_initial_tracker_cached(i))

        for i in range(spec.WHISK_CANDIDATE_TRACKERS_COUNT):
            state.whisk_candidate_trackers[i] = compute_whisk_initial_tracker_cached(i % vc)

        for i in range(spec.WHISK_PROPOSER_TRACKERS_COUNT):
            state.whisk_proposer_trackers[i] = compute_whisk_initial_tracker_cached(i % vc)

    return state
@@ -4,3 +4,18 @@ from py_ecc.bls import G2ProofOfPossession as bls
privkeys = [i + 1 for i in range(32 * 256)]
pubkeys = [bls.SkToPk(privkey) for privkey in privkeys]
pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}

known_whisk_trackers = {}


def register_known_whisk_tracker(k_r_G: bytes, index: int):
    known_whisk_trackers[k_r_G] = index


def whisk_ks_initial(i: int):
    return i


# Must be unique among the set `whisk_ks_initial + whisk_ks_final`
def whisk_ks_final(i: int):
    return i + 10000000
@ -1,5 +1,8 @@
|
|||
from eth2spec.test.helpers.state import (
|
||||
transition_to,
|
||||
from eth2spec.test.helpers.fork_transition import (
|
||||
transition_across_forks,
|
||||
)
|
||||
from eth2spec.test.helpers.forks import (
|
||||
is_post_capella, is_post_deneb,
|
||||
)
|
||||
from eth2spec.test.helpers.sync_committee import (
|
||||
compute_aggregate_sync_committee_signature,
|
||||
|
@ -8,14 +11,23 @@ from eth2spec.test.helpers.sync_committee import (
|
|||
from math import floor
|
||||
|
||||
|
||||
def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
|
||||
def compute_start_slot_at_sync_committee_period(spec, sync_committee_period):
|
||||
return spec.compute_start_slot_at_epoch(sync_committee_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||
|
||||
|
||||
def compute_start_slot_at_next_sync_committee_period(spec, state):
|
||||
sync_committee_period = spec.compute_sync_committee_period_at_slot(state.slot)
|
||||
return compute_start_slot_at_sync_committee_period(spec, sync_committee_period + 1)
|
||||
|
||||
|
||||
def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None, phases=None):
|
||||
# By default, the sync committee signs the previous slot
|
||||
if signature_slot is None:
|
||||
signature_slot = state.slot + 1
|
||||
assert signature_slot > state.slot
|
||||
|
||||
# Ensure correct sync committee and fork version are selected
|
||||
signature_state = state.copy()
|
||||
transition_to(spec, signature_state, signature_slot)
|
||||
signature_spec, signature_state, _ = transition_across_forks(spec, state, signature_slot, phases)
|
||||
|
||||
# Fetch sync committee
|
||||
committee_indices = compute_committee_indices(signature_state)
|
||||
|
@ -29,12 +41,12 @@ def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
|
|||
# Compute sync aggregate
|
||||
sync_committee_bits = [True] * num_participants + [False] * (committee_size - num_participants)
|
||||
sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
signature_spec,
|
||||
signature_state,
|
||||
max(signature_slot, 1) - 1,
|
||||
committee_indices[:num_participants],
|
||||
)
|
||||
sync_aggregate = spec.SyncAggregate(
|
||||
sync_aggregate = signature_spec.SyncAggregate(
|
||||
sync_committee_bits=sync_committee_bits,
|
||||
sync_committee_signature=sync_committee_signature,
|
||||
)
|
||||
|
@ -56,13 +68,136 @@ def create_update(spec,
|
|||
|
||||
if with_next:
|
||||
update.next_sync_committee = attested_state.next_sync_committee
|
||||
update.next_sync_committee_branch = spec.compute_merkle_proof(attested_state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
update.next_sync_committee_branch = spec.compute_merkle_proof(attested_state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
|
||||
|
||||
if with_finality:
|
||||
update.finalized_header = spec.block_to_light_client_header(finalized_block)
|
||||
update.finality_branch = spec.compute_merkle_proof(attested_state, spec.FINALIZED_ROOT_INDEX)
|
||||
update.finality_branch = spec.compute_merkle_proof(attested_state, spec.FINALIZED_ROOT_GINDEX)
|
||||
|
||||
update.sync_aggregate, update.signature_slot = get_sync_aggregate(
|
||||
spec, attested_state, num_participants)
|
||||
|
||||
return update
|
||||
|
||||
|
||||
def needs_upgrade_to_capella(spec, new_spec):
    return is_post_capella(new_spec) and not is_post_capella(spec)


def needs_upgrade_to_deneb(spec, new_spec):
    return is_post_deneb(new_spec) and not is_post_deneb(spec)


def check_lc_header_equal(spec, new_spec, data, upgraded):
    assert upgraded.beacon.slot == data.beacon.slot
    assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root()
    if is_post_capella(new_spec):
        if is_post_capella(spec):
            assert new_spec.get_lc_execution_root(upgraded) == spec.get_lc_execution_root(data)
        else:
            assert new_spec.get_lc_execution_root(upgraded) == new_spec.Root()


def upgrade_lc_header_to_new_spec(spec, new_spec, data):
    upgraded = data

    if needs_upgrade_to_capella(spec, new_spec):
        upgraded = new_spec.upgrade_lc_header_to_capella(upgraded)
        check_lc_header_equal(spec, new_spec, data, upgraded)

    if needs_upgrade_to_deneb(spec, new_spec):
        upgraded = new_spec.upgrade_lc_header_to_deneb(upgraded)
        check_lc_header_equal(spec, new_spec, data, upgraded)

    return upgraded


def check_lc_bootstrap_equal(spec, new_spec, data, upgraded):
    check_lc_header_equal(spec, new_spec, data.header, upgraded.header)
    assert upgraded.current_sync_committee == data.current_sync_committee
    assert upgraded.current_sync_committee_branch == data.current_sync_committee_branch


def upgrade_lc_bootstrap_to_new_spec(spec, new_spec, data):
    upgraded = data

    if needs_upgrade_to_capella(spec, new_spec):
        upgraded = new_spec.upgrade_lc_bootstrap_to_capella(upgraded)
        check_lc_bootstrap_equal(spec, new_spec, data, upgraded)

    if needs_upgrade_to_deneb(spec, new_spec):
        upgraded = new_spec.upgrade_lc_bootstrap_to_deneb(upgraded)
        check_lc_bootstrap_equal(spec, new_spec, data, upgraded)

    return upgraded


def check_lc_update_equal(spec, new_spec, data, upgraded):
    check_lc_header_equal(spec, new_spec, data.attested_header, upgraded.attested_header)
    assert upgraded.next_sync_committee == data.next_sync_committee
    assert upgraded.next_sync_committee_branch == data.next_sync_committee_branch
    check_lc_header_equal(spec, new_spec, data.finalized_header, upgraded.finalized_header)
    assert upgraded.sync_aggregate == data.sync_aggregate
    assert upgraded.signature_slot == data.signature_slot


def upgrade_lc_update_to_new_spec(spec, new_spec, data):
    upgraded = data

    if needs_upgrade_to_capella(spec, new_spec):
        upgraded = new_spec.upgrade_lc_update_to_capella(upgraded)
        check_lc_update_equal(spec, new_spec, data, upgraded)

    if needs_upgrade_to_deneb(spec, new_spec):
        upgraded = new_spec.upgrade_lc_update_to_deneb(upgraded)
        check_lc_update_equal(spec, new_spec, data, upgraded)

    return upgraded


def check_lc_finality_update_equal(spec, new_spec, data, upgraded):
    check_lc_header_equal(spec, new_spec, data.attested_header, upgraded.attested_header)
    check_lc_header_equal(spec, new_spec, data.finalized_header, upgraded.finalized_header)
    assert upgraded.sync_aggregate == data.sync_aggregate
    assert upgraded.signature_slot == data.signature_slot


def upgrade_lc_finality_update_to_new_spec(spec, new_spec, data):
    upgraded = data

    if needs_upgrade_to_capella(spec, new_spec):
        upgraded = new_spec.upgrade_lc_finality_update_to_capella(upgraded)
        check_lc_finality_update_equal(spec, new_spec, data, upgraded)

    if needs_upgrade_to_deneb(spec, new_spec):
        upgraded = new_spec.upgrade_lc_finality_update_to_deneb(upgraded)
        check_lc_finality_update_equal(spec, new_spec, data, upgraded)

    return upgraded


def check_lc_store_equal(spec, new_spec, data, upgraded):
    check_lc_header_equal(spec, new_spec, data.finalized_header, upgraded.finalized_header)
    assert upgraded.current_sync_committee == data.current_sync_committee
    assert upgraded.next_sync_committee == data.next_sync_committee
    if upgraded.best_valid_update is None:
        assert data.best_valid_update is None
    else:
        check_lc_update_equal(spec, new_spec, data.best_valid_update, upgraded.best_valid_update)
    check_lc_header_equal(spec, new_spec, data.optimistic_header, upgraded.optimistic_header)
    assert upgraded.previous_max_active_participants == data.previous_max_active_participants
    assert upgraded.current_max_active_participants == data.current_max_active_participants


def upgrade_lc_store_to_new_spec(spec, new_spec, data):
    upgraded = data

    if needs_upgrade_to_capella(spec, new_spec):
        upgraded = new_spec.upgrade_lc_store_to_capella(upgraded)
        check_lc_store_equal(spec, new_spec, data, upgraded)

    if needs_upgrade_to_deneb(spec, new_spec):
        upgraded = new_spec.upgrade_lc_store_to_deneb(upgraded)
        check_lc_store_equal(spec, new_spec, data, upgraded)

    return upgraded
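A hedged sketch of how these helpers chain; the spec module names below are placeholders for whatever pair a test supplies:

    # Hypothetical: light client data collected under an Altair spec, checked
    # against a Deneb spec. Each needs_upgrade_* gate applies the matching
    # upgrade_lc_* function and re-checks field equality along the way.
    upgraded = upgrade_lc_update_to_new_spec(spec_altair, spec_deneb, update)
    assert upgraded.signature_slot == update.signature_slot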
@ -0,0 +1,26 @@
from typing import (
    Dict,
)
from .constants import (
    MINIMAL, MAINNET,
    ALL_PHASES, WHISK,
)
from .typing import (
    PresetBaseName,
    SpecForkName,
    Spec,
)


# NOTE: special case like `ALLOWED_TEST_RUNNER_FORKS`
ALL_EXECUTABLE_SPEC_NAMES = ALL_PHASES + (WHISK,)

# import the spec for each fork and preset
for fork in ALL_EXECUTABLE_SPEC_NAMES:
    exec(f"from eth2spec.{fork} import mainnet as spec_{fork}_mainnet, minimal as spec_{fork}_minimal")

# this is the only output of this file
spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
    MINIMAL: {fork: eval(f"spec_{fork}_minimal") for fork in ALL_EXECUTABLE_SPEC_NAMES},
    MAINNET: {fork: eval(f"spec_{fork}_mainnet") for fork in ALL_EXECUTABLE_SPEC_NAMES},
}
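A small sketch of the intended lookup; the fork name and assertions are illustrative:

    # Hypothetical consumer: pick a compiled spec module by preset and fork.
    spec = spec_targets[MINIMAL]["capella"]
    assert spec.fork == "capella"
    assert spec.config.PRESET_BASE == "minimal"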
@ -1,4 +1,18 @@
from typing import NewType
from typing import (
    NewType,
    Protocol,
    Sequence,
)

SpecForkName = NewType("SpecForkName", str)
PresetBaseName = NewType("PresetBaseName", str)
SpecForks = Sequence[SpecForkName]


class Configuration(Protocol):
    PRESET_BASE: str


class Spec(Protocol):
    fork: str
    config: Configuration
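The Protocol classes only pin down the attributes the tests rely on; any compiled spec module satisfies them structurally. A sketch of a helper typed against the Protocol rather than a concrete module (the function is illustrative):

    def describe(spec: Spec) -> str:
        return f"{spec.fork} ({spec.config.PRESET_BASE})"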
@ -0,0 +1,99 @@
from typing import Tuple, Optional
from eth_typing import BLSPubkey
from curdleproofs import GenerateWhiskTrackerProof, WhiskTracker
from eth2spec.test.helpers.keys import whisk_ks_initial
from py_arkworks_bls12381 import G1Point, Scalar


# Map of validator index to initial WhiskTracker (r = 1, k = index)
whisk_initial_tracker_cache_by_index = {}
# Map of validator index to k commitment (k = index)
whisk_initial_k_commitment_cache_by_index = {}
# Map of k_r_G to validator index
whisk_initial_tracker_cache_by_k_r_G = {}
INITIAL_R = 1

# Generator
G1 = G1Point()


def compute_whisk_initial_tracker_cached(i: int) -> WhiskTracker:
    if i in whisk_initial_tracker_cache_by_index:
        return whisk_initial_tracker_cache_by_index[i]

    tracker = compute_whisk_tracker(whisk_ks_initial(i), INITIAL_R)
    whisk_initial_tracker_cache_by_index[i] = tracker
    whisk_initial_tracker_cache_by_k_r_G[tracker.k_r_G] = i
    return tracker


def compute_whisk_initial_k_commitment_cached(i: int) -> BLSPubkey:
    if i in whisk_initial_k_commitment_cache_by_index:
        return whisk_initial_k_commitment_cache_by_index[i]

    commitment = compute_whisk_k_commitment(whisk_ks_initial(i))
    whisk_initial_k_commitment_cache_by_index[i] = commitment
    return commitment


def resolve_known_tracker(tracker: WhiskTracker) -> Optional[int]:
    if tracker.k_r_G in whisk_initial_tracker_cache_by_k_r_G:
        return whisk_initial_tracker_cache_by_k_r_G[tracker.k_r_G]
    else:
        return None


def g1point_to_bytes(point: G1Point) -> bytes:
    return bytes(point.to_compressed_bytes())


def compute_whisk_k_commitment(k: int) -> BLSPubkey:
    return g1point_to_bytes(G1 * Scalar(k))


def compute_whisk_tracker(k: int, r: int) -> WhiskTracker:
    r_G = G1 * Scalar(r)
    k_r_G = r_G * Scalar(k)
    return WhiskTracker(g1point_to_bytes(r_G), g1point_to_bytes(k_r_G))


def compute_whisk_tracker_and_commitment(k: int, r: int) -> Tuple[WhiskTracker, BLSPubkey]:
    k_G = G1 * Scalar(k)
    r_G = G1 * Scalar(r)
    k_r_G = r_G * Scalar(k)
    tracker = WhiskTracker(g1point_to_bytes(r_G), g1point_to_bytes(k_r_G))
    commitment = g1point_to_bytes(k_G)
    return tracker, commitment
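A worked example of the tracker algebra above, with illustrative values: for k = 2 and r = 3, the tracker is (r_G, k_r_G) = (3·G1, 6·G1) and the k-commitment is 2·G1, so the commitment and tracker share the same secret k without revealing it.

    # Check of the relation, reusing the helpers defined above.
    tracker, commitment = compute_whisk_tracker_and_commitment(k=2, r=3)
    assert tracker.r_G == g1point_to_bytes(G1 * Scalar(3))
    assert tracker.k_r_G == g1point_to_bytes(G1 * Scalar(6))
    assert commitment == compute_whisk_k_commitment(2)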
# Trigger condition for first proposal
def set_as_first_proposal(spec, state, proposer_index: int):
    if state.whisk_trackers[proposer_index].r_G != spec.BLS_G1_GENERATOR:
        # Ensure tracker is empty to prevent breaking it
        assert state.whisk_trackers[proposer_index].r_G == spec.BLSG1Point()
        state.whisk_trackers[proposer_index].r_G = spec.BLS_G1_GENERATOR


def is_first_proposal(spec, state, proposer_index: int) -> bool:
    return state.whisk_trackers[proposer_index].r_G == spec.BLS_G1_GENERATOR


def register_tracker(state, proposer_index: int, k: int, r: int):
    tracker, k_commitment = compute_whisk_tracker_and_commitment(k, r)
    state.whisk_trackers[proposer_index] = tracker
    state.whisk_k_commitments[proposer_index] = k_commitment


def set_registration(body, k: int, r: int):
    tracker, k_commitment = compute_whisk_tracker_and_commitment(k, r)
    body.whisk_registration_proof = GenerateWhiskTrackerProof(tracker, Scalar(k))
    body.whisk_tracker = tracker
    body.whisk_k_commitment = k_commitment


def set_opening_proof(spec, state, block, proposer_index: int, k: int, r: int):
    tracker, k_commitment = compute_whisk_tracker_and_commitment(k, r)
    state.whisk_proposer_trackers[state.slot % spec.WHISK_PROPOSER_TRACKERS_COUNT] = tracker
    state.whisk_k_commitments[proposer_index] = k_commitment
    block.proposer_index = proposer_index
    block.body.whisk_opening_proof = GenerateWhiskTrackerProof(tracker, Scalar(k))
@ -1,5 +1,4 @@
from eth2spec.test.context import (
    MAINNET,
    spec_state_test,
    with_altair_and_later,
    with_presets,
@ -11,6 +10,7 @@ from eth2spec.test.helpers.attestations import (
from eth2spec.test.helpers.block import (
    build_empty_block,
)
from eth2spec.test.helpers.constants import MAINNET
from eth2spec.test.helpers.fork_choice import (
    get_genesis_forkchoice_store_and_block,
    on_tick_and_append_step,
@ -286,34 +286,43 @@ def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justif
    for _ in range(2):
        state, store, _ = yield from apply_next_epoch_with_attestations(
            spec, state, store, True, True, test_steps=test_steps)

    if is_justifying_previous_epoch:
        # build chain with head in epoch 3 and justified checkpoint in epoch 2
        block_a = build_empty_block_for_next_slot(spec, state)
        signed_block_a = state_transition_and_sign_block(spec, state, block_a)
        yield from tick_and_add_block(spec, store, signed_block_a, test_steps)
        assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 3
        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
    else:
        # fill one more epoch
        # build chain with head in epoch 4 and justified checkpoint in epoch 3
        state, store, _ = yield from apply_next_epoch_with_attestations(
            spec, state, store, True, True, test_steps=test_steps)
        signed_block_a = state_transition_with_full_block(spec, state, True, True)
        yield from tick_and_add_block(spec, store, signed_block_a, test_steps)
        assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3

    assert spec.get_head(store) == signed_block_a.message.hash_tree_root()

    state = store.block_states[spec.get_head(store)].copy()
    if is_justifying_previous_epoch:
        assert state.current_justified_checkpoint.epoch == 2
    else:
        assert state.current_justified_checkpoint.epoch == 3
    state_a = state.copy()

    if is_justifying_previous_epoch:
        # try to find the block that can justify epoch 3
        _, justifying_slot = find_next_justifying_slot(spec, state, False, True)
        assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 3
        assert spec.compute_epoch_at_slot(state.slot) == 3
        assert state.current_justified_checkpoint.epoch == 2
    else:
        # try to find the block that can justify epoch 4
        assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
        assert spec.compute_epoch_at_slot(state.slot) == 4
        assert state.current_justified_checkpoint.epoch == 3

    if is_justifying_previous_epoch:
        # try to find the block that can justify epoch 3 by including only previous epoch attestations
        _, justifying_slot = find_next_justifying_slot(spec, state, False, True)
        assert spec.compute_epoch_at_slot(justifying_slot) == 4
    else:
        # try to find the block that can justify epoch 4 by including current epoch attestations
        _, justifying_slot = find_next_justifying_slot(spec, state, True, True)
        assert spec.compute_epoch_at_slot(justifying_slot) == 4

    last_slot_of_z = justifying_slot if enough_ffg else justifying_slot - 1
    last_slot_of_y = justifying_slot if is_justifying_previous_epoch else last_slot_of_z - 1
@ -324,15 +333,14 @@ def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justif
    # build an empty chain to the slot prior to the epoch boundary
    signed_blocks_of_empty_chain = []
    states_of_empty_chain = []

    for slot in range(state.slot + 1, last_slot_of_y + 1):
        block = build_empty_block(spec, state, slot=slot)
        signed_block = state_transition_and_sign_block(spec, state, block)
        signed_blocks_of_empty_chain.append(signed_block)
        states_of_empty_chain.append(state.copy())
        signed_blocks_of_y.append(signed_block)

    signed_block_y = signed_blocks_of_empty_chain[-1]
    assert spec.compute_epoch_at_slot(signed_block_y.message.slot) == 4

    # create 2/3 votes for the empty chain
    attestations_for_y = []
@ -345,7 +353,6 @@ def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justif

    state = state_a.copy()
    signed_block_z = None

    for slot in range(state_a.slot + 1, last_slot_of_z + 1):
        # apply chain y, the empty chain
        if slot <= last_slot_of_y and len(signed_blocks_of_y) > 0:
@ -368,12 +375,21 @@ def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justif
        if is_ready_to_justify(spec, state):
            break

    assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
    assert spec.compute_epoch_at_slot(signed_block_y.message.slot) == 4
    assert spec.compute_epoch_at_slot(signed_block_z.message.slot) == 4

    # y is not filtered out & wins the LMD competition, so y should be the head
    y_voting_source_epoch = spec.get_voting_source(store, signed_block_y.message.hash_tree_root()).epoch
    if is_justifying_previous_epoch:
        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
        assert y_voting_source_epoch == 2
        assert y_voting_source_epoch == store.justified_checkpoint.epoch
    else:
        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
        assert y_voting_source_epoch == 3
        assert y_voting_source_epoch == store.justified_checkpoint.epoch
    assert spec.get_head(store) == signed_block_y.message.hash_tree_root()

    if enough_ffg:
        assert is_ready_to_justify(spec, state)
@ -386,17 +402,57 @@ def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justif
    on_tick_and_append_step(spec, store, current_time, test_steps)
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5

    if enough_ffg:
        # reorg
        assert spec.get_head(store) == signed_block_z.message.hash_tree_root()
        if is_justifying_previous_epoch:
            assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
        else:
            assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
    else:
        # no reorg
        assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
    y_voting_source_epoch = spec.get_voting_source(store, signed_block_y.message.hash_tree_root()).epoch
    if is_justifying_previous_epoch:
        # y is filtered out & so z should be the head
        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
        assert y_voting_source_epoch == 2
        assert y_voting_source_epoch != store.justified_checkpoint.epoch
        assert not (y_voting_source_epoch + 2 >= spec.compute_epoch_at_slot(spec.get_current_slot(store)))
        assert spec.get_head(store) == signed_block_z.message.hash_tree_root()
    else:
        if enough_ffg:
            # y is not filtered out & wins the LMD competition, so y should be the head
            assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
            assert y_voting_source_epoch == 3
            assert y_voting_source_epoch != store.justified_checkpoint.epoch
            assert y_voting_source_epoch + 2 >= spec.compute_epoch_at_slot(spec.get_current_slot(store))
            assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
        else:
            # y is not filtered out & wins the LMD competition, so y should be the head
            assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
            assert y_voting_source_epoch == 3
            assert y_voting_source_epoch == store.justified_checkpoint.epoch
            assert spec.get_head(store) == signed_block_y.message.hash_tree_root()

    # to next epoch
    next_epoch(spec, state)
    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
    on_tick_and_append_step(spec, store, current_time, test_steps)
    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6

    y_voting_source_epoch = spec.get_voting_source(store, signed_block_y.message.hash_tree_root()).epoch
    if is_justifying_previous_epoch:
        # y is filtered out & so z should be the head
        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
        assert y_voting_source_epoch == 2
        assert y_voting_source_epoch != store.justified_checkpoint.epoch
        assert not (y_voting_source_epoch + 2 >= spec.compute_epoch_at_slot(spec.get_current_slot(store)))
        assert spec.get_head(store) == signed_block_z.message.hash_tree_root()
    else:
        if enough_ffg:
            # y is filtered out & so z should be the head
            assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4
            assert y_voting_source_epoch == 3
            assert y_voting_source_epoch != store.justified_checkpoint.epoch
            assert not (y_voting_source_epoch + 2 >= spec.compute_epoch_at_slot(spec.get_current_slot(store)))
            assert spec.get_head(store) == signed_block_z.message.hash_tree_root()
        else:
            # y is not filtered out & wins the LMD competition, so y should be the head
            assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
            assert y_voting_source_epoch == 3
            assert y_voting_source_epoch == store.justified_checkpoint.epoch
            assert spec.get_head(store) == signed_block_y.message.hash_tree_root()

    yield 'steps', test_steps
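The voting-source assertions above track the fork choice block-tree filter. A condensed paraphrase of the condition being exercised (see `filter_block_tree` in the phase0 fork choice spec; this sketch is not the spec text itself):

    # A leaf is viable if its voting source matches the store's justified
    # checkpoint, or is at most two epochs stale relative to the current epoch.
    def is_viable_sketch(spec, store, block_root):
        voting_source = spec.get_voting_source(store, block_root)
        current_epoch = spec.compute_epoch_at_slot(spec.get_current_slot(store))
        return (
            voting_source.epoch == store.justified_checkpoint.epoch
            or voting_source.epoch + 2 >= current_epoch
        )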
@ -0,0 +1,134 @@
from eth2spec.test.context import spec_state_test, with_whisk_and_later, expect_assertion_error
from eth2spec.test.helpers.keys import whisk_ks_initial
from eth2spec.test.helpers.whisk import compute_whisk_tracker
from curdleproofs import GenerateWhiskShuffleProof


def set_correct_shuffle_proofs(spec, state, body):
    pre_shuffle_trackers = get_and_populate_pre_shuffle_trackers(spec, state, body)

    post_trackers, shuffle_proof = GenerateWhiskShuffleProof(spec.CURDLEPROOFS_CRS, pre_shuffle_trackers)
    body.whisk_post_shuffle_trackers = post_trackers
    body.whisk_shuffle_proof = shuffle_proof


def get_and_populate_pre_shuffle_trackers(spec, state, body):
    shuffle_indices = spec.get_shuffle_indices(body.randao_reveal)
    pre_shuffle_trackers = []
    for i in shuffle_indices:
        # Set r to some value > 1 (= 2 + i)
        tracker = compute_whisk_tracker(whisk_ks_initial(i), 2 + i)
        state.whisk_candidate_trackers[i] = tracker
        pre_shuffle_trackers.append(tracker)
    return pre_shuffle_trackers


def get_pre_shuffle_trackers(spec, state, body):
    return [state.whisk_candidate_trackers[i] for i in spec.get_shuffle_indices(body.randao_reveal)]


def set_state_epoch(spec, state, epoch):
    state.slot = epoch * spec.SLOTS_PER_EPOCH


def set_state_epoch_selection_gap(spec, state):
    set_state_epoch(spec, state, spec.config.WHISK_EPOCHS_PER_SHUFFLING_PHASE - 1)


def empty_block_body(spec):
    return spec.BeaconBlockBody()


def run_process_shuffled_trackers(spec, state, body, valid=True):
    yield 'pre', state
    yield 'body', body

    if not valid:
        expect_assertion_error(lambda: spec.process_shuffled_trackers(state, body))
        yield 'post', None
        return

    spec.process_shuffled_trackers(state, body)

    yield 'post', state


@with_whisk_and_later
@spec_state_test
def test_shuffle_trackers(spec, state):
    body = empty_block_body(spec)
    set_correct_shuffle_proofs(spec, state, body)
    yield from run_process_shuffled_trackers(spec, state, body)


@with_whisk_and_later
@spec_state_test
def test_no_shuffle_minus_selection_gap(spec, state):
    body = empty_block_body(spec)
    set_state_epoch_selection_gap(spec, state)
    yield from run_process_shuffled_trackers(spec, state, body)


@with_whisk_and_later
@spec_state_test
def test_no_shuffle_minus_one_and_selection_gap(spec, state):
    body = empty_block_body(spec)
    set_state_epoch(
        spec,
        state,
        spec.config.WHISK_EPOCHS_PER_SHUFFLING_PHASE - spec.config.WHISK_PROPOSER_SELECTION_GAP - 1
    )
    yield from run_process_shuffled_trackers(spec, state, body)


@with_whisk_and_later
@spec_state_test
def test_shuffle_during_selection_gap(spec, state):
    body = empty_block_body(spec)
    set_correct_shuffle_proofs(spec, state, body)
    set_state_epoch_selection_gap(spec, state)
    yield from run_process_shuffled_trackers(spec, state, body, valid=False)


# Invalid cases on shuffle
# - wrong proof
# - wrong post shuffle


@with_whisk_and_later
@spec_state_test
def test_invalid_shuffle_bad_proof(spec, state):
    body = empty_block_body(spec)
    set_correct_shuffle_proofs(spec, state, body)
    body.whisk_shuffle_proof = spec.WhiskShuffleProof()
    yield from run_process_shuffled_trackers(spec, state, body, valid=False)


@with_whisk_and_later
@spec_state_test
def test_invalid_shuffle_bad_trackers_zero(spec, state):
    body = empty_block_body(spec)
    set_correct_shuffle_proofs(spec, state, body)
    body.whisk_post_shuffle_trackers[0] = spec.WhiskTracker()
    yield from run_process_shuffled_trackers(spec, state, body, valid=False)


# Invalid cases on gap
# - not empty shuffle trackers
# - not empty proof


@with_whisk_and_later
@spec_state_test
def test_invalid_gap_non_zero_proof(spec, state):
    body = empty_block_body(spec)
    body.whisk_shuffle_proof = spec.WhiskShuffleProof('0xff')
    set_state_epoch_selection_gap(spec, state)
    yield from run_process_shuffled_trackers(spec, state, body, valid=False)


@with_whisk_and_later
@spec_state_test
def test_invalid_gap_non_zero_trackers(spec, state):
    body = empty_block_body(spec)
    body.whisk_post_shuffle_trackers = get_and_populate_pre_shuffle_trackers(spec, state, body)
    set_state_epoch_selection_gap(spec, state)
    yield from run_process_shuffled_trackers(spec, state, body, valid=False)
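Conceptually, a valid shuffle permutes the selected candidate trackers and re-randomizes each one with a fresh scalar, which preserves each tracker's hidden k. A sketch of that invariant, reusing `Scalar`/`G1Point` from py_arkworks_bls12381 as in the whisk helpers (this is not the curdleproofs API):

    # (r·G, k·r·G) -> (s·r·G, k·s·r·G): the discrete-log ratio k is unchanged,
    # but the re-randomized tracker is unlinkable to the old one.
    def rerandomize_sketch(r_G: G1Point, k_r_G: G1Point, s: int):
        return r_G * Scalar(s), k_r_G * Scalar(s)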
@ -0,0 +1,67 @@
from eth2spec.test.context import spec_state_test, with_whisk_and_later, expect_assertion_error
from eth2spec.test.helpers.whisk import (
    compute_whisk_k_commitment,
    compute_whisk_tracker,
    set_opening_proof
)


def empty_block(spec):
    return spec.BeaconBlock()


def run_process_whisk_opening_proof(spec, state, block, valid=True):
    yield 'pre', state
    yield 'block', block

    if not valid:
        expect_assertion_error(lambda: spec.process_whisk_opening_proof(state, block))
        yield 'post', None
        return

    spec.process_whisk_opening_proof(state, block)

    yield 'post', state


PROPOSER_INDEX = 0
K_OK = 2
K_WRONG = 3
R_OK = 2
R_WRONG = 3


@with_whisk_and_later
@spec_state_test
def test_valid_proof(spec, state):
    block = empty_block(spec)
    set_opening_proof(spec, state, block, PROPOSER_INDEX, K_OK, R_OK)
    yield from run_process_whisk_opening_proof(spec, state, block)


@with_whisk_and_later
@spec_state_test
def test_wrong_commitment(spec, state):
    block = empty_block(spec)
    set_opening_proof(spec, state, block, PROPOSER_INDEX, K_OK, R_OK)
    state.whisk_k_commitments[PROPOSER_INDEX] = compute_whisk_k_commitment(K_WRONG)
    yield from run_process_whisk_opening_proof(spec, state, block, valid=False)


@with_whisk_and_later
@spec_state_test
def test_wrong_tracker_r(spec, state):
    block = empty_block(spec)
    set_opening_proof(spec, state, block, PROPOSER_INDEX, K_OK, R_OK)
    wrong_tracker = compute_whisk_tracker(K_OK, R_WRONG)
    state.whisk_proposer_trackers[state.slot % spec.WHISK_PROPOSER_TRACKERS_COUNT] = wrong_tracker
    yield from run_process_whisk_opening_proof(spec, state, block, valid=False)


@with_whisk_and_later
@spec_state_test
def test_wrong_proof(spec, state):
    block = empty_block(spec)
    set_opening_proof(spec, state, block, PROPOSER_INDEX, K_OK, R_OK)
    block.body.whisk_opening_proof = spec.WhiskTrackerProof()
    yield from run_process_whisk_opening_proof(spec, state, block, valid=False)
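For intuition, the opening proof ties the selected proposer tracker to the proposer's registered k-commitment: both must be built from the same secret k. A small check of that relation with the helper math (values reuse the constants above; illustrative only):

    # tracker = (r·G, k·r·G), commitment = k·G; the proof shows the same k
    # links k_r_G to r_G as links the commitment to the generator.
    tracker, commitment = compute_whisk_tracker_and_commitment(K_OK, R_OK)
    assert commitment != compute_whisk_k_commitment(K_WRONG)  # what test_wrong_commitment exercises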
@ -0,0 +1,108 @@
from eth2spec.test.context import spec_state_test, with_whisk_and_later, expect_assertion_error
from eth2spec.test.helpers.whisk import (
    set_as_first_proposal,
    compute_whisk_k_commitment,
    set_registration,
    register_tracker
)


def empty_block_body(spec):
    return spec.BeaconBlockBody()


def set_as_first_proposal_and_proposer(spec, state, proposer_index):
    state.latest_block_header.proposer_index = proposer_index
    set_as_first_proposal(spec, state, proposer_index)


def run_process_whisk_registration(spec, state, body, valid=True):
    yield 'pre', state
    yield 'body', body

    if not valid:
        expect_assertion_error(lambda: spec.process_whisk_registration(state, body))
        yield 'post', None
        return

    spec.process_whisk_registration(state, body)

    yield 'post', state


IDENTITY_R = 1
OTHER_R = 1_000_002  # Large enough values to not collide with initial k values
OTHER_K = 1_000_002
PROPOSER_INDEX = 0
OTHER_INDEX = 1

# First proposal


@with_whisk_and_later
@spec_state_test
def test_first_proposal_ok(spec, state):
    body = empty_block_body(spec)
    set_as_first_proposal_and_proposer(spec, state, PROPOSER_INDEX)
    set_registration(body, OTHER_K, OTHER_R)
    yield from run_process_whisk_registration(spec, state, body)


@with_whisk_and_later
@spec_state_test
def test_first_proposal_identity_tracker(spec, state):
    body = empty_block_body(spec)
    set_as_first_proposal_and_proposer(spec, state, PROPOSER_INDEX)
    set_registration(body, OTHER_K, IDENTITY_R)
    yield from run_process_whisk_registration(spec, state, body, valid=False)


@with_whisk_and_later
@spec_state_test
def test_first_proposal_non_unique_k_other(spec, state):
    body = empty_block_body(spec)
    set_as_first_proposal_and_proposer(spec, state, PROPOSER_INDEX)
    state.whisk_k_commitments[OTHER_INDEX] = compute_whisk_k_commitment(OTHER_K)
    set_registration(body, OTHER_K, OTHER_R)
    yield from run_process_whisk_registration(spec, state, body, valid=False)


@with_whisk_and_later
@spec_state_test
def test_first_proposal_non_unique_k_self(spec, state):
    body = empty_block_body(spec)
    set_as_first_proposal_and_proposer(spec, state, PROPOSER_INDEX)
    state.whisk_k_commitments[PROPOSER_INDEX] = compute_whisk_k_commitment(OTHER_K)
    set_registration(body, OTHER_K, OTHER_R)
    yield from run_process_whisk_registration(spec, state, body, valid=False)


@with_whisk_and_later
@spec_state_test
def test_first_proposal_invalid_proof(spec, state):
    body = empty_block_body(spec)
    set_as_first_proposal_and_proposer(spec, state, PROPOSER_INDEX)
    set_registration(body, OTHER_K, OTHER_R)
    body.whisk_tracker.k_r_G = spec.BLSG1Point()
    yield from run_process_whisk_registration(spec, state, body, valid=False)


# Second proposal


@with_whisk_and_later
@spec_state_test
def test_second_proposal_ok(spec, state):
    body = empty_block_body(spec)
    # An empty body has the correct values for a second proposal
    # Set tracker to != G1 generator for second proposal condition
    register_tracker(state, PROPOSER_INDEX, OTHER_K, OTHER_R)
    yield from run_process_whisk_registration(spec, state, body)


@with_whisk_and_later
@spec_state_test
def test_second_proposal_not_zero(spec, state):
    body = empty_block_body(spec)
    set_registration(body, OTHER_K, OTHER_R)
    register_tracker(state, PROPOSER_INDEX, OTHER_K, OTHER_R)
    yield from run_process_whisk_registration(spec, state, body, valid=False)
@ -0,0 +1 @@
from .test_whisk import *  # noqa: F401 F403
@ -0,0 +1,51 @@
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.context import spec_state_test, with_whisk_and_later
from eth2spec.test.helpers.keys import whisk_ks_initial
from eth2spec.test.helpers.state import state_transition_and_sign_block
from eth2spec.test.helpers.whisk import compute_whisk_tracker_and_commitment
from curdleproofs import WhiskTracker

known_whisk_trackers = {}


def assign_proposer_at_slot(state, slot: int):
    # Stub: proposer assignment is handled via whisk_proposer_trackers below
    pass


def initialize_whisk_full(spec, state):
    # TODO: De-duplicate code from whisk/fork.md
    for index in range(len(state.validators)):
        whisk_k_commitment, whisk_tracker = spec.get_initial_commitments(whisk_ks_initial(index))
        state.whisk_k_commitments[index] = whisk_k_commitment
        state.whisk_trackers[index] = whisk_tracker

    # Do a candidate selection followed by a proposer selection so that we have proposers for the upcoming day
    # Use an old epoch when selecting candidates so that we don't get the same seed as in the next candidate selection
    spec.select_whisk_candidate_trackers(state, spec.Epoch(0))
    spec.select_whisk_proposer_trackers(state, spec.Epoch(0))


# Fill candidate trackers with the same tracker so shuffling does not break
def fill_candidate_trackers(spec, state, tracker: WhiskTracker):
    for i in range(spec.WHISK_CANDIDATE_TRACKERS_COUNT):
        state.whisk_candidate_trackers[i] = tracker


@with_whisk_and_later
@spec_state_test
def test_whisk__process_block_single_initial(spec, state):
    assert state.slot == 0
    proposer_slot_1 = 0
    tracker_slot_1, k_commitment = compute_whisk_tracker_and_commitment(whisk_ks_initial(proposer_slot_1), 1)
    state.whisk_k_commitments[proposer_slot_1] = k_commitment
    state.whisk_proposer_trackers[1] = tracker_slot_1
    fill_candidate_trackers(spec, state, tracker_slot_1)

    # Produce and process a whisk block
    yield 'pre', state

    block = build_empty_block(spec, state, 1, proposer_slot_1)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state
@ -0,0 +1,17 @@
from eth2spec.test.context import spec_test, with_whisk_and_later
from eth2spec.test.context import single_phase


# Note: remove once whisk is rebased on top of deneb
def is_power_of_two(value: int) -> bool:
    """
    Check if ``value`` is a power of two integer.
    """
    return (value > 0) and (value & (value - 1) == 0)


@with_whisk_and_later
@spec_test
@single_phase
def test_curdleproof(spec):
    assert is_power_of_two(spec.CURDLEPROOFS_N_BLINDERS + spec.WHISK_VALIDATORS_PER_SHUFFLE)
@ -4,6 +4,7 @@ from py_ecc.optimized_bls12_381 import (  # noqa: F401
    G1 as py_ecc_G1,
    G2 as py_ecc_G2,
    Z1 as py_ecc_Z1,
    Z2 as py_ecc_Z2,
    add as py_ecc_add,
    multiply as py_ecc_mul,
    neg as py_ecc_neg,
@ -243,6 +244,15 @@ def Z1():
    return py_ecc_Z1


def Z2():
    """
    Returns the identity point in G2
    """
    if bls == arkworks_bls or bls == fastest_bls:
        return arkworks_G2.identity()
    return py_ecc_Z2


def G1():
    """
    Returns the chosen generator point in G1
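A short sketch of why the identity point is exposed: it is the natural seed when folding G2 points together. Here the py_ecc objects imported above are used directly for illustration:

    # Aggregating two copies of the G2 generator, starting from the identity.
    agg = py_ecc_Z2
    for point in (py_ecc_G2, py_ecc_G2):
        agg = py_ecc_add(agg, point)
    # agg is now 2·G2; adding Z2 leaves any point unchanged.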
@ -9,6 +9,8 @@ Clients should assume forks happen sequentially in the following manner:
0. `phase0`
1. `altair`
2. `bellatrix`
3. `capella`
4. `deneb`

For example, if a test case has `post_fork` of `altair`, the test consumer should assume the test begins in `phase0` and use that specification to process the initial state and any blocks up until the fork epoch. After the fork happens, the test consumer should use the specification according to the `altair` fork to process the remaining data.
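A hedged sketch of the consumer loop this implies; the `specs` mapping and `upgrade_state` helper are illustrative names, not part of the test format:

    # Hypothetical: process with the pre-fork spec until the fork epoch,
    # upgrade the state once, then continue with the post-fork spec.
    spec = specs[pre_fork]
    for signed_block in blocks:
        epoch = spec.compute_epoch_at_slot(signed_block.message.slot)
        if epoch >= fork_epoch and spec.fork == pre_fork:
            state = upgrade_state(state)  # e.g. upgrade_to_altair(state)
            spec = specs[post_fork]
        state = spec.state_transition(state, signed_block)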