Merge branch 'dev' into lc-transitionacrossforks

Etan Kissling 2024-01-15 13:34:04 +01:00
commit 07710e6b4e
GPG Key ID: B21DA824C5A3D03D
35 changed files with 17595 additions and 8504 deletions


@ -194,6 +194,19 @@ jobs:
command: make citest fork=whisk
- store_test_results:
path: tests/core/pyspec/test-reports
test-eip7594:
docker:
- image: circleci/python:3.9
working_directory: ~/specs-repo
steps:
- restore_cache:
key: v3-specs-repo-{{ .Branch }}-{{ .Revision }}
- restore_pyspec_cached_venv
- run:
name: Run py-tests
command: make citest fork=eip7594
- store_test_results:
path: tests/core/pyspec/test-reports
table_of_contents:
docker:
- image: circleci/node:10.16.3
@ -323,6 +336,9 @@ workflows:
- test-whisk:
requires:
- install_pyspec_test
- test-eip7594:
requires:
- install_pyspec_test
- table_of_contents
- codespell
- lint:


@ -66,43 +66,12 @@ jobs:
- name: Run linter for test generators
run: make lint_generators
dockerfile-test:
runs-on: self-hosted
needs: preclear
services:
registry:
image: registry:2
ports:
- 5000:5000
steps:
- name: Checkout this repo
uses: actions/checkout@v3.2.0
- name: get git commit hash
id: git_commit_hash
shell: bash
run: |
echo "git_commit_hash=$(echo $(git log --pretty=format:'%h' -n 1))" >> $GITHUB_OUTPUT
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
with:
driver-opts: network=host
- name: Build and push to local registry
uses: docker/build-push-action@v4
with:
context: .
file: ./docker/Dockerfile
push: true
tags: localhost:5000/consensus-specs-dockerfile-test:${{ steps.git_commit_hash.outputs.git_commit_hash }}
- name: Test the image by running the linter
run: |
docker run localhost:5000/consensus-specs-dockerfile-test:${{ steps.git_commit_hash.outputs.git_commit_hash }} make lint
pyspec-tests:
runs-on: self-hosted
needs: [preclear,lint,codespell,table_of_contents]
strategy:
matrix:
version: ["phase0", "altair", "bellatrix", "capella", "deneb", "eip6110", "eip7002", "whisk"]
version: ["phase0", "altair", "bellatrix", "capella", "deneb", "eip6110", "eip7002", "whisk", "eip7594"]
steps:
- name: Checkout this repo
uses: actions/checkout@v3.2.0

.gitignore

@ -24,6 +24,7 @@ tests/core/pyspec/eth2spec/deneb/
tests/core/pyspec/eth2spec/eip6110/
tests/core/pyspec/eth2spec/eip7002/
tests/core/pyspec/eth2spec/whisk/
tests/core/pyspec/eth2spec/eip7594/
# coverage reports
.htmlcov


@ -154,3 +154,7 @@ BLOB_SIDECAR_SUBNET_COUNT: 6
WHISK_EPOCHS_PER_SHUFFLING_PHASE: 256
# `Epoch(2)`
WHISK_PROPOSER_SELECTION_GAP: 2
# EIP7594
EIP7594_FORK_VERSION: 0x06000001
EIP7594_FORK_EPOCH: 18446744073709551615


@ -153,3 +153,7 @@ BLOB_SIDECAR_SUBNET_COUNT: 6
# Whisk
WHISK_EPOCHS_PER_SHUFFLING_PHASE: 4
WHISK_PROPOSER_SELECTION_GAP: 1
# EIP7594
EIP7594_FORK_VERSION: 0x06000001
EIP7594_FORK_EPOCH: 18446744073709551615

docker/README.md

@ -0,0 +1,20 @@
## Docker related information
This Dockerfile sets up the dependencies required to run the consensus-spec tests. The Docker image can be built locally with:
- `docker build ./ -t $IMAGE_NAME -f ./docker/Dockerfile`
Handy commands:
- `docker run -it $IMAGE_NAME /bin/sh` will give you a shell inside the docker container to manually run any tests
- `docker run $IMAGE_NAME make citest` will run the make citest command inside the docker container
Manual running of Docker containers is intended for advanced users; for most users we recommend the script-based approach described below.
The `scripts/build_run_docker_tests.sh` script covers most use cases. It allows the user to configure the fork (altair/bellatrix/capella/...), the `$IMAGE_NAME` (specifies the container to use), the number of cores, the preset type (mainnet/minimal), and a flag to test all forks. Ideally, this is the main way that users interact with the spec tests, instead of running them locally with varying versions of dependencies.
E.g.:
- `./scripts/build_run_docker_tests.sh --p mainnet --n 16` will run the mainnet preset tests with 16 threads
- `./scripts/build_run_docker_tests.sh --a` will run all the tests across all the forks
- `./scripts/build_run_docker_tests.sh --f deneb --n 16` will run only the deneb tests, with 16 threads
Results are always placed in a folder called `./testResults`. The results are `.xml` files that indicate the fork they cover and the date/time at which they were run.


@ -0,0 +1,6 @@
# Mainnet preset - EIP7594
# Misc
# ---------------------------------------------------------------
# `uint64(2**6)` (= 64)
FIELD_ELEMENTS_PER_CELL: 64

File diff suppressed because it is too large


@ -0,0 +1,6 @@
# Minimal preset - EIP7594
# Misc
# ---------------------------------------------------------------
# `uint64(2**6)` (= 64)
FIELD_ELEMENTS_PER_CELL: 64

File diff suppressed because it is too large


@ -7,6 +7,7 @@ DENEB = 'deneb'
EIP6110 = 'eip6110'
EIP7002 = 'eip7002'
WHISK = 'whisk'
EIP7594 = 'eip7594'


@ -9,6 +9,7 @@ from .constants import (
EIP6110,
WHISK,
EIP7002,
EIP7594,
)
@ -21,6 +22,7 @@ PREVIOUS_FORK_OF = {
EIP6110: DENEB,
WHISK: CAPELLA,
EIP7002: CAPELLA,
EIP7594: DENEB,
}
ALL_FORKS = list(PREVIOUS_FORK_OF.keys())


@ -6,12 +6,13 @@ from .deneb import DenebSpecBuilder
from .eip6110 import EIP6110SpecBuilder
from .eip7002 import EIP7002SpecBuilder
from .whisk import WhiskSpecBuilder
from .eip7594 import EIP7594SpecBuilder
spec_builders = {
builder.fork: builder
for builder in (
Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder,
EIP6110SpecBuilder, EIP7002SpecBuilder, WhiskSpecBuilder,
EIP6110SpecBuilder, EIP7002SpecBuilder, WhiskSpecBuilder, EIP7594SpecBuilder,
)
}


@ -42,9 +42,9 @@ def compute_merkle_proof(object: SSZObject,
@classmethod
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
return {
'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',
'CURRENT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(54)',
'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',
'FINALIZED_ROOT_GINDEX': 'GeneralizedIndex(105)',
'CURRENT_SYNC_COMMITTEE_GINDEX': 'GeneralizedIndex(54)',
'NEXT_SYNC_COMMITTEE_GINDEX': 'GeneralizedIndex(55)',
}
@classmethod


@ -0,0 +1,20 @@
from typing import Dict
from .base import BaseSpecBuilder
from ..constants import EIP7594
class EIP7594SpecBuilder(BaseSpecBuilder):
fork: str = EIP7594
@classmethod
def imports(cls, preset_name: str):
return f'''
from eth2spec.deneb import {preset_name} as deneb
'''
@classmethod
def hardcoded_custom_type_dep_constants(cls, spec_object) -> Dict[str, str]:
return {
'FIELD_ELEMENTS_PER_CELL': spec_object.preset_vars['FIELD_ELEMENTS_PER_CELL'].value,
}


@ -112,10 +112,11 @@ def _load_kzg_trusted_setups(preset_name):
with open(trusted_setups_file_path, 'r') as f:
json_data = json.load(f)
trusted_setup_G1_monomial = json_data['g1_monomial']
trusted_setup_G1_lagrange = json_data['g1_lagrange']
trusted_setup_G2_monomial = json_data['g2_monomial']
return trusted_setup_G2_monomial, trusted_setup_G1_lagrange
return trusted_setup_G1_monomial, trusted_setup_G1_lagrange, trusted_setup_G2_monomial
def _load_curdleproofs_crs(preset_name):
"""
@ -152,7 +153,7 @@ def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:
def _parse_value(name: str, typed_value: str, type_hint: Optional[str] = None) -> VariableDefinition:
comment = None
if name == "BLS12_381_Q":
if name in ("ROOT_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_EXTENDED", "ROOTS_OF_UNITY_REDUCED"):
comment = "noqa: E501"
typed_value = typed_value.strip()
@ -167,9 +168,10 @@ def _parse_value(name: str, typed_value: str, type_hint: Optional[str] = None) -
def _update_constant_vars_with_kzg_setups(constant_vars, preset_name):
comment = "noqa: E501"
kzg_setups = ALL_KZG_SETUPS[preset_name]
constant_vars['KZG_SETUP_G2_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G2_MONOMIAL'].value, str(kzg_setups[0]), comment, None)
constant_vars['KZG_SETUP_G1_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G1_MONOMIAL'].value, str(kzg_setups[0]), comment, None)
constant_vars['KZG_SETUP_G1_LAGRANGE'] = VariableDefinition(constant_vars['KZG_SETUP_G1_LAGRANGE'].value, str(kzg_setups[1]), comment, None)
constant_vars['KZG_SETUP_G2_MONOMIAL'] = VariableDefinition(constant_vars['KZG_SETUP_G2_MONOMIAL'].value, str(kzg_setups[2]), comment, None)
def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name: str) -> SpecObject:
functions: Dict[str, str] = {}


@ -0,0 +1,125 @@
# EIP7594 -- Fork Logic
**Notice**: This document is a work-in-progress for researchers and implementers.
## Table of contents
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Configuration](#configuration)
- [Helper functions](#helper-functions)
- [Misc](#misc)
- [Modified `compute_fork_version`](#modified-compute_fork_version)
- [Fork to EIP7594](#fork-to-eip7594)
- [Fork trigger](#fork-trigger)
- [Upgrading the state](#upgrading-the-state)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
## Introduction
This document describes the process of the EIP7594 upgrade.
## Configuration
Warning: this configuration is not definitive.
| Name | Value |
| - | - |
| `EIP7594_FORK_VERSION` | `Version('0x06000001')` |
| `EIP7594_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
## Helper functions
### Misc
#### Modified `compute_fork_version`
```python
def compute_fork_version(epoch: Epoch) -> Version:
"""
Return the fork version at the given ``epoch``.
"""
if epoch >= EIP7594_FORK_EPOCH:
return EIP7594_FORK_VERSION
if epoch >= DENEB_FORK_EPOCH:
return DENEB_FORK_VERSION
if epoch >= CAPELLA_FORK_EPOCH:
return CAPELLA_FORK_VERSION
if epoch >= BELLATRIX_FORK_EPOCH:
return BELLATRIX_FORK_VERSION
if epoch >= ALTAIR_FORK_EPOCH:
return ALTAIR_FORK_VERSION
return GENESIS_FORK_VERSION
```
## Fork to EIP7594
### Fork trigger
EIP7594 does not need a hard fork. This fork document exists only so that the new feature can be compiled into pyspec.
For now, we assume the condition will be triggered at epoch `EIP7594_FORK_EPOCH`.
Note that for pure EIP7594 networks, `upgrade_to_eip7594` is not applied, since such networks start with the EIP7594 version logic.
### Upgrading the state
If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP7594_FORK_EPOCH`,
an irregular state change is made to upgrade to EIP7594.
```python
def upgrade_to_eip7594(pre: deneb.BeaconState) -> BeaconState:
epoch = deneb.get_current_epoch(pre)
post = BeaconState(
# Versioning
genesis_time=pre.genesis_time,
genesis_validators_root=pre.genesis_validators_root,
slot=pre.slot,
fork=Fork(
previous_version=pre.fork.current_version,
current_version=EIP7594_FORK_VERSION, # [Modified in EIP7594]
epoch=epoch,
),
# History
latest_block_header=pre.latest_block_header,
block_roots=pre.block_roots,
state_roots=pre.state_roots,
historical_roots=pre.historical_roots,
# Eth1
eth1_data=pre.eth1_data,
eth1_data_votes=pre.eth1_data_votes,
eth1_deposit_index=pre.eth1_deposit_index,
# Registry
validators=pre.validators,
balances=pre.balances,
# Randomness
randao_mixes=pre.randao_mixes,
# Slashings
slashings=pre.slashings,
# Participation
previous_epoch_participation=pre.previous_epoch_participation,
current_epoch_participation=pre.current_epoch_participation,
# Finality
justification_bits=pre.justification_bits,
previous_justified_checkpoint=pre.previous_justified_checkpoint,
current_justified_checkpoint=pre.current_justified_checkpoint,
finalized_checkpoint=pre.finalized_checkpoint,
# Inactivity
inactivity_scores=pre.inactivity_scores,
# Sync
current_sync_committee=pre.current_sync_committee,
next_sync_committee=pre.next_sync_committee,
# Execution-layer
latest_execution_payload_header=pre.latest_execution_payload_header,
# Withdrawals
next_withdrawal_index=pre.next_withdrawal_index,
next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
# Deep history valid from Capella onwards
historical_summaries=pre.historical_summaries,
)
return post
```


@ -0,0 +1,525 @@
# Deneb -- Polynomial Commitments Sampling
## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
- [Preset](#preset)
- [Cells](#cells)
- [Helper functions](#helper-functions)
- [Linear combinations](#linear-combinations)
- [`g2_lincomb`](#g2_lincomb)
- [FFTs](#ffts)
- [`_fft_field`](#_fft_field)
- [`fft_field`](#fft_field)
- [Polynomials in coefficient form](#polynomials-in-coefficient-form)
- [`polynomial_eval_to_coeff`](#polynomial_eval_to_coeff)
- [`add_polynomialcoeff`](#add_polynomialcoeff)
- [`neg_polynomialcoeff`](#neg_polynomialcoeff)
- [`multiply_polynomialcoeff`](#multiply_polynomialcoeff)
- [`divide_polynomialcoeff`](#divide_polynomialcoeff)
- [`shift_polynomialcoeff`](#shift_polynomialcoeff)
- [`interpolate_polynomialcoeff`](#interpolate_polynomialcoeff)
- [`vanishing_polynomialcoeff`](#vanishing_polynomialcoeff)
- [`evaluate_polynomialcoeff`](#evaluate_polynomialcoeff)
- [KZG multiproofs](#kzg-multiproofs)
- [`compute_kzg_proof_multi_impl`](#compute_kzg_proof_multi_impl)
- [`verify_kzg_proof_multi_impl`](#verify_kzg_proof_multi_impl)
- [Cell cosets](#cell-cosets)
- [`coset_for_cell`](#coset_for_cell)
- [Cells](#cells-1)
- [Cell computation](#cell-computation)
- [`compute_cells_and_proofs`](#compute_cells_and_proofs)
- [`compute_cells`](#compute_cells)
- [Cell verification](#cell-verification)
- [`verify_cell_proof`](#verify_cell_proof)
- [`verify_cell_proof_batch`](#verify_cell_proof_batch)
- [Reconstruction](#reconstruction)
- [`recover_polynomial`](#recover_polynomial)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Introduction
This document extends [polynomial-commitments.md](polynomial-commitments.md) with the functions required for data availability sampling (DAS). It is not part of the core Deneb spec but an extension that can be optionally implemented to allow nodes to reduce their load using DAS.
For any KZG library extended to support DAS, functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library.
Public functions MUST accept raw bytes as input and perform the required cryptographic normalization before invoking any internal functions.
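As an illustration of this requirement (a sketch, not part of the spec), a public byte-accepting wrapper around the internal cell verifier defined below could reuse the `bytes_to_*` normalization helpers from [polynomial-commitments.md](polynomial-commitments.md); the wrapper name is hypothetical:
```python
def verify_cell_proof_from_bytes(commitment_bytes: Bytes48,
                                 cell_id: CellID,
                                 cell_bytes: Sequence[Bytes32],
                                 proof_bytes: Bytes48) -> bool:
    """
    Hypothetical public wrapper: normalize raw bytes, then invoke the internal logic.
    """
    commitment = bytes_to_kzg_commitment(commitment_bytes)  # validates the G1 point
    cell = Cell([bytes_to_bls_field(b) for b in cell_bytes])  # validates field elements
    proof = bytes_to_kzg_proof(proof_bytes)
    return verify_cell_proof(commitment, cell_id, cell, proof)
```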
## Custom types
| Name | SSZ equivalent | Description |
| - | - | - |
| `PolynomialCoeff` | `List[BLSFieldElement, 2 * FIELD_ELEMENTS_PER_BLOB]` | A polynomial in coefficient form |
| `Cell` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_CELL]` | The unit of blob data that can come with its own KZG proof |
| `CellID` | `uint64` | Cell identifier |
## Constants
| Name | Value | Notes |
| - | - | - |
## Preset
### Cells
Cells are the smallest unit of blob data that can come with their own KZG proofs. Samples can be constructed from one or several cells (e.g. an individual cell or line).
| Name | Value | Description |
| - | - | - |
| `FIELD_ELEMENTS_PER_CELL` | `uint64(64)` | Number of field elements in a cell |
| `BYTES_PER_CELL` | `FIELD_ELEMENTS_PER_CELL * BYTES_PER_FIELD_ELEMENT` | The number of bytes in a cell |
| `CELLS_PER_BLOB` | `((2 * FIELD_ELEMENTS_PER_BLOB) // FIELD_ELEMENTS_PER_CELL)` | The number of cells in a blob |
| `RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN` | `b'RCKZGCBATCH__V1_'` | Domain separator for the random challenge in batched cell verification |
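For example, under the mainnet preset (`FIELD_ELEMENTS_PER_BLOB = 4096`), the extended blob contains `2 * 4096 = 8192` field elements, giving `CELLS_PER_BLOB = 8192 // 64 = 128` cells of `64 * 32 = 2048` bytes each.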
## Helper functions
### Linear combinations
#### `g2_lincomb`
```python
def g2_lincomb(points: Sequence[G2Point], scalars: Sequence[BLSFieldElement]) -> Bytes96:
"""
BLS multiscalar multiplication in G2. This function can be optimized using Pippenger's algorithm and variants.
"""
assert len(points) == len(scalars)
result = bls.Z2()
for x, a in zip(points, scalars):
result = bls.add(result, bls.multiply(bls.bytes96_to_G2(x), a))
return Bytes96(bls.G2_to_bytes96(result))
```
### FFTs
#### `_fft_field`
```python
def _fft_field(vals: Sequence[BLSFieldElement],
roots_of_unity: Sequence[BLSFieldElement]) -> Sequence[BLSFieldElement]:
if len(vals) == 1:
return vals
L = _fft_field(vals[::2], roots_of_unity[::2])
R = _fft_field(vals[1::2], roots_of_unity[::2])
o = [BLSFieldElement(0) for _ in vals]
for i, (x, y) in enumerate(zip(L, R)):
y_times_root = (int(y) * int(roots_of_unity[i])) % BLS_MODULUS
o[i] = BLSFieldElement((int(x) + y_times_root) % BLS_MODULUS)
o[i + len(L)] = BLSFieldElement((int(x) - y_times_root + BLS_MODULUS) % BLS_MODULUS)
return o
```
#### `fft_field`
```python
def fft_field(vals: Sequence[BLSFieldElement],
roots_of_unity: Sequence[BLSFieldElement],
inv: bool=False) -> Sequence[BLSFieldElement]:
if inv:
# Inverse FFT
invlen = pow(len(vals), BLS_MODULUS - 2, BLS_MODULUS)
return [BLSFieldElement((int(x) * invlen) % BLS_MODULUS)
for x in _fft_field(vals, list(roots_of_unity[0:1]) + list(roots_of_unity[:0:-1]))]
else:
# Regular FFT
return _fft_field(vals, roots_of_unity)
```
### Polynomials in coefficient form
#### `polynomial_eval_to_coeff`
```python
def polynomial_eval_to_coeff(polynomial: Polynomial) -> PolynomialCoeff:
"""
Interpolates a polynomial (given in evaluation form) to a polynomial in coefficient form.
"""
roots_of_unity = compute_roots_of_unity(FIELD_ELEMENTS_PER_BLOB)
polynomial_coeff = fft_field(bit_reversal_permutation(list(polynomial)), roots_of_unity, inv=True)
return polynomial_coeff
```
#### `add_polynomialcoeff`
```python
def add_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff:
"""
Sum the coefficient form polynomials ``a`` and ``b``.
"""
a, b = (a, b) if len(a) >= len(b) else (b, a)
return [(a[i] + (b[i] if i < len(b) else 0)) % BLS_MODULUS for i in range(len(a))]
```
#### `neg_polynomialcoeff`
```python
def neg_polynomialcoeff(a: PolynomialCoeff) -> PolynomialCoeff:
"""
Negative of coefficient form polynomial ``a``
"""
return [(BLS_MODULUS - x) % BLS_MODULUS for x in a]
```
#### `multiply_polynomialcoeff`
```python
def multiply_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff:
"""
Multiplies the coefficient form polynomials ``a`` and ``b``
"""
r = [0]
for power, coef in enumerate(a):
summand = [0] * power + [int(coef) * int(x) % BLS_MODULUS for x in b]
r = add_polynomialcoeff(r, summand)
return r
```
#### `divide_polynomialcoeff`
```python
def divide_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> PolynomialCoeff:
"""
Long polynomial division for two coefficient form polynomials ``a`` and ``b``
"""
a = [x for x in a]
o = []
apos = len(a) - 1
bpos = len(b) - 1
diff = apos - bpos
while diff >= 0:
quot = div(a[apos], b[bpos])
o.insert(0, quot)
for i in range(bpos, -1, -1):
a[diff + i] = (int(a[diff + i]) - int(b[i]) * int(quot)) % BLS_MODULUS
apos -= 1
diff -= 1
return [x % BLS_MODULUS for x in o]
```
#### `shift_polynomialcoeff`
```python
def shift_polynomialcoeff(polynomial_coeff: PolynomialCoeff, factor: BLSFieldElement) -> PolynomialCoeff:
"""
    Shift the evaluation of a polynomial in coefficient form by ``factor``.
    This results in a new polynomial g(x) = f(x / factor)
"""
factor_power = 1
inv_factor = pow(int(factor), BLS_MODULUS - 2, BLS_MODULUS)
o = []
for p in polynomial_coeff:
o.append(int(p) * factor_power % BLS_MODULUS)
factor_power = factor_power * inv_factor % BLS_MODULUS
return o
```
#### `interpolate_polynomialcoeff`
```python
def interpolate_polynomialcoeff(xs: Sequence[BLSFieldElement], ys: Sequence[BLSFieldElement]) -> PolynomialCoeff:
"""
    Lagrange interpolation: Finds the lowest degree polynomial that takes the value ``ys[i]`` at ``xs[i]``
for all i.
Outputs a coefficient form polynomial. Leading coefficients may be zero.
"""
assert len(xs) == len(ys)
r = [0]
for i in range(len(xs)):
summand = [ys[i]]
for j in range(len(ys)):
if j != i:
weight_adjustment = bls_modular_inverse(int(xs[i]) - int(xs[j]))
summand = multiply_polynomialcoeff(
summand, [(- int(weight_adjustment) * int(xs[j])) % BLS_MODULUS, weight_adjustment]
)
r = add_polynomialcoeff(r, summand)
return r
```
#### `vanishing_polynomialcoeff`
```python
def vanishing_polynomialcoeff(xs: Sequence[BLSFieldElement]) -> PolynomialCoeff:
"""
Compute the vanishing polynomial on ``xs`` (in coefficient form)
"""
p = [1]
for x in xs:
p = multiply_polynomialcoeff(p, [-int(x), 1])
return p
```
#### `evaluate_polynomialcoeff`
```python
def evaluate_polynomialcoeff(polynomial_coeff: PolynomialCoeff, z: BLSFieldElement) -> BLSFieldElement:
"""
    Evaluate a coefficient form polynomial at ``z`` using Horner's method
"""
y = 0
for coef in polynomial_coeff[::-1]:
y = (int(y) * int(z) + int(coef)) % BLS_MODULUS
return BLSFieldElement(y % BLS_MODULUS)
```
### KZG multiproofs
Extended KZG functions for multiproofs
#### `compute_kzg_proof_multi_impl`
```python
def compute_kzg_proof_multi_impl(
polynomial_coeff: PolynomialCoeff,
zs: Sequence[BLSFieldElement]) -> Tuple[KZGProof, Sequence[BLSFieldElement]]:
"""
Helper function that computes multi-evaluation KZG proofs.
"""
    # Compute y_i = p(z_i) for all z_i, then the interpolation polynomial I(X)
    # through the points (z_i, y_i), and finally p(X) - I(X)
ys = [evaluate_polynomialcoeff(polynomial_coeff, z) for z in zs]
interpolation_polynomial = interpolate_polynomialcoeff(zs, ys)
polynomial_shifted = add_polynomialcoeff(polynomial_coeff, neg_polynomialcoeff(interpolation_polynomial))
    # Compute the vanishing polynomial Z(X) = prod(X - z_i)
denominator_poly = vanishing_polynomialcoeff(zs)
    # Compute the quotient polynomial q(X) = (p(X) - I(X)) / Z(X) in coefficient form
quotient_polynomial = divide_polynomialcoeff(polynomial_shifted, denominator_poly)
return KZGProof(g1_lincomb(KZG_SETUP_G1_MONOMIAL[:len(quotient_polynomial)], quotient_polynomial)), ys
```
#### `verify_kzg_proof_multi_impl`
```python
def verify_kzg_proof_multi_impl(commitment: KZGCommitment,
zs: Sequence[BLSFieldElement],
ys: Sequence[BLSFieldElement],
proof: KZGProof) -> bool:
"""
Helper function that verifies a KZG multiproof
"""
assert len(zs) == len(ys)
zero_poly = g2_lincomb(KZG_SETUP_G2_MONOMIAL[:len(zs) + 1], vanishing_polynomialcoeff(zs))
interpolated_poly = g1_lincomb(KZG_SETUP_G1_MONOMIAL[:len(zs)], interpolate_polynomialcoeff(zs, ys))
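    # Check e(proof, [Z(s)]_2) == e(commitment - [I(s)]_1, [1]_2),
    # i.e. that p(X) - I(X) = q(X) * Z(X) holds at the trusted setup's secret point s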
return (bls.pairing_check([
[bls.bytes48_to_G1(proof), bls.bytes96_to_G2(zero_poly)],
[
bls.add(bls.bytes48_to_G1(commitment), bls.neg(bls.bytes48_to_G1(interpolated_poly))),
bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[0])),
],
]))
```
### Cell cosets
#### `coset_for_cell`
```python
def coset_for_cell(cell_id: CellID) -> Cell:
"""
Get the coset for a given ``cell_id``
"""
assert cell_id < CELLS_PER_BLOB
roots_of_unity_brp = bit_reversal_permutation(
compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB)
)
return Cell(roots_of_unity_brp[FIELD_ELEMENTS_PER_CELL * cell_id:FIELD_ELEMENTS_PER_CELL * (cell_id + 1)])
```
## Cells
### Cell computation
#### `compute_cells_and_proofs`
```python
def compute_cells_and_proofs(blob: Blob) -> Tuple[
Vector[Cell, CELLS_PER_BLOB],
Vector[KZGProof, CELLS_PER_BLOB]]:
"""
    Compute all the cell proofs for one blob. This is an inefficient O(n^2) algorithm;
    for a performant implementation, the FK20 algorithm, which runs in O(n log n),
    should be used instead.
Public method.
"""
polynomial = blob_to_polynomial(blob)
polynomial_coeff = polynomial_eval_to_coeff(polynomial)
cells = []
proofs = []
for i in range(CELLS_PER_BLOB):
coset = coset_for_cell(i)
proof, ys = compute_kzg_proof_multi_impl(polynomial_coeff, coset)
cells.append(ys)
proofs.append(proof)
return cells, proofs
```
#### `compute_cells`
```python
def compute_cells(blob: Blob) -> Vector[Cell, CELLS_PER_BLOB]:
"""
Compute the cell data for a blob (without computing the proofs).
Public method.
"""
polynomial = blob_to_polynomial(blob)
polynomial_coeff = polynomial_eval_to_coeff(polynomial)
extended_data = fft_field(polynomial_coeff + [0] * FIELD_ELEMENTS_PER_BLOB,
compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB))
extended_data_rbo = bit_reversal_permutation(extended_data)
return [extended_data_rbo[i * FIELD_ELEMENTS_PER_CELL:(i + 1) * FIELD_ELEMENTS_PER_CELL]
for i in range(CELLS_PER_BLOB)]
```
### Cell verification
#### `verify_cell_proof`
```python
def verify_cell_proof(commitment: KZGCommitment,
                      cell_id: CellID,
cell: Cell,
proof: KZGProof) -> bool:
"""
Check a cell proof
Public method.
"""
coset = coset_for_cell(cell_id)
return verify_kzg_proof_multi_impl(commitment, coset, cell, proof)
```
#### `verify_cell_proof_batch`
```python
def verify_cell_proof_batch(row_commitments: Sequence[KZGCommitment],
row_ids: Sequence[int],
column_ids: Sequence[int],
cells: Sequence[Cell],
proofs: Sequence[KZGProof]) -> bool:
"""
Check multiple cell proofs. This function implements the naive algorithm of checking every cell
individually; an efficient algorithm can be found here:
https://ethresear.ch/t/a-universal-verification-equation-for-data-availability-sampling/13240
This implementation does not require randomness, but for the algorithm that
requires it, `RANDOM_CHALLENGE_KZG_CELL_BATCH_DOMAIN` should be used to compute
the challenge value.
Public method.
"""
# Get commitments via row IDs
commitments = [row_commitments[row_id] for row_id in row_ids]
return all(
verify_kzg_proof_multi_impl(commitment, coset_for_cell(column_id), cell, proof)
for commitment, column_id, cell, proof in zip(commitments, column_ids, cells, proofs)
)
```
## Reconstruction
### `recover_polynomial`
```python
def recover_polynomial(cell_ids: Sequence[CellID], cells: Sequence[Cell]) -> Polynomial:
"""
    Recovers a polynomial from 2 * FIELD_ELEMENTS_PER_BLOB evaluations, half of which can be missing.
    This algorithm uses FFTs to recover cells faster than using a Lagrange interpolation implementation. However,
a faster version thanks to Qi Zhou can be found here:
https://github.com/ethereum/research/blob/51b530a53bd4147d123ab3e390a9d08605c2cdb8/polynomial_reconstruction/polynomial_reconstruction_danksharding.py
Public method.
"""
assert len(cell_ids) == len(cells)
assert len(cells) >= CELLS_PER_BLOB // 2
missing_cell_ids = [cell_id for cell_id in range(CELLS_PER_BLOB) if cell_id not in cell_ids]
roots_of_unity_reduced = compute_roots_of_unity(CELLS_PER_BLOB)
short_zero_poly = vanishing_polynomialcoeff([
roots_of_unity_reduced[reverse_bits(cell_id, CELLS_PER_BLOB)]
for cell_id in missing_cell_ids
])
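    # Expand to Z(x^K) with K = FIELD_ELEMENTS_PER_CELL, so that the zero polynomial
    # vanishes on every evaluation point belonging to a missing cell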
full_zero_poly = []
for i in short_zero_poly:
full_zero_poly.append(i)
full_zero_poly.extend([0] * (FIELD_ELEMENTS_PER_CELL - 1))
full_zero_poly = full_zero_poly + [0] * (2 * FIELD_ELEMENTS_PER_BLOB - len(full_zero_poly))
zero_poly_eval = fft_field(full_zero_poly,
compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB))
zero_poly_eval_brp = bit_reversal_permutation(zero_poly_eval)
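    # Sanity check: the zero polynomial vanishes on all missing cells
    # and is nonzero on all known cells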
for cell_id in missing_cell_ids:
start = cell_id * FIELD_ELEMENTS_PER_CELL
end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
assert zero_poly_eval_brp[start:end] == [0] * FIELD_ELEMENTS_PER_CELL
for cell_id in cell_ids:
start = cell_id * FIELD_ELEMENTS_PER_CELL
end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
assert all(a != 0 for a in zero_poly_eval_brp[start:end])
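    # Place the known cells into the (bit-reversed) extended evaluation vector,
    # leaving the missing positions as zero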
extended_evaluation_rbo = [0] * (FIELD_ELEMENTS_PER_BLOB * 2)
for cell_id, cell in zip(cell_ids, cells):
start = cell_id * FIELD_ELEMENTS_PER_CELL
end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
extended_evaluation_rbo[start:end] = cell
extended_evaluation = bit_reversal_permutation(extended_evaluation_rbo)
extended_evaluation_times_zero = [BLSFieldElement(int(a) * int(b) % BLS_MODULUS)
for a, b in zip(zero_poly_eval, extended_evaluation)]
roots_of_unity_extended = compute_roots_of_unity(2 * FIELD_ELEMENTS_PER_BLOB)
extended_evaluations_fft = fft_field(extended_evaluation_times_zero, roots_of_unity_extended, inv=True)
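    # Shift by the primitive root so evaluations land on a coset of the domain
    # where the zero polynomial has no zeros, making the division below well-defined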
shift_factor = BLSFieldElement(PRIMITIVE_ROOT_OF_UNITY)
shift_inv = div(BLSFieldElement(1), shift_factor)
shifted_extended_evaluation = shift_polynomialcoeff(extended_evaluations_fft, shift_factor)
shifted_zero_poly = shift_polynomialcoeff(full_zero_poly, shift_factor)
eval_shifted_extended_evaluation = fft_field(shifted_extended_evaluation, roots_of_unity_extended)
eval_shifted_zero_poly = fft_field(shifted_zero_poly, roots_of_unity_extended)
eval_shifted_reconstructed_poly = [
div(a, b)
for a, b in zip(eval_shifted_extended_evaluation, eval_shifted_zero_poly)
]
shifted_reconstructed_poly = fft_field(eval_shifted_reconstructed_poly, roots_of_unity_extended, inv=True)
reconstructed_poly = shift_polynomialcoeff(shifted_reconstructed_poly, shift_inv)
reconstructed_data = bit_reversal_permutation(fft_field(reconstructed_poly, roots_of_unity_extended))
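    # Final consistency check: the recovered data must agree with the provided cells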
for cell_id, cell in zip(cell_ids, cells):
start = cell_id * FIELD_ELEMENTS_PER_CELL
end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
assert reconstructed_data[start:end] == cell
return reconstructed_data
```


@ -58,6 +58,7 @@ This document specifies basic polynomial operations and KZG polynomial commitmen
| `BLS_MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` (curve order of BLS12_381) |
| `PRIMITIVE_ROOT_OF_UNITY` | `7` | Primitive root of unity of the BLS12_381 (inner) BLS_MODULUS |
### KZG Trusted setup
| Name | Value |
@ -103,7 +104,7 @@ def reverse_bit_order(n: int, order: int) -> int:
```python
def list_to_reverse_bit_order(l: List[int]) -> List[int]:
"""
Convert a list between normal and reverse bit order. This operation is idempotent.
Convert a list between normal and reverse bit order. The permutation is an involution (inverts itself).
"""
return [l[reverse_bit_order(i, len(l))] for i in range(len(l))]
```


@ -75,7 +75,7 @@ def create_light_client_bootstrap(state: BeaconState,
return LightClientBootstrap(
header=block_to_light_client_header(block),
current_sync_committee=state.current_sync_committee,
current_sync_committee_branch=compute_merkle_proof(state, CURRENT_SYNC_COMMITTEE_INDEX),
current_sync_committee_branch=compute_merkle_proof(state, CURRENT_SYNC_COMMITTEE_GINDEX),
)
```
@ -122,7 +122,7 @@ def create_light_client_update(state: BeaconState,
# `next_sync_committee` is only useful if the message is signed by the current sync committee
if update_attested_period == update_signature_period:
update.next_sync_committee = attested_state.next_sync_committee
update.next_sync_committee_branch = compute_merkle_proof(attested_state, NEXT_SYNC_COMMITTEE_INDEX)
update.next_sync_committee_branch = compute_merkle_proof(attested_state, NEXT_SYNC_COMMITTEE_GINDEX)
# Indicate finality whenever possible
if finalized_block is not None:
@ -131,7 +131,7 @@ def create_light_client_update(state: BeaconState,
assert hash_tree_root(update.finalized_header.beacon) == attested_state.finalized_checkpoint.root
else:
assert attested_state.finalized_checkpoint.root == Bytes32()
update.finality_branch = compute_merkle_proof(attested_state, FINALIZED_ROOT_INDEX)
update.finality_branch = compute_merkle_proof(attested_state, FINALIZED_ROOT_GINDEX)
update.sync_aggregate = block.message.body.sync_aggregate
update.signature_slot = block.message.slot
@ -158,7 +158,7 @@ def create_light_client_finality_update(update: LightClientUpdate) -> LightClien
)
```
Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes.
Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes. If that `LightClientFinalityUpdate` does not have supermajority (> 2/3) sync committee participation, a second `LightClientFinalityUpdate` SHOULD be delivered for the same `finalized_header` once supermajority participation is obtained.
### `create_light_client_optimistic_update`


@ -59,7 +59,7 @@ New global topics are added to provide light clients with the latest updates.
This topic is used to propagate the latest `LightClientFinalityUpdate` to light clients, allowing them to keep track of the latest `finalized_header`.
The following validations MUST pass before forwarding the `finality_update` on the network.
- _[IGNORE]_ The `finalized_header.beacon.slot` is greater than that of all previously forwarded `finality_update`s
- _[IGNORE]_ The `finalized_header.beacon.slot` is greater than that of all previously forwarded `finality_update`s, or it matches the highest previously forwarded slot and also has a `sync_aggregate` indicating supermajority (> 2/3) sync committee participation while the previously forwarded `finality_update` for that slot did not indicate supermajority
- _[IGNORE]_ The `finality_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `finality_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
For full nodes, the following validations MUST additionally pass before forwarding the `finality_update` on the network.
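The supermajority condition referenced above can be sketched as follows (an illustrative helper assuming the Altair `SyncAggregate` container and `SYNC_COMMITTEE_SIZE` preset; it is not part of the spec):
```python
def has_supermajority(sync_aggregate: SyncAggregate) -> bool:
    # More than 2/3 of the sync committee participated
    num_active_participants = sum(sync_aggregate.sync_committee_bits)
    return num_active_participants * 3 > SYNC_COMMITTEE_SIZE * 2
```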


@ -60,9 +60,9 @@ Additional documents describe how the light client sync protocol can be used:
| Name | Value |
| - | - |
| `FINALIZED_ROOT_INDEX` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` (= 105) |
| `CURRENT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'current_sync_committee')` (= 54) |
| `NEXT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'next_sync_committee')` (= 55) |
| `FINALIZED_ROOT_GINDEX` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` (= 105) |
| `CURRENT_SYNC_COMMITTEE_GINDEX` | `get_generalized_index(BeaconState, 'current_sync_committee')` (= 54) |
| `NEXT_SYNC_COMMITTEE_GINDEX` | `get_generalized_index(BeaconState, 'next_sync_committee')` (= 55) |
## Preset
@ -93,7 +93,7 @@ class LightClientBootstrap(Container):
header: LightClientHeader
# Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee: SyncCommittee
current_sync_committee_branch: Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_INDEX)]
current_sync_committee_branch: Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX)]
```
### `LightClientUpdate`
@ -104,10 +104,10 @@ class LightClientUpdate(Container):
attested_header: LightClientHeader
# Next sync committee corresponding to `attested_header.beacon.state_root`
next_sync_committee: SyncCommittee
next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_GINDEX)]
# Finalized header corresponding to `attested_header.beacon.state_root`
finalized_header: LightClientHeader
finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_GINDEX)]
# Sync committee aggregate signature
sync_aggregate: SyncAggregate
# Slot at which the aggregate signature was created (untrusted)
@ -122,7 +122,7 @@ class LightClientFinalityUpdate(Container):
attested_header: LightClientHeader
# Finalized header corresponding to `attested_header.beacon.state_root`
finalized_header: LightClientHeader
finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_GINDEX)]
# Sync committee aggregate signature
sync_aggregate: SyncAggregate
# Slot at which the aggregate signature was created (untrusted)
@ -174,14 +174,14 @@ def is_valid_light_client_header(header: LightClientHeader) -> bool:
```python
def is_sync_committee_update(update: LightClientUpdate) -> bool:
return update.next_sync_committee_branch != [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
return update.next_sync_committee_branch != [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_GINDEX))]
```
### `is_finality_update`
```python
def is_finality_update(update: LightClientUpdate) -> bool:
return update.finality_branch != [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
return update.finality_branch != [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_GINDEX))]
```
### `is_better_update`
@ -286,8 +286,8 @@ def initialize_light_client_store(trusted_block_root: Root,
assert is_valid_merkle_branch(
leaf=hash_tree_root(bootstrap.current_sync_committee),
branch=bootstrap.current_sync_committee_branch,
depth=floorlog2(CURRENT_SYNC_COMMITTEE_INDEX),
index=get_subtree_index(CURRENT_SYNC_COMMITTEE_INDEX),
depth=floorlog2(CURRENT_SYNC_COMMITTEE_GINDEX),
index=get_subtree_index(CURRENT_SYNC_COMMITTEE_GINDEX),
root=bootstrap.header.beacon.state_root,
)
@ -358,8 +358,8 @@ def validate_light_client_update(store: LightClientStore,
assert is_valid_merkle_branch(
leaf=finalized_root,
branch=update.finality_branch,
depth=floorlog2(FINALIZED_ROOT_INDEX),
index=get_subtree_index(FINALIZED_ROOT_INDEX),
depth=floorlog2(FINALIZED_ROOT_GINDEX),
index=get_subtree_index(FINALIZED_ROOT_GINDEX),
root=update.attested_header.beacon.state_root,
)
@ -373,8 +373,8 @@ def validate_light_client_update(store: LightClientStore,
assert is_valid_merkle_branch(
leaf=hash_tree_root(update.next_sync_committee),
branch=update.next_sync_committee_branch,
depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
depth=floorlog2(NEXT_SYNC_COMMITTEE_GINDEX),
index=get_subtree_index(NEXT_SYNC_COMMITTEE_GINDEX),
root=update.attested_header.beacon.state_root,
)
@ -493,7 +493,7 @@ def process_light_client_finality_update(store: LightClientStore,
update = LightClientUpdate(
attested_header=finality_update.attested_header,
next_sync_committee=SyncCommittee(),
next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))],
next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_GINDEX))],
finalized_header=finality_update.finalized_header,
finality_branch=finality_update.finality_branch,
sync_aggregate=finality_update.sync_aggregate,
@ -512,9 +512,9 @@ def process_light_client_optimistic_update(store: LightClientStore,
update = LightClientUpdate(
attested_header=optimistic_update.attested_header,
next_sync_committee=SyncCommittee(),
next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))],
next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_GINDEX))],
finalized_header=LightClientHeader(),
finality_branch=[Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))],
finality_branch=[Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_GINDEX))],
sync_aggregate=optimistic_update.sync_aggregate,
signature_slot=optimistic_update.signature_slot,
)


@ -78,7 +78,8 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
| `BYTES_PER_BLOB` | `uint64(BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)` | The number of bytes in a blob |
| `G1_POINT_AT_INFINITY` | `Bytes48(b'\xc0' + b'\x00' * 47)` | Serialized form of the point at infinity on the G1 group |
| `KZG_ENDIANNESS` | `'big'` | The endianness of the field elements including blobs |
| `PRIMITIVE_ROOT_OF_UNITY` | `7` | Primitive root of unity of the BLS12_381 (inner) BLS_MODULUS |
| `PRIMITIVE_ROOT_OF_UNITY` | `7` | The primitive root of unity from which all roots of unity should be derived |
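For illustration (a sketch, not part of the table above): an order-`n` root of unity for any `n` dividing `BLS_MODULUS - 1` can be derived from `PRIMITIVE_ROOT_OF_UNITY` by exponentiation, which is the pattern the existing `compute_roots_of_unity` helper follows:
```python
def compute_root_of_unity_sketch(order: uint64) -> BLSFieldElement:
    # g^((p - 1) / n) has multiplicative order exactly n
    # when g generates the full multiplicative group of order p - 1
    assert (BLS_MODULUS - 1) % int(order) == 0
    return BLSFieldElement(pow(PRIMITIVE_ROOT_OF_UNITY, (BLS_MODULUS - 1) // int(order), BLS_MODULUS))
```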
## Preset
@ -95,8 +96,9 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
| Name | Value |
| - | - |
| `KZG_SETUP_G2_LENGTH` | `65` |
| `KZG_SETUP_G2_MONOMIAL` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]` |
| `KZG_SETUP_G1_MONOMIAL` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]` |
| `KZG_SETUP_G1_LAGRANGE` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]` |
| `KZG_SETUP_G2_MONOMIAL` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]` |
## Helper functions
@ -592,4 +594,3 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs)
```


@ -10,6 +10,7 @@
- [Basic types](#basic-types)
- [Composite types](#composite-types)
- [Variable-size and fixed-size](#variable-size-and-fixed-size)
- [Byte](#byte)
- [Aliases](#aliases)
- [Default values](#default-values)
- [`is_zero`](#is_zero)
@ -25,6 +26,7 @@
- [Merkleization](#merkleization)
- [Summaries and expansions](#summaries-and-expansions)
- [Implementations](#implementations)
- [JSON mapping](#json-mapping)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -41,6 +43,7 @@
### Basic types
* `uintN`: `N`-bit unsigned integer (where `N in [8, 16, 32, 64, 128, 256]`)
* `byte`: 8-bit opaque data container, equivalent in serialization and hashing to `uint8`
* `boolean`: `True` or `False`
### Composite types
@ -69,15 +72,20 @@
We recursively define "variable-size" types to be lists, unions, `Bitlist` and all types that contain a variable-size type. All other types are said to be "fixed-size".
### Byte
Although the SSZ serialization of `byte` is equivalent to that of `uint8`, the former is used for opaque data while the latter is intended as a number.
### Aliases
For convenience we alias:
* `bit` to `boolean`
* `byte` to `uint8` (this is a basic type)
* `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type)
* `ByteList[N]` to `List[byte, N]`
Aliases are semantically equivalent to their underlying type and therefore share canonical representations both in SSZ and in related formats.
### Default values
Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types.
@ -256,3 +264,33 @@ We similarly define "summary types" and "expansion types". For example, [`Beacon
## Implementations
See https://github.com/ethereum/eth2.0-specs/issues/2138 for a list of current known implementations.
## JSON mapping
The canonical JSON mapping assigns to each SSZ type a corresponding JSON encoding, enabling an SSZ schema to also define the JSON encoding.
When decoding JSON data, all fields in the SSZ schema must be present with a value. Parsers may ignore additional JSON fields.
| SSZ | JSON | Example |
| --- | --- | --- |
| `uintN` | string | `"0"` |
| `byte` | hex-byte-string | `"0x00"` |
| `boolean` | bool | `false` |
| `Container` | object | `{ "field": ... }` |
| `Vector[type, N]` | array | `[element, ...]` |
| `Vector[byte, N]` | hex-byte-string | `"0x1122"` |
| `Bitvector[N]` | hex-byte-string | `"0x1122"` |
| `List[type, N]` | array | `[element, ...]` |
| `List[byte, N]` | hex-byte-string | `"0x1122"` |
| `Bitlist[N]` | hex-byte-string | `"0x1122"` |
| `Union[type_0, type_1, ...]` | selector-object | `{ "selector": number, "data": type_N }` |
Integers are encoded as strings to avoid loss of precision in 64-bit values.
Aliases are encoded as their underlying type.
`hex-byte-string` is a `0x`-prefixed hex encoding of byte data, as it would appear in an SSZ stream.
`List` and `Vector` of `byte` (and aliases thereof) are encoded as `hex-byte-string`. `Bitlist` and `Bitvector` similarly map their SSZ-byte encodings to a `hex-byte-string`.
`Union` is encoded as an object with a `selector` and `data` field, where the contents of `data` change according to the selector.
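As a non-normative illustration of the mapping, consider a hypothetical container with a `uint64` field, a 4-byte vector, and a `boolean`:
```python
import json

# Hypothetical container: {slot: uint64, root: Vector[byte, 4], active: boolean}
encoded = {
    "slot": str(2**63),  # uintN -> string, preserving full 64-bit precision
    "root": "0x" + bytes([0x11, 0x22, 0x33, 0x44]).hex(),  # Vector[byte, N] -> hex-byte-string
    "active": False,  # boolean -> bool
}
print(json.dumps(encoded))
# {"slot": "9223372036854775808", "root": "0x11223344", "active": false}
```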


@ -10,17 +10,17 @@ from eth2spec.test.context import (
@spec_state_test
def test_current_sync_committee_merkle_proof(spec, state):
yield "object", state
current_sync_committee_branch = spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_INDEX)
current_sync_committee_branch = spec.compute_merkle_proof(state, spec.CURRENT_SYNC_COMMITTEE_GINDEX)
yield "proof", {
"leaf": "0x" + state.current_sync_committee.hash_tree_root().hex(),
"leaf_index": spec.CURRENT_SYNC_COMMITTEE_INDEX,
"leaf_index": spec.CURRENT_SYNC_COMMITTEE_GINDEX,
"branch": ['0x' + root.hex() for root in current_sync_committee_branch]
}
assert spec.is_valid_merkle_branch(
leaf=state.current_sync_committee.hash_tree_root(),
branch=current_sync_committee_branch,
depth=spec.floorlog2(spec.CURRENT_SYNC_COMMITTEE_INDEX),
index=spec.get_subtree_index(spec.CURRENT_SYNC_COMMITTEE_INDEX),
depth=spec.floorlog2(spec.CURRENT_SYNC_COMMITTEE_GINDEX),
index=spec.get_subtree_index(spec.CURRENT_SYNC_COMMITTEE_GINDEX),
root=state.hash_tree_root(),
)
@ -30,17 +30,17 @@ def test_current_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_next_sync_committee_merkle_proof(spec, state):
yield "object", state
next_sync_committee_branch = spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
next_sync_committee_branch = spec.compute_merkle_proof(state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
yield "proof", {
"leaf": "0x" + state.next_sync_committee.hash_tree_root().hex(),
"leaf_index": spec.NEXT_SYNC_COMMITTEE_INDEX,
"leaf_index": spec.NEXT_SYNC_COMMITTEE_GINDEX,
"branch": ['0x' + root.hex() for root in next_sync_committee_branch]
}
assert spec.is_valid_merkle_branch(
leaf=state.next_sync_committee.hash_tree_root(),
branch=next_sync_committee_branch,
depth=spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX),
index=spec.get_subtree_index(spec.NEXT_SYNC_COMMITTEE_INDEX),
depth=spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_GINDEX),
index=spec.get_subtree_index(spec.NEXT_SYNC_COMMITTEE_GINDEX),
root=state.hash_tree_root(),
)
@ -50,17 +50,17 @@ def test_next_sync_committee_merkle_proof(spec, state):
@spec_state_test
def test_finality_root_merkle_proof(spec, state):
yield "object", state
finality_branch = spec.compute_merkle_proof(state, spec.FINALIZED_ROOT_INDEX)
finality_branch = spec.compute_merkle_proof(state, spec.FINALIZED_ROOT_GINDEX)
yield "proof", {
"leaf": "0x" + state.finalized_checkpoint.root.hex(),
"leaf_index": spec.FINALIZED_ROOT_INDEX,
"leaf_index": spec.FINALIZED_ROOT_GINDEX,
"branch": ['0x' + root.hex() for root in finality_branch]
}
assert spec.is_valid_merkle_branch(
leaf=state.finalized_checkpoint.root,
branch=finality_branch,
depth=spec.floorlog2(spec.FINALIZED_ROOT_INDEX),
index=spec.get_subtree_index(spec.FINALIZED_ROOT_INDEX),
depth=spec.floorlog2(spec.FINALIZED_ROOT_GINDEX),
index=spec.get_subtree_index(spec.FINALIZED_ROOT_GINDEX),
root=state.hash_tree_root(),
)


@ -27,7 +27,11 @@ from eth2spec.test.helpers.forks import (
is_post_capella, is_post_deneb,
)
from eth2spec.test.helpers.light_client import (
compute_start_slot_at_next_sync_committee_period,
get_sync_aggregate,
upgrade_lc_bootstrap_to_new_spec,
upgrade_lc_update_to_new_spec,
upgrade_lc_store_to_new_spec,
)
from eth2spec.test.helpers.state import (
next_slots,
@ -35,94 +39,6 @@ from eth2spec.test.helpers.state import (
)
def needs_upgrade_to_capella(d_spec, s_spec):
return is_post_capella(s_spec) and not is_post_capella(d_spec)
def needs_upgrade_to_deneb(d_spec, s_spec):
return is_post_deneb(s_spec) and not is_post_deneb(d_spec)
def check_lc_header_equal(d_spec, s_spec, data, upgraded):
assert upgraded.beacon.slot == data.beacon.slot
assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root()
if is_post_capella(s_spec):
if is_post_capella(d_spec):
assert s_spec.get_lc_execution_root(upgraded) == d_spec.get_lc_execution_root(data)
else:
assert s_spec.get_lc_execution_root(upgraded) == s_spec.Root()
def check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded):
check_lc_header_equal(d_spec, s_spec, data.header, upgraded.header)
assert upgraded.current_sync_committee == data.current_sync_committee
assert upgraded.current_sync_committee_branch == data.current_sync_committee_branch
def upgrade_lc_bootstrap_to_store(d_spec, s_spec, data):
upgraded = data
if needs_upgrade_to_capella(d_spec, s_spec):
upgraded = s_spec.upgrade_lc_bootstrap_to_capella(upgraded)
check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)
if needs_upgrade_to_deneb(d_spec, s_spec):
upgraded = s_spec.upgrade_lc_bootstrap_to_deneb(upgraded)
check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded)
return upgraded
def check_lc_update_equal(d_spec, s_spec, data, upgraded):
check_lc_header_equal(d_spec, s_spec, data.attested_header, upgraded.attested_header)
assert upgraded.next_sync_committee == data.next_sync_committee
assert upgraded.next_sync_committee_branch == data.next_sync_committee_branch
check_lc_header_equal(d_spec, s_spec, data.finalized_header, upgraded.finalized_header)
assert upgraded.sync_aggregate == data.sync_aggregate
assert upgraded.signature_slot == data.signature_slot
def upgrade_lc_update_to_store(d_spec, s_spec, data):
upgraded = data
if needs_upgrade_to_capella(d_spec, s_spec):
upgraded = s_spec.upgrade_lc_update_to_capella(upgraded)
check_lc_update_equal(d_spec, s_spec, data, upgraded)
if needs_upgrade_to_deneb(d_spec, s_spec):
upgraded = s_spec.upgrade_lc_update_to_deneb(upgraded)
check_lc_update_equal(d_spec, s_spec, data, upgraded)
return upgraded
def check_lc_store_equal(d_spec, s_spec, data, upgraded):
check_lc_header_equal(d_spec, s_spec, data.finalized_header, upgraded.finalized_header)
assert upgraded.current_sync_committee == data.current_sync_committee
assert upgraded.next_sync_committee == data.next_sync_committee
if upgraded.best_valid_update is None:
assert data.best_valid_update is None
else:
check_lc_update_equal(d_spec, s_spec, data.best_valid_update, upgraded.best_valid_update)
check_lc_header_equal(d_spec, s_spec, data.optimistic_header, upgraded.optimistic_header)
assert upgraded.previous_max_active_participants == data.previous_max_active_participants
assert upgraded.current_max_active_participants == data.current_max_active_participants
def upgrade_lc_store_to_new_spec(d_spec, s_spec, data):
upgraded = data
if needs_upgrade_to_capella(d_spec, s_spec):
upgraded = s_spec.upgrade_lc_store_to_capella(upgraded)
check_lc_store_equal(d_spec, s_spec, data, upgraded)
if needs_upgrade_to_deneb(d_spec, s_spec):
upgraded = s_spec.upgrade_lc_store_to_deneb(upgraded)
check_lc_store_equal(d_spec, s_spec, data, upgraded)
return upgraded
class LightClientSyncTest(object):
steps: List[Dict[str, Any]]
genesis_validators_root: Any
@ -161,7 +77,7 @@ def setup_test(spec, state, s_spec=None, phases=None):
yield "bootstrap_fork_digest", "meta", encode_hex(data_fork_digest)
yield "bootstrap", data
upgraded = upgrade_lc_bootstrap_to_store(d_spec, test.s_spec, data)
upgraded = upgrade_lc_bootstrap_to_new_spec(d_spec, test.s_spec, data)
test.store = test.s_spec.initialize_light_client_store(trusted_block_root, upgraded)
store_fork_version = get_store_fork_version(test.s_spec)
store_fork_digest = test.s_spec.compute_fork_digest(store_fork_version, test.genesis_validators_root)
@ -234,10 +150,10 @@ def emit_update(test, spec, state, block, attested_state, attested_block, finali
if not with_next:
data.next_sync_committee = spec.SyncCommittee()
data.next_sync_committee_branch = \
[spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
[spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_GINDEX))]
current_slot = state.slot
upgraded = upgrade_lc_update_to_store(d_spec, test.s_spec, data)
upgraded = upgrade_lc_update_to_new_spec(d_spec, test.s_spec, data)
test.s_spec.process_light_client_update(test.store, upgraded, current_slot, test.genesis_validators_root)
yield get_update_file_name(d_spec, data), data
@ -267,15 +183,6 @@ def emit_upgrade_store(test, new_s_spec, phases=None):
})
def compute_start_slot_at_sync_committee_period(spec, sync_committee_period):
return spec.compute_start_slot_at_epoch(sync_committee_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
def compute_start_slot_at_next_sync_committee_period(spec, state):
sync_committee_period = spec.compute_sync_committee_period_at_slot(state.slot)
return compute_start_slot_at_sync_committee_period(spec, sync_committee_period + 1)
@with_light_client
@spec_state_test_with_matching_config
@with_presets([MINIMAL], reason="too slow")


@ -44,7 +44,7 @@ def pytest_addoption(parser):
help="bls-default: make tests that are not dependent on BLS run without BLS"
)
parser.addoption(
"--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro", "arkworks", "fastest"],
"--bls-type", action="store", type=str, default="fastest", choices=["py_ecc", "milagro", "arkworks", "fastest"],
help=(
"bls-type: use specified BLS implementation;"
"fastest: use milagro for signatures and arkworks for everything else (e.g. KZG)"


@ -8,7 +8,7 @@ from eth2spec.utils import bls
from .exceptions import SkippedTest
from .helpers.constants import (
PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB,
EIP6110, EIP7002,
EIP6110, EIP7002, EIP7594,
WHISK,
MINIMAL,
ALL_PHASES,
@ -510,6 +510,7 @@ with_deneb_and_later = with_all_phases_from(DENEB)
with_eip6110_and_later = with_all_phases_from(EIP6110)
with_eip7002_and_later = with_all_phases_from(EIP7002)
with_whisk_and_later = with_all_phases_from(WHISK, all_phases=ALLOWED_TEST_RUNNER_FORKS)
with_eip7594_and_later = with_all_phases_from(EIP7594, all_phases=ALLOWED_TEST_RUNNER_FORKS)
class quoted_str(str):


@ -0,0 +1,97 @@
import random
from eth2spec.test.context import (
spec_test,
single_phase,
with_eip7594_and_later,
)
from eth2spec.test.helpers.sharding import (
get_sample_blob,
)
from eth2spec.utils.bls import BLS_MODULUS
@with_eip7594_and_later
@spec_test
@single_phase
def test_fft(spec):
rng = random.Random(5566)
roots_of_unity = spec.compute_roots_of_unity(spec.FIELD_ELEMENTS_PER_BLOB)
poly_coeff = [rng.randint(0, BLS_MODULUS - 1) for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)]
poly_eval = spec.fft_field(poly_coeff, roots_of_unity)
poly_coeff_inversed = spec.fft_field(poly_eval, roots_of_unity, inv=True)
assert len(poly_eval) == len(poly_coeff) == len(poly_coeff_inversed)
assert poly_coeff_inversed == poly_coeff
@with_eip7594_and_later
@spec_test
@single_phase
def test_verify_cell_proof(spec):
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
cells, proofs = spec.compute_cells_and_proofs(blob)
cell_id = 0
assert spec.verify_cell_proof(commitment, cell_id, cells[cell_id], proofs[cell_id])
cell_id = 1
assert spec.verify_cell_proof(commitment, cell_id, cells[cell_id], proofs[cell_id])
@with_eip7594_and_later
@spec_test
@single_phase
def test_verify_cell_proof_batch(spec):
blob = get_sample_blob(spec)
commitment = spec.blob_to_kzg_commitment(blob)
cells, proofs = spec.compute_cells_and_proofs(blob)
    assert spec.verify_cell_proof_batch(
        row_commitments=[commitment],
        row_ids=[0, 0],
        column_ids=[0, 1],
        cells=cells[0:2],
        proofs=proofs[0:2],
    )
@with_eip7594_and_later
@spec_test
@single_phase
def test_recover_polynomial(spec):
rng = random.Random(5566)
# Number of samples we will be recovering from
N_SAMPLES = spec.CELLS_PER_BLOB // 2
# Get the data we will be working with
blob = get_sample_blob(spec)
# Get the data in evaluation form
original_polynomial = spec.blob_to_polynomial(blob)
# Extend data with Reed-Solomon and split the extended data in cells
cells = spec.compute_cells(blob)
# Compute the cells we will be recovering from
cell_ids = []
known_cells = []
# First figure out just the indices of the cells
for i in range(N_SAMPLES):
        j = rng.randint(0, spec.CELLS_PER_BLOB - 1)
while j in cell_ids:
            j = rng.randint(0, spec.CELLS_PER_BLOB - 1)
cell_ids.append(j)
# Now the cells themselves
known_cells = [cells[cell_id] for cell_id in cell_ids]
# Recover the data
recovered_data = spec.recover_polynomial(cell_ids, known_cells)
# Check that the original data match the non-extended portion of the recovered data
assert original_polynomial == recovered_data[:len(recovered_data) // 2]
# Now flatten the cells and check that they match the entirety of the recovered data
flattened_cells = [x for xs in cells for x in xs]
assert flattened_cells == recovered_data


@ -19,6 +19,7 @@ DAS = SpecForkName('das')
EIP6110 = SpecForkName('eip6110')
EIP7002 = SpecForkName('eip7002')
WHISK = SpecForkName('whisk')
EIP7594 = SpecForkName('eip7594')
#
# SpecFork settings
@ -37,6 +38,7 @@ ALL_PHASES = (
# Experimental patches
EIP6110,
EIP7002,
EIP7594,
)
# The forks that have light client specs
LIGHT_CLIENT_TESTING_FORKS = (*[item for item in MAINNET_FORKS if item != PHASE0], DENEB)
@ -57,6 +59,7 @@ PREVIOUS_FORK_OF = {
EIP6110: DENEB,
WHISK: CAPELLA,
EIP7002: CAPELLA,
EIP7594: DENEB,
}
# For fork transition tests


@ -1,6 +1,9 @@
from eth2spec.test.helpers.fork_transition import (
transition_across_forks,
)
from eth2spec.test.helpers.forks import (
is_post_capella, is_post_deneb,
)
from eth2spec.test.helpers.sync_committee import (
compute_aggregate_sync_committee_signature,
compute_committee_indices,
@ -8,6 +11,15 @@ from eth2spec.test.helpers.sync_committee import (
from math import floor
def compute_start_slot_at_sync_committee_period(spec, sync_committee_period):
return spec.compute_start_slot_at_epoch(sync_committee_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
def compute_start_slot_at_next_sync_committee_period(spec, state):
sync_committee_period = spec.compute_sync_committee_period_at_slot(state.slot)
return compute_start_slot_at_sync_committee_period(spec, sync_committee_period + 1)
def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None, phases=None):
# By default, the sync committee signs the previous slot
if signature_slot is None:
@ -56,13 +68,136 @@ def create_update(spec,
if with_next:
update.next_sync_committee = attested_state.next_sync_committee
update.next_sync_committee_branch = spec.compute_merkle_proof(attested_state, spec.NEXT_SYNC_COMMITTEE_INDEX)
update.next_sync_committee_branch = spec.compute_merkle_proof(attested_state, spec.NEXT_SYNC_COMMITTEE_GINDEX)
if with_finality:
update.finalized_header = spec.block_to_light_client_header(finalized_block)
update.finality_branch = spec.compute_merkle_proof(attested_state, spec.FINALIZED_ROOT_INDEX)
update.finality_branch = spec.compute_merkle_proof(attested_state, spec.FINALIZED_ROOT_GINDEX)
update.sync_aggregate, update.signature_slot = get_sync_aggregate(
spec, attested_state, num_participants)
return update
def needs_upgrade_to_capella(spec, new_spec):
return is_post_capella(new_spec) and not is_post_capella(spec)
def needs_upgrade_to_deneb(spec, new_spec):
return is_post_deneb(new_spec) and not is_post_deneb(spec)
def check_lc_header_equal(spec, new_spec, data, upgraded):
assert upgraded.beacon.slot == data.beacon.slot
assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root()
if is_post_capella(new_spec):
if is_post_capella(spec):
assert new_spec.get_lc_execution_root(upgraded) == spec.get_lc_execution_root(data)
else:
assert new_spec.get_lc_execution_root(upgraded) == new_spec.Root()
def upgrade_lc_header_to_new_spec(spec, new_spec, data):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
upgraded = new_spec.upgrade_lc_header_to_capella(upgraded)
check_lc_header_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
upgraded = new_spec.upgrade_lc_header_to_deneb(upgraded)
check_lc_header_equal(spec, new_spec, data, upgraded)
return upgraded
def check_lc_bootstrap_equal(spec, new_spec, data, upgraded):
check_lc_header_equal(spec, new_spec, data.header, upgraded.header)
assert upgraded.current_sync_committee == data.current_sync_committee
assert upgraded.current_sync_committee_branch == data.current_sync_committee_branch
def upgrade_lc_bootstrap_to_new_spec(spec, new_spec, data):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
upgraded = new_spec.upgrade_lc_bootstrap_to_capella(upgraded)
check_lc_bootstrap_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
upgraded = new_spec.upgrade_lc_bootstrap_to_deneb(upgraded)
check_lc_bootstrap_equal(spec, new_spec, data, upgraded)
return upgraded
def check_lc_update_equal(spec, new_spec, data, upgraded):
check_lc_header_equal(spec, new_spec, data.attested_header, upgraded.attested_header)
assert upgraded.next_sync_committee == data.next_sync_committee
assert upgraded.next_sync_committee_branch == data.next_sync_committee_branch
check_lc_header_equal(spec, new_spec, data.finalized_header, upgraded.finalized_header)
assert upgraded.sync_aggregate == data.sync_aggregate
assert upgraded.signature_slot == data.signature_slot
def upgrade_lc_update_to_new_spec(spec, new_spec, data):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
upgraded = new_spec.upgrade_lc_update_to_capella(upgraded)
check_lc_update_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
upgraded = new_spec.upgrade_lc_update_to_deneb(upgraded)
check_lc_update_equal(spec, new_spec, data, upgraded)
return upgraded
def check_lc_finality_update_equal(spec, new_spec, data, upgraded):
check_lc_header_equal(spec, new_spec, data.attested_header, upgraded.attested_header)
check_lc_header_equal(spec, new_spec, data.finalized_header, upgraded.finalized_header)
assert upgraded.sync_aggregate == data.sync_aggregate
assert upgraded.signature_slot == data.signature_slot
def upgrade_lc_finality_update_to_new_spec(spec, new_spec, data):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
upgraded = new_spec.upgrade_lc_finality_update_to_capella(upgraded)
check_lc_finality_update_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
upgraded = new_spec.upgrade_lc_finality_update_to_deneb(upgraded)
check_lc_finality_update_equal(spec, new_spec, data, upgraded)
return upgraded
def check_lc_store_equal(spec, new_spec, data, upgraded):
check_lc_header_equal(spec, new_spec, data.finalized_header, upgraded.finalized_header)
assert upgraded.current_sync_committee == data.current_sync_committee
assert upgraded.next_sync_committee == data.next_sync_committee
if upgraded.best_valid_update is None:
assert data.best_valid_update is None
else:
check_lc_update_equal(spec, new_spec, data.best_valid_update, upgraded.best_valid_update)
check_lc_header_equal(spec, new_spec, data.optimistic_header, upgraded.optimistic_header)
assert upgraded.previous_max_active_participants == data.previous_max_active_participants
assert upgraded.current_max_active_participants == data.current_max_active_participants
def upgrade_lc_store_to_new_spec(spec, new_spec, data):
upgraded = data
if needs_upgrade_to_capella(spec, new_spec):
upgraded = new_spec.upgrade_lc_store_to_capella(upgraded)
check_lc_store_equal(spec, new_spec, data, upgraded)
if needs_upgrade_to_deneb(spec, new_spec):
upgraded = new_spec.upgrade_lc_store_to_deneb(upgraded)
check_lc_store_equal(spec, new_spec, data, upgraded)
return upgraded


@ -4,6 +4,7 @@ from py_ecc.optimized_bls12_381 import ( # noqa: F401
G1 as py_ecc_G1,
G2 as py_ecc_G2,
Z1 as py_ecc_Z1,
Z2 as py_ecc_Z2,
add as py_ecc_add,
multiply as py_ecc_mul,
neg as py_ecc_neg,
@ -243,6 +244,15 @@ def Z1():
return py_ecc_Z1
def Z2():
"""
Returns the identity point in G2
"""
if bls == arkworks_bls or bls == fastest_bls:
return arkworks_G2.identity()
return py_ecc_Z2
def G1():
"""
Returns the chosen generator point in G1