Merge branch 'dev' into eip7002

Hsiao-Wei Wang 2023-05-20 01:38:08 +08:00
commit d4483e4ca7
No known key found for this signature in database
GPG Key ID: AE3D6B174F971DE4
18 changed files with 487 additions and 221 deletions

View File

@@ -384,7 +384,7 @@ from typing import (
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
from eth2spec.utils.ssz.ssz_typing import (
    View, boolean, Container, List, Vector, uint8, uint32, uint64,
    View, boolean, Container, List, Vector, uint8, uint32, uint64, uint256,
    Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)
from eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401
from eth2spec.utils import bls
@@ -552,7 +552,7 @@ class BellatrixSpecBuilder(AltairSpecBuilder):
        return super().imports(preset_name) + f'''
from typing import Protocol
from eth2spec.altair import {preset_name} as altair
from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector, uint256
from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector
'''
    @classmethod
@@ -589,7 +589,7 @@ class NoopExecutionEngine(ExecutionEngine):
                                  payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
        pass

    def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:
    def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
        # pylint: disable=unused-argument
        raise NotImplementedError("no default block production")
@@ -1207,7 +1207,7 @@ setup(
    extras_require={
        "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
        "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==2.15.3"],
        "generator": ["python-snappy==0.6.1", "filelock"],
        "generator": ["python-snappy==0.6.1", "filelock", "pathos==0.3.0"],
        "docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"]
    },
    install_requires=[

View File

@@ -13,7 +13,7 @@
- [Helpers](#helpers)
- [Protocols](#protocols)
  - [`ExecutionEngine`](#executionengine)
    - [`get_payload`](#get_payload)
    - [Modified `get_payload`](#modified-get_payload)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block proposal](#block-proposal)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
@@ -40,7 +40,7 @@ Please see related Beacon Chain doc before continuing and use them as a referenc
### `ExecutionEngine`
#### `get_payload`
#### Modified `get_payload`
`get_payload` returns the upgraded EIP-4788 `ExecutionPayload` type.

View File

@@ -9,6 +9,7 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Helpers](#helpers)
  - [`GetPayloadResponse`](#getpayloadresponse)
  - [`get_pow_block_at_terminal_total_difficulty`](#get_pow_block_at_terminal_total_difficulty)
  - [`get_terminal_pow_block`](#get_terminal_pow_block)
- [Protocols](#protocols)
@@ -36,6 +37,14 @@ Please see related Beacon Chain doc before continuing and use them as a referenc
## Helpers
### `GetPayloadResponse`
```python
@dataclass
class GetPayloadResponse(object):
    execution_payload: ExecutionPayload
```
### `get_pow_block_at_terminal_total_difficulty`
```python
@@ -83,13 +92,13 @@ The Engine API may be used to implement it with an external execution engine.
#### `get_payload`
Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that
has been built since the corresponding call to `notify_forkchoice_updated` method.
Given the `payload_id`, `get_payload` returns `GetPayloadResponse` with the most recent version of
the execution payload that has been built since the corresponding call to the `notify_forkchoice_updated` method.
```python
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
"""
Return ``execution_payload`` object.
Return ``GetPayloadResponse`` object.
"""
...
```
@@ -162,7 +171,7 @@ def get_execution_payload(payload_id: Optional[PayloadId], execution_engine: Exe
        # Pre-merge, empty payload
        return ExecutionPayload()
    else:
        return execution_engine.get_payload(payload_id)
        return execution_engine.get_payload(payload_id).execution_payload
```
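For orientation, a minimal sketch of this interaction (not spec code; `StubEngine` and its canned payload are hypothetical stand-ins for a real execution engine and the spec types):

```python
from dataclasses import dataclass, field
from typing import Optional

# Hypothetical stand-ins for the spec types, for illustration only.
PayloadId = bytes
ExecutionPayload = dict


@dataclass
class GetPayloadResponse:
    execution_payload: ExecutionPayload = field(default_factory=dict)


class StubEngine:
    def get_payload(self, payload_id: PayloadId) -> GetPayloadResponse:
        # A real engine returns the payload built since the corresponding
        # notify_forkchoice_updated call; here we return a canned response.
        return GetPayloadResponse(execution_payload={"payload_id": payload_id.hex()})


def get_execution_payload_sketch(payload_id: Optional[PayloadId], engine: StubEngine) -> ExecutionPayload:
    if payload_id is None:
        return {}  # pre-merge: empty payload
    return engine.get_payload(payload_id).execution_payload


assert get_execution_payload_sketch(b"\x01" * 8, StubEngine())["payload_id"] == "01" * 8
```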
*Note*: It is recommended for a validator to call `prepare_execution_payload` as soon as input parameters become known,

View File

@@ -11,9 +11,10 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Helpers](#helpers)
  - [Modified `GetPayloadResponse`](#modified-getpayloadresponse)
- [Protocols](#protocols)
  - [`ExecutionEngine`](#executionengine)
    - [`get_payload`](#get_payload)
    - [Modified `get_payload`](#modified-get_payload)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block proposal](#block-proposal)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
@@ -39,11 +40,20 @@ Please see related Beacon Chain doc before continuing and use them as a referenc
## Helpers
### Modified `GetPayloadResponse`
```python
@dataclass
class GetPayloadResponse(object):
    execution_payload: ExecutionPayload
    block_value: uint256
```
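*Note*: `block_value` surfaces the value of the built payload to the proposer; in the Engine API this corresponds to the `blockValue` field returned by `engine_getPayloadV2`, denominated in wei.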
## Protocols
### `ExecutionEngine`
#### `get_payload`
#### Modified `get_payload`
`get_payload` returns the upgraded Capella `ExecutionPayload` type.

View File

@@ -30,9 +30,8 @@ This is the modification of the fork choice accompanying the Deneb upgrade.
def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment],
                   blobs: Sequence[Blob],
                   proofs: Sequence[KZGProof]) -> None:
    assert len(expected_kzg_commitments) == len(blobs)
    assert len(blobs) == len(proofs)
    assert len(expected_kzg_commitments) == len(blobs) == len(proofs)
    assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs)
```
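The chained `assert` is pure shorthand; Python evaluates `a == b == c` as `a == b and b == c`, so the collapsed check is equivalent to the two it replaces:

```python
# Chained comparison semantics, for illustration.
a, b, c = 4, 4, 4
assert (a == b == c) == ((a == b) and (b == c))
```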

View File

@@ -22,7 +22,7 @@ The specification of these changes continues in the same format as the network s
- [Topics and messages](#topics-and-messages)
  - [Global topics](#global-topics)
    - [`beacon_block`](#beacon_block)
    - [`blob_sidecar_{index}`](#blob_sidecar_index)
    - [`blob_sidecar_{subnet_id}`](#blob_sidecar_subnet_id)
  - [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
  - [Messages](#messages)
@@ -107,7 +107,7 @@ The new topics along with the type of the `data` field of a gossipsub message ar
| Name | Message Type |
| - | - |
| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
| `blob_sidecar_{subnet_id}` | `SignedBlobSidecar` (new) |
##### Global topics
@@ -117,13 +117,13 @@ Deneb introduces new global topics for blob sidecars.
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Deneb.
###### `blob_sidecar_{index}`
###### `blob_sidecar_{subnet_id}`
This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
This topic is used to propagate signed blob sidecars, where each blob index maps to some `subnet_id`.
The following validations MUST pass before forwarding the `signed_blob_sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
- _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`.
- _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_blob_sidecar(sidecar.index) == subnet_id`.
- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
@@ -226,7 +226,7 @@ No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response.
Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob sidecar in the response.
Clients MUST respond with at least one sidecar, if they have it.
Clients MAY limit the number of blocks and sidecars in the response.
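A sketch of the `minimum_request_epoch` computation (illustrative Python; `4096` is the Deneb config value of `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`, and the fork epoch here is a placeholder):

```python
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096  # Deneb config value
DENEB_FORK_EPOCH = 100  # placeholder for illustration


def minimum_request_epoch(finalized_epoch: int, current_epoch: int) -> int:
    # Serve sidecars back to the most recent of the three bounds. Note the
    # spec uses uint64 arithmetic, so a client should guard the subtraction
    # against underflow when current_epoch is small.
    return max(finalized_epoch,
               current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
               DENEB_FORK_EPOCH)
```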

View File

@@ -566,7 +566,7 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
                                proofs_bytes: Sequence[Bytes48]) -> bool:
    """
    Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments.
    Will return True if there are zero blobs/commitments/proofs.
    Public method.
    """

View File

@@ -10,8 +10,14 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Constants](#constants)
  - [Misc](#misc)
- [Helpers](#helpers)
  - [`get_blobs_and_kzg_commitments`](#get_blobs_and_kzg_commitments)
  - [`BlobsBundle`](#blobsbundle)
  - [Modified `GetPayloadResponse`](#modified-getpayloadresponse)
- [Protocol](#protocol)
  - [`ExecutionEngine`](#executionengine)
    - [Modified `get_payload`](#modified-get_payload)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block and sidecar proposal](#block-and-sidecar-proposal)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
@@ -34,19 +40,50 @@ All behaviors and definitions defined in this document, and documents it extends
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of Deneb](./beacon-chain.md) are requisite for this document and used throughout.
Please see related Beacon Chain doc before continuing and use them as a reference throughout.
## Constants
### Misc
| Name | Value | Description |
| - | - | :-: |
| `BLOB_SIDECAR_SUBNET_COUNT` | `4` | The number of blob sidecar subnets used in the gossipsub protocol. |
## Helpers
### `get_blobs_and_kzg_commitments`
The interface to retrieve blobs and corresponding kzg commitments.
Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` may be unified.
Implementers may also retrieve blobs individually per transaction.
### `BlobsBundle`
```python
def get_blobs_and_kzg_commitments(
        payload_id: PayloadId
) -> Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]:
@dataclass
class BlobsBundle(object):
    commitments: Sequence[KZGCommitment]
    proofs: Sequence[KZGProof]
    blobs: Sequence[Blob]
```
### Modified `GetPayloadResponse`
```python
@dataclass
class GetPayloadResponse(object):
    execution_payload: ExecutionPayload
    block_value: uint256
    blobs_bundle: BlobsBundle
```
## Protocol
### `ExecutionEngine`
#### Modified `get_payload`
Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that
has been built since the corresponding call to the `notify_forkchoice_updated` method.
```python
def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse:
"""
Return ExecutionPayload, uint256, BlobsBundle objects.
"""
# pylint: disable=unused-argument
...
```
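In block production the three components are consumed separately; a hedged sketch of the call site (the helper name is local to this sketch):

```python
# Sketch: the payload goes into the BeaconBlockBody, the bundle feeds the blob
# sidecars, and block_value can inform the proposer's choice among payloads.
def get_payload_parts(execution_engine, payload_id):
    response = execution_engine.get_payload(payload_id)
    return response.execution_payload, response.block_value, response.blobs_bundle
```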
@@ -62,7 +99,8 @@ All validator responsibilities remain unchanged other than those noted below.
##### Blob KZG commitments
1. After retrieving the execution payload from the execution engine as specified in Capella,
use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blobs_and_kzg_commitments(payload_id)`.
use the `payload_id` to retrieve `blobs`, `blob_kzg_commitments`, and `blob_kzg_proofs`
via `get_payload(payload_id).blobs_bundle`.
2. Validate `blobs` and `blob_kzg_commitments`:
```python
@@ -108,7 +146,7 @@ def get_blob_sidecars(block: BeaconBlock,
```
Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index.
Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic.
`signature` is obtained from:
@@ -121,6 +159,15 @@ def get_blob_sidecar_signature(state: BeaconState,
    return bls.Sign(privkey, signing_root)
```
The `subnet_id` for the `signed_sidecar` is calculated with:
- Let `blob_index = signed_sidecar.message.index`.
- Let `subnet_id = compute_subnet_for_blob_sidecar(blob_index)`.
```python
def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID:
    return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT)
```
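With `BLOB_SIDECAR_SUBNET_COUNT = 4`, blob indices simply wrap around the subnets, e.g.:

```python
BLOB_SIDECAR_SUBNET_COUNT = 4

# Blob indices 0..5 land on subnets 0, 1, 2, 3, 0, 1.
assert [index % BLOB_SIDECAR_SUBNET_COUNT for index in range(6)] == [0, 1, 2, 3, 0, 1]
```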
After publishing, the peers on the network may request the sidecar through sync-requests, or a local user may be interested.
The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable,

View File

@@ -10,6 +10,7 @@ This is an accompanying document to [Phase 0 -- The Beacon Chain](./beacon-chain
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Custom types](#custom-types)
- [Constants](#constants)
  - [Misc](#misc)
- [Containers](#containers)
@@ -82,16 +83,28 @@ A validator is an entity that participates in the consensus of the Ethereum proo
All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Deposit Contract](./deposit-contract.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout.
## Custom types
We define the following Python custom types for type hinting and readability:
| Name | SSZ equivalent | Description |
| - | - | - |
| `NodeID` | `uint256` | node identifier |
| `SubnetID` | `uint64` | subnet identifier |
## Constants
### Misc
| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | |
| `RANDOM_SUBNETS_PER_VALIDATOR` | `2**0` (= 1) | subnets | |
| `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators |
| `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
| `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. |
| `ATTESTATION_SUBNET_EXTRA_BITS` | `0` | The number of extra bits of a NodeId to use when mapping to a subscribed subnet |
| `SUBNETS_PER_NODE` | `2` | The number of long-lived subnets a beacon node should be subscribed to. |
| `ATTESTATION_SUBNET_PREFIX_BITS` | `(ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS)` | |
| `NODE_ID_BITS` | `256` | The bit length of uint256 is 256 |
## Containers
@@ -513,7 +526,9 @@ The `subnet_id` for the `attestation` is calculated with:
- Let `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)`.
```python
def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
def compute_subnet_for_attestation(committees_per_slot: uint64,
                                   slot: Slot,
                                   committee_index: CommitteeIndex) -> SubnetID:
    """
    Compute the correct subnet for an attestation for Phase 0.
    Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
@@ -521,7 +536,7 @@ def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, comm
    slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
    return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
    return SubnetID((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT)
```
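A worked example with illustrative numbers: with `committees_per_slot = 4`, the third slot of the epoch (`slots_since_epoch_start = 3`) and `committee_index = 2` map to subnet `(4 * 3 + 2) % 64 = 14`:

```python
SLOTS_PER_EPOCH = 32
ATTESTATION_SUBNET_COUNT = 64


def subnet_for(committees_per_slot: int, slot: int, committee_index: int) -> int:
    slots_since_epoch_start = slot % SLOTS_PER_EPOCH
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
    return (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT


assert subnet_for(committees_per_slot=4, slot=3, committee_index=2) == 14
```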
### Attestation aggregation
@@ -606,15 +621,31 @@ def get_aggregate_and_proof_signature(state: BeaconState,
## Phase 0 attestation subnet stability
Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each validator must:
Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each beacon node should:
* Randomly select and remain subscribed to `RANDOM_SUBNETS_PER_VALIDATOR` attestation subnets
* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets
* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION]`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR
* Remain subscribed to `SUBNETS_PER_NODE` attestation subnets for `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs.
* Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets.
* Select these subnets based on their node-id as specified by the following `compute_subscribed_subnets(node_id, epoch)` function.
*Note*: Short-lived beacon committee assignments should not be added to the ENR `attnets` entry.
```python
def compute_subscribed_subnet(node_id: NodeID, epoch: Epoch, index: int) -> SubnetID:
    node_id_prefix = node_id >> (NODE_ID_BITS - int(ATTESTATION_SUBNET_PREFIX_BITS))
    node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION
    permutation_seed = hash(uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION)))
    permutated_prefix = compute_shuffled_index(
        node_id_prefix,
        1 << int(ATTESTATION_SUBNET_PREFIX_BITS),
        permutation_seed,
    )
    return SubnetID((permutated_prefix + index) % ATTESTATION_SUBNET_COUNT)
```
*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
```python
def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]:
    return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)]
```
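To see the moving parts, a simplified sketch (illustrative only: the shuffle is replaced by the identity, whereas the spec permutes the prefix with `compute_shuffled_index` seeded from `hash(...)`):

```python
ATTESTATION_SUBNET_COUNT = 64
ATTESTATION_SUBNET_PREFIX_BITS = 6  # ceillog2(64) + 0 extra bits
NODE_ID_BITS = 256
EPOCHS_PER_SUBNET_SUBSCRIPTION = 256
SUBNETS_PER_NODE = 2


def demo_subscribed_subnets(node_id: int, epoch: int) -> list:
    # Top bits of the node id pick a prefix; low bits stagger the rotation epoch.
    node_id_prefix = node_id >> (NODE_ID_BITS - ATTESTATION_SUBNET_PREFIX_BITS)
    node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION
    # The real seed is hash((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION),
    # so the permuted prefix only changes once per subscription period.
    permutated_prefix = node_id_prefix  # identity stands in for the shuffle
    return [(permutated_prefix + index) % ATTESTATION_SUBNET_COUNT
            for index in range(SUBNETS_PER_NODE)]
```

Because the seed changes only when `(epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION` changes, each node keeps the same `SUBNETS_PER_NODE` consecutive subnets for roughly 256 epochs (~27 hours) before rotating.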
*Note*: When preparing for a hard fork, a validator must select and subscribe to subnets of the future fork versioning at least `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
## How to avoid slashing

View File

@@ -1,4 +1,7 @@
from eth_utils import encode_hex
from dataclasses import (
    dataclass,
    field,
)
import os
import time
import shutil
@@ -8,24 +11,80 @@ import sys
import json
from typing import Iterable, AnyStr, Any, Callable
import traceback
from collections import namedtuple
from ruamel.yaml import (
    YAML,
)
from filelock import FileLock
from snappy import compress
from pathos.multiprocessing import ProcessingPool as Pool
from eth_utils import encode_hex
from eth2spec.test import context
from eth2spec.test.exceptions import SkippedTest
from .gen_typing import TestProvider
from .settings import (
    GENERATOR_MODE,
    MODE_MULTIPROCESSING,
    MODE_SINGLE_PROCESS,
    NUM_PROCESS,
    TIME_THRESHOLD_TO_PRINT,
)
# Flag that the runner does NOT run test via pytest
context.is_pytest = False
TIME_THRESHOLD_TO_PRINT = 1.0 # seconds
@dataclass
class Diagnostics(object):
    collected_test_count: int = 0
    generated_test_count: int = 0
    skipped_test_count: int = 0
    test_identifiers: list = field(default_factory=list)
TestCaseParams = namedtuple(
    'TestCaseParams', [
        'test_case', 'case_dir', 'log_file', 'file_mode',
    ])
def worker_function(item):
    return generate_test_vector(*item)
def get_default_yaml():
    yaml = YAML(pure=True)
    yaml.default_flow_style = None

    def _represent_none(self, _):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')

    yaml.representer.add_representer(type(None), _represent_none)
    return yaml
def get_cfg_yaml():
    # Spec config is using a YAML subset
    cfg_yaml = YAML(pure=True)
    cfg_yaml.default_flow_style = False  # Emit separate line for each key

    def cfg_represent_bytes(self, data):
        return self.represent_int(encode_hex(data))

    cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)

    def cfg_represent_quoted_str(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")

    cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
    return cfg_yaml
def validate_output_dir(path_str):
@@ -40,6 +99,47 @@ def validate_output_dir(path_str):
    return path
def get_test_case_dir(test_case, output_dir):
    return (
        Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
        / Path(test_case.runner_name) / Path(test_case.handler_name)
        / Path(test_case.suite_name) / Path(test_case.case_name)
    )
def get_test_identifier(test_case):
return "::".join([
test_case.preset_name,
test_case.fork_name,
test_case.runner_name,
test_case.handler_name,
test_case.suite_name,
test_case.case_name
])
def get_incomplete_tag_file(case_dir):
    return case_dir / "INCOMPLETE"
def should_skip_case_dir(case_dir, is_force, diagnostics_obj):
    is_skip = False
    incomplete_tag_file = get_incomplete_tag_file(case_dir)

    if case_dir.exists():
        if not is_force and not incomplete_tag_file.exists():
            diagnostics_obj.skipped_test_count += 1
            print(f'Skipping already existing test: {case_dir}')
            is_skip = True
        else:
            print(f'Warning, output directory {case_dir} already exist,'
                  ' old files will be deleted and it will generate test vector files with the latest version')
            # Clear the existing case_dir folder
            shutil.rmtree(case_dir)
    return is_skip, diagnostics_obj
def run_generator(generator_name, test_providers: Iterable[TestProvider]):
"""
Implementation for a general test generator.
@@ -94,28 +194,6 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
    else:
        file_mode = "w"
    yaml = YAML(pure=True)
    yaml.default_flow_style = None
    def _represent_none(self, _):
        return self.represent_scalar('tag:yaml.org,2002:null', 'null')
    yaml.representer.add_representer(type(None), _represent_none)
    # Spec config is using a YAML subset
    cfg_yaml = YAML(pure=True)
    cfg_yaml.default_flow_style = False  # Emit separate line for each key
    def cfg_represent_bytes(self, data):
        return self.represent_int(encode_hex(data))
    cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)
    def cfg_represent_quoted_str(self, data):
        return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")
    cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
    log_file = Path(output_dir) / 'testgen_error_log.txt'
    print(f"Generating tests into {output_dir}")
@@ -129,12 +207,13 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
        print(f"Filtering test-generator runs to only include presets: {', '.join(presets)}")
    collect_only = args.collect_only
    collected_test_count = 0
    generated_test_count = 0
    skipped_test_count = 0
    test_identifiers = []
    diagnostics_obj = Diagnostics()
    provider_start = time.time()
    if GENERATOR_MODE == MODE_MULTIPROCESSING:
        all_test_case_params = []
    for tprov in test_providers:
        if not collect_only:
            # runs anything that we don't want to repeat for every test case.
@@ -145,146 +224,133 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
            if len(presets) != 0 and test_case.preset_name not in presets:
                continue
            case_dir = (
                Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
                / Path(test_case.runner_name) / Path(test_case.handler_name)
                / Path(test_case.suite_name) / Path(test_case.case_name)
            )
            collected_test_count += 1
            case_dir = get_test_case_dir(test_case, output_dir)
            print(f"Collected test at: {case_dir}")
            diagnostics_obj.collected_test_count += 1
            incomplete_tag_file = case_dir / "INCOMPLETE"
            is_skip, diagnostics_obj = should_skip_case_dir(case_dir, args.force, diagnostics_obj)
            if is_skip:
                continue
            if case_dir.exists():
                if not args.force and not incomplete_tag_file.exists():
                    skipped_test_count += 1
                    print(f'Skipping already existing test: {case_dir}')
                    continue
                else:
                    print(f'Warning, output directory {case_dir} already exist,'
                          f' old files will be deleted and it will generate test vector files with the latest version')
                    # Clear the existing case_dir folder
                    shutil.rmtree(case_dir)
            if GENERATOR_MODE == MODE_SINGLE_PROCESS:
                result = generate_test_vector(test_case, case_dir, log_file, file_mode)
                write_result_into_diagnostics_obj(result, diagnostics_obj)
            elif GENERATOR_MODE == MODE_MULTIPROCESSING:
                item = TestCaseParams(test_case, case_dir, log_file, file_mode)
                all_test_case_params.append(item)
            print(f'Generating test: {case_dir}')
            test_start = time.time()
    if GENERATOR_MODE == MODE_MULTIPROCESSING:
        with Pool(processes=NUM_PROCESS) as pool:
            results = pool.map(worker_function, iter(all_test_case_params))
            written_part = False
            # Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
            case_dir.mkdir(parents=True, exist_ok=True)
            with incomplete_tag_file.open("w") as f:
                f.write("\n")
            try:
                def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
                    # make sure the test case directory is created before any test part is written.
                    case_dir.mkdir(parents=True, exist_ok=True)
                    try:
                        fn(case_dir)
                    except IOError as e:
                        error_message = (
                            f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
                        )
                        # Write to error log file
                        with log_file.open("a+") as f:
                            f.write(error_message)
                            traceback.print_exc(file=f)
                            f.write('\n')
                        sys.exit(error_message)
                meta = dict()
                try:
                    for (name, out_kind, data) in test_case.case_fn():
                        written_part = True
                        if out_kind == "meta":
                            meta[name] = data
                        elif out_kind == "cfg":
                            output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
                        elif out_kind == "data":
                            output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
                        elif out_kind == "ssz":
                            output_part(out_kind, name, dump_ssz_fn(data, name, file_mode))
                        else:
                            assert False  # Unknown kind
                except SkippedTest as e:
                    print(e)
                    skipped_test_count += 1
                    shutil.rmtree(case_dir)
                    continue
                # Once all meta data is collected (if any), write it to a meta data file.
                if len(meta) != 0:
                    written_part = True
                    output_part("data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))
                if not written_part:
                    print(f"test case {case_dir} did not produce any test case parts")
            except Exception as e:
                error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
                # Write to error log file
                with log_file.open("a+") as f:
                    f.write(error_message)
                    traceback.print_exc(file=f)
                    f.write('\n')
                traceback.print_exc()
            else:
                # If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
                if not written_part:
                    shutil.rmtree(case_dir)
                else:
                    generated_test_count += 1
                    test_identifier = "::".join([
                        test_case.preset_name,
                        test_case.fork_name,
                        test_case.runner_name,
                        test_case.handler_name,
                        test_case.suite_name,
                        test_case.case_name
                    ])
                    test_identifiers.append(test_identifier)
                    # Only remove `INCOMPLETE` tag file
                    os.remove(incomplete_tag_file)
            test_end = time.time()
            span = round(test_end - test_start, 2)
            if span > TIME_THRESHOLD_TO_PRINT:
                print(f' - generated in {span} seconds')
        for result in results:
            write_result_into_diagnostics_obj(result, diagnostics_obj)
    provider_end = time.time()
    span = round(provider_end - provider_start, 2)
    if collect_only:
        print(f"Collected {collected_test_count} tests in total")
        print(f"Collected {diagnostics_obj.collected_test_count} tests in total")
    else:
        summary_message = f"completed generation of {generator_name} with {generated_test_count} tests"
        summary_message += f" ({skipped_test_count} skipped tests)"
        summary_message = f"completed generation of {generator_name} with {diagnostics_obj.generated_test_count} tests"
        summary_message += f" ({diagnostics_obj.skipped_test_count} skipped tests)"
        if span > TIME_THRESHOLD_TO_PRINT:
            summary_message += f" in {span} seconds"
        print(summary_message)
    diagnostics = {
        "collected_test_count": collected_test_count,
        "generated_test_count": generated_test_count,
        "skipped_test_count": skipped_test_count,
        "test_identifiers": test_identifiers,
    diagnostics_output = {
        "collected_test_count": diagnostics_obj.collected_test_count,
        "generated_test_count": diagnostics_obj.generated_test_count,
        "skipped_test_count": diagnostics_obj.skipped_test_count,
        "test_identifiers": diagnostics_obj.test_identifiers,
        "durations": [f"{span} seconds"],
    }
    diagnostics_path = Path(os.path.join(output_dir, "diagnostics.json"))
    diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics.json.lock"))
    diagnostics_path = Path(os.path.join(output_dir, "diagnostics_obj.json"))
    diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics_obj.json.lock"))
    with diagnostics_lock:
        diagnostics_path.touch(exist_ok=True)
        if os.path.getsize(diagnostics_path) == 0:
            with open(diagnostics_path, "w+") as f:
                json.dump(diagnostics, f)
                json.dump(diagnostics_output, f)
        else:
            with open(diagnostics_path, "r+") as f:
                existing_diagnostics = json.load(f)
                for k, v in diagnostics.items():
                for k, v in diagnostics_output.items():
                    existing_diagnostics[k] += v
            with open(diagnostics_path, "w+") as f:
                json.dump(existing_diagnostics, f)
    print(f"wrote diagnostics to {diagnostics_path}")
    print(f"wrote diagnostics_obj to {diagnostics_path}")
def generate_test_vector(test_case, case_dir, log_file, file_mode):
    cfg_yaml = get_cfg_yaml()
    yaml = get_default_yaml()

    written_part = False

    print(f'Generating test: {case_dir}')
    test_start = time.time()

    # Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
    incomplete_tag_file = get_incomplete_tag_file(case_dir)
    case_dir.mkdir(parents=True, exist_ok=True)
    with incomplete_tag_file.open("w") as f:
        f.write("\n")

    result = None
    try:
        meta = dict()
        try:
            written_part, meta = execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml)
        except SkippedTest as e:
            result = 0  # 0 means skipped
            print(e)
            shutil.rmtree(case_dir)
            return result

        # Once all meta data is collected (if any), write it to a meta data file.
        if len(meta) != 0:
            written_part = True
            output_part(case_dir, log_file, "data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))
    except Exception as e:
        result = -1  # -1 means error
        error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
        # Write to error log file
        with log_file.open("a+") as f:
            f.write(error_message)
            traceback.print_exc(file=f)
            f.write('\n')
        print(error_message)
        traceback.print_exc()
    else:
        # If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
        if not written_part:
            print(f"[Error] test case {case_dir} did not produce any written_part")
            shutil.rmtree(case_dir)
            result = -1
        else:
            result = get_test_identifier(test_case)
            # Only remove `INCOMPLETE` tag file
            os.remove(incomplete_tag_file)
    test_end = time.time()
    span = round(test_end - test_start, 2)
    if span > TIME_THRESHOLD_TO_PRINT:
        print(f' - generated in {span} seconds')

    return result
def write_result_into_diagnostics_obj(result, diagnostics_obj):
    if result == -1:  # error
        pass
    elif result == 0:
        diagnostics_obj.skipped_test_count += 1
    elif result is not None:
        diagnostics_obj.generated_test_count += 1
        diagnostics_obj.test_identifiers.append(result)
    else:
        raise Exception(f"Unexpected result: {result}")
def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
@@ -292,9 +358,45 @@ def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
        out_path = case_path / Path(name + '.yaml')
        with out_path.open(file_mode) as f:
            yaml_encoder.dump(data, f)
            f.close()
    return dump
def output_part(case_dir, log_file, out_kind: str, name: str, fn: Callable[[Path, ], None]):
    # make sure the test case directory is created before any test part is written.
    case_dir.mkdir(parents=True, exist_ok=True)
    try:
        fn(case_dir)
    except (IOError, ValueError) as e:
        error_message = f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
        # Write to error log file
        with log_file.open("a+") as f:
            f.write(error_message)
            traceback.print_exc(file=f)
            f.write('\n')
        print(error_message)
        sys.exit(error_message)
def execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml):
    result = test_case.case_fn()
    written_part = False
    for (name, out_kind, data) in result:
        written_part = True
        if out_kind == "meta":
            meta[name] = data
        elif out_kind == "cfg":
            output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
        elif out_kind == "data":
            output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
        elif out_kind == "ssz":
            output_part(case_dir, log_file, out_kind, name, dump_ssz_fn(data, name, file_mode))
        else:
            raise ValueError("Unknown out_kind %s" % out_kind)

    return written_part, meta
def dump_ssz_fn(data: AnyStr, name: str, file_mode: str):
    def dump(case_path: Path):
        out_path = case_path / Path(name + '.ssz_snappy')

View File

@@ -0,0 +1,13 @@
import multiprocessing
# Generator mode setting
MODE_SINGLE_PROCESS = 'MODE_SINGLE_PROCESS'
MODE_MULTIPROCESSING = 'MODE_MULTIPROCESSING'
# Test generator mode
GENERATOR_MODE = MODE_MULTIPROCESSING
# Number of subprocesses when using MODE_MULTIPROCESSING
NUM_PROCESS = multiprocessing.cpu_count() // 2 - 1
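# For example, an 8-core machine yields 8 // 2 - 1 = 3 worker processes.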
# Diagnostics
TIME_THRESHOLD_TO_PRINT = 1.0 # seconds

View File

@@ -564,7 +564,7 @@ def _get_basic_dict(ssz_dict: Dict[str, Any]) -> Dict[str, Any]:
    return result
def _get_copy_of_spec(spec):
def get_copy_of_spec(spec):
    fork = spec.fork
    preset = spec.config.PRESET_BASE
    module_path = f"eth2spec.{fork}.{preset}"
@@ -605,14 +605,14 @@ def with_config_overrides(config_overrides, emitted_fork=None, emit=True):
    def decorator(fn):
        def wrapper(*args, spec: Spec, **kw):
            # Apply config overrides to spec
            spec, output_config = spec_with_config_overrides(_get_copy_of_spec(spec), config_overrides)
            spec, output_config = spec_with_config_overrides(get_copy_of_spec(spec), config_overrides)
            # Apply config overrides to additional phases, if present
            if 'phases' in kw:
                phases = {}
                for fork in kw['phases']:
                    phases[fork], output = spec_with_config_overrides(
                        _get_copy_of_spec(kw['phases'][fork]), config_overrides)
                        get_copy_of_spec(kw['phases'][fork]), config_overrides)
                    if emitted_fork == fork:
                        output_config = output
                kw['phases'] = phases

View File

@@ -150,3 +150,23 @@ def test_incorrect_block_hash(spec, state):
    yield 'blocks', [signed_block]
    yield 'post', state
@with_deneb_and_later
@spec_state_test
def test_zeroed_commitment(spec, state):
"""
The blob is invalid, but the commitment is in correct form.
"""
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1, is_valid_blob=False)
assert all(commitment == b'\x00' * 48 for commitment in blob_kzg_commitments)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
signed_block = state_transition_and_sign_block(spec, state, block)
yield 'blocks', [signed_block]
yield 'post', state
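# Note: the zeroed commitments come from the is_valid_blob=False path of
# get_sample_opaque_tx (see the helper diff below), which substitutes the
# default KZGCommitment() of 48 zero bytes.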

View File

@@ -50,12 +50,9 @@ class SignedBlobTransaction(Container):
    signature: ECDSASignature
def get_sample_blob(spec, rng=None):
    if rng is None:
        rng = random.Random(5566)
def get_sample_blob(spec, rng=random.Random(5566), is_valid_blob=True):
    values = [
        rng.randint(0, spec.BLS_MODULUS - 1)
        rng.randint(0, spec.BLS_MODULUS - 1) if is_valid_blob else spec.BLS_MODULUS
        for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
    ]
@@ -98,15 +95,19 @@ def get_poly_in_both_forms(spec, rng=None):
    return coeffs, evals
def get_sample_opaque_tx(spec, blob_count=1, rng=None):
def get_sample_opaque_tx(spec, blob_count=1, rng=random.Random(5566), is_valid_blob=True):
    blobs = []
    blob_kzg_commitments = []
    blob_kzg_proofs = []
    blob_versioned_hashes = []
    for _ in range(blob_count):
        blob = get_sample_blob(spec, rng)
        blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
        blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
        blob = get_sample_blob(spec, rng, is_valid_blob=is_valid_blob)
        if is_valid_blob:
            blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
            blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
        else:
            blob_commitment = spec.KZGCommitment()
            blob_kzg_proof = spec.KZGProof()
        blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment)
        blobs.append(blob)
        blob_kzg_commitments.append(blob_commitment)

View File

@@ -75,7 +75,9 @@ def test_time(spec, state):
@with_all_phases
@spec_state_test
def test_networking(spec, state):
    assert spec.RANDOM_SUBNETS_PER_VALIDATOR <= spec.ATTESTATION_SUBNET_COUNT
    assert spec.SUBNETS_PER_NODE <= spec.ATTESTATION_SUBNET_COUNT
    node_id_length = spec.NodeID(1).type_byte_length()  # in bytes
    assert node_id_length * 8 == spec.NODE_ID_BITS  # in bits
@with_all_phases

View File

@@ -1,6 +1,12 @@
import random
from eth2spec.test.context import (
    single_phase,
    spec_state_test,
    always_bls, with_phases, with_all_phases,
    spec_test,
    always_bls,
    with_phases,
    with_all_phases,
)
from eth2spec.test.helpers.constants import PHASE0
from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation
@@ -476,3 +482,34 @@ def test_get_aggregate_and_proof_signature(spec, state):
        privkey=privkey,
        pubkey=pubkey,
    )
def run_compute_subscribed_subnets_arguments(spec, rng=random.Random(1111)):
    node_id = rng.randint(0, 2**40 - 1)  # try VALIDATOR_REGISTRY_LIMIT
    epoch = rng.randint(0, 2**64 - 1)
    subnets = spec.compute_subscribed_subnets(node_id, epoch)
    assert len(subnets) == spec.SUBNETS_PER_NODE
@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_1(spec):
    rng = random.Random(1111)
    run_compute_subscribed_subnets_arguments(spec, rng)
@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_2(spec):
    rng = random.Random(2222)
    run_compute_subscribed_subnets_arguments(spec, rng)
@with_all_phases
@spec_test
@single_phase
def test_compute_subscribed_subnets_random_3(spec):
    rng = random.Random(3333)
    run_compute_subscribed_subnets_arguments(spec, rng)

View File

@@ -10,20 +10,13 @@ from typing import (
from pathlib import Path
from eth_utils import encode_hex
from py_ecc.optimized_bls12_381 import ( # noqa: F401
    G1,
    G2,
    Z1,
    Z2,
    curve_order as BLS_MODULUS,
    add,
    multiply,
    neg,
)
from py_ecc.typing import (
    Optimized_Point3D,
)
from eth2spec.utils import bls
from eth2spec.utils.bls import (
    BLS_MODULUS,
)
PRIMITIVE_ROOT_OF_UNITY = 7
@@ -35,7 +28,7 @@ def generate_setup(generator: Optimized_Point3D, secret: int, length: int) -> Tu
    """
    result = [generator]
    for _ in range(1, length):
        result.append(multiply(result[-1], secret))
        result.append(bls.multiply(result[-1], secret))
    return tuple(result)
@@ -49,9 +42,9 @@ def fft(vals: Sequence[Optimized_Point3D], modulus: int, domain: int) -> Sequenc
    R = fft(vals[1::2], modulus, domain[::2])
    o = [0] * len(vals)
    for i, (x, y) in enumerate(zip(L, R)):
        y_times_root = multiply(y, domain[i])
        o[i] = add(x, y_times_root)
        o[i + len(L)] = add(x, neg(y_times_root))
        y_times_root = bls.multiply(y, domain[i])
        o[i] = bls.add(x, y_times_root)
        o[i + len(L)] = bls.add(x, bls.neg(y_times_root))
    return o
@@ -90,12 +83,14 @@ def get_lagrange(setup: Sequence[Optimized_Point3D]) -> Tuple[bytes]:
    # TODO: introduce an IFFT function for simplicity
    fft_output = fft(setup, BLS_MODULUS, domain)
    inv_length = pow(len(setup), BLS_MODULUS - 2, BLS_MODULUS)
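    # Sketch of the math: the inverse DFT of length n satisfies
    # IDFT(x)[k] = n^{-1} * DFT(x)[(n - k) % n], hence the forward fft output
    # below is read in reverse order and scaled by inv_length = n^{-1}
    # (computed via Fermat's little theorem: pow(n, BLS_MODULUS - 2, BLS_MODULUS)).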
    return tuple(bls.G1_to_bytes48(multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))
    return tuple(bls.G1_to_bytes48(bls.multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))
def dump_kzg_trusted_setup_files(secret: int, g1_length: int, g2_length: int, output_dir: str) -> None:
    setup_g1 = generate_setup(bls.G1, secret, g1_length)
    setup_g2 = generate_setup(bls.G2, secret, g2_length)
    bls.use_fastest()
    setup_g1 = generate_setup(bls.G1(), secret, g1_length)
    setup_g2 = generate_setup(bls.G2(), secret, g2_length)
    setup_g1_lagrange = get_lagrange(setup_g1)
    roots_of_unity = compute_roots_of_unity(g1_length)

View File

@@ -114,8 +114,8 @@ Optional step for optimistic sync tests.
This step sets the [`payloadStatus`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#PayloadStatusV1)
value that Execution Layer client mock returns in responses to the following Engine API calls:
* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash`
* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash`
* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash`
* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash`
*Note:* Status of a payload must be *initialized* via `on_payload_info` before the corresponding `on_block` execution step.