Merge remote-tracking branch 'origin' into att-slot-range
commit 3550821577
@@ -157,7 +157,7 @@ jobs:
           path: tests/core/pyspec/test-reports
   test-eip6110:
     docker:
-      - image: circleci/python:3.8
+      - image: circleci/python:3.9
     working_directory: ~/specs-repo
     steps:
       - restore_cache:
setup.py
@@ -1203,7 +1203,7 @@ setup(
     extras_require={
         "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
         "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==2.15.3"],
-        "generator": ["python-snappy==0.6.1", "filelock"],
+        "generator": ["python-snappy==0.6.1", "filelock", "pathos==0.3.0"],
        "docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"]
     },
     install_requires=[
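The new `pathos` dependency supplies a pickling-friendly process pool; this commit uses it later in `gen_runner.py` as `ProcessingPool`. A minimal, standalone sketch of that usage pattern (illustrative only, not repo code; the `with`-statement form mirrors how the commit itself uses the pool):

```python
from pathos.multiprocessing import ProcessingPool as Pool


def square(x):
    return x * x


if __name__ == '__main__':
    # pathos serializes callables with dill rather than pickle, so plain
    # functions, lambdas, and closures can all be mapped over the pool.
    with Pool(processes=2) as pool:
        print(pool.map(square, [1, 2, 3]))  # [1, 4, 9]
```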
@@ -30,9 +30,8 @@ This is the modification of the fork choice accompanying the Deneb upgrade.
 def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment],
                    blobs: Sequence[Blob],
                    proofs: Sequence[KZGProof]) -> None:
-    assert len(expected_kzg_commitments) == len(blobs)
-    assert len(blobs) == len(proofs)
+    assert len(expected_kzg_commitments) == len(blobs) == len(proofs)
 
     assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs)
 ```
 
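For reference, Python chains comparisons, so the single assert above is behaviorally equivalent to the two it replaces; a quick standalone check (illustrative values):

```python
# a == b == c evaluates as (a == b) and (b == c), so one chained assert
# covers both of the original length checks.
commitments, blobs, proofs = [1, 2], [3, 4], [5, 6]
assert len(commitments) == len(blobs) == len(proofs)
```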
@@ -22,7 +22,7 @@ The specification of these changes continues in the same format as the network s…
   - [Topics and messages](#topics-and-messages)
     - [Global topics](#global-topics)
       - [`beacon_block`](#beacon_block)
-      - [`blob_sidecar_{index}`](#blob_sidecar_index)
+      - [`blob_sidecar_{subnet_id}`](#blob_sidecar_subnet_id)
   - [Transitioning the gossip](#transitioning-the-gossip)
 - [The Req/Resp domain](#the-reqresp-domain)
   - [Messages](#messages)
@@ -107,7 +107,7 @@ The new topics along with the type of the `data` field of a gossipsub message ar…
 
 | Name | Message Type |
 | - | - |
-| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) |
+| `blob_sidecar_{subnet_id}` | `SignedBlobSidecar` (new) |
 
 ##### Global topics
@@ -117,13 +117,13 @@ Deneb introduces new global topics for blob sidecars.
 
 The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in deneb.
 
-###### `blob_sidecar_{index}`
+###### `blob_sidecar_{subnet_id}`
 
-This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`.
+This topic is used to propagate signed blob sidecars, where each blob index maps to some `subnet_id`.
 
 The following validations MUST pass before forwarding the `signed_blob_sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
 
-- _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`.
+- _[REJECT]_ The sidecar is for the correct subnet -- i.e. `compute_subnet_for_blob_sidecar(sidecar.index) == subnet_id`.
 - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot).
 - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
 - _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved).
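An illustrative restatement of the updated gossip conditions, not spec code: it assumes the spec helper `compute_subnet_for_blob_sidecar` is in scope and takes the remaining inputs (current slot, first slot of the finalized epoch, set of seen parent roots) as plain arguments.

```python
def check_blob_sidecar_gossip(signed_blob_sidecar, subnet_id,
                              current_slot, finalized_start_slot, seen_block_roots):
    sidecar = signed_blob_sidecar.message
    if compute_subnet_for_blob_sidecar(sidecar.index) != subnet_id:
        return 'REJECT'  # wrong subnet: replaces the old per-index topic check
    if sidecar.slot > current_slot:
        return 'IGNORE'  # future slot (MAXIMUM_GOSSIP_CLOCK_DISPARITY not modeled here)
    if sidecar.slot <= finalized_start_slot:
        return 'IGNORE'  # not past the latest finalized slot
    if sidecar.block_parent_root not in seen_block_roots:
        return 'IGNORE'  # parent unseen; a client MAY queue the sidecar instead
    return 'ACCEPT'
```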
@@ -226,7 +226,7 @@ No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time.
 The response MUST consist of zero or more `response_chunk`.
 Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
 
-Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response.
+Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob sidecar in the response.
 
 Clients MUST respond with at least one sidecar, if they have it.
 Clients MAY limit the number of blocks and sidecars in the response.
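A small sketch of the serving-window rule quoted above. `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096` is the spec's configured value; `DENEB_FORK_EPOCH` is network-dependent, so the value below is a placeholder.

```python
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096
DENEB_FORK_EPOCH = 0  # placeholder; set per network


def minimum_request_epoch(finalized_epoch: int, current_epoch: int) -> int:
    # Serve back to whichever bound is most recent: finality, the rolling
    # retention window, or the fork activation epoch.
    return max(
        finalized_epoch,
        current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
        DENEB_FORK_EPOCH,
    )
```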
@@ -566,7 +566,7 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob],
                                 proofs_bytes: Sequence[Bytes48]) -> bool:
     """
     Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments.
-
+    Will return True if there are zero blobs/commitments/proofs.
     Public method.
     """
 
@@ -10,6 +10,8 @@
 
 - [Introduction](#introduction)
 - [Prerequisites](#prerequisites)
+- [Constants](#constants)
+  - [Misc](#misc)
 - [Helpers](#helpers)
   - [`BlobsBundle`](#blobsbundle)
   - [Modified `GetPayloadResponse`](#modified-getpayloadresponse)
@@ -38,6 +40,14 @@ All behaviors and definitions defined in this document, and documents it extends…
 All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of Deneb](./beacon-chain.md) are requisite for this document and used throughout.
 Please see related Beacon Chain doc before continuing and use them as a reference throughout.
 
+## Constants
+
+### Misc
+
+| Name | Value | Description |
+| - | - | :-: |
+| `BLOB_SIDECAR_SUBNET_COUNT` | `4` | The number of blob sidecar subnets used in the gossipsub protocol. |
+
 ## Helpers
 
 ### `BlobsBundle`
@@ -136,7 +146,7 @@ def get_blob_sidecars(block: BeaconBlock,
 
 ```
 
-Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index.
+Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the associated sidecar topic, the `blob_sidecar_{subnet_id}` pubsub topic.
 
 `signature` is obtained from:
 
@@ -149,6 +159,15 @@ def get_blob_sidecar_signature(state: BeaconState,
     return bls.Sign(privkey, signing_root)
 ```
 
+The `subnet_id` for the `signed_sidecar` is calculated with:
+- Let `blob_index = signed_sidecar.message.index`.
+- Let `subnet_id = compute_subnet_for_blob_sidecar(blob_index)`.
+
+```python
+def compute_subnet_for_blob_sidecar(blob_index: BlobIndex) -> SubnetID:
+    return SubnetID(blob_index % BLOB_SIDECAR_SUBNET_COUNT)
+```
+
 After publishing, the peers on the network may request the sidecar through sync-requests, or a local user may be interested.
 
 The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable,
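With `BLOB_SIDECAR_SUBNET_COUNT = 4` (the value added in this commit), the subnet mapping simply wraps blob indices around the subnets; a quick standalone check:

```python
BLOB_SIDECAR_SUBNET_COUNT = 4

# Blob indices 0..5 land on subnets 0, 1, 2, 3, 0, 1.
for blob_index in range(6):
    print(blob_index, "->", blob_index % BLOB_SIDECAR_SUBNET_COUNT)
```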
@@ -1,4 +1,7 @@
-from eth_utils import encode_hex
+from dataclasses import (
+    dataclass,
+    field,
+)
 import os
 import time
 import shutil
@@ -8,24 +11,80 @@ import sys
 import json
 from typing import Iterable, AnyStr, Any, Callable
 import traceback
+from collections import namedtuple
 
 from ruamel.yaml import (
     YAML,
 )
 
 from filelock import FileLock
 from snappy import compress
+from pathos.multiprocessing import ProcessingPool as Pool
+
+from eth_utils import encode_hex
 
 from eth2spec.test import context
 from eth2spec.test.exceptions import SkippedTest
 
 from .gen_typing import TestProvider
+from .settings import (
+    GENERATOR_MODE,
+    MODE_MULTIPROCESSING,
+    MODE_SINGLE_PROCESS,
+    NUM_PROCESS,
+    TIME_THRESHOLD_TO_PRINT,
+)
 
 
 # Flag that the runner does NOT run test via pytest
 context.is_pytest = False
 
 
-TIME_THRESHOLD_TO_PRINT = 1.0  # seconds
+@dataclass
+class Diagnostics(object):
+    collected_test_count: int = 0
+    generated_test_count: int = 0
+    skipped_test_count: int = 0
+    test_identifiers: list = field(default_factory=list)
+
+
+TestCaseParams = namedtuple(
+    'TestCaseParams', [
+        'test_case', 'case_dir', 'log_file', 'file_mode',
+    ])
+
+
+def worker_function(item):
+    return generate_test_vector(*item)
+
+
+def get_default_yaml():
+    yaml = YAML(pure=True)
+    yaml.default_flow_style = None
+
+    def _represent_none(self, _):
+        return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+    yaml.representer.add_representer(type(None), _represent_none)
+
+    return yaml
+
+
+def get_cfg_yaml():
+    # Spec config is using a YAML subset
+    cfg_yaml = YAML(pure=True)
+    cfg_yaml.default_flow_style = False  # Emit separate line for each key
+
+    def cfg_represent_bytes(self, data):
+        return self.represent_int(encode_hex(data))
+
+    cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)
+
+    def cfg_represent_quoted_str(self, data):
+        return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")
+
+    cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
+    return cfg_yaml
 
 
 def validate_output_dir(path_str):
@@ -40,6 +99,47 @@ def validate_output_dir(path_str):
     return path
 
 
+def get_test_case_dir(test_case, output_dir):
+    return (
+        Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
+        / Path(test_case.runner_name) / Path(test_case.handler_name)
+        / Path(test_case.suite_name) / Path(test_case.case_name)
+    )
+
+
+def get_test_identifier(test_case):
+    return "::".join([
+        test_case.preset_name,
+        test_case.fork_name,
+        test_case.runner_name,
+        test_case.handler_name,
+        test_case.suite_name,
+        test_case.case_name
+    ])
+
+
+def get_incomplete_tag_file(case_dir):
+    return case_dir / "INCOMPLETE"
+
+
+def should_skip_case_dir(case_dir, is_force, diagnostics_obj):
+    is_skip = False
+    incomplete_tag_file = get_incomplete_tag_file(case_dir)
+
+    if case_dir.exists():
+        if not is_force and not incomplete_tag_file.exists():
+            diagnostics_obj.skipped_test_count += 1
+            print(f'Skipping already existing test: {case_dir}')
+            is_skip = True
+        else:
+            print(f'Warning, output directory {case_dir} already exist,'
+                  ' old files will be deleted and it will generate test vector files with the latest version')
+            # Clear the existing case_dir folder
+            shutil.rmtree(case_dir)
+
+    return is_skip, diagnostics_obj
+
+
 def run_generator(generator_name, test_providers: Iterable[TestProvider]):
     """
     Implementation for a general test generator.
@@ -94,28 +194,6 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
     else:
         file_mode = "w"
 
-    yaml = YAML(pure=True)
-    yaml.default_flow_style = None
-
-    def _represent_none(self, _):
-        return self.represent_scalar('tag:yaml.org,2002:null', 'null')
-
-    yaml.representer.add_representer(type(None), _represent_none)
-
-    # Spec config is using a YAML subset
-    cfg_yaml = YAML(pure=True)
-    cfg_yaml.default_flow_style = False  # Emit separate line for each key
-
-    def cfg_represent_bytes(self, data):
-        return self.represent_int(encode_hex(data))
-
-    cfg_yaml.representer.add_representer(bytes, cfg_represent_bytes)
-
-    def cfg_represent_quoted_str(self, data):
-        return self.represent_scalar(u'tag:yaml.org,2002:str', data, style="'")
-
-    cfg_yaml.representer.add_representer(context.quoted_str, cfg_represent_quoted_str)
-
     log_file = Path(output_dir) / 'testgen_error_log.txt'
 
     print(f"Generating tests into {output_dir}")
@@ -129,12 +207,13 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
     print(f"Filtering test-generator runs to only include presets: {', '.join(presets)}")
 
     collect_only = args.collect_only
-    collected_test_count = 0
-    generated_test_count = 0
-    skipped_test_count = 0
-    test_identifiers = []
 
+    diagnostics_obj = Diagnostics()
     provider_start = time.time()
+
+    if GENERATOR_MODE == MODE_MULTIPROCESSING:
+        all_test_case_params = []
+
     for tprov in test_providers:
         if not collect_only:
             # runs anything that we don't want to repeat for every test case.
@@ -145,146 +224,133 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
             if len(presets) != 0 and test_case.preset_name not in presets:
                 continue
 
-            case_dir = (
-                Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
-                / Path(test_case.runner_name) / Path(test_case.handler_name)
-                / Path(test_case.suite_name) / Path(test_case.case_name)
-            )
-            collected_test_count += 1
+            case_dir = get_test_case_dir(test_case, output_dir)
             print(f"Collected test at: {case_dir}")
+            diagnostics_obj.collected_test_count += 1
 
-            incomplete_tag_file = case_dir / "INCOMPLETE"
+            is_skip, diagnostics_obj = should_skip_case_dir(case_dir, args.force, diagnostics_obj)
+            if is_skip:
+                continue
 
-            if case_dir.exists():
-                if not args.force and not incomplete_tag_file.exists():
-                    skipped_test_count += 1
-                    print(f'Skipping already existing test: {case_dir}')
-                    continue
-                else:
-                    print(f'Warning, output directory {case_dir} already exist,'
-                          f' old files will be deleted and it will generate test vector files with the latest version')
-                    # Clear the existing case_dir folder
-                    shutil.rmtree(case_dir)
+            if GENERATOR_MODE == MODE_SINGLE_PROCESS:
+                result = generate_test_vector(test_case, case_dir, log_file, file_mode)
+                write_result_into_diagnostics_obj(result, diagnostics_obj)
+            elif GENERATOR_MODE == MODE_MULTIPROCESSING:
+                item = TestCaseParams(test_case, case_dir, log_file, file_mode)
+                all_test_case_params.append(item)
 
-            print(f'Generating test: {case_dir}')
-            test_start = time.time()
+    if GENERATOR_MODE == MODE_MULTIPROCESSING:
+        with Pool(processes=NUM_PROCESS) as pool:
+            results = pool.map(worker_function, iter(all_test_case_params))
 
-            written_part = False
-
-            # Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
-            case_dir.mkdir(parents=True, exist_ok=True)
-            with incomplete_tag_file.open("w") as f:
-                f.write("\n")
-
-            try:
-                def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
-                    # make sure the test case directory is created before any test part is written.
-                    case_dir.mkdir(parents=True, exist_ok=True)
-                    try:
-                        fn(case_dir)
-                    except IOError as e:
-                        error_message = (
-                            f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
-                        )
-                        # Write to error log file
-                        with log_file.open("a+") as f:
-                            f.write(error_message)
-                            traceback.print_exc(file=f)
-                            f.write('\n')
-
-                        sys.exit(error_message)
-
-                meta = dict()
-
-                try:
-                    for (name, out_kind, data) in test_case.case_fn():
-                        written_part = True
-                        if out_kind == "meta":
-                            meta[name] = data
-                        elif out_kind == "cfg":
-                            output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
-                        elif out_kind == "data":
-                            output_part(out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
-                        elif out_kind == "ssz":
-                            output_part(out_kind, name, dump_ssz_fn(data, name, file_mode))
-                        else:
-                            assert False  # Unknown kind
-                except SkippedTest as e:
-                    print(e)
-                    skipped_test_count += 1
-                    shutil.rmtree(case_dir)
-                    continue
-
-                # Once all meta data is collected (if any), write it to a meta data file.
-                if len(meta) != 0:
-                    written_part = True
-                    output_part("data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))
-
-                if not written_part:
-                    print(f"test case {case_dir} did not produce any test case parts")
-            except Exception as e:
-                error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
-                # Write to error log file
-                with log_file.open("a+") as f:
-                    f.write(error_message)
-                    traceback.print_exc(file=f)
-                    f.write('\n')
-                traceback.print_exc()
-            else:
-                # If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
-                if not written_part:
-                    shutil.rmtree(case_dir)
-                else:
-                    generated_test_count += 1
-                    test_identifier = "::".join([
-                        test_case.preset_name,
-                        test_case.fork_name,
-                        test_case.runner_name,
-                        test_case.handler_name,
-                        test_case.suite_name,
-                        test_case.case_name
-                    ])
-                    test_identifiers.append(test_identifier)
-                    # Only remove `INCOMPLETE` tag file
-                    os.remove(incomplete_tag_file)
-            test_end = time.time()
-            span = round(test_end - test_start, 2)
-            if span > TIME_THRESHOLD_TO_PRINT:
-                print(f' - generated in {span} seconds')
+        for result in results:
+            write_result_into_diagnostics_obj(result, diagnostics_obj)
 
     provider_end = time.time()
     span = round(provider_end - provider_start, 2)
 
     if collect_only:
-        print(f"Collected {collected_test_count} tests in total")
+        print(f"Collected {diagnostics_obj.collected_test_count} tests in total")
     else:
-        summary_message = f"completed generation of {generator_name} with {generated_test_count} tests"
-        summary_message += f" ({skipped_test_count} skipped tests)"
+        summary_message = f"completed generation of {generator_name} with {diagnostics_obj.generated_test_count} tests"
+        summary_message += f" ({diagnostics_obj.skipped_test_count} skipped tests)"
         if span > TIME_THRESHOLD_TO_PRINT:
             summary_message += f" in {span} seconds"
         print(summary_message)
-    diagnostics = {
-        "collected_test_count": collected_test_count,
-        "generated_test_count": generated_test_count,
-        "skipped_test_count": skipped_test_count,
-        "test_identifiers": test_identifiers,
+
+    diagnostics_output = {
+        "collected_test_count": diagnostics_obj.collected_test_count,
+        "generated_test_count": diagnostics_obj.generated_test_count,
+        "skipped_test_count": diagnostics_obj.skipped_test_count,
+        "test_identifiers": diagnostics_obj.test_identifiers,
         "durations": [f"{span} seconds"],
     }
-    diagnostics_path = Path(os.path.join(output_dir, "diagnostics.json"))
-    diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics.json.lock"))
+    diagnostics_path = Path(os.path.join(output_dir, "diagnostics_obj.json"))
+    diagnostics_lock = FileLock(os.path.join(output_dir, "diagnostics_obj.json.lock"))
     with diagnostics_lock:
         diagnostics_path.touch(exist_ok=True)
        if os.path.getsize(diagnostics_path) == 0:
            with open(diagnostics_path, "w+") as f:
-                json.dump(diagnostics, f)
+                json.dump(diagnostics_output, f)
        else:
            with open(diagnostics_path, "r+") as f:
                existing_diagnostics = json.load(f)
-                for k, v in diagnostics.items():
+                for k, v in diagnostics_output.items():
                    existing_diagnostics[k] += v
            with open(diagnostics_path, "w+") as f:
                json.dump(existing_diagnostics, f)
-    print(f"wrote diagnostics to {diagnostics_path}")
+    print(f"wrote diagnostics_obj to {diagnostics_path}")
+
+
+def generate_test_vector(test_case, case_dir, log_file, file_mode):
+    cfg_yaml = get_cfg_yaml()
+    yaml = get_default_yaml()
+
+    written_part = False
+
+    print(f'Generating test: {case_dir}')
+    test_start = time.time()
+
+    # Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
+    incomplete_tag_file = get_incomplete_tag_file(case_dir)
+    case_dir.mkdir(parents=True, exist_ok=True)
+    with incomplete_tag_file.open("w") as f:
+        f.write("\n")
+
+    result = None
+    try:
+        meta = dict()
+        try:
+            written_part, meta = execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml)
+        except SkippedTest as e:
+            result = 0  # 0 means skipped
+            print(e)
+            shutil.rmtree(case_dir)
+            return result
+
+        # Once all meta data is collected (if any), write it to a meta data file.
+        if len(meta) != 0:
+            written_part = True
+            output_part(case_dir, log_file, "data", "meta", dump_yaml_fn(meta, "meta", file_mode, yaml))
+
+    except Exception as e:
+        result = -1  # -1 means error
+        error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
+        # Write to error log file
+        with log_file.open("a+") as f:
+            f.write(error_message)
+            traceback.print_exc(file=f)
+            f.write('\n')
+        print(error_message)
+        traceback.print_exc()
+    else:
+        # If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
+        if not written_part:
+            print(f"[Error] test case {case_dir} did not produce any written_part")
+            shutil.rmtree(case_dir)
+            result = -1
+        else:
+            result = get_test_identifier(test_case)
+            # Only remove `INCOMPLETE` tag file
+            os.remove(incomplete_tag_file)
+    test_end = time.time()
+    span = round(test_end - test_start, 2)
+    if span > TIME_THRESHOLD_TO_PRINT:
+        print(f' - generated in {span} seconds')
+
+    return result
+
+
+def write_result_into_diagnostics_obj(result, diagnostics_obj):
+    if result == -1:  # error
+        pass
+    elif result == 0:
+        diagnostics_obj.skipped_test_count += 1
+    elif result is not None:
+        diagnostics_obj.generated_test_count += 1
+        diagnostics_obj.test_identifiers.append(result)
+    else:
+        raise Exception(f"Unexpected result: {result}")
 
 
 def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
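The refactor above threads a small result protocol through the process pool: `generate_test_vector` returns `0` for a skipped case, `-1` for an error, and the test identifier string on success, which `write_result_into_diagnostics_obj` folds into the `Diagnostics` dataclass. A self-contained toy of that convention (illustrative, not repo code):

```python
from dataclasses import dataclass, field


@dataclass
class Diag:
    generated: int = 0
    skipped: int = 0
    identifiers: list = field(default_factory=list)


def fold(result, diag):
    # 0 = skipped, -1 = error, anything else = identifier of a generated test
    if result == -1:
        pass
    elif result == 0:
        diag.skipped += 1
    else:
        diag.generated += 1
        diag.identifiers.append(result)


diag = Diag()
for r in ("mainnet::deneb::sanity::blocks::case_0", 0, -1):
    fold(r, diag)
assert (diag.generated, diag.skipped) == (1, 1)
```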
@@ -292,9 +358,45 @@ def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
         out_path = case_path / Path(name + '.yaml')
         with out_path.open(file_mode) as f:
             yaml_encoder.dump(data, f)
             f.close()
     return dump
 
 
+def output_part(case_dir, log_file, out_kind: str, name: str, fn: Callable[[Path, ], None]):
+    # make sure the test case directory is created before any test part is written.
+    case_dir.mkdir(parents=True, exist_ok=True)
+    try:
+        fn(case_dir)
+    except (IOError, ValueError) as e:
+        error_message = f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
+        # Write to error log file
+        with log_file.open("a+") as f:
+            f.write(error_message)
+            traceback.print_exc(file=f)
+            f.write('\n')
+        print(error_message)
+        sys.exit(error_message)
+
+
+def execute_test(test_case, case_dir, meta, log_file, file_mode, cfg_yaml, yaml):
+    result = test_case.case_fn()
+    written_part = False
+    for (name, out_kind, data) in result:
+        written_part = True
+        if out_kind == "meta":
+            meta[name] = data
+        elif out_kind == "cfg":
+            output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, cfg_yaml))
+        elif out_kind == "data":
+            output_part(case_dir, log_file, out_kind, name, dump_yaml_fn(data, name, file_mode, yaml))
+        elif out_kind == "ssz":
+            output_part(case_dir, log_file, out_kind, name, dump_ssz_fn(data, name, file_mode))
+        else:
+            raise ValueError("Unknown out_kind %s" % out_kind)
+
+    return written_part, meta
+
+
 def dump_ssz_fn(data: AnyStr, name: str, file_mode: str):
     def dump(case_path: Path):
         out_path = case_path / Path(name + '.ssz_snappy')
@@ -0,0 +1,13 @@
+import multiprocessing
+
+
+# Generator mode setting
+MODE_SINGLE_PROCESS = 'MODE_SINGLE_PROCESS'
+MODE_MULTIPROCESSING = 'MODE_MULTIPROCESSING'
+# Test generator mode
+GENERATOR_MODE = MODE_MULTIPROCESSING
+# Number of subprocesses when using MODE_MULTIPROCESSING
+NUM_PROCESS = multiprocessing.cpu_count() // 2 - 1
+
+# Diagnostics
+TIME_THRESHOLD_TO_PRINT = 1.0  # seconds
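One caveat worth noting about the new settings module: on machines with few cores, `cpu_count() // 2 - 1` can evaluate to `0` or `-1`, which is not a valid pool size. A guarded variant (an assumption about intent, not what the commit ships):

```python
import multiprocessing

# Clamp to at least one worker so Pool(processes=NUM_PROCESS) stays valid
# on 1- and 2-core machines; hypothetical hardening, not the committed code.
NUM_PROCESS = max(1, multiprocessing.cpu_count() // 2 - 1)
```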
@@ -566,7 +566,7 @@ def _get_basic_dict(ssz_dict: Dict[str, Any]) -> Dict[str, Any]:
     return result
 
 
-def _get_copy_of_spec(spec):
+def get_copy_of_spec(spec):
     fork = spec.fork
     preset = spec.config.PRESET_BASE
     module_path = f"eth2spec.{fork}.{preset}"

@@ -607,14 +607,14 @@ def with_config_overrides(config_overrides, emitted_fork=None, emit=True):
     def decorator(fn):
         def wrapper(*args, spec: Spec, **kw):
             # Apply config overrides to spec
-            spec, output_config = spec_with_config_overrides(_get_copy_of_spec(spec), config_overrides)
+            spec, output_config = spec_with_config_overrides(get_copy_of_spec(spec), config_overrides)
 
             # Apply config overrides to additional phases, if present
             if 'phases' in kw:
                 phases = {}
                 for fork in kw['phases']:
                     phases[fork], output = spec_with_config_overrides(
-                        _get_copy_of_spec(kw['phases'][fork]), config_overrides)
+                        get_copy_of_spec(kw['phases'][fork]), config_overrides)
                     if emitted_fork == fork:
                         output_config = output
                 kw['phases'] = phases
@@ -150,3 +150,23 @@ def test_incorrect_block_hash(spec, state):
 
     yield 'blocks', [signed_block]
     yield 'post', state
+
+
+@with_deneb_and_later
+@spec_state_test
+def test_zeroed_commitment(spec, state):
+    """
+    The blob is invalid, but the commitment is in correct form.
+    """
+    yield 'pre', state
+
+    block = build_empty_block_for_next_slot(spec, state)
+    opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1, is_valid_blob=False)
+    assert all(commitment == b'\x00' * 48 for commitment in blob_kzg_commitments)
+    block.body.blob_kzg_commitments = blob_kzg_commitments
+    block.body.execution_payload.transactions = [opaque_tx]
+    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
+    signed_block = state_transition_and_sign_block(spec, state, block)
+
+    yield 'blocks', [signed_block]
+    yield 'post', state
@@ -50,12 +50,9 @@ class SignedBlobTransaction(Container):
     signature: ECDSASignature
 
 
-def get_sample_blob(spec, rng=None):
-    if rng is None:
-        rng = random.Random(5566)
-
+def get_sample_blob(spec, rng=random.Random(5566), is_valid_blob=True):
     values = [
-        rng.randint(0, spec.BLS_MODULUS - 1)
+        rng.randint(0, spec.BLS_MODULUS - 1) if is_valid_blob else spec.BLS_MODULUS
         for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
     ]
@@ -98,15 +95,19 @@ def get_poly_in_both_forms(spec, rng=None):
     return coeffs, evals
 
 
-def get_sample_opaque_tx(spec, blob_count=1, rng=None):
+def get_sample_opaque_tx(spec, blob_count=1, rng=random.Random(5566), is_valid_blob=True):
     blobs = []
     blob_kzg_commitments = []
     blob_kzg_proofs = []
     blob_versioned_hashes = []
     for _ in range(blob_count):
-        blob = get_sample_blob(spec, rng)
-        blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
-        blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
+        blob = get_sample_blob(spec, rng, is_valid_blob=is_valid_blob)
+        if is_valid_blob:
+            blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob))
+            blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment)
+        else:
+            blob_commitment = spec.KZGCommitment()
+            blob_kzg_proof = spec.KZGProof()
        blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment)
        blobs.append(blob)
        blob_kzg_commitments.append(blob_commitment)
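A side note on the new `rng=random.Random(5566)` default: Python evaluates default arguments once at definition time, so all calls that omit `rng` share one `Random` instance and continue the same stream, whereas the removed `rng=None` idiom re-seeded per call. A minimal demonstration of the difference (illustrative only):

```python
import random


def sample_shared(rng=random.Random(1)):
    # One Random instance is created at definition time and shared by
    # every call that omits `rng`.
    return rng.randint(0, 9)


def sample_fresh(rng=None):
    # A new Random(1) is created on each call, restarting the stream.
    if rng is None:
        rng = random.Random(1)
    return rng.randint(0, 9)


print(sample_shared(), sample_shared())  # two draws from ONE ongoing stream
print(sample_fresh(), sample_fresh())    # stream restarts: identical draws
```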
@@ -10,20 +10,13 @@ from typing import (
 from pathlib import Path
 
 from eth_utils import encode_hex
-from py_ecc.optimized_bls12_381 import (  # noqa: F401
-    G1,
-    G2,
-    Z1,
-    Z2,
-    curve_order as BLS_MODULUS,
-    add,
-    multiply,
-    neg,
-)
 from py_ecc.typing import (
     Optimized_Point3D,
 )
 from eth2spec.utils import bls
+from eth2spec.utils.bls import (
+    BLS_MODULUS,
+)
 
 
 PRIMITIVE_ROOT_OF_UNITY = 7
@@ -35,7 +28,7 @@ def generate_setup(generator: Optimized_Point3D, secret: int, length: int) -> Tu…
     """
     result = [generator]
     for _ in range(1, length):
-        result.append(multiply(result[-1], secret))
+        result.append(bls.multiply(result[-1], secret))
     return tuple(result)
 
 
@@ -49,9 +42,9 @@ def fft(vals: Sequence[Optimized_Point3D], modulus: int, domain: int) -> Sequenc…
     R = fft(vals[1::2], modulus, domain[::2])
     o = [0] * len(vals)
     for i, (x, y) in enumerate(zip(L, R)):
-        y_times_root = multiply(y, domain[i])
-        o[i] = add(x, y_times_root)
-        o[i + len(L)] = add(x, neg(y_times_root))
+        y_times_root = bls.multiply(y, domain[i])
+        o[i] = bls.add(x, y_times_root)
+        o[i + len(L)] = bls.add(x, bls.neg(y_times_root))
     return o
 
 
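The same radix-2 butterfly written in plain field arithmetic may make the group version easier to follow; this scalar analogue (toy modulus and root of unity, assumed recursion structure matching the function above) swaps `bls.add`/`bls.multiply`/`bls.neg` for modular `+`, `*`, and negation:

```python
def fft_scalar(vals, modulus, domain):
    # domain = [w**0, w**1, ...] for an n-th root of unity w, n = len(vals)
    if len(vals) == 1:
        return vals
    L = fft_scalar(vals[::2], modulus, domain[::2])
    R = fft_scalar(vals[1::2], modulus, domain[::2])
    o = [0] * len(vals)
    for i, (x, y) in enumerate(zip(L, R)):
        y_times_root = (y * domain[i]) % modulus
        o[i] = (x + y_times_root) % modulus           # bls.add(x, y_times_root)
        o[i + len(L)] = (x - y_times_root) % modulus  # bls.add(x, bls.neg(...))
    return o


# Toy check mod 17: w = 4 is a 4th root of unity since 4**4 % 17 == 1.
print(fft_scalar([1, 2, 3, 4], 17, [pow(4, i, 17) for i in range(4)]))  # [10, 7, 15, 6]
```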
@@ -90,12 +83,14 @@ def get_lagrange(setup: Sequence[Optimized_Point3D]) -> Tuple[bytes]:
     # TODO: introduce an IFFT function for simplicity
     fft_output = fft(setup, BLS_MODULUS, domain)
     inv_length = pow(len(setup), BLS_MODULUS - 2, BLS_MODULUS)
-    return tuple(bls.G1_to_bytes48(multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))
+    return tuple(bls.G1_to_bytes48(bls.multiply(fft_output[-i], inv_length)) for i in range(len(fft_output)))
 
 
 def dump_kzg_trusted_setup_files(secret: int, g1_length: int, g2_length: int, output_dir: str) -> None:
-    setup_g1 = generate_setup(bls.G1, secret, g1_length)
-    setup_g2 = generate_setup(bls.G2, secret, g2_length)
+    bls.use_fastest()
+
+    setup_g1 = generate_setup(bls.G1(), secret, g1_length)
+    setup_g2 = generate_setup(bls.G2(), secret, g2_length)
     setup_g1_lagrange = get_lagrange(setup_g1)
     roots_of_unity = compute_roots_of_unity(g1_length)
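For context, `generate_setup` produces the powers of the secret applied to the generator, i.e. [G, sG, s²G, ...]. The integer analogue below shows the same recurrence with toy numbers (illustrative only, nothing to do with the real trusted-setup ceremony):

```python
def generate_setup_scalar(generator, secret, length, modulus):
    # result[i] = generator * secret**i (mod modulus), built incrementally
    # exactly as generate_setup multiplies the previous point by the secret.
    result = [generator]
    for _ in range(1, length):
        result.append(result[-1] * secret % modulus)
    return tuple(result)


print(generate_setup_scalar(1, 3, 5, 97))  # (1, 3, 9, 27, 81)
```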