Root next_sync_committee in attested_header
`LightClientUpdate` structures currently use a different Merkle proof root depending on the presence of `finalized_header`. By always rooting it in the same state (the `attested_header.state_root`), the logic gets simpler.

Caveats:
- In periods of extended non-finality, `update.finalized_header` may now be outdated by several sync committee periods. The old implementation rejected such updates because the `next_sync_committee` in them was stale; the new implementation can properly handle this case.
- The `next_sync_committee` can no longer be considered finalized based on `is_finality_update`. Instead, it is now necessary to wait until `finalized_header` is in the `attested_header`'s sync committee period.
- Because `update.finalized_header > store.finalized_header` no longer holds (for updates with finality), an `is_better_update` helper is added to improve `best_valid_update` tracking (in the past, finalized updates with supermajority participation would always apply directly).

This PR builds on prior work from:
- @hwwhww at https://github.com/ethereum/consensus-specs/pull/2829
This commit is contained in:
parent bfa048a449
commit c4dbd241ac
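To make the second caveat concrete: whether `next_sync_committee` may be treated as finalized reduces to a sync committee period comparison, as sketched below. The helper name is illustrative only; the same condition appears inside the new `is_better_update` helper in the diff.

```python
# Illustrative sketch only; the helper name is hypothetical. The same period
# comparison is used inside the new `is_better_update` helper below.
def has_sync_committee_finality(update: LightClientUpdate) -> bool:
    # `next_sync_committee` should only be considered finalized once `finalized_header`
    # has reached the sync committee period of `attested_header`.
    return (
        compute_sync_committee_period(compute_epoch_at_slot(update.finalized_header.slot)) ==
        compute_sync_committee_period(compute_epoch_at_slot(update.attested_header.slot))
    )
```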
@@ -19,8 +19,8 @@
   - [`is_sync_committee_update`](#is_sync_committee_update)
   - [`is_finality_update`](#is_finality_update)
   - [`get_subtree_index`](#get_subtree_index)
-  - [`get_active_header`](#get_active_header)
   - [`get_safety_threshold`](#get_safety_threshold)
+  - [`is_better_update`](#is_better_update)
 - [Light client state updates](#light-client-state-updates)
   - [`process_slot_for_light_client_store`](#process_slot_for_light_client_store)
   - [`validate_light_client_update`](#validate_light_client_update)
@@ -64,7 +64,7 @@ uses sync committees introduced in [this beacon chain extension](./beacon-chain.
 class LightClientUpdate(Container):
     # The beacon block header that is attested to by the sync committee
     attested_header: BeaconBlockHeader
-    # Next sync committee corresponding to the active header
+    # Next sync committee corresponding to `attested_header`
     next_sync_committee: SyncCommittee
     next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
     # The finalized beacon block header attested to by Merkle branch
@@ -118,19 +118,6 @@ def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64:
     return uint64(generalized_index % 2**(floorlog2(generalized_index)))
 ```
 
-### `get_active_header`
-
-```python
-def get_active_header(update: LightClientUpdate) -> BeaconBlockHeader:
-    # The "active header" is the header that the update is trying to convince us
-    # to accept. If a finalized header is present, it's the finalized header,
-    # otherwise it's the attested header
-    if is_finality_update(update):
-        return update.finalized_header
-    else:
-        return update.attested_header
-```
-
 ### `get_safety_threshold`
 
 ```python
@@ -141,6 +128,50 @@ def get_safety_threshold(store: LightClientStore) -> uint64:
     ) // 2
 ```
 
+### `is_better_update`
+
+```python
+def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdate) -> bool:
+    # Compare supermajority (> 2/3) sync committee participation
+    max_active_participants = len(new_update.sync_aggregate.sync_committee_bits)
+    new_num_active_participants = sum(new_update.sync_aggregate.sync_committee_bits)
+    old_num_active_participants = sum(old_update.sync_aggregate.sync_committee_bits)
+    new_has_supermajority = new_num_active_participants * 3 >= max_active_participants * 2
+    old_has_supermajority = old_num_active_participants * 3 >= max_active_participants * 2
+    if new_has_supermajority != old_has_supermajority:
+        return new_has_supermajority > old_has_supermajority
+    if not new_has_supermajority and new_num_active_participants != old_num_active_participants:
+        return new_num_active_participants > old_num_active_participants
+
+    # Compare indication of any finality
+    new_has_finality = is_finality_update(new_update)
+    old_has_finality = is_finality_update(old_update)
+    if new_has_finality != old_has_finality:
+        return new_has_finality > old_has_finality
+
+    # Compare sync committee finality
+    if new_has_finality:
+        new_has_sync_committee_finality = (
+            compute_sync_committee_period(compute_epoch_at_slot(new_update.finalized_header.slot)) ==
+            compute_sync_committee_period(compute_epoch_at_slot(new_update.attested_header.slot))
+        )
+        old_has_sync_committee_finality = (
+            compute_sync_committee_period(compute_epoch_at_slot(old_update.finalized_header.slot)) ==
+            compute_sync_committee_period(compute_epoch_at_slot(old_update.attested_header.slot))
+        )
+        if new_has_sync_committee_finality != old_has_sync_committee_finality:
+            return new_has_sync_committee_finality > old_has_sync_committee_finality
+
+    # Tiebreaker 1: Sync committee participation beyond supermajority
+    if new_num_active_participants != old_num_active_participants:
+        return new_num_active_participants > old_num_active_participants
+
+    # Tiebreaker 2: Prefer older data (fewer changes to best)
+    if new_update.attested_header.slot != old_update.attested_header.slot:
+        return new_update.attested_header.slot < old_update.attested_header.slot
+    return new_update.signature_slot < old_update.signature_slot
+```
+
 ## Light client state updates
 
 A light client maintains its state in a `store` object of type `LightClientStore` and receives `update` objects of type `LightClientUpdate`. Every `update` triggers `process_light_client_update(store, update, current_slot, genesis_validators_root)` where `current_slot` is the current slot based on a local clock. `process_slot_for_light_client_store` is triggered every time the current slot increments.
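For orientation, a client's outer loop that drives these two handlers might look like the sketch below. The clock helper `get_current_slot` and the network helper `next_gossip_updates` are hypothetical stand-ins, not part of the spec.

```python
# Illustrative driver loop, not part of the spec. `get_current_slot` and
# `next_gossip_updates` are hypothetical stand-ins for a local clock and a
# source of received `LightClientUpdate` objects.
def run_light_client(store: LightClientStore, genesis_validators_root: Root) -> None:
    last_slot = get_current_slot()
    while True:
        current_slot = get_current_slot()
        # Trigger `process_slot_for_light_client_store` once per slot increment
        # (this is where the forced-update timeout is handled).
        while last_slot < current_slot:
            last_slot += 1
            process_slot_for_light_client_store(store, last_slot)
        # Every received update triggers `process_light_client_update`.
        for update in next_gossip_updates():
            process_light_client_update(store, update, current_slot, genesis_validators_root)
```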
@@ -157,6 +188,8 @@ def process_slot_for_light_client_store(store: LightClientStore, current_slot: S
         and store.best_valid_update is not None
     ):
         # Forced best update when the update timeout has elapsed
+        if store.best_valid_update.finalized_header.slot <= store.finalized_header.slot:
+            store.best_valid_update.finalized_header = store.best_valid_update.attested_header
         apply_light_client_update(store, store.best_valid_update)
         store.best_valid_update = None
 ```
@@ -168,15 +201,19 @@ def validate_light_client_update(store: LightClientStore,
                                  update: LightClientUpdate,
                                  current_slot: Slot,
                                  genesis_validators_root: Root) -> None:
-    # Verify update slot is larger than slot of current best finalized header
-    active_header = get_active_header(update)
-    assert current_slot >= update.signature_slot > active_header.slot > store.finalized_header.slot
+    # Verify sync committee has sufficient participants
+    sync_aggregate = update.sync_aggregate
+    assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
 
     # Verify update does not skip a sync committee period
-    finalized_period = compute_sync_committee_period(compute_epoch_at_slot(store.finalized_header.slot))
-    update_period = compute_sync_committee_period(compute_epoch_at_slot(active_header.slot))
+    assert current_slot >= update.signature_slot > update.attested_header.slot >= update.finalized_header.slot
+    store_period = compute_sync_committee_period(compute_epoch_at_slot(store.finalized_header.slot))
     signature_period = compute_sync_committee_period(compute_epoch_at_slot(update.signature_slot))
-    assert signature_period in (finalized_period, finalized_period + 1)
+    assert signature_period in (store_period, store_period + 1)
+
+    # Verify update is relevant
+    attested_period = compute_sync_committee_period(compute_epoch_at_slot(update.attested_header.slot))
+    assert update.attested_header.slot > store.finalized_header.slot
 
     # Verify that the `finality_branch`, if present, confirms `finalized_header`
     # to match the finalized checkpoint root saved in the state of `attested_header`.
@@ -198,28 +235,23 @@ def validate_light_client_update(store: LightClientStore,
     )
 
     # Verify that the `next_sync_committee`, if present, actually is the next sync committee saved in the
-    # state of the `active_header`
+    # state of the `attested_header`
     if not is_sync_committee_update(update):
-        assert update_period == finalized_period
+        assert attested_period == store_period
         assert update.next_sync_committee == SyncCommittee()
     else:
-        if update_period == finalized_period:
+        if attested_period == store_period:
             assert update.next_sync_committee == store.next_sync_committee
         assert is_valid_merkle_branch(
             leaf=hash_tree_root(update.next_sync_committee),
             branch=update.next_sync_committee_branch,
             depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
             index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
-            root=active_header.state_root,
+            root=update.attested_header.state_root,
         )
 
-    sync_aggregate = update.sync_aggregate
-
-    # Verify sync committee has sufficient participants
-    assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
-
     # Verify sync committee aggregate signature
-    if signature_period == finalized_period:
+    if signature_period == store_period:
         sync_committee = store.current_sync_committee
     else:
         sync_committee = store.next_sync_committee
@@ -237,13 +269,12 @@ def validate_light_client_update(store: LightClientStore,
 
 ```python
 def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None:
-    active_header = get_active_header(update)
-    finalized_period = compute_sync_committee_period(compute_epoch_at_slot(store.finalized_header.slot))
-    update_period = compute_sync_committee_period(compute_epoch_at_slot(active_header.slot))
-    if update_period == finalized_period + 1:
+    store_period = compute_sync_committee_period(compute_epoch_at_slot(store.finalized_header.slot))
+    finalized_period = compute_sync_committee_period(compute_epoch_at_slot(update.finalized_header.slot))
+    if finalized_period == store_period + 1:
         store.current_sync_committee = store.next_sync_committee
         store.next_sync_committee = update.next_sync_committee
-    store.finalized_header = active_header
+    store.finalized_header = update.finalized_header
     if store.finalized_header.slot > store.optimistic_header.slot:
         store.optimistic_header = store.finalized_header
 ```
@@ -262,7 +293,7 @@ def process_light_client_update(store: LightClientStore,
     # Update the best update in case we have to force-update to it if the timeout elapses
     if (
         store.best_valid_update is None
-        or sum(sync_committee_bits) > sum(store.best_valid_update.sync_aggregate.sync_committee_bits)
+        or is_better_update(update, store.best_valid_update)
     ):
         store.best_valid_update = update
@@ -282,7 +313,7 @@ def process_light_client_update(store: LightClientStore,
     # Update finalized header
     if (
         sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2
-        and is_finality_update(update)
+        and update.finalized_header.slot > store.finalized_header.slot
     ):
         # Normal update through 2/3 threshold
         apply_light_client_update(store, update)
@@ -0,0 +1,132 @@
from eth2spec.test.context import (
    spec_state_test,
    with_presets,
    with_altair_and_later,
)
from eth2spec.test.helpers.attestations import (
    next_slots_with_attestations,
    state_transition_with_full_block,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.light_client import (
    get_sync_aggregate,
    signed_block_to_header,
)
from eth2spec.test.helpers.state import (
    next_slots,
)
from eth2spec.test.helpers.merkle import build_proof
from math import floor


def create_update(spec, test, with_next_sync_committee, with_finality, participation_rate):
    attested_state, attested_block, finalized_block = test
    num_participants = floor(spec.SYNC_COMMITTEE_SIZE * participation_rate)

    attested_header = signed_block_to_header(spec, attested_block)

    if with_next_sync_committee:
        next_sync_committee = attested_state.next_sync_committee
        next_sync_committee_branch = build_proof(attested_state.get_backing(), spec.NEXT_SYNC_COMMITTEE_INDEX)
    else:
        next_sync_committee = spec.SyncCommittee()
        next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]

    if with_finality:
        finalized_header = signed_block_to_header(spec, finalized_block)
        finality_branch = build_proof(attested_state.get_backing(), spec.FINALIZED_ROOT_INDEX)
    else:
        finalized_header = spec.BeaconBlockHeader()
        finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]

    sync_aggregate, signature_slot = get_sync_aggregate(spec, attested_state, num_participants)

    return spec.LightClientUpdate(
        attested_header=attested_header,
        next_sync_committee=next_sync_committee,
        next_sync_committee_branch=next_sync_committee_branch,
        finalized_header=finalized_header,
        finality_branch=finality_branch,
        sync_aggregate=sync_aggregate,
        signature_slot=signature_slot,
    )

@with_altair_and_later
@spec_state_test
@with_presets([MINIMAL], reason="too slow")
def test_update_ranking(spec, state):
    # Set up blocks and states:
    # - `sig_finalized` / `sig_attested` --> Only signature in next sync committee period
    # - `att_finalized` / `att_attested` --> Attested header also in next sync committee period
    # - `fin_finalized` / `fin_attested` --> Finalized header also in next sync committee period
    # - `lat_finalized` / `lat_attested` --> Like `fin`, but at a later `attested_header.slot`
    next_slots(spec, state, spec.compute_start_slot_at_epoch(spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 3) - 1)
    sig_finalized_block = state_transition_with_full_block(spec, state, True, True)
    _, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
    att_finalized_block = state_transition_with_full_block(spec, state, True, True)
    _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 2, True, True)
    sig_attested_block = state_transition_with_full_block(spec, state, True, True)
    sig_attested_state = state.copy()
    att_attested_block = state_transition_with_full_block(spec, state, True, True)
    att_attested_state = state.copy()
    fin_finalized_block = att_attested_block
    _, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
    fin_attested_block = state_transition_with_full_block(spec, state, True, True)
    fin_attested_state = state.copy()
    lat_finalized_block = fin_finalized_block
    lat_attested_block = state_transition_with_full_block(spec, state, True, True)
    lat_attested_state = state.copy()
    sig = (sig_attested_state, sig_attested_block, sig_finalized_block)
    att = (att_attested_state, att_attested_block, att_finalized_block)
    fin = (fin_attested_state, fin_attested_block, fin_finalized_block)
    lat = (lat_attested_state, lat_attested_block, lat_finalized_block)

    # Create updates (in descending order of quality)
    updates = [
        # Updates with sync committee finality
        create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
        create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
        create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
        create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
        create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
        create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),

        # Updates without sync committee finality
        create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
        create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),

        # Updates without indication of any finality
        create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
        create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
        create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
        create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
        create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
        create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
        create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
        create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),

        # Updates with low sync committee participation
        create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
        create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
        create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
        create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
        create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
        create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
        create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
        create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),

        # Updates with very low sync committee participation
        create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
        create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
        create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
        create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
        create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
        create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
        create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
        create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
    ]
    yield "updates", updates

    for i in range(len(updates) - 1):
        assert spec.is_better_update(updates[i], updates[i + 1])
@@ -29,7 +29,7 @@ def initialize_light_client_store(spec, state):
     )
 
 
-def get_sync_aggregate(spec, state, signature_slot=None):
+def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None):
     # By default, the sync committee signs the previous slot
     if signature_slot is None:
         signature_slot = state.slot + 1
@@ -42,13 +42,18 @@ def get_sync_aggregate(spec, state, signature_slot=None):
     committee_indices = compute_committee_indices(spec, signature_state)
     committee_size = len(committee_indices)
 
+    # By default, use full participation
+    if num_participants is None:
+        num_participants = committee_size
+    assert committee_size >= num_participants >= 0
+
     # Compute sync aggregate
-    sync_committee_bits = [True] * committee_size
+    sync_committee_bits = [True] * num_participants + [False] * (committee_size - num_participants)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
         signature_state,
         signature_slot,
-        committee_indices,
+        committee_indices[:num_participants],
     )
     sync_aggregate = spec.SyncAggregate(
         sync_committee_bits=sync_committee_bits,
tests/formats/sync_protocol/README.md (new file)
@@ -0,0 +1,6 @@
# Light client sync protocol tests

This series of tests provides reference test vectors for the light client sync protocol spec.

Handlers:
- `update_ranking`: see [`LightClientUpdate` ranking test format](./update_ranking.md)
tests/formats/sync_protocol/update_ranking.md (new file)
@@ -0,0 +1,21 @@
# `LightClientUpdate` ranking tests

This series of tests provides reference test vectors for validating that `LightClientUpdate` instances are ranked in a canonical order.

## Test case format

### `meta.yaml`

```yaml
updates_count: int -- integer, decimal
```

### `updates_<index>.ssz_snappy`

A series of files, with `<index>` in range `[0, updates_count)`, ordered by descending precedence according to `is_better_update` (best update at index 0).

Each file is a SSZ-snappy encoded `LightClientUpdate`.

## Condition

A test-runner should load the provided `update` objects and verify that the local implementation ranks them in the same order. Note that the `update` objects are not restricted to a single sync committee period for the scope of this test.
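For illustration, a runner for this format might follow the sketch below; the `load_yaml` and `load_ssz_snappy` helpers and the `spec` object are assumptions about the local test harness, not part of the format.

```python
# Illustrative runner sketch; `load_yaml` and `load_ssz_snappy` are hypothetical
# harness helpers for reading the test vector files.
def run_update_ranking_test(spec, test_dir):
    meta = load_yaml(test_dir / "meta.yaml")
    updates = [
        load_ssz_snappy(test_dir / f"updates_{i}.ssz_snappy", spec.LightClientUpdate)
        for i in range(meta["updates_count"])
    ]
    # Index 0 holds the best update; each update must rank better than the next one.
    for i in range(len(updates) - 1):
        assert spec.is_better_update(updates[i], updates[i + 1])
```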
tests/generators/sync_protocol/README.md (new file)
@@ -0,0 +1,5 @@
# Light client sync protocol tests

The purpose of this test-generator is to provide test-vectors for validating the correct implementation of the light client sync protocol.

Test-format documentation can be found [here](../../formats/sync_protocol/README.md).
tests/generators/sync_protocol/__init__.py (new empty file)

tests/generators/sync_protocol/main.py (new file)
@@ -0,0 +1,16 @@
from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators


if __name__ == "__main__":
    altair_mods = {key: 'eth2spec.test.altair.sync_protocol.test_' + key for key in [
        'update_ranking',
    ]}
    bellatrix_mods = altair_mods

    all_mods = {
        ALTAIR: altair_mods,
        BELLATRIX: bellatrix_mods,
    }

    run_state_test_generators(runner_name="sync_protocol", all_mods=all_mods)
tests/generators/sync_protocol/requirements.txt (new file)
@@ -0,0 +1,2 @@
pytest>=4.4
../../../[generator]