commit 15eb331f1e
@@ -30,7 +30,7 @@ on:
     - cron: '0 0 * * *'
 
 jobs:
-  precleanup:
+  preclear:
     runs-on: self-hosted
     if: always()
     steps:
@@ -40,43 +40,39 @@ jobs:
         rm -rf ./* || true
         rm -rf ./.??* || true
         ls -la ./
-  setup-env:
-    runs-on: self-hosted
-    needs: precleanup
-    steps:
 
+  table_of_contents:
+    runs-on: self-hosted
+    needs: preclear
+    steps:
+    - name: Checkout this repo
+      uses: actions/checkout@v3.2.0
+      with:
+        ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
-    - uses: actions/cache@v3.2.2
-      id: cache-git
-      with:
-        path: ./*
-        key: ${{ github.sha }}
 
-  table_of_contents:
-    runs-on: self-hosted
-    needs: setup-env
-    steps:
-    - uses: actions/cache@v3.2.2
-      id: restore-build
-      with:
-        path: ./*
-        key: ${{ github.sha }}
     - name: Check table of contents
       run: sudo npm install -g doctoc@2 && make check_toc
 
   codespell:
     runs-on: self-hosted
-    needs: setup-env
+    needs: preclear
     steps:
     - name: Checkout this repo
       uses: actions/checkout@v3.2.0
       with:
         ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
     - name: Check codespell
       run: pip install 'codespell<3.0.0,>=2.0.0' --user && make codespell
 
   lint:
     runs-on: self-hosted
-    needs: setup-env
+    needs: preclear
     steps:
     - name: Checkout this repo
       uses: actions/checkout@v3.2.0
       with:
         ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
     - name: Install pyspec requirements
       run: make install_test
     - name: Run linter for pyspec
       run: make lint
     - name: Run linter for test generators
@@ -84,16 +80,15 @@ jobs:
 
   pyspec-tests:
     runs-on: self-hosted
-    needs: [setup-env,lint,codespell,table_of_contents]
+    needs: [preclear,lint,codespell,table_of_contents]
     strategy:
       matrix:
         version: ["phase0", "altair", "bellatrix", "capella", "eip4844"]
     steps:
-    - uses: actions/cache@v3.2.2
-      id: restore-build
+    - name: Checkout this repo
+      uses: actions/checkout@v3.2.0
       with:
-        path: ./*
-        key: ${{ github.sha }}
+        ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
    - name: set TEST_PRESET_TYPE
      if: github.event.inputs.test_preset_type != ''
      run: |
@@ -122,7 +117,7 @@ jobs:
 
   cleanup:
     runs-on: self-hosted
-    needs: [setup-env,pyspec-tests,codespell,lint,table_of_contents]
+    needs: [preclear,pyspec-tests,codespell,lint,table_of_contents]
     if: always()
     steps:
     - name: 'Cleanup build folder'
@@ -38,13 +38,15 @@ def compute_merkle_proof_for_state(state: BeaconState,
 ### `block_to_light_client_header`
 
 ```python
-def block_to_light_client_header(block: SignedBeaconBlock) -> BeaconBlockHeader:
-    return BeaconBlockHeader(
-        slot=block.message.slot,
-        proposer_index=block.message.proposer_index,
-        parent_root=block.message.parent_root,
-        state_root=block.message.state_root,
-        body_root=hash_tree_root(block.message.body),
+def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
+    return LightClientHeader(
+        beacon=BeaconBlockHeader(
+            slot=block.message.slot,
+            proposer_index=block.message.proposer_index,
+            parent_root=block.message.parent_root,
+            state_root=block.message.state_root,
+            body_root=hash_tree_root(block.message.body),
+        ),
     )
 ```
 
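Illustrative sketch, not part of the diff: the wrapping does not change which object the beacon block root commits to, so a consumer now checks the inner `beacon` field against roots. Assumes the pyspec helpers are in scope; `signed_block` and `attested_state` are hypothetical values.

```python
# Sketch only (pyspec context assumed): checking a derived header against a
# finalized checkpoint root now goes through the wrapped `beacon` field.
header = block_to_light_client_header(signed_block)

# `hash_tree_root(header)` would hash the outer container, which is NOT the block root.
assert hash_tree_root(header.beacon) == attested_state.finalized_checkpoint.root
```
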
@@ -125,7 +127,7 @@ def create_light_client_update(state: BeaconState,
     if finalized_block is not None:
         if finalized_block.message.slot != GENESIS_SLOT:
             update.finalized_header = block_to_light_client_header(finalized_block)
-            assert hash_tree_root(update.finalized_header) == attested_state.finalized_checkpoint.root
+            assert hash_tree_root(update.finalized_header.beacon) == attested_state.finalized_checkpoint.root
         else:
             assert attested_state.finalized_checkpoint.root == Bytes32()
         update.finality_branch = compute_merkle_proof_for_state(
@@ -139,8 +141,8 @@ def create_light_client_update(state: BeaconState,
 
 Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to `is_better_update`) for each sync committee period covering any epochs in range `[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` where `current_epoch` is defined by the current wall-clock time. Full nodes MAY also provide `LightClientUpdate` for other sync committee periods.
 
-- `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.slot`
-- `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.slot) == compute_sync_committee_period_at_slot(update.signature_slot)`
+- `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.beacon.slot`
+- `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.beacon.slot) == compute_sync_committee_period_at_slot(update.signature_slot)`
 - Only `LightClientUpdate` with `next_sync_committee` as selected by fork choice are provided, regardless of ranking by `is_better_update`. To uniquely identify a non-finalized sync committee fork, all of `period`, `current_sync_committee` and `next_sync_committee` need to be incorporated, as sync committees may reappear over time.
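A non-normative sketch of the bucketing described in these bullets, using illustrative stand-ins for the preset constants:

```python
# Illustrative constants; actual values come from the spec preset/config.
SLOTS_PER_EPOCH = 32
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256


def sync_committee_period_at_slot(slot: int) -> int:
    # Mirrors `compute_sync_committee_period_at_slot(slot)` from the spec.
    return slot // (SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)


def update_period(update) -> int:
    # Updates are bucketed by their attested header's beacon slot.
    return sync_committee_period_at_slot(update.attested_header.beacon.slot)


def is_relevant(update) -> bool:
    # Only serve updates whose signature slot falls in the same period as the attested header.
    return update_period(update) == sync_committee_period_at_slot(update.signature_slot)
```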
 
 ### `create_light_client_finality_update`
@@ -156,7 +158,7 @@ def create_light_client_finality_update(update: LightClientUpdate) -> LightClien
     )
 ```
 
-Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes.
+Full nodes SHOULD provide the `LightClientFinalityUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientFinalityUpdate` whenever `finalized_header` changes.
 
 ### `create_light_client_optimistic_update`
 
@@ -169,4 +171,4 @@ def create_light_client_optimistic_update(update: LightClientUpdate) -> LightCli
     )
 ```
 
-Full nodes SHOULD provide the `LightClientOptimisticUpdate` with the highest `attested_header.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientOptimisticUpdate` whenever `attested_header` changes.
+Full nodes SHOULD provide the `LightClientOptimisticUpdate` with the highest `attested_header.beacon.slot` (if multiple, highest `signature_slot`) as selected by fork choice, and SHOULD support a push mechanism to deliver new `LightClientOptimisticUpdate` whenever `attested_header` changes.
@@ -23,7 +23,7 @@ This document explains how light clients MAY obtain light client data to sync wi
 1. The light client MUST be configured out-of-band with a spec/preset (including fork schedule), with `genesis_state` (including `genesis_time` and `genesis_validators_root`), and with a trusted block root. The trusted block SHOULD be within the weak subjectivity period, and its root SHOULD be from a finalized `Checkpoint`.
 2. The local clock is initialized based on the configured `genesis_time`, and the current fork digest is determined to browse for and connect to relevant light client data providers.
 3. The light client fetches a [`LightClientBootstrap`](./sync-protocol.md#lightclientbootstrap) object for the configured trusted block root. The `bootstrap` object is passed to [`initialize_light_client_store`](./sync-protocol.md#initialize_light_client_store) to obtain a local [`LightClientStore`](./sync-protocol.md#lightclientstore).
-4. The light client tracks the sync committee periods `finalized_period` from `store.finalized_header.slot`, `optimistic_period` from `store.optimistic_header.slot`, and `current_period` from `current_slot` based on the local clock.
+4. The light client tracks the sync committee periods `finalized_period` from `store.finalized_header.beacon.slot`, `optimistic_period` from `store.optimistic_header.beacon.slot`, and `current_period` from `current_slot` based on the local clock.
     1. When `finalized_period == optimistic_period` and [`is_next_sync_committee_known`](./sync-protocol.md#is_next_sync_committee_known) indicates `False`, the light client fetches a [`LightClientUpdate`](./sync-protocol.md#lightclientupdate) for `finalized_period`. If `finalized_period == current_period`, this fetch SHOULD be scheduled at a random time before `current_period` advances.
     2. When `finalized_period + 1 < current_period`, the light client fetches a `LightClientUpdate` for each sync committee period in range `[finalized_period + 1, current_period)` (current period excluded)
     3. When `finalized_period + 1 >= current_period`, the light client keeps observing [`LightClientFinalityUpdate`](./sync-protocol.md#lightclientfinalityupdate) and [`LightClientOptimisticUpdate`](./sync-protocol.md#lightclientoptimisticupdate). Received objects are passed to [`process_light_client_finality_update`](./sync-protocol.md#process_light_client_finality_update) and [`process_light_client_optimistic_update`](./sync-protocol.md#process_light_client_optimistic_update). This ensures that `finalized_header` and `optimistic_header` reflect the latest blocks.
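A minimal sketch of the period bookkeeping in step 4 above (duck-typed `store`; the constants are illustrative stand-ins for the preset values, and `genesis_time` is assumed to come from the configured `genesis_state`):

```python
import time

# Illustrative stand-ins for preset/config values.
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256
SLOTS_PER_SYNC_COMMITTEE_PERIOD = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD


def current_slot(genesis_time: int) -> int:
    # Wall-clock slot based on the configured genesis time.
    return (int(time.time()) - genesis_time) // SECONDS_PER_SLOT


def sync_periods(store, genesis_time: int):
    # Note the `.beacon.slot` accesses introduced by this change.
    finalized_period = store.finalized_header.beacon.slot // SLOTS_PER_SYNC_COMMITTEE_PERIOD
    optimistic_period = store.optimistic_header.beacon.slot // SLOTS_PER_SYNC_COMMITTEE_PERIOD
    current_period = current_slot(genesis_time) // SLOTS_PER_SYNC_COMMITTEE_PERIOD
    return finalized_period, optimistic_period, current_period
```
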
@@ -59,7 +59,7 @@ New global topics are added to provide light clients with the latest updates.
 This topic is used to propagate the latest `LightClientFinalityUpdate` to light clients, allowing them to keep track of the latest `finalized_header`.
 
 The following validations MUST pass before forwarding the `finality_update` on the network.
-- _[IGNORE]_ The `finalized_header.slot` is greater than that of all previously forwarded `finality_update`s
+- _[IGNORE]_ The `finalized_header.beacon.slot` is greater than that of all previously forwarded `finality_update`s
 - _[IGNORE]_ The `finality_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `finality_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
 
 For full nodes, the following validations MUST additionally pass before forwarding the `finality_update` on the network.
@@ -67,11 +67,11 @@ For full nodes, the following validations MUST additionally pass before forwardi
 
 For light clients, the following validations MUST additionally pass before forwarding the `finality_update` on the network.
 - _[REJECT]_ The `finality_update` is valid -- i.e. validate that `process_light_client_finality_update` does not indicate errors
-- _[IGNORE]_ The `finality_update` advances the `finalized_header` of the local `LightClientStore` -- i.e. validate that processing `finality_update` increases `store.finalized_header.slot`
+- _[IGNORE]_ The `finality_update` advances the `finalized_header` of the local `LightClientStore` -- i.e. validate that processing `finality_update` increases `store.finalized_header.beacon.slot`
 
 Light clients SHOULD call `process_light_client_finality_update` even if the message is ignored.
 
-The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.slot))`.
+The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))`.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
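As a sketch, the topic context for a received `finality_update` can be derived as follows (pyspec helpers assumed in scope; `finality_update` and `genesis_validators_root` are supplied by the caller):

```python
# Sketch (pyspec context assumed): select the fork digest from the attested header's beacon slot.
fork_version = compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))
context = compute_fork_digest(fork_version, genesis_validators_root)
```
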
@@ -87,7 +87,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 This topic is used to propagate the latest `LightClientOptimisticUpdate` to light clients, allowing them to keep track of the latest `optimistic_header`.
 
 The following validations MUST pass before forwarding the `optimistic_update` on the network.
-- _[IGNORE]_ The `attested_header.slot` is greater than that of all previously forwarded `optimistic_update`s
+- _[IGNORE]_ The `attested_header.beacon.slot` is greater than that of all previously forwarded `optimistic_update`s
 - _[IGNORE]_ The `optimistic_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `optimistic_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
 
 For full nodes, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network.
@@ -95,11 +95,11 @@ For full nodes, the following validations MUST additionally pass before forwardi
 
 For light clients, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network.
 - _[REJECT]_ The `optimistic_update` is valid -- i.e. validate that `process_light_client_optimistic_update` does not indicate errors
-- _[IGNORE]_ The `optimistic_update` either matches corresponding fields of the most recently forwarded `LightClientFinalityUpdate` (if any), or it advances the `optimistic_header` of the local `LightClientStore` -- i.e. validate that processing `optimistic_update` increases `store.optimistic_header.slot`
+- _[IGNORE]_ The `optimistic_update` either matches corresponding fields of the most recently forwarded `LightClientFinalityUpdate` (if any), or it advances the `optimistic_header` of the local `LightClientStore` -- i.e. validate that processing `optimistic_update` increases `store.optimistic_header.beacon.slot`
 
 Light clients SHOULD call `process_light_client_optimistic_update` even if the message is ignored.
 
-The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.slot))`.
+The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))`.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
@@ -142,7 +142,7 @@ Peers SHOULD provide results as defined in [`create_light_client_bootstrap`](./f
 
 When a `LightClientBootstrap` instance cannot be produced for a given block root, peers SHOULD respond with error code `3: ResourceUnavailable`.
 
-A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(bootstrap.header.slot))` is used to select the fork namespace of the Response type.
+A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(bootstrap.header.beacon.slot))` is used to select the fork namespace of the Response type.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
@@ -180,7 +180,7 @@ The response MUST consist of zero or more `response_chunk`. Each _successful_ `r
 
 Peers SHOULD provide results as defined in [`create_light_client_update`](./full-node.md#create_light_client_update). They MUST respond with at least the earliest known result within the requested range, and MUST send results in consecutive order (by period). The response MUST NOT contain more than `min(MAX_REQUEST_LIGHT_CLIENT_UPDATES, count)` results.
 
-For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `update.sync_aggregate`, which is based on `update.signature_slot`.
+For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `update.sync_aggregate`, which is based on `update.signature_slot`.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
@@ -211,7 +211,7 @@ Peers SHOULD provide results as defined in [`create_light_client_finality_update
 
 When no `LightClientFinalityUpdate` is available, peers SHOULD respond with error code `3: ResourceUnavailable`.
 
-A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `finality_update.sync_aggregate`, which is based on `finality_update.signature_slot`.
+A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `finality_update.sync_aggregate`, which is based on `finality_update.signature_slot`.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
@@ -242,7 +242,7 @@ Peers SHOULD provide results as defined in [`create_light_client_optimistic_upda
 
 When no `LightClientOptimisticUpdate` is available, peers SHOULD respond with error code `3: ResourceUnavailable`.
 
-A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `optimistic_update.sync_aggregate`, which is based on `optimistic_update.signature_slot`.
+A `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))` is used to select the fork namespace of the Response type. Note that this `fork_version` may be different from the one used to verify the `optimistic_update.sync_aggregate`, which is based on `optimistic_update.signature_slot`.
 
 Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
@@ -255,7 +255,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
 
 ## Light clients
 
-Light clients using libp2p to stay in sync with the network SHOULD subscribe to the [`light_client_finality_update`](#light_client_finality_update) and [`light_client_optimistic_update`](#light_client_optimistic_update) pubsub topics and validate all received messages while the [light client sync process](./light-client.md#light-client-sync-process) supports processing `LightClientFinalityUpdate` and `LightClientOptimistic` structures.
+Light clients using libp2p to stay in sync with the network SHOULD subscribe to the [`light_client_finality_update`](#light_client_finality_update) and [`light_client_optimistic_update`](#light_client_optimistic_update) pubsub topics and validate all received messages while the [light client sync process](./light-client.md#light-client-sync-process) supports processing `LightClientFinalityUpdate` and `LightClientOptimisticUpdate` structures.
 
 Light clients MAY also collect historic light client data and make it available to other peers. If they do, they SHOULD advertise supported message endpoints in [the Req/Resp domain](#the-reqresp-domain), and MAY also update the contents of their [`Status`](../../phase0/p2p-interface.md#status) message to reflect the locally available light client data.
 
@@ -273,7 +273,7 @@ All full nodes SHOULD subscribe to and provide stability on the [`light_client_f
 
 Whenever fork choice selects a new head block with a sync aggregate participation `>= MIN_SYNC_COMMITTEE_PARTICIPANTS` and a post-Altair parent block, full nodes with at least one validator assigned to the current sync committee at the block's `slot` SHOULD broadcast derived light client data as follows:
 
-- If `finalized_header.slot` increased, a `LightClientFinalityUpdate` SHOULD be broadcasted to the pubsub topic `light_client_finality_update` if no matching message has not yet been forwarded as part of gossip validation.
-- If `attested_header.slot` increased, a `LightClientOptimisticUpdate` SHOULD be broadcasted to the pubsub topic `light_client_optimistic_update` if no matching message has not yet been forwarded as part of gossip validation.
+- If `finalized_header.beacon.slot` increased, a `LightClientFinalityUpdate` SHOULD be broadcasted to the pubsub topic `light_client_finality_update` if no matching message has yet been forwarded as part of gossip validation.
+- If `attested_header.beacon.slot` increased, a `LightClientOptimisticUpdate` SHOULD be broadcasted to the pubsub topic `light_client_optimistic_update` if no matching message has yet been forwarded as part of gossip validation.
 
 These messages SHOULD be broadcasted after one-third of `slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot). To ensure that the corresponding block was given enough time to propagate through the network, they SHOULD NOT be sent earlier. Note that this is different from how other messages are handled, e.g., attestations, which may be sent early.
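For illustration, the earliest permissible broadcast time for a given slot can be computed as in the following sketch (`genesis_time` and the constants are assumed to come from the configuration):

```python
# Illustrative stand-ins for config values.
SECONDS_PER_SLOT = 12
INTERVALS_PER_SLOT = 3


def earliest_broadcast_time(genesis_time: int, slot: int) -> int:
    # Start of the slot in wall-clock seconds.
    slot_start = genesis_time + slot * SECONDS_PER_SLOT
    # One-third into the slot, matching the gossip validation on the receive side.
    return slot_start + SECONDS_PER_SLOT // INTERVALS_PER_SLOT
```
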
@@ -13,12 +13,14 @@
 - [Preset](#preset)
   - [Misc](#misc)
 - [Containers](#containers)
+  - [`LightClientHeader`](#lightclientheader)
   - [`LightClientBootstrap`](#lightclientbootstrap)
   - [`LightClientUpdate`](#lightclientupdate)
   - [`LightClientFinalityUpdate`](#lightclientfinalityupdate)
   - [`LightClientOptimisticUpdate`](#lightclientoptimisticupdate)
   - [`LightClientStore`](#lightclientstore)
 - [Helper functions](#helper-functions)
+  - [`is_valid_light_client_header`](#is_valid_light_client_header)
   - [`is_sync_committee_update`](#is_sync_committee_update)
   - [`is_finality_update`](#is_finality_update)
   - [`is_better_update`](#is_better_update)
@@ -73,13 +75,23 @@ Additional documents describe how the light client sync protocol can be used:
 
 ## Containers
 
+### `LightClientHeader`
+
+```python
+class LightClientHeader(Container):
+    # Beacon block header
+    beacon: BeaconBlockHeader
+```
+
+Future upgrades may introduce additional fields to this structure, and validate them by extending [`is_valid_light_client_header`](#is_valid_light_client_header).
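Hypothetical illustration, not part of this patch: a later fork could extend the container with extra fields while keeping `beacon` in place, tightening `is_valid_light_client_header` accordingly. The `execution` field below is only an example of such an extension.

```python
# Hypothetical sketch of a future-fork extension (NOT part of this diff).
class LightClientHeader(Container):
    # Beacon block header
    beacon: BeaconBlockHeader
    # Example of a field a later fork might add; `is_valid_light_client_header`
    # would then be extended to check it against `beacon`.
    execution: ExecutionPayloadHeader
```
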
+
 ### `LightClientBootstrap`
 
 ```python
 class LightClientBootstrap(Container):
     # Header matching the requested beacon block root
-    header: BeaconBlockHeader
-    # Current sync committee corresponding to `header.state_root`
+    header: LightClientHeader
+    # Current sync committee corresponding to `header.beacon.state_root`
     current_sync_committee: SyncCommittee
     current_sync_committee_branch: Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_INDEX)]
 ```
@@ -89,12 +101,12 @@ class LightClientBootstrap(Container):
 ```python
 class LightClientUpdate(Container):
     # Header attested to by the sync committee
-    attested_header: BeaconBlockHeader
-    # Next sync committee corresponding to `attested_header.state_root`
+    attested_header: LightClientHeader
+    # Next sync committee corresponding to `attested_header.beacon.state_root`
     next_sync_committee: SyncCommittee
     next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
-    # Finalized header corresponding to `attested_header.state_root`
-    finalized_header: BeaconBlockHeader
+    # Finalized header corresponding to `attested_header.beacon.state_root`
+    finalized_header: LightClientHeader
     finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
     # Sync committee aggregate signature
     sync_aggregate: SyncAggregate
@@ -107,9 +119,9 @@ class LightClientUpdate(Container):
 ```python
 class LightClientFinalityUpdate(Container):
     # Header attested to by the sync committee
-    attested_header: BeaconBlockHeader
-    # Finalized header corresponding to `attested_header.state_root`
-    finalized_header: BeaconBlockHeader
+    attested_header: LightClientHeader
+    # Finalized header corresponding to `attested_header.beacon.state_root`
+    finalized_header: LightClientHeader
     finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
     # Sync committee aggregate signature
     sync_aggregate: SyncAggregate
@@ -122,7 +134,7 @@ class LightClientFinalityUpdate(Container):
 ```python
 class LightClientOptimisticUpdate(Container):
     # Header attested to by the sync committee
-    attested_header: BeaconBlockHeader
+    attested_header: LightClientHeader
     # Sync committee aggregate signature
     sync_aggregate: SyncAggregate
     # Slot at which the aggregate signature was created (untrusted)
@@ -135,14 +147,14 @@ class LightClientOptimisticUpdate(Container):
 @dataclass
 class LightClientStore(object):
     # Header that is finalized
-    finalized_header: BeaconBlockHeader
+    finalized_header: LightClientHeader
     # Sync committees corresponding to the finalized header
     current_sync_committee: SyncCommittee
     next_sync_committee: SyncCommittee
     # Best available header to switch finalized head to if we see nothing else
     best_valid_update: Optional[LightClientUpdate]
     # Most recent available reasonably-safe header
-    optimistic_header: BeaconBlockHeader
+    optimistic_header: LightClientHeader
     # Max number of active participants in a sync committee (used to calculate safety threshold)
     previous_max_active_participants: uint64
     current_max_active_participants: uint64
@@ -150,6 +162,14 @@ class LightClientStore(object):
 
 ## Helper functions
 
+### `is_valid_light_client_header`
+
+```python
+def is_valid_light_client_header(header: LightClientHeader) -> bool:
+    # pylint: disable=unused-argument
+    return True
+```
+
 ### `is_sync_committee_update`
 
 ```python
@@ -181,11 +201,11 @@ def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdat
 
     # Compare presence of relevant sync committee
     new_has_relevant_sync_committee = is_sync_committee_update(new_update) and (
-        compute_sync_committee_period_at_slot(new_update.attested_header.slot)
+        compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot)
         == compute_sync_committee_period_at_slot(new_update.signature_slot)
     )
     old_has_relevant_sync_committee = is_sync_committee_update(old_update) and (
-        compute_sync_committee_period_at_slot(old_update.attested_header.slot)
+        compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot)
         == compute_sync_committee_period_at_slot(old_update.signature_slot)
     )
     if new_has_relevant_sync_committee != old_has_relevant_sync_committee:
@@ -200,12 +220,12 @@ def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdat
     # Compare sync committee finality
     if new_has_finality:
         new_has_sync_committee_finality = (
-            compute_sync_committee_period_at_slot(new_update.finalized_header.slot)
-            == compute_sync_committee_period_at_slot(new_update.attested_header.slot)
+            compute_sync_committee_period_at_slot(new_update.finalized_header.beacon.slot)
+            == compute_sync_committee_period_at_slot(new_update.attested_header.beacon.slot)
         )
         old_has_sync_committee_finality = (
-            compute_sync_committee_period_at_slot(old_update.finalized_header.slot)
-            == compute_sync_committee_period_at_slot(old_update.attested_header.slot)
+            compute_sync_committee_period_at_slot(old_update.finalized_header.beacon.slot)
+            == compute_sync_committee_period_at_slot(old_update.attested_header.beacon.slot)
         )
         if new_has_sync_committee_finality != old_has_sync_committee_finality:
             return new_has_sync_committee_finality
@@ -215,8 +235,8 @@ def is_better_update(new_update: LightClientUpdate, old_update: LightClientUpdat
         return new_num_active_participants > old_num_active_participants
 
     # Tiebreaker 2: Prefer older data (fewer changes to best)
-    if new_update.attested_header.slot != old_update.attested_header.slot:
-        return new_update.attested_header.slot < old_update.attested_header.slot
+    if new_update.attested_header.beacon.slot != old_update.attested_header.beacon.slot:
+        return new_update.attested_header.beacon.slot < old_update.attested_header.beacon.slot
     return new_update.signature_slot < old_update.signature_slot
 ```
 
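A sketch of how a data provider might use `is_better_update` to pick the single update to serve for a period (assumes the pyspec types and helpers above are in scope):

```python
from typing import Iterable, Optional


def select_best_update(updates: Iterable[LightClientUpdate]) -> Optional[LightClientUpdate]:
    # Fold `is_better_update` over all known updates for one sync committee period.
    best: Optional[LightClientUpdate] = None
    for update in updates:
        if best is None or is_better_update(update, best):
            best = update
    return best
```
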
@@ -260,14 +280,15 @@ A light client maintains its state in a `store` object of type `LightClientStore
 ```python
 def initialize_light_client_store(trusted_block_root: Root,
                                   bootstrap: LightClientBootstrap) -> LightClientStore:
-    assert hash_tree_root(bootstrap.header) == trusted_block_root
+    assert is_valid_light_client_header(bootstrap.header)
+    assert hash_tree_root(bootstrap.header.beacon) == trusted_block_root
 
     assert is_valid_merkle_branch(
         leaf=hash_tree_root(bootstrap.current_sync_committee),
         branch=bootstrap.current_sync_committee_branch,
         depth=floorlog2(CURRENT_SYNC_COMMITTEE_INDEX),
         index=get_subtree_index(CURRENT_SYNC_COMMITTEE_INDEX),
-        root=bootstrap.header.state_root,
+        root=bootstrap.header.beacon.state_root,
     )
 
     return LightClientStore(
@@ -301,8 +322,11 @@ def validate_light_client_update(store: LightClientStore,
     assert sum(sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
 
     # Verify update does not skip a sync committee period
-    assert current_slot >= update.signature_slot > update.attested_header.slot >= update.finalized_header.slot
-    store_period = compute_sync_committee_period_at_slot(store.finalized_header.slot)
+    assert is_valid_light_client_header(update.attested_header)
+    update_attested_slot = update.attested_header.beacon.slot
+    update_finalized_slot = update.finalized_header.beacon.slot
+    assert current_slot >= update.signature_slot > update_attested_slot >= update_finalized_slot
+    store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot)
     update_signature_period = compute_sync_committee_period_at_slot(update.signature_slot)
     if is_next_sync_committee_known(store):
         assert update_signature_period in (store_period, store_period + 1)
@@ -310,12 +334,12 @@ def validate_light_client_update(store: LightClientStore,
         assert update_signature_period == store_period
 
     # Verify update is relevant
-    update_attested_period = compute_sync_committee_period_at_slot(update.attested_header.slot)
+    update_attested_period = compute_sync_committee_period_at_slot(update_attested_slot)
     update_has_next_sync_committee = not is_next_sync_committee_known(store) and (
         is_sync_committee_update(update) and update_attested_period == store_period
     )
     assert (
-        update.attested_header.slot > store.finalized_header.slot
+        update_attested_slot > store.finalized_header.beacon.slot
         or update_has_next_sync_committee
     )
 
@@ -323,19 +347,20 @@ def validate_light_client_update(store: LightClientStore,
     # to match the finalized checkpoint root saved in the state of `attested_header`.
     # Note that the genesis finalized checkpoint root is represented as a zero hash.
     if not is_finality_update(update):
-        assert update.finalized_header == BeaconBlockHeader()
+        assert update.finalized_header == LightClientHeader()
     else:
-        if update.finalized_header.slot == GENESIS_SLOT:
-            assert update.finalized_header == BeaconBlockHeader()
+        if update_finalized_slot == GENESIS_SLOT:
+            assert update.finalized_header == LightClientHeader()
             finalized_root = Bytes32()
         else:
-            finalized_root = hash_tree_root(update.finalized_header)
+            assert is_valid_light_client_header(update.finalized_header)
+            finalized_root = hash_tree_root(update.finalized_header.beacon)
         assert is_valid_merkle_branch(
             leaf=finalized_root,
             branch=update.finality_branch,
             depth=floorlog2(FINALIZED_ROOT_INDEX),
             index=get_subtree_index(FINALIZED_ROOT_INDEX),
-            root=update.attested_header.state_root,
+            root=update.attested_header.beacon.state_root,
         )
 
     # Verify that the `next_sync_committee`, if present, actually is the next sync committee saved in the
@@ -350,7 +375,7 @@ def validate_light_client_update(store: LightClientStore,
             branch=update.next_sync_committee_branch,
             depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
             index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
-            root=update.attested_header.state_root,
+            root=update.attested_header.beacon.state_root,
         )
 
     # Verify sync committee aggregate signature
@@ -364,7 +389,7 @@ def validate_light_client_update(store: LightClientStore,
     ]
     fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot))
     domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root)
-    signing_root = compute_signing_root(update.attested_header, domain)
+    signing_root = compute_signing_root(update.attested_header.beacon, domain)
     assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature)
 ```
 
@@ -372,8 +397,8 @@ def validate_light_client_update(store: LightClientStore,
 
 ```python
 def apply_light_client_update(store: LightClientStore, update: LightClientUpdate) -> None:
-    store_period = compute_sync_committee_period_at_slot(store.finalized_header.slot)
-    update_finalized_period = compute_sync_committee_period_at_slot(update.finalized_header.slot)
+    store_period = compute_sync_committee_period_at_slot(store.finalized_header.beacon.slot)
+    update_finalized_period = compute_sync_committee_period_at_slot(update.finalized_header.beacon.slot)
     if not is_next_sync_committee_known(store):
         assert update_finalized_period == store_period
         store.next_sync_committee = update.next_sync_committee
@@ -382,9 +407,9 @@ def apply_light_client_update(store: LightClientStore, update: LightClientUpdate
         store.next_sync_committee = update.next_sync_committee
         store.previous_max_active_participants = store.current_max_active_participants
         store.current_max_active_participants = 0
-    if update.finalized_header.slot > store.finalized_header.slot:
+    if update.finalized_header.beacon.slot > store.finalized_header.beacon.slot:
         store.finalized_header = update.finalized_header
-        if store.finalized_header.slot > store.optimistic_header.slot:
+        if store.finalized_header.beacon.slot > store.optimistic_header.beacon.slot:
             store.optimistic_header = store.finalized_header
 ```
 
@@ -393,14 +418,14 @@ def apply_light_client_update(store: LightClientStore, update: LightClientUpdate
 ```python
 def process_light_client_store_force_update(store: LightClientStore, current_slot: Slot) -> None:
     if (
-        current_slot > store.finalized_header.slot + UPDATE_TIMEOUT
+        current_slot > store.finalized_header.beacon.slot + UPDATE_TIMEOUT
         and store.best_valid_update is not None
     ):
         # Forced best update when the update timeout has elapsed.
-        # Because the apply logic waits for `finalized_header.slot` to indicate sync committee finality,
+        # Because the apply logic waits for `finalized_header.beacon.slot` to indicate sync committee finality,
         # the `attested_header` may be treated as `finalized_header` in extended periods of non-finality
        # to guarantee progression into later sync committee periods according to `is_better_update`.
-        if store.best_valid_update.finalized_header.slot <= store.finalized_header.slot:
+        if store.best_valid_update.finalized_header.beacon.slot <= store.finalized_header.beacon.slot:
            store.best_valid_update.finalized_header = store.best_valid_update.attested_header
         apply_light_client_update(store, store.best_valid_update)
         store.best_valid_update = None
@@ -433,7 +458,7 @@ def process_light_client_update(store: LightClientStore,
     # Update the optimistic header
     if (
         sum(sync_committee_bits) > get_safety_threshold(store)
-        and update.attested_header.slot > store.optimistic_header.slot
+        and update.attested_header.beacon.slot > store.optimistic_header.beacon.slot
     ):
         store.optimistic_header = update.attested_header
 
@@ -441,14 +466,14 @@ def process_light_client_update(store: LightClientStore,
     update_has_finalized_next_sync_committee = (
         not is_next_sync_committee_known(store)
         and is_sync_committee_update(update) and is_finality_update(update) and (
-            compute_sync_committee_period_at_slot(update.finalized_header.slot)
-            == compute_sync_committee_period_at_slot(update.attested_header.slot)
+            compute_sync_committee_period_at_slot(update.finalized_header.beacon.slot)
+            == compute_sync_committee_period_at_slot(update.attested_header.beacon.slot)
         )
     )
     if (
         sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2
         and (
-            update.finalized_header.slot > store.finalized_header.slot
+            update.finalized_header.beacon.slot > store.finalized_header.beacon.slot
             or update_has_finalized_next_sync_committee
         )
     ):
@@ -487,7 +512,7 @@ def process_light_client_optimistic_update(store: LightClientStore,
         attested_header=optimistic_update.attested_header,
         next_sync_committee=SyncCommittee(),
         next_sync_committee_branch=[Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))],
-        finalized_header=BeaconBlockHeader(),
+        finalized_header=LightClientHeader(),
         finality_branch=[Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))],
         sync_aggregate=optimistic_update.sync_aggregate,
         signature_slot=optimistic_update.signature_slot,
@@ -472,7 +472,8 @@ def process_bls_to_execution_change(state: BeaconState,
     assert validator.withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX
     assert validator.withdrawal_credentials[1:] == hash(address_change.from_bls_pubkey)[1:]
 
-    domain = get_domain(state, DOMAIN_BLS_TO_EXECUTION_CHANGE)
+    # Fork-agnostic domain since address changes are valid across forks
+    domain = compute_domain(DOMAIN_BLS_TO_EXECUTION_CHANGE, genesis_validators_root=state.genesis_validators_root)
     signing_root = compute_signing_root(address_change, domain)
     assert bls.Verify(address_change.from_bls_pubkey, signing_root, signed_address_change.signature)
 
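A sketch of why this makes the signature fork-agnostic: with `fork_version` left unset, `compute_domain` falls back to `GENESIS_FORK_VERSION`, so the signing root of an address change is identical at any epoch (pyspec helpers assumed in scope; `state` and `address_change` are supplied by the caller):

```python
# Sketch (pyspec context assumed): the domain no longer depends on the current fork.
domain = compute_domain(
    DOMAIN_BLS_TO_EXECUTION_CHANGE,
    # fork_version defaults to GENESIS_FORK_VERSION when omitted.
    genesis_validators_root=state.genesis_validators_root,
)
signing_root = compute_signing_root(address_change, domain)
# A `SignedBLSToExecutionChange` created before Capella activates therefore
# remains verifiable, unchanged, after any later fork.
```
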
@@ -129,8 +129,10 @@ def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState:
         # Execution-layer
         latest_execution_payload_header=latest_execution_payload_header,
         # Withdrawals
-        next_withdrawal_index=WithdrawalIndex(0),
-        next_withdrawal_validator_index=ValidatorIndex(0),
+        next_withdrawal_index=WithdrawalIndex(0),  # [New in Capella]
+        next_withdrawal_validator_index=ValidatorIndex(0),  # [New in Capella]
+        # Deep history valid from Capella onwards
+        historical_summaries=List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]([]),  # [New in Capella]
     )
 
     return post
@@ -61,6 +61,8 @@ This topic is used to propagate signed bls to execution change messages to be in
 
 The following validations MUST pass before forwarding the `signed_bls_to_execution_change` on the network:
 
+- _[IGNORE]_ `current_epoch >= CAPELLA_FORK_EPOCH`,
+  where `current_epoch` is defined by the current wall-clock time.
 - _[IGNORE]_ The `signed_bls_to_execution_change` is the first valid signed bls to execution change received
   for the validator with index `signed_bls_to_execution_change.message.validator_index`.
 - _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass validation.
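A non-normative sketch of how a node might apply these three checks before forwarding. Only `process_bls_to_execution_change`, `CAPELLA_FORK_EPOCH`, and the `.copy()` method on states are taken from the spec context; the other names are illustrative.

```python
# Sketch only; `seen_validator_indices` and `should_forward` are illustrative names.
seen_validator_indices: set[int] = set()


def should_forward(signed_change, state_for_validation, current_epoch) -> bool:
    # [IGNORE] drop messages before the Capella fork epoch (wall-clock based).
    if current_epoch < CAPELLA_FORK_EPOCH:
        return False
    # [IGNORE] only the first valid change per validator index.
    index = signed_change.message.validator_index
    if index in seen_validator_indices:
        return False
    # [REJECT] all conditions in `process_bls_to_execution_change` must pass.
    try:
        process_bls_to_execution_change(state_for_validation.copy(), signed_change)
    except AssertionError:
        return False
    seen_validator_indices.add(index)
    return True
```
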
@@ -131,6 +131,8 @@ def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState:
         # Withdrawals
         next_withdrawal_index=pre.next_withdrawal_index,
         next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
+        # Deep history valid from Capella onwards
+        historical_summaries=pre.historical_summaries,
     )
 
     return post
@@ -240,6 +240,9 @@ The following blobs sidecars, where they exist, MUST be sent in consecutive orde
 
 Clients MAY limit the number of blobs sidecars in the response.
 
+An empty `BlobSidecar` is one that does not contain any blobs, but contains non-zero `beacon_block_root`, `beacon_block_slot` and a valid `kzg_aggregated_proof`.
+Clients MAY exclude empty `BlobSidecar`s from rate limiting logic.
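A sketch of the emptiness check described above; the field access follows the `BlobSidecar` description in this paragraph and is illustrative, not a spec definition.

```python
def is_empty_blob_sidecar(sidecar) -> bool:
    # "Empty" per the text above: no blobs, while the identifying fields stay populated.
    return len(sidecar.blobs) == 0


def counts_toward_rate_limit(sidecar) -> bool:
    # A client MAY exclude empty sidecars from its rate limiting accounting.
    return not is_empty_blob_sidecar(sidecar)
```
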
+
 The response MUST contain no more than `count` blobs sidecars.
 
 Clients MUST respond with blobs sidecars from their view of the current fork choice
@@ -1 +1 @@
-1.3.0-rc.0
+1.3.0-rc.1
@@ -56,18 +56,18 @@ def get_update_file_name(spec, update):
         suffix2 = "f"
     else:
         suffix2 = "x"
-    return f"update_{encode_hex(update.attested_header.hash_tree_root())}_{suffix1}{suffix2}"
+    return f"update_{encode_hex(update.attested_header.beacon.hash_tree_root())}_{suffix1}{suffix2}"
 
 
 def get_checks(store):
     return {
         "finalized_header": {
-            'slot': int(store.finalized_header.slot),
-            'beacon_root': encode_hex(store.finalized_header.hash_tree_root()),
+            'slot': int(store.finalized_header.beacon.slot),
+            'beacon_root': encode_hex(store.finalized_header.beacon.hash_tree_root()),
         },
         "optimistic_header": {
-            'slot': int(store.optimistic_header.slot),
-            'beacon_root': encode_hex(store.optimistic_header.hash_tree_root()),
+            'slot': int(store.optimistic_header.beacon.slot),
+            'beacon_root': encode_hex(store.optimistic_header.beacon.hash_tree_root()),
         },
     }
 
@@ -141,10 +141,10 @@ def test_light_client_sync(spec, state):
     sync_aggregate, _ = get_sync_aggregate(spec, state)
     block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
     yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
-    assert test.store.finalized_header.slot == finalized_state.slot
+    assert test.store.finalized_header.beacon.slot == finalized_state.slot
     assert test.store.next_sync_committee == finalized_state.next_sync_committee
     assert test.store.best_valid_update is None
-    assert test.store.optimistic_header.slot == attested_state.slot
+    assert test.store.optimistic_header.beacon.slot == attested_state.slot
 
     # Advance to next sync committee period
     # ```
@@ -167,10 +167,10 @@ def test_light_client_sync(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Edge case: Signature in next period
|
||||
# ```
|
||||
|
@@ -193,10 +193,10 @@ def test_light_client_sync(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Edge case: Finalized header not included
|
||||
# ```
|
||||
|
@@ -214,10 +214,10 @@ def test_light_client_sync(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block=None)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Non-finalized case: Attested `next_sync_committee` is not finalized
|
||||
# ```
|
||||
|
@@ -236,10 +236,10 @@ def test_light_client_sync(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Force-update using timeout
|
||||
# ```
|
||||
|
@@ -256,10 +256,10 @@ def test_light_client_sync(spec, state):
|
|||
attested_state = state.copy()
|
||||
next_slots(spec, state, spec.UPDATE_TIMEOUT - 1)
|
||||
yield from emit_force_update(test, spec, state)
|
||||
assert test.store.finalized_header.slot == store_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == store_state.slot
|
||||
assert test.store.next_sync_committee == store_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == store_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == store_state.slot
|
||||
|
||||
# Edge case: Finalized header not included, after force-update
|
||||
# ```
|
||||
|
@@ -275,10 +275,10 @@ def test_light_client_sync(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block=None)
|
||||
assert test.store.finalized_header.slot == store_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == store_state.slot
|
||||
assert test.store.next_sync_committee == store_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Edge case: Finalized header older than store
|
||||
# ```
|
||||
|
@@ -296,15 +296,15 @@ def test_light_client_sync(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == store_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == store_state.slot
|
||||
assert test.store.next_sync_committee == store_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
yield from emit_force_update(test, spec, state)
|
||||
assert test.store.finalized_header.slot == attested_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == attested_state.slot
|
||||
assert test.store.next_sync_committee == attested_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Advance to next sync committee period
|
||||
# ```
|
||||
|
@@ -327,10 +327,10 @@ def test_light_client_sync(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
|
@@ -357,10 +357,10 @@ def test_supply_sync_committee_from_past_update(spec, state):
|
|||
|
||||
# Apply `LightClientUpdate` from the past, populating `store.next_sync_committee`
|
||||
yield from emit_update(test, spec, past_state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == state.slot
|
||||
assert test.store.finalized_header.beacon.slot == state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
|
@@ -383,10 +383,10 @@ def test_advance_finality_without_sync_committee(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Advance finality into next sync committee period, but omit `next_sync_committee`
|
||||
transition_to(spec, state, compute_start_slot_at_next_sync_committee_period(spec, state))
|
||||
|
@@ -402,10 +402,10 @@ def test_advance_finality_without_sync_committee(spec, state):
|
|||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=False)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Advance finality once more, with `next_sync_committee` still unknown
|
||||
past_state = finalized_state
|
||||
|
@@ -419,24 +419,24 @@ def test_advance_finality_without_sync_committee(spec, state):
|
|||
|
||||
# Apply `LightClientUpdate` without `finalized_header` nor `next_sync_committee`
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, None, with_next=False)
|
||||
assert test.store.finalized_header.slot == past_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == past_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Apply `LightClientUpdate` with `finalized_header` but no `next_sync_committee`
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=False)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Apply full `LightClientUpdate`, supplying `next_sync_committee`
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.finalized_header.beacon.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
assert test.store.optimistic_header.beacon.slot == attested_state.slot
|
||||
|
||||
# Finish test
|
||||
yield from finish_test(test)
|
||||
|
|
|
@@ -37,7 +37,7 @@ def test_update_ranking(spec, state):
     # - `sig_finalized` / `sig_attested` --> Only signature in next sync committee period
     # - `att_finalized` / `att_attested` --> Attested header also in next sync committee period
     # - `fin_finalized` / `fin_attested` --> Finalized header also in next sync committee period
-    # - `lat_finalized` / `lat_attested` --> Like `fin`, but at a later `attested_header.slot`
+    # - `lat_finalized` / `lat_attested` --> Like `fin`, but at a later `attested_header.beacon.slot`
     next_slots(spec, state, spec.compute_start_slot_at_epoch(spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 3) - 1)
     sig_finalized_block = state_transition_with_full_block(spec, state, True, True)
     _, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
@@ -69,7 +69,7 @@ def test_process_light_client_update_at_period_boundary(spec, state):

     # Forward to slot before next sync committee period so that next block is final one in period
     next_slots(spec, state, spec.UPDATE_TIMEOUT - 2)
-    store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
+    store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.beacon.slot)
     update_period = spec.compute_sync_committee_period_at_slot(state.slot)
     assert store_period == update_period

@@ -104,7 +104,7 @@ def test_process_light_client_update_timeout(spec, state):

     # Forward to next sync committee period
     next_slots(spec, state, spec.UPDATE_TIMEOUT)
-    store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
+    store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.beacon.slot)
     update_period = spec.compute_sync_committee_period_at_slot(state.slot)
     assert store_period + 1 == update_period

@@ -146,7 +146,7 @@ def test_process_light_client_update_finality_updated(spec, state):
     # Ensure that finality checkpoint has changed
     assert state.finalized_checkpoint.epoch == 3
     # Ensure that it's same period
-    store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
+    store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.beacon.slot)
     update_period = spec.compute_sync_committee_period_at_slot(state.slot)
     assert store_period == update_period

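
For orientation, the `store_period` / `update_period` assertions above come down to integer division on slots. A minimal standalone sketch, assuming mainnet preset values (`SLOTS_PER_EPOCH = 32`, `EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256`) and mirroring the spec's `compute_sync_committee_period_at_slot`:

```python
# Sketch only: mirrors the spec's period arithmetic behind the assertions above.
SLOTS_PER_EPOCH = 32                      # mainnet preset (assumed)
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256    # mainnet preset (assumed)
UPDATE_TIMEOUT = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD  # 8192 slots


def compute_sync_committee_period_at_slot(slot: int) -> int:
    # period = (slot // SLOTS_PER_EPOCH) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    return slot // SLOTS_PER_EPOCH // EPOCHS_PER_SYNC_COMMITTEE_PERIOD


# Starting from slot 0, advancing by UPDATE_TIMEOUT slots lands exactly one period later,
# which is what `store_period + 1 == update_period` checks.
assert compute_sync_committee_period_at_slot(UPDATE_TIMEOUT) == 1
# Advancing by UPDATE_TIMEOUT - 2 slots stays within the same period.
assert compute_sync_committee_period_at_slot(UPDATE_TIMEOUT - 2) == 0
```
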
@@ -1,11 +1,15 @@
 from eth2spec.test.context import (
     spec_configured_state_test,
     spec_state_test_with_matching_config,
+    spec_test,
     with_all_phases,
+    with_config_overrides,
+    with_matching_spec_config,
     with_phases,
+    with_state,
 )
 from eth2spec.test.helpers.constants import (
-    PHASE0, ALTAIR,
+    PHASE0, ALTAIR, BELLATRIX,
     ALL_PHASES,
 )
 from eth2spec.test.helpers.forks import is_post_fork
@@ -30,7 +34,7 @@ def test_config_override(spec, state):

 @with_all_phases
 @spec_state_test_with_matching_config
-def test_override_config_fork_epoch(spec, state):
+def test_config_override_matching_fork_epochs(spec, state):
     # Fork schedule must be consistent with state fork
     epoch = spec.get_current_epoch(state)
     if is_post_fork(spec.fork, ALTAIR):
@@ -56,3 +60,27 @@ def test_override_config_fork_epoch(spec, state):
             continue
         fork_epoch_field = fork.upper() + '_FORK_EPOCH'
         assert getattr(spec.config, fork_epoch_field) <= epoch
+
+
+@with_phases(phases=[ALTAIR], other_phases=[BELLATRIX])
+@spec_test
+@with_config_overrides({
+    'ALTAIR_FORK_VERSION': '0x11111111',
+    'BELLATRIX_FORK_EPOCH': 4,
+}, emit=False)
+@with_state
+@with_matching_spec_config(emitted_fork=BELLATRIX)
+def test_config_override_across_phases(spec, phases, state):
+    assert state.fork.current_version == spec.config.ALTAIR_FORK_VERSION
+
+    assert spec.config.ALTAIR_FORK_VERSION == spec.Version('0x11111111')
+    assert spec.config.ALTAIR_FORK_EPOCH == 0
+    assert not hasattr(spec.config, 'BELLATRIX_FORK_EPOCH')
+
+    assert phases[ALTAIR].config.ALTAIR_FORK_VERSION == spec.Version('0x11111111')
+    assert phases[ALTAIR].config.ALTAIR_FORK_EPOCH == 0
+    assert not hasattr(phases[ALTAIR].config, 'BELLATRIX_FORK_EPOCH')
+
+    assert phases[ALTAIR].config.ALTAIR_FORK_VERSION == spec.Version('0x11111111')
+    assert phases[BELLATRIX].config.ALTAIR_FORK_EPOCH == 0
+    assert phases[BELLATRIX].config.BELLATRIX_FORK_EPOCH == 4

@@ -1,7 +1,14 @@
 from eth2spec.test.helpers.keys import pubkeys
 from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change

-from eth2spec.test.context import spec_state_test, expect_assertion_error, with_capella_and_later, always_bls
+from eth2spec.test.helpers.constants import CAPELLA, MAINNET
+from eth2spec.test.context import (
+    always_bls,
+    expect_assertion_error,
+    spec_state_test,
+    with_capella_and_later,
+    with_presets,
+    with_phases,
+)


 def run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=True):
@@ -172,3 +179,67 @@ def test_invalid_bad_signature(spec, state):
     signed_address_change.signature = spec.BLSSignature(b'\x42' * 96)

     yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
+
+
+@with_capella_and_later
+@spec_state_test
+@always_bls
+def test_genesis_fork_version(spec, state):
+    signed_address_change = get_signed_address_change(spec, state, fork_version=spec.config.GENESIS_FORK_VERSION)
+
+    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change)
+
+
+@with_capella_and_later
+@spec_state_test
+@always_bls
+def test_invalid_current_fork_version(spec, state):
+    signed_address_change = get_signed_address_change(spec, state, fork_version=state.fork.current_version)
+
+    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
+
+
+@with_capella_and_later
+@spec_state_test
+@always_bls
+def test_invalid_previous_fork_version(spec, state):
+    signed_address_change = get_signed_address_change(spec, state, fork_version=state.fork.previous_version)
+
+    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
+
+
+@with_capella_and_later
+@spec_state_test
+@always_bls
+def test_invalid_genesis_validators_root(spec, state):
+    signed_address_change = get_signed_address_change(spec, state, genesis_validators_root=b'\x99' * 32)
+
+    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)
+
+
+@with_phases([CAPELLA])
+@with_presets([MAINNET], reason="use mainnet fork version")
+@spec_state_test
+@always_bls
+def test_valid_signature_from_staking_deposit_cli(spec, state):
+    validator_index = 1
+    from_bls_pubkey = bytes.fromhex('86248e64705987236ec3c41f6a81d96f98e7b85e842a1d71405b216fa75a9917512f3c94c85779a9729c927ea2aa9ed1')  # noqa: E501
+    to_execution_address = bytes.fromhex('3434343434343434343434343434343434343434')
+    signature = bytes.fromhex('8cf4219884b326a04f6664b680cd9a99ad70b5280745af1147477aa9f8b4a2b2b38b8688c6a74a06f275ad4e14c5c0c70e2ed37a15ece5bf7c0724a376ad4c03c79e14dd9f633a3d54abc1ce4e73bec3524a789ab9a69d4d06686a8a67c9e4dc')  # noqa: E501
+
+    # Use mainnet `genesis_validators_root`
+    state.genesis_validators_root = bytes.fromhex('4b363db94e286120d76eb905340fdd4e54bfe9f06bf33ff6cf5ad27f511bfe95')
+    validator = state.validators[validator_index]
+    validator.withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(from_bls_pubkey)[1:]
+
+    address_change = spec.BLSToExecutionChange(
+        validator_index=validator_index,
+        from_bls_pubkey=from_bls_pubkey,
+        to_execution_address=to_execution_address,
+    )
+    signed_address_change = spec.SignedBLSToExecutionChange(
+        message=address_change,
+        signature=signature,
+    )
+
+    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change)

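
For orientation, the `withdrawal_credentials` line in the test above follows the standard BLS credential layout. A minimal standalone sketch of that derivation, assuming the spec constants `BLS_WITHDRAWAL_PREFIX == 0x00` and SHA-256 as the hash function:

```python
# Sketch of the BLS withdrawal-credential derivation used in the test setup above.
from hashlib import sha256

BLS_WITHDRAWAL_PREFIX = b'\x00'  # spec constant (assumed here)


def bls_withdrawal_credentials(from_bls_pubkey: bytes) -> bytes:
    # Prefix byte 0x00 followed by the last 31 bytes of SHA-256(pubkey)
    return BLS_WITHDRAWAL_PREFIX + sha256(from_bls_pubkey).digest()[1:]


from_bls_pubkey = bytes.fromhex(
    '86248e64705987236ec3c41f6a81d96f98e7b85e842a1d71405b216fa75a9917'
    '512f3c94c85779a9729c927ea2aa9ed1'
)
credentials = bls_withdrawal_credentials(from_bls_pubkey)
assert len(credentials) == 32 and credentials[:1] == BLS_WITHDRAWAL_PREFIX
```
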
@@ -1,4 +1,5 @@
 import pytest
+from copy import deepcopy
 from dataclasses import dataclass
 import importlib

@@ -309,14 +310,18 @@ def config_fork_epoch_overrides(spec, state):
     return overrides


-def spec_state_test_with_matching_config(fn):
+def with_matching_spec_config(emitted_fork=None):
     def decorator(fn):
         def wrapper(*args, spec: Spec, **kw):
-            conf = config_fork_epoch_overrides(spec, kw['state'])
-            overrides = with_config_overrides(conf)
-            return overrides(fn)(*args, spec=spec, **kw)
+            overrides = config_fork_epoch_overrides(spec, kw['state'])
+            deco = with_config_overrides(overrides, emitted_fork)
+            return deco(fn)(*args, spec=spec, **kw)
         return wrapper
-    return spec_test(with_state(decorator(single_phase(fn))))
+    return decorator
+
+
+def spec_state_test_with_matching_config(fn):
+    return spec_test(with_state(with_matching_spec_config()(single_phase(fn))))


 def expect_assertion_error(fn):
@@ -557,10 +562,30 @@ def _get_copy_of_spec(spec):
     module_spec = importlib.util.find_spec(module_path)
     module = importlib.util.module_from_spec(module_spec)
     module_spec.loader.exec_module(module)
+
+    # Preserve existing config overrides
+    module.config = deepcopy(spec.config)
+
     return module


-def with_config_overrides(config_overrides):
+def spec_with_config_overrides(spec, config_overrides):
+    # apply our overrides to a copy of it, and apply it to the spec
+    config = spec.config._asdict()
+    config.update((k, config_overrides[k]) for k in config.keys() & config_overrides.keys())
+    config_types = spec.Configuration.__annotations__
+    modified_config = {k: config_types[k](v) for k, v in config.items()}
+
+    spec.config = spec.Configuration(**modified_config)
+
+    # To output the changed config in a format compatible with yaml test vectors,
+    # the dict SSZ objects have to be converted into Python built-in types.
+    output_config = _get_basic_dict(modified_config)
+
+    return spec, output_config
+
+
+def with_config_overrides(config_overrides, emitted_fork=None, emit=True):
     """
     WARNING: the spec_test decorator must wrap this, to ensure the decorated test actually runs.
     This decorator forces the test to yield, and pytest doesn't run generator tests, and instead silently passes it.
@@ -570,23 +595,26 @@ def with_config_overrides(config_overrides):
     """
     def decorator(fn):
         def wrapper(*args, spec: Spec, **kw):
-            spec = _get_copy_of_spec(spec)
+            # Apply config overrides to spec
+            spec, output_config = spec_with_config_overrides(_get_copy_of_spec(spec), config_overrides)

-            # apply our overrides to a copy of it, and apply it to the spec
-            config = spec.config._asdict()
-            config.update(config_overrides)
-            config_types = spec.Configuration.__annotations__
-            modified_config = {k: config_types[k](v) for k, v in config.items()}
+            # Apply config overrides to additional phases, if present
+            if 'phases' in kw:
+                phases = {}
+                for fork in kw['phases']:
+                    phases[fork], output = spec_with_config_overrides(
+                        _get_copy_of_spec(kw['phases'][fork]), config_overrides)
+                    if emitted_fork == fork:
+                        output_config = output
+                kw['phases'] = phases

-            # To output the changed config to could be serialized with yaml test vectors,
-            # the dict SSZ objects have to be converted into Python built-in types.
-            output_config = _get_basic_dict(modified_config)
-            yield 'config', 'cfg', output_config

-            spec.config = spec.Configuration(**modified_config)
+            # Emit requested spec (with overrides)
+            if emit:
+                yield 'config', 'cfg', output_config

             # Run the function
             out = fn(*args, spec=spec, **kw)

             # If it's not returning None like a normal test function,
             # it's generating things, and we need to complete it before setting back the config.
             if out is not None:

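
For orientation, the docstring above is why `spec_test` must sit outside `with_config_overrides`: the decorator turns the test into a generator that yields the `'config'` meta output, and pytest would otherwise silently skip it. A hypothetical test written as an explicit decorator stack, roughly what `spec_state_test_with_matching_config` composes (the test name and body below are illustrative; the body mirrors `test_config_override_matching_fork_epochs`):

```python
# Hypothetical expanded form of `spec_state_test_with_matching_config` (names from the diff above).
from eth2spec.test.context import (
    single_phase,
    spec_test,
    with_all_phases,
    with_matching_spec_config,
    with_state,
)
from eth2spec.test.helpers.constants import ALTAIR
from eth2spec.test.helpers.forks import is_post_fork


@with_all_phases
@spec_test
@with_state
@with_matching_spec_config()
@single_phase
def test_example_matching_fork_epochs(spec, state):
    # Body mirrors `test_config_override_matching_fork_epochs`: past forks must not be
    # scheduled later than the state's current epoch.
    epoch = spec.get_current_epoch(state)
    if is_post_fork(spec.fork, ALTAIR):
        assert spec.config.ALTAIR_FORK_EPOCH <= epoch
```
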
@@ -2,7 +2,12 @@ from eth2spec.utils import bls
 from eth2spec.test.helpers.keys import pubkeys, privkeys, pubkey_to_privkey


-def get_signed_address_change(spec, state, validator_index=None, withdrawal_pubkey=None, to_execution_address=None):
+def get_signed_address_change(spec, state,
+                              validator_index=None,
+                              withdrawal_pubkey=None,
+                              to_execution_address=None,
+                              fork_version=None,
+                              genesis_validators_root=None):
     if validator_index is None:
         validator_index = 0

@@ -16,7 +21,15 @@ def get_signed_address_change(spec, state, validator_index=None, withdrawal_pubk
     if to_execution_address is None:
         to_execution_address = b'\x42' * 20

-    domain = spec.get_domain(state, spec.DOMAIN_BLS_TO_EXECUTION_CHANGE)
+    if genesis_validators_root is None:
+        genesis_validators_root = state.genesis_validators_root
+
+    domain = spec.compute_domain(
+        spec.DOMAIN_BLS_TO_EXECUTION_CHANGE,
+        fork_version=fork_version,
+        genesis_validators_root=genesis_validators_root,
+    )

     address_change = spec.BLSToExecutionChange(
         validator_index=validator_index,
         from_bls_pubkey=withdrawal_pubkey,

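
For orientation, a sketch of the signing step the updated helper performs once `address_change` is built, assuming the default `fork_version` resolves to `GENESIS_FORK_VERSION` inside `compute_domain`; `privkey` below is a placeholder for the withdrawal key:

```python
# Sketch of the signature derivation inside the updated helper (all `spec.*` names are from
# the pyspec; `privkey` is a placeholder for the validator's withdrawal private key).
domain = spec.compute_domain(
    spec.DOMAIN_BLS_TO_EXECUTION_CHANGE,
    fork_version=spec.config.GENESIS_FORK_VERSION,  # `fork_version=None` falls back to this default
    genesis_validators_root=state.genesis_validators_root,
)
signing_root = spec.compute_signing_root(address_change, domain)
signed_address_change = spec.SignedBLSToExecutionChange(
    message=address_change,
    signature=bls.Sign(privkey, signing_root),
)
```
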
@@ -25,12 +25,12 @@ Each step includes checks to verify the expected impact on the `store` object.

 ```yaml
 finalized_header: {
-    slot: int,              -- Integer value from store.finalized_header.slot
-    beacon_root: string,    -- Encoded 32-byte value from store.finalized_header.hash_tree_root()
+    slot: int,              -- Integer value from store.finalized_header.beacon.slot
+    beacon_root: string,    -- Encoded 32-byte value from store.finalized_header.beacon.hash_tree_root()
 }
 optimistic_header: {
-    slot: int,              -- Integer value from store.optimistic_header.slot
-    beacon_root: string,    -- Encoded 32-byte value from store.optimistic_header.hash_tree_root()
+    slot: int,              -- Integer value from store.optimistic_header.beacon.slot
+    beacon_root: string,    -- Encoded 32-byte value from store.optimistic_header.beacon.hash_tree_root()
 }
 ```
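
For orientation, a hypothetical `checks` entry in this format could look like the following (all values are placeholders):

```yaml
# Placeholder values for illustration only
finalized_header: {
    slot: 6432,
    beacon_root: '0x1212121212121212121212121212121212121212121212121212121212121212',
}
optimistic_header: {
    slot: 6500,
    beacon_root: '0x3434343434343434343434343434343434343434343434343434343434343434',
}
```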