mirror of https://github.com/status-im/nimbus-eth2.git
synced 2025-02-21 10:48:17 +00:00

commit 7582dbe793: Merge branch 'stable' into feat/eip-7495

.github/workflows/ci.yml (vendored): 12 changed lines
@@ -37,17 +37,17 @@ jobs:
             cpu: arm64
           - os: windows
             cpu: amd64
-        branch: [~, upstream/version-2-0]
+        branch: [~, upstream/version-2-2]
         exclude:
           - target:
               os: macos
-            branch: upstream/version-2-0
+            branch: upstream/version-2-2
           - target:
               os: windows
-            branch: upstream/version-2-0
+            branch: upstream/version-2-2
         include:
-          - branch: upstream/version-2-0
-            branch-short: version-2-0
+          - branch: upstream/version-2-2
+            branch-short: version-2-2
+            nimflags-extra: --mm:refc
           - target:
               os: linux
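This hunk swaps the tracked Nim branch from `upstream/version-2-0` to `upstream/version-2-2` in three places: the `branch` matrix axis, the `exclude` rules that skip that branch on macOS and Windows, and the `include` entry, which now also attaches `nimflags-extra: --mm:refc`. As a reminder of how these three matrix keys interact, here is a minimal sketch; the job name, runner, and scalar `target` values are hypothetical simplifications, not the actual workflow:

```yaml
jobs:
  test:
    strategy:
      matrix:
        target: [linux, macos, windows]    # simplified: the real axis is an os/cpu mapping
        branch: [~, upstream/version-2-2]  # ~ (null) stands for the default Nim branch
        exclude:
          # Prune combinations: drop the extra Nim branch on macOS and Windows.
          - target: macos
            branch: upstream/version-2-2
          - target: windows
            branch: upstream/version-2-2
        include:
          # Attach extra keys to every generated job whose branch matches.
          - branch: upstream/version-2-2
            branch-short: version-2-2
            nimflags-extra: --mm:refc
    runs-on: ubuntu-latest
    steps:
      # Keys with hyphens need index syntax in expressions.
      - run: echo "target=${{ matrix.target }} branch=${{ matrix.branch }} flags=${{ matrix['nimflags-extra'] }}"
```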
@@ -212,7 +212,7 @@ jobs:
         # allowed to prevent potential problems with downloads on different
         # file systems". However, GitHub Actions workflows do not support a
         # usual assortment of string functions.
-        name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ matrix.branch == 'upstream/version-2-0' && 'version-2-0' || matrix.branch }}
+        name: Unit Test Results ${{ matrix.target.os }}-${{ matrix.target.cpu }}-${{ matrix.branch == 'upstream/version-2-2' && 'version-2-2' || matrix.branch }}
         path: build/*.xml

   devbuild:
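The second hunk updates the derived artifact name. The in-file comment is the key detail: GitHub Actions expressions lack a general string-replace facility, so the workflow emulates a ternary with `&&`/`||`, mapping the full ref `upstream/version-2-2` to the short label `version-2-2` and passing any other branch value through unchanged. A sketch of the same idiom in isolation (step and action version hypothetical):

```yaml
      - uses: actions/upload-artifact@v4
        with:
          # cond && 'a' || 'b' evaluates to 'a' when cond is true, else 'b':
          # && yields its right operand, || yields its first truthy operand.
          name: Unit Test Results ${{ matrix.branch == 'upstream/version-2-2' && 'version-2-2' || matrix.branch }}
          path: build/*.xml
```

One caveat of this idiom: if the "true" value were itself falsy (an empty string), the `||` fallback would fire instead, so it only works when the intended value is truthy, as it is here.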
@@ -8,11 +8,12 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
## Attestation pool electra processing [Preset: mainnet]
```diff
+ Aggregated attestations with disjoint comittee bits into a single on-chain aggregate [Pres OK
+ Aggregating across committees [Preset: mainnet] OK
+ Attestations with disjoint comittee bits and equal data into single on-chain aggregate [Pr OK
+ Can add and retrieve simple electra attestations [Preset: mainnet] OK
+ Working with electra aggregates [Preset: mainnet] OK
```
-OK: 4/4 Fail: 0/4 Skip: 0/4
+OK: 5/5 Fail: 0/5 Skip: 0/5
## Attestation pool processing [Preset: mainnet]
```diff
+ Attestation from different branch [Preset: mainnet] OK

@@ -158,6 +159,14 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
+ parent sanity OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Combined scenarios [Beacon Node] [Preset: mainnet]
```diff
+ ImportKeystores should not be blocked by fee recipient setting [Beacon Node] [Preset: main OK
+ ImportKeystores should not be blocked by gas limit setting [Beacon Node] [Preset: mainnet] OK
+ ImportRemoteKeys should not be blocked by fee recipient setting [Beacon Node] [Preset: mai OK
+ ImportRemoteKeys should not be blocked by gas limit setting [Beacon Node] [Preset: mainnet OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## DeleteKeys requests [Beacon Node] [Preset: mainnet]
```diff
+ Deleting not existing key [Beacon Node] [Preset: mainnet] OK

@@ -592,14 +601,16 @@ OK: 3/3 Fail: 0/3 Skip: 0/3
## Fee recipient management [Beacon Node] [Preset: mainnet]
```diff
+ Configuring the fee recipient [Beacon Node] [Preset: mainnet] OK
+ Configuring the fee recipient for dynamic validator [Beacon Node] [Preset: mainnet] OK
+ Invalid Authorization Header [Beacon Node] [Preset: mainnet] OK
+ Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK
+ Missing Authorization header [Beacon Node] [Preset: mainnet] OK
+ Obtaining the fee recipient for dynamic validator returns suggested default [Beacon Node] OK
+ Obtaining the fee recipient of a missing validator returns 404 [Beacon Node] [Preset: main OK
+ Obtaining the fee recipient of an unconfigured validator returns the suggested default [Be OK
+ Setting the fee recipient on a missing validator creates a record for it [Beacon Node] [Pr OK
```
-OK: 7/7 Fail: 0/7 Skip: 0/7
+OK: 9/9 Fail: 0/9 Skip: 0/9
## FinalizedBlocks [Preset: mainnet]
```diff
+ Basic ops [Preset: mainnet] OK

@@ -630,14 +641,16 @@ OK: 11/11 Fail: 0/11 Skip: 0/11
## Gas limit management [Beacon Node] [Preset: mainnet]
```diff
+ Configuring the gas limit [Beacon Node] [Preset: mainnet] OK
+ Configuring the gas limit for dynamic validator [Beacon Node] [Preset: mainnet] OK
+ Invalid Authorization Header [Beacon Node] [Preset: mainnet] OK
+ Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK
+ Missing Authorization header [Beacon Node] [Preset: mainnet] OK
+ Obtaining the gas limit for dynamic validator returns suggested default [Beacon Node] [Pre OK
+ Obtaining the gas limit of a missing validator returns 404 [Beacon Node] [Preset: mainnet] OK
+ Obtaining the gas limit of an unconfigured validator returns the suggested default [Beacon OK
+ Setting the gas limit on a missing validator creates a record for it [Beacon Node] [Preset OK
```
-OK: 7/7 Fail: 0/7 Skip: 0/7
+OK: 9/9 Fail: 0/9 Skip: 0/9
## Gossip fork transition
```diff
+ Gossip fork transition OK

@@ -984,33 +997,28 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 7/7 Fail: 0/7 Skip: 0/7
## SyncManager test suite
```diff
+ Process all unviable blocks OK
+ [SyncManager] groupBlobs() test OK
+ [SyncQueue#Backward] Async unordered push test OK
+ [SyncQueue#Backward] Async unordered push with rewind test OK
+ [SyncQueue#Backward] Good response with missing values towards end OK
+ [SyncQueue#Backward] Handle out-of-band sync progress advancement OK
+ [SyncQueue#Backward] Pass through established limits test OK
+ [SyncQueue#Backward] Smoke test OK
+ [SyncQueue#Backward] Start and finish slots equal OK
+ [SyncQueue#Backward] Two full requests success/fail OK
+ [SyncQueue# & Backward] Combination of missing parent and good blocks [3 peers] test OK
+ [SyncQueue# & Backward] Failure request push test OK
+ [SyncQueue# & Backward] Invalid block [3 peers] test OK
+ [SyncQueue# & Backward] Smoke [3 peers] test OK
+ [SyncQueue# & Backward] Smoke [single peer] test OK
+ [SyncQueue# & Backward] Unviable block [3 peers] test OK
+ [SyncQueue# & Forward] Combination of missing parent and good blocks [3 peers] test OK
+ [SyncQueue# & Forward] Failure request push test OK
+ [SyncQueue# & Forward] Invalid block [3 peers] test OK
+ [SyncQueue# & Forward] Smoke [3 peers] test OK
+ [SyncQueue# & Forward] Smoke [single peer] test OK
+ [SyncQueue# & Forward] Unviable block [3 peers] test OK
+ [SyncQueue#Backward] Missing parent and exponential rewind [3 peers] test OK
+ [SyncQueue#Backward] getRewindPoint() test OK
+ [SyncQueue#Forward] Async unordered push test OK
+ [SyncQueue#Forward] Async unordered push with rewind test OK
+ [SyncQueue#Forward] Good response with missing values towards end OK
+ [SyncQueue#Forward] Handle out-of-band sync progress advancement OK
+ [SyncQueue#Forward] Pass through established limits test OK
+ [SyncQueue#Forward] Smoke test OK
+ [SyncQueue#Forward] Start and finish slots equal OK
+ [SyncQueue#Forward] Two full requests success/fail OK
+ [SyncQueue#Forward] Missing parent and exponential rewind [3 peers] test OK
+ [SyncQueue#Forward] getRewindPoint() test OK
+ [SyncQueue] checkBlobsResponse() test OK
+ [SyncQueue] checkResponse() test OK
+ [SyncQueue] contains() test OK
+ [SyncQueue] getLastNonEmptySlot() test OK
+ [SyncQueue] hasEndGap() test OK
```
-OK: 25/25 Fail: 0/25 Skip: 0/25
+OK: 20/20 Fail: 0/20 Skip: 0/20
## Type helpers
```diff
+ BeaconBlock OK

@@ -1154,4 +1162,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9

---TOTAL---
-OK: 783/788 Fail: 0/788 Skip: 5/788
+OK: 787/792 Fail: 0/792 Skip: 5/792
CHANGELOG.md: 27 changed lines

@@ -1,3 +1,30 @@
2025-02-13 v25.2.0
==================

Nimbus `v25.2.0` is a `low-urgency` release for mainnet, but a `high-urgency` release for Sepolia and Holesky, due to Pectra readiness for their upcoming forks.

### Improvements

- Add Holesky and Sepolia Electra fork epochs:
  https://github.com/status-im/nimbus-eth2/pull/6908

- Improve syncing smoothness and steadiness:
  https://github.com/status-im/nimbus-eth2/pull/6722

- Initiate the metrics server later in the beacon node startup sequence, to mitigate transient metrics during validator loading:
  https://github.com/status-im/nimbus-eth2/pull/6902

### Fixes

- Fix the keymanager API `listFeeRecipient` and `getGasLimit` endpoints in the presence of web3signer validators:
  https://github.com/status-im/nimbus-eth2/pull/6916

- Update the builder API registered fee recipient and gas limit from the validator client without a restart:
  https://github.com/status-im/nimbus-eth2/pull/6907

- Fix capital-case fork version names being returned in certain beacon API JSON response `version` fields:
  https://github.com/status-im/nimbus-eth2/pull/6905

2025-01-28 v25.1.0
==================
@@ -1472,6 +1472,9 @@ OK: 15/15 Fail: 0/15 Skip: 0/15
+ [Invalid] EF - Capella - Operations - Withdrawals - invalid_two_expected_partial_withdrawa OK
+ [Valid] EF - Capella - Operations - Withdrawals - all_withdrawal OK
+ [Valid] EF - Capella - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK
+ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK
+ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK
+ [Valid] EF - Capella - Operations - Withdrawals - partially_withdrawable_validator_legac OK
+ [Valid] EF - Capella - Operations - Withdrawals - random_0 OK
+ [Valid] EF - Capella - Operations - Withdrawals - random_full_withdrawals_0 OK
+ [Valid] EF - Capella - Operations - Withdrawals - random_full_withdrawals_1 OK

@@ -1502,7 +1505,7 @@ OK: 15/15 Fail: 0/15 Skip: 0/15
+ [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK
+ [Valid] EF - Capella - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK
```
-OK: 50/50 Fail: 0/50 Skip: 0/50
+OK: 53/53 Fail: 0/53 Skip: 0/53
## EF - Capella - Random [Preset: mainnet]
```diff
+ [Valid] EF - Capella - Random - randomized_0 [Preset: mainnet] OK

@@ -2026,6 +2029,8 @@ OK: 21/21 Fail: 0/21 Skip: 0/21
+ [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_1_ext OK
+ [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_32_ex OK
+ [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_length_empty OK
+ [Valid] EF - Deneb - Operations - Execution Payload - incorrect_transaction_no_blobs_but OK
+ [Valid] EF - Deneb - Operations - Execution Payload - no_commitments_for_transactions OK
+ [Valid] EF - Deneb - Operations - Execution Payload - no_transactions_with_commitments OK
+ [Valid] EF - Deneb - Operations - Execution Payload - non_empty_extra_data_first_payload OK
+ [Valid] EF - Deneb - Operations - Execution Payload - non_empty_extra_data_regular_paylo OK

@@ -2041,7 +2046,7 @@ OK: 21/21 Fail: 0/21 Skip: 0/21
+ [Valid] EF - Deneb - Operations - Execution Payload - zero_length_transaction_regular_pa OK
+ [Valid] EF - Deneb - Operations - Execution Payload - zeroed_commitment OK
```
-OK: 38/38 Fail: 0/38 Skip: 0/38
+OK: 40/40 Fail: 0/40 Skip: 0/40
## EF - Deneb - Operations - Proposer Slashing [Preset: mainnet]
```diff
+ [Invalid] EF - Deneb - Operations - Proposer Slashing - invalid_different_proposer_indices OK

@@ -2133,6 +2138,9 @@ OK: 15/15 Fail: 0/15 Skip: 0/15
+ [Invalid] EF - Deneb - Operations - Withdrawals - invalid_two_expected_partial_withdrawal_ OK
+ [Valid] EF - Deneb - Operations - Withdrawals - all_withdrawal OK
+ [Valid] EF - Deneb - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK
+ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK
+ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK
+ [Valid] EF - Deneb - Operations - Withdrawals - partially_withdrawable_validator_legacy_ OK
+ [Valid] EF - Deneb - Operations - Withdrawals - random_0 OK
+ [Valid] EF - Deneb - Operations - Withdrawals - random_full_withdrawals_0 OK
+ [Valid] EF - Deneb - Operations - Withdrawals - random_full_withdrawals_1 OK

@@ -2163,7 +2171,7 @@ OK: 15/15 Fail: 0/15 Skip: 0/15
+ [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK
+ [Valid] EF - Deneb - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balan OK
```
-OK: 50/50 Fail: 0/50 Skip: 0/50
+OK: 53/53 Fail: 0/53 Skip: 0/53
## EF - Deneb - Random [Preset: mainnet]
```diff
+ [Valid] EF - Deneb - Random - randomized_0 [Preset: mainnet] OK
@@ -2487,6 +2495,7 @@ OK: 13/13 Fail: 0/13 Skip: 0/13
```diff
+ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_max [Preset: m OK
+ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max [Pres OK
+ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max_next_ OK
+ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_under_max [Pre OK
+ Pending deposits - apply_pending_deposit_correct_sig_but_forked_state [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_effective_deposit_with_genesis_fork_version [Pres OK

@@ -2501,6 +2510,7 @@ OK: 13/13 Fail: 0/13 Skip: 0/13
+ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials [Preset: mai OK
+ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials_over_min_act OK
+ Pending deposits - apply_pending_deposit_over_min_activation [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_over_min_activation_next_increment [Preset: mainn OK
+ Pending deposits - apply_pending_deposit_success_top_up_to_withdrawn_validator [Preset: ma OK
+ Pending deposits - apply_pending_deposit_top_up__less_effective_balance [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK

@@ -2517,6 +2527,7 @@ OK: 13/13 Fail: 0/13 Skip: 0/13
+ Pending deposits - process_pending_deposits_eth1_bridge_transition_pending [Preset: mainne OK
+ Pending deposits - process_pending_deposits_limit_is_reached [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_mixture_of_skipped_and_above_churn [Preset: ma OK
+ Pending deposits - process_pending_deposits_multiple_for_new_validator [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_multiple_pending_deposits_above_churn [Preset: OK
+ Pending deposits - process_pending_deposits_multiple_pending_deposits_below_churn [Preset: OK
+ Pending deposits - process_pending_deposits_multiple_pending_one_skipped [Preset: mainnet] OK

@@ -2527,7 +2538,7 @@ OK: 13/13 Fail: 0/13 Skip: 0/13
+ Pending deposits - process_pending_deposits_withdrawable_validator [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: ma OK
```
-OK: 41/41 Fail: 0/41 Skip: 0/41
+OK: 44/44 Fail: 0/44 Skip: 0/44
## EF - Electra - Epoch Processing - RANDAO mixes reset [Preset: mainnet]
```diff
+ RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK

@@ -2806,6 +2817,8 @@ OK: 8/8 Fail: 0/8 Skip: 0/8
+ [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_1_e OK
+ [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_32_ OK
+ [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_length_emp OK
+ [Valid] EF - Electra - Operations - Execution Payload - incorrect_transaction_no_blobs_b OK
+ [Valid] EF - Electra - Operations - Execution Payload - no_commitments_for_transactions OK
+ [Valid] EF - Electra - Operations - Execution Payload - no_transactions_with_commitments OK
+ [Valid] EF - Electra - Operations - Execution Payload - non_empty_extra_data_first_paylo OK
+ [Valid] EF - Electra - Operations - Execution Payload - non_empty_extra_data_regular_pay OK

@@ -2821,7 +2834,7 @@ OK: 8/8 Fail: 0/8 Skip: 0/8
+ [Valid] EF - Electra - Operations - Execution Payload - zero_length_transaction_regular_ OK
+ [Valid] EF - Electra - Operations - Execution Payload - zeroed_commitment OK
```
-OK: 38/38 Fail: 0/38 Skip: 0/38
+OK: 40/40 Fail: 0/40 Skip: 0/40
## EF - Electra - Operations - Proposer Slashing [Preset: mainnet]
```diff
+ [Invalid] EF - Electra - Operations - Proposer Slashing - invalid_different_proposer_indic OK

@@ -2945,6 +2958,15 @@ OK: 19/19 Fail: 0/19 Skip: 0/19
+ [Invalid] EF - Electra - Operations - Withdrawals - invalid_two_expected_partial_withdrawa OK
+ [Valid] EF - Electra - Operations - Withdrawals - all_withdrawal OK
+ [Valid] EF - Electra - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_compo OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK
+ [Valid] EF - Electra - Operations - Withdrawals - partially_withdrawable_validator_legac OK
+ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_at_max OK
+ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_at_max_mixed_with_ OK
+ [Valid] EF - Electra - Operations - Withdrawals - pending_withdrawals_exiting_validator OK

@@ -2991,7 +3013,7 @@ OK: 19/19 Fail: 0/19 Skip: 0/19
+ [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK
+ [Valid] EF - Electra - Operations - Withdrawals - withdrawable_epoch_but_0_effective_bal OK
```
-OK: 66/66 Fail: 0/66 Skip: 0/66
+OK: 75/75 Fail: 0/75 Skip: 0/75
## EF - Electra - Random [Preset: mainnet]
```diff
+ [Valid] EF - Electra - Random - randomized_0 [Preset: mainnet] OK

@@ -3147,8 +3169,10 @@ OK: 56/56 Fail: 0/56 Skip: 0/56
+ [Valid] EF - Electra - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_request_with_same_pubkey_different_with OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_and_top_up_same_blo OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__deposit_with_same_pubkey_di OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits [Pres OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_eth1_deposits_up_to OK
+ [Valid] EF - Electra - Sanity - Blocks - deposit_transition__process_max_eth1_deposits [ OK

@@ -3172,6 +3196,8 @@ OK: 56/56 Fail: 0/56 Skip: 0/56
+ [Valid] EF - Electra - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: mainnet] OK
+ [Valid] EF - Electra - Sanity - Blocks - multiple_different_proposer_slashings_same_bloc OK
+ [Valid] EF - Electra - Sanity - Blocks - multiple_different_validator_exits_same_block [ OK
+ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_differe OK
+ [Valid] EF - Electra - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_va OK
+ [Valid] EF - Electra - Sanity - Blocks - one_blob [Preset: mainnet] OK
+ [Valid] EF - Electra - Sanity - Blocks - one_blob_max_txs [Preset: mainnet] OK
+ [Valid] EF - Electra - Sanity - Blocks - one_blob_two_txs [Preset: mainnet] OK

@@ -3190,10 +3216,12 @@ OK: 56/56 Fail: 0/56 Skip: 0/56
+ [Valid] EF - Electra - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Pres OK
+ [Valid] EF - Electra - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: ma OK
+ [Valid] EF - Electra - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK
+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_sa OK
+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validato OK
+ [Valid] EF - Electra - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK
+ [Valid] EF - Electra - Sanity - Blocks - zero_blob [Preset: mainnet] OK
```
-OK: 80/80 Fail: 0/80 Skip: 0/80
+OK: 86/86 Fail: 0/86 Skip: 0/86
## EF - Electra - Sanity - Slots [Preset: mainnet]
```diff
+ EF - Electra - Slots - double_empty_epoch [Preset: mainnet] OK
@@ -3248,6 +3276,667 @@ OK: 27/27 Fail: 0/27 Skip: 0/27
+ test_process_light_client_update_not_timeout OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## EF - Fulu - Epoch Processing - Effective balance updates [Preset: mainnet]
```diff
+ Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK
+ Effective balance updates - effective_balance_hysteresis_with_compounding_credentials [Pre OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## EF - Fulu - Epoch Processing - Eth1 data reset [Preset: mainnet]
```diff
+ Eth1 data reset - eth1_vote_no_reset [Preset: mainnet] OK
+ Eth1 data reset - eth1_vote_reset [Preset: mainnet] OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## EF - Fulu - Epoch Processing - Historical summaries update [Preset: mainnet]
```diff
+ Historical summaries update - historical_summaries_accumulator [Preset: mainnet] OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## EF - Fulu - Epoch Processing - Inactivity [Preset: mainnet]
```diff
+ Inactivity - all_zero_inactivity_scores_empty_participation [Preset: mainnet] OK
+ Inactivity - all_zero_inactivity_scores_empty_participation_leaking [Preset: mainnet] OK
+ Inactivity - all_zero_inactivity_scores_full_participation [Preset: mainnet] OK
+ Inactivity - all_zero_inactivity_scores_full_participation_leaking [Preset: mainnet] OK
+ Inactivity - all_zero_inactivity_scores_random_participation [Preset: mainnet] OK
+ Inactivity - all_zero_inactivity_scores_random_participation_leaking [Preset: mainnet] OK
+ Inactivity - genesis [Preset: mainnet] OK
+ Inactivity - genesis_random_scores [Preset: mainnet] OK
+ Inactivity - random_inactivity_scores_empty_participation [Preset: mainnet] OK
+ Inactivity - random_inactivity_scores_empty_participation_leaking [Preset: mainnet] OK
+ Inactivity - random_inactivity_scores_full_participation [Preset: mainnet] OK
+ Inactivity - random_inactivity_scores_full_participation_leaking [Preset: mainnet] OK
+ Inactivity - random_inactivity_scores_random_participation [Preset: mainnet] OK
+ Inactivity - random_inactivity_scores_random_participation_leaking [Preset: mainnet] OK
+ Inactivity - randomized_state [Preset: mainnet] OK
+ Inactivity - randomized_state_leaking [Preset: mainnet] OK
+ Inactivity - some_exited_full_random_leaking [Preset: mainnet] OK
+ Inactivity - some_slashed_full_random [Preset: mainnet] OK
+ Inactivity - some_slashed_full_random_leaking [Preset: mainnet] OK
+ Inactivity - some_slashed_zero_scores_full_participation [Preset: mainnet] OK
+ Inactivity - some_slashed_zero_scores_full_participation_leaking [Preset: mainnet] OK
```
OK: 21/21 Fail: 0/21 Skip: 0/21
## EF - Fulu - Epoch Processing - Justification & Finalization [Preset: mainnet]
```diff
+ Justification & Finalization - 123_ok_support [Preset: mainnet] OK
+ Justification & Finalization - 123_poor_support [Preset: mainnet] OK
+ Justification & Finalization - 12_ok_support [Preset: mainnet] OK
+ Justification & Finalization - 12_ok_support_messed_target [Preset: mainnet] OK
+ Justification & Finalization - 12_poor_support [Preset: mainnet] OK
+ Justification & Finalization - 234_ok_support [Preset: mainnet] OK
+ Justification & Finalization - 234_poor_support [Preset: mainnet] OK
+ Justification & Finalization - 23_ok_support [Preset: mainnet] OK
+ Justification & Finalization - 23_poor_support [Preset: mainnet] OK
+ Justification & Finalization - balance_threshold_with_exited_validators [Preset: mainnet] OK
```
OK: 10/10 Fail: 0/10 Skip: 0/10
## EF - Fulu - Epoch Processing - Participation flag updates [Preset: mainnet]
```diff
+ Participation flag updates - all_zeroed [Preset: mainnet] OK
+ Participation flag updates - current_epoch_zeroed [Preset: mainnet] OK
+ Participation flag updates - current_filled [Preset: mainnet] OK
+ Participation flag updates - filled [Preset: mainnet] OK
+ Participation flag updates - previous_epoch_zeroed [Preset: mainnet] OK
+ Participation flag updates - previous_filled [Preset: mainnet] OK
+ Participation flag updates - random_0 [Preset: mainnet] OK
+ Participation flag updates - random_1 [Preset: mainnet] OK
+ Participation flag updates - random_2 [Preset: mainnet] OK
+ Participation flag updates - random_genesis [Preset: mainnet] OK
```
OK: 10/10 Fail: 0/10 Skip: 0/10
## EF - Fulu - Epoch Processing - Pending consolidations [Preset: mainnet]
```diff
+ Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK
+ Pending consolidations - basic_pending_consolidation [Preset: mainnet] OK
+ Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: mainnet] OK
+ Pending consolidations - pending_consolidation_balance_computation_compounding [Preset: ma OK
+ Pending consolidations - pending_consolidation_balance_computation_eth1 [Preset: mainnet] OK
+ Pending consolidations - pending_consolidation_compounding_creds [Preset: mainnet] OK
+ Pending consolidations - pending_consolidation_future_epoch [Preset: mainnet] OK
+ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective [ OK
+ Pending consolidations - pending_consolidation_source_balance_greater_than_max_effective_c OK
+ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective [Pre OK
+ Pending consolidations - pending_consolidation_source_balance_less_than_max_effective_comp OK
+ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: mainnet] OK
+ Pending consolidations - skip_consolidation_when_source_slashed [Preset: mainnet] OK
```
OK: 13/13 Fail: 0/13 Skip: 0/13
## EF - Fulu - Epoch Processing - Pending deposits [Preset: mainnet]
```diff
+ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_max [Preset: m OK
+ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max [Pres OK
+ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_over_max_next_ OK
+ Pending deposits - apply_pending_deposit_compounding_withdrawal_credentials_under_max [Pre OK
+ Pending deposits - apply_pending_deposit_correct_sig_but_forked_state [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_effective_deposit_with_genesis_fork_version [Pres OK
+ Pending deposits - apply_pending_deposit_eth1_withdrawal_credentials [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_incorrect_sig_new_deposit [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_incorrect_sig_top_up [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_incorrect_withdrawal_credentials_top_up [Preset: OK
+ Pending deposits - apply_pending_deposit_ineffective_deposit_with_bad_fork_version [Preset OK
+ Pending deposits - apply_pending_deposit_key_validate_invalid_decompression [Preset: mainn OK
+ Pending deposits - apply_pending_deposit_key_validate_invalid_subgroup [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_min_activation [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials [Preset: mai OK
+ Pending deposits - apply_pending_deposit_non_versioned_withdrawal_credentials_over_min_act OK
+ Pending deposits - apply_pending_deposit_over_min_activation [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_over_min_activation_next_increment [Preset: mainn OK
+ Pending deposits - apply_pending_deposit_success_top_up_to_withdrawn_validator [Preset: ma OK
+ Pending deposits - apply_pending_deposit_top_up__less_effective_balance [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_top_up__max_effective_balance_compounding [Preset OK
+ Pending deposits - apply_pending_deposit_top_up__min_activation_balance [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_top_up__min_activation_balance_compounding [Prese OK
+ Pending deposits - apply_pending_deposit_top_up__zero_balance [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_under_min_activation [Preset: mainnet] OK
+ Pending deposits - apply_pending_deposit_with_previous_fork_version [Preset: mainnet] OK
+ Pending deposits - ineffective_deposit_with_current_fork_version [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_balance_above_churn [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_balance_equal_churn [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_eth1_bridge_transition_complete [Preset: mainn OK
+ Pending deposits - process_pending_deposits_eth1_bridge_transition_not_applied [Preset: ma OK
+ Pending deposits - process_pending_deposits_eth1_bridge_transition_pending [Preset: mainne OK
+ Pending deposits - process_pending_deposits_limit_is_reached [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_mixture_of_skipped_and_above_churn [Preset: ma OK
+ Pending deposits - process_pending_deposits_multiple_for_new_validator [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_multiple_pending_deposits_above_churn [Preset: OK
+ Pending deposits - process_pending_deposits_multiple_pending_deposits_below_churn [Preset: OK
+ Pending deposits - process_pending_deposits_multiple_pending_one_skipped [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_multiple_skipped_deposits_exiting_validators [ OK
+ Pending deposits - process_pending_deposits_not_finalized [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_preexisting_churn [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_skipped_deposit_exiting_validator [Preset: mai OK
+ Pending deposits - process_pending_deposits_withdrawable_validator [Preset: mainnet] OK
+ Pending deposits - process_pending_deposits_withdrawable_validator_not_churned [Preset: ma OK
```
OK: 44/44 Fail: 0/44 Skip: 0/44
## EF - Fulu - Epoch Processing - RANDAO mixes reset [Preset: mainnet]
```diff
+ RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## EF - Fulu - Epoch Processing - Registry updates [Preset: mainnet]
```diff
+ Registry updates - activation_queue_activation_and_ejection__1 [Preset: mainnet] OK
+ Registry updates - activation_queue_activation_and_ejection__churn_limit [Preset: mainnet] OK
+ Registry updates - activation_queue_activation_and_ejection__exceed_churn_limit [Preset: m OK
+ Registry updates - activation_queue_efficiency_min [Preset: mainnet] OK
+ Registry updates - activation_queue_eligibility__greater_than_min_activation_balance [Pres OK
+ Registry updates - activation_queue_eligibility__less_than_min_activation_balance [Preset: OK
+ Registry updates - activation_queue_eligibility__min_activation_balance [Preset: mainnet] OK
+ Registry updates - activation_queue_eligibility__min_activation_balance_compounding_creds OK
+ Registry updates - activation_queue_eligibility__min_activation_balance_eth1_creds [Preset OK
+ Registry updates - activation_queue_no_activation_no_finality [Preset: mainnet] OK
+ Registry updates - activation_queue_sorting [Preset: mainnet] OK
+ Registry updates - activation_queue_to_activated_if_finalized [Preset: mainnet] OK
+ Registry updates - add_to_activation_queue [Preset: mainnet] OK
+ Registry updates - ejection [Preset: mainnet] OK
+ Registry updates - ejection_past_churn_limit_min [Preset: mainnet] OK
+ Registry updates - invalid_large_withdrawable_epoch [Preset: mainnet] OK
```
OK: 16/16 Fail: 0/16 Skip: 0/16
## EF - Fulu - Epoch Processing - Rewards and penalties [Preset: mainnet]
```diff
+ Rewards and penalties - almost_empty_attestations [Preset: mainnet] OK
+ Rewards and penalties - almost_empty_attestations_with_leak [Preset: mainnet] OK
+ Rewards and penalties - almost_full_attestations [Preset: mainnet] OK
+ Rewards and penalties - almost_full_attestations_with_leak [Preset: mainnet] OK
+ Rewards and penalties - attestations_some_slashed [Preset: mainnet] OK
+ Rewards and penalties - duplicate_attestation [Preset: mainnet] OK
+ Rewards and penalties - full_attestation_participation [Preset: mainnet] OK
+ Rewards and penalties - full_attestation_participation_with_leak [Preset: mainnet] OK
+ Rewards and penalties - full_attestations_misc_balances [Preset: mainnet] OK
+ Rewards and penalties - full_attestations_one_validaor_one_gwei [Preset: mainnet] OK
+ Rewards and penalties - genesis_epoch_full_attestations_no_rewards [Preset: mainnet] OK
+ Rewards and penalties - genesis_epoch_no_attestations_no_penalties [Preset: mainnet] OK
+ Rewards and penalties - no_attestations_all_penalties [Preset: mainnet] OK
+ Rewards and penalties - random_fill_attestations [Preset: mainnet] OK
+ Rewards and penalties - random_fill_attestations_with_leak [Preset: mainnet] OK
```
OK: 15/15 Fail: 0/15 Skip: 0/15
## EF - Fulu - Epoch Processing - Slashings [Preset: mainnet]
```diff
+ Slashings - low_penalty [Preset: mainnet] OK
+ Slashings - max_penalties [Preset: mainnet] OK
+ Slashings - minimal_penalty [Preset: mainnet] OK
+ Slashings - scaled_penalties [Preset: mainnet] OK
+ Slashings - slashings_with_random_state [Preset: mainnet] OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
## EF - Fulu - Epoch Processing - Slashings reset [Preset: mainnet]
```diff
+ Slashings reset - flush_slashings [Preset: mainnet] OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## EF - Fulu - Finality [Preset: mainnet]
```diff
+ [Valid] EF - Fulu - Finality - finality_no_updates_at_genesis [Preset: mainnet] OK
+ [Valid] EF - Fulu - Finality - finality_rule_1 [Preset: mainnet] OK
+ [Valid] EF - Fulu - Finality - finality_rule_2 [Preset: mainnet] OK
+ [Valid] EF - Fulu - Finality - finality_rule_3 [Preset: mainnet] OK
+ [Valid] EF - Fulu - Finality - finality_rule_4 [Preset: mainnet] OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
## EF - Fulu - Fork [Preset: mainnet]
```diff
+ EF - Fulu - Fork - fork_base_state [Preset: mainnet] OK
+ EF - Fulu - Fork - fork_many_next_epoch [Preset: mainnet] OK
+ EF - Fulu - Fork - fork_next_epoch [Preset: mainnet] OK
+ EF - Fulu - Fork - fork_next_epoch_with_block [Preset: mainnet] OK
+ EF - Fulu - Fork - fork_random_low_balances [Preset: mainnet] OK
+ EF - Fulu - Fork - fork_random_misc_balances [Preset: mainnet] OK
+ EF - Fulu - Fork - fulu_fork_random_0 [Preset: mainnet] OK
+ EF - Fulu - Fork - fulu_fork_random_1 [Preset: mainnet] OK
+ EF - Fulu - Fork - fulu_fork_random_2 [Preset: mainnet] OK
+ EF - Fulu - Fork - fulu_fork_random_3 [Preset: mainnet] OK
+ EF - Fulu - Fork - fulu_fork_random_low_balances [Preset: mainnet] OK
+ EF - Fulu - Fork - fulu_fork_random_misc_balances [Preset: mainnet] OK
```
OK: 12/12 Fail: 0/12 Skip: 0/12
## EF - Fulu - Operations - Attestation [Preset: mainnet]
```diff
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_after_max_inclusion_slot OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_attestation_data_index_not_zero OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_attestation_signature OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_bad_source_root OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_before_inclusion_delay OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_committee_index OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_correct_attestation_included_afte OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_current_source_root OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_empty_participants_seemingly_vali OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_empty_participants_zeroes_sig OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_future_target_epoch OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_head_and_target_include OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_head_included_after_max OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_incorrect_target_included_after_m OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_index OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_mismatched_target_and_slot OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_new_source_epoch OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_nonset_committee_bits OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_old_source_epoch OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_old_target_epoch OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_previous_source_root OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_source_root_is_target_root OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_few_aggregation_bits OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_many_aggregation_bits OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_too_many_committee_bits OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_committee_signatu OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_slot_0 OK
+ [Invalid] EF - Fulu - Operations - Attestation - invalid_wrong_index_for_slot_1 OK
+ [Valid] EF - Fulu - Operations - Attestation - at_max_inclusion_slot OK
+ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_max_inclu OK
+ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_min_inclu OK
+ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_one_epoch OK
+ [Valid] EF - Fulu - Operations - Attestation - correct_attestation_included_at_sqrt_epoc OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_included_at_epo OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_included_at_sqr OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_and_target_min_inclusion_d OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_max_inclusion_ OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_min_inclusion_ OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_head_included_at_sqrt_epoch_del OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_epoch_delay OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_min_inclusio OK
+ [Valid] EF - Fulu - Operations - Attestation - incorrect_target_included_at_sqrt_epoch_d OK
+ [Valid] EF - Fulu - Operations - Attestation - multi_proposer_index_iterations OK
+ [Valid] EF - Fulu - Operations - Attestation - one_basic_attestation OK
+ [Valid] EF - Fulu - Operations - Attestation - previous_epoch OK
```
OK: 45/45 Fail: 0/45 Skip: 0/45
## EF - Fulu - Operations - Attester Slashing [Preset: mainnet]
```diff
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_all_empty_indices OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_bad_extra_index OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_bad_replaced_index OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_duplicate_index_double OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_duplicate_index_normal OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_empty_indices OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att1_high_index OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_bad_extra_index OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_bad_replaced_index OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_duplicate_index_double OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_duplicate_index_normal OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_empty_indices OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_att2_high_index OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_1 OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_1_and_2 OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_incorrect_sig_2 OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_no_double_or_surround OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_participants_already_slashe OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_same_data OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_unsorted_att_1 OK
+ [Invalid] EF - Fulu - Operations - Attester Slashing - invalid_unsorted_att_2 OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - already_exited_long_ago OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - already_exited_recent OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - attestation_from_future OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - basic_double OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - basic_surround OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - low_balances OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - misc_balances OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - proposer_index_slashed OK
+ [Valid] EF - Fulu - Operations - Attester Slashing - with_effective_balance_disparity OK
```
OK: 30/30 Fail: 0/30 Skip: 0/30
## EF - Fulu - Operations - BLS to execution change [Preset: mainnet]
```diff
+ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_already_0x01 OK
+ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_bad_signature OK
+ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_current_fork_version OK
+ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_genesis_validators_ro OK
+ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_incorrect_from_bls_pu OK
+ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_previous_fork_version OK
+ [Invalid] EF - Fulu - Operations - BLS to execution change - invalid_val_index_out_of_rang OK
+ [Valid] EF - Fulu - Operations - BLS to execution change - genesis_fork_version OK
+ [Valid] EF - Fulu - Operations - BLS to execution change - success OK
+ [Valid] EF - Fulu - Operations - BLS to execution change - success_exited OK
+ [Valid] EF - Fulu - Operations - BLS to execution change - success_in_activation_queue OK
+ [Valid] EF - Fulu - Operations - BLS to execution change - success_in_exit_queue OK
+ [Valid] EF - Fulu - Operations - BLS to execution change - success_not_activated OK
+ [Valid] EF - Fulu - Operations - BLS to execution change - success_withdrawable OK
```
OK: 14/14 Fail: 0/14 Skip: 0/14
## EF - Fulu - Operations - Block Header [Preset: mainnet]
```diff
+ [Invalid] EF - Fulu - Operations - Block Header - invalid_multiple_blocks_single_slot OK
+ [Invalid] EF - Fulu - Operations - Block Header - invalid_parent_root OK
+ [Invalid] EF - Fulu - Operations - Block Header - invalid_proposer_index OK
+ [Invalid] EF - Fulu - Operations - Block Header - invalid_proposer_slashed OK
+ [Invalid] EF - Fulu - Operations - Block Header - invalid_slot_block_header OK
+ [Valid] EF - Fulu - Operations - Block Header - basic_block_header OK
```
OK: 6/6 Fail: 0/6 Skip: 0/6
## EF - Fulu - Operations - Consolidation Request [Preset: mainnet]
```diff
+ [Valid] EF - Fulu - Operations - Consolidation Request - basic_switch_to_compounding OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - incorrect_not_enough_consolidat OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_exited_so OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_inactive_ OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_not_autho OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_source_bl OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_source_co OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_unknown_s OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_with_exce OK
+ [Valid] EF - Fulu - Operations - Consolidation Request - switch_to_compounding_with_pend OK
```
OK: 10/10 Fail: 0/10 Skip: 0/10
## EF - Fulu - Operations - Deposit [Preset: mainnet]
```diff
+ [Invalid] EF - Fulu - Operations - Deposit - invalid_bad_merkle_proof OK
+ [Invalid] EF - Fulu - Operations - Deposit - invalid_wrong_deposit_for_deposit_count OK
+ [Valid] EF - Fulu - Operations - Deposit - correct_sig_but_forked_state OK
+ [Valid] EF - Fulu - Operations - Deposit - effective_deposit_with_genesis_fork_version OK
+ [Valid] EF - Fulu - Operations - Deposit - incorrect_sig_new_deposit OK
+ [Valid] EF - Fulu - Operations - Deposit - incorrect_sig_top_up OK
+ [Valid] EF - Fulu - Operations - Deposit - incorrect_withdrawal_credentials_top_up OK
+ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_bad_fork_version OK
+ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_current_fork_version OK
+ [Valid] EF - Fulu - Operations - Deposit - ineffective_deposit_with_previous_fork_versio OK
+ [Valid] EF - Fulu - Operations - Deposit - key_validate_invalid_decompression OK
+ [Valid] EF - Fulu - Operations - Deposit - key_validate_invalid_subgroup OK
+ [Valid] EF - Fulu - Operations - Deposit - new_deposit_eth1_withdrawal_credentials OK
+ [Valid] EF - Fulu - Operations - Deposit - new_deposit_max OK
+ [Valid] EF - Fulu - Operations - Deposit - new_deposit_non_versioned_withdrawal_credenti OK
+ [Valid] EF - Fulu - Operations - Deposit - new_deposit_over_max OK
+ [Valid] EF - Fulu - Operations - Deposit - new_deposit_under_max OK
+ [Valid] EF - Fulu - Operations - Deposit - success_top_up_to_withdrawn_validator OK
+ [Valid] EF - Fulu - Operations - Deposit - top_up__less_effective_balance OK
+ [Valid] EF - Fulu - Operations - Deposit - top_up__max_effective_balance OK
+ [Valid] EF - Fulu - Operations - Deposit - top_up__zero_balance OK
```
OK: 21/21 Fail: 0/21 Skip: 0/21
## EF - Fulu - Operations - Deposit Request [Preset: mainnet]
```diff
+ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_invalid_sig OK
+ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_max_effective OK
+ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_min_activatio OK
+ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_set_start_ind OK
+ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_set_start_ind OK
+ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_invali OK
+ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_max_ef OK
+ [Valid] EF - Fulu - Operations - Deposit Request - process_deposit_request_top_up_min_ac OK
```
OK: 8/8 Fail: 0/8 Skip: 0/8
## EF - Fulu - Operations - Execution Payload [Preset: mainnet]
```diff
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_first_payloa OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_everything_regular_payl OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_execution_first_payload OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_execution_regular_paylo OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_first_paylo OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_parent_hash_regular_pay OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_pre_randao_regular_payl OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_bad_prev_randao_first_paylo OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_correct_input__execution_in OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_exceed_max_blobs_per_block OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_future_timestamp_first_payl OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_future_timestamp_regular_pa OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_first_payloa OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_past_timestamp_regular_payl OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_randomized_non_validated_ex OK
+ [Invalid] EF - Fulu - Operations - Execution Payload - invalid_randomized_non_validated_ex OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_blob_tx_type OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_block_hash OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_commitment OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_commitments_order OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_1_byte OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_1_extr OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_32_ext OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_length_empty OK
+ [Valid] EF - Fulu - Operations - Execution Payload - incorrect_transaction_no_blobs_but_ OK
+ [Valid] EF - Fulu - Operations - Execution Payload - no_commitments_for_transactions OK
+ [Valid] EF - Fulu - Operations - Execution Payload - no_transactions_with_commitments OK
+ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_extra_data_first_payload OK
+ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_extra_data_regular_payloa OK
+ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_transactions_first_payloa OK
+ [Valid] EF - Fulu - Operations - Execution Payload - non_empty_transactions_regular_payl OK
+ [Valid] EF - Fulu - Operations - Execution Payload - randomized_non_validated_execution_ OK
+ [Valid] EF - Fulu - Operations - Execution Payload - randomized_non_validated_execution_ OK
+ [Valid] EF - Fulu - Operations - Execution Payload - success_first_payload OK
+ [Valid] EF - Fulu - Operations - Execution Payload - success_first_payload_with_gap_slot OK
+ [Valid] EF - Fulu - Operations - Execution Payload - success_regular_payload OK
+ [Valid] EF - Fulu - Operations - Execution Payload - success_regular_payload_with_gap_sl OK
+ [Valid] EF - Fulu - Operations - Execution Payload - zero_length_transaction_first_paylo OK
+ [Valid] EF - Fulu - Operations - Execution Payload - zero_length_transaction_regular_pay OK
+ [Valid] EF - Fulu - Operations - Execution Payload - zeroed_commitment OK
```
OK: 40/40 Fail: 0/40 Skip: 0/40
## EF - Fulu - Operations - Proposer Slashing [Preset: mainnet]
```diff
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_different_proposer_indices OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_d OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_headers_are_same_sigs_are_s OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_proposer_index OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_1 OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2 OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_1_and_2_swap OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_incorrect_sig_2 OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_not_activated OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_slashed OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_proposer_is_withdrawn OK
+ [Invalid] EF - Fulu - Operations - Proposer Slashing - invalid_slots_of_different_epochs OK
+ [Valid] EF - Fulu - Operations - Proposer Slashing - basic OK
+ [Valid] EF - Fulu - Operations - Proposer Slashing - block_header_from_future OK
+ [Valid] EF - Fulu - Operations - Proposer Slashing - slashed_and_proposer_index_the_same OK
```
OK: 15/15 Fail: 0/15 Skip: 0/15
## EF - Fulu - Operations - Sync Aggregate [Preset: mainnet]
```diff
+ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_bad_domain OK
+ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_extra_participant OK
+ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_infinite_signature_w OK
+ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_infinite_signature_w OK
+ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_missing_participant OK
+ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_no_participants OK
+ [Invalid] EF - Fulu - Operations - Sync Aggregate - invalid_signature_past_block OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - random_all_but_one_participating_with_ OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - random_high_participation_with_duplica OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - random_low_participation_with_duplicat OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - random_misc_balances_and_half_particip OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - random_only_one_participant_with_dupli OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - random_with_exits_with_duplicates OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_duplicate_commi OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_empty_participa OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_rewards_not_full_partic OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_nonparticipating_e OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_nonparticipating_w OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_participating_exit OK
+ [Valid] EF - Fulu - Operations - Sync Aggregate - sync_committee_with_participating_with
|
||||
```
|
||||
OK: 26/26 Fail: 0/26 Skip: 0/26
|
||||
## EF - Fulu - Operations - Voluntary Exit [Preset: mainnet]
|
||||
```diff
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_incorrect_signature OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_already_exited OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_exit_in_future OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_has_pending_withdraw OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_incorrect_validator_ OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_not_active OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_validator_not_active_long_enou OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_fo OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_current_fo OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_fo OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - invalid_voluntary_exit_with_genesis_fo OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_vers OK
|
||||
+ [Invalid] EF - Fulu - Operations - Voluntary Exit - voluntary_exit_with_previous_fork_vers OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - basic OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - default_exit_epoch_subsequent_exit OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - exit_existing_churn_and_balance_multip OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - exit_existing_churn_and_churn_limit_ba OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - exit_with_balance_equal_to_churn_limit OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - exit_with_balance_multiple_of_churn_li OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - max_balance_exit OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exit OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_above_churn OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - min_balance_exits_up_to_churn OK
|
||||
+ [Valid] EF - Fulu - Operations - Voluntary Exit - success_exit_queue__min_churn OK
|
||||
```
|
||||
OK: 24/24 Fail: 0/24 Skip: 0/24
|
||||
## EF - Fulu - Operations - Withdrawal Request [Preset: mainnet]
|
||||
```diff
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - activation_epoch_less_than_shard_c OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request_with_comp OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - basic_withdrawal_request_with_firs OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - full_exit_request_has_partial_with OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_inactive_validator OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_source_address OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - incorrect_withdrawal_credential_pr OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - insufficient_balance OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - insufficient_effective_balance OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - no_compounding_credentials OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - no_excess_balance OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - on_withdrawal_request_initiated_ex OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_activation_epoc OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_incorrect_sourc OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_incorrect_withd OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - partial_withdrawal_on_exit_initiat OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - pending_withdrawals_consume_all_ex OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawal Request - unknown_pubkey OK
|
||||
```
|
||||
OK: 19/19 Fail: 0/19 Skip: 0/19
|
||||
## EF - Fulu - Operations - Withdrawals [Preset: mainnet]
|
||||
```diff
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_few_ OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_mixed_withdrawable_in_queue OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_a_lot_partially_withdrawable_too_ OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_address_full OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_address_partial OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_amount_full OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_amount_partial OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_incorrect_withdrawal_index OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_many_incorrectly_full OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_many_incorrectly_partial OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_max_per_slot_full_withdrawals_and OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_max_per_slot_partial_withdrawals_ OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_non_withdrawable_non_empty_withdr OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_full_withdrawal_and_ OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_full_withdrawal_and_ OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_expected_partial_withdrawal_a OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_of_many_incorrectly_full OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_one_of_many_incorrectly_partial OK
|
||||
+ [Invalid] EF - Fulu - Operations - Withdrawals - invalid_two_expected_partial_withdrawal_a OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - all_withdrawal OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - no_withdrawals_but_some_next_epoch OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_compound OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_e OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_m OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - partially_withdrawable_validator_legacy_m OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_at_max OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_at_max_mixed_with_swe OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_exiting_validator OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_low_effective_balance OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_mixed_with_sweep_and_ OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_next_epoch OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_no_excess_balance OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_one_skipped_one_effec OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_effective_sweep_ OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_ineffective_swee OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - pending_withdrawals_with_sweep_different_ OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_0 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_0 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_1 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_2 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_full_withdrawals_3 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_1 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_2 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_3 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_4 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - random_partial_withdrawals_5 OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_all_fully_withdrawable_in_one_swe OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_all_partially_withdrawable_in_one OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_excess_balance_but_no_max_effecti OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_max_partial_withdrawable OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_mixed_fully_and_partial_withdrawa OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_mixed_fully_and_partial_withdrawa OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_no_excess_balance_compounding OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_no_max_effective_balance_compound OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_one_full_withdrawal OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_active_a OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_exited_a OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_in_exit_ OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawable_not_yet_ OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_one_partial_withdrawal OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_two_partial_withdrawable OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - success_zero_expected_withdrawals OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_balance OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balanc OK
|
||||
+ [Valid] EF - Fulu - Operations - Withdrawals - withdrawable_epoch_but_0_effective_balanc OK
|
||||
```
|
||||
OK: 75/75 Fail: 0/75 Skip: 0/75
|
||||
## EF - Fulu - Random [Preset: mainnet]
|
||||
```diff
|
||||
+ [Valid] EF - Fulu - Random - randomized_0 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_1 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_10 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_11 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_12 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_13 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_14 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_15 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_2 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_3 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_4 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_5 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_6 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_7 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_8 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Random - randomized_9 [Preset: mainnet] OK
|
||||
```
|
||||
OK: 16/16 Fail: 0/16 Skip: 0/16
|
||||
## EF - Fulu - Rewards [Preset: mainnet]
|
||||
```diff
|
||||
+ EF - Fulu - Rewards - all_balances_too_low_for_reward [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - empty [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - empty_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_all_correct [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_but_partial_participation [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_but_partial_participation_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_0 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_1 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_2 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_3 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_4 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_low_balances_0 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_low_balances_1 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_misc_balances [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_seven_epoch_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_ten_epoch_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_without_leak_0 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - full_random_without_leak_and_current_exit_0 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - half_full [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - half_full_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - quarter_full [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - quarter_full_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - some_very_low_effective_balances_that_attested [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - some_very_low_effective_balances_that_attested_leak [Preset: mainnet OK
|
||||
+ EF - Fulu - Rewards - some_very_low_effective_balances_that_did_not_attest [Preset: mainne OK
|
||||
+ EF - Fulu - Rewards - some_very_low_effective_balances_that_did_not_attest_leak [Preset: m OK
|
||||
+ EF - Fulu - Rewards - with_exited_validators [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - with_exited_validators_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - with_not_yet_activated_validators [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - with_not_yet_activated_validators_leak [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - with_slashed_validators [Preset: mainnet] OK
|
||||
+ EF - Fulu - Rewards - with_slashed_validators_leak [Preset: mainnet] OK
|
||||
```
|
||||
OK: 34/34 Fail: 0/34 Skip: 0/34
|
||||
## EF - Fulu - SSZ consensus objects [Preset: mainnet]
|
||||
```diff
|
||||
+ Testing AggregateAndProof OK
|
||||
@ -3311,6 +4000,98 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
|
||||
+ Testing WithdrawalRequest OK
|
||||
```
|
||||
OK: 59/59 Fail: 0/59 Skip: 0/59
|
||||
## EF - Fulu - Sanity - Blocks [Preset: mainnet]
|
||||
```diff
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_bls_changes_same_block [Preset: OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_deposit_same_block [Preset: main OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_proposer_slashings_same_block [P OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_duplicate_validator_exit_same_block [Prese OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_block_sig [Preset: mainnet] OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_expected OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_proposer_index_sig_from_proposer OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_incorrect_state_root [Preset: mainnet] OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_is_execution_enabled_false [Preset: mainne OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_one_blob_max_plus_one_txs [Preset: mainnet OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_only_increase_deposit_count [Preset: mainn OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_parent_from_same_slot [Preset: mainnet] OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_prev_slot_block_transition [Preset: mainne OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_same_slot_block_transition [Preset: mainne OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_similar_proposer_slashings_same_block [Pre OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_two_bls_changes_of_different_addresses_sam OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - invalid_withdrawal_fail_second_block_payload_isnt_ OK
|
||||
+ [Invalid] EF - Fulu - Sanity - Blocks - slash_and_exit_same_index [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - attestation [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - attester_slashing [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - balance_driven_status_transitions [Preset: mainnet OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - basic_btec_and_el_withdrawal_request_in_same_block OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - basic_btec_before_el_withdrawal_request [Preset: m OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - basic_el_withdrawal_request [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - block_transition_randomized_payload [Preset: mainn OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - bls_change [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - cl_exit_and_el_withdrawal_request_in_same_block [P OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - deposit_and_bls_change [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - deposit_in_block [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - deposit_request_with_same_pubkey_different_withdra OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - deposit_top_up [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - duplicate_attestation_same_block [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - empty_block_transition [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - empty_block_transition_no_tx [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - empty_epoch_transition [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - exit_and_bls_change [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_0 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_1 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_2 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - full_random_operations_3 [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - full_withdrawal_in_epoch_transition [Preset: mainn OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - high_proposer_index [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - historical_batch [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_full_participation_leaking [Pres OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - inactivity_scores_leaking [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - many_partial_withdrawals_in_epoch_transition [Pres OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - mix_blob_tx_and_non_blob_tx [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_proposer_slashings_same_block [ OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - multiple_different_validator_exits_same_block [Pre OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - multiple_el_partial_withdrawal_requests_different_ OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - multiple_el_partial_withdrawal_requests_same_valid OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - one_blob [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - one_blob_two_txs [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - partial_withdrawal_in_epoch_transition [Preset: ma OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - proposer_after_inactive_index [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - proposer_self_slashing [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - proposer_slashing [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - skipped_slots [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - slash_and_exit_diff_index [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__empty [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__full [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee__half [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__empty [Preset: m OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__full [Preset: ma OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - sync_committee_committee_genesis__half [Preset: ma OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - top_up_and_partial_withdrawable_validator [Preset: OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - top_up_to_fully_withdrawn_validator [Preset: mainn OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - voluntary_exit [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - withdrawal_and_switch_to_compounding_request_same_ OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - withdrawal_and_withdrawal_request_same_validator [ OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - withdrawal_success_two_blocks [Preset: mainnet] OK
|
||||
+ [Valid] EF - Fulu - Sanity - Blocks - zero_blob [Preset: mainnet] OK
|
||||
```
|
||||
OK: 73/73 Fail: 0/73 Skip: 0/73
|
||||
## EF - Fulu - Sanity - Slots [Preset: mainnet]
|
||||
```diff
|
||||
+ EF - Fulu - Slots - double_empty_epoch [Preset: mainnet] OK
|
||||
+ EF - Fulu - Slots - empty_epoch [Preset: mainnet] OK
|
||||
+ EF - Fulu - Slots - historical_accumulator [Preset: mainnet] OK
|
||||
+ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey [Preset: mainnet] OK
|
||||
+ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_above_upward_threshold [Preset: OK
|
||||
+ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_below_upward_threshold [Preset: OK
|
||||
+ EF - Fulu - Slots - multiple_pending_deposits_same_pubkey_compounding [Preset: mainnet] OK
|
||||
+ EF - Fulu - Slots - over_epoch_boundary [Preset: mainnet] OK
|
||||
+ EF - Fulu - Slots - pending_consolidation [Preset: mainnet] OK
|
||||
+ EF - Fulu - Slots - slots_1 [Preset: mainnet] OK
|
||||
+ EF - Fulu - Slots - slots_2 [Preset: mainnet] OK
|
||||
```
|
||||
OK: 11/11 Fail: 0/11 Skip: 0/11
|
||||
## EF - Light client - Single merkle proof [Preset: mainnet]
|
||||
```diff
|
||||
+ Light client - Single merkle proof - mainnet/altair/light_client/single_merkle_proof/Beaco OK
|
||||
@ -3331,8 +4112,9 @@ OK: 59/59 Fail: 0/59 Skip: 0/59
|
||||
+ Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK
|
||||
+ Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK
|
||||
+ Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK
|
||||
+ Light client - Single merkle proof - mainnet/fulu/light_client/single_merkle_proof/BeaconB OK
|
||||
```
|
||||
OK: 18/18 Fail: 0/18 Skip: 0/18
|
||||
OK: 19/19 Fail: 0/19 Skip: 0/19
|
||||
## EF - Merkle proof [Preset: mainnet]
|
||||
```diff
|
||||
+ Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK
|
||||
@ -3842,15 +4624,62 @@ OK: 40/40 Fail: 0/40 Skip: 0/40
|
||||
+ ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_slot OK
|
||||
+ ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/simple_blob_data OK
|
||||
ForkChoice - mainnet/deneb/fork_choice/should_override_forkchoice_update/pyspec_tests/shou Skip
|
||||
+ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_attestations_is_grea OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_boost_ OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_without_att OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/ex_ante/pyspec_tests/ex_ante_vanilla OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/chain_no_attestations OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/discard_equivocations_on_at OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/genesis OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_w OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attest OK
|
||||
ForkChoice - mainnet/electra/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip
|
||||
ForkChoice - mainnet/electra/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_ro Skip
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/basic OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/invalid_data_unavailable OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/invalid_incorrect_proof OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/invalid_wrong_blobs_length OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/invalid_wrong_proofs_length OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK
|
||||
ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/on_block_future_block Skip
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/proposer_boost OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_blo OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_sl OK
|
||||
+ ForkChoice - mainnet/electra/fork_choice/on_block/pyspec_tests/simple_blob_data OK
|
||||
ForkChoice - mainnet/electra/fork_choice/should_override_forkchoice_update/pyspec_tests/sh Skip
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_attestations_is_greater OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_boost_not OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest_at OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_without_attest OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/ex_ante/pyspec_tests/ex_ante_vanilla OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/chain_no_attestations OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/discard_equivocations_on_attes OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/genesis OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_weig OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attestati OK
|
||||
ForkChoice - mainnet/fulu/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip
|
||||
ForkChoice - mainnet/fulu/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_root Skip
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/basic OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK
|
||||
ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/on_block_future_block Skip
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_block OK
|
||||
+ ForkChoice - mainnet/fulu/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_slot_ OK
|
||||
ForkChoice - mainnet/fulu/fork_choice/should_override_forkchoice_update/pyspec_tests/shoul Skip
|
||||
```
|
||||
OK: 69/88 Fail: 0/88 Skip: 19/88
|
||||
OK: 106/133 Fail: 0/133 Skip: 27/133
|
||||
## Sync
|
||||
```diff
|
||||
+ Sync - mainnet/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
|
||||
+ Sync - mainnet/capella/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
|
||||
+ Sync - mainnet/deneb/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
|
||||
+ Sync - mainnet/electra/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
|
||||
+ Sync - mainnet/fulu/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
|
||||
```
|
||||
OK: 3/3 Fail: 0/3 Skip: 0/3
|
||||
OK: 5/5 Fail: 0/5 Skip: 0/5
|
||||
|
||||
---TOTAL---
|
||||
OK: 3128/3147 Fail: 0/3147 Skip: 19/3147
|
||||
OK: 3821/3848 Fail: 0/3848 Skip: 27/3848
|
||||

File diff suppressed because it is too large
4
Makefile
@ -235,7 +235,7 @@ local-testnet-minimal:
--remote-validators-count 512 \
--signer-type $(SIGNER_TYPE) \
--deneb-fork-epoch 0 \
--electra-fork-epoch 50 \
--electra-fork-epoch 2 \
--stop-at-epoch 6 \
--disable-htop \
--enable-payload-builder \
@ -264,7 +264,7 @@ local-testnet-mainnet:
--data-dir $@ \
--nodes 2 \
--deneb-fork-epoch 0 \
--electra-fork-epoch 50 \
--electra-fork-epoch 2 \
--stop-at-epoch 6 \
--disable-htop \
--base-port $$(( $(MAINNET_TESTNET_BASE_PORT) + EXECUTOR_NUMBER * 400 + 0 )) \

@ -132,7 +132,7 @@ type
current_sync_committee*: SyncCommittee # [New in Altair]
next_sync_committee*: SyncCommittee # [New in Altair]

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/bellatrix/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/bellatrix/beacon-chain.md#beaconstate
# Memory-representation-equivalent to a Bellatrix BeaconState for in-place SSZ
# reading and writing
BellatrixBeaconStateNoImmutableValidators* = object

@ -14,6 +14,7 @@ import

# Nimble packages
chronos, presto, bearssl/rand,
metrics, metrics/chronos_httpserver,

# Local modules
"."/[beacon_clock, beacon_chain_db, conf, light_client],
@ -86,6 +87,7 @@ type
elManager*: ELManager
restServer*: RestServerRef
keymanagerHost*: ref KeymanagerHost
metricsServer*: Opt[MetricsHttpServerRef]
keymanagerServer*: RestServerRef
keystoreCache*: KeystoreCacheRef
eventBus*: EventBus

@ -61,12 +61,14 @@ func hasBlob*(
quarantine: BlobQuarantine,
slot: Slot,
proposer_index: uint64,
index: BlobIndex): bool =
index: BlobIndex,
kzg_commitment: KzgCommitment): bool =
for blob_sidecar in quarantine.blobs.values:
template block_header: untyped = blob_sidecar.signed_block_header.message
if block_header.slot == slot and
block_header.proposer_index == proposer_index and
blob_sidecar.index == index:
blob_sidecar.index == index and
blob_sidecar.kzg_commitment == kzg_commitment:
return true
false

@ -520,10 +520,6 @@ proc addBackfillBlockData*(
"database corrupt?", clearanceBlock = shortLog(clearanceBlock)
return err(VerifierError.MissingParent)

# dag.clearanceState.setStateRoot(trustedStateRoot)
# TODO (cheatfate): This is last part of previous TODO comment, we should
# set state's `root` to block's `state_root`.

let proposerVerifyTick = Moment.now()

if not(isNil(onStateUpdated)):

@ -2322,7 +2322,7 @@ proc loadExecutionBlockHash*(

from std/packedsets import PackedSet, incl, items

func getValidatorChangeStatuses(
func getBlsToExecutionChangeStatuses(
state: ForkedHashedBeaconState, vis: openArray[ValidatorIndex]):
PackedSet[ValidatorIndex] =
var res: PackedSet[ValidatorIndex]
@ -2338,6 +2338,7 @@ func checkBlsToExecutionChanges(
# Within each fork, BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX
# and never ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX. Latter
# can still happen via reorgs.
#
# Cases:
# 1) unchanged (BLS_WITHDRAWAL_PREFIX or ETH1_ADDRESS_WITHDRAWAL_PREFIX) from
# old to new head.
@ -2352,7 +2353,25 @@ func checkBlsToExecutionChanges(
# Since it tracks head, it's possible reorgs trigger reporting the same
# validator indices multiple times; this is fine.
withState(state):
anyIt( vis, forkyState.data.validators[it].has_eth1_withdrawal_credential)
anyIt(vis, forkyState.data.validators[it].has_eth1_withdrawal_credential)

func getCompoundingStatuses(
state: ForkedHashedBeaconState, vis: openArray[ValidatorIndex]):
PackedSet[ValidatorIndex] =
var res: PackedSet[ValidatorIndex]
withState(state):
for vi in vis:
if forkyState.data.validators[vi].withdrawal_credentials.data[0] !=
COMPOUNDING_WITHDRAWAL_PREFIX:
res.incl vi
res

func checkCompoundingChanges(
state: ForkedHashedBeaconState, vis: PackedSet[ValidatorIndex]): bool =
# Since it tracks head, it's possible reorgs trigger reporting the same
# validator indices multiple times; this is fine.
withState(state):
anyIt(vis, forkyState.data.validators[it].has_compounding_withdrawal_credential)

proc updateHead*(
dag: ChainDAGRef, newHead: BlockRef, quarantine: var Quarantine,
@ -2393,7 +2412,9 @@ proc updateHead*(
lastHeadStateRoot = getStateRoot(dag.headState)
lastHeadMergeComplete = dag.headState.is_merge_transition_complete()
lastHeadKind = dag.headState.kind
lastKnownValidatorsChangeStatuses = getValidatorChangeStatuses(
lastKnownValidatorsChangeStatuses = getBlsToExecutionChangeStatuses(
dag.headState, knownValidators)
lastKnownCompoundingChangeStatuses = getCompoundingStatuses(
dag.headState, knownValidators)

# Start off by making sure we have the right state - updateState will try

@ -2437,6 +2458,11 @@ proc updateHead*(
dag.headState, lastKnownValidatorsChangeStatuses):
dag.vanityLogs.onKnownBlsToExecutionChange()

if dag.vanityLogs.onKnownCompoundingChange != nil and
checkCompoundingChanges(
dag.headState, lastKnownCompoundingChangeStatuses):
dag.vanityLogs.onKnownCompoundingChange()

dag.db.putHeadBlock(newHead.root)

updateBeaconMetrics(dag.headState, dag.head.bid, cache)
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -221,23 +221,17 @@ proc updateExecutionClientHead*(

func getKnownValidatorsForBlsChangeTracking(
self: ConsensusManager, newHead: BlockRef): seq[ValidatorIndex] =
# Ensure that large nodes won't be overloaded by a nice-to-have, but
# Ensure that large nodes won't be overwhelmed by a nice-to-have, but
# inessential cosmetic feature.
const MAX_CHECKED_INDICES = 64
const MAX_CHECKED_INDICES = 32

if newHead.bid.slot.epoch >= self.dag.cfg.CAPELLA_FORK_EPOCH:
var res = newSeqOfCap[ValidatorIndex](min(
len(self.actionTracker.knownValidators), MAX_CHECKED_INDICES))
for vi in self.actionTracker.knownValidators.keys():
res.add vi
if res.len >= MAX_CHECKED_INDICES:
break
res
else:
# It is not possible for any BLS to execution changes, for any validator,
# to have been yet processed.
# https://github.com/nim-lang/Nim/issues/19802
(static(@[]))
var res = newSeqOfCap[ValidatorIndex](min(
len(self.actionTracker.knownValidators), MAX_CHECKED_INDICES))
for vi in self.actionTracker.knownValidators.keys():
res.add vi
if res.len >= MAX_CHECKED_INDICES:
break
res

proc updateHead*(self: var ConsensusManager, newHead: BlockRef) =
## Trigger fork choice and update the DAG with the new head block

@ -293,7 +293,7 @@ func makeAttestationData*(

doAssert current_epoch == epochRef.epoch

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#attestation-data
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#attestation-data
AttestationData(
slot: slot,
index: committee_index.asUInt64,

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Copyright (c) 2022-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -7,9 +7,9 @@

{.push raises: [].}

import
std/os,
chronicles
import chronicles

from std/os import `/`

type
LogProc = proc() {.gcsafe, raises: [].}
@ -38,6 +38,10 @@ type
# in case of chain reorgs around the upgrade.
onUpgradeToElectra*: LogProc

# Gets displayed on a change to compounding for a validator known to the
# node, in a head block.
onKnownCompoundingChange*: LogProc
|
||||
|
||||
# Created by https://beatscribe.com (beatscribe#1008 on Discord)
|
||||
# These need to be the main body of the log not to be reformatted or escaped.
|
||||
|
||||
|
@ -82,11 +82,11 @@ type
|
||||
deposits*: seq[Deposit]
|
||||
hasMissingDeposits*: bool
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#get_eth1_data
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#get_eth1_data
|
||||
func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 =
|
||||
genesis_time + slot * SECONDS_PER_SLOT
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#get_eth1_data
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#get_eth1_data
|
||||
func voting_period_start_time(state: ForkedHashedBeaconState): uint64 =
|
||||
let eth1_voting_period_start_slot =
|
||||
getStateField(state, slot) - getStateField(state, slot) mod
|
||||
@ -94,7 +94,7 @@ func voting_period_start_time(state: ForkedHashedBeaconState): uint64 =
|
||||
compute_time_at_slot(
|
||||
getStateField(state, genesis_time), eth1_voting_period_start_slot)
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#get_eth1_data
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#get_eth1_data
|
||||
func is_candidate_block(cfg: RuntimeConfig,
|
||||
blk: Eth1Block,
|
||||
period_start: uint64): bool =
|
||||
@ -274,7 +274,7 @@ proc trackFinalizedState*(chain: var Eth1Chain,
|
||||
if result:
|
||||
chain.pruneOldBlocks(finalizedStateDepositIndex)
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#get_eth1_data
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#get_eth1_data
|
||||
proc getBlockProposalData*(chain: var Eth1Chain,
|
||||
state: ForkedHashedBeaconState,
|
||||
finalizedEth1Data: Eth1Data,
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
|
||||
|
||||
# Merkle tree helpers
|
||||
# ---------------------------------------------------------------
|
||||
|
@ -423,8 +423,20 @@ proc validateBlobSidecar*(
|
||||
let block_root = hash_tree_root(block_header)
|
||||
if dag.getBlockRef(block_root).isSome():
|
||||
return errIgnore("BlobSidecar: already have block")
|
||||
|
||||
# This adds KZG commitment matching to the spec gossip validation. It's an
|
||||
# IGNORE condition, so it shouldn't affect Nimbus's scoring, and when some
|
||||
# (slashable) double proposals happen with blobs present, without this one
|
||||
# or the other block, or potentially both, won't get its full set of blobs
|
||||
# through gossip validation and have to backfill them later. There is some
|
||||
# cost in slightly more outgoing bandwidth on such double-proposals but it
|
||||
# remains insignificant compared with other bandwidth usage.
|
||||
#
|
||||
# It would be good to fix this more properly, but this has come up often on
|
||||
# Pectra devnet-6.
|
||||
if blobQuarantine[].hasBlob(
|
||||
block_header.slot, block_header.proposer_index, blob_sidecar.index):
|
||||
block_header.slot, block_header.proposer_index, blob_sidecar.index,
|
||||
blob_sidecar.kzg_commitment):
|
||||
return errIgnore("BlobSidecar: already have valid blob from same proposer")
|
||||
|
||||
# [REJECT] The sidecar's inclusion proof is valid as verified by
|
||||
@ -1073,6 +1085,48 @@ proc validateAttestation*(
|
||||
return pool.checkedResult(v.error)
|
||||
v.get()
|
||||
|
||||
if attestation.attester_index > high(ValidatorIndex).uint64:
|
||||
return errReject("SingleAttestation: attester index too high")
|
||||
let validator_index = attestation.attester_index.ValidatorIndex
|
||||
|
||||
# [REJECT] The signature of `attestation` is valid.
|
||||
# In the spec, is_valid_indexed_attestation is used to verify the signature -
|
||||
# here, we do a batch verification instead
|
||||
var sigchecked = false
|
||||
var sig: CookedSig
|
||||
template doSigCheck: untyped =
|
||||
let
|
||||
fork = pool.dag.forkAtEpoch(attestation.data.slot.epoch)
|
||||
pubkey = pool.dag.validatorKey(validator_index).valueOr:
|
||||
# can't happen, in theory, because we checked the aggregator index above
|
||||
return errIgnore("Attestation: cannot find validator pubkey")
|
||||
|
||||
sigchecked = true
|
||||
sig =
|
||||
if checkSignature:
|
||||
# Attestation signatures are batch-verified
|
||||
let deferredCrypto = batchCrypto
|
||||
.scheduleAttestationCheck(
|
||||
fork, attestation.data, pubkey,
|
||||
attestation.signature)
|
||||
if deferredCrypto.isErr():
|
||||
return pool.checkedReject(deferredCrypto.error)
|
||||
|
||||
let (cryptoFut, sig) = deferredCrypto.get()
|
||||
# Await the crypto check
|
||||
let x = (await cryptoFut)
|
||||
case x
|
||||
of BatchResult.Invalid:
|
||||
return pool.checkedReject("Attestation: invalid signature")
|
||||
of BatchResult.Timeout:
|
||||
beacon_attestations_dropped_queue_full.inc()
|
||||
return errIgnore("Attestation: timeout checking signature")
|
||||
of BatchResult.Valid:
|
||||
sig # keep going only in this case
|
||||
else:
|
||||
attestation.signature.load().valueOr:
|
||||
return pool.checkedReject("Attestation: unable to load signature")
|
||||
|
||||
# The following rule follows implicitly from that we clear out any
|
||||
# unviable blocks from the chain dag:
|
||||
#
|
||||
@ -1080,36 +1134,17 @@ proc validateAttestation*(
|
||||
# defined by attestation.data.beacon_block_root -- i.e.
|
||||
# get_checkpoint_block(store, attestation.data.beacon_block_root,
|
||||
# store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root
|
||||
var sigchecked = false
|
||||
var sig: CookedSig
|
||||
let shufflingRef =
|
||||
pool.dag.findShufflingRef(target.blck.bid, target.slot.epoch).valueOr:
|
||||
# getShufflingRef might be slow here, so first try to eliminate by
|
||||
# signature check
|
||||
sig = attestation.signature.load().valueOr:
|
||||
return pool.checkedReject("SingleAttestation: unable to load signature")
|
||||
sigchecked = true
|
||||
doSigCheck()
|
||||
pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
|
||||
# Target is verified - shouldn't happen
|
||||
warn "No shuffling for SingleAttestation - report bug",
|
||||
attestation = shortLog(attestation), target = shortLog(target)
|
||||
return errIgnore("SingleAttestation: no shuffling")
|
||||
|
||||
if attestation.attester_index > high(ValidatorIndex).uint64:
|
||||
return errReject("SingleAttestation: attester index too high")
|
||||
let validator_index = attestation.attester_index.ValidatorIndex
|
||||
|
||||
# [REJECT] The attester is a member of the committee -- i.e.
|
||||
# attestation.attester_index in
|
||||
# get_beacon_committee(state, attestation.data.slot, index).
|
||||
let
|
||||
beacon_committee = get_beacon_committee(
|
||||
shufflingRef, attestation.data.slot,
|
||||
attestation.committee_index.CommitteeIndex)
|
||||
index_in_committee = find(beacon_committee, validator_index)
|
||||
if index_in_committee < 0:
|
||||
return pool.checkedReject("SingleAttestation: attester index not in beacon committee")
|
||||
|
||||
# [REJECT] The committee index is within the expected range -- i.e.
|
||||
# data.index < get_committee_count_per_slot(state, data.target.epoch).
|
||||
let committee_index = block:
|
||||
@ -1119,6 +1154,16 @@ proc validateAttestation*(
|
||||
"Attestation: committee index not within expected range")
|
||||
idx.get()
|
||||
|
||||
# [REJECT] The attester is a member of the committee -- i.e.
|
||||
# attestation.attester_index in
|
||||
# get_beacon_committee(state, attestation.data.slot, index).
|
||||
let
|
||||
beacon_committee = get_beacon_committee(
|
||||
shufflingRef, attestation.data.slot, committee_index)
|
||||
index_in_committee = find(beacon_committee, validator_index)
|
||||
if index_in_committee < 0:
|
||||
return pool.checkedReject("SingleAttestation: attester index not in beacon committee")
|
||||
|
||||
# [REJECT] The attestation is for the correct subnet -- i.e.
|
||||
# compute_subnet_for_attestation(committees_per_slot,
|
||||
# attestation.data.slot, attestation.data.index) == subnet_id, where
|
||||
@ -1136,9 +1181,14 @@ proc validateAttestation*(
|
||||
if not sigchecked:
|
||||
# findShufflingRef did find a cached ShufflingRef, which means the early
|
||||
# signature check was skipped, so do it now.
|
||||
sig = attestation.signature.load().valueOr:
|
||||
return pool.checkedReject("SingleAttestation: unable to load signature")
|
||||
doSigCheck()
|
||||
|
||||
# Only valid attestations go in the list, which keeps validator_index
|
||||
# in range
|
||||
if not (pool.nextAttestationEpoch.lenu64 > validator_index.uint64):
|
||||
pool.nextAttestationEpoch.setLen(validator_index.int + 1)
|
||||
pool.nextAttestationEpoch[validator_index].subnet =
|
||||
attestation.data.target.epoch + 1
|
||||
ok((validator_index, beacon_committee.len, index_in_committee, sig))
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
|
||||
@ -1232,14 +1282,16 @@ proc validateAggregate*(
|
||||
# data.index < get_committee_count_per_slot(state, data.target.epoch).
|
||||
let committee_index = block:
|
||||
when signedAggregateAndProof is electra.SignedAggregateAndProof:
|
||||
let idx = get_committee_index_one(aggregate.committee_bits)
|
||||
let agg_idx = get_committee_index_one(aggregate.committee_bits).valueOr:
|
||||
return pool.checkedReject("Aggregate: got multiple committee bits")
|
||||
let idx = shufflingRef.get_committee_index(agg_idx.uint64)
|
||||
elif signedAggregateAndProof is phase0.SignedAggregateAndProof:
|
||||
let idx = shufflingRef.get_committee_index(aggregate.data.index)
|
||||
else:
|
||||
static: doAssert false
|
||||
if idx.isErr():
|
||||
return pool.checkedReject(
|
||||
"Attestation: committee index not within expected range")
|
||||
"Aggregate: committee index not within expected range")
|
||||
idx.get()
|
||||
if not aggregate.aggregation_bits.compatible_with_shuffling(
|
||||
shufflingRef, slot, committee_index):
|
||||
@ -1504,7 +1556,7 @@ proc validateVoluntaryExit*(

ok()

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/p2p-interface.md#sync_committee_subnet_id
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/p2p-interface.md#sync_committee_subnet_id
proc validateSyncCommitteeMessage*(
dag: ChainDAGRef,
quarantine: ref Quarantine,

@ -94,7 +94,7 @@ typedef struct ETHConsensusConfig ETHConsensusConfig;
* based on the given `config.yaml` file content - If successful.
* @return `NULL` - If the given `config.yaml` is malformed or incompatible.
*
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/configs/README.md
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/configs/README.md
*/
ETH_RESULT_USE_CHECK
ETHConsensusConfig *_Nullable ETHConsensusConfigCreateFromYaml(const char *configFileContent);

@ -150,9 +150,9 @@ typedef struct ETHBeaconState ETHBeaconState;
* @return `NULL` - If the given `sszBytes` is malformed.
*
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/configs/README.md
*/
ETH_RESULT_USE_CHECK

@ -142,10 +142,10 @@ proc ETHBeaconStateCreateFromSsz(
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/configs/README.md
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/configs/README.md
let
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
return nil

@ -2267,7 +2267,7 @@ proc getPersistentNetKeys*(
func gossipId(
data: openArray[byte], phase0Prefix, topic: string): seq[byte] =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/p2p-interface.md#topics-and-messages
const MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00]
let messageDigest = withEth2Hash:
h.update(MESSAGE_DOMAIN_VALID_SNAPPY)
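For context on `gossipId`: the Altair p2p spec derives the message id of a valid snappy-compressed message as SHA256(MESSAGE_DOMAIN_VALID_SNAPPY ++ uint64-LE(len(topic)) ++ topic ++ decompressed_data), truncated to 20 bytes. A rough sketch of just that rule (illustrative only; the real `gossipId` also handles the phase0, topic-less variant and the invalid-snappy domain):

```nim
import nimcrypto/[sha2, hash], std/endians

proc altairMessageId(topic: string, decompressed: openArray[byte]): array[20, byte] =
  const MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00]
  var ctx: sha256
  ctx.init()
  ctx.update(MESSAGE_DOMAIN_VALID_SNAPPY)
  # uint_to_bytes in the spec is little-endian
  var topicLen: array[8, byte]
  var n = uint64(topic.len)
  littleEndian64(addr topicLen[0], addr n)
  ctx.update(topicLen)
  ctx.update(topic)
  ctx.update(decompressed)
  let digest = ctx.finish()
  copyMem(addr result[0], unsafeAddr digest.data[0], 20)  # first 20 bytes
```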
@ -2635,7 +2635,7 @@ proc loadCgcnetMetadataAndEnr*(node: Eth2Node, cgcnets: CgcCount) =
debug "Updated ENR cgc", cgcnets

proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/validator.md#sync-committee-subnet-stability
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/validator.md#sync-committee-subnet-stability
if node.metadata.syncnets == syncnets:
return

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -340,9 +340,13 @@ elif const_preset == "mainnet":
for network in [mainnetMetadata, sepoliaMetadata, holeskyMetadata]:
checkForkConsistency(network.cfg)

for network in [sepoliaMetadata, holeskyMetadata]:
doAssert network.cfg.ELECTRA_FORK_EPOCH < FAR_FUTURE_EPOCH

doAssert mainnetMetadata.cfg.DENEB_FORK_EPOCH < FAR_FUTURE_EPOCH
doAssert mainnetMetadata.cfg.ELECTRA_FORK_EPOCH == FAR_FUTURE_EPOCH

for network in [mainnetMetadata, sepoliaMetadata, holeskyMetadata]:
doAssert network.cfg.DENEB_FORK_EPOCH < FAR_FUTURE_EPOCH
doAssert network.cfg.ELECTRA_FORK_EPOCH == FAR_FUTURE_EPOCH
doAssert network.cfg.FULU_FORK_EPOCH == FAR_FUTURE_EPOCH
doAssert ConsensusFork.high == ConsensusFork.Fulu

@ -8,7 +8,7 @@
{.push raises: [].}

import
std/[os, random, terminal, times],
std/[os, random, terminal, times, exitprocs],
chronos, chronicles,
metrics, metrics/chronos_httpserver,
stew/[byteutils, io2],
@ -151,7 +151,8 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs =
onUpgradeToCapella: capellaColor,
onKnownBlsToExecutionChange: capellaBlink,
onUpgradeToDeneb: denebColor,
onUpgradeToElectra: electraColor)
onUpgradeToElectra: electraColor,
onKnownCompoundingChange: electraBlink)
of StdoutLogKind.NoColors:
VanityLogs(
onMergeTransitionBlock: bellatrixMono,
@ -159,7 +160,8 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs =
onUpgradeToCapella: capellaMono,
onKnownBlsToExecutionChange: capellaMono,
onUpgradeToDeneb: denebMono,
onUpgradeToElectra: electraMono)
onUpgradeToElectra: electraMono,
onKnownCompoundingChange: electraMono)
of StdoutLogKind.Json, StdoutLogKind.None:
VanityLogs(
onMergeTransitionBlock:
@ -173,7 +175,9 @@ func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs =
onUpgradeToDeneb:
(proc() = notice "🐟 Proto-Danksharding is ON 🐟"),
onUpgradeToElectra:
(proc() = notice "🦒 Compounding is ON 🦒"))
(proc() = notice "🦒 Compounding is available 🦒"),
onKnownCompoundingChange:
(proc() = notice "🦒 Compounding is activated 🦒"))

func getVanityMascot(consensusFork: ConsensusFork): string =
case consensusFork
@ -381,14 +385,11 @@ proc initFullNode(
else:
dag.tail.slot

proc getUntrustedBackfillSlot(): Slot =
func getUntrustedBackfillSlot(): Slot =
if clist.tail.isSome():
clist.tail.get().blck.slot
else:
getLocalWallSlot()

func getUntrustedFrontfillSlot(): Slot =
getFirstSlotAtFinalizedEpoch()
dag.tail.slot

func getFrontfillSlot(): Slot =
max(dag.frontfill.get(BlockId()).slot, dag.horizon)
@ -531,7 +532,7 @@ proc initFullNode(
dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
SyncQueueKind.Backward, getLocalHeadSlot,
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getUntrustedBackfillSlot,
getUntrustedFrontfillSlot, isWithinWeakSubjectivityPeriod,
getFrontfillSlot, isWithinWeakSubjectivityPeriod,
clistPivotSlot, untrustedBlockVerifier, maxHeadAge = 0,
shutdownEvent = node.shutdownEvent,
flags = syncManagerFlags)
@ -994,7 +995,7 @@ proc init*(T: type BeaconNode,
withState(dag.headState):
getValidator(forkyState().data.validators.asSeq(), pubkey)

func getCapellaForkVersion(): Opt[Version] =
func getCapellaForkVersion(): Opt[presets.Version] =
Opt.some(cfg.CAPELLA_FORK_VERSION)

func getDenebForkEpoch(): Opt[Epoch] =
@ -2059,7 +2060,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg, idx)))

# sync_committee_contribution_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
node.network.addAsyncValidator(
getSyncCommitteeContributionAndProofTopic(digest), proc (
msg: SignedContributionAndProof
@ -2069,7 +2070,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg)))

when consensusFork >= ConsensusFork.Capella:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/p2p-interface.md#bls_to_execution_change
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/p2p-interface.md#bls_to_execution_change
node.network.addAsyncValidator(
getBlsToExecutionChangeTopic(digest), proc (
msg: SignedBLSToExecutionChange
@ -2107,6 +2108,8 @@ proc stop(node: BeaconNode) =
except CatchableError as exc:
warn "Couldn't stop network", msg = exc.msg

waitFor node.metricsServer.stopMetricsServer()

node.attachedValidators[].slashingProtection.close()
node.attachedValidators[].close()
node.db.close()
@ -2162,7 +2165,7 @@ var gPidFile: string
proc createPidFile(filename: string) {.raises: [IOError].} =
writeFile filename, $os.getCurrentProcessId()
gPidFile = filename
addQuitProc proc {.noconv.} = discard io2.removeFile(gPidFile)
addExitProc proc {.noconv.} = discard io2.removeFile(gPidFile)
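The `createPidFile` hunk above swaps the deprecated `addQuitProc` for `addExitProc` from `std/exitprocs` (hence the new `exitprocs` import earlier in this commit). A self-contained sketch of the replacement API, with the file removal replaced by a stand-in:

```nim
import std/exitprocs

proc cleanup() {.noconv.} =
  echo "removing pid file"  # stand-in for io2.removeFile(gPidFile)

addExitProc(cleanup)
echo "doing work"
# cleanup() runs automatically when the process exits normally
```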
proc initializeNetworking(node: BeaconNode) {.async.} =
node.installMessageValidators()
@ -2374,21 +2377,6 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai

config.createDumpDirs()

if config.metricsEnabled:
let metricsAddress = config.metricsAddress
notice "Starting metrics HTTP server",
url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics"
try:
startMetricsHttpServer($metricsAddress, config.metricsPort)
except CatchableError as exc:
raise exc
except Exception as exc:
raiseAssert exc.msg # TODO fix metrics

# Nim GC metrics (for the main thread) will be collected in onSecond(), but
# we disable piggy-backing on other metrics here.
setSystemMetricsAutomaticUpdate(false)

# There are no managed event loops in here, to do a graceful shutdown, but
# letting the default Ctrl+C handler exit is safe, since we only read from
# the db.
@ -2431,6 +2419,15 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai

let node = waitFor BeaconNode.init(rng, config, metadata)

let metricsServer = (waitFor config.initMetricsServer()).valueOr:
return

# Nim GC metrics (for the main thread) will be collected in onSecond(), but
# we disable piggy-backing on other metrics here.
setSystemMetricsAutomaticUpdate(false)

node.metricsServer = metricsServer

if bnStatus == BeaconNodeStatus.Stopping:
return

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -16,7 +16,7 @@ import
# Nimble packages
chronos, confutils, presto, toml_serialization, metrics,
chronicles, chronicles/helpers as chroniclesHelpers, chronicles/topics_registry,
stew/io2,
stew/io2, metrics, metrics/chronos_httpserver,

# Local modules
./spec/[helpers, keystore],
@ -448,6 +448,40 @@ proc initKeymanagerServer*(

KeymanagerInitResult(server: keymanagerServer, token: token)

proc initMetricsServer*(
config: AnyConf
): Future[Result[Opt[MetricsHttpServerRef], string]] {.
async: (raises: [CancelledError]).} =
if config.metricsEnabled:
let
metricsAddress = config.metricsAddress
metricsPort = config.metricsPort
url = "http://" & $metricsAddress & ":" & $metricsPort & "/metrics"

info "Starting metrics HTTP server", url = url

let server = MetricsHttpServerRef.new($metricsAddress, metricsPort).valueOr:
fatal "Could not start metrics HTTP server",
url = url, reason = error
return err($error)

try:
await server.start()
except MetricsError as exc:
fatal "Could not start metrics HTTP server",
url = url, reason = exc.msg
return err(exc.msg)

ok(Opt.some(server))
else:
ok(Opt.none(MetricsHttpServerRef))

proc stopMetricsServer*(v: Opt[MetricsHttpServerRef]) {.
async: (raises: []).} =
if v.isSome():
info "Shutting down metrics HTTP server"
await v.get().close()
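The two new shared helpers above replace the separate metrics-server setup code in the beacon node and the validator client. A sketch of the intended calling pattern (assuming only what the hunk shows: an `AnyConf` with `metricsEnabled`/`metricsAddress`/`metricsPort`, and the chronos `async: (raises: ...)` convention):

```nim
proc runWithMetrics(config: AnyConf) {.async: (raises: [CancelledError]).} =
  # initMetricsServer returns err() on startup failure (already logged
  # with `fatal`), and Opt.none when metrics are disabled in the config.
  let metricsServer = (await config.initMetricsServer()).valueOr:
    return
  try:
    discard  # ... main processing loop ...
  finally:
    # stopMetricsServer is a no-op for Opt.none, so shutdown code does
    # not need to check whether metrics were enabled.
    await metricsServer.stopMetricsServer()
```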
proc quitDoppelganger*() =
# Avoid colliding with
# https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Process%20Exit%20Codes

@ -1,5 +1,5 @@
# nimbus_signing_node
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -414,7 +414,7 @@ proc asyncInit(sn: SigningNodeRef) {.async: (raises: [SigningNodeError]).} =
raise newException(SigningNodeError, "")
SigningNodeServer(kind: SigningNodeKind.NonSecure, nserver: res.get())

proc asyncRun*(sn: SigningNodeRef) {.async: (raises: []).} =
proc asyncRun*(sn: SigningNodeRef) {.async: (raises: [SigningNodeError]).} =
sn.runKeystoreCachePruningLoopFut =
runKeystoreCachePruningLoop(sn.keystoreCache)
sn.installApiHandlers()
@ -429,6 +429,11 @@ proc asyncRun*(sn: SigningNodeRef) {.async: (raises: []).} =
warn "Main loop failed with unexpected error", err_name = $exc.name,
reason = $exc.msg

# This is a trick to keep `asyncraises` from generating a warning:
# No exceptions possible with this operation, `error` always returns nil.
if false:
raise newException(SigningNodeError, "This error should never happen")

debug "Stopping main processing loop"
var pending: seq[Future[void]]
if not(sn.runKeystoreCachePruningLoopFut.finished()):

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -161,38 +161,6 @@ proc initClock(
current_slot = currentSlot, current_epoch = currentEpoch
res

proc initMetrics(
vc: ValidatorClientRef
): Future[bool] {.async: (raises: [CancelledError]).} =
if vc.config.metricsEnabled:
let
metricsAddress = vc.config.metricsAddress
metricsPort = vc.config.metricsPort
url = "http://" & $metricsAddress & ":" & $metricsPort & "/metrics"
info "Starting metrics HTTP server", url = url
let server =
block:
let res = MetricsHttpServerRef.new($metricsAddress, metricsPort)
if res.isErr():
error "Could not start metrics HTTP server", url = url,
error_msg = res.error()
return false
res.get()
vc.metricsServer = Opt.some(server)
try:
await server.start()
except MetricsError as exc:
error "Could not start metrics HTTP server", url = url,
error_msg = exc.msg, error_name = exc.name
return false
true

proc shutdownMetrics(vc: ValidatorClientRef) {.async: (raises: []).} =
if vc.config.metricsEnabled:
if vc.metricsServer.isSome():
info "Shutting down metrics HTTP server"
await vc.metricsServer.get().close()

proc shutdownSlashingProtection(vc: ValidatorClientRef) =
info "Closing slashing protection", path = vc.config.validatorsDir()
vc.attachedValidators[].slashingProtection.close()
@ -351,7 +319,7 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {.

vc.beaconClock = await vc.initClock()

if not(await initMetrics(vc)):
vc.metricsServer = (await vc.config.initMetricsServer()).valueOr:
raise newException(ValidatorClientError,
"Could not initialize metrics server")

@ -368,7 +336,7 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {.
vc.attachedValidators = validatorPool

if not(await initValidators(vc)):
await vc.shutdownMetrics()
await vc.metricsServer.stopMetricsServer()
raise newException(ValidatorClientError,
"Could not initialize local validators")

@ -432,7 +400,7 @@ proc asyncInit(vc: ValidatorClientRef): Future[ValidatorClientRef] {.
)
except CancelledError:
debug "Initialization process interrupted"
await vc.metricsServer.stopMetricsServer()
vc.shutdownSlashingProtection()
return

@ -522,7 +490,7 @@ proc asyncRun*(
except CancelledError:
debug "Main loop interrupted"

await vc.shutdownMetrics()
await vc.metricsServer.stopMetricsServer()
vc.shutdownSlashingProtection()

if doppelEventFut.completed():
@ -1450,32 +1450,13 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
# Since our validation logic supports batch processing, we will submit all
# attestations for validation.
for attestation in dres.get():
when AttestationType is electra.Attestation:
let attester_indices = toSeq(
get_attesting_indices(node.dag, attestation, true))
if len(attester_indices) != 1:
return RestApiResponse.jsonError(Http400,
InvalidAttestationObjectError,
$dres.error)
let committee_index = get_committee_index_one(
attestation.committee_bits).valueOr:
return RestApiResponse.jsonError(Http400,
InvalidAttestationObjectError,
$dres.error)
pendingAttestations.add(node.router.routeAttestation(
SingleAttestation(
committee_index: committee_index.distinctBase,
attester_index: attester_indices[0].uint64,
data: attestation.data,
signature: attestation.signature)))
else:
pendingAttestations.add(node.router.routeAttestation(attestation))
pendingAttestations.add(node.router.routeAttestation(attestation))

case consensusVersion.get():
of ConsensusFork.Phase0 .. ConsensusFork.Deneb:
decodeAttestations(phase0.Attestation)
of ConsensusFork.Electra .. ConsensusFork.Fulu:
decodeAttestations(electra.Attestation)
decodeAttestations(electra.SingleAttestation)

let failures =
block:

@ -43,8 +43,6 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
Base10.toString(MIN_DEPOSIT_AMOUNT),
MAX_EFFECTIVE_BALANCE:
Base10.toString(MAX_EFFECTIVE_BALANCE),
MAX_EFFECTIVE_BALANCE_ELECTRA:
Base10.toString(static(MAX_EFFECTIVE_BALANCE_ELECTRA.uint64)),
EFFECTIVE_BALANCE_INCREMENT:
Base10.toString(EFFECTIVE_BALANCE_INCREMENT),
MIN_ATTESTATION_INCLUSION_DELAY:
@ -92,7 +90,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VOLUNTARY_EXITS:
Base10.toString(MAX_VOLUNTARY_EXITS),

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/mainnet/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/mainnet/altair.yaml
INACTIVITY_PENALTY_QUOTIENT_ALTAIR:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR),
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR:
@ -108,7 +106,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
UPDATE_TIMEOUT:
Base10.toString(UPDATE_TIMEOUT),

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/mainnet/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/mainnet/bellatrix.yaml
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX),
MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX:
@ -124,7 +122,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_EXTRA_DATA_BYTES:
Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)),

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/mainnet/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/mainnet/capella.yaml
MAX_BLS_TO_EXECUTION_CHANGES:
Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)),
MAX_WITHDRAWALS_PER_PAYLOAD:
@ -139,8 +137,6 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
Base10.toString(MAX_BLOB_COMMITMENTS_PER_BLOCK),
MAX_BLOBS_PER_BLOCK:
Base10.toString(MAX_BLOBS_PER_BLOCK),
MAX_BLOBS_PER_BLOCK_ELECTRA:
Base10.toString(MAX_BLOBS_PER_BLOCK_ELECTRA),
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH:
Base10.toString(uint64(KZG_COMMITMENT_INCLUSION_PROOF_DEPTH)),

@ -325,6 +321,54 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
Base10.toString(uint64(TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)),
SYNC_COMMITTEE_SUBNET_COUNT:
Base10.toString(uint64(SYNC_COMMITTEE_SUBNET_COUNT)),

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/electra/beacon-chain.md
UNSET_DEPOSIT_REQUESTS_START_INDEX:
Base10.toString(UNSET_DEPOSIT_REQUESTS_START_INDEX),
FULL_EXIT_REQUEST_AMOUNT:
Base10.toString(FULL_EXIT_REQUEST_AMOUNT),
COMPOUNDING_WITHDRAWAL_PREFIX:
to0xHex([byte(COMPOUNDING_WITHDRAWAL_PREFIX)]),
DEPOSIT_REQUEST_TYPE:
to0xHex([byte(DEPOSIT_REQUEST_TYPE)]),
WITHDRAWAL_REQUEST_TYPE:
to0xHex([byte(WITHDRAWAL_REQUEST_TYPE)]),
CONSOLIDATION_REQUEST_TYPE:
to0xHex([byte(CONSOLIDATION_REQUEST_TYPE)]),
MIN_ACTIVATION_BALANCE:
Base10.toString(uint64(MIN_ACTIVATION_BALANCE)),
MAX_EFFECTIVE_BALANCE_ELECTRA:
Base10.toString(uint64(MAX_EFFECTIVE_BALANCE_ELECTRA)),
MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA:
Base10.toString(MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA),
WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA:
Base10.toString(WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA),
PENDING_DEPOSITS_LIMIT:
Base10.toString(PENDING_DEPOSITS_LIMIT),
PENDING_PARTIAL_WITHDRAWALS_LIMIT:
Base10.toString(PENDING_PARTIAL_WITHDRAWALS_LIMIT),
PENDING_CONSOLIDATIONS_LIMIT:
Base10.toString(PENDING_CONSOLIDATIONS_LIMIT),
MAX_ATTESTER_SLASHINGS_ELECTRA:
Base10.toString(MAX_ATTESTER_SLASHINGS_ELECTRA),
MAX_ATTESTATIONS_ELECTRA:
Base10.toString(MAX_ATTESTATIONS_ELECTRA),
MAX_DEPOSIT_REQUESTS_PER_PAYLOAD:
Base10.toString(uint64(MAX_DEPOSIT_REQUESTS_PER_PAYLOAD)),
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD:
Base10.toString(uint64(MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD)),
MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD:
Base10.toString(MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD),
MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP:
Base10.toString(uint64(MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP)),
MAX_PENDING_DEPOSITS_PER_EPOCH:
Base10.toString(uint64(MAX_PENDING_DEPOSITS_PER_EPOCH)),
MAX_BLOBS_PER_BLOCK_ELECTRA:
Base10.toString(uint64(MAX_BLOBS_PER_BLOCK_ELECTRA)),
MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA:
Base10.toString(cfg.MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA),
MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT:
Base10.toString(cfg.MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT)
)
)
cachedDepositContract =
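With the hunk above, the `/eth/v1/config/spec` response gains the Electra entries. An abridged, illustrative sample of what a client might see (values are the mainnet defaults from the consensus spec; treat the exact set and formatting as an assumption, not the authoritative response):

```json
{
  "data": {
    "UNSET_DEPOSIT_REQUESTS_START_INDEX": "18446744073709551615",
    "FULL_EXIT_REQUEST_AMOUNT": "0",
    "COMPOUNDING_WITHDRAWAL_PREFIX": "0x02",
    "DEPOSIT_REQUEST_TYPE": "0x00",
    "WITHDRAWAL_REQUEST_TYPE": "0x01",
    "CONSOLIDATION_REQUEST_TYPE": "0x02",
    "MIN_ACTIVATION_BALANCE": "32000000000",
    "MAX_EFFECTIVE_BALANCE_ELECTRA": "2048000000000",
    "MAX_BLOBS_PER_BLOCK_ELECTRA": "9"
  }
}
```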
@ -135,10 +135,10 @@ template `+`*(a: TimeDiff, b: Duration): TimeDiff =
const
# Offsets from the start of the slot to when the corresponding message should
# be sent
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#attesting
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#attesting
attestationSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#broadcast-aggregate
aggregateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#prepare-sync-committee-message
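A worked example of the two offsets in this hunk, assuming mainnet timing (12-second slots) and the spec's `INTERVALS_PER_SLOT = 3`: attestations go out one third into the slot, aggregates two thirds in.

```nim
const
  NANOSECONDS_PER_SLOT = 12_000_000_000'i64  # mainnet: 12 s slots
  INTERVALS_PER_SLOT = 3

let
  attestationOffsetNs = NANOSECONDS_PER_SLOT div INTERVALS_PER_SLOT    # 1/3 slot
  aggregateOffsetNs = NANOSECONDS_PER_SLOT * 2 div INTERVALS_PER_SLOT  # 2/3 slot

doAssert attestationOffsetNs == 4_000_000_000'i64  # attest at +4 s
doAssert aggregateOffsetNs == 8_000_000_000'i64    # aggregate at +8 s
```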
@ -400,7 +400,7 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/bellatrix/beacon-chain.md#modified-slash_validator
proc slash_validator*(
cfg: RuntimeConfig, state: var ForkyBeaconState,
slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo,
@ -935,7 +935,7 @@ func get_base_reward_per_increment*(
get_base_reward_per_increment_sqrt(
integer_squareroot(distinctBase(total_active_balance)))

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#get_base_reward
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#get_base_reward
func get_base_reward(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState | electra.BeaconState | fulu.BeaconState,
@ -1050,7 +1050,7 @@ proc check_attestation*(

ok()

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
proc check_bls_to_execution_change*(
genesisFork: Fork,
state: capella.BeaconState | deneb.BeaconState | electra.BeaconState |
@ -2286,18 +2286,6 @@ func upgrade_to_fulu*(
blob_gas_used: pre.latest_execution_payload_header.blob_gas_used,
excess_blob_gas: pre.latest_execution_payload_header.excess_blob_gas)

var max_exit_epoch = FAR_FUTURE_EPOCH
for v in pre.validators:
if v.exit_epoch != FAR_FUTURE_EPOCH:
max_exit_epoch =
if max_exit_epoch == FAR_FUTURE_EPOCH:
v.exit_epoch
else:
max(max_exit_epoch, v.exit_epoch)
if max_exit_epoch == FAR_FUTURE_EPOCH:
max_exit_epoch = get_current_epoch(pre)
let earliest_exit_epoch = max_exit_epoch + 1

let post = (ref fulu.BeaconState)(
# Versioning
genesis_time: pre.genesis_time,
@ -2358,54 +2346,19 @@ func upgrade_to_fulu*(
historical_summaries: pre.historical_summaries,

# [New in Electra:EIP6110]
deposit_requests_start_index: UNSET_DEPOSIT_REQUESTS_START_INDEX,
deposit_requests_start_index: pre.deposit_requests_start_index,

# [New in Electra:EIP7251]
deposit_balance_to_consume: 0.Gwei,
exit_balance_to_consume: 0.Gwei,
earliest_exit_epoch: earliest_exit_epoch,
consolidation_balance_to_consume: 0.Gwei,
earliest_consolidation_epoch:
compute_activation_exit_epoch(get_current_epoch(pre))

# pending_balance_deposits, pending_partial_withdrawals, and
# pending_consolidations are default empty lists
deposit_balance_to_consume: pre.deposit_balance_to_consume,
exit_balance_to_consume: pre.exit_balance_to_consume,
earliest_exit_epoch: pre.earliest_exit_epoch,
consolidation_balance_to_consume: pre.consolidation_balance_to_consume,
earliest_consolidation_epoch: pre.earliest_consolidation_epoch,
pending_deposits: pre.pending_deposits,
pending_partial_withdrawals: pre.pending_partial_withdrawals,
pending_consolidations: pre.pending_consolidations
)

post.exit_balance_to_consume =
get_activation_exit_churn_limit(cfg, post[], cache)
post.consolidation_balance_to_consume =
get_consolidation_churn_limit(cfg, post[], cache)

# [New in Electra:EIP7251]
# add validators that are not yet active to pending balance deposits
var pre_activation: seq[(Epoch, uint64)]
for index, validator in post.validators:
if validator.activation_epoch == FAR_FUTURE_EPOCH:
pre_activation.add((validator.activation_eligibility_epoch, index.uint64))
sort(pre_activation)

for (_, index) in pre_activation:
let balance = post.balances.item(index)
post.balances[index] = 0.Gwei
let validator = addr post.validators.mitem(index)
validator[].effective_balance = 0.Gwei
validator[].activation_eligibility_epoch = FAR_FUTURE_EPOCH
# Use bls.G2_POINT_AT_INFINITY as a signature field placeholder and
# GENESIS_SLOT to distinguish from a pending deposit request
discard post.pending_deposits.add PendingDeposit(
pubkey: validator[].pubkey,
withdrawal_credentials: validator[].withdrawal_credentials,
amount: balance,
signature: ValidatorSig.infinity,
slot: GENESIS_SLOT)

# Ensure early adopters of compounding credentials go through the activation
# churn
for index, validator in post.validators:
if has_compounding_withdrawal_credential(validator):
queue_excess_active_balance(post[], index.uint64)

post
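The hunk above changes `upgrade_to_fulu` from recomputing the EIP-7251 fields (as the Electra upgrade does for a pre-Electra state) to copying them verbatim from the Electra pre-state, which already has them. For reference, a standalone sketch of the deleted `earliest_exit_epoch` computation, extracted from the removed lines:

```nim
const FAR_FUTURE_EPOCH = high(uint64)

func earliestExitEpoch(exitEpochs: openArray[uint64], currentEpoch: uint64): uint64 =
  ## Max exit epoch among validators already exiting, else the current
  ## epoch; the next usable exit epoch is one past that.
  var maxExitEpoch = FAR_FUTURE_EPOCH
  for e in exitEpochs:
    if e != FAR_FUTURE_EPOCH:
      maxExitEpoch =
        if maxExitEpoch == FAR_FUTURE_EPOCH: e
        else: max(maxExitEpoch, e)
  if maxExitEpoch == FAR_FUTURE_EPOCH:
    maxExitEpoch = currentEpoch
  maxExitEpoch + 1

doAssert earliestExitEpoch([FAR_FUTURE_EPOCH, 10'u64, 12'u64], 5) == 13
doAssert earliestExitEpoch([FAR_FUTURE_EPOCH], 5) == 6  # nobody exiting yet
```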
func latest_block_root*(state: ForkyBeaconState, state_root: Eth2Digest):

@ -40,7 +40,7 @@ static:
doAssert ord(TIMELY_HEAD_FLAG_INDEX) == 2

const
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#incentivization-weights
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#incentivization-weights
TIMELY_SOURCE_WEIGHT* = 14
TIMELY_TARGET_WEIGHT* = 26
TIMELY_HEAD_WEIGHT* = 14
@ -96,7 +96,7 @@ type
sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE]
sync_committee_signature*: TrustedSig

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#synccommittee
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#synccommittee
SyncCommittee* = object
pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey]
aggregate_pubkey*: ValidatorPubKey

@ -74,7 +74,7 @@ export
tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto,
digest, presets

const SPEC_VERSION* = "1.5.0-beta.0"
const SPEC_VERSION* = "1.5.0-beta.2"
## Spec version we're aiming to be compatible with, right now

const
@ -400,7 +400,7 @@ type
sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]

# This matches the mutable state of the Solidity deposit contract
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/solidity_deposit_contract/deposit_contract.sol
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/solidity_deposit_contract/deposit_contract.sol
DepositContractState* = object
branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
deposit_count*: array[32, byte] # Uint256

@ -53,7 +53,7 @@ type
from_bls_pubkey*: ValidatorPubKey
to_execution_address*: ExecutionAddress

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/beacon-chain.md#signedblstoexecutionchange
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/beacon-chain.md#signedblstoexecutionchange
SignedBLSToExecutionChange* = object
message*: BLSToExecutionChange
signature*: ValidatorSig
@ -676,13 +676,13 @@ func is_valid_light_client_header*(
get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_capella*(
pre: altair.LightClientHeader): LightClientHeader =
LightClientHeader(
beacon: pre.beacon)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_capella*(
pre: altair.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap(
@ -690,7 +690,7 @@ func upgrade_lc_bootstrap_to_capella*(
current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_capella*(
pre: altair.LightClientUpdate): LightClientUpdate =
LightClientUpdate(
@ -702,7 +702,7 @@ func upgrade_lc_update_to_capella*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_capella*(
pre: altair.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate(
@ -712,7 +712,7 @@ func upgrade_lc_finality_update_to_capella*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_capella*(
pre: altair.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate(

@ -55,7 +55,7 @@ const
DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF* = DomainType([byte 0x08, 0x00, 0x00, 0x00])
DOMAIN_CONTRIBUTION_AND_PROOF* = DomainType([byte 0x09, 0x00, 0x00, 0x00])

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/beacon-chain.md#domain-types
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/beacon-chain.md#domain-types
DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00])

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/fork-choice.md#configuration
@ -85,7 +85,12 @@ const
FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#withdrawal-prefixes
COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02
COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02'u8

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/electra/beacon-chain.md#execution-layer-triggered-requests
DEPOSIT_REQUEST_TYPE* = 0x00'u8
WITHDRAWAL_REQUEST_TYPE* = 0x01'u8
CONSOLIDATION_REQUEST_TYPE* = 0x02'u8

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/electra/beacon-chain.md#execution-1
MAX_BLOBS_PER_BLOCK_ELECTRA* = 9'u64

@ -382,7 +382,7 @@ type
state_root*: Eth2Digest
body*: TrustedBeaconBlockBody

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/deneb/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@ -725,7 +725,7 @@ func upgrade_lc_update_to_deneb*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_deneb*(
pre: capella.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate(

@ -186,13 +186,13 @@ type
data*: AttestationData
signature*: ValidatorSig

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#aggregateandproof
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#aggregateandproof
AggregateAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation
aggregate*: Attestation
selection_proof*: ValidatorSig

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#signedaggregateandproof
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#signedaggregateandproof
SignedAggregateAndProof* = object
message*: AggregateAndProof
signature*: ValidatorSig

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -257,6 +257,7 @@ RestJson.useDefaultSerializationFor(
electra.LightClientUpdate,
electra.SignedAggregateAndProof,
electra.SignedBeaconBlock,
electra.SingleAttestation,
electra.TrustedAttestation,
electra_mev.BlindedBeaconBlock,
electra_mev.BlindedBeaconBlockBody,
@ -377,7 +378,7 @@ type

EncodeArrays* =
seq[phase0.Attestation] |
seq[electra.Attestation] |
seq[electra.SingleAttestation] |
seq[PrepareBeaconProposer] |
seq[RemoteKeystoreInfo] |
seq[RestCommitteeSubscription] |
@ -1988,7 +1989,7 @@ proc readValue*(reader: var JsonReader[RestJson],
proc writeValue*(writer: var JsonWriter[RestJson],
proof: ForkedAggregateAndProof) {.raises: [IOError].} =
writer.beginRecord()
writer.writeField("version", proof.kind)
writer.writeField("version", proof.kind.toString())
withAggregateAndProof(proof):
writer.writeField("data", forkyProof)
writer.endRecord()
@ -4067,7 +4068,7 @@ proc readValue*(reader: var JsonReader[RestJson],
proc writeValue*(writer: var JsonWriter[RestJson],
attestation: ForkedAttestation) {.raises: [IOError].} =
writer.beginRecord()
writer.writeField("version", attestation.kind)
writer.writeField("version", attestation.kind.toString())
withAttestation(attestation):
writer.writeField("data", forkyAttestation)
writer.endRecord()

@ -178,7 +178,7 @@ type

ForkyAttestation* =
phase0.Attestation |
electra.Attestation
electra.SingleAttestation

ForkedAttestation* = object
case kind*: ConsensusFork
@ -461,6 +461,7 @@ template kind*(
electra.MsgTrustedSignedBeaconBlock |
electra.TrustedSignedBeaconBlock |
electra.Attestation |
electra.SingleAttestation |
electra.AggregateAndProof |
electra.SignedAggregateAndProof |
electra_mev.SignedBlindedBeaconBlock]): ConsensusFork =
@ -630,31 +631,6 @@ template Forky*(
kind: static ConsensusFork): auto =
kind.SignedBeaconBlock

# Workaround method used for tests that involve walking through
# `nim-eth2-scenarios` fork dirs, to be removed once Fulu is
# included in new release.
template withAllButFulu*(
x: typedesc[ConsensusFork], body: untyped): untyped =
static: doAssert ConsensusFork.high == ConsensusFork.Fulu
block:
const consensusFork {.inject, used.} = ConsensusFork.Electra
body
block:
const consensusFork {.inject, used.} = ConsensusFork.Deneb
body
block:
const consensusFork {.inject, used.} = ConsensusFork.Capella
body
block:
const consensusFork {.inject, used.} = ConsensusFork.Bellatrix
body
block:
const consensusFork {.inject, used.} = ConsensusFork.Altair
body
block:
const consensusFork {.inject, used.} = ConsensusFork.Phase0
body

template withAll*(
x: typedesc[ConsensusFork], body: untyped): untyped =
static: doAssert ConsensusFork.high == ConsensusFork.Fulu

@ -203,7 +203,7 @@ func get_seed*(state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType):
epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)
state.get_seed(epoch, domain_type, mix)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#add_flag
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#add_flag
func add_flag*(flags: ParticipationFlags, flag_index: TimelyFlag): ParticipationFlags =
let flag = ParticipationFlags(1'u8 shl ord(flag_index))
flags or flag
@ -279,7 +279,7 @@ func get_safety_threshold*(store: ForkyLightClientStore): uint64 =
store.current_max_active_participants
) div 2

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#is_better_update
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.3/specs/altair/light-client/sync-protocol.md#is_better_update
type LightClientUpdateMetadata* = object
attested_slot*, finalized_slot*, signature_slot*: Slot
has_sync_committee*, has_finality*: bool
@ -326,10 +326,10 @@ func is_better_data*(new_meta, old_meta: LightClientUpdateMetadata): bool =
old_has_supermajority =
hasSupermajoritySyncParticipation(old_meta.num_active_participants)
if new_has_supermajority != old_has_supermajority:
return new_has_supermajority > old_has_supermajority
if not new_has_supermajority:
if new_meta.num_active_participants != old_meta.num_active_participants:
return new_meta.num_active_participants > old_meta.num_active_participants
return new_has_supermajority
if not new_has_supermajority and
new_meta.num_active_participants != old_meta.num_active_participants:
return new_meta.num_active_participants > old_meta.num_active_participants

# Compare presence of relevant sync committee
let
@ -340,11 +340,11 @@ func is_better_data*(new_meta, old_meta: LightClientUpdateMetadata): bool =
old_meta.attested_slot.sync_committee_period ==
old_meta.signature_slot.sync_committee_period
if new_has_relevant_sync_committee != old_has_relevant_sync_committee:
return new_has_relevant_sync_committee > old_has_relevant_sync_committee
return new_has_relevant_sync_committee

# Compare indication of any finality
if new_meta.has_finality != old_meta.has_finality:
return new_meta.has_finality > old_meta.has_finality
return new_meta.has_finality

# Compare sync committee finality
if new_meta.has_finality:
@ -356,14 +356,18 @@ func is_better_data*(new_meta, old_meta: LightClientUpdateMetadata): bool =
old_meta.finalized_slot.sync_committee_period ==
old_meta.attested_slot.sync_committee_period
if new_has_sync_committee_finality != old_has_sync_committee_finality:
return new_has_sync_committee_finality > old_has_sync_committee_finality
return new_has_sync_committee_finality

# Tiebreaker 1: Sync committee participation beyond supermajority
if new_meta.num_active_participants != old_meta.num_active_participants:
return new_meta.num_active_participants > old_meta.num_active_participants

# Tiebreaker 2: Prefer older data (fewer changes to best data)
new_meta.attested_slot < old_meta.attested_slot
# Tiebreaker 2: Prefer older data (fewer changes to best)
if new_meta.attested_slot != old_meta.attested_slot:
return new_meta.attested_slot < old_meta.attested_slot

# Tiebreaker 3: Prefer updates with earlier signature slots
new_meta.signature_slot < old_meta.signature_slot
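The rewritten comparisons in `is_better_data` all follow one shape: walk an ordered list of criteria and decide at the first one where the two updates differ (note the diff also replaces each `a > b` on booleans with simply returning the new value, which is equivalent once `a != b` is known). A minimal sketch of that pattern, with booleans standing in for the real criteria (supermajority, relevant sync committee, finality, sync committee finality):

```nim
func betterByCriteria(newC, oldC: openArray[bool]): bool =
  for i in 0 ..< min(newC.len, oldC.len):
    if newC[i] != oldC[i]:
      return newC[i]  # same as `newC[i] > oldC[i]` when the two differ
  false  # equal on every criterion: keep the old update

doAssert betterByCriteria([true, false], [false, true])      # first criterion wins
doAssert not betterByCriteria([false, true], [false, true])  # tie keeps old
```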
template is_better_update*[
A, B: SomeForkyLightClientUpdate | ForkedLightClientUpdate](
@ -380,7 +384,7 @@ func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch =
func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch =
update.attested_header.beacon.slot.epoch

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
func is_merge_transition_complete*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState | fulu.BeaconState): bool =
@ -418,7 +422,7 @@ func is_merge_transition_block(
not is_merge_transition_complete(state) and
body.execution_payload != defaultExecutionPayload

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/bellatrix/beacon-chain.md#is_execution_enabled
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/bellatrix/beacon-chain.md#is_execution_enabled
func is_execution_enabled*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState | fulu.BeaconState,
@ -458,11 +462,6 @@ proc computeTransactionsTrieRoot(
func computeRequestsHash(
requests: electra.ExecutionRequests): EthHash32 =

const
DEPOSIT_REQUEST_TYPE = 0x00'u8 # EIP-6110
WITHDRAWAL_REQUEST_TYPE = 0x01'u8 # EIP-7002
CONSOLIDATION_REQUEST_TYPE = 0x02'u8 # EIP-7251

template individualHash(requestType, requestList): Digest =
computeDigest:
h.update([requestType.byte])
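For context on `computeRequestsHash` (whose local request-type constants are deleted above in favor of the shared spec constants): EIP-7685 commits to execution requests as sha256 over the concatenated per-type hashes, where each per-type hash is sha256(request_type_byte ++ serialized_requests) and empty request lists are skipped. A simplified sketch with raw byte lists in place of the SSZ types used by the real code:

```nim
import nimcrypto/[sha2, hash]

proc requestsHash(typedRequests: openArray[(byte, seq[byte])]): MDigest[256] =
  ## sha256 of the concatenated sha256(type ++ payload) values,
  ## skipping request types with no requests.
  var outer: sha256
  outer.init()
  for (requestType, payload) in typedRequests:
    if payload.len == 0:
      continue  # empty per-type lists do not contribute
    var inner: sha256
    inner.init()
    inner.update([requestType])  # leading type byte
    inner.update(payload)        # serialized requests of that type
    outer.update(inner.finish().data)
  outer.finish()
```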
@ -1386,7 +1386,7 @@ proc createWallet*(kdfKind: KdfKind,
crypto: crypto,
nextAccount: nextAccount.get(0))

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#bls_withdrawal_prefix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#bls_withdrawal_prefix
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2digest(k.toRaw())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -20,6 +20,7 @@ type
BuilderBid* = object
header*: electra.ExecutionPayloadHeader
blob_kzg_commitments*: KzgCommitments
execution_requests*: ExecutionRequests # [New in Electra]
value*: UInt256
pubkey*: ValidatorPubKey

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -22,6 +22,7 @@ type
BuilderBid* = object
header*: ExecutionPayloadHeader
blob_kzg_commitments*: KzgCommitments
execution_requests*: ExecutionRequests # [New in Electra]
value*: UInt256
pubkey*: ValidatorPubKey

@ -72,7 +72,7 @@ func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#broadcast-attestation
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#broadcast-attestation
func compute_subnet_for_attestation*(
committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex):
SubnetId =
@ -88,19 +88,19 @@ func compute_subnet_for_attestation*(
(committees_since_epoch_start + committee_index.asUInt64) mod
ATTESTATION_SUBNET_COUNT)
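A worked example of the subnet computation in `compute_subnet_for_attestation` above, assuming the mainnet constants SLOTS_PER_EPOCH = 32 and ATTESTATION_SUBNET_COUNT = 64:

```nim
const
  SLOTS_PER_EPOCH = 32'u64
  ATTESTATION_SUBNET_COUNT = 64'u64

func computeSubnet(committeesPerSlot, slot, committeeIndex: uint64): uint64 =
  let slotsSinceEpochStart = slot mod SLOTS_PER_EPOCH
  let committeesSinceEpochStart = committeesPerSlot * slotsSinceEpochStart
  (committeesSinceEpochStart + committeeIndex) mod ATTESTATION_SUBNET_COUNT

# slot 100 is slot 4 of its epoch; with 2 committees per slot,
# committee 1 lands on subnet (2*4 + 1) mod 64 = 9
doAssert computeSubnet(2, 100, 1) == 9
```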
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#broadcast-attestation
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#broadcast-attestation
func getAttestationTopic*(forkDigest: ForkDigest,
subnetId: SubnetId): string =
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "beacon_attestation_" & $(subnetId) & "/ssz_snappy"

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/p2p-interface.md#topics-and-messages
func getSyncCommitteeTopic*(forkDigest: ForkDigest,
subcommitteeIdx: SyncSubcommitteeIndex): string =
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "sync_committee_" & $subcommitteeIdx & "/ssz_snappy"

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/p2p-interface.md#topics-and-messages
func getSyncCommitteeContributionAndProofTopic*(forkDigest: ForkDigest): string =
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "sync_committee_contribution_and_proof/ssz_snappy"

@ -264,7 +264,7 @@ proc get_data_column_sidecars*(signed_beacon_block: electra.SignedBeaconBlock,

ok(sidecars)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/fulu/peer-sampling.md#get_extended_sample_count
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/fulu/peer-sampling.md#get_extended_sample_count
func get_extended_sample_count*(samples_per_slot: int,
allowed_failures: int):
int =

@ -8,7 +8,7 @@
{.push raises: [].}

# Mainnet preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/mainnet/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/mainnet/altair.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].}

# Mainnet preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/mainnet/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/mainnet/bellatrix.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].}

# Mainnet preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/mainnet/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/mainnet/capella.yaml
const
# Max operations per block
# ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].}

# Minimal preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/minimal/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/minimal/altair.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].}

# Minimal preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/minimal/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/minimal/bellatrix.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].}

# Minimal preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/presets/minimal/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/presets/minimal/capella.yaml
const
# Max operations per block
# ---------------------------------------------------------------

@ -59,7 +59,7 @@ func compute_epoch_signing_root*(
let domain = get_domain(fork, DOMAIN_RANDAO, epoch, genesis_validators_root)
compute_signing_root(epoch, domain)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#randao-reveal
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#randao-reveal
func get_epoch_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, epoch: Epoch,
privkey: ValidatorPrivKey): CookedSig =
@ -145,7 +145,7 @@ func compute_attestation_signing_root*(
fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
compute_signing_root(attestation_data, domain)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#aggregate-signature
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#aggregate-signature
func get_attestation_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,
attestation_data: AttestationData,
@ -355,7 +355,7 @@ proc get_contribution_and_proof_signature*(

blsSign(privkey, signing_root.data)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/validator.md#aggregation-selection
func is_sync_committee_aggregator*(signature: ValidatorSig): bool =
let
signatureDigest = eth2digest(signature.blob)
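Sketch of the selection rule `is_sync_committee_aggregator` implements per the Altair validator spec: hash the selection-proof signature, read the first 8 bytes of the digest as a little-endian uint64, and select as aggregator when it is divisible by max(1, SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT div TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE). With the mainnet constants that modulus is 8, so roughly one in eight subcommittee members aggregates:

```nim
const
  SYNC_COMMITTEE_SIZE = 512'u64
  SYNC_COMMITTEE_SUBNET_COUNT = 4'u64
  TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE = 16'u64

func isAggregator(digestFirst8LE: uint64): bool =
  # 512 div 4 div 16 = 8 on mainnet
  let modulo = max(1'u64, SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT div
                          TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
  digestFirst8LE mod modulo == 0

doAssert isAggregator(16)      # 16 mod 8 == 0
doAssert not isAggregator(17)
```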
@ -382,7 +382,7 @@ func partialBeaconBlock*(
|
||||
_: ExecutionRequests): auto =
|
||||
const consensusFork = typeof(state).kind
|
||||
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#preparing-for-a-beaconblock
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#preparing-for-a-beaconblock
|
||||
var res = consensusFork.BeaconBlock(
|
||||
slot: state.data.slot,
|
||||
proposer_index: proposer_index.uint64,
|
||||
@ -512,7 +512,7 @@ proc makeBeaconBlockWithRewards*(
|
||||
transactions_root.get
|
||||
|
||||
when executionPayload is deneb.ExecutionPayloadForSigning:
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/deneb/beacon-chain.md#beaconblockbody
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/deneb/beacon-chain.md#beaconblockbody
|
||||
forkyState.data.latest_block_header.body_root = hash_tree_root(
|
||||
[hash_tree_root(randao_reveal),
|
||||
hash_tree_root(eth1_data),
|
||||
@ -535,7 +535,6 @@ proc makeBeaconBlockWithRewards*(
|
||||
forkyState.data.latest_execution_payload_header.transactions_root =
|
||||
transactions_root.get
|
||||
|
||||
debugComment "verify (again) that this is what builder API needs"
|
||||
when executionPayload is electra.ExecutionPayloadForSigning:
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#beaconblockbody
|
||||
forkyState.data.latest_block_header.body_root = hash_tree_root(
|
||||
@ -552,7 +551,8 @@ proc makeBeaconBlockWithRewards*(
|
||||
hash_tree_root(sync_aggregate),
|
||||
execution_payload_root.get,
|
||||
hash_tree_root(validator_changes.bls_to_execution_changes),
|
||||
hash_tree_root(kzg_commitments.get)
|
||||
hash_tree_root(kzg_commitments.get),
|
||||
hash_tree_root(execution_requests)
|
||||
])
|
||||
else:
|
||||
raiseAssert "Attempt to use non-Electra payload with post-Deneb state"
|
||||
@ -577,7 +577,8 @@ proc makeBeaconBlockWithRewards*(
|
||||
hash_tree_root(sync_aggregate),
|
||||
execution_payload_root.get,
|
||||
hash_tree_root(validator_changes.bls_to_execution_changes),
|
||||
hash_tree_root(kzg_commitments.get)
|
||||
hash_tree_root(kzg_commitments.get),
|
||||
hash_tree_root(execution_requests)
|
||||
])
|
||||
else:
|
||||
raiseAssert "Attempt to use non-Fulu payload with post-Electra state"
|
||||
|
@ -10,8 +10,8 @@
|
||||
# State transition - block processing as described in
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/phase0/beacon-chain.md#block-processing
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#block-processing
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/bellatrix/beacon-chain.md#block-processing
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/capella/beacon-chain.md#block-processing
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/bellatrix/beacon-chain.md#block-processing
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/capella/beacon-chain.md#block-processing
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/deneb/beacon-chain.md#block-processing
|
||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#block-processing
|
||||
#
|
||||
@@ -801,7 +801,7 @@ func get_participant_reward*(total_active_balance: Gwei): Gwei =
func get_proposer_reward*(participant_reward: Gwei): Gwei =
participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#sync-aggregate-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#sync-aggregate-processing
proc process_sync_aggregate*(
state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState | electra.BeaconState |
@@ -1213,7 +1213,7 @@ proc process_block*(

ok(? process_operations(cfg, state, blck.body, 0.Gwei, flags, cache))

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# copy of datatypes/altair.nim
type SomeAltairBlock =

@@ -174,7 +174,7 @@ func is_eligible_validator*(validator: ParticipationInfo): bool =

from ./datatypes/deneb import BeaconState

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#get_unslashed_participating_indices
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#get_unslashed_participating_indices
func get_unslashed_participating_balances*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState | electra.BeaconState | fulu.BeaconState):
@@ -650,7 +650,7 @@ func get_base_reward_increment*(
EFFECTIVE_BALANCE_INCREMENT.Gwei
increments * base_reward_per_increment

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#get_flag_index_deltas
func get_flag_index_reward*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState | electra.BeaconState | fulu.BeaconState,
@@ -976,7 +976,7 @@ func process_registry_updates*(
ok()

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.8/specs/bellatrix/beacon-chain.md#slashings
func get_adjusted_total_slashing_balance*(
state: ForkyBeaconState, total_balance: Gwei): Gwei =
@@ -1038,7 +1038,7 @@ func get_slashing_penalty*(

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.9/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/bellatrix/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/bellatrix/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.7/specs/electra/beacon-chain.md#modified-process_slashings
func get_slashing(
state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei =
@@ -1140,7 +1140,7 @@ func process_participation_record_updates*(state: var phase0.BeaconState) =
state.previous_epoch_attestations.clear()
swap(state.previous_epoch_attestations, state.current_epoch_attestations)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#participation-flags-updates
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#participation-flags-updates
func process_participation_flag_updates*(
state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState |
@@ -1502,7 +1502,7 @@ proc process_epoch*(
let epoch = get_current_epoch(state)
info.init(state)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#justification-and-finalization
process_justification_and_finalization(state, info.balances, flags)

# state.slot hasn't been incremented yet.
@@ -1545,7 +1545,7 @@ proc process_epoch*(
let epoch = get_current_epoch(state)
info.init(state)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/beacon-chain.md#justification-and-finalization
process_justification_and_finalization(state, info.balances, flags)

# state.slot hasn't been incremented yet.

@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -7,10 +7,8 @@

{.push raises: [].}

import std/[sequtils, strutils]
import chronos, chronicles
import
../spec/datatypes/[phase0, deneb, fulu],
../spec/[forks, network, peerdas_helpers],
../networking/eth2_network,
../consensus_object_pools/block_quarantine,
@@ -20,6 +18,8 @@ import
../gossip_processing/block_processor

from std/algorithm import binarySearch, sort
from std/sequtils import mapIt
from std/strutils import join
from ../beacon_clock import GetBeaconTimeFn
export block_quarantine, sync_manager

@@ -27,40 +27,40 @@ logScope:
topics = "requman"

const
SYNC_MAX_REQUESTED_BLOCKS* = 32 # Spec allows up to MAX_REQUEST_BLOCKS.
SYNC_MAX_REQUESTED_BLOCKS = 32 # Spec allows up to MAX_REQUEST_BLOCKS.
## Maximum number of blocks which will be requested in each
## `beaconBlocksByRoot` invocation.
PARALLEL_REQUESTS* = 2
## Number of peers we using to resolve our request.
PARALLEL_REQUESTS = 2
## Number of peers we're using to resolve our request.

PARALLEL_REQUESTS_DATA_COLUMNS* = 32
PARALLEL_REQUESTS_DATA_COLUMNS = 32

BLOB_GOSSIP_WAIT_TIME_NS* = 2 * 1_000_000_000
BLOB_GOSSIP_WAIT_TIME_NS = 2 * 1_000_000_000
## How long to wait for blobs to arrive over gossip before fetching.

DATA_COLUMN_GOSSIP_WAIT_TIME_NS* = 2 * 1_000_000_000
DATA_COLUMN_GOSSIP_WAIT_TIME_NS = 2 * 1_000_000_000
## How long to wait for data columns to arrive over gossip before fetching.

POLL_INTERVAL = 1.seconds

type
BlockVerifierFn* = proc(
BlockVerifierFn = proc(
signedBlock: ForkedSignedBeaconBlock,
maybeFinalized: bool
): Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).}

BlockLoaderFn* = proc(
BlockLoaderFn = proc(
blockRoot: Eth2Digest
): Opt[ForkedTrustedSignedBeaconBlock] {.gcsafe, raises: [].}

BlobLoaderFn* = proc(
BlobLoaderFn = proc(
blobId: BlobIdentifier): Opt[ref BlobSidecar] {.gcsafe, raises: [].}

DataColumnLoaderFn* = proc(
DataColumnLoaderFn = proc(
columnId: DataColumnIdentifier):
Opt[ref DataColumnSidecar] {.gcsafe, raises: [].}

InhibitFn* = proc: bool {.gcsafe, raises: [].}
InhibitFn = proc: bool {.gcsafe, raises: [].}

RequestManager* = object
network*: Eth2Node
@@ -112,7 +112,7 @@ proc init*(T: type RequestManager, network: Eth2Node,
blobLoader: blobLoader,
dataColumnLoader: dataColumnLoader)

proc checkResponse(roots: openArray[Eth2Digest],
func checkResponse(roots: openArray[Eth2Digest],
blocks: openArray[ref ForkedSignedBeaconBlock]): bool =
## This procedure checks the peer's response.
var checks = @roots
@@ -130,7 +130,7 @@ func cmpSidecarIdentifier(x: BlobIdentifier | DataColumnIdentifier,
y: ref BlobSidecar | ref DataColumnSidecar): int =
cmp(x.index, y.index)

proc checkResponse(idList: seq[BlobIdentifier],
func checkResponse(idList: seq[BlobIdentifier],
blobs: openArray[ref BlobSidecar]): bool =
if blobs.len > idList.len:
return false
@@ -154,7 +154,7 @@ proc checkResponse(idList: seq[BlobIdentifier],
inc i
true

proc checkResponse(idList: seq[DataColumnIdentifier],
func checkResponse(idList: seq[DataColumnIdentifier],
columns: openArray[ref DataColumnSidecar]): bool =
if columns.len > idList.len:
return false
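The `checkResponse` overloads above all enforce the same invariant: a peer's reply may only contain items that answer the request, each at most once. A minimal sketch of that idea, assuming simplified integer ids rather than the repository's digest and sidecar types:

```nim
# Sketch: every returned item must consume exactly one requested id.
func checkResponseSketch(requested: openArray[int],
                         returned: openArray[int]): bool =
  var remaining = @requested
  for item in returned:
    let idx = remaining.find(item)  # unrequested or duplicate items fail
    if idx < 0:
      return false
    remaining.del(idx)              # consume the matched request slot
  true
```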
@@ -295,9 +295,9 @@ proc fetchBlobsFromNetwork(self: RequestManager,
if not(isNil(peer)):
self.network.peerPool.release(peer)

proc checkPeerCustody*(rman: RequestManager,
peer: Peer):
bool =
proc checkPeerCustody(rman: RequestManager,
peer: Peer):
bool =
# Returns true if the peer custodies at least
# ONE of the common custody columns, straight
# away returns true if the peer is a supernode.

@@ -21,9 +21,6 @@ import
export phase0, altair, merge, chronos, chronicles, results,
helpers, peer_scores, sync_queue, forks, sync_protocol

logScope:
topics = "syncman"

const
SyncWorkersCount* = 10
## Number of sync workers to spawn
@@ -34,6 +31,12 @@ const
StatusExpirationTime* = chronos.minutes(2)
## The time it takes for the peer's status information to expire.

ConcurrentRequestsCount* = 3
## Number of requests performed by one peer in a single syncing step

RepeatingFailuresCount* = 2
## Number of repeating errors before starting the rewind process.

WeakSubjectivityLogMessage* =
"Database state missing or too old, cannot sync - resync the client " &
"using a trusted node or allow lenient long-range syncing with the " &
@@ -81,6 +84,8 @@ type
direction: SyncQueueKind
ident*: string
flags: set[SyncManagerFlag]
concurrentRequestsCount: int
repeatingFailuresCount: int

SyncMoment* = object
stamp*: chronos.Moment
@@ -115,8 +120,10 @@ proc initQueue[A, B](man: SyncManager[A, B]) =
of SyncQueueKind.Forward:
man.queue = SyncQueue.init(A, man.direction, man.getFirstSlot(),
man.getLastSlot(), man.chunkSize,
man.concurrentRequestsCount,
man.repeatingFailuresCount,
man.getSafeSlot, man.blockVerifier,
1, man.ident)
man.ident)
of SyncQueueKind.Backward:
let
firstSlot = man.getFirstSlot()
@@ -128,27 +135,34 @@ proc initQueue[A, B](man: SyncManager[A, B]) =
else:
firstSlot - 1'u64
man.queue = SyncQueue.init(A, man.direction, startSlot, lastSlot,
man.chunkSize, man.getSafeSlot,
man.blockVerifier, 1, man.ident)
man.chunkSize,
man.concurrentRequestsCount,
man.repeatingFailuresCount,
man.getSafeSlot,
man.blockVerifier, man.ident)

proc newSyncManager*[A, B](
pool: PeerPool[A, B],
denebEpoch: Epoch,
minEpochsForBlobSidecarsRequests: uint64,
direction: SyncQueueKind,
getLocalHeadSlotCb: GetSlotCallback,
getLocalWallSlotCb: GetSlotCallback,
getFinalizedSlotCb: GetSlotCallback,
getBackfillSlotCb: GetSlotCallback,
getFrontfillSlotCb: GetSlotCallback,
weakSubjectivityPeriodCb: GetBoolCallback,
progressPivot: Slot,
blockVerifier: BlockVerifier,
shutdownEvent: AsyncEvent,
maxHeadAge = uint64(SLOTS_PER_EPOCH * 1),
chunkSize = uint64(SLOTS_PER_EPOCH),
flags: set[SyncManagerFlag] = {},
concurrentRequestsCount = ConcurrentRequestsCount,
repeatingFailuresCount = RepeatingFailuresCount,
ident = "main"
): SyncManager[A, B] =

proc newSyncManager*[A, B](pool: PeerPool[A, B],
denebEpoch: Epoch,
minEpochsForBlobSidecarsRequests: uint64,
direction: SyncQueueKind,
getLocalHeadSlotCb: GetSlotCallback,
getLocalWallSlotCb: GetSlotCallback,
getFinalizedSlotCb: GetSlotCallback,
getBackfillSlotCb: GetSlotCallback,
getFrontfillSlotCb: GetSlotCallback,
weakSubjectivityPeriodCb: GetBoolCallback,
progressPivot: Slot,
blockVerifier: BlockVerifier,
shutdownEvent: AsyncEvent,
maxHeadAge = uint64(SLOTS_PER_EPOCH * 1),
chunkSize = uint64(SLOTS_PER_EPOCH),
flags: set[SyncManagerFlag] = {},
ident = "main"
): SyncManager[A, B] =
let (getFirstSlot, getLastSlot, getSafeSlot) = case direction
of SyncQueueKind.Forward:
(getLocalHeadSlotCb, getLocalWallSlotCb, getFinalizedSlotCb)
@@ -173,7 +187,9 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
direction: direction,
shutdownEvent: shutdownEvent,
ident: ident,
flags: flags
flags: flags,
concurrentRequestsCount: concurrentRequestsCount,
repeatingFailuresCount: repeatingFailuresCount
)
res.initQueue()
res
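The two new tuning knobs default to the module constants above but can be overridden per instance. A hypothetical call site, assuming the peer pool, callbacks, and type parameters already exist in the caller (their names here are placeholders, not taken from this diff):

```nim
# Sketch: overriding the new sync tuning parameters at construction time.
let syncman = newSyncManager[Peer, PeerId](
  peerPool, denebEpoch, minEpochsForBlobSidecars,
  SyncQueueKind.Forward,
  getLocalHeadSlot, getLocalWallSlot, getFinalizedSlot,
  getBackfillSlot, getFrontfillSlot, isWithinWeakSubjectivityPeriod,
  progressPivot, blockVerifier, shutdownEvent,
  concurrentRequestsCount = 3,  # requests issued to one peer per sync step
  repeatingFailuresCount = 2,   # failures tolerated before rewinding
  ident = "main")
```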
@@ -182,18 +198,15 @@ proc getBlocks[A, B](man: SyncManager[A, B], peer: A,
req: SyncRequest[A]): Future[BeaconBlocksRes] {.
async: (raises: [CancelledError], raw: true).} =
mixin getScore, `==`

logScope:
peer_score = peer.getScore()
peer_speed = peer.netKbps()
sync_ident = man.ident
direction = man.direction
topics = "syncman"

doAssert(not(req.isEmpty()), "Request must not be empty!")
debug "Requesting blocks from peer", request = req
debug "Requesting blocks from peer",
request = req,
peer_score = req.item.getScore(),
peer_speed = req.item.netKbps(),
sync_ident = man.ident,
topics = "syncman"

beaconBlocksByRange_v2(peer, req.slot, req.count, 1'u64)
beaconBlocksByRange_v2(peer, req.data.slot, req.data.count, 1'u64)

proc shouldGetBlobs[A, B](man: SyncManager[A, B], s: Slot): bool =
let
@@ -204,23 +217,23 @@ proc shouldGetBlobs[A, B](man: SyncManager[A, B], s: Slot): bool =
epoch >= wallEpoch - man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS)

proc shouldGetBlobs[A, B](man: SyncManager[A, B], r: SyncRequest[A]): bool =
man.shouldGetBlobs(r.slot) or man.shouldGetBlobs(r.slot + (r.count - 1))
man.shouldGetBlobs(r.data.slot) or
man.shouldGetBlobs(r.data.slot + (r.data.count - 1))

proc getBlobSidecars[A, B](man: SyncManager[A, B], peer: A,
req: SyncRequest[A]): Future[BlobSidecarsRes]
{.async: (raises: [CancelledError], raw: true).} =
mixin getScore, `==`

logScope:
peer_score = peer.getScore()
peer_speed = peer.netKbps()
sync_ident = man.ident
direction = man.direction
topics = "syncman"

doAssert(not(req.isEmpty()), "Request must not be empty!")
debug "Requesting blobs sidecars from peer", request = req
blobSidecarsByRange(peer, req.slot, req.count)
debug "Requesting blobs sidecars from peer",
request = req,
peer_score = req.item.getScore(),
peer_speed = req.item.netKbps(),
sync_ident = man.ident,
topics = "syncman"

blobSidecarsByRange(peer, req.data.slot, req.data.count)

proc remainingSlots(man: SyncManager): uint64 =
let
@@ -238,8 +251,8 @@ proc remainingSlots(man: SyncManager): uint64 =
0'u64

func groupBlobs*(
blocks: seq[ref ForkedSignedBeaconBlock],
blobs: seq[ref BlobSidecar]
blocks: openArray[ref ForkedSignedBeaconBlock],
blobs: openArray[ref BlobSidecar]
): Result[seq[BlobSidecars], string] =
var
grouped = newSeq[BlobSidecars](len(blocks))
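`groupBlobs` pairs each downloaded blob sidecar with the block it belongs to, producing one `BlobSidecars` bucket per block. A minimal sketch of that grouping, assuming sidecars arrive sorted by slot and using a simplified `(slot, index)` tuple in place of the real sidecar types:

```nim
# Sketch: bucket sidecars by the slot of their parent block.
type Sidecar = tuple[slot: uint64, index: int]

func groupBySlot(blockSlots: openArray[uint64],
                 sidecars: openArray[Sidecar]): seq[seq[Sidecar]] =
  result = newSeq[seq[Sidecar]](blockSlots.len)
  var cursor = 0
  for i, slot in blockSlots:
    # consume the run of sidecars that share this block's slot
    while cursor < sidecars.len and sidecars[cursor].slot == slot:
      result[i].add(sidecars[cursor])
      inc cursor
```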
@@ -287,13 +300,12 @@ proc getSyncBlockData*[T](
): Future[SyncBlockDataRes] {.async: (raises: [CancelledError]).} =
mixin getScore

logScope:
slot = slot
peer_score = peer.getScore()
peer_speed = peer.netKbps()
topics = "syncman"

debug "Requesting block from peer"
debug "Requesting block from peer",
slot = slot,
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
topics = "syncman"

let blocksRange =
block:
@@ -312,7 +324,12 @@ proc getSyncBlockData*[T](
return err("Incorrect number of blocks was returned by peer, " &
$len(blocksRange))

debug "Received block on request"
debug "Received block on request",
slot = slot,
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
topics = "syncman"

if blocksRange[0][].slot != slot:
peer.updateScore(PeerScoreBadResponse)
@@ -349,7 +366,13 @@ proc getSyncBlockData*[T](
peer.updateScore(PeerScoreBadResponse)
return err("Incorrect number of received blobs in the requested range")

debug "Received blobs on request", blobs_count = len(blobData)
debug "Received blobs on request",
slot = slot,
blobs_count = len(blobData),
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
topics = "syncman"

let groupedBlobs = groupBlobs(blocksRange, blobData).valueOr:
peer.updateScore(PeerScoreNoValues)
@@ -365,84 +388,204 @@ proc getSyncBlockData*[T](

ok(SyncBlockData(blocks: blocksRange, blobs: blobsRange))

proc syncStep[A, B](
man: SyncManager[A, B], index: int, peer: A
) {.async: (raises: [CancelledError]).} =
logScope:
peer_score = peer.getScore()
peer_speed = peer.netKbps()
index = index
sync_ident = man.ident
topics = "syncman"
proc getSyncBlockData[A, B](
man: SyncManager[A, B],
index: int,
sr: SyncRequest[A]
): Future[SyncBlockDataRes] {.async: (raises: [CancelledError]).} =
let
peer = sr.item
blocks = (await man.getBlocks(peer, sr)).valueOr:
peer.updateScore(PeerScoreNoValues)
return err("Failed to receive blocks on request, reason: " & $error)
blockSlots = mapIt(blocks, it[].slot)

var
debug "Received blocks on request",
request = sr,
peer_score = sr.item.getScore(),
peer_speed = sr.item.netKbps(),
index = index,
blocks_count = len(blocks),
blocks_map = getShortMap(sr, blocks.toSeq()),
sync_ident = man.ident,
topics = "syncman"

checkResponse(sr, blockSlots).isOkOr:
peer.updateScore(PeerScoreBadResponse)
return err("Incorrect blocks sequence received, reason: " & $error)

let
shouldGetBlobs =
if not(man.shouldGetBlobs(sr)):
false
else:
var hasBlobs = false
for blck in blocks:
withBlck(blck[]):
when consensusFork >= ConsensusFork.Deneb:
if len(forkyBlck.message.body.blob_kzg_commitments) > 0:
hasBlobs = true
break
hasBlobs
blobs =
if shouldGetBlobs:
let
res = (await man.getBlobSidecars(peer, sr)).valueOr:
peer.updateScore(PeerScoreNoValues)
return err("Failed to receive blobs on request, reason: " & $error)
blobData = res.asSeq()

debug "Received blobs on request",
request = sr,
peer_score = sr.item.getScore(),
peer_speed = sr.item.netKbps(),
index = index,
blobs_count = len(blobData),
blobs_map = getShortMap(sr, blobData),
sync_ident = man.ident,
topics = "syncman"

if len(blobData) > 0:
let blobSlots = mapIt(blobData, it[].signed_block_header.message.slot)
checkBlobsResponse(sr, blobSlots).isOkOr:
peer.updateScore(PeerScoreBadResponse)
return err("Incorrect blobs sequence received, reason: " & $error)

let groupedBlobs = groupBlobs(blocks.asSeq(), blobData).valueOr:
peer.updateScore(PeerScoreNoValues)
return err(
"Received blobs sequence is inconsistent, reason: " & error)

groupedBlobs.checkBlobs().isOkOr:
peer.updateScore(PeerScoreBadResponse)
return err("Received blobs verification failed, reason: " & error)
Opt.some(groupedBlobs)
else:
Opt.none(seq[BlobSidecars])

ok(SyncBlockData(blocks: blocks.asSeq(), blobs: blobs))

proc getOrUpdatePeerStatus[A, B](
man: SyncManager[A, B], index: int, peer: A
): Future[Result[Slot, string]] {.async: (raises: [CancelledError]).} =
let
headSlot = man.getLocalHeadSlot()
wallSlot = man.getLocalWallSlot()
peerSlot = peer.getHeadSlot()

block: # Check that peer status is recent and relevant
logScope:
peer = peer
direction = man.direction
debug "Peer's syncing status",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
wall_clock_slot = wallSlot,
remote_head_slot = peerSlot,
local_head_slot = headSlot,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"

debug "Peer's syncing status", wall_clock_slot = wallSlot,
remote_head_slot = peerSlot, local_head_slot = headSlot
let
peerStatusAge = Moment.now() - peer.getStatusLastTime()
needsUpdate =
# Latest status we got is old
peerStatusAge >= StatusExpirationTime or
# The point we need to sync is close to where the peer is
man.getFirstSlot() >= peerSlot

let
peerStatusAge = Moment.now() - peer.getStatusLastTime()
needsUpdate =
# Latest status we got is old
peerStatusAge >= StatusExpirationTime or
# The point we need to sync is close to where the peer is
man.getFirstSlot() >= peerSlot
if not(needsUpdate):
return ok(peerSlot)

if needsUpdate:
man.workers[index].status = SyncWorkerStatus.UpdatingStatus
man.workers[index].status = SyncWorkerStatus.UpdatingStatus

# Avoid a stampede of requests, but make them more frequent in case the
# peer is "close" to the slot range of interest
if peerStatusAge < StatusExpirationTime div 2:
await sleepAsync(StatusExpirationTime div 2 - peerStatusAge)
# Avoid a stampede of requests, but make them more frequent in case the
# peer is "close" to the slot range of interest
if peerStatusAge < (StatusExpirationTime div 2):
await sleepAsync((StatusExpirationTime div 2) - peerStatusAge)

trace "Updating peer's status information", wall_clock_slot = wallSlot,
remote_head_slot = peerSlot, local_head_slot = headSlot
trace "Updating peer's status information",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
wall_clock_slot = wallSlot,
remote_head_slot = peerSlot,
local_head_slot = headSlot,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
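The stampede guard above means a peer's status is refreshed at most once per half expiration window. A worked example under the constant defined in this diff (`StatusExpirationTime = chronos.minutes(2)`), using only expressions that appear in the code:

```nim
import chronos

let
  statusExpirationTime = chronos.minutes(2)
  peerStatusAge = chronos.seconds(45)  # example: status is 45 seconds old

if peerStatusAge < (statusExpirationTime div 2):
  # the worker sleeps 60s - 45s = 15 seconds before re-requesting status
  echo (statusExpirationTime div 2) - peerStatusAge
```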

if not(await peer.updateStatus()):
peer.updateScore(PeerScoreNoStatus)
debug "Failed to get remote peer's status, exiting",
peer_head_slot = peerSlot
if not(await peer.updateStatus()):
peer.updateScore(PeerScoreNoStatus)
return err("Failed to get remote peer status")

return
let newPeerSlot = peer.getHeadSlot()
if peerSlot >= newPeerSlot:
peer.updateScore(PeerScoreStaleStatus)
debug "Peer's status information is stale",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
wall_clock_slot = wallSlot,
remote_old_head_slot = peerSlot,
local_head_slot = headSlot,
remote_new_head_slot = newPeerSlot,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
else:
debug "Peer's status information updated",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
wall_clock_slot = wallSlot,
remote_old_head_slot = peerSlot,
local_head_slot = headSlot,
remote_new_head_slot = newPeerSlot,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
peer.updateScore(PeerScoreGoodStatus)
ok(newPeerSlot)

let newPeerSlot = peer.getHeadSlot()
if peerSlot >= newPeerSlot:
peer.updateScore(PeerScoreStaleStatus)
debug "Peer's status information is stale",
wall_clock_slot = wallSlot, remote_old_head_slot = peerSlot,
local_head_slot = headSlot, remote_new_head_slot = newPeerSlot
else:
debug "Peer's status information updated", wall_clock_slot = wallSlot,
remote_old_head_slot = peerSlot, local_head_slot = headSlot,
remote_new_head_slot = newPeerSlot
peer.updateScore(PeerScoreGoodStatus)
peerSlot = newPeerSlot
proc syncStep[A, B](
man: SyncManager[A, B], index: int, peer: A
) {.async: (raises: [CancelledError]).} =

# Time passed - enough to move slots, if sleep happened
let
peerSlot = (await man.getOrUpdatePeerStatus(index, peer)).valueOr:
return
headSlot = man.getLocalHeadSlot()
wallSlot = man.getLocalWallSlot()

if man.remainingSlots() <= man.maxHeadAge:
logScope:
peer = peer
direction = man.direction

case man.direction
of SyncQueueKind.Forward:
info "We are in sync with network", wall_clock_slot = wallSlot,
remote_head_slot = peerSlot, local_head_slot = headSlot
info "We are in sync with network",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
wall_clock_slot = wallSlot,
remote_head_slot = peerSlot,
local_head_slot = headSlot,
direction = man.direction,
sync_ident = man.ident,
topics = "syncman"
of SyncQueueKind.Backward:
info "Backfill complete", wall_clock_slot = wallSlot,
remote_head_slot = peerSlot, local_head_slot = headSlot
info "Backfill complete",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
wall_clock_slot = wallSlot,
remote_head_slot = peerSlot,
local_head_slot = headSlot,
direction = man.direction,
sync_ident = man.ident,
topics = "syncman"

# We clear SyncManager's `notInSyncEvent` so all the workers will become
# sleeping soon.
@@ -462,161 +605,103 @@ proc syncStep[A, B](
# Right now we are decreasing the peer's score a bit, so it will not be
# disconnected due to a low peer score, but new fresh peers could replace
# peers with a low latest head.
debug "Peer's head slot is lower than local head slot", peer = peer,
wall_clock_slot = wallSlot, remote_head_slot = peerSlot,
debug "Peer's head slot is lower than local head slot",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
wall_clock_slot = wallSlot,
remote_head_slot = peerSlot,
local_last_slot = man.getLastSlot(),
local_first_slot = man.getFirstSlot(),
direction = man.direction
direction = man.direction,
sync_ident = man.ident,
topics = "syncman"
peer.updateScore(PeerScoreUseless)
return

# Wall clock keeps ticking, so we need to update the queue
man.queue.updateLastSlot(man.getLastSlot())

man.workers[index].status = SyncWorkerStatus.Requesting
let req = man.queue.pop(peerSlot, peer)
if req.isEmpty():
# SyncQueue could return an empty request in 2 cases:
# 1. There are no more slots in SyncQueue to download (we are synced, but
# our ``notInSyncEvent`` is not yet cleared).
# 2. The current peer's known head slot is too low to satisfy the request.
#
# To avoid an endless loop we are going to wait for RESP_TIMEOUT time here.
# This time is enough for all pending requests to finish and it is also
# enough for the main sync loop to clear ``notInSyncEvent``.
debug "Empty request received from queue, exiting", peer = peer,
local_head_slot = headSlot, remote_head_slot = peerSlot,
queue_input_slot = man.queue.inpSlot,
queue_output_slot = man.queue.outSlot,
queue_last_slot = man.queue.finalSlot, direction = man.direction
await sleepAsync(RESP_TIMEOUT_DUR)
return
proc processCallback() =
man.workers[index].status = SyncWorkerStatus.Processing

debug "Creating new request for peer", wall_clock_slot = wallSlot,
remote_head_slot = peerSlot, local_head_slot = headSlot,
request = req
var jobs: seq[Future[void].Raising([CancelledError])]

man.workers[index].status = SyncWorkerStatus.Downloading
try:
for rindex in 0 ..< man.concurrentRequestsCount:
man.workers[index].status = SyncWorkerStatus.Requesting
let request = man.queue.pop(peerSlot, peer)
if request.isEmpty():
# SyncQueue could return an empty request in 2 cases:
# 1. There are no more slots in SyncQueue to download (we are synced, but
# our ``notInSyncEvent`` is not yet cleared).
# 2. The current peer's known head slot is too low to satisfy the request.
#
# To avoid an endless loop we are going to wait for RESP_TIMEOUT time here.
# This time is enough for all pending requests to finish and it is also
# enough for the main sync loop to clear ``notInSyncEvent``.
debug "Empty request received from queue",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
local_head_slot = headSlot,
remote_head_slot = peerSlot,
queue_input_slot = man.queue.inpSlot,
queue_output_slot = man.queue.outSlot,
queue_last_slot = man.queue.finalSlot,
direction = man.direction,
sync_ident = man.ident,
topics = "syncman"
await sleepAsync(RESP_TIMEOUT_DUR)
break

let blocks = await man.getBlocks(peer, req)
if blocks.isErr():
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Failed to receive blocks on request",
request = req, err = blocks.error
return
let blockData = blocks.get().asSeq()
debug "Received blocks on request", blocks_count = len(blockData),
blocks_map = getShortMap(req, blockData), request = req
man.workers[index].status = SyncWorkerStatus.Downloading
let data = (await man.getSyncBlockData(index, request)).valueOr:
debug "Failed to get block data",
peer = peer,
peer_score = peer.getScore(),
peer_speed = peer.netKbps(),
index = index,
reason = error,
direction = man.direction,
sync_ident = man.ident,
topics = "syncman"
man.queue.push(request)
break

let slots = mapIt(blockData, it[].slot)
checkResponse(req, slots).isOkOr:
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Incorrect blocks sequence received",
blocks_count = len(blockData),
blocks_map = getShortMap(req, blockData),
request = req,
reason = error
return
# Scoring will happen in `syncUpdate`.
man.workers[index].status = SyncWorkerStatus.Queueing
let
peerFinalized = peer.getFinalizedEpoch().start_slot()
lastSlot = request.data.slot + request.data.count - 1
# The peer claims the block is finalized - our own block processing will
# verify this point down the line
# TODO descore peers that lie
maybeFinalized = lastSlot < peerFinalized

let shouldGetBlobs =
if not man.shouldGetBlobs(req):
false
else:
var hasBlobs = false
for blck in blockData:
withBlck(blck[]):
when consensusFork >= ConsensusFork.Deneb:
if forkyBlck.message.body.blob_kzg_commitments.len > 0:
hasBlobs = true
break
hasBlobs
jobs.add(man.queue.push(request, data.blocks, data.blobs, maybeFinalized,
processCallback))

let blobData =
if shouldGetBlobs:
let blobs = await man.getBlobSidecars(peer, req)
if blobs.isErr():
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Failed to receive blobs on request",
request = req, err = blobs.error
return
let blobData = blobs.get().asSeq()
debug "Received blobs on request",
blobs_count = len(blobData),
blobs_map = getShortMap(req, blobData), request = req
if len(jobs) > 0:
await allFutures(jobs)

if len(blobData) > 0:
let slots = mapIt(blobData, it[].signed_block_header.message.slot)
checkBlobsResponse(req, slots).isOkOr:
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Incorrect blobs sequence received",
blobs_count = len(blobData),
blobs_map = getShortMap(req, blobData),
request = req,
reason = error
return
let groupedBlobs = groupBlobs(blockData, blobData).valueOr:
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
info "Received blobs sequence is inconsistent",
blobs_map = getShortMap(req, blobData),
request = req, msg = error
return
groupedBlobs.checkBlobs().isOkOr:
peer.updateScore(PeerScoreBadResponse)
man.queue.push(req)
warn "Received blobs verification failed",
blobs_count = len(blobData),
blobs_map = getShortMap(req, blobData),
request = req,
reason = error
return
Opt.some(groupedBlobs)
else:
Opt.none(seq[BlobSidecars])

if len(blockData) == 0 and man.direction == SyncQueueKind.Backward and
req.contains(man.getSafeSlot()):
# The sync protocol does not distinguish between:
# - All requested slots are empty
# - Peer does not have data available about requested range
#
# However, we include the `backfill` slot in backward sync requests.
# If we receive an empty response to a request covering that slot,
# we know that the response is incomplete and can descore.
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "Response does not include known-to-exist block", request = req
return

# Scoring will happen in `syncUpdate`.
man.workers[index].status = SyncWorkerStatus.Queueing
let
peerFinalized = peer.getFinalizedEpoch().start_slot()
lastSlot = req.slot + req.count
# The peer claims the block is finalized - our own block processing will
# verify this point down the line
# TODO descore peers that lie
maybeFinalized = lastSlot < peerFinalized

await man.queue.push(req, blockData, blobData, maybeFinalized, proc() =
man.workers[index].status = SyncWorkerStatus.Processing)
except CancelledError as exc:
let pending = jobs.filterIt(not(it.finished)).mapIt(cancelAndWait(it))
await noCancel allFutures(pending)
raise exc

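The `except CancelledError` handler above shows the structured-cancellation pattern used with chronos: collect the still-running push jobs, cancel them, and wait for the cancellations to settle inside a non-cancellable region before re-raising. A stripped-down sketch of the same pattern, with trivial sleep tasks standing in for the queue push jobs:

```nim
import std/sequtils
import chronos

proc job(ms: int) {.async: (raises: [CancelledError]).} =
  await sleepAsync(ms.milliseconds)

proc runAll() {.async: (raises: [CancelledError]).} =
  var jobs: seq[Future[void].Raising([CancelledError])]
  try:
    for ms in [100, 200, 300]:
      jobs.add(job(ms))
    await allFutures(jobs)
  except CancelledError as exc:
    # Cancel whatever is still running and wait for it to finish,
    # shielded from further cancellation, before propagating.
    let pending = jobs.filterIt(not(it.finished)).mapIt(cancelAndWait(it))
    await noCancel allFutures(pending)
    raise exc
```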
proc syncWorker[A, B](
man: SyncManager[A, B], index: int
) {.async: (raises: [CancelledError]).} =
mixin getKey, getScore, getHeadSlot

logScope:
index = index
sync_ident = man.ident
direction = man.direction
topics = "syncman"

debug "Starting syncing worker"
debug "Starting syncing worker",
index = index,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"

var peer: A = nil

@@ -634,7 +719,11 @@ proc syncWorker[A, B](
if not(isNil(peer)):
man.pool.release(peer)

debug "Sync worker stopped"
debug "Sync worker stopped",
index = index,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"

proc getWorkersStats[A, B](man: SyncManager[A, B]): tuple[map: string,
sleeping: int,
@@ -719,18 +808,20 @@ proc syncClose[A, B](
proc syncLoop[A, B](
man: SyncManager[A, B]
) {.async: (raises: [CancelledError]).} =

logScope:
sync_ident = man.ident
direction = man.direction
topics = "syncman"

mixin getKey, getScore
var pauseTime = 0

# Update SyncQueue parameters, because the callbacks used to calculate them
# could provide different values at the moment when syncLoop() started.
man.initQueue()

man.startWorkers()

debug "Synchronization loop started"
debug "Synchronization loop started",
sync_ident = man.ident,
direction = man.direction,
start_slot = man.queue.startSlot,
finish_slot = man.queue.finalSlot,
topics = "syncman"

proc averageSpeedTask() {.async: (raises: [CancelledError]).} =
while true:
@@ -778,9 +869,11 @@ proc syncLoop[A, B](
pending_workers_count = pending,
wall_head_slot = wallSlot,
local_head_slot = headSlot,
pause_time = $chronos.seconds(pauseTime),
avg_sync_speed = man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4),
ins_sync_speed = man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4)
ins_sync_speed = man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4),
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
of SyncQueueKind.Backward:
debug "Current syncing state", workers_map = map,
sleeping_workers_count = sleeping,
@@ -788,9 +881,11 @@ proc syncLoop[A, B](
pending_workers_count = pending,
wall_head_slot = wallSlot,
backfill_slot = man.getSafeSlot(),
pause_time = $chronos.seconds(pauseTime),
avg_sync_speed = man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4),
ins_sync_speed = man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4)
ins_sync_speed = man.insSyncSpeed.formatBiggestFloat(ffDecimal, 4),
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
let
pivot = man.progressPivot
progress =
@@ -855,10 +950,17 @@ proc syncLoop[A, B](
# all sync workers are in `Sleeping` state.
if pending > 0:
debug "Synchronization loop waits for workers completion",
wall_head_slot = wallSlot, local_head_slot = headSlot,
difference = (wallSlot - headSlot), max_head_age = man.maxHeadAge,
wall_head_slot = wallSlot,
local_head_slot = headSlot,
difference = (wallSlot - headSlot),
max_head_age = man.maxHeadAge,
sleeping_workers_count = sleeping,
waiting_workers_count = waiting, pending_workers_count = pending
waiting_workers_count = waiting,
pending_workers_count = pending,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"

# We already synced, so we should reset all the pending workers from
# any state they have.
man.queue.clearAndWakeup()
@@ -871,21 +973,33 @@ proc syncLoop[A, B](
await man.syncClose(averageSpeedTaskFut)
man.inProgress = false
debug "Forward synchronization process finished, exiting",
wall_head_slot = wallSlot, local_head_slot = headSlot,
wall_head_slot = wallSlot,
local_head_slot = headSlot,
difference = (wallSlot - headSlot),
max_head_age = man.maxHeadAge
max_head_age = man.maxHeadAge,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
break
else:
man.inProgress = false
debug "Forward synchronization process finished, sleeping",
wall_head_slot = wallSlot, local_head_slot = headSlot,
wall_head_slot = wallSlot,
local_head_slot = headSlot,
difference = (wallSlot - headSlot),
max_head_age = man.maxHeadAge
max_head_age = man.maxHeadAge,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
else:
debug "Synchronization loop sleeping", wall_head_slot = wallSlot,
debug "Synchronization loop sleeping",
wall_head_slot = wallSlot,
local_head_slot = headSlot,
difference = (wallSlot - headSlot),
max_head_age = man.maxHeadAge
max_head_age = man.maxHeadAge,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
of SyncQueueKind.Backward:
# Backward syncing is going to be executed only once, so we exit the loop
# and stop all pending tasks which belong to this instance (sync
@@ -893,9 +1007,13 @@ proc syncLoop[A, B](
await man.syncClose(averageSpeedTaskFut)
man.inProgress = false
debug "Backward synchronization process finished, exiting",
wall_head_slot = wallSlot, local_head_slot = headSlot,
wall_head_slot = wallSlot,
local_head_slot = headSlot,
backfill_slot = man.getLastSlot(),
max_head_age = man.maxHeadAge
max_head_age = man.maxHeadAge,
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
break
else:
if not(man.notInSyncEvent.isSet()):
@@ -905,10 +1023,14 @@ proc syncLoop[A, B](
man.notInSyncEvent.fire()
man.inProgress = true
debug "Node lost sync for more than preset period",
period = man.maxHeadAge, wall_head_slot = wallSlot,
period = man.maxHeadAge,
wall_head_slot = wallSlot,
local_head_slot = headSlot,
missing_slots = man.remainingSlots(),
progress = float(man.queue.progress())
progress = float(man.queue.progress()),
sync_ident = man.ident,
direction = man.direction,
topics = "syncman"
else:
man.notInSyncEvent.fire()
man.inProgress = true

@@ -200,7 +200,7 @@ proc updatePerformance(overseer: SyncOverseerRef, startTick: Moment,

# Update status string
overseer.statusMsg = Opt.some(
"fill: " & timeleft.toTimeLeftString() & " (" &
timeleft.toTimeLeftString() & " (" &
(done * 100).formatBiggestFloat(ffDecimal, 2) & "%) " &
overseer.avgSpeed.formatBiggestFloat(ffDecimal, 4) &
"slots/s (" & $dag.head.slot & ")")
@@ -521,8 +521,6 @@ proc mainLoop*(
quit 1

overseer.untrustedInProgress = false
# Reset status bar
overseer.statusMsg = Opt.none(string)

# When the state rebuilding process has finished, we can start the forward
# SyncManager, which performs the finishing sync.

@@ -368,7 +368,7 @@ p2pProtocol BeaconSync(version = 1,
# are `not-nil` in the implementation
getBlobSidecarsByRange(
"1", peer, peer.networkState.dag, response, startSlot, reqCount,
MAX_BLOBS_PER_BLOCK, MAX_REQUEST_BLOB_SIDECARS_ELECTRA)
MAX_BLOBS_PER_BLOCK_ELECTRA, MAX_REQUEST_BLOB_SIDECARS_ELECTRA)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/fulu/p2p-interface.md#datacolumnsidecarsbyroot-v1
proc dataColumnSidecarsByRoot(

File diff suppressed because it is too large
@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Copyright (c) 2021-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -72,10 +72,12 @@ proc serveAttestation(
logScope:
attestation = shortLog(atst)
try:
when atst is electra.Attestation:
when atst is electra.SingleAttestation:
await vc.submitPoolAttestationsV2(@[atst], ApiStrategyKind.First)
else:
elif atst is phase0.Attestation:
await vc.submitPoolAttestations(@[atst], ApiStrategyKind.First)
else:
static: doAssert false
except ValidatorApiError as exc:
warn "Unable to publish attestation", reason = exc.getFailureReason()
return false
@@ -85,7 +87,7 @@ proc serveAttestation(

let res =
if afterElectra:
let attestation = registered.toElectraAttestation(signature)
let attestation = registered.toSingleAttestation(signature)
submitAttestation(attestation)
else:
let attestation = registered.toAttestation(signature)

@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Copyright (c) 2021-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -236,7 +236,6 @@ type
beaconGenesis*: RestGenesis
proposerTasks*: Table[Slot, seq[ProposerTask]]
dynamicFeeRecipientsStore*: ref DynamicFeeRecipientsStore
validatorsRegCache*: Table[ValidatorPubKey, SignedValidatorRegistrationV1]
blocksSeen*: Table[Slot, BlockDataItem]
rootsSeen*: Table[Eth2Digest, Slot]
processingDelay*: Opt[Duration]
@@ -1059,18 +1058,17 @@ proc isExpired(vc: ValidatorClientRef,
EPOCHS_BETWEEN_VALIDATOR_REGISTRATION

proc getValidatorRegistration(
vc: ValidatorClientRef,
validator: AttachedValidator,
timestamp: Time,
fork: Fork
): Result[PendingValidatorRegistration, RegistrationKind] =
vc: ValidatorClientRef,
validator: AttachedValidator,
timestamp: Time,
fork: Fork
): Result[PendingValidatorRegistration, RegistrationKind] =
if validator.index.isNone():
debug "Validator registration missing validator index",
validator = validatorLog(validator)
return err(RegistrationKind.MissingIndex)

let
cached = vc.validatorsRegCache.getOrDefault(validator.pubkey)
currentSlot =
block:
let res = vc.beaconClock.toSlot(timestamp)
@@ -1078,49 +1076,46 @@ proc getValidatorRegistration(
return err(RegistrationKind.IncorrectTime)
res.slot

if cached.isDefault() or vc.isExpired(cached, currentSlot):
if not cached.isDefault():
# Want to send it to the relay, but not recompute a perfectly fine cache
return ok(PendingValidatorRegistration(registration: cached, future: nil))
if validator.externalBuilderRegistration.isSome():
let cached = validator.externalBuilderRegistration.get()
return
if not(vc.isExpired(cached, currentSlot)):
err(RegistrationKind.Cached)
else:
ok(PendingValidatorRegistration(registration: cached, future: nil))

let
feeRecipient = vc.getFeeRecipient(validator, currentSlot.epoch())
gasLimit = vc.getGasLimit(validator)
var registration =
SignedValidatorRegistrationV1(
message: ValidatorRegistrationV1(
fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)),
gas_limit: gasLimit,
timestamp: uint64(timestamp.toUnix()),
pubkey: validator.pubkey
)
let
feeRecipient = vc.getFeeRecipient(validator, currentSlot.epoch())
gasLimit = vc.getGasLimit(validator)

var registration =
SignedValidatorRegistrationV1(
message: ValidatorRegistrationV1(
fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)),
gas_limit: gasLimit,
timestamp: uint64(timestamp.toUnix()),
pubkey: validator.pubkey
)
)

let sigfut = validator.getBuilderSignature(fork, registration.message)
if sigfut.finished():
# This is the short path if we are able to create the signature locally.
if not(sigfut.completed()):
let exc = sigfut.error()
debug "Got unexpected exception while signing validator registration",
validator = validatorLog(validator), error = exc.name,
reason = exc.msg
return err(RegistrationKind.ErrorSignature)
let sigres = sigfut.value()
if sigres.isErr():
debug "Failed to get signature for validator registration",
validator = validatorLog(validator), reason = sigres.error()
return err(RegistrationKind.NoSignature)
registration.signature = sigres.get()
# Updating cache table with new signed registration data
vc.validatorsRegCache[registration.message.pubkey] = registration
ok(PendingValidatorRegistration(registration: registration, future: nil))
else:
# Remote signature service involved, cache will be updated later.
ok(PendingValidatorRegistration(registration: registration,
future: sigfut))
let sigfut = validator.getBuilderSignature(fork, registration.message)
if sigfut.finished():
# This is the short path if we are able to create the signature locally.
if not(sigfut.completed()):
let exc = sigfut.error()
debug "Got unexpected exception while signing validator registration",
validator = validatorLog(validator), error = exc.name,
reason = exc.msg
return err(RegistrationKind.ErrorSignature)

registration.signature = sigfut.value().valueOr:
debug "Failed to get signature for validator registration",
validator = validatorLog(validator), reason = error
return err(RegistrationKind.NoSignature)

ok(PendingValidatorRegistration(registration: registration, future: nil))
else:
# Returning cached result.
err(RegistrationKind.Cached)
ok(PendingValidatorRegistration(registration: registration, future: sigfut))

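The refactored `getValidatorRegistration` keeps one decision at its core: reuse the cached signed registration while it is fresh, and re-sign only when it has expired. A reduced sketch of that control flow, with the registration type and the `isExpired` freshness rule stubbed out (both are hypothetical simplifications, not the client's real types):

```nim
type Registration = object
  pubkey: string
  epoch: uint64

var cache: seq[Registration]  # stand-in for the pubkey-keyed cache table

proc isExpired(reg: Registration, currentEpoch: uint64): bool =
  # hypothetical freshness rule; the real check compares epochs against
  # EPOCHS_BETWEEN_VALIDATOR_REGISTRATION
  currentEpoch > reg.epoch + 1

proc getOrRenew(pubkey: string, currentEpoch: uint64): Registration =
  for reg in cache:
    if reg.pubkey == pubkey and not reg.isExpired(currentEpoch):
      return reg                      # fresh cache hit: no new signature
  result = Registration(pubkey: pubkey, epoch: currentEpoch)  # re-sign
  cache.add(result)
```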
||||
proc prepareRegistrationList*(
|
||||
vc: ValidatorClientRef,
|
||||
@ -1131,6 +1126,7 @@ proc prepareRegistrationList*(
|
||||
|
||||
var
|
||||
messages: seq[SignedValidatorRegistrationV1]
|
||||
validators: seq[AttachedValidator]
|
||||
futures: seq[Future[SignatureResult]]
|
||||
registrations: seq[SignedValidatorRegistrationV1]
|
||||
total = vc.attachedValidators[].count()
|
||||
@ -1151,6 +1147,7 @@ proc prepareRegistrationList*(
|
||||
registrations.add(preg.registration)
|
||||
else:
|
||||
messages.add(preg.registration)
|
||||
validators.add(validator)
|
||||
futures.add(preg.future)
|
||||
else:
|
||||
case res.error()
|
||||
@ -1174,8 +1171,7 @@ proc prepareRegistrationList*(
|
||||
var reg = messages[index]
|
||||
reg.signature = sres.get()
|
||||
registrations.add(reg)
|
||||
# Updating cache table
|
||||
vc.validatorsRegCache[reg.message.pubkey] = reg
|
||||
validators[index].externalBuilderRegistration = Opt.some(reg)
|
||||
inc(succeed)
|
||||
else:
|
||||
inc(bad)
|
||||
|
@ -84,6 +84,7 @@ type
|
||||
|
||||
BuilderBid[SBBB] = object
|
||||
blindedBlckPart*: SBBB
|
||||
executionRequests*: ExecutionRequests
|
||||
executionPayloadValue*: UInt256
|
||||
consensusBlockValue*: UInt256

@ -458,7 +459,7 @@ proc makeBeaconBlockForHeadAndSlot*(
execution_payload_root: Opt[Eth2Digest],
withdrawals_root: Opt[Eth2Digest],
kzg_commitments: Opt[KzgCommitments],
execution_requests: ExecutionRequests): # TODO probably need this for builder API, otherwise remove, maybe needs to be Opt
execution_requests: ExecutionRequests):
Future[ForkedBlockResult] {.async: (raises: [CancelledError]).} =
# Advance state to the slot that we're proposing for
var cache = StateCache()
@ -561,15 +562,18 @@ proc makeBeaconBlockForHeadAndSlot*(
request_type_and_payload.toOpenArray(
1, request_type_and_payload.len - 1)
case request_type_and_payload[0]
of 0'u8: execution_requests_buffer.deposits = SSZ.decode(
request_payload,
List[DepositRequest, Limit MAX_DEPOSIT_REQUESTS_PER_PAYLOAD])
of 1'u8: execution_requests_buffer.withdrawals = SSZ.decode(
request_payload,
List[WithdrawalRequest, Limit MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD])
of 2'u8: execution_requests_buffer.consolidations = SSZ.decode(
request_payload,
List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD])
of DEPOSIT_REQUEST_TYPE:
execution_requests_buffer.deposits =
SSZ.decode(request_payload,
List[DepositRequest, Limit MAX_DEPOSIT_REQUESTS_PER_PAYLOAD])
of WITHDRAWAL_REQUEST_TYPE:
execution_requests_buffer.withdrawals =
SSZ.decode(request_payload,
List[WithdrawalRequest, Limit MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD])
of CONSOLIDATION_REQUEST_TYPE:
execution_requests_buffer.consolidations =
SSZ.decode(request_payload,
List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD])
else:
return err("Invalid execution layer request type")
except CatchableError:
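Each entry the execution layer returns here is a type-prefixed byte string: the first byte selects the request kind (matching the `*_REQUEST_TYPE` constants the hunk switches to) and the remainder is an SSZ-encoded list. A simplified, runnable model of that framing, with plain types instead of the spec's SSZ lists:

```nim
# Simplified, runnable model of the type-prefixed framing above: byte 0
# selects the request kind, the remainder is the SSZ payload (kept raw here).
type RequestKind = enum
  deposit = 0, withdrawal = 1, consolidation = 2

proc splitRequest(raw: seq[byte]): tuple[kind: RequestKind, payload: seq[byte]] =
  doAssert raw.len >= 1 and raw[0].int <= ord(consolidation)
  (RequestKind(raw[0]), raw[1 .. ^1])

let (kind, payload) = splitRequest(@[1'u8, 0xAA, 0xBB])
doAssert kind == withdrawal and payload == @[0xAA'u8, 0xBB'u8]
```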
@ -606,7 +610,7 @@ proc makeBeaconBlockForHeadAndSlot*(
slot, head = shortLog(head), error
$error

var blobsBundleOpt = Opt.none(BlobsBundle)
var blobsBundleOpt = Opt.none(deneb.BlobsBundle)
when typeof(payload).kind >= ConsensusFork.Deneb:
blobsBundleOpt = Opt.some(payload.blobsBundle)

@ -707,11 +711,23 @@ proc getBlindedExecutionPayload[
return err "getBlindedExecutionPayload: signature verification failed"

template builderBid: untyped = blindedHeader.data.message
return ok(BuilderBid[EPH](
blindedBlckPart: EPH(
execution_payload_header: builderBid.header,
blob_kzg_commitments: builderBid.blob_kzg_commitments),
executionPayloadValue: builderBid.value))
when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle:
return ok(BuilderBid[EPH](
blindedBlckPart: EPH(
execution_payload_header: builderBid.header,
blob_kzg_commitments: builderBid.blob_kzg_commitments),
executionRequests: default(ExecutionRequests),
executionPayloadValue: builderBid.value))
elif EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle or
EPH is fulu_mev.BlindedExecutionPayloadAndBlobsBundle:
return ok(BuilderBid[EPH](
blindedBlckPart: EPH(
execution_payload_header: builderBid.header,
blob_kzg_commitments: builderBid.blob_kzg_commitments),
executionRequests: builderBid.execution_requests,
executionPayloadValue: builderBid.value))
else:
static: doAssert false
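The `when EPH is ...` branches are resolved at compile time for each instantiation of the generic, and the trailing `static: doAssert false` turns an unhandled fork type into a build failure rather than a runtime one. A tiny self-contained illustration of the pattern (the types are placeholders):

```nim
# Sketch of compile-time dispatch with an exhaustiveness guard;
# DenebThing/ElectraThing are placeholder types.
type
  DenebThing = object
  ElectraThing = object

proc describe[T](x: T): string =
  when T is DenebThing:
    result = "deneb"
  elif T is ElectraThing:
    result = "electra"
  else:
    static: doAssert false   # an unexpected T fails the build

doAssert describe(DenebThing()) == "deneb"
```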

from ./message_router_mev import
copyFields, getFieldNames, unblindAndRouteBlockMEV
@ -935,7 +951,7 @@ proc getBlindedBlockParts[
slot, validator_index, head = shortLog(head)
return err("loadExecutionBlockHash failed")

executionPayloadHeader =
blindedBlockRes =
try:
awaitWithTimeout(
getBlindedExecutionPayload[EPH](
@ -949,12 +965,12 @@ proc getBlindedBlockParts[
BlindedBlockResult[EPH].err(
"getBlindedExecutionPayload REST error: " & exc.msg)

if executionPayloadHeader.isErr:
if blindedBlockRes.isErr:
warn "Could not obtain blinded execution payload header",
error = executionPayloadHeader.error, slot, validator_index,
error = blindedBlockRes.error, slot, validator_index,
head = shortLog(head)
# Haven't committed to the MEV block, so allow EL fallback.
return err(executionPayloadHeader.error)
return err(blindedBlockRes.error)

# When creating this block, need to ensure it uses the MEV-provided execution
# payload, both to avoid repeated calls to network services and to ensure the
@ -968,11 +984,12 @@ proc getBlindedBlockParts[
when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle:
type PayloadType = deneb.ExecutionPayloadForSigning
template actualEPH: untyped =
executionPayloadHeader.get.blindedBlckPart.execution_payload_header
blindedBlockRes.get.blindedBlckPart.execution_payload_header
let
withdrawals_root = Opt.some actualEPH.withdrawals_root
kzg_commitments = Opt.some(
executionPayloadHeader.get.blindedBlckPart.blob_kzg_commitments)
blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments)
execution_requests = default(ExecutionRequests)

var shimExecutionPayload: PayloadType
type DenebEPH =
@ -980,14 +997,14 @@ proc getBlindedBlockParts[
copyFields(
shimExecutionPayload.executionPayload, actualEPH, getFieldNames(DenebEPH))
elif EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle:
debugComment "verify (again, after change) this is what builder API needs"
type PayloadType = electra.ExecutionPayloadForSigning
template actualEPH: untyped =
executionPayloadHeader.get.blindedBlckPart.execution_payload_header
blindedBlockRes.get.blindedBlckPart.execution_payload_header
let
withdrawals_root = Opt.some actualEPH.withdrawals_root
kzg_commitments = Opt.some(
executionPayloadHeader.get.blindedBlckPart.blob_kzg_commitments)
blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments)
execution_requests = blindedBlockRes.get.executionRequests

var shimExecutionPayload: PayloadType
type ElectraEPH =
@ -998,11 +1015,12 @@ proc getBlindedBlockParts[
debugFuluComment "verify (again, after change) this is what builder API needs"
type PayloadType = fulu.ExecutionPayloadForSigning
template actualEPH: untyped =
executionPayloadHeader.get.blindedBlckPart.execution_payload_header
blindedBlockRes.get.blindedBlckPart.execution_payload_header
let
withdrawals_root = Opt.some actualEPH.withdrawals_root
kzg_commitments = Opt.some(
executionPayloadHeader.get.blindedBlckPart.blob_kzg_commitments)
blindedBlockRes.get.blindedBlckPart.blob_kzg_commitments)
execution_requests = blindedBlockRes.get.executionRequests

var shimExecutionPayload: PayloadType
type FuluEPH =
@ -1020,7 +1038,7 @@ proc getBlindedBlockParts[
execution_payload_root = Opt.some hash_tree_root(actualEPH),
withdrawals_root = withdrawals_root,
kzg_commitments = kzg_commitments,
execution_requests = default(ExecutionRequests))
execution_requests = execution_requests)

if newBlock.isErr():
# Haven't committed to the MEV block, so allow EL fallback.
@ -1029,8 +1047,8 @@ proc getBlindedBlockParts[
let forkedBlck = newBlock.get()

return ok(
(executionPayloadHeader.get.blindedBlckPart,
executionPayloadHeader.get.executionPayloadValue,
(blindedBlockRes.get.blindedBlckPart,
blindedBlockRes.get.executionPayloadValue,
forkedBlck.consensusBlockValue,
forkedBlck.blck))

@ -1072,11 +1090,23 @@ proc getBuilderBid[
if unsignedBlindedBlock.isErr:
return err unsignedBlindedBlock.error()

ok(BuilderBid[SBBB](
blindedBlckPart: unsignedBlindedBlock.get,
executionPayloadValue: bidValue,
consensusBlockValue: consensusValue
))
template execution_requests: untyped =
unsignedBlindedBlock.get.message.body.execution_requests
when SBBB is deneb_mev.SignedBlindedBeaconBlock:
return ok(BuilderBid[SBBB](
blindedBlckPart: unsignedBlindedBlock.get,
executionRequests: default(ExecutionRequests),
executionPayloadValue: bidValue,
consensusBlockValue: consensusValue))
elif SBBB is electra_mev.SignedBlindedBeaconBlock or
SBBB is fulu_mev.SignedBlindedBeaconBlock:
return ok(BuilderBid[SBBB](
blindedBlckPart: unsignedBlindedBlock.get,
executionRequests: execution_requests,
executionPayloadValue: bidValue,
consensusBlockValue: consensusValue))
else:
static: doAssert false

proc proposeBlockMEV(
node: BeaconNode, payloadBuilderClient: RestClientRef,
@ -1164,16 +1194,25 @@ proc makeBlindedBeaconBlockForHeadAndSlot*[BBB: ForkyBlindedBeaconBlock](
blindedBlockParts.get
withBlck(forkedBlck):
when consensusFork >= ConsensusFork.Deneb:
when ((consensusFork == ConsensusFork.Deneb and
EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle) or
(consensusFork == ConsensusFork.Electra and
EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle) or
(consensusFork == ConsensusFork.Fulu and
EPH is fulu_mev.BlindedExecutionPayloadAndBlobsBundle)):
when (consensusFork == ConsensusFork.Deneb and
EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle):
return ok(
BuilderBid[BBB](
blindedBlckPart:
constructPlainBlindedBlock[BBB](forkyBlck, executionPayloadHeader),
executionRequests: default(ExecutionRequests),
executionPayloadValue: bidValue,
consensusBlockValue: consensusValue))

elif (consensusFork == ConsensusFork.Electra and
EPH is electra_mev.BlindedExecutionPayloadAndBlobsBundle) or
(consensusFork == ConsensusFork.Fulu and
EPH is fulu_mev.BlindedExecutionPayloadAndBlobsBundle):
return ok(
BuilderBid[BBB](
blindedBlckPart:
constructPlainBlindedBlock[BBB](forkyBlck, executionPayloadHeader),
executionRequests: forkyBlck.body.execution_requests,
executionPayloadValue: bidValue,
consensusBlockValue: consensusValue))
else:
@ -1770,8 +1809,8 @@ proc signAndSendAggregate(

signAndSendAggregatedAttestations()
else:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#construct-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#aggregateandproof
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#construct-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#aggregateandproof
var msg = phase0.SignedAggregateAndProof(
message: phase0.AggregateAndProof(
aggregator_index: distinctBase validator_index,
@ -2126,7 +2165,7 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra

updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.10/specs/altair/validator.md#broadcast-sync-committee-contribution
# Wait 2 / 3 of the slot time to allow messages to propagate, then collect
# the result in aggregates
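On mainnet, with `SECONDS_PER_SLOT = 12`, the two-thirds mark lands 8 seconds into the slot, i.e. 4 seconds after the unaggregated-attestation point at one third. A sketch of the offset computation (the helper name is hypothetical):

```nim
# Sketch: where the aggregate broadcast point falls within a slot.
const SECONDS_PER_SLOT = 12'u64          # mainnet preset

func aggregateCutoff(slotStart: uint64): uint64 =
  # 2/3 of the slot: 8 seconds after slotStart on mainnet
  slotStart + SECONDS_PER_SLOT * 2 div 3

doAssert aggregateCutoff(0) == 8
```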

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -1256,9 +1256,6 @@ proc saveLockedKeystore(
keystoreDir = validatorsDir / keyName
keystoreFile = keystoreDir / KeystoreFileName

if dirExists(keystoreDir):
return err(KeystoreGenerationError(kind: DuplicateKeystoreDir,
error: "Keystore directory already exists"))
if fileExists(keystoreFile):
return err(KeystoreGenerationError(kind: DuplicateKeystoreFile,
error: "Keystore file already exists"))
@ -1335,9 +1332,6 @@ proc saveLockedKeystore(
remotes: urls,
flags: flags)

if dirExists(keystoreDir):
return err(KeystoreGenerationError(kind: DuplicateKeystoreDir,
error: "Keystore directory already exists"))
if fileExists(keystoreFile):
return err(KeystoreGenerationError(kind: DuplicateKeystoreFile,
error: "Keystore file already exists"))
@ -1491,6 +1485,7 @@ proc removeGasLimitFile*(host: KeymanagerHost,
if fileExists(path):
io2.removeFile(path).isOkOr:
return err($uint(error) & " " & ioErrorMsg(error))
host.validatorPool[].invalidateValidatorRegistration(pubkey)
ok()

proc removeGraffitiFile*(host: KeymanagerHost,
@ -1525,9 +1520,14 @@ proc setGasLimit*(host: KeymanagerHost,
? secureCreatePath(validatorKeystoreDir).mapErr(proc(e: auto): string =
"Could not create wallet directory [" & validatorKeystoreDir & "]: " & $e)

io2.writeFile(validatorKeystoreDir / GasLimitFilename, $gasLimit)
let res = io2.writeFile(validatorKeystoreDir / GasLimitFilename, $gasLimit)
.mapErr(proc(e: auto): string = "Failed to write gas limit file: " & $e)

if res.isOk:
host.validatorPool[].invalidateValidatorRegistration(pubkey)

res
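Both the remove and set paths now invalidate any cached signed validator registration after the on-disk gas limit changes, so the next builder registration is re-signed with the new value. A minimal sketch of the same write-then-invalidate pattern, using a hypothetical table-based cache in place of the validator pool:

```nim
# Sketch only: RegCache is a hypothetical stand-in for the validator
# pool's registration cache.
import std/tables

type RegCache = Table[string, string]   # pubkey -> signed registration

proc setConfigValue(cache: var RegCache, pubkey, path, value: string) =
  writeFile(path, value)                # persist the new setting first
  cache.del(pubkey)                     # then drop the stale registration
```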

proc setGraffiti*(host: KeymanagerHost,
pubkey: ValidatorPubKey,
graffiti: GraffitiBytes): Result[void, string] =
@ -1573,10 +1573,18 @@ func getPerValidatorDefaultFeeRecipient*(
(static(default(Eth1Address)))

proc getSuggestedFeeRecipient*(
host: KeymanagerHost, pubkey: ValidatorPubKey,
defaultFeeRecipient: Eth1Address):
Result[Eth1Address, ValidatorConfigFileStatus] =
host.validatorsDir.getSuggestedFeeRecipient(pubkey, defaultFeeRecipient)
host: KeymanagerHost,
pubkey: ValidatorPubKey,
defaultFeeRecipient: Eth1Address
): Result[Eth1Address, ValidatorConfigFileStatus] =
let res = getSuggestedFeeRecipient(
host.validatorsDir, pubkey, defaultFeeRecipient).valueOr:
if error == ValidatorConfigFileStatus.noSuchValidator:
# Dynamic validators do not have directories.
if host.validatorPool[].isDynamic(pubkey):
return ok(defaultFeeRecipient)
return err(error)
ok(res)
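The `valueOr` branch only runs on error, with `error` injected into scope; here it converts the `noSuchValidator` miss into a fallback for dynamic (directory-less) validators while propagating every other failure. A self-contained sketch of the pattern using nim-results; `lookupPort` is a hypothetical config reader:

```nim
# Sketch of the valueOr fallback pattern; lookupPort is a hypothetical
# config reader that can fail with "missing".
import results

proc lookupPort(name: string): Result[int, string] =
  err("missing")

proc portOrDefault(name: string, default: int): Result[int, string] =
  let port = lookupPort(name).valueOr:
    if error == "missing":
      return ok(default)        # recoverable: fall back to the default
    return err(error)           # anything else propagates
  ok(port)

doAssert portOrDefault("rpc", 9000).get() == 9000
```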

proc getSuggestedFeeRecipient(
host: KeymanagerHost, pubkey: ValidatorPubKey,
@ -1590,8 +1598,16 @@ proc getSuggestedFeeRecipient(

proc getSuggestedGasLimit*(
host: KeymanagerHost,
pubkey: ValidatorPubKey): Result[uint64, ValidatorConfigFileStatus] =
host.validatorsDir.getSuggestedGasLimit(pubkey, host.defaultGasLimit)
pubkey: ValidatorPubKey
): Result[uint64, ValidatorConfigFileStatus] =
let res = getSuggestedGasLimit(
host.validatorsDir, pubkey, host.defaultGasLimit).valueOr:
if error == ValidatorConfigFileStatus.noSuchValidator:
# Dynamic validators do not have directories.
if host.validatorPool[].isDynamic(pubkey):
return ok(host.defaultGasLimit)
return err(error)
ok(res)

proc getSuggestedGraffiti*(
host: KeymanagerHost,

@ -36,7 +36,7 @@ export results
# - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities
#
# Phase 0 spec - Honest Validator - how to avoid slashing
# - https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#how-to-avoid-slashing
# - https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#how-to-avoid-slashing
#
# In-depth reading on slashing conditions
#
@ -58,7 +58,7 @@ export results
# 2. An attester can get slashed for signing
# two attestations that together violate
# the Casper FFG slashing conditions.
# - https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#ffg-vote
# - https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#ffg-vote
# The "source" is the current_justified_epoch
# The "target" is the current_epoch
#

@ -257,6 +257,15 @@ proc removeValidator*(pool: var ValidatorPool, pubkey: ValidatorPubKey) =
validator = shortLog(validator)
validators.set(pool.count().int64)

proc isDynamic*(pool: var ValidatorPool, pubkey: ValidatorPubKey): bool =
## Returns ``true`` if the attached validator exists and is dynamic.
let validator = pool.validators.getOrDefault(pubkey)
if not(isNil(validator)):
if (validator.kind == ValidatorKind.Remote) and
(RemoteKeystoreFlag.DynamicKeystore in validator.data.flags):
return true
false

func needsUpdate*(validator: AttachedValidator): bool =
validator.index.isNone() or validator.activationEpoch == FAR_FUTURE_EPOCH

@ -526,7 +535,7 @@ proc signData(v: AttachedValidator,
else:
v.signWithDistributedKey(request)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#signature
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#signature
proc getBlockSignature*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, slot: Slot,
block_root: Eth2Digest,
@ -882,7 +891,7 @@ proc getSyncCommitteeMessage*(v: AttachedValidator,
)
)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/altair/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/altair/validator.md#aggregation-selection
proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest,
slot: Slot,
@ -918,7 +927,7 @@ proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork,
fork, genesis_validators_root, contribution_and_proof)
await v.signData(request)

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#randao-reveal
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#randao-reveal
proc getEpochSignature*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, epoch: Epoch
): Future[SignatureResult]

@ -18,7 +18,7 @@ const
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"

versionMajor* = 25
versionMinor* = 1
versionMinor* = 2
versionBuild* = 0

versionBlob* = "stateofus" # Single word - ends up in the default graffiti

@ -4180,7 +4180,7 @@
"response": {
"status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": ["data"], "value": {"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","MAX_EFFECTIVE_BALANCE_ELECTRA":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","ELECTRA_FORK_VERSION":"","ELECTRA_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK_ELECTRA":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","GOSSIP_MAX_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","MAX_CHUNK_SIZE":"","TTFB_TIMEOUT":"","RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO"
:"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":""}}]
"body": [{"operator": "jstructcmps", "start": ["data"], "value": {"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","MAX_EFFECTIVE_BALANCE_ELECTRA":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","ELECTRA_FORK_VERSION":"","ELECTRA_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK_ELECTRA":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","GOSSIP_MAX_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","MAX_CHUNK_SIZE":"","TTFB_TIMEOUT":"","RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO"
:"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":"", "UNSET_DEPOSIT_REQUESTS_START_INDEX":"", "FULL_EXIT_REQUEST_AMOUNT": "", "COMPOUNDING_WITHDRAWAL_PREFIX": "", "DEPOSIT_REQUEST_TYPE": "", "WITHDRAWAL_REQUEST_TYPE": "", "CONSOLIDATION_REQUEST_TYPE": "", "MIN_ACTIVATION_BALANCE": "", "MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA": "", "WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA": "", "PENDING_DEPOSITS_LIMIT": "", "PENDING_PARTIAL_WITHDRAWALS_LIMIT": "", "PENDING_CONSOLIDATIONS_LIMIT": "", "MAX_ATTESTER_SLASHINGS_ELECTRA": "", "MAX_ATTESTATIONS_ELECTRA": "", "MAX_DEPOSIT_REQUESTS_PER_PAYLOAD": "", "MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD": "", "MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD": "", "MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP": "", "MAX_PENDING_DEPOSITS_PER_EPOCH": "", "MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA": "", "MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT": ""}}]
}
},
{

@ -14,6 +14,23 @@
"londonBlock":0,
"shanghaiTime":SHANGHAI_FORK_TIME,
"cancunTime":CANCUN_FORK_TIME,
"blobSchedule": {
"cancun": {
"target": 3,
"max": 6,
"baseFeeUpdateFraction": 3338477
},
"prague": {
"target": 6,
"max": 9,
"baseFeeUpdateFraction": 5007716
},
"osaka": {
"target": 9,
"max": 12,
"baseFeeUpdateFraction": 5007716
}
},
"pragueTime":PRAGUE_FORK_TIME,
"mergeForkBlock":0,
"mergeNetsplitBlock":0,

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -16,12 +16,11 @@ source "${SCRIPTS_DIR}/bash_utils.sh"

: ${CURL_BINARY:="curl"}
: ${STABLE_GETH_BINARY:="${BUILD_DIR}/downloads/geth$EXE_EXTENSION"}
: ${GETH_CAPELLA_BINARY:="$STABLE_GETH_BINARY"}
: ${GETH_DENEB_BINARY:="$STABLE_GETH_BINARY"}

download_geth_stable() {
if [[ ! -e "${STABLE_GETH_BINARY}" ]]; then
GETH_VERSION="1.14.12-293a300d" # https://geth.ethereum.org/downloads
GETH_VERSION="1.15.0-756cca7c" # https://geth.ethereum.org/downloads
GETH_URL="https://gethstore.blob.core.windows.net/builds/"

case "${OS}-${ARCH}" in
@ -106,10 +105,6 @@ download_status_geth_binary() {
fi
}

download_geth_capella() {
download_geth_stable
}

download_geth_deneb() {
download_geth_stable
}

@ -443,13 +443,8 @@ LAST_SIGNER_NODE_IDX=$(( SIGNER_NODES - 1 ))
if [[ "${RUN_GETH}" == "1" ]]; then
source "${SCRIPTS_DIR}/geth_binaries.sh"

if [[ $DENEB_FORK_EPOCH -lt $STOP_AT_EPOCH ]]; then
download_geth_deneb
GETH_BINARY="$GETH_DENEB_BINARY"
else
download_geth_capella
GETH_BINARY="$GETH_CAPELLA_BINARY"
fi
download_geth_deneb
GETH_BINARY="$GETH_DENEB_BINARY"

source ./scripts/geth_vars.sh
fi
@ -810,7 +805,7 @@ if [[ "$REUSE_EXISTING_DATA_DIR" == "0" ]]; then
--out-secrets-dir="${SECRETS_DIR}" \
--out-deposits-file="${DEPOSITS_FILE}" \
--threshold=${REMOTE_SIGNER_THRESHOLD} \
--remote-validators-count=${REMOTE_VALIDATORS_COUNT} \
--remote-validators-count="${REMOTE_VALIDATORS_COUNT}" \
${REMOTE_URLS}
fi

@ -898,7 +893,7 @@ done
--genesis-time=$GENESIS_TIME \
--capella-fork-epoch=0 \
--deneb-fork-epoch=$DENEB_FORK_EPOCH \
--electra-fork-epoch=$ELECTRA_FORK_EPOCH \
--electra-fork-epoch="${ELECTRA_FORK_EPOCH}" \
--execution-genesis-block="$EXECUTION_GENESIS_BLOCK_JSON"

DIRECTPEER_ENR=$(
@ -995,7 +990,7 @@ CONTAINER_BOOTSTRAP_ENR="${CONTAINER_DATA_DIR}/node${BOOTSTRAP_NODE}/beacon_node
# --web3-url="$MAIN_WEB3_URL" \
# --deposit-contract=$DEPOSIT_CONTRACT_ADDRESS > "$DATA_DIR/log_deposit_maker.txt" 2>&1 &

for NUM_NODE in $(seq 1 $NUM_NODES); do
for NUM_NODE in $(seq 1 "${NUM_NODES}"); do
# Copy validators to individual nodes.
# The first $NODES_WITH_VALIDATORS nodes split them equally between them,
# after skipping the first $USER_VALIDATORS.
@ -1077,19 +1072,19 @@ for NUM_NODE in $(seq 1 "${NUM_NODES}"); do
NODE_DATA_DIR="${DATA_DIR}/node${NUM_NODE}"
CONTAINER_NODE_DATA_DIR="${CONTAINER_DATA_DIR}/node${NUM_NODE}"
VALIDATOR_DATA_DIR="${DATA_DIR}/validator${NUM_NODE}"
if [[ ${NUM_NODE} == ${BOOTSTRAP_NODE} ]]; then
if [[ ${NUM_NODE} == "${BOOTSTRAP_NODE}" ]]; then
# Due to star topology, the bootstrap node must relay all attestations,
# even if it itself is not interested. --subscribe-all-subnets could be
# removed by switching to a fully-connected topology.
BOOTSTRAP_ARG="--netkey-file=${CONTAINER_BOOTSTRAP_NETWORK_KEYFILE} --insecure-netkey-password=true --subscribe-all-subnets --direct-peer=$DIRECTPEER_ENR"
elif [[ ${NUM_NODE} == ${DIRECTPEER_NODE} ]]; then
elif [[ ${NUM_NODE} == "${DIRECTPEER_NODE}" ]]; then
# Start a node using the Direct Peer functionality instead of regular bootstrapping
BOOTSTRAP_ARG="--netkey-file=${DIRECTPEER_NETWORK_KEYFILE} --direct-peer=$(cat $CONTAINER_BOOTSTRAP_ENR) --insecure-netkey-password=true"
else
BOOTSTRAP_ARG="--bootstrap-file=${CONTAINER_BOOTSTRAP_ENR}"
fi

if [[ ${NUM_NODE} != ${BOOTSTRAP_NODE} ]]; then
if [[ ${NUM_NODE} != "${BOOTSTRAP_NODE}" ]]; then
if [[ "${CONST_PRESET}" == "minimal" ]]; then
# The fast epoch and slot times in the minimal config might cause the
# mesh to break down due to re-subscriptions happening within the prune

@ -23,7 +23,7 @@ import
# Test utilities
../../testutil, ../../testblockutil

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44
proc compute_aggregate_sync_committee_signature(
cfg: RuntimeConfig,
forked: ForkedHashedBeaconState,

@ -90,7 +90,7 @@ type
rewards*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]
penalties*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/specs/phase0/validator.md#eth1block
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/specs/phase0/validator.md#eth1block
Eth1Block* = object
timestamp*: uint64
deposit_root*: Eth2Digest

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Copyright (c) 2022-2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -9,4 +9,6 @@
{.used.}

import
./test_fixture_ssz_consensus_objects
./test_fixture_operations,
./test_fixture_ssz_consensus_objects,
./test_fixture_state_transition_epoch

294
tests/consensus_spec/fulu/test_fixture_operations.nim
Normal file
@ -0,0 +1,294 @@
# beacon_chain
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}
{.used.}

import
# Utilities
chronicles,
unittest2,
# Beacon chain internals
../../../beacon_chain/spec/state_transition_block,
../../../beacon_chain/spec/datatypes/fulu,
# Test utilities
../../testutil,
../fixtures_utils, ../os_ops,
../../helpers/debug_state

from std/sequtils import anyIt, mapIt, toSeq
from std/strutils import contains
from ../../../beacon_chain/spec/beaconstate import
get_base_reward_per_increment, get_state_exit_queue_info,
get_total_active_balance, latest_block_root, process_attestation

const
OpDir = SszTestsDir/const_preset/"fulu"/"operations"
OpAttestationsDir = OpDir/"attestation"
OpAttSlashingDir = OpDir/"attester_slashing"
OpBlockHeaderDir = OpDir/"block_header"
OpBlsToExecutionChangeDir = OpDir/"bls_to_execution_change"
OpConsolidationRequestDir = OpDir/"consolidation_request"
OpDepositRequestDir = OpDir/"deposit_request"
OpDepositsDir = OpDir/"deposit"
OpWithdrawalRequestDir = OpDir/"withdrawal_request"
OpExecutionPayloadDir = OpDir/"execution_payload"
OpProposerSlashingDir = OpDir/"proposer_slashing"
OpSyncAggregateDir = OpDir/"sync_aggregate"
OpVoluntaryExitDir = OpDir/"voluntary_exit"
OpWithdrawalsDir = OpDir/"withdrawals"

baseDescription = "EF - Fulu - Operations - "


const testDirs = toHashSet([
OpAttestationsDir, OpAttSlashingDir, OpBlockHeaderDir,
OpBlsToExecutionChangeDir, OpConsolidationRequestDir, OpDepositRequestDir,
OpDepositsDir, OpWithdrawalRequestDir, OpExecutionPayloadDir,
OpProposerSlashingDir, OpSyncAggregateDir, OpVoluntaryExitDir,
OpWithdrawalsDir])

doAssert toHashSet(
mapIt(toSeq(walkDir(OpDir, relative = false)), it.path)) == testDirs

proc runTest[T, U](
testSuiteDir, suiteName, opName, applyFile: string,
applyProc: U, identifier: string) =
let testDir = testSuiteDir / "pyspec_tests" / identifier

let prefix =
if fileExists(testDir/"post.ssz_snappy"):
"[Valid] "
else:
"[Invalid] "

test prefix & baseDescription & opName & " - " & identifier:
let preState = newClone(
parseTest(testDir/"pre.ssz_snappy", SSZ, fulu.BeaconState))
let done = applyProc(
preState[], parseTest(testDir/(applyFile & ".ssz_snappy"), SSZ, T))

if fileExists(testDir/"post.ssz_snappy"):
let postState =
newClone(parseTest(
testDir/"post.ssz_snappy", SSZ, fulu.BeaconState))

reportDiff(preState, postState)
check:
done.isOk()
preState[].hash_tree_root() == postState[].hash_tree_root()
else:
check: done.isErr() # No post state = processing should fail

suite baseDescription & "Attestation " & preset():
proc applyAttestation(
preState: var fulu.BeaconState, attestation: electra.Attestation):
Result[void, cstring] =
var cache: StateCache
let
total_active_balance = get_total_active_balance(preState, cache)
base_reward_per_increment =
get_base_reward_per_increment(total_active_balance)

# This returns the proposer reward for including the attestation, which
# isn't tested here.
discard ? process_attestation(
preState, attestation, {strictVerification}, base_reward_per_increment, cache)
ok()

for path in walkTests(OpAttestationsDir):
runTest[electra.Attestation, typeof applyAttestation](
OpAttestationsDir, suiteName, "Attestation", "attestation",
applyAttestation, path)

suite baseDescription & "Attester Slashing " & preset():
proc applyAttesterSlashing(
preState: var fulu.BeaconState,
attesterSlashing: electra.AttesterSlashing): Result[void, cstring] =
var cache: StateCache
doAssert (? process_attester_slashing(
defaultRuntimeConfig, preState, attesterSlashing, {},
get_state_exit_queue_info(preState), cache))[0] > 0.Gwei
ok()

for path in walkTests(OpAttSlashingDir):
runTest[electra.AttesterSlashing, typeof applyAttesterSlashing](
OpAttSlashingDir, suiteName, "Attester Slashing", "attester_slashing",
applyAttesterSlashing, path)

suite baseDescription & "Block Header " & preset():
proc applyBlockHeader(
preState: var fulu.BeaconState, blck: fulu.BeaconBlock):
Result[void, cstring] =
if blck.is_execution_block:
check blck.body.execution_payload.block_hash ==
blck.compute_execution_block_hash()
var cache: StateCache
process_block_header(preState, blck, {}, cache)

for path in walkTests(OpBlockHeaderDir):
runTest[fulu.BeaconBlock, typeof applyBlockHeader](
OpBlockHeaderDir, suiteName, "Block Header", "block",
applyBlockHeader, path)

from ../../../beacon_chain/spec/datatypes/capella import
SignedBLSToExecutionChange

suite baseDescription & "BLS to execution change " & preset():
proc applyBlsToExecutionChange(
preState: var fulu.BeaconState,
signed_address_change: SignedBLSToExecutionChange):
Result[void, cstring] =
process_bls_to_execution_change(
defaultRuntimeConfig, preState, signed_address_change)

for path in walkTests(OpBlsToExecutionChangeDir):
runTest[SignedBLSToExecutionChange, typeof applyBlsToExecutionChange](
OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change",
applyBlsToExecutionChange, path)

from ".."/".."/".."/beacon_chain/validator_bucket_sort import
sortValidatorBuckets

suite baseDescription & "Consolidation Request " & preset():
proc applyConsolidationRequest(
preState: var fulu.BeaconState,
consolidation_request: ConsolidationRequest): Result[void, cstring] =
var cache: StateCache
process_consolidation_request(
defaultRuntimeConfig, preState,
sortValidatorBuckets(preState.validators.asSeq)[],
consolidation_request, cache)
ok()

for path in walkTests(OpConsolidationRequestDir):
runTest[ConsolidationRequest, typeof applyConsolidationRequest](
OpConsolidationRequestDir, suiteName, "Consolidation Request",
"consolidation_request", applyConsolidationRequest, path)

suite baseDescription & "Deposit " & preset():
func applyDeposit(
preState: var fulu.BeaconState, deposit: Deposit):
Result[void, cstring] =
process_deposit(
defaultRuntimeConfig, preState,
sortValidatorBuckets(preState.validators.asSeq)[], deposit, {})

for path in walkTests(OpDepositsDir):
runTest[Deposit, typeof applyDeposit](
OpDepositsDir, suiteName, "Deposit", "deposit", applyDeposit, path)

suite baseDescription & "Deposit Request " & preset():
func applyDepositRequest(
preState: var fulu.BeaconState, depositRequest: DepositRequest):
Result[void, cstring] =
process_deposit_request(
defaultRuntimeConfig, preState, depositRequest, {})

for path in walkTests(OpDepositRequestDir):
runTest[DepositRequest, typeof applyDepositRequest](
OpDepositRequestDir, suiteName, "Deposit Request", "deposit_request",
applyDepositRequest, path)

suite baseDescription & "Execution Payload " & preset():
func makeApplyExecutionPayloadCb(path: string): auto =
return proc(
preState: var fulu.BeaconState, body: fulu.BeaconBlockBody):
Result[void, cstring] {.raises: [IOError].} =
let payloadValid = os_ops.readFile(
OpExecutionPayloadDir/"pyspec_tests"/path/"execution.yaml"
).contains("execution_valid: true")
if payloadValid and body.is_execution_block and
not body.execution_payload.transactions.anyIt(it.len == 0):
let expectedOk = (path != "incorrect_block_hash")
check expectedOk == (body.execution_payload.block_hash ==
body.compute_execution_block_hash(
preState.latest_block_root(
assignClone(preState)[].hash_tree_root())))
func executePayload(_: fulu.ExecutionPayload): bool = payloadValid
process_execution_payload(
defaultRuntimeConfig, preState, body, executePayload)

for path in walkTests(OpExecutionPayloadDir):
let applyExecutionPayload = makeApplyExecutionPayloadCb(path)
runTest[fulu.BeaconBlockBody, typeof applyExecutionPayload](
OpExecutionPayloadDir, suiteName, "Execution Payload", "body",
applyExecutionPayload, path)
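`makeApplyExecutionPayloadCb` exists because the callback needs the loop's `path` at call time: returning a fresh closure per iteration captures each value separately instead of sharing the loop variable. A minimal illustration of the same capture pattern:

```nim
# Sketch: building one closure per item so each captures its own value.
proc makeGreeter(name: string): auto =
  return proc(): string = "hello, " & name

let greeters = @[makeGreeter("a"), makeGreeter("b")]
doAssert greeters[0]() == "hello, a"
doAssert greeters[1]() == "hello, b"
```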

suite baseDescription & "Withdrawal Request " & preset():
func applyWithdrawalRequest(
preState: var fulu.BeaconState, withdrawalRequest: WithdrawalRequest):
Result[void, cstring] =
var cache: StateCache
process_withdrawal_request(
defaultRuntimeConfig, preState,
sortValidatorBuckets(preState.validators.asSeq)[], withdrawalRequest,
cache)
ok()

for path in walkTests(OpWithdrawalRequestDir):
runTest[WithdrawalRequest, typeof applyWithdrawalRequest](
OpWithdrawalRequestDir, suiteName, "Withdrawal Request",
"withdrawal_request", applyWithdrawalRequest, path)

suite baseDescription & "Proposer Slashing " & preset():
proc applyProposerSlashing(
preState: var fulu.BeaconState, proposerSlashing: ProposerSlashing):
Result[void, cstring] =
var cache: StateCache
doAssert (? process_proposer_slashing(
defaultRuntimeConfig, preState, proposerSlashing, {},
get_state_exit_queue_info(preState), cache))[0] > 0.Gwei
ok()

for path in walkTests(OpProposerSlashingDir):
runTest[ProposerSlashing, typeof applyProposerSlashing](
OpProposerSlashingDir, suiteName, "Proposer Slashing", "proposer_slashing",
applyProposerSlashing, path)

suite baseDescription & "Sync Aggregate " & preset():
proc applySyncAggregate(
preState: var fulu.BeaconState, syncAggregate: SyncAggregate):
Result[void, cstring] =
var cache: StateCache
discard ? process_sync_aggregate(
preState, syncAggregate, get_total_active_balance(preState, cache),
{}, cache)
ok()

for path in walkTests(OpSyncAggregateDir):
runTest[SyncAggregate, typeof applySyncAggregate](
OpSyncAggregateDir, suiteName, "Sync Aggregate", "sync_aggregate",
applySyncAggregate, path)

suite baseDescription & "Voluntary Exit " & preset():
proc applyVoluntaryExit(
preState: var fulu.BeaconState, voluntaryExit: SignedVoluntaryExit):
Result[void, cstring] =
var cache: StateCache
if process_voluntary_exit(
defaultRuntimeConfig, preState, voluntaryExit, {},
get_state_exit_queue_info(preState), cache).isOk:
ok()
else:
err("")

for path in walkTests(OpVoluntaryExitDir):
runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
OpVoluntaryExitDir, suiteName, "Voluntary Exit", "voluntary_exit",
applyVoluntaryExit, path)

suite baseDescription & "Withdrawals " & preset():
func applyWithdrawals(
preState: var fulu.BeaconState,
executionPayload: fulu.ExecutionPayload): Result[void, cstring] =
process_withdrawals(preState, executionPayload)

for path in walkTests(OpWithdrawalsDir):
runTest[fulu.ExecutionPayload, typeof applyWithdrawals](
OpWithdrawalsDir, suiteName, "Withdrawals", "execution_payload",
applyWithdrawals, path)

88
tests/consensus_spec/fulu/test_fixture_rewards.nim
Normal file
@ -0,0 +1,88 @@
# beacon_chain
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}
{.used.}

import
# Beacon chain internals
../../../beacon_chain/spec/[beaconstate, validator, helpers, state_transition_epoch],
../../../beacon_chain/spec/datatypes/[altair, fulu],
# Test utilities
../../testutil,
../fixtures_utils, ../os_ops

const
RewardsDirBase = SszTestsDir/const_preset/"fulu"/"rewards"
RewardsDirBasic = RewardsDirBase/"basic"/"pyspec_tests"
RewardsDirLeak = RewardsDirBase/"leak"/"pyspec_tests"
RewardsDirRandom = RewardsDirBase/"random"/"pyspec_tests"

func init(T: type Deltas, len: int): T =
if not result.rewards.setLen(len):
raiseAssert "setLen"
if not result.penalties.setLen(len):
raiseAssert "setLen"

proc runTest(rewardsDir, identifier: string) =
let testDir = rewardsDir / identifier

var info: altair.EpochInfo

let
state = newClone(
parseTest(testDir/"pre.ssz_snappy", SSZ, fulu.BeaconState))
flagDeltas = [
parseTest(testDir/"source_deltas.ssz_snappy", SSZ, Deltas),
parseTest(testDir/"target_deltas.ssz_snappy", SSZ, Deltas),
parseTest(testDir/"head_deltas.ssz_snappy", SSZ, Deltas)]
inactivityPenaltyDeltas =
parseTest(testDir/"inactivity_penalty_deltas.ssz_snappy", SSZ, Deltas)

info.init(state[])
let
total_balance = info.balances.current_epoch
base_reward_per_increment = get_base_reward_per_increment(total_balance)

var
flagDeltas2: array[TimelyFlag, Deltas] = [
Deltas.init(state[].validators.len),
Deltas.init(state[].validators.len),
Deltas.init(state[].validators.len)]
inactivityPenaltyDeltas2 = Deltas.init(state[].validators.len)

let finality_delay = get_finality_delay(state[])

for validator_index, reward0, reward1, reward2, penalty0, penalty1, penalty2
in get_flag_and_inactivity_deltas(
defaultRuntimeConfig, state[], base_reward_per_increment, info,
finality_delay):
if not is_eligible_validator(info.validators[validator_index]):
continue
flagDeltas2[TimelyFlag.TIMELY_SOURCE_FLAG_INDEX].rewards[validator_index] =
reward0
flagDeltas2[TimelyFlag.TIMELY_TARGET_FLAG_INDEX].rewards[validator_index] =
reward1
flagDeltas2[TimelyFlag.TIMELY_HEAD_FLAG_INDEX].rewards[validator_index] =
reward2
flagDeltas2[TimelyFlag.TIMELY_SOURCE_FLAG_INDEX].penalties[validator_index] =
penalty0
flagDeltas2[TimelyFlag.TIMELY_TARGET_FLAG_INDEX].penalties[validator_index] =
penalty1
flagDeltas2[TimelyFlag.TIMELY_HEAD_FLAG_INDEX].penalties[validator_index] =
0.Gwei
inactivityPenaltyDeltas2.penalties[validator_index] = penalty2

check:
flagDeltas == flagDeltas2
inactivityPenaltyDeltas == inactivityPenaltyDeltas2

suite "EF - Fulu - Rewards " & preset():
for rewardsDir in [RewardsDirBasic, RewardsDirLeak, RewardsDirRandom]:
for kind, path in walkDir(rewardsDir, relative = true, checkDir = true):
test "EF - Fulu - Rewards - " & path & preset():
runTest(rewardsDir, path)

@ -0,0 +1,165 @@
# beacon_chain
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}
{.used.}

import
# Status internals
chronicles,
# Beacon chain internals
../../../beacon_chain/spec/[presets, state_transition_epoch],
../../../beacon_chain/spec/datatypes/altair,
# Test utilities
../../testutil,
../fixtures_utils, ../os_ops,
./test_fixture_rewards,
../../helpers/debug_state

from std/sequtils import mapIt, toSeq
from std/strutils import rsplit
from ../../../beacon_chain/spec/datatypes/fulu import BeaconState

const
RootDir = SszTestsDir/const_preset/"fulu"/"epoch_processing"

JustificationFinalizationDir = RootDir/"justification_and_finalization"
InactivityDir = RootDir/"inactivity_updates"
RegistryUpdatesDir = RootDir/"registry_updates"
SlashingsDir = RootDir/"slashings"
Eth1DataResetDir = RootDir/"eth1_data_reset"
EffectiveBalanceUpdatesDir = RootDir/"effective_balance_updates"
SlashingsResetDir = RootDir/"slashings_reset"
RandaoMixesResetDir = RootDir/"randao_mixes_reset"
ParticipationFlagDir = RootDir/"participation_flag_updates"
SyncCommitteeDir = RootDir/"sync_committee_updates"
RewardsAndPenaltiesDir = RootDir/"rewards_and_penalties"
HistoricalSummariesUpdateDir = RootDir/"historical_summaries_update"
PendingConsolidationsDir = RootDir/"pending_consolidations"
PendingDepositsDir = RootDir/"pending_deposits"

doAssert (toHashSet(mapIt(toSeq(walkDir(RootDir, relative = false)), it.path)) -
toHashSet([SyncCommitteeDir])) ==
toHashSet([
JustificationFinalizationDir, InactivityDir, RegistryUpdatesDir,
SlashingsDir, Eth1DataResetDir, EffectiveBalanceUpdatesDir,
SlashingsResetDir, RandaoMixesResetDir, ParticipationFlagDir,
RewardsAndPenaltiesDir, HistoricalSummariesUpdateDir,
PendingDepositsDir, PendingConsolidationsDir])

template runSuite(
suiteDir, testName: string, transitionProc: untyped): untyped =
suite "EF - Fulu - Epoch Processing - " & testName & preset():
for testDir in walkDirRec(
suiteDir / "pyspec_tests", yieldFilter = {pcDir}, checkDir = true):
let unitTestName = testDir.rsplit(DirSep, 1)[1]
test testName & " - " & unitTestName & preset():
# BeaconState objects are stored on the heap to avoid stack overflow
type T = fulu.BeaconState
let preState {.inject.} = newClone(parseTest(testDir/"pre.ssz_snappy", SSZ, T))
var cache {.inject, used.} = StateCache()
template state: untyped {.inject, used.} = preState[]
template cfg: untyped {.inject, used.} = defaultRuntimeConfig

if transitionProc.isOk:
let postState =
newClone(parseTest(testDir/"post.ssz_snappy", SSZ, T))
check: hash_tree_root(preState[]) == hash_tree_root(postState[])
reportDiff(preState, postState)
else:
check: not fileExists(testDir/"post.ssz_snappy")
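`runSuite` is a template, so each `transitionProc` body below is spliced in at the call site and can see the `{.inject.}`-marked `state`, `cfg` and `cache` symbols. A tiny self-contained example of the same injection idiom:

```nim
# Sketch: a template that injects a symbol for its untyped body to use.
template withCounter(body: untyped): untyped =
  var counter {.inject.} = 0
  body
  doAssert counter == 3

withCounter:
  counter += 3   # `counter` is visible here thanks to {.inject.}
```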

# Justification & Finalization
# ---------------------------------------------------------------
runSuite(JustificationFinalizationDir, "Justification & Finalization"):
  let info = altair.EpochInfo.init(state)
  process_justification_and_finalization(state, info.balances)
  Result[void, cstring].ok()

# Inactivity updates
# ---------------------------------------------------------------
runSuite(InactivityDir, "Inactivity"):
  let info = altair.EpochInfo.init(state)
  process_inactivity_updates(cfg, state, info)
  Result[void, cstring].ok()

# Rewards & Penalties
# ---------------------------------------------------------------
runSuite(RewardsAndPenaltiesDir, "Rewards and penalties"):
  var info = altair.EpochInfo.init(state)
  process_rewards_and_penalties(cfg, state, info)
  Result[void, cstring].ok()

# rest in test_fixture_rewards

# Registry updates
# ---------------------------------------------------------------
runSuite(RegistryUpdatesDir, "Registry updates"):
  process_registry_updates(cfg, state, cache)

# Slashings
# ---------------------------------------------------------------
runSuite(SlashingsDir, "Slashings"):
  let info = altair.EpochInfo.init(state)
  process_slashings(state, info.balances.current_epoch)
  Result[void, cstring].ok()

# Eth1 data reset
# ---------------------------------------------------------------
runSuite(Eth1DataResetDir, "Eth1 data reset"):
  process_eth1_data_reset(state)
  Result[void, cstring].ok()

# Effective balance updates
# ---------------------------------------------------------------
runSuite(EffectiveBalanceUpdatesDir, "Effective balance updates"):
  process_effective_balance_updates(state)
  Result[void, cstring].ok()

# Slashings reset
# ---------------------------------------------------------------
runSuite(SlashingsResetDir, "Slashings reset"):
  process_slashings_reset(state)
  Result[void, cstring].ok()

# RANDAO mixes reset
# ---------------------------------------------------------------
runSuite(RandaoMixesResetDir, "RANDAO mixes reset"):
  process_randao_mixes_reset(state)
  Result[void, cstring].ok()

# Historical summaries update
# ---------------------------------------------------------------
runSuite(HistoricalSummariesUpdateDir, "Historical summaries update"):
  process_historical_summaries_update(state)

# Participation flag updates
# ---------------------------------------------------------------
runSuite(ParticipationFlagDir, "Participation flag updates"):
  process_participation_flag_updates(state)
  Result[void, cstring].ok()

# Pending deposits
# ---------------------------------------------------------------
runSuite(PendingDepositsDir, "Pending deposits"):
  process_pending_deposits(cfg, state, cache)

# Pending consolidations
# ---------------------------------------------------------------
runSuite(PendingConsolidationsDir, "Pending consolidations"):
  process_pending_consolidations(cfg, state)

# Sync committee updates
# ---------------------------------------------------------------

# These are only for minimal, not mainnet
when const_preset == "minimal":
  runSuite(SyncCommitteeDir, "Sync committee updates"):
    process_sync_committee_updates(state)
    Result[void, cstring].ok()
else:
  doAssert not dirExists(SyncCommitteeDir)
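The when const_preset == "minimal" gate above is resolved at compile time, so the mainnet build contains no sync-committee suite at all and can instead assert the directory's absence. A sketch of the mechanism with an ordinary {.strdefine.} constant (buildPreset is an illustrative name, not the repo's symbol):

const buildPreset {.strdefine.} = "mainnet"  # override with -d:buildPreset=minimal

when buildPreset == "minimal":
  echo "sync committee suites compiled into this binary"
else:
  echo "sync committee suites excluded from this binary"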
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Copyright (c) 2021-2025 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -85,4 +85,13 @@ suite "EF - Electra - Fork " & preset():
    SszTestsDir/const_preset/"electra"/"fork"/"fork"/"pyspec_tests"
  for kind, path in walkDir(OpForkDir, relative = true, checkDir = true):
    runTest(deneb.BeaconState, electra.BeaconState, "Electra", OpForkDir,
      upgrade_to_electra, suiteName, path)
      upgrade_to_electra, suiteName, path)

from ../../beacon_chain/spec/datatypes/fulu import BeaconState

suite "EF - Fulu - Fork " & preset():
  const OpForkDir =
    SszTestsDir/const_preset/"fulu"/"fork"/"fork"/"pyspec_tests"
  for kind, path in walkDir(OpForkDir, relative = true, checkDir = true):
    runTest(electra.BeaconState, fulu.BeaconState, "Fulu", OpForkDir,
      upgrade_to_fulu, suiteName, path)
@ -10,12 +10,12 @@

import
  # Status libraries
  stew/byteutils, chronicles,
  chronicles,
  taskpools,
  # Internals
  ../../beacon_chain/spec/[helpers, forks, state_transition_block],
  ../../beacon_chain/spec/forks,
  ../../beacon_chain/fork_choice/[fork_choice, fork_choice_types],
  ../../beacon_chain/[beacon_chain_db, beacon_clock],
  ../../beacon_chain/beacon_chain_db,
  ../../beacon_chain/consensus_object_pools/[
    blockchain_dag, block_clearance, block_quarantine, spec_cache],
  # Third-party
@ -28,7 +28,10 @@ from std/json import
  JsonNode, getBool, getInt, getStr, hasKey, items, len, pairs, `$`, `[]`
from std/sequtils import mapIt, toSeq
from std/strutils import contains
from stew/byteutils import fromHex
from ../testbcutil import addHeadBlock
from ../../beacon_chain/spec/state_transition_block import
  check_attester_slashing, validate_blobs

# Test format described at https://github.com/ethereum/consensus-specs/tree/v1.3.0/tests/formats/fork_choice
# Note that our implementation has been optimized with "ProtoArray"
@ -37,10 +40,12 @@ from ../testbcutil import addHeadBlock
type
  OpKind = enum
    opOnTick
    opOnAttestation
    opOnPhase0Attestation
    opOnElectraAttestation
    opOnBlock
    opOnMergeBlock
    opOnAttesterSlashing
    opOnPhase0AttesterSlashing
    opOnElectraAttesterSlashing
    opInvalidateHash
    opChecks

@ -54,15 +59,19 @@ type
    case kind: OpKind
    of opOnTick:
      tick: int
    of opOnAttestation:
      att: phase0.Attestation
    of opOnPhase0Attestation:
      phase0Att: phase0.Attestation
    of opOnElectraAttestation:
      electraAtt: electra.Attestation
    of opOnBlock:
      blck: ForkedSignedBeaconBlock
      blobData: Opt[BlobData]
    of opOnMergeBlock:
      powBlock: PowBlock
    of opOnAttesterSlashing:
      attesterSlashing: phase0.AttesterSlashing
    of opOnPhase0AttesterSlashing:
      phase0AttesterSlashing: phase0.AttesterSlashing
    of opOnElectraAttesterSlashing:
      electraAttesterSlashing: electra.AttesterSlashing
    of opInvalidateHash:
      invalidatedHash: Eth2Digest
      latestValidHash: Eth2Digest
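The Operation type above is a Nim object variant: one enum discriminator, one payload field per step kind, so each fork-choice step carries exactly the data its handler needs. A self-contained sketch of the pattern (SketchStep and friends are illustrative names only):

type
  SketchStepKind = enum
    sskTick, sskPhase0Att, sskElectraAtt
  SketchStep = object
    case kind: SketchStepKind
    of sskTick:
      tick: int
    of sskPhase0Att, sskElectraAtt:
      attFile: string

proc describe(step: SketchStep): string =
  # `case` dispatch over the discriminator mirrors doRunTest's handling.
  case step.kind
  of sskTick: "advance clock to " & $step.tick
  of sskPhase0Att: "phase0 attestation from " & step.attFile
  of sskElectraAtt: "electra attestation from " & step.attFile

doAssert describe(SketchStep(kind: sskTick, tick: 3)) == "advance clock to 3"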
@ -108,12 +117,13 @@ proc loadOps(
        tick: step["tick"].getInt())
    elif step.hasKey"attestation":
      let filename = step["attestation"].getStr()
      let att = parseTest(
        path/filename & ".ssz_snappy",
        SSZ, phase0.Attestation
      )
      result.add Operation(kind: opOnAttestation,
        att: att)
      if fork >= ConsensusFork.Electra:
        result.add Operation(
          kind: opOnElectraAttestation, electraAtt: parseTest(
            path/filename & ".ssz_snappy", SSZ, electra.Attestation))
      else:
        result.add Operation(kind: opOnPhase0Attestation, phase0Att: parseTest(
          path/filename & ".ssz_snappy", SSZ, phase0.Attestation))
    elif step.hasKey"block":
      let filename = step["block"].getStr()
      doAssert step.hasKey"blobs" == step.hasKey"proofs"
@ -141,12 +151,14 @@ proc loadOps(
        blobData: blobData)
    elif step.hasKey"attester_slashing":
      let filename = step["attester_slashing"].getStr()
      let attesterSlashing = parseTest(
        path/filename & ".ssz_snappy",
        SSZ, phase0.AttesterSlashing
      )
      result.add Operation(kind: opOnAttesterSlashing,
        attesterSlashing: attesterSlashing)
      if fork >= ConsensusFork.Electra:
        result.add Operation(kind: opOnElectraAttesterSlashing,
          electraAttesterSlashing: parseTest(
            path/filename & ".ssz_snappy", SSZ, electra.AttesterSlashing))
      else:
        result.add Operation(kind: opOnPhase0AttesterSlashing,
          phase0AttesterSlashing: parseTest(
            path/filename & ".ssz_snappy", SSZ, phase0.AttesterSlashing))
    elif step.hasKey"payload_status":
      if step["payload_status"]["status"].getStr() == "INVALID":
        result.add Operation(kind: opInvalidateHash,
@ -322,10 +334,16 @@ proc doRunTest(
        time = BeaconTime(ns_since_genesis: step.tick.seconds.nanoseconds)
      let status = stores.fkChoice[].update_time(stores.dag, time)
      doAssert status.isOk == step.valid
    of opOnAttestation:
    of opOnPhase0Attestation:
      let status = stores.fkChoice[].on_attestation(
        stores.dag, step.att.data.slot, step.att.data.beacon_block_root,
        toSeq(stores.dag.get_attesting_indices(step.att.asTrusted)), time)
        stores.dag, step.phase0Att.data.slot, step.phase0Att.data.beacon_block_root,
        toSeq(stores.dag.get_attesting_indices(step.phase0Att.asTrusted)), time)
      doAssert status.isOk == step.valid
    of opOnElectraAttestation:
      let status = stores.fkChoice[].on_attestation(
        stores.dag, step.electraAtt.data.slot,
        step.electraAtt.data.beacon_block_root,
        toSeq(stores.dag.get_attesting_indices(step.electraAtt, true)), time)
      doAssert status.isOk == step.valid
    of opOnBlock:
      withBlck(step.blck):
@ -334,9 +352,16 @@ proc doRunTest(
          verifier, state[], stateCache,
          forkyBlck, step.blobData, time, invalidatedHashes)
        doAssert status.isOk == step.valid
    of opOnAttesterSlashing:
      let indices =
        check_attester_slashing(state[], step.attesterSlashing, flags = {})
    of opOnPhase0AttesterSlashing:
      let indices = check_attester_slashing(
        state[], step.phase0AttesterSlashing, flags = {})
      if indices.isOk:
        for idx in indices.get:
          stores.fkChoice[].process_equivocation(idx)
      doAssert indices.isOk == step.valid
    of opOnElectraAttesterSlashing:
      let indices = check_attester_slashing(
        state[], step.electraAttesterSlashing, flags = {})
      if indices.isOk:
        for idx in indices.get:
          stores.fkChoice[].process_equivocation(idx)
@ -386,8 +411,6 @@ template fcSuite(suiteName: static[string], testPathElem: static[string]) =
    let testsPath = presetPath/path/testPathElem
    if kind != pcDir or not os_ops.dirExists(testsPath):
      continue
    if testsPath.contains("/electra/") or testsPath.contains("\\electra\\"):
      continue
    let fork = forkForPathComponent(path).valueOr:
      raiseAssert "Unknown test fork: " & testsPath
    for kind, path in walkDir(testsPath, relative = true, checkDir = true):
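From Electra on, loadOps above parses the same .ssz_snappy file into the electra type and otherwise falls back to phase0, as the two if fork >= ConsensusFork.Electra branches show; the comparison works because later forks have higher enum ordinals. A minimal sketch of that fork-gated decode choice (ForkSketch and loadKind are illustrative):

type ForkSketch = enum
  fsPhase0, fsAltair, fsElectra

proc loadKind(fork: ForkSketch): string =
  # Later forks compare greater, so one branch covers Electra and beyond.
  if fork >= fsElectra: "electra.Attestation"
  else: "phase0.Attestation"

doAssert loadKind(fsAltair) == "phase0.Attestation"
doAssert loadKind(fsElectra) == "electra.Attestation"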
@ -76,7 +76,7 @@ proc runVerifyKzgProofTest(suiteName, suitePath, path: string) =
    y = fromHex[32](data["input"]["y"].getStr)
    proof = fromHex[48](data["input"]["proof"].getStr)

  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/tests/formats/kzg_4844/verify_kzg_proof.md#condition
  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/tests/formats/kzg_4844/verify_kzg_proof.md#condition
  # "If the commitment or proof is invalid (e.g. not on the curve or not in
  # the G1 subgroup of the BLS curve) or `z` or `y` are not a valid BLS
  # field element, it should error, i.e. the output should be `null`."
@ -236,7 +236,7 @@ proc runVerifyCellKzgProofBatchTest(suiteName, suitePath, path: string) =
    cells = data["input"]["cells"].mapIt(fromHex[2048](it.getStr))
    proofs = data["input"]["proofs"].mapIt(fromHex[48](it.getStr))

  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.0/tests/formats/kzg_7594/verify_cell_kzg_proof_batch.md#condition
  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-beta.1/tests/formats/kzg_7594/verify_cell_kzg_proof_batch.md#condition
  # If the blob is invalid (e.g. incorrect length or one of the 32-byte
  # blocks does not represent a BLS field element) it should error, i.e.
  # the output should be `null`.
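Both conditions quoted above follow the same convention: when the implementation must reject the input, the vector's expected output is JSON null. A self-contained sketch of that matching rule, assuming only std/json and std/options (matchesExpected is an illustrative name):

import std/[json, options]

proc matchesExpected(computed: Option[bool], expected: JsonNode): bool =
  if expected.kind == JNull:
    computed.isNone            # invalid input: the implementation must reject
  else:
    computed.isSome and computed.get == expected.getBool

doAssert matchesExpected(none(bool), newJNull())
doAssert matchesExpected(some(true), %true)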
@ -15,7 +15,7 @@ import
  ../testutil

from std/sequtils import toSeq
from std/strutils import toLowerAscii
from std/strutils import contains, toLowerAscii
from ../../beacon_chain/spec/presets import
  const_preset, defaultRuntimeConfig
from ./fixtures_utils import
@ -56,7 +56,13 @@ proc runTest(
        noRollback).expect("should apply block")
      withState(fhPreState[]):
        when consensusFork == ConsensusFork.Deneb:
          check checkPerValidatorBalanceCalc(forkyState.data)
          if unitTestName != "randomized_14":
            # TODO this test as of v1.5.0-beta.2 breaks, but also probably
            # just remove Deneb-only infrastructure of this sort, since it
            # doesn't readily adapt to Electra regardless. For now keep to
            # point to a potentially fixable/unexpected test case which
            # involves code not run outside the test suite to begin with.
            check checkPerValidatorBalanceCalc(forkyState.data)
    else:
      let res = state_transition(
        defaultRuntimeConfig, fhPreState[], blck, cache, info, flags = {},
@ -84,6 +90,13 @@ template runForkBlockTests(consensusFork: static ConsensusFork) =

  suite "EF - " & forkHumanName & " - Sanity - Blocks " & preset():
    for kind, path in walkDir(SanityBlocksDir, relative = true, checkDir = true):
      # TODO Fulu not in critical path yet so to start with only flag remaining
      # issues where it needs MAX_BLOBS_PER_BLOCK_FULU (not yet present), so in
      # process_execution_payload() it doesn't falsely reject two test cases.
      when consensusFork == ConsensusFork.Fulu:
        if path.contains("max_blobs_per_block") or
            path.contains("one_blob_max_txs"):
          continue
      consensusFork.runTest(
        "EF - " & forkHumanName & " - Sanity - Blocks",
        SanityBlocksDir, suiteName, path)
@ -100,5 +113,5 @@ template runForkBlockTests(consensusFork: static ConsensusFork) =
      "EF - " & forkHumanName & " - Random",
      RandomDir, suiteName, path)

withAllButFulu(ConsensusFork):
withAll(ConsensusFork):
  runForkBlockTests(consensusFork)
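Switching withAllButFulu to withAll above brings Fulu into the unrolled set: these templates instantiate the body once per fork with a static consensusFork bound in scope. A hedged sketch of what that unrolling looks like with a plain enum (SketchFork and withAllSketch are illustrative, not the repo's macros):

type SketchFork = enum sfPhase0, sfElectra, sfFulu

template withAllSketch(body: untyped) =
  # One instantiation of `body` per fork, each with a static consensusFork.
  block:
    const consensusFork {.inject, used.} = SketchFork.sfPhase0
    body
  block:
    const consensusFork {.inject, used.} = SketchFork.sfElectra
    body
  block:
    const consensusFork {.inject, used.} = SketchFork.sfFulu
    body

withAllSketch:
  when consensusFork == SketchFork.sfFulu:
    echo "Fulu block tests now included"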
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -53,48 +53,57 @@ func sanitySlotsDir(preset_dir: string): string {.compileTime.} =
from ../../beacon_chain/spec/datatypes/phase0 import BeaconState

suite "EF - Phase 0 - Sanity - Slots " & preset():
  const phase0SanitySlotsDir = sanitySlotsDir("phase0")
  const sanitySlotsDir = sanitySlotsDir("phase0")
  for kind, path in walkDir(
      phase0SanitySlotsDir, relative = true, checkDir = true):
    runTest(phase0.BeaconState, phase0SanitySlotsDir, "Phase 0", suiteName, path)
      sanitySlotsDir, relative = true, checkDir = true):
    runTest(phase0.BeaconState, sanitySlotsDir, "Phase 0", suiteName, path)

from ../../beacon_chain/spec/datatypes/altair import BeaconState

suite "EF - Altair - Sanity - Slots " & preset():
  const altairSanitySlotsDir = sanitySlotsDir("altair")
  const sanitySlotsDir = sanitySlotsDir("altair")
  for kind, path in walkDir(
      altairSanitySlotsDir, relative = true, checkDir = true):
    runTest(altair.BeaconState, altairSanitySlotsDir, "Altair", suiteName, path)
      sanitySlotsDir, relative = true, checkDir = true):
    runTest(altair.BeaconState, sanitySlotsDir, "Altair", suiteName, path)

from ../../beacon_chain/spec/datatypes/bellatrix import BeaconState

suite "EF - Bellatrix - Sanity - Slots " & preset():
  const bellatrixSanitySlotsDir = sanitySlotsDir("bellatrix")
  const sanitySlotsDir = sanitySlotsDir("bellatrix")
  for kind, path in walkDir(
      bellatrixSanitySlotsDir, relative = true, checkDir = true):
    runTest(bellatrix.BeaconState, bellatrixSanitySlotsDir, "Bellatrix", suiteName, path)
      sanitySlotsDir, relative = true, checkDir = true):
    runTest(bellatrix.BeaconState, sanitySlotsDir, "Bellatrix", suiteName, path)

from ../../beacon_chain/spec/datatypes/capella import BeaconState

suite "EF - Capella - Sanity - Slots " & preset():
  const capellaSanitySlotsDir = sanitySlotsDir("capella")
  const sanitySlotsDir = sanitySlotsDir("capella")
  for kind, path in walkDir(
      capellaSanitySlotsDir, relative = true, checkDir = true):
    runTest(capella.BeaconState, capellaSanitySlotsDir, "Capella", suiteName, path)
      sanitySlotsDir, relative = true, checkDir = true):
    runTest(capella.BeaconState, sanitySlotsDir, "Capella", suiteName, path)

from ../../beacon_chain/spec/datatypes/deneb import BeaconState

suite "EF - Deneb - Sanity - Slots " & preset():
  const denebSanitySlotsDir = sanitySlotsDir("deneb")
  const sanitySlotsDir = sanitySlotsDir("deneb")
  for kind, path in walkDir(
      denebSanitySlotsDir, relative = true, checkDir = true):
    runTest(deneb.BeaconState, denebSanitySlotsDir, "Deneb", suiteName, path)
      sanitySlotsDir, relative = true, checkDir = true):
    runTest(deneb.BeaconState, sanitySlotsDir, "Deneb", suiteName, path)

from ../../beacon_chain/spec/datatypes/electra import BeaconState

suite "EF - Electra - Sanity - Slots " & preset():
  const electraSanitySlotsDir = sanitySlotsDir("electra")
  const sanitySlotsDir = sanitySlotsDir("electra")
  for kind, path in walkDir(
      electraSanitySlotsDir, relative = true, checkDir = true):
      sanitySlotsDir, relative = true, checkDir = true):
    runTest(
      electra.BeaconState, electraSanitySlotsDir, "Electra", suiteName, path)
      electra.BeaconState, sanitySlotsDir, "Electra", suiteName, path)

from ../../beacon_chain/spec/datatypes/fulu import BeaconState

suite "EF - Fulu - Sanity - Slots " & preset():
  const sanitySlotsDir = sanitySlotsDir("fulu")
  for kind, path in walkDir(
      sanitySlotsDir, relative = true, checkDir = true):
    runTest(
      fulu.BeaconState, sanitySlotsDir, "Fulu", suiteName, path)
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -1105,4 +1105,74 @@ suite "Attestation pool electra processing" & preset():
    # Total aggregations size should be one for that root
    check:
      pool[].getElectraAggregatedAttestation(1.Slot, hash_tree_root(att4.data),
        0.CommitteeIndex).get().aggregation_bits.countOnes() == 1
        0.CommitteeIndex).get().aggregation_bits.countOnes() == 1

  proc verifyAttestationSignature(
      pool: AttestationPool,
      state: ref ForkedHashedBeaconState,
      cache: var StateCache,
      attestation: electra.Attestation): bool =
    withState(state[]):
      when consensusFork == ConsensusFork.Electra:
        let
          fork = pool.dag.cfg.forkAtEpoch(forkyState.data.slot.epoch)
          attesting_indices = get_attesting_indices(
            forkyState.data, attestation.data, attestation.aggregation_bits,
            attestation.committee_bits, cache)
        verify_attestation_signature(
          fork, pool.dag.genesis_validators_root, attestation.data,
          attesting_indices.mapIt(forkyState.data.validators.item(it).pubkey),
          attestation.signature)
      else:
        raiseAssert "must be electra"

  test "Aggregating across committees" & preset():
    # Add attestations from different committees
    var maxSlot = 0.Slot
    for i in 0 ..< 4:
      let
        bc = get_beacon_committee(
          state[], getStateField(state[], slot), i.CommitteeIndex, cache)
        att = makeElectraAttestation(
          state[], state[].latest_block_root, bc[0], cache)
      var att2 = makeElectraAttestation(
        state[], state[].latest_block_root, bc[1], cache)
      att2.combine(att)

      pool[].addAttestation(
        att, @[bc[0]], att.aggregation_bits.len, att.loadSig,
        att.data.slot.start_beacon_time)

      pool[].addAttestation(
        att2, @[bc[0], bc[1]], att2.aggregation_bits.len, att2.loadSig,
        att2.data.slot.start_beacon_time)

      pool[].addAttestation(
        att, @[bc[0]], att.aggregation_bits.len, att.loadSig,
        att.data.slot.start_beacon_time)

      pool[].addAttestation(
        att2, @[bc[0], bc[1]], att2.aggregation_bits.len, att2.loadSig,
        att2.data.slot.start_beacon_time)

      if att.data.slot > maxSlot:
        maxSlot = att.data.slot

    check process_slots(
      defaultRuntimeConfig, state[],
      maxSlot + MIN_ATTESTATION_INCLUSION_DELAY, cache,
      info, {}).isOk()

    let attestations = pool[].getElectraAttestationsForBlock(state[], cache)
    check:
      attestations.len() == 2
      attestations[0].aggregation_bits.countOnes() == 4
      attestations[0].committee_bits.countOnes() == 2
      attestations[1].aggregation_bits.countOnes() == 4
      attestations[1].committee_bits.countOnes() == 2
      check_attestation(
        state[].electraData.data, attestations[0], {}, cache, true).isOk
      check_attestation(
        state[].electraData.data, attestations[1], {}, cache, true).isOk
      pool[].verifyAttestationSignature(state, cache, attestations[0])
      pool[].verifyAttestationSignature(state, cache, attestations[1])
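The final checks encode simple counting: the loop touches 4 committees with 2 attesters each, and the pool packs them into 2 on-chain aggregates that each span 2 committees. A worked version of that arithmetic (the constants restate the test above, they are not pool parameters):

const
  committees = 4
  attestersPerCommittee = 2
  committeesPerAggregate = 2

doAssert committees * attestersPerCommittee == 8             # total set bits
doAssert committees div committeesPerAggregate == 2          # attestations.len
doAssert committeesPerAggregate * attestersPerCommittee == 4 # bits per aggregate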
@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Copyright (c) 2021-2025 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -19,7 +19,8 @@ import
  ../beacon_chain/spec/[crypto, keystore, eth2_merkleization],
  ../beacon_chain/spec/datatypes/base,
  ../beacon_chain/spec/eth2_apis/[rest_keymanager_calls, rest_keymanager_types],
  ../beacon_chain/validators/[keystore_management, slashing_protection_common],
  ../beacon_chain/validators/[keystore_management, slashing_protection_common,
                              validator_pool],
  ../beacon_chain/networking/network_metadata,
  ../beacon_chain/rpc/rest_key_management_api,
  ../beacon_chain/[conf, filepath, beacon_node,
@ -35,6 +36,8 @@ type
    port: int
    validatorsDir: string
    secretsDir: string
    validatorPool: ref ValidatorPool
    keymanagerHost: ref KeymanagerHost

  # Individual port numbers derived by adding `ord` to configurable base port
  PortKind {.pure.} = enum
@ -99,6 +102,35 @@ const
    "0x0bbca63e35c7a159fc2f187d300cad9ef5f5e73e55f78c391e7bc2c2feabc2d9d63dfe99edd7058ad0ab9d7f14aade5f"
  ]

  dynamicPrivateKeys {.used.} = [
    "0x30a2616ee087aaed186c43fcd2c8f6de700c36673b047973c85d9bec2a444750",
    "0x1957f3cf86be1f88689501453e3432f5d821101b9790bbd43d823b9ac1c4a18b",
    "0x41df21004d05757df5eedd2c1a4e843503b54680f2c5648235fd37e06785ff5b",
    "0x2627fd902852ea62057993a59825458684be73f05c3166953e21b35d08a00e4d"
  ]

  dynamicPublicKeys = [
    "0xa4dc24de501e99eb1a7ad1a0a73781acfc1b4133f1b29ef1536be44d34212a23331640dd30b532fef5a2533fde7f0ef1",
    "0x94f6f523782134bf87c7371a517f2035d54f4c87ec55916404de3f27c43bafc7405a40e353bf32385d37972a23486fae",
    "0xa09149fc0d3ccd425051dfc4f2c320d6845c17b27bcb5739e3a8d49820dcab7d4cabfdf60fb05d6e1bc0482bf29d04c5",
    "0xb57aa0363091b7a14bf68e588ee366559b5abf27a52efd676d39eb7a4d1e8f6f0b0b6d95e0b7041720ddf801b74211ab"
  ]

  scenarioPrivateKeys = [
    "0x42710c38caa62d63cdac8aab59789befe6a6ac568dc45c4791cf2f5743ef15ba",
    "0x007b6ced45bc6eaac2fa00eaffc687beda00da64c7b35f53a88c378f5a710521",
    "0x5a1a6c80eecf980e4165f5254f2bd8cfd4a4390651be8a76deb81298328a3f11",
    "0x05471e7d96b4a7248f6392601cc90e620074f8a6eadfc6143c8950699021e728"
  ]

  scenarioPublicKeys = [
    "0xa3bdf080a33fb34e9b656bf1e145b63eb9c9db81e07e2d8b70d56bda2124b167df7ac6d6a432e091d024ae5fc352d620",
    "0x8f1a1887263a6e5987b15f424a6d1b3128ea5357d37cb1a775a90546530a47efef3b737dde9124adde9212b2c8382cd9",
    "0x92080e161b0601a9f75d20868b64ee573088128ec7e68c11603014b68f6b1b37bfc394ce61e5b515e538fa3f95d3ba6e",
    "0xa3ad2269fb71074cb2166ee58008967b5e5b13d0a76e992e912ce1ed2073c79450a26406a30182f72d5c57ffa9939f51"
  ]

  newPublicKeysUrl = HttpHostUri(parseUri("http://127.0.0.1/remote"))

  nodeDataDir = dataDir / "node-0"
@ -193,6 +225,33 @@ BELLATRIX_FORK_EPOCH: 0
    fatal "Failed to create token file", err = deposits.error
    quit 1

proc addDynamicValidator(kmtest: KeymanagerToTest,
                         pubkey: ValidatorPubKey) =
  let
    keystore = KeystoreData(
      kind: KeystoreKind.Remote,
      handle: FileLockHandle(opened: false),
      pubkey: pubkey,
      remotes: @[
        RemoteSignerInfo(
          url: HttpHostUri(parseUri("http://127.0.0.1")),
          pubkey: pubkey
        )
      ],
      flags: {RemoteKeystoreFlag.DynamicKeystore},
      remoteType: RemoteSignerType.Web3Signer)
    withdrawalAddress =
      kmtest.keymanagerHost[].getValidatorWithdrawalAddress(keystore.pubkey)
    perValidatorDefaultFeeRecipient = getPerValidatorDefaultFeeRecipient(
      Opt.some(defaultFeeRecipient), withdrawalAddress)
    feeRecipient = kmtest.keymanagerHost[].getSuggestedFeeRecipient(
      keystore.pubkey, perValidatorDefaultFeeRecipient).valueOr(
        perValidatorDefaultFeeRecipient)
    gasLimit = kmtest.keymanagerHost[].getSuggestedGasLimit(
      keystore.pubkey).valueOr(defaultGasLimit)
  discard
    kmtest.validatorPool[].addValidator(keystore, feeRecipient, gasLimit)
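addDynamicValidator above resolves fee recipient and gas limit through a fallback chain: an explicit keymanager setting wins, otherwise the per-validator default derived from the withdrawal address, otherwise the node-wide default. A sketch of that chain with std/options standing in for stew's Opt (pickSetting is an illustrative name, not the repo's API):

import std/options

proc pickSetting[T](explicit, derived: Option[T], fallback: T): T =
  # Explicit keymanager configuration first, derived per-validator default
  # second, node-wide default last.
  explicit.get(derived.get(fallback))

doAssert pickSetting(none(int), some(25), 30) == 25
doAssert pickSetting(some(20), some(25), 30) == 20
doAssert pickSetting(none(int), none(int), 30) == 30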

proc copyHalfValidators(dstDataDir: string, firstHalf: bool) =
  let dstValidatorsDir = dstDataDir / "validators"

@ -273,11 +332,14 @@ proc addPreTestRemoteKeystores(validatorsDir: string) =
        err = res.error
      quit 1

proc startBeaconNode(basePort: int) {.raises: [CatchableError].} =
proc initBeaconNode(basePort: int): Future[BeaconNode] {.async: (raises: []).} =
  let rng = HmacDrbgContext.new()

  copyHalfValidators(nodeDataDir, true)
  addPreTestRemoteKeystores(nodeValidatorsDir)
  try:
    copyHalfValidators(nodeDataDir, true)
    addPreTestRemoteKeystores(nodeValidatorsDir)
  except CatchableError as exc:
    raiseAssert exc.msg

  let runNodeConf = try: BeaconNodeConf.load(cmdLine = mapIt([
    "--tcp-port=" & $(basePort + PortKind.PeerToPeer.ord),
@ -302,35 +364,33 @@ proc startBeaconNode(basePort: int) {.raises: [CatchableError].} =
  except Exception as exc: # TODO fix confutils exceptions
    raiseAssert exc.msg

  let
    metadata = loadEth2NetworkMetadata(dataDir).expect("Metadata is compatible")
    node = waitFor BeaconNode.init(rng, runNodeConf, metadata)
  try:
    let metadata =
      loadEth2NetworkMetadata(dataDir).expect("Metadata is compatible")
    await BeaconNode.init(rng, runNodeConf, metadata)
  except CatchableError as exc:
    raiseAssert exc.msg

  node.start() # This will run until the node is terminated by
               # setting its `bnStatus` to `Stopping`.
# proc startValidatorClient(basePort: int) {.async, thread.} =
#   let rng = HmacDrbgContext.new()

#   os.removeDir dataDir
#   copyHalfValidators(vcDataDir, false)
#   addPreTestRemoteKeystores(vcValidatorsDir)

proc startValidatorClient(basePort: int) {.async, thread.} =
  let rng = HmacDrbgContext.new()
#   let runValidatorClientConf = try: ValidatorClientConf.load(cmdLine = mapIt([
#     "--beacon-node=http://127.0.0.1:" & $(basePort + PortKind.KeymanagerBN.ord),
#     "--data-dir=" & vcDataDir,
#     "--validators-dir=" & vcValidatorsDir,
#     "--secrets-dir=" & vcSecretsDir,
#     "--suggested-fee-recipient=" & $defaultFeeRecipient,
#     "--keymanager=true",
#     "--keymanager-address=127.0.0.1",
#     "--keymanager-port=" & $(basePort + PortKind.KeymanagerVC.ord),
#     "--keymanager-token-file=" & tokenFilePath], it))
#   except:
#     quit 1

  copyHalfValidators(vcDataDir, false)
  addPreTestRemoteKeystores(vcValidatorsDir)

  let runValidatorClientConf = try: ValidatorClientConf.load(cmdLine = mapIt([
    "--beacon-node=http://127.0.0.1:" & $(basePort + PortKind.KeymanagerBN.ord),
    "--data-dir=" & vcDataDir,
    "--validators-dir=" & vcValidatorsDir,
    "--secrets-dir=" & vcSecretsDir,
    "--suggested-fee-recipient=" & $defaultFeeRecipient,
    "--keymanager=true",
    "--keymanager-address=127.0.0.1",
    "--keymanager-port=" & $(basePort + PortKind.KeymanagerVC.ord),
    "--keymanager-token-file=" & tokenFilePath], it))
  except:
    quit 1

  await runValidatorClient(runValidatorClientConf, rng)
# await runValidatorClient(runValidatorClientConf, rng)

const
  password = "7465737470617373776f7264f09f9491"
@ -461,6 +521,46 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
          url: newPublicKeysUrl))
      ImportRemoteKeystoresBody(remote_keys: res)

    scenarioKeystoreBody1 =
      block:
        let
          privateKey = ValidatorPrivKey.fromHex(scenarioPrivateKeys[0]).tryGet()
          store = createKeystore(kdfPbkdf2, rng[], privateKey,
            KeystorePass.init password, salt = salt, iv = iv,
            description = "Test keystore",
            path = validateKeyPath("m/12381/60/0/0").expect("Valid Keypath"))
        KeystoresAndSlashingProtection(
          keystores: @[store],
          passwords: @[password],
        )

    scenarioKeystoreBody2 =
      block:
        let
          privateKey = ValidatorPrivKey.fromHex(scenarioPrivateKeys[1]).tryGet()
          store = createKeystore(kdfPbkdf2, rng[], privateKey,
            KeystorePass.init password, salt = salt, iv = iv,
            description = "Test keystore",
            path = validateKeyPath("m/12381/60/0/0").expect("Valid Keypath"))
        KeystoresAndSlashingProtection(
          keystores: @[store],
          passwords: @[password],
        )

    scenarioKeystoreBody3 =
      block:
        let
          publicKey = ValidatorPubKey.fromHex(scenarioPublicKeys[2]).tryGet()
          store = RemoteKeystoreInfo(pubkey: publicKey, url: newPublicKeysUrl)
        ImportRemoteKeystoresBody(remote_keys: @[store])

    scenarioKeystoreBody4 =
      block:
        let
          publicKey = ValidatorPubKey.fromHex(scenarioPublicKeys[3]).tryGet()
          store = RemoteKeystoreInfo(pubkey: publicKey, url: newPublicKeysUrl)
        ImportRemoteKeystoresBody(remote_keys: @[store])

    template expectedImportStatus(i: int): string =
      if i < 8:
        "duplicate"
@ -1101,6 +1201,52 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
      check:
        finalResultFromApi == defaultFeeRecipient

    asyncTest "Obtaining the fee recipient for dynamic validator returns suggested default" & testFlavour:
      let
        pubkey = ValidatorPubKey.fromHex(dynamicPublicKeys[0]).expect("valid key")

      keymanager.addDynamicValidator(pubkey)
      try:
        let resultFromApi =
          await client.listFeeRecipient(pubkey, correctTokenValue)
        check: resultFromApi == defaultFeeRecipient
      finally:
        keymanager.validatorPool[].removeValidator(pubkey)

    asyncTest "Configuring the fee recipient for dynamic validator" & testFlavour:
      let
        pubkey = ValidatorPubKey.fromHex(dynamicPublicKeys[1]).expect("valid key")
        firstFeeRecipient = specifiedFeeRecipient(200)

      await client.setFeeRecipient(pubkey, firstFeeRecipient, correctTokenValue)
      let firstResultFromApi =
        await client.listFeeRecipient(pubkey, correctTokenValue)

      check firstResultFromApi == firstFeeRecipient

      keymanager.addDynamicValidator(pubkey)
      try:
        let secondResultFromApi =
          await client.listFeeRecipient(pubkey, correctTokenValue)

        check secondResultFromApi == firstFeeRecipient

        let secondFeeRecipient = specifiedFeeRecipient(300)
        await client.setFeeRecipient(pubkey, secondFeeRecipient,
                                     correctTokenValue)

        let thirdResultFromApi =
          await client.listFeeRecipient(pubkey, correctTokenValue)
        check thirdResultFromApi == secondFeeRecipient

        await client.deleteFeeRecipient(pubkey, correctTokenValue)

        let finalResultFromApi =
          await client.listFeeRecipient(pubkey, correctTokenValue)
        check finalResultFromApi == defaultFeeRecipient
      finally:
        keymanager.validatorPool[].removeValidator(pubkey)

  suite "Gas limit management" & testFlavour:
    asyncTest "Missing Authorization header" & testFlavour:
      let pubkey = ValidatorPubKey.fromHex(oldPublicKeys[0]).expect("valid key")
@ -1263,6 +1409,51 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
      check:
        finalResultFromApi == defaultGasLimit

    asyncTest "Obtaining the gas limit for dynamic validator returns suggested default" & testFlavour:
      let
        pubkey = ValidatorPubKey.fromHex(dynamicPublicKeys[2]).expect("valid key")

      keymanager.addDynamicValidator(pubkey)
      try:
        let resultFromApi =
          await client.listGasLimit(pubkey, correctTokenValue)
        check: resultFromApi == defaultGasLimit
      finally:
        keymanager.validatorPool[].removeValidator(pubkey)

    asyncTest "Configuring the gas limit for dynamic validator" & testFlavour:
      let
        pubkey = ValidatorPubKey.fromHex(dynamicPublicKeys[3]).expect("valid key")
        firstGasLimit = 40_000_000'u64

      await client.setGasLimit(pubkey, firstGasLimit, correctTokenValue)
      let firstResultFromApi =
        await client.listGasLimit(pubkey, correctTokenValue)

      check firstResultFromApi == firstGasLimit

      keymanager.addDynamicValidator(pubkey)
      try:
        let secondResultFromApi =
          await client.listGasLimit(pubkey, correctTokenValue)

        check secondResultFromApi == firstGasLimit

        let secondGasLimit = 50_000_000'u64
        await client.setGasLimit(pubkey, secondGasLimit, correctTokenValue)

        let thirdResultFromApi =
          await client.listGasLimit(pubkey, correctTokenValue)
        check thirdResultFromApi == secondGasLimit

        await client.deleteGasLimit(pubkey, correctTokenValue)

        let finalResultFromApi =
          await client.listGasLimit(pubkey, correctTokenValue)
        check finalResultFromApi == defaultGasLimit
      finally:
        keymanager.validatorPool[].removeValidator(pubkey)

  suite "Graffiti management" & testFlavour:
    asyncTest "Missing Authorization header" & testFlavour:
      let pubkey = ValidatorPubKey.fromHex(oldPublicKeys[0]).expect("valid key")
@ -1690,24 +1881,160 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
          response.status == 403
          responseJson["message"].getStr() == InvalidAuthorizationError

proc delayedTests(basePort: int) {.async.} =
  suite "Combined scenarios" & testFlavour:
    asyncTest "ImportKeystores should not be blocked by fee recipient setting" & testFlavour:
      let
        publicKey = ValidatorPubKey.fromHex(scenarioPublicKeys[0]).tryGet()
        localFeeRecipient = specifiedFeeRecipient(500)

      await client.setFeeRecipient(publicKey, localFeeRecipient,
                                   correctTokenValue)

      let firstResultFromApi =
        await client.listFeeRecipient(publicKey, correctTokenValue)
      check firstResultFromApi == localFeeRecipient

      let
        response = await client.importKeystoresPlain(
          scenarioKeystoreBody1,
          extraHeaders = @[("Authorization", "Bearer " & correctTokenValue)])
        decoded =
          try:
            RestJson.decode(response.data,
                            DataEnclosedObject[seq[RemoteKeystoreStatus]],
                            requireAllFields = true,
                            allowUnknownFields = true)
          except SerializationError:
            raiseAssert "Invalid response encoding"
      check:
        response.status == 200
        len(decoded.data) == 1
        decoded.data[0].status == KeystoreStatus.imported

      let secondResultFromApi =
        await client.listFeeRecipient(publicKey, correctTokenValue)
      check secondResultFromApi == localFeeRecipient

    asyncTest "ImportKeystores should not be blocked by gas limit setting" & testFlavour:
      let
        publicKey = ValidatorPubKey.fromHex(scenarioPublicKeys[0]).tryGet()
        localGasLimit = 20_000_000'u64

      await client.setGasLimit(publicKey, localGasLimit, correctTokenValue)

      let firstResultFromApi =
        await client.listGasLimit(publicKey, correctTokenValue)
      check firstResultFromApi == localGasLimit

      let
        response = await client.importKeystoresPlain(
          scenarioKeystoreBody2,
          extraHeaders = @[("Authorization", "Bearer " & correctTokenValue)])
        decoded =
          try:
            RestJson.decode(response.data,
                            DataEnclosedObject[seq[RemoteKeystoreStatus]],
                            requireAllFields = true,
                            allowUnknownFields = true)
          except SerializationError:
            raiseAssert "Invalid response encoding"
      check:
        response.status == 200
        len(decoded.data) == 1
        decoded.data[0].status == KeystoreStatus.imported

      let secondResultFromApi =
        await client.listGasLimit(publicKey, correctTokenValue)
      check secondResultFromApi == localGasLimit

    asyncTest "ImportRemoteKeys should not be blocked by fee recipient setting" & testFlavour:
      let
        publicKey = ValidatorPubKey.fromHex(scenarioPublicKeys[2]).tryGet()
        localFeeRecipient = specifiedFeeRecipient(600)

      await client.setFeeRecipient(publicKey, localFeeRecipient,
                                   correctTokenValue)

      let firstResultFromApi =
        await client.listFeeRecipient(publicKey, correctTokenValue)
      check firstResultFromApi == localFeeRecipient

      let
        response = await client.importRemoteKeysPlain(
          scenarioKeystoreBody3,
          extraHeaders = @[("Authorization", "Bearer " & correctTokenValue)])
        decoded =
          try:
            RestJson.decode(response.data,
                            DataEnclosedObject[seq[RemoteKeystoreStatus]],
                            requireAllFields = true,
                            allowUnknownFields = true)
          except SerializationError:
            raiseAssert "Invalid response encoding"
      check:
        response.status == 200
        len(decoded.data) == 1
        decoded.data[0].status == KeystoreStatus.imported

      let secondResultFromApi =
        await client.listFeeRecipient(publicKey, correctTokenValue)
      check secondResultFromApi == localFeeRecipient

    asyncTest "ImportRemoteKeys should not be blocked by gas limit setting" & testFlavour:
      let
        publicKey = ValidatorPubKey.fromHex(scenarioPublicKeys[3]).tryGet()
        localGasLimit = 80_000_000'u64

      await client.setGasLimit(publicKey, localGasLimit, correctTokenValue)

      let firstResultFromApi =
        await client.listGasLimit(publicKey, correctTokenValue)
      check firstResultFromApi == localGasLimit

      let
        response = await client.importRemoteKeysPlain(
          scenarioKeystoreBody4,
          extraHeaders = @[("Authorization", "Bearer " & correctTokenValue)])
        decoded =
          try:
            RestJson.decode(response.data,
                            DataEnclosedObject[seq[RemoteKeystoreStatus]],
                            requireAllFields = true,
                            allowUnknownFields = true)
          except SerializationError:
            raiseAssert "Invalid response encoding"
      check:
        response.status == 200
        len(decoded.data) == 1
        decoded.data[0].status == KeystoreStatus.imported

      let secondResultFromApi =
        await client.listGasLimit(publicKey, correctTokenValue)
      check secondResultFromApi == localGasLimit
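All four combined scenarios above share one shape: configure a per-validator setting before the key exists, import the key, then verify both that the import succeeded and that the earlier setting survived. A hedged generic sketch of that shape (scenario, setCfg, getCfg and importKeys are illustrative closures, not the REST client API):

proc scenario[T](setCfg: proc (v: T), getCfg: proc (): T,
                 importKeys: proc (): bool, value: T) =
  setCfg(value)               # configure before the key exists
  doAssert getCfg() == value
  doAssert importKeys()       # import must succeed despite prior config
  doAssert getCfg() == value  # and must not clobber the setting

var gasLimit = 0'u64
scenario(proc (v: uint64) = gasLimit = v,
         proc (): uint64 = gasLimit,
         proc (): bool = true, 20_000_000'u64)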

proc delayedTests(basePort: int, pool: ref ValidatorPool,
                  host: ref KeymanagerHost) {.async.} =
  let
    beaconNodeKeymanager = KeymanagerToTest(
      ident: "Beacon Node",
      port: basePort + PortKind.KeymanagerBN.ord,
      validatorsDir: nodeValidatorsDir,
      secretsDir: nodeSecretsDir)
      secretsDir: nodeSecretsDir,
      validatorPool: pool,
      keymanagerHost: host)

    validatorClientKeymanager = KeymanagerToTest(
      ident: "Validator Client",
      port: basePort + PortKind.KeymanagerVC.ord,
      validatorsDir: vcValidatorsDir,
      secretsDir: vcSecretsDir)
      secretsDir: vcSecretsDir,
      validatorPool: pool,
      keymanagerHost: host)

  while bnStatus != BeaconNodeStatus.Running:
    await sleepAsync(1.seconds)

  asyncSpawn startValidatorClient(basePort)
  # asyncSpawn startValidatorClient(basePort)

  await sleepAsync(2.seconds)

@ -1725,10 +2052,14 @@ proc main(basePort: int) {.async.} =
  if dirExists(dataDir):
    os.removeDir dataDir

  asyncSpawn delayedTests(basePort)

  prepareNetwork()
  startBeaconNode(basePort)

  let node = await initBeaconNode(basePort)

  asyncSpawn delayedTests(basePort, node.attachedValidators,
                          node.keymanagerHost)

  node.start()

let
  basePortStr = os.getEnv("NIMBUS_TEST_KEYMANAGER_BASE_PORT", $defaultBasePort)
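The refactor above splits the old blocking startBeaconNode into initBeaconNode, which returns the node so main can hand its validator pool and keymanager host to delayedTests before calling node.start(). A minimal sketch of the init/start split using std/asyncdispatch in place of chronos (Node, initNode and runChecks are illustrative):

import std/asyncdispatch

type Node = ref object
  running: bool

proc initNode(): Future[Node] {.async.} =
  # Build the node first so callers can wire tests to its internals.
  result = Node()

proc runChecks(node: Node) {.async.} =
  doAssert node != nil  # tests may inspect the node before it starts

proc main() {.async.} =
  let node = await initNode()
  asyncCheck runChecks(node)  # scheduled before the blocking start
  node.running = true         # stands in for node.start()

waitFor main()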
File diff suppressed because it is too large

2
vendor/holesky
vendored
@ -1 +1 @@
Subproject commit f6761b531dae01e30ca05658d01853415465d1e0
Subproject commit 34d0a04577b36dcf5ba304a2ba8222c8f1f4e639

2
vendor/nim-eth2-scenarios
vendored
@ -1 +1 @@
Subproject commit 1c774c0dad2f9b0072693aa1fa348f6a9e7890d0
Subproject commit d84994bdbc5ec7d79ad3e4c71c637941710d04af

2
vendor/nim-libbacktrace
vendored
@ -1 +1 @@
Subproject commit 6da0cda88ab7780bd5fd342327adb91ab84692aa
Subproject commit 0a438d70312de253694748346e002418bd127829

2
vendor/sepolia
vendored
@ -1 +1 @@
Subproject commit da5654742513435bdd6dbc5fd033cf593ce57a0f
Subproject commit f5e3652be045250fd2de1631683b110317592bd3