Merge branch 'dev/etan/lc-electra' into feat_eip-7688

Etan Kissling 2024-06-25 14:57:39 +02:00
commit 754e9c3b10
118 changed files with 2249 additions and 1309 deletions

View File

@ -35,7 +35,7 @@ jobs:
cpu: amd64 cpu: amd64
- os: windows - os: windows
cpu: amd64 cpu: amd64
branch: [~, upstream/version-1-6, upstream/version-2-0] branch: [~, upstream/version-1-6, v2.0.6]
exclude: exclude:
- target: - target:
os: macos os: macos
@ -49,7 +49,7 @@ jobs:
include: include:
- branch: upstream/version-1-6 - branch: upstream/version-1-6
branch-short: version-1-6 branch-short: version-1-6
- branch: upstream/version-2-0 - branch: v2.0.6
branch-short: version-2-0 branch-short: version-2-0
nimflags-extra: --mm:refc nimflags-extra: --mm:refc
- target: - target:
@ -226,9 +226,20 @@ jobs:
- name: Build files with isMainModule - name: Build files with isMainModule
run: | run: |
executables=(
"beacon_chain/el/deposit_contract"
"beacon_chain/fork_choice/fork_choice"
"beacon_chain/fork_choice/proto_array"
"beacon_chain/networking/network_metadata_downloads"
"beacon_chain/era_db"
"beacon_chain/trusted_node_sync"
"benchmarks/rest_api_benchmark"
"tests/mocking/mock_genesis"
)
source env.sh source env.sh
nim c beacon_chain/era_db for executable in "${executables[@]}"; do
nim c beacon_chain/trusted_node_sync nim c --passC:-fsyntax-only --noLinking:on -d:chronicles_log_level=TRACE "${executable}"
done
lint: lint:
name: "Lint" name: "Lint"

View File

@ -1,3 +1,27 @@
2024-06-24 v24.6.0
==================
Nimbus `v24.6.0` is a `low-urgency` release with performance and safety improvements.
### Improvements
* Improve SHA256 protocol object hashing speed by 30%:
https://github.com/status-im/nimbus-eth2/pull/6292
* Ensure that when a paired Nimbus beacon node and validator client improperly share validator keys, they avoid slashing:
https://github.com/status-im/nimbus-eth2/pull/6329
* Add block scoring to validator client to pick the best block from multiple beacon nodes:
https://github.com/status-im/nimbus-eth2/pull/6303
* Enable block monitoring in validator client by default to attest earlier and more reliably:
https://github.com/status-im/nimbus-eth2/pull/6331
### Fixes
* Fix light client libp2p gossip topic subscriptions:
https://github.com/status-im/nimbus-eth2/pull/6351
2024-05-23 v24.5.1 2024-05-23 v24.5.1
================== ==================

View File

@ -2468,14 +2468,20 @@ OK: 10/10 Fail: 0/10 Skip: 0/10
OK: 10/10 Fail: 0/10 Skip: 0/10 OK: 10/10 Fail: 0/10 Skip: 0/10
## EF - Electra - Epoch Processing - Pending balance deposits [Preset: mainnet] ## EF - Electra - Epoch Processing - Pending balance deposits [Preset: mainnet]
```diff ```diff
+ Pending balance deposits - mixture_of_skipped_and_above_churn [Preset: mainnet] OK
+ Pending balance deposits - multiple_pending_deposits_above_churn [Preset: mainnet] OK + Pending balance deposits - multiple_pending_deposits_above_churn [Preset: mainnet] OK
+ Pending balance deposits - multiple_pending_deposits_below_churn [Preset: mainnet] OK + Pending balance deposits - multiple_pending_deposits_below_churn [Preset: mainnet] OK
+ Pending balance deposits - multiple_pending_one_skipped [Preset: mainnet] OK
+ Pending balance deposits - multiple_skipped_deposits_exiting_validators [Preset: mainnet] OK
+ Pending balance deposits - pending_deposit_balance_above_churn [Preset: mainnet] OK + Pending balance deposits - pending_deposit_balance_above_churn [Preset: mainnet] OK
+ Pending balance deposits - pending_deposit_balance_equal_churn [Preset: mainnet] OK + Pending balance deposits - pending_deposit_balance_equal_churn [Preset: mainnet] OK
+ Pending balance deposits - pending_deposit_min_activation_balance [Preset: mainnet] OK + Pending balance deposits - pending_deposit_min_activation_balance [Preset: mainnet] OK
+ Pending balance deposits - pending_deposit_preexisting_churn [Preset: mainnet] OK + Pending balance deposits - pending_deposit_preexisting_churn [Preset: mainnet] OK
+ Pending balance deposits - processing_deposit_of_withdrawable_validator [Preset: mainnet] OK
+ Pending balance deposits - processing_deposit_of_withdrawable_validator_does_not_get_churn OK
+ Pending balance deposits - skipped_deposit_exiting_validator [Preset: mainnet] OK
``` ```
OK: 6/6 Fail: 0/6 Skip: 0/6 OK: 12/12 Fail: 0/12 Skip: 0/12
## EF - Electra - Epoch Processing - Pending consolidations [Preset: mainnet] ## EF - Electra - Epoch Processing - Pending consolidations [Preset: mainnet]
```diff ```diff
+ Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK + Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK
@ -2673,6 +2679,11 @@ OK: 14/14 Fail: 0/14 Skip: 0/14
+ [Valid] EF - Electra - Operations - Block Header - basic_block_header OK + [Valid] EF - Electra - Operations - Block Header - basic_block_header OK
``` ```
OK: 6/6 Fail: 0/6 Skip: 0/6 OK: 6/6 Fail: 0/6 Skip: 0/6
## EF - Electra - Operations - Consolidation Request [Preset: mainnet]
```diff
+ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_not_enough_consoli OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## EF - Electra - Operations - Deposit [Preset: mainnet] ## EF - Electra - Operations - Deposit [Preset: mainnet]
```diff ```diff
+ [Invalid] EF - Electra - Operations - Deposit - invalid_bad_merkle_proof OK + [Invalid] EF - Electra - Operations - Deposit - invalid_bad_merkle_proof OK
@ -2698,45 +2709,27 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
+ [Valid] EF - Electra - Operations - Deposit - top_up__zero_balance OK + [Valid] EF - Electra - Operations - Deposit - top_up__zero_balance OK
``` ```
OK: 21/21 Fail: 0/21 Skip: 0/21 OK: 21/21 Fail: 0/21 Skip: 0/21
## EF - Electra - Operations - Deposit Receipt [Preset: mainnet] ## EF - Electra - Operations - Deposit Request [Preset: mainnet]
```diff ```diff
+ [Valid] EF - Electra - Operations - Deposit Receipt - correct_sig_but_forked_state OK + [Valid] EF - Electra - Operations - Deposit Request - correct_sig_but_forked_state OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - effective_deposit_with_genesis_for OK + [Valid] EF - Electra - Operations - Deposit Request - effective_deposit_with_genesis_for OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_sig_new_deposit OK + [Valid] EF - Electra - Operations - Deposit Request - incorrect_sig_new_deposit OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_sig_top_up OK + [Valid] EF - Electra - Operations - Deposit Request - incorrect_sig_top_up OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_withdrawal_credentials_t OK + [Valid] EF - Electra - Operations - Deposit Request - incorrect_withdrawal_credentials_t OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - ineffective_deposit_with_previous_ OK + [Valid] EF - Electra - Operations - Deposit Request - ineffective_deposit_with_previous_ OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - key_validate_invalid_decompression OK + [Valid] EF - Electra - Operations - Deposit Request - key_validate_invalid_decompression OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - key_validate_invalid_subgroup OK + [Valid] EF - Electra - Operations - Deposit Request - key_validate_invalid_subgroup OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_eth1_withdrawal_creden OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_eth1_withdrawal_creden OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_max OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_max OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_non_versioned_withdraw OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_non_versioned_withdraw OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_over_max OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_over_max OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_under_max OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_under_max OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - success_top_up_to_withdrawn_valida OK + [Valid] EF - Electra - Operations - Deposit Request - success_top_up_to_withdrawn_valida OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__less_effective_balance OK + [Valid] EF - Electra - Operations - Deposit Request - top_up__less_effective_balance OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__max_effective_balance OK + [Valid] EF - Electra - Operations - Deposit Request - top_up__max_effective_balance OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__zero_balance OK + [Valid] EF - Electra - Operations - Deposit Request - top_up__zero_balance OK
``` ```
OK: 17/17 Fail: 0/17 Skip: 0/17 OK: 17/17 Fail: 0/17 Skip: 0/17
## EF - Electra - Operations - Execution Layer Withdrawal Request [Preset: mainnet]
```diff
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - activation_epoc OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - incorrect_sourc OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - incorrect_withd OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - insufficient_ef OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - no_compounding_ OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - no_excess_balan OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - on_withdrawal_r OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - pending_withdra OK
```
OK: 14/14 Fail: 0/14 Skip: 0/14
## EF - Electra - Operations - Execution Payload [Preset: mainnet] ## EF - Electra - Operations - Execution Payload [Preset: mainnet]
```diff ```diff
+ [Invalid] EF - Electra - Operations - Execution Payload - invalid_bad_everything_first_pay OK + [Invalid] EF - Electra - Operations - Execution Payload - invalid_bad_everything_first_pay OK
@ -2856,6 +2849,24 @@ OK: 26/26 Fail: 0/26 Skip: 0/26
+ [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__min_churn OK + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__min_churn OK
``` ```
OK: 24/24 Fail: 0/24 Skip: 0/24 OK: 24/24 Fail: 0/24 Skip: 0/24
## EF - Electra - Operations - Withdrawal Request [Preset: mainnet]
```diff
+ [Valid] EF - Electra - Operations - Withdrawal Request - activation_epoch_less_than_shar OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request_with_c OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - incorrect_source_address OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - incorrect_withdrawal_credential OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - insufficient_effective_balance OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - no_compounding_credentials OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - no_excess_balance OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - on_withdrawal_request_initiated OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_activation_e OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_so OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_wi OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_on_exit_init OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - pending_withdrawals_consume_all OK
```
OK: 14/14 Fail: 0/14 Skip: 0/14
## EF - Electra - Operations - Withdrawals [Preset: mainnet] ## EF - Electra - Operations - Withdrawals [Preset: mainnet]
```diff ```diff
+ [Invalid] EF - Electra - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK + [Invalid] EF - Electra - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK
@ -2982,15 +2993,14 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing BlobIdentifier OK + Testing BlobIdentifier OK
+ Testing BlobSidecar OK + Testing BlobSidecar OK
+ Testing Checkpoint OK + Testing Checkpoint OK
+ Testing Consolidation OK + Testing ConsolidationRequest OK
+ Testing ContributionAndProof OK + Testing ContributionAndProof OK
+ Testing Deposit OK + Testing Deposit OK
+ Testing DepositData OK + Testing DepositData OK
+ Testing DepositMessage OK + Testing DepositMessage OK
+ Testing DepositReceipt OK + Testing DepositRequest OK
+ Testing Eth1Block OK + Testing Eth1Block OK
+ Testing Eth1Data OK + Testing Eth1Data OK
+ Testing ExecutionLayerWithdrawalRequest OK
+ Testing ExecutionPayload OK + Testing ExecutionPayload OK
+ Testing ExecutionPayloadHeader OK + Testing ExecutionPayloadHeader OK
+ Testing Fork OK + Testing Fork OK
@ -3013,7 +3023,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing SignedBLSToExecutionChange OK + Testing SignedBLSToExecutionChange OK
+ Testing SignedBeaconBlock OK + Testing SignedBeaconBlock OK
+ Testing SignedBeaconBlockHeader OK + Testing SignedBeaconBlockHeader OK
+ Testing SignedConsolidation OK
+ Testing SignedContributionAndProof OK + Testing SignedContributionAndProof OK
+ Testing SignedVoluntaryExit OK + Testing SignedVoluntaryExit OK
+ Testing SigningData OK + Testing SigningData OK
@ -3025,8 +3034,9 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing Validator OK + Testing Validator OK
+ Testing VoluntaryExit OK + Testing VoluntaryExit OK
+ Testing Withdrawal OK + Testing Withdrawal OK
+ Testing WithdrawalRequest OK
``` ```
OK: 55/55 Fail: 0/55 Skip: 0/55 OK: 54/54 Fail: 0/54 Skip: 0/54
## EF - Electra - Sanity - Blocks [Preset: mainnet] ## EF - Electra - Sanity - Blocks [Preset: mainnet]
```diff ```diff
+ [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK + [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK
@ -3146,6 +3156,14 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
+ EF - Electra - Transition - transition_with_random_three_quarters_participation [Preset: m OK + EF - Electra - Transition - transition_with_random_three_quarters_participation [Preset: m OK
``` ```
OK: 25/25 Fail: 0/25 Skip: 0/25 OK: 25/25 Fail: 0/25 Skip: 0/25
## EF - Electra - Unittests - Light client - Sync protocol [Preset: mainnet]
```diff
+ process_light_client_update_finality_updated OK
+ process_light_client_update_timeout OK
+ test_process_light_client_update_at_period_boundary OK
+ test_process_light_client_update_not_timeout OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## EF - Light client - Single merkle proof [Preset: mainnet] ## EF - Light client - Single merkle proof [Preset: mainnet]
```diff ```diff
+ Light client - Single merkle proof - mainnet/altair/light_client/single_merkle_proof/Beaco OK + Light client - Single merkle proof - mainnet/altair/light_client/single_merkle_proof/Beaco OK
@ -3675,4 +3693,4 @@ OK: 69/88 Fail: 0/88 Skip: 19/88
OK: 3/3 Fail: 0/3 Skip: 0/3 OK: 3/3 Fail: 0/3 Skip: 0/3
---TOTAL--- ---TOTAL---
OK: 2961/2981 Fail: 0/2981 Skip: 20/2981 OK: 2971/2991 Fail: 0/2991 Skip: 20/2991

View File

@ -2579,14 +2579,20 @@ OK: 10/10 Fail: 0/10 Skip: 0/10
OK: 12/12 Fail: 0/12 Skip: 0/12 OK: 12/12 Fail: 0/12 Skip: 0/12
## EF - Electra - Epoch Processing - Pending balance deposits [Preset: minimal] ## EF - Electra - Epoch Processing - Pending balance deposits [Preset: minimal]
```diff ```diff
+ Pending balance deposits - mixture_of_skipped_and_above_churn [Preset: minimal] OK
+ Pending balance deposits - multiple_pending_deposits_above_churn [Preset: minimal] OK + Pending balance deposits - multiple_pending_deposits_above_churn [Preset: minimal] OK
+ Pending balance deposits - multiple_pending_deposits_below_churn [Preset: minimal] OK + Pending balance deposits - multiple_pending_deposits_below_churn [Preset: minimal] OK
+ Pending balance deposits - multiple_pending_one_skipped [Preset: minimal] OK
+ Pending balance deposits - multiple_skipped_deposits_exiting_validators [Preset: minimal] OK
+ Pending balance deposits - pending_deposit_balance_above_churn [Preset: minimal] OK + Pending balance deposits - pending_deposit_balance_above_churn [Preset: minimal] OK
+ Pending balance deposits - pending_deposit_balance_equal_churn [Preset: minimal] OK + Pending balance deposits - pending_deposit_balance_equal_churn [Preset: minimal] OK
+ Pending balance deposits - pending_deposit_min_activation_balance [Preset: minimal] OK + Pending balance deposits - pending_deposit_min_activation_balance [Preset: minimal] OK
+ Pending balance deposits - pending_deposit_preexisting_churn [Preset: minimal] OK + Pending balance deposits - pending_deposit_preexisting_churn [Preset: minimal] OK
+ Pending balance deposits - processing_deposit_of_withdrawable_validator [Preset: minimal] OK
+ Pending balance deposits - processing_deposit_of_withdrawable_validator_does_not_get_churn OK
+ Pending balance deposits - skipped_deposit_exiting_validator [Preset: minimal] OK
``` ```
OK: 6/6 Fail: 0/6 Skip: 0/6 OK: 12/12 Fail: 0/12 Skip: 0/12
## EF - Electra - Epoch Processing - Pending consolidations [Preset: minimal] ## EF - Electra - Epoch Processing - Pending consolidations [Preset: minimal]
```diff ```diff
+ Pending consolidations - all_consolidation_cases_together [Preset: minimal] OK + Pending consolidations - all_consolidation_cases_together [Preset: minimal] OK
@ -2802,29 +2808,30 @@ OK: 14/14 Fail: 0/14 Skip: 0/14
+ [Valid] EF - Electra - Operations - Block Header - basic_block_header OK + [Valid] EF - Electra - Operations - Block Header - basic_block_header OK
``` ```
OK: 6/6 Fail: 0/6 Skip: 0/6 OK: 6/6 Fail: 0/6 Skip: 0/6
## EF - Electra - Operations - Consolidation [Preset: minimal] ## EF - Electra - Operations - Consolidation Request [Preset: minimal]
```diff ```diff
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_before_specified_epoch OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_in_curre OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_different_credentials OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_in_new_c OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_exited_source OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_com OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_exited_target OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_ins OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_inactive_source OK + [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_pre OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_inactive_target OK + [Valid] EF - Electra - Operations - Consolidation Request - consolidation_balance_larger OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_no_execution_withdrawal_cred OK + [Valid] EF - Electra - Operations - Consolidation Request - consolidation_balance_throug OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_not_enough_consolidation_chu OK + [Valid] EF - Electra - Operations - Consolidation Request - consolidation_churn_limit_ba OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_source_equals_target OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_exceed_pending_con OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_source_signature OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_exited_source OK
+ [Invalid] EF - Electra - Operations - Consolidation - invalid_target_signature OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_exited_target OK
+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_in_current_conso OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_inactive_source OK
+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_in_new_consolida OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_inactive_target OK
+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_with_compounding OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_incorrect_source_a OK
+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_with_insufficien OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_no_source_executio OK
+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_with_preexisting OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_no_target_executio OK
+ [Valid] EF - Electra - Operations - Consolidation - consolidation_balance_larger_than_ch OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_not_enough_consoli OK
+ [Valid] EF - Electra - Operations - Consolidation - consolidation_balance_through_two_ch OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_equals_targ OK
+ [Valid] EF - Electra - Operations - Consolidation - consolidation_churn_limit_balance OK + [Valid] EF - Electra - Operations - Consolidation Request - incorrect_unknown_source_pub OK
+ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_unknown_target_pub OK
``` ```
OK: 19/19 Fail: 0/19 Skip: 0/19 OK: 20/20 Fail: 0/20 Skip: 0/20
## EF - Electra - Operations - Deposit [Preset: minimal] ## EF - Electra - Operations - Deposit [Preset: minimal]
```diff ```diff
+ [Invalid] EF - Electra - Operations - Deposit - invalid_bad_merkle_proof OK + [Invalid] EF - Electra - Operations - Deposit - invalid_bad_merkle_proof OK
@ -2850,55 +2857,27 @@ OK: 19/19 Fail: 0/19 Skip: 0/19
+ [Valid] EF - Electra - Operations - Deposit - top_up__zero_balance OK + [Valid] EF - Electra - Operations - Deposit - top_up__zero_balance OK
``` ```
OK: 21/21 Fail: 0/21 Skip: 0/21 OK: 21/21 Fail: 0/21 Skip: 0/21
## EF - Electra - Operations - Deposit Receipt [Preset: minimal] ## EF - Electra - Operations - Deposit Request [Preset: minimal]
```diff ```diff
+ [Valid] EF - Electra - Operations - Deposit Receipt - correct_sig_but_forked_state OK + [Valid] EF - Electra - Operations - Deposit Request - correct_sig_but_forked_state OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - effective_deposit_with_genesis_for OK + [Valid] EF - Electra - Operations - Deposit Request - effective_deposit_with_genesis_for OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_sig_new_deposit OK + [Valid] EF - Electra - Operations - Deposit Request - incorrect_sig_new_deposit OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_sig_top_up OK + [Valid] EF - Electra - Operations - Deposit Request - incorrect_sig_top_up OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_withdrawal_credentials_t OK + [Valid] EF - Electra - Operations - Deposit Request - incorrect_withdrawal_credentials_t OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - ineffective_deposit_with_previous_ OK + [Valid] EF - Electra - Operations - Deposit Request - ineffective_deposit_with_previous_ OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - key_validate_invalid_decompression OK + [Valid] EF - Electra - Operations - Deposit Request - key_validate_invalid_decompression OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - key_validate_invalid_subgroup OK + [Valid] EF - Electra - Operations - Deposit Request - key_validate_invalid_subgroup OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_eth1_withdrawal_creden OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_eth1_withdrawal_creden OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_max OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_max OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_non_versioned_withdraw OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_non_versioned_withdraw OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_over_max OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_over_max OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_under_max OK + [Valid] EF - Electra - Operations - Deposit Request - new_deposit_under_max OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - success_top_up_to_withdrawn_valida OK + [Valid] EF - Electra - Operations - Deposit Request - success_top_up_to_withdrawn_valida OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__less_effective_balance OK + [Valid] EF - Electra - Operations - Deposit Request - top_up__less_effective_balance OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__max_effective_balance OK + [Valid] EF - Electra - Operations - Deposit Request - top_up__max_effective_balance OK
+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__zero_balance OK + [Valid] EF - Electra - Operations - Deposit Request - top_up__zero_balance OK
``` ```
OK: 17/17 Fail: 0/17 Skip: 0/17 OK: 17/17 Fail: 0/17 Skip: 0/17
## EF - Electra - Operations - Execution Layer Withdrawal Request [Preset: minimal]
```diff
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - activation_epoc OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_partial_w OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_partial_w OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_partial_w OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - incorrect_sourc OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - incorrect_withd OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - insufficient_ef OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - no_compounding_ OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - no_excess_balan OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - on_withdrawal_r OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - pending_withdra OK
```
OK: 24/24 Fail: 0/24 Skip: 0/24
## EF - Electra - Operations - Execution Payload [Preset: minimal] ## EF - Electra - Operations - Execution Payload [Preset: minimal]
```diff ```diff
+ [Invalid] EF - Electra - Operations - Execution Payload - invalid_bad_everything_first_pay OK + [Invalid] EF - Electra - Operations - Execution Payload - invalid_bad_everything_first_pay OK
@ -3012,6 +2991,34 @@ OK: 24/24 Fail: 0/24 Skip: 0/24
+ [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK
``` ```
OK: 20/20 Fail: 0/20 Skip: 0/20 OK: 20/20 Fail: 0/20 Skip: 0/20
## EF - Electra - Operations - Withdrawal Request [Preset: minimal]
```diff
+ [Valid] EF - Electra - Operations - Withdrawal Request - activation_epoch_less_than_shar OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - basic_partial_withdrawal_reques OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - basic_partial_withdrawal_reques OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - basic_partial_withdrawal_reques OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request_with_c OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request_with_f OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - incorrect_source_address OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - incorrect_withdrawal_credential OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - insufficient_effective_balance OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - no_compounding_credentials OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - no_excess_balance OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - on_withdrawal_request_initiated OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_activation_e OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_so OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_wi OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_on_exit_init OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_queue_full OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
+ [Valid] EF - Electra - Operations - Withdrawal Request - pending_withdrawals_consume_all OK
```
OK: 24/24 Fail: 0/24 Skip: 0/24
## EF - Electra - Operations - Withdrawals [Preset: minimal] ## EF - Electra - Operations - Withdrawals [Preset: minimal]
```diff ```diff
+ [Invalid] EF - Electra - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK + [Invalid] EF - Electra - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK
@ -3139,15 +3146,14 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing BlobIdentifier OK + Testing BlobIdentifier OK
+ Testing BlobSidecar OK + Testing BlobSidecar OK
+ Testing Checkpoint OK + Testing Checkpoint OK
+ Testing Consolidation OK + Testing ConsolidationRequest OK
+ Testing ContributionAndProof OK + Testing ContributionAndProof OK
+ Testing Deposit OK + Testing Deposit OK
+ Testing DepositData OK + Testing DepositData OK
+ Testing DepositMessage OK + Testing DepositMessage OK
+ Testing DepositReceipt OK + Testing DepositRequest OK
+ Testing Eth1Block OK + Testing Eth1Block OK
+ Testing Eth1Data OK + Testing Eth1Data OK
+ Testing ExecutionLayerWithdrawalRequest OK
+ Testing ExecutionPayload OK + Testing ExecutionPayload OK
+ Testing ExecutionPayloadHeader OK + Testing ExecutionPayloadHeader OK
+ Testing Fork OK + Testing Fork OK
@ -3170,7 +3176,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing SignedBLSToExecutionChange OK + Testing SignedBLSToExecutionChange OK
+ Testing SignedBeaconBlock OK + Testing SignedBeaconBlock OK
+ Testing SignedBeaconBlockHeader OK + Testing SignedBeaconBlockHeader OK
+ Testing SignedConsolidation OK
+ Testing SignedContributionAndProof OK + Testing SignedContributionAndProof OK
+ Testing SignedVoluntaryExit OK + Testing SignedVoluntaryExit OK
+ Testing SigningData OK + Testing SigningData OK
@ -3182,8 +3187,9 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing Validator OK + Testing Validator OK
+ Testing VoluntaryExit OK + Testing VoluntaryExit OK
+ Testing Withdrawal OK + Testing Withdrawal OK
+ Testing WithdrawalRequest OK
``` ```
OK: 55/55 Fail: 0/55 Skip: 0/55 OK: 54/54 Fail: 0/54 Skip: 0/54
## EF - Electra - Sanity - Blocks [Preset: minimal] ## EF - Electra - Sanity - Blocks [Preset: minimal]
```diff ```diff
+ [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK + [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK
@ -3315,6 +3321,14 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
+ EF - Electra - Transition - transition_with_voluntary_exit_right_before_fork [Preset: mini OK + EF - Electra - Transition - transition_with_voluntary_exit_right_before_fork [Preset: mini OK
``` ```
OK: 30/30 Fail: 0/30 Skip: 0/30 OK: 30/30 Fail: 0/30 Skip: 0/30
## EF - Electra - Unittests - Light client - Sync protocol [Preset: minimal]
```diff
+ process_light_client_update_finality_updated OK
+ process_light_client_update_timeout OK
+ test_process_light_client_update_at_period_boundary OK
+ test_process_light_client_update_not_timeout OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## EF - Light client - Single merkle proof [Preset: minimal] ## EF - Light client - Single merkle proof [Preset: minimal]
```diff ```diff
+ Light client - Single merkle proof - minimal/altair/light_client/single_merkle_proof/Beaco OK + Light client - Single merkle proof - minimal/altair/light_client/single_merkle_proof/Beaco OK
@ -4005,4 +4019,4 @@ OK: 185/207 Fail: 0/207 Skip: 22/207
OK: 3/3 Fail: 0/3 Skip: 0/3 OK: 3/3 Fail: 0/3 Skip: 0/3
---TOTAL--- ---TOTAL---
OK: 3256/3279 Fail: 0/3279 Skip: 23/3279 OK: 3266/3289 Fail: 0/3289 Skip: 23/3289

View File

@ -162,7 +162,7 @@ DEPOSITS_DELAY := 0
#- "--define:release" cannot be added to "config.nims" #- "--define:release" cannot be added to "config.nims"
#- disable Nim's default parallelisation because it starts too many processes for too little gain #- disable Nim's default parallelisation because it starts too many processes for too little gain
#- https://github.com/status-im/nim-libp2p#use-identify-metrics #- https://github.com/status-im/nim-libp2p#use-identify-metrics
NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku $(NIM_PARAMS) NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS)
ifeq ($(USE_LIBBACKTRACE), 0) ifeq ($(USE_LIBBACKTRACE), 0)
NIM_PARAMS += -d:disable_libbacktrace NIM_PARAMS += -d:disable_libbacktrace

View File

@ -38,9 +38,11 @@ The [Quickstart](https://nimbus.guide/quick-start.html) in particular will help
The [Nimbus REST api](https://nimbus.guide/rest-api.html) is now available from: The [Nimbus REST api](https://nimbus.guide/rest-api.html) is now available from:
* http://testing.mainnet.beacon-api.nimbus.team/
* http://unstable.mainnet.beacon-api.nimbus.team/ * http://unstable.mainnet.beacon-api.nimbus.team/
* http://unstable.prater.beacon-api.nimbus.team/ * http://testing.mainnet.beacon-api.nimbus.team/
* http://unstable.sepolia.beacon-api.nimbus.team/
* http://testing.holesky.beacon-api.nimbus.team/
* http://unstable.holesky.beacon-api.nimbus.team/
Note that right now these are very much unstable testing instances. They may be unresponsive at times - so **please do not rely on them for validating**. We may also disable them at any time. Note that right now these are very much unstable testing instances. They may be unresponsive at times - so **please do not rely on them for validating**. We may also disable them at any time.

View File

@ -542,12 +542,22 @@ proc new*(T: type BeaconChainDB,
"lc_deneb_headers" "lc_deneb_headers"
else: else:
"", "",
electraHeaders:
if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
"lc_electra_headers"
else:
"",
altairCurrentBranches: "lc_altair_current_branches", altairCurrentBranches: "lc_altair_current_branches",
electraCurrentBranches:
if cfg.ELECTRA_FORK_EPOCH != FAR_FUTURE_EPOCH:
"lc_electra_current_branches"
else:
"",
altairSyncCommittees: "lc_altair_sync_committees", altairSyncCommittees: "lc_altair_sync_committees",
legacyAltairBestUpdates: "lc_altair_best_updates", legacyAltairBestUpdates: "lc_altair_best_updates",
bestUpdates: "lc_best_updates", bestUpdates: "lc_best_updates",
sealedPeriods: "lc_sealed_periods")).expectDb() sealedPeriods: "lc_sealed_periods")).expectDb()
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
var blobs : KvStoreRef var blobs : KvStoreRef
if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH: if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:

View File

@ -130,7 +130,7 @@ type
current_sync_committee*: SyncCommittee # [New in Altair] current_sync_committee*: SyncCommittee # [New in Altair]
next_sync_committee*: SyncCommittee # [New in Altair] next_sync_committee*: SyncCommittee # [New in Altair]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconstate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
# Memory-representation-equivalent to a Bellatrix BeaconState for in-place SSZ # Memory-representation-equivalent to a Bellatrix BeaconState for in-place SSZ
# reading and writing # reading and writing
BellatrixBeaconStateNoImmutableValidators* = object BellatrixBeaconStateNoImmutableValidators* = object
@ -401,7 +401,7 @@ type
historical_summaries*: historical_summaries*:
HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT] HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT]
deposit_receipts_start_index*: uint64 # [New in Electra:EIP6110] deposit_requests_start_index*: uint64 # [New in Electra:EIP6110]
deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251] deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251]
exit_balance_to_consume*: Gwei # [New in Electra:EIP7251] exit_balance_to_consume*: Gwei # [New in Electra:EIP7251]
earliest_exit_epoch*: Epoch # [New in Electra:EIP7251] earliest_exit_epoch*: Epoch # [New in Electra:EIP7251]

View File

@ -26,15 +26,17 @@ logScope: topics = "lcdata"
# needs to be bundled together with other data to fulfill requests. # needs to be bundled together with other data to fulfill requests.
# Mainnet data size (all columns): # Mainnet data size (all columns):
# - Altair: ~38 KB per `SyncCommitteePeriod` (~1.0 MB per month) # - Altair: ~38 KB per `SyncCommitteePeriod` (~1.0 MB per month)
# - Capella: ~222 KB per `SyncCommitteePeriod` (~6.1 MB per month) # - Capella: ~221 KB per `SyncCommitteePeriod` (~6.0 MB per month)
# - Deneb: ~230 KB per `SyncCommitteePeriod` (~6.3 MB per month) # - Deneb: ~225 KB per `SyncCommitteePeriod` (~6.2 MB per month)
# - Electra: ~249 KB per `SyncCommitteePeriod` (~6.8 MB per month)
# #
# `lc_altair_current_branches` holds Merkle proofs needed to # `lc_xxxxx_current_branches` holds Merkle proofs needed to
# construct `LightClientBootstrap` objects. # construct `LightClientBootstrap` objects.
# SSZ because this data does not compress well, and because this data # SSZ because this data does not compress well, and because this data
# needs to be bundled together with other data to fulfill requests. # needs to be bundled together with other data to fulfill requests.
# Mainnet data size (all columns): # Mainnet data size (all columns):
# - Altair ... Deneb: ~42 KB per `SyncCommitteePeriod` (~1.1 MB per month) # - Altair ... Deneb: ~42 KB per `SyncCommitteePeriod` (~1.1 MB per month)
# - Electra: ~50 KB per `SyncCommitteePeriod` (~1.4 MB per month)
# #
# `lc_altair_sync_committees` contains a copy of finalized sync committees. # `lc_altair_sync_committees` contains a copy of finalized sync committees.
# They are initially populated from the main DAG (usually a fast state access). # They are initially populated from the main DAG (usually a fast state access).
@ -42,7 +44,7 @@ logScope: topics = "lcdata"
# SSZ because this data does not compress well, and because this data # SSZ because this data does not compress well, and because this data
# needs to be bundled together with other data to fulfill requests. # needs to be bundled together with other data to fulfill requests.
# Mainnet data size (all columns): # Mainnet data size (all columns):
# - Altair ... Deneb: ~32 KB per `SyncCommitteePeriod` (~0.9 MB per month) # - Altair ... Electra: ~24 KB per `SyncCommitteePeriod` (~0.7 MB per month)
# #
# `lc_best_updates` holds full `LightClientUpdate` objects in SSZ form. # `lc_best_updates` holds full `LightClientUpdate` objects in SSZ form.
# These objects are frequently queried in bulk, but there is only one per # These objects are frequently queried in bulk, but there is only one per
@ -56,9 +58,10 @@ logScope: topics = "lcdata"
# the fork digest, because the same storage format may be used across forks. # the fork digest, because the same storage format may be used across forks.
# SSZ storage selected due to the small size and reduced logic complexity. # SSZ storage selected due to the small size and reduced logic complexity.
# Mainnet data size (all columns): # Mainnet data size (all columns):
# - Altair: ~33 KB per `SyncCommitteePeriod` (~0.9 MB per month) # - Altair: ~25 KB per `SyncCommitteePeriod` (~0.7 MB per month)
# - Capella: ~34 KB per `SyncCommitteePeriod` (~0.9 MB per month) # - Capella: ~26 KB per `SyncCommitteePeriod` (~0.7 MB per month)
# - Deneb: ~34 KB per `SyncCommitteePeriod` (~0.9 MB per month) # - Deneb: ~26 KB per `SyncCommitteePeriod` (~0.7 MB per month)
# - Electra: ~27 KB per `SyncCommitteePeriod` (~0.7 MB per month)
# #
# `lc_sealed_periods` contains the sync committee periods for which # `lc_sealed_periods` contains the sync committee periods for which
# full light client data was imported. Data for these periods may no longer # full light client data was imported. Data for these periods may no longer
@ -66,6 +69,36 @@ logScope: topics = "lcdata"
# when restarting the program. # when restarting the program.
# Mainnet data size (all columns): # Mainnet data size (all columns):
# - All forks: 8 bytes per `SyncCommitteePeriod` (~0.0 MB per month) # - All forks: 8 bytes per `SyncCommitteePeriod` (~0.0 MB per month)
#
# Header computations:
# - Altair: 256*(112+40)/1024*28/1024
# - Capella: 256*(112+4+600+128+40)/1024*28/1024
# 600 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32
# - Deneb: 256*(112+4+616+128+40)/1024*28/1024
# 616 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32+8+8
# - Electra: 256*(112+4+712+128+40)/1024*28/1024
# 712 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32+8+8+32+32+32
#
# Committee branch computations:
# - Altair: 256*(5*32+8)/1024*28/1024
# - Electra: 256*(6*32+8)/1024*28/1024
#
# Finality branch computations:
# - Altair: 256*(6*32+8)/1024*28/1024
# - Electra: 256*(7*32+8)/1024*28/1024
#
# Committee computations:
# - Altair: (24624+8)/1024*28/1024
# 513*48 = 24624
#
# Aggregate computations:
# - Altair: 112 = 512/8+48
#
# Update computations:
# - Altair: (112+24624+5*32+112+6*32+112+8+9)/1024*28/1024
# - Capella: (4+884+24624+5*32+4+884+6*32+112+8+9)/1024*28/1024
# - Deneb: (4+900+24624+5*32+4+900+6*32+112+8+9)/1024*28/1024
# - Electra: (4+996+24624+6*32+4+996+7*32+112+8+9)/1024*28/1024
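For orientation, a quick re-derivation of the "~38 KB per `SyncCommitteePeriod` (~1.0 MB per month)" Altair header figure using the same factors as the comments above. This is a rough sketch: it assumes 112 bytes is the SSZ-encoded `BeaconBlockHeader`, the extra 40 bytes are per-row key overhead, and the factor 28 approximates sync committee periods per month, as in the formula.

```nim
# Re-derives the Altair header estimate from the factors in the comment above.
# The interpretation of 112 (SSZ BeaconBlockHeader) and 40 (per-row key
# overhead) is an assumption, not taken from the source.
let
  bytesPerRow = 112 + 40                     # header + key material per row
  kbPerPeriod = 256 * bytesPerRow / 1024     # 256 entries per period -> 38.0
  mbPerMonth = kbPerPeriod * 28.0 / 1024.0   # ~28 periods per month -> ~1.04
echo kbPerPeriod, " KB per SyncCommitteePeriod"
echo mbPerMonth, " MB per month"
```

The other rows follow the same pattern, only with larger per-entry sizes.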
type type
LightClientHeaderStore = object LightClientHeaderStore = object
@ -73,6 +106,11 @@ type
putStmt: SqliteStmt[(array[32, byte], int64, seq[byte]), void] putStmt: SqliteStmt[(array[32, byte], int64, seq[byte]), void]
keepFromStmt: SqliteStmt[int64, void] keepFromStmt: SqliteStmt[int64, void]
BranchFork {.pure.} = enum
None = 0,
Altair,
Electra
CurrentSyncCommitteeBranchStore = object CurrentSyncCommitteeBranchStore = object
containsStmt: SqliteStmt[int64, int64] containsStmt: SqliteStmt[int64, int64]
getStmt: SqliteStmt[int64, seq[byte]] getStmt: SqliteStmt[int64, seq[byte]]
@ -110,8 +148,8 @@ type
## Eth2Digest -> (Slot, LightClientHeader) ## Eth2Digest -> (Slot, LightClientHeader)
## Cached block headers to support longer retention than block storage. ## Cached block headers to support longer retention than block storage.
currentBranches: CurrentSyncCommitteeBranchStore currentBranches: array[BranchFork, CurrentSyncCommitteeBranchStore]
## Slot -> altair.CurrentSyncCommitteeBranch ## Slot -> CurrentSyncCommitteeBranch
## Cached data for creating future `LightClientBootstrap` instances. ## Cached data for creating future `LightClientBootstrap` instances.
## Key is the block slot of which the post state was used to get the data. ## Key is the block slot of which the post state was used to get the data.
## Data stored for all finalized epoch boundary blocks. ## Data stored for all finalized epoch boundary blocks.
@ -209,12 +247,14 @@ func putHeader*[T: ForkyLightClientHeader](
proc initCurrentBranchesStore( proc initCurrentBranchesStore(
backend: SqStoreRef, backend: SqStoreRef,
name: string): KvResult[CurrentSyncCommitteeBranchStore] = name, typeName: string): KvResult[CurrentSyncCommitteeBranchStore] =
if name == "":
return ok CurrentSyncCommitteeBranchStore()
if not backend.readOnly: if not backend.readOnly:
? backend.exec(""" ? backend.exec("""
CREATE TABLE IF NOT EXISTS `""" & name & """` ( CREATE TABLE IF NOT EXISTS `""" & name & """` (
`slot` INTEGER PRIMARY KEY, -- `Slot` (up through 2^63-1) `slot` INTEGER PRIMARY KEY, -- `Slot` (up through 2^63-1)
`branch` BLOB -- `altair.CurrentSyncCommitteeBranch` (SSZ) `branch` BLOB -- `""" & typeName & """` (SSZ)
); );
""") """)
if not ? backend.hasTable(name): if not ? backend.hasTable(name):
@ -253,40 +293,46 @@ func close(store: var CurrentSyncCommitteeBranchStore) =
store.putStmt.disposeSafe() store.putStmt.disposeSafe()
store.keepFromStmt.disposeSafe() store.keepFromStmt.disposeSafe()
func hasCurrentSyncCommitteeBranch*( template kind(x: typedesc[altair.CurrentSyncCommitteeBranch]): BranchFork =
BranchFork.Altair
template kind(x: typedesc[electra.CurrentSyncCommitteeBranch]): BranchFork =
BranchFork.Electra
func hasCurrentSyncCommitteeBranch*[T: ForkyCurrentSyncCommitteeBranch](
db: LightClientDataDB, slot: Slot): bool = db: LightClientDataDB, slot: Slot): bool =
if not slot.isSupportedBySQLite or if not slot.isSupportedBySQLite or
distinctBase(db.currentBranches.containsStmt) == nil: distinctBase(db.currentBranches[T.kind].containsStmt) == nil:
return false return false
var exists: int64 var exists: int64
for res in db.currentBranches.containsStmt.exec(slot.int64, exists): for res in db.currentBranches[T.kind].containsStmt.exec(slot.int64, exists):
res.expect("SQL query OK") res.expect("SQL query OK")
doAssert exists == 1 doAssert exists == 1
return true return true
false false
proc getCurrentSyncCommitteeBranch*( proc getCurrentSyncCommitteeBranch*[T: ForkyCurrentSyncCommitteeBranch](
db: LightClientDataDB, slot: Slot): Opt[altair.CurrentSyncCommitteeBranch] = db: LightClientDataDB, slot: Slot): Opt[T] =
if not slot.isSupportedBySQLite or if not slot.isSupportedBySQLite or
distinctBase(db.currentBranches.getStmt) == nil: distinctBase(db.currentBranches[T.kind].getStmt) == nil:
return Opt.none(altair.CurrentSyncCommitteeBranch) return Opt.none(T)
var branch: seq[byte] var branch: seq[byte]
for res in db.currentBranches.getStmt.exec(slot.int64, branch): for res in db.currentBranches[T.kind].getStmt.exec(slot.int64, branch):
res.expect("SQL query OK") res.expect("SQL query OK")
try: try:
return ok SSZ.decode(branch, altair.CurrentSyncCommitteeBranch) return ok SSZ.decode(branch, T)
except SerializationError as exc: except SerializationError as exc:
error "LC data store corrupted", store = "currentBranches", error "LC data store corrupted", store = "currentBranches", kind = T.kind,
slot, exc = exc.msg slot, exc = exc.msg
return Opt.none(altair.CurrentSyncCommitteeBranch) return Opt.none(T)
func putCurrentSyncCommitteeBranch*( func putCurrentSyncCommitteeBranch*[T: ForkyCurrentSyncCommitteeBranch](
db: LightClientDataDB, slot: Slot, db: LightClientDataDB, slot: Slot, branch: T) =
branch: altair.CurrentSyncCommitteeBranch) =
doAssert not db.backend.readOnly # All `stmt` are non-nil doAssert not db.backend.readOnly # All `stmt` are non-nil
if not slot.isSupportedBySQLite: if not slot.isSupportedBySQLite:
return return
let res = db.currentBranches.putStmt.exec((slot.int64, SSZ.encode(branch))) let res = db.currentBranches[T.kind].putStmt.exec(
(slot.int64, SSZ.encode(branch)))
res.expect("SQL query OK") res.expect("SQL query OK")
proc initSyncCommitteesStore( proc initSyncCommitteesStore(
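For reference, a minimal, self-contained sketch of the dispatch pattern introduced here: stores live in an array indexed by `BranchFork`, and a `kind` template on the branch `typedesc` lets generic accessors pick the right table from the branch type alone. The branch and store types below are simplified stand-ins, not the repository's actual SSZ types.

```nim
# Minimal sketch of the fork-indexed store dispatch used above. `BranchFork`
# and the `kind` templates mirror the diff; everything else is illustrative.
type
  BranchFork {.pure.} = enum
    None = 0, Altair, Electra
  AltairBranch = object    # stand-in for altair.CurrentSyncCommitteeBranch
  ElectraBranch = object   # stand-in for electra.CurrentSyncCommitteeBranch
  Store = object
    table: string

template kind(x: typedesc[AltairBranch]): BranchFork = BranchFork.Altair
template kind(x: typedesc[ElectraBranch]): BranchFork = BranchFork.Electra

# One store per fork, indexed by the enum, like `db.currentBranches`.
var currentBranches: array[BranchFork, Store] = [
  Store(),                                      # BranchFork.None (placeholder)
  Store(table: "lc_altair_current_branches"),   # BranchFork.Altair
  Store(table: "lc_electra_current_branches")]  # BranchFork.Electra

proc tableFor[T: AltairBranch | ElectraBranch](branch: typedesc[T]): string =
  # Generic callers select the right table purely from the branch type.
  currentBranches[T.kind].table

echo tableFor(AltairBranch)    # lc_altair_current_branches
echo tableFor(ElectraBranch)   # lc_electra_current_branches
```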
@ -618,9 +664,11 @@ func keepPeriodsFrom*(
let res = db.syncCommittees.keepFromStmt.exec(minPeriod.int64) let res = db.syncCommittees.keepFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK") res.expect("SQL query OK")
let minSlot = min(minPeriod.start_slot, int64.high.Slot) let minSlot = min(minPeriod.start_slot, int64.high.Slot)
block: for branchFork, store in db.currentBranches:
let res = db.currentBranches.keepFromStmt.exec(minSlot.int64) if branchFork > BranchFork.None and
res.expect("SQL query OK") distinctBase(store.keepFromStmt) != nil:
let res = store.keepFromStmt.exec(minSlot.int64)
res.expect("SQL query OK")
for lcDataFork, store in db.headers: for lcDataFork, store in db.headers:
if lcDataFork > LightClientDataFork.None and if lcDataFork > LightClientDataFork.None and
distinctBase(store.keepFromStmt) != nil: distinctBase(store.keepFromStmt) != nil:
@ -631,7 +679,9 @@ type LightClientDataDBNames* = object
altairHeaders*: string altairHeaders*: string
capellaHeaders*: string capellaHeaders*: string
denebHeaders*: string denebHeaders*: string
electraHeaders*: string
altairCurrentBranches*: string altairCurrentBranches*: string
electraCurrentBranches*: string
altairSyncCommittees*: string altairSyncCommittees*: string
legacyAltairBestUpdates*: string legacyAltairBestUpdates*: string
bestUpdates*: string bestUpdates*: string
@ -640,7 +690,7 @@ type LightClientDataDBNames* = object
proc initLightClientDataDB*( proc initLightClientDataDB*(
backend: SqStoreRef, backend: SqStoreRef,
names: LightClientDataDBNames): KvResult[LightClientDataDB] = names: LightClientDataDBNames): KvResult[LightClientDataDB] =
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
let let
headers = [ headers = [
# LightClientDataFork.None # LightClientDataFork.None
@ -653,10 +703,21 @@ proc initLightClientDataDB*(
names.capellaHeaders, "capella.LightClientHeader"), names.capellaHeaders, "capella.LightClientHeader"),
# LightClientDataFork.Deneb # LightClientDataFork.Deneb
? backend.initHeadersStore( ? backend.initHeadersStore(
names.denebHeaders, "deneb.LightClientHeader") names.denebHeaders, "deneb.LightClientHeader"),
# LightClientDataFork.Electra
? backend.initHeadersStore(
names.electraHeaders, "electra.LightClientHeader"),
]
currentBranches = [
# BranchFork.None
CurrentSyncCommitteeBranchStore(),
# BranchFork.Altair
? backend.initCurrentBranchesStore(
names.altairCurrentBranches, "altair.CurrentSyncCommitteeBranch"),
# BranchFork.Electra
? backend.initCurrentBranchesStore(
names.electraCurrentBranches, "electra.CurrentSyncCommitteeBranch"),
] ]
currentBranches =
? backend.initCurrentBranchesStore(names.altairCurrentBranches)
syncCommittees = syncCommittees =
? backend.initSyncCommitteesStore(names.altairSyncCommittees) ? backend.initSyncCommitteesStore(names.altairSyncCommittees)
legacyBestUpdates = legacyBestUpdates =
@ -681,7 +742,9 @@ proc close*(db: LightClientDataDB) =
for lcDataFork in LightClientDataFork: for lcDataFork in LightClientDataFork:
if lcDataFork > LightClientDataFork.None: if lcDataFork > LightClientDataFork.None:
db.headers[lcDataFork].close() db.headers[lcDataFork].close()
db.currentBranches.close() for branchFork in BranchFork:
if branchFork > BranchFork.None:
db.currentBranches[branchFork].close()
db.syncCommittees.close() db.syncCommittees.close()
db.legacyBestUpdates.close() db.legacyBestUpdates.close()
db.bestUpdates.close() db.bestUpdates.close()

View File

@ -27,7 +27,7 @@ type
## which blocks are valid - in particular, blocks are not valid if they ## which blocks are valid - in particular, blocks are not valid if they
## come from the future as seen from the local clock. ## come from the future as seen from the local clock.
## ##
## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#fork-choice ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#fork-choice
## ##
# TODO consider NTP and network-adjusted timestamps as outlined here: # TODO consider NTP and network-adjusted timestamps as outlined here:
# https://ethresear.ch/t/network-adjusted-timestamps/4187 # https://ethresear.ch/t/network-adjusted-timestamps/4187

View File

@ -67,6 +67,8 @@ type
config*: BeaconNodeConf config*: BeaconNodeConf
attachedValidators*: ref ValidatorPool attachedValidators*: ref ValidatorPool
optimisticProcessor*: OptimisticProcessor optimisticProcessor*: OptimisticProcessor
optimisticFcuFut*: Future[(PayloadExecutionStatus, Opt[BlockHash])]
.Raising([CancelledError])
lightClient*: LightClient lightClient*: LightClient
dag*: ChainDAGRef dag*: ChainDAGRef
quarantine*: ref Quarantine quarantine*: ref Quarantine
@ -104,6 +106,7 @@ type
## Number of validators that we've checked for activation ## Number of validators that we've checked for activation
processingDelay*: Opt[Duration] processingDelay*: Opt[Duration]
lastValidAttestedBlock*: Opt[BlockSlot] lastValidAttestedBlock*: Opt[BlockSlot]
shutdownEvent*: AsyncEvent
template findIt*(s: openArray, predicate: untyped): int = template findIt*(s: openArray, predicate: untyped): int =
var res = -1 var res = -1

View File

@ -38,61 +38,17 @@ proc initLightClient*(
# for broadcasting light client data as a server. # for broadcasting light client data as a server.
let let
optimisticHandler = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock): optimisticHandler = proc(
Future[void] {.async: (raises: [CancelledError]).} = signedBlock: ForkedSignedBeaconBlock
debug "New LC optimistic block", ): Future[void] {.async: (raises: [CancelledError]).} =
opt = signedBlock.toBlockId(),
dag = node.dag.head.bid,
wallSlot = node.currentSlot
withBlck(signedBlock): withBlck(signedBlock):
when consensusFork >= ConsensusFork.Bellatrix: when consensusFork >= ConsensusFork.Bellatrix:
if forkyBlck.message.is_execution_block: if forkyBlck.message.is_execution_block:
template blckPayload(): auto = template payload(): auto = forkyBlck.message.body.execution_payload
forkyBlck.message.body.execution_payload if not payload.block_hash.isZero:
if not blckPayload.block_hash.isZero:
# engine_newPayloadV1
discard await node.elManager.newExecutionPayload( discard await node.elManager.newExecutionPayload(
forkyBlck.message) forkyBlck.message)
# Retain optimistic head for other `forkchoiceUpdated` callers.
# May temporarily block `forkchoiceUpdatedV1` calls, e.g., Geth:
# - Refuses `newPayload`: "Ignoring payload while snap syncing"
# - Refuses `fcU`: "Forkchoice requested unknown head"
# Once DAG sync catches up or as new optimistic heads are fetched
# the situation recovers
node.consensusManager[].setOptimisticHead(
forkyBlck.toBlockId(), blckPayload.block_hash)
# engine_forkchoiceUpdatedV1 or engine_forkchoiceUpdatedV2,
# depending on pre or post-Shapella
let beaconHead = node.attestationPool[].getBeaconHead(nil)
template callForkchoiceUpdated(attributes: untyped) =
discard await node.elManager.forkchoiceUpdated(
headBlockHash = blckPayload.block_hash,
safeBlockHash = beaconHead.safeExecutionBlockHash,
finalizedBlockHash = beaconHead.finalizedExecutionBlockHash,
payloadAttributes = Opt.none attributes)
case node.dag.cfg.consensusForkAtEpoch(
forkyBlck.message.slot.epoch)
of ConsensusFork.Deneb, ConsensusFork.Electra:
callForkchoiceUpdated(PayloadAttributesV3)
of ConsensusFork.Capella:
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#specification-1
# Consensus layer client MUST call this method instead of
# `engine_forkchoiceUpdatedV1` under any of the following
# conditions:
# `headBlockHash` references a block which `timestamp` is
# greater or equal to the Shanghai timestamp
callForkchoiceUpdated(PayloadAttributesV2)
of ConsensusFork.Bellatrix:
callForkchoiceUpdated(PayloadAttributesV1)
of ConsensusFork.Phase0, ConsensusFork.Altair:
discard
else: discard else: discard
optimisticProcessor = initOptimisticProcessor( optimisticProcessor = initOptimisticProcessor(
getBeaconTime, optimisticHandler) getBeaconTime, optimisticHandler)
@ -104,9 +60,46 @@ proc initLightClient*(
proc onOptimisticHeader( proc onOptimisticHeader(
lightClient: LightClient, lightClient: LightClient,
optimisticHeader: ForkedLightClientHeader) = optimisticHeader: ForkedLightClientHeader) =
if node.optimisticFcuFut != nil:
return
withForkyHeader(optimisticHeader): withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
optimisticProcessor.setOptimisticHeader(forkyHeader.beacon) let bid = forkyHeader.beacon.toBlockId()
logScope:
opt = bid
dag = node.dag.head.bid
wallSlot = node.currentSlot
when lcDataFork >= LightClientDataFork.Capella:
let
consensusFork = node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch)
blockHash = forkyHeader.execution.block_hash
# Retain optimistic head for other `forkchoiceUpdated` callers.
# May temporarily block `forkchoiceUpdated` calls, e.g., Geth:
# - Refuses `newPayload`: "Ignoring payload while snap syncing"
# - Refuses `fcU`: "Forkchoice requested unknown head"
# Once DAG sync catches up, or as new optimistic heads are fetched,
# the situation recovers
debug "New LC optimistic header"
node.consensusManager[].setOptimisticHead(bid, blockHash)
if not node.consensusManager[]
.shouldSyncOptimistically(node.currentSlot):
return
# engine_forkchoiceUpdated
let beaconHead = node.attestationPool[].getBeaconHead(nil)
withConsensusFork(consensusFork):
when lcDataForkAtConsensusFork(consensusFork) == lcDataFork:
node.optimisticFcuFut = node.elManager.forkchoiceUpdated(
headBlockHash = blockHash,
safeBlockHash = beaconHead.safeExecutionBlockHash,
finalizedBlockHash = beaconHead.finalizedExecutionBlockHash,
payloadAttributes = Opt.none consensusFork.PayloadAttributes)
node.optimisticFcuFut.addCallback do (future: pointer):
node.optimisticFcuFut = nil
else:
# The execution block hash is only available from Capella onward
info "Ignoring new LC optimistic header until Capella"
lightClient.onOptimisticHeader = onOptimisticHeader lightClient.onOptimisticHeader = onOptimisticHeader
lightClient.trustedBlockRoot = config.trustedBlockRoot lightClient.trustedBlockRoot = config.trustedBlockRoot
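The new `onOptimisticHeader` handler above keeps at most one engine `forkchoiceUpdated` call in flight: a non-nil `optimisticFcuFut` short-circuits the handler, and a completion callback clears it so the next header can trigger a fresh call. A rough standalone sketch of that single-in-flight pattern, written against std/asyncdispatch rather than chronos; `HeaderDriver` and `sendFcU` are illustrative stand-ins, not the Nimbus API:

```nim
import std/asyncdispatch

type HeaderDriver = ref object
  fcuFut: Future[void]            # nil while no forkchoiceUpdated is in flight

proc sendFcU(headBlockHash: string): Future[void] {.async.} =
  await sleepAsync(50)            # stand-in for the engine API round-trip
  echo "forkchoiceUpdated for ", headBlockHash

proc onOptimisticHeader(d: HeaderDriver, headBlockHash: string) =
  if d.fcuFut != nil:
    return                        # backpressure: previous call still pending
  d.fcuFut = sendFcU(headBlockHash)
  proc clearFut() =
    d.fcuFut = nil                # next header may trigger a new call
  d.fcuFut.addCallback(clearFut)

when isMainModule:
  let d = HeaderDriver()
  d.onOptimisticHeader("0xaaaa")  # starts a call
  d.onOptimisticHeader("0xbbbb")  # dropped, one call already in flight
  waitFor sleepAsync(100)
  d.onOptimisticHeader("0xcccc")  # previous completed, starts again
  waitFor sleepAsync(100)
```

The trade-off is that headers arriving while a call is pending are dropped, which is acceptable when only the latest optimistic head matters to the execution client.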

View File

@ -131,6 +131,10 @@ type
url*: Uri url*: Uri
provenBlockProperties*: seq[string] # empty if this is not a verifying Web3Signer provenBlockProperties*: seq[string] # empty if this is not a verifying Web3Signer
LongRangeSyncMode* {.pure.} = enum
Light = "light",
Lenient = "lenient"
BeaconNodeConf* = object BeaconNodeConf* = object
configFile* {. configFile* {.
desc: "Loads the configuration from a TOML file" desc: "Loads the configuration from a TOML file"
@ -557,6 +561,11 @@ type
desc: "Maximum number of sync committee periods to retain light client data" desc: "Maximum number of sync committee periods to retain light client data"
name: "light-client-data-max-periods" .}: Option[uint64] name: "light-client-data-max-periods" .}: Option[uint64]
longRangeSync* {.
desc: "Enable long-range syncing (genesis sync)",
defaultValue: LongRangeSyncMode.Light,
name: "long-range-sync".}: LongRangeSyncMode
inProcessValidators* {. inProcessValidators* {.
desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself" desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself"
defaultValue: true # the use of the nimbus_signing_process binary by default will be delayed until async I/O over stdin/stdout is developed for the child process. defaultValue: true # the use of the nimbus_signing_process binary by default will be delayed until async I/O over stdin/stdout is developed for the child process.

View File

@ -1058,7 +1058,7 @@ proc getBeaconHead*(
pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck) pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck)
.get(ZERO_HASH) .get(ZERO_HASH)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/fork_choice/safe-block.md#get_safe_execution_payload_hash # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/fork_choice/safe-block.md#get_safe_execution_payload_hash
safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root() safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root()
safeBlock = pool.dag.getBlockRef(safeBlockRoot) safeBlock = pool.dag.getBlockRef(safeBlockRoot)
safeExecutionBlockHash = safeExecutionBlockHash =

View File

@ -33,11 +33,13 @@ type
CachedLightClientData* = object CachedLightClientData* = object
## Cached data from historical non-finalized states to improve speed when ## Cached data from historical non-finalized states to improve speed when
## creating future `LightClientUpdate` and `LightClientBootstrap` instances. ## creating future `LightClientUpdate` and `LightClientBootstrap` instances.
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch current_sync_committee_branch*:
next_sync_committee_branch*: altair.NextSyncCommitteeBranch LightClientDataFork.high.CurrentSyncCommitteeBranch
next_sync_committee_branch*:
LightClientDataFork.high.NextSyncCommitteeBranch
finalized_slot*: Slot finalized_slot*: Slot
finality_branch*: altair.FinalityBranch finality_branch*: LightClientDataFork.high.FinalityBranch
current_period_best_update*: ref ForkedLightClientUpdate current_period_best_update*: ref ForkedLightClientUpdate
latest_signature_slot*: Slot latest_signature_slot*: Slot

View File

@ -1178,7 +1178,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# should have `previous_version` set to `current_version` while # should have `previous_version` set to `current_version` while
# this doesn't happen to be the case in network that go through # this doesn't happen to be the case in network that go through
# regular hard-fork upgrades. See for example: # regular hard-fork upgrades. See for example:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
if stateFork.current_version != configFork.current_version: if stateFork.current_version != configFork.current_version:
error "State from database does not match network, check --network parameter", error "State from database does not match network, check --network parameter",
tail = dag.tail, headRef, stateFork, configFork tail = dag.tail, headRef, stateFork, configFork
@ -1972,7 +1972,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
prunedHeads = hlen - dag.heads.len, prunedHeads = hlen - dag.heads.len,
dagPruneDur = Moment.now() - startTick dagPruneDur = Moment.now() - startTick
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/sync/optimistic.md#helpers # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#helpers
func is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool = func is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool =
let blck = let blck =
if bid.slot <= dag.finalizedHead.slot: if bid.slot <= dag.finalizedHead.slot:

View File

@ -22,6 +22,15 @@ template nextEpochBoundarySlot(slot: Slot): Slot =
## referring to a block at given slot. ## referring to a block at given slot.
(slot + (SLOTS_PER_EPOCH - 1)).epoch.start_slot (slot + (SLOTS_PER_EPOCH - 1)).epoch.start_slot
func hasCurrentSyncCommitteeBranch(dag: ChainDAGRef, slot: Slot): bool =
let epoch = dag.cfg.consensusForkAtEpoch(slot.epoch)
withLcDataFork(lcDataForkAtConsensusFork(epoch)):
when lcDataFork > LightClientDataFork.None:
hasCurrentSyncCommitteeBranch[lcDataFork.CurrentSyncCommitteeBranch](
dag.lcDataStore.db, slot)
else:
true
proc updateExistingState( proc updateExistingState(
dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId, dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId,
save: bool, cache: var StateCache): bool = save: bool, cache: var StateCache): bool =
@ -226,7 +235,7 @@ proc initLightClientBootstrapForPeriod(
bid = bsi.bid bid = bsi.bid
boundarySlot = bid.slot.nextEpochBoundarySlot boundarySlot = bid.slot.nextEpochBoundarySlot
if boundarySlot == nextBoundarySlot and bid.slot >= lowSlot and if boundarySlot == nextBoundarySlot and bid.slot >= lowSlot and
not dag.lcDataStore.db.hasCurrentSyncCommitteeBranch(bid.slot): not dag.hasCurrentSyncCommitteeBranch(bid.slot):
let bdata = dag.getExistingForkedBlock(bid).valueOr: let bdata = dag.getExistingForkedBlock(bid).valueOr:
dag.handleUnexpectedLightClientError(bid.slot) dag.handleUnexpectedLightClientError(bid.slot)
res.err() res.err()
@ -246,7 +255,7 @@ proc initLightClientBootstrapForPeriod(
forkyBlck.toLightClientHeader(lcDataFork)) forkyBlck.toLightClientHeader(lcDataFork))
dag.lcDataStore.db.putCurrentSyncCommitteeBranch( dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
bid.slot, forkyState.data.build_proof( bid.slot, forkyState.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_GINDEX).get) lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get)
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
res res
@ -393,13 +402,13 @@ proc initLightClientUpdateForPeriod(
update = ForkedLightClientUpdate.init(lcDataFork.LightClientUpdate( update = ForkedLightClientUpdate.init(lcDataFork.LightClientUpdate(
attested_header: forkyBlck.toLightClientHeader(lcDataFork), attested_header: forkyBlck.toLightClientHeader(lcDataFork),
next_sync_committee: forkyState.data.next_sync_committee, next_sync_committee: forkyState.data.next_sync_committee,
next_sync_committee_branch: next_sync_committee_branch: forkyState.data.build_proof(
forkyState.data.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get, lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX).get,
finality_branch: finality_branch:
if finalizedBid.slot != FAR_FUTURE_SLOT: if finalizedBid.slot != FAR_FUTURE_SLOT:
forkyState.data.build_proof(altair.FINALIZED_ROOT_GINDEX).get forkyState.data.build_proof(lcDataFork.FINALIZED_ROOT_GINDEX).get
else: else:
default(FinalityBranch))) default(lcDataFork.FinalityBranch)))
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
do: do:
dag.handleUnexpectedLightClientError(attestedBid.slot) dag.handleUnexpectedLightClientError(attestedBid.slot)
@ -464,17 +473,21 @@ proc cacheLightClientData(
## Cache data for a given block and its post-state to speed up creating future ## Cache data for a given block and its post-state to speed up creating future
## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this ## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this
## block and state. ## block and state.
const lcDataFork = lcDataForkAtConsensusFork(typeof(state).kind)
let let
bid = blck.toBlockId() bid = blck.toBlockId()
cachedData = CachedLightClientData( cachedData = CachedLightClientData(
current_sync_committee_branch: current_sync_committee_branch: normalize_merkle_branch(
state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_GINDEX).get, state.data.build_proof(lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get,
next_sync_committee_branch: LightClientDataFork.high.CURRENT_SYNC_COMMITTEE_GINDEX),
state.data.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get, next_sync_committee_branch: normalize_merkle_branch(
state.data.build_proof(lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX).get,
LightClientDataFork.high.NEXT_SYNC_COMMITTEE_GINDEX),
finalized_slot: finalized_slot:
state.data.finalized_checkpoint.epoch.start_slot, state.data.finalized_checkpoint.epoch.start_slot,
finality_branch: finality_branch: normalize_merkle_branch(
state.data.build_proof(altair.FINALIZED_ROOT_GINDEX).get, state.data.build_proof(lcDataFork.FINALIZED_ROOT_GINDEX).get,
LightClientDataFork.high.FINALIZED_ROOT_GINDEX),
current_period_best_update: current_period_best_update:
current_period_best_update, current_period_best_update,
latest_signature_slot: latest_signature_slot:
@ -538,15 +551,18 @@ proc assignLightClientData(
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
forkyObject.next_sync_committee = forkyObject.next_sync_committee =
next_sync_committee.get next_sync_committee.get
forkyObject.next_sync_committee_branch = forkyObject.next_sync_committee_branch = normalize_merkle_branch(
attested_data.next_sync_committee_branch attested_data.next_sync_committee_branch,
lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX)
else: else:
doAssert next_sync_committee.isNone doAssert next_sync_committee.isNone
var finalized_slot = attested_data.finalized_slot var finalized_slot = attested_data.finalized_slot
withForkyObject(obj): withForkyObject(obj):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
if finalized_slot == forkyObject.finalized_header.beacon.slot: if finalized_slot == forkyObject.finalized_header.beacon.slot:
forkyObject.finality_branch = attested_data.finality_branch forkyObject.finality_branch = normalize_merkle_branch(
attested_data.finality_branch,
lcDataFork.FINALIZED_ROOT_GINDEX)
elif finalized_slot < max(dag.tail.slot, dag.backfill.slot): elif finalized_slot < max(dag.tail.slot, dag.backfill.slot):
forkyObject.finalized_header.reset() forkyObject.finalized_header.reset()
forkyObject.finality_branch.reset() forkyObject.finality_branch.reset()
@ -564,10 +580,14 @@ proc assignLightClientData(
attested_data.finalized_slot = finalized_slot attested_data.finalized_slot = finalized_slot
dag.lcDataStore.cache.data[attested_bid] = attested_data dag.lcDataStore.cache.data[attested_bid] = attested_data
if finalized_slot == forkyObject.finalized_header.beacon.slot: if finalized_slot == forkyObject.finalized_header.beacon.slot:
forkyObject.finality_branch = attested_data.finality_branch forkyObject.finality_branch = normalize_merkle_branch(
attested_data.finality_branch,
lcDataFork.FINALIZED_ROOT_GINDEX)
elif finalized_slot == GENESIS_SLOT: elif finalized_slot == GENESIS_SLOT:
forkyObject.finalized_header.reset() forkyObject.finalized_header.reset()
forkyObject.finality_branch = attested_data.finality_branch forkyObject.finality_branch = normalize_merkle_branch(
attested_data.finality_branch,
lcDataFork.FINALIZED_ROOT_GINDEX)
else: else:
var fin_header = dag.getExistingLightClientHeader(finalized_bid) var fin_header = dag.getExistingLightClientHeader(finalized_bid)
if fin_header.kind == LightClientDataFork.None: if fin_header.kind == LightClientDataFork.None:
@ -577,7 +597,9 @@ proc assignLightClientData(
else: else:
fin_header.migrateToDataFork(lcDataFork) fin_header.migrateToDataFork(lcDataFork)
forkyObject.finalized_header = fin_header.forky(lcDataFork) forkyObject.finalized_header = fin_header.forky(lcDataFork)
forkyObject.finality_branch = attested_data.finality_branch forkyObject.finality_branch = normalize_merkle_branch(
attested_data.finality_branch,
lcDataFork.FINALIZED_ROOT_GINDEX)
withForkyObject(obj): withForkyObject(obj):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
forkyObject.sync_aggregate = sync_aggregate forkyObject.sync_aggregate = sync_aggregate
@ -701,9 +723,11 @@ proc createLightClientBootstrap(
const lcDataFork = lcDataForkAtConsensusFork(consensusFork) const lcDataFork = lcDataForkAtConsensusFork(consensusFork)
dag.lcDataStore.db.putHeader( dag.lcDataStore.db.putHeader(
forkyBlck.toLightClientHeader(lcDataFork)) forkyBlck.toLightClientHeader(lcDataFork))
dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
bid.slot, normalize_merkle_branch(
dag.getLightClientData(bid).current_sync_committee_branch,
lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX))
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
bid.slot, dag.getLightClientData(bid).current_sync_committee_branch)
ok() ok()
proc initLightClientDataCache*(dag: ChainDAGRef) = proc initLightClientDataCache*(dag: ChainDAGRef) =
@ -1014,7 +1038,7 @@ proc getLightClientBootstrap(
# Ensure `current_sync_committee_branch` is known # Ensure `current_sync_committee_branch` is known
if dag.lcDataStore.importMode == LightClientDataImportMode.OnDemand and if dag.lcDataStore.importMode == LightClientDataImportMode.OnDemand and
not dag.lcDataStore.db.hasCurrentSyncCommitteeBranch(slot): not dag.hasCurrentSyncCommitteeBranch(slot):
let let
bsi = dag.getExistingBlockIdAtSlot(slot).valueOr: bsi = dag.getExistingBlockIdAtSlot(slot).valueOr:
return default(ForkedLightClientBootstrap) return default(ForkedLightClientBootstrap)
@ -1022,13 +1046,14 @@ proc getLightClientBootstrap(
dag.withUpdatedExistingState(tmpState[], bsi) do: dag.withUpdatedExistingState(tmpState[], bsi) do:
withState(updatedState): withState(updatedState):
when consensusFork >= ConsensusFork.Altair: when consensusFork >= ConsensusFork.Altair:
const lcDataFork = lcDataForkAtConsensusFork(consensusFork)
if not dag.lcDataStore.db.hasSyncCommittee(period): if not dag.lcDataStore.db.hasSyncCommittee(period):
dag.lcDataStore.db.putSyncCommittee( dag.lcDataStore.db.putSyncCommittee(
period, forkyState.data.current_sync_committee) period, forkyState.data.current_sync_committee)
dag.lcDataStore.db.putHeader(header) dag.lcDataStore.db.putHeader(header)
dag.lcDataStore.db.putCurrentSyncCommitteeBranch( dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
slot, forkyState.data.build_proof( slot, forkyState.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_GINDEX).get) lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get)
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
do: return default(ForkedLightClientBootstrap) do: return default(ForkedLightClientBootstrap)
@ -1050,7 +1075,8 @@ proc getLightClientBootstrap(
debug "LC bootstrap unavailable: Sync committee not cached", period debug "LC bootstrap unavailable: Sync committee not cached", period
return default(ForkedLightClientBootstrap)), return default(ForkedLightClientBootstrap)),
current_sync_committee_branch: (block: current_sync_committee_branch: (block:
dag.lcDataStore.db.getCurrentSyncCommitteeBranch(slot).valueOr: getCurrentSyncCommitteeBranch[lcDataFork.CurrentSyncCommitteeBranch](
dag.lcDataStore.db, slot).valueOr:
debug "LC bootstrap unavailable: Committee branch not cached", slot debug "LC bootstrap unavailable: Committee branch not cached", slot
return default(ForkedLightClientBootstrap)))) return default(ForkedLightClientBootstrap))))
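The cached branches above are now stored at the deepest supported fork's generalized-index depth and converted per fork via `normalize_merkle_branch`. The underlying bookkeeping is that a Merkle proof for generalized index `g` carries `floor(log2(g))` sibling hashes, so moving a branch between forks is a length adjustment. A rough standalone sketch of that idea; the helper name, and which end receives the zero padding, are illustrative assumptions rather than the exact Nimbus implementation:

```nim
import std/math

type Digest = array[32, byte]      # stand-in for Eth2Digest

proc branchDepth(gindex: uint64): int =
  ## A Merkle proof for generalized index `gindex` has floor(log2(gindex))
  ## sibling hashes.
  int(floor(log2(gindex.float64)))

proc normalizeBranch(branch: seq[Digest], targetGindex: uint64): seq[Digest] =
  ## Resize a cached branch to the depth required by `targetGindex`,
  ## padding with zeroed digests when the target is deeper.
  let target = branchDepth(targetGindex)
  if branch.len >= target:
    branch[branch.len - target .. ^1]
  else:
    newSeq[Digest](target - branch.len) & branch

when isMainModule:
  const FINALIZED_ROOT_GINDEX_ALTAIR = 105'u64   # depth 6
  let cachedAtDeepestFork = newSeq[Digest](7)    # e.g. a fork whose proof is depth 7
  echo normalizeBranch(cachedAtDeepestFork, FINALIZED_ROOT_GINDEX_ALTAIR).len  # 6
```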

View File

@ -53,7 +53,7 @@ iterator get_beacon_committee*(
committees_per_slot * SLOTS_PER_EPOCH committees_per_slot * SLOTS_PER_EPOCH
): yield (index_in_committee, idx) ): yield (index_in_committee, idx)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_beacon_committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee*( func get_beacon_committee*(
shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex): shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex):
seq[ValidatorIndex] = seq[ValidatorIndex] =

View File

@ -217,15 +217,16 @@ func produceContribution*(
else: else:
false false
func addAggregateAux(bestVotes: var BestSyncSubcommitteeContributions, func addContribution(
contribution: SyncCommitteeContribution) = contributions: var BestSyncSubcommitteeContributions,
contribution: SyncCommitteeContribution) =
let let
currentBestTotalParticipants = currentBestTotalParticipants =
bestVotes.subnets[contribution.subcommittee_index].totalParticipants contributions.subnets[contribution.subcommittee_index].totalParticipants
newBestTotalParticipants = countOnes(contribution.aggregation_bits) newBestTotalParticipants = countOnes(contribution.aggregation_bits)
if newBestTotalParticipants > currentBestTotalParticipants: if newBestTotalParticipants > currentBestTotalParticipants:
bestVotes.subnets[contribution.subcommittee_index] = contributions.subnets[contribution.subcommittee_index] =
BestSyncSubcommitteeContribution( BestSyncSubcommitteeContribution(
totalParticipants: newBestTotalParticipants, totalParticipants: newBestTotalParticipants,
participationBits: contribution.aggregation_bits, participationBits: contribution.aggregation_bits,
@ -241,10 +242,10 @@ func isSeen*(
seenKey in pool.seenContributionByAuthor seenKey in pool.seenContributionByAuthor
func covers( func covers(
bestVotes: BestSyncSubcommitteeContributions, contributions: BestSyncSubcommitteeContributions,
contribution: SyncCommitteeContribution): bool = contribution: SyncCommitteeContribution): bool =
contribution.aggregation_bits.isSubsetOf( contribution.aggregation_bits.isSubsetOf(
bestVotes.subnets[contribution.subcommittee_index].participationBits) contributions.subnets[contribution.subcommittee_index].participationBits)
func covers*( func covers*(
pool: var SyncCommitteeMsgPool, pool: var SyncCommitteeMsgPool,
@ -271,22 +272,12 @@ proc addContribution(pool: var SyncCommitteeMsgPool,
pool.seenContributionByAuthor.incl seenKey pool.seenContributionByAuthor.incl seenKey
let target = pool.cfg.toSyncMsgTarget(bid, contribution.slot) let target = pool.cfg.toSyncMsgTarget(bid, contribution.slot)
if target notin pool.bestContributions: pool.bestContributions.withValue(target, contributions):
let totalParticipants = countOnes(contribution.aggregation_bits) contributions[].addContribution(contribution)
var initialBestContributions = BestSyncSubcommitteeContributions() do:
var contributions: BestSyncSubcommitteeContributions
initialBestContributions.subnets[contribution.subcommittee_index] = contributions.addContribution(contribution)
BestSyncSubcommitteeContribution( pool.bestContributions[target] = contributions
totalParticipants: totalParticipants,
participationBits: contribution.aggregation_bits,
signature: signature)
pool.bestContributions[target] = initialBestContributions
else:
try:
addAggregateAux(pool.bestContributions[target], contribution)
except KeyError:
raiseAssert "We have checked for the key upfront"
proc addContribution*(pool: var SyncCommitteeMsgPool, proc addContribution*(pool: var SyncCommitteeMsgPool,
scproof: SignedContributionAndProof, scproof: SignedContributionAndProof,
@ -334,11 +325,35 @@ proc produceSyncAggregateAux(
aggregate aggregate
proc produceSyncAggregate*( proc produceSyncAggregate*(
pool: SyncCommitteeMsgPool, pool: var SyncCommitteeMsgPool,
bid: BlockId, bid: BlockId,
signatureSlot: Slot): SyncAggregate = signatureSlot: Slot): SyncAggregate =
# Sync committee signs previous slot, relative to when new block is produced # Sync committee signs previous slot, relative to when new block is produced
let target = pool.cfg.toSyncMsgTarget(bid, max(signatureSlot, 1.Slot) - 1) let
slot = max(signatureSlot, 1.Slot) - 1
target = pool.cfg.toSyncMsgTarget(bid, slot)
var contribution {.noinit.}: SyncCommitteeContribution
pool.bestContributions.withValue(target, contributions):
for subcommitteeIdx in SyncSubcommitteeIndex:
if contributions.subnets[subcommitteeIdx].totalParticipants == 0 and
pool.produceContribution(slot, bid, subcommitteeIdx, contribution):
debug "Did not receive contribution, did aggregate locally",
target, subcommitteeIdx
contributions[].addContribution(contribution)
do:
var
contributions: BestSyncSubcommitteeContributions
didAggregate = false
for subcommitteeIdx in SyncSubcommitteeIndex:
if pool.produceContribution(slot, bid, subcommitteeIdx, contribution):
debug "Did not receive contribution, did aggregate locally",
target, subcommitteeIdx
contributions.addContribution(contribution)
didAggregate = true
if didAggregate:
pool.bestContributions[target] = contributions
if target in pool.bestContributions: if target in pool.bestContributions:
try: try:
produceSyncAggregateAux(pool.bestContributions[target]) produceSyncAggregateAux(pool.bestContributions[target])
@ -349,7 +364,7 @@ proc produceSyncAggregate*(
proc isEpochLeadTime*( proc isEpochLeadTime*(
pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool = pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#sync-committee-subnet-stability # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
# This ensures a uniform distribution without requiring additional state: # This ensures a uniform distribution without requiring additional state:
# (1/4) = 1/4, 4 slots out # (1/4) = 1/4, 4 slots out
# (3/4) * (1/3) = 1/4, 3 slots out # (3/4) * (1/3) = 1/4, 3 slots out
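The reworked `addContribution`/`produceSyncAggregate` flow above keeps, per subcommittee, only the contribution with the most set aggregation bits, and uses `Table.withValue` to update an existing entry in place or insert a fresh one, replacing the earlier membership-check-then-index sequence. A simplified standalone sketch of that insert-or-update shape (field names and types are stand-ins for the real pool):

```nim
import std/tables

type
  Contribution = object
    subnet: int
    participants: int            # countOnes(aggregation_bits) in the real pool
  BestContributions = object
    subnets: array[4, int]       # best participant count seen per subcommittee

proc addContribution(best: var BestContributions, c: Contribution) =
  # Keep the contribution with the most participants for its subcommittee.
  if c.participants > best.subnets[c.subnet]:
    best.subnets[c.subnet] = c.participants

var bestContributions = initTable[string, BestContributions]()

proc record(target: string, c: Contribution) =
  bestContributions.withValue(target, existing):
    existing[].addContribution(c)        # target known: update in place
  do:
    var fresh: BestContributions
    fresh.addContribution(c)             # target unknown: build and insert
    bestContributions[target] = fresh

when isMainModule:
  record("target-A", Contribution(subnet: 0, participants: 3))
  record("target-A", Contribution(subnet: 0, participants: 5))
  record("target-A", Contribution(subnet: 0, participants: 2))
  echo bestContributions["target-A"].subnets[0]   # 5
```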

View File

@ -502,21 +502,27 @@ func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
template getTransaction(tt: TypedTransaction): bellatrix.Transaction = template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase) bellatrix.Transaction.init(tt.distinctBase)
template getDepositReceipt(dr: DepositReceiptV1): DepositReceipt = template getDepositRequest(dr: DepositRequestV1): DepositRequest =
DepositReceipt( DepositRequest(
pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase), pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase),
withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest, withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest,
amount: dr.amount.Gwei, amount: dr.amount.Gwei,
signature: ValidatorSig(blob: dr.signature.distinctBase), signature: ValidatorSig(blob: dr.signature.distinctBase),
index: dr.index.uint64) index: dr.index.uint64)
template getExecutionLayerWithdrawalRequest(elwr: WithdrawalRequestV1): template getWithdrawalRequest(wr: WithdrawalRequestV1): WithdrawalRequest =
ExecutionLayerWithdrawalRequest = WithdrawalRequest(
ExecutionLayerWithdrawalRequest( source_address: ExecutionAddress(data: wr.sourceAddress.distinctBase),
source_address: ExecutionAddress(data: elwr.sourceAddress.distinctBase),
validator_pubkey: ValidatorPubKey( validator_pubkey: ValidatorPubKey(
blob: elwr.validatorPublicKey.distinctBase), blob: wr.validatorPublicKey.distinctBase),
amount: elwr.amount.Gwei) amount: wr.amount.Gwei)
template getConsolidationRequest(cr: ConsolidationRequestV1):
ConsolidationRequest =
ConsolidationRequest(
source_address: ExecutionAddress(data: cr.sourceAddress.distinctBase),
source_pubkey: ValidatorPubKey(blob: cr.sourcePubkey.distinctBase),
target_pubkey: ValidatorPubKey(blob: cr.targetPubkey.distinctBase))
electra.ExecutionPayload( electra.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest, parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
@ -540,14 +546,17 @@ func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)), mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64, blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64, excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64,
deposit_receipts: deposit_requests:
List[electra.DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD].init( List[electra.DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.depositRequests, it.getDepositReceipt)), mapIt(rpcExecutionPayload.depositRequests, it.getDepositRequest)),
withdrawal_requests: withdrawal_requests: List[electra.WithdrawalRequest,
List[electra.ExecutionLayerWithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD].init(
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD].init( mapIt(rpcExecutionPayload.withdrawalRequests,
mapIt(rpcExecutionPayload.withdrawalRequests, it.getWithdrawalRequest)),
it.getExecutionLayerWithdrawalRequest))) consolidation_requests: List[electra.ConsolidationRequest,
Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.consolidationRequests,
it.getConsolidationRequest)))
func asConsensusType*(payload: engine_api.GetPayloadV4Response): func asConsensusType*(payload: engine_api.GetPayloadV4Response):
electra.ExecutionPayloadForSigning = electra.ExecutionPayloadForSigning =
@ -647,20 +656,26 @@ func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction = template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase) TypedTransaction(tt.distinctBase)
template getDepositReceipt(dr: DepositReceipt): DepositReceiptV1 = template getDepositRequest(dr: DepositRequest): DepositRequestV1 =
DepositReceiptV1( DepositRequestV1(
pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob), pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob),
withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data), withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data),
amount: dr.amount.Quantity, amount: dr.amount.Quantity,
signature: FixedBytes[RawSigSize](dr.signature.blob), signature: FixedBytes[RawSigSize](dr.signature.blob),
index: dr.index.Quantity) index: dr.index.Quantity)
template getExecutionLayerWithdrawalRequest( template getWithdrawalRequest(wr: WithdrawalRequest): WithdrawalRequestV1 =
elwr: ExecutionLayerWithdrawalRequest): WithdrawalRequestV1 =
WithdrawalRequestV1( WithdrawalRequestV1(
sourceAddress: Address(elwr.source_address.data), sourceAddress: Address(wr.source_address.data),
validatorPublicKey: FixedBytes[RawPubKeySize](elwr.validator_pubkey.blob), validatorPublicKey: FixedBytes[RawPubKeySize](wr.validator_pubkey.blob),
amount: elwr.amount.Quantity) amount: wr.amount.Quantity)
template getConsolidationRequest(cr: ConsolidationRequest):
ConsolidationRequestV1 =
ConsolidationRequestV1(
sourceAddress: Address(cr.source_address.data),
sourcePubkey: FixedBytes[RawPubKeySize](cr.source_pubkey.blob),
targetPubkey: FixedBytes[RawPubKeySize](cr.target_pubkey.blob))
engine_api.ExecutionPayloadV4( engine_api.ExecutionPayloadV4(
parentHash: executionPayload.parent_hash.asBlockHash, parentHash: executionPayload.parent_hash.asBlockHash,
@ -682,10 +697,11 @@ func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
blobGasUsed: Quantity(executionPayload.blob_gas_used), blobGasUsed: Quantity(executionPayload.blob_gas_used),
excessBlobGas: Quantity(executionPayload.excess_blob_gas), excessBlobGas: Quantity(executionPayload.excess_blob_gas),
depositRequests: mapIt( depositRequests: mapIt(
executionPayload.deposit_receipts, it.getDepositReceipt), executionPayload.deposit_requests, it.getDepositRequest),
withdrawalRequests: withdrawalRequests: mapIt(
mapIt(executionPayload.withdrawal_requests, executionPayload.withdrawal_requests, it.getWithdrawalRequest),
it.getExecutionLayerWithdrawalRequest)) consolidationRequests: mapIt(
executionPayload.consolidation_requests, it.getConsolidationRequest))
func isConnected(connection: ELConnection): bool = func isConnected(connection: ELConnection): bool =
connection.web3.isSome connection.web3.isSome
@ -1531,7 +1547,6 @@ proc exchangeConfigWithSingleEL(
# https://chainid.network/ # https://chainid.network/
expectedChain = case m.eth1Network.get expectedChain = case m.eth1Network.get
of mainnet: 1.Quantity of mainnet: 1.Quantity
of goerli: 5.Quantity
of sepolia: 11155111.Quantity of sepolia: 11155111.Quantity
of holesky: 17000.Quantity of holesky: 17000.Quantity
if expectedChain != providerChain: if expectedChain != providerChain:
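The renamed helpers above (`getDepositRequest`, `getWithdrawalRequest`, plus the new `getConsolidationRequest`) all follow the same shape: a per-element getter template applied with `mapIt` to turn the engine-API request lists of `ExecutionPayloadV4` into consensus-layer lists, and back. A toy sketch of that conversion pattern; the object definitions below are simplified stand-ins, not the real `DepositRequestV1`/`electra.DepositRequest` types:

```nim
import std/sequtils

type
  WireDepositRequest = object     # engine-API shape (stand-in for DepositRequestV1)
    pubkey: string
    amount: uint64
  DepositRequest = object         # consensus shape (stand-in for electra.DepositRequest)
    pubkey: string
    amount: uint64

template getDepositRequest(dr: WireDepositRequest): DepositRequest =
  DepositRequest(pubkey: dr.pubkey, amount: dr.amount)

let wire = @[
  WireDepositRequest(pubkey: "0xaa", amount: 32_000_000_000'u64),
  WireDepositRequest(pubkey: "0xbb", amount: 1_000_000_000'u64)]

let requests = mapIt(wire, it.getDepositRequest)
echo requests.len        # 2
echo requests[1].amount  # 1000000000
```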

View File

@ -7,7 +7,7 @@
{.push raises: [].} {.push raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/tests/core/pyspec/eth2spec/utils/merkle_minimal.py # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# Merkle tree helpers # Merkle tree helpers
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -113,7 +113,7 @@ proc update_justified(
self.update_justified(dag, blck, justified.epoch) self.update_justified(dag, blck, justified.epoch)
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#update_checkpoints # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#update_checkpoints
proc update_checkpoints( proc update_checkpoints(
self: var Checkpoints, dag: ChainDAGRef, self: var Checkpoints, dag: ChainDAGRef,
checkpoints: FinalityCheckpoints): FcResult[void] = checkpoints: FinalityCheckpoints): FcResult[void] =
@ -377,7 +377,7 @@ proc get_head*(self: var ForkChoice,
self.checkpoints.justified.balances, self.checkpoints.justified.balances,
self.checkpoints.proposer_boost_root) self.checkpoints.proposer_boost_root)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/fork_choice/safe-block.md#get_safe_beacon_block_root # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/fork_choice/safe-block.md#get_safe_beacon_block_root
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest = func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
# Use most recent justified block as a stopgap # Use most recent justified block as a stopgap
self.checkpoints.justified.checkpoint.root self.checkpoints.justified.checkpoint.root
@ -502,8 +502,8 @@ when isMainModule:
for i in 0 ..< validator_count: for i in 0 ..< validator_count:
indices.add fakeHash(i), i indices.add fakeHash(i), i
votes.add default(VoteTracker) votes.add default(VoteTracker)
old_balances.add 0 old_balances.add 0.Gwei
new_balances.add 0 new_balances.add 0.Gwei
let err = deltas.compute_deltas( let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances indices, indices_offset = 0, votes, old_balances, new_balances
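The self-test above now adds `0.Gwei` instead of a bare `0` because the balance sequences are typed. A minimal sketch of why a distinct balance type forces the explicit conversion; the real `Gwei` definition in the codebase may differ in which operators it borrows:

```nim
type Gwei = distinct uint64

proc `$`(x: Gwei): string {.borrow.}
proc `+`(a, b: Gwei): Gwei {.borrow.}

var balances: seq[Gwei]
balances.add 0.Gwei                      # explicit conversion required
# balances.add 0                         # would not compile: int literal is not Gwei
echo balances[0] + 32_000_000_000.Gwei   # 32000000000
```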

View File

@ -545,11 +545,16 @@ proc storeBlock(
# TODO run https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#blob-kzg-commitments # TODO run https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#blob-kzg-commitments
# https://github.com/ethereum/execution-apis/blob/main/src/engine/experimental/blob-extension.md#specification # https://github.com/ethereum/execution-apis/blob/main/src/engine/experimental/blob-extension.md#specification
# "This validation MUST be instantly run in all cases even during active sync process." # "This validation MUST be instantly run in all cases even during active
# sync process."
# #
# Client software MUST validate `blockHash` value as being equivalent to # Client software MUST validate `blockHash` value as being equivalent to
# `Keccak256(RLP(ExecutionBlockHeader))` # `Keccak256(RLP(ExecutionBlockHeader))`
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification
#
# This should simulate an unsynced EL, which still must perform these
# checks. This means it must be able to do so without context, beyond
# whatever data the block itself contains.
when typeof(signedBlock).kind >= ConsensusFork.Bellatrix and typeof(signedBlock).kind <= ConsensusFork.Deneb: when typeof(signedBlock).kind >= ConsensusFork.Bellatrix and typeof(signedBlock).kind <= ConsensusFork.Deneb:
debugComment "electra can do this in principle" debugComment "electra can do this in principle"
template payload(): auto = signedBlock.message.body.execution_payload template payload(): auto = signedBlock.message.body.execution_payload
@ -562,8 +567,6 @@ proc storeBlock(
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
self.consensusManager.quarantine[].addUnviable(signedBlock.root) self.consensusManager.quarantine[].addUnviable(signedBlock.root)
return err((VerifierError.Invalid, ProcessingStatus.completed)) return err((VerifierError.Invalid, ProcessingStatus.completed))
else:
discard
let newPayloadTick = Moment.now() let newPayloadTick = Moment.now()
@ -838,7 +841,7 @@ proc processBlock(
# - MUST NOT optimistically import the block. # - MUST NOT optimistically import the block.
# - MUST NOT apply the block to the fork choice store. # - MUST NOT apply the block to the fork choice store.
# - MAY queue the block for later processing. # - MAY queue the block for later processing.
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/sync/optimistic.md#execution-engine-errors # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#execution-engine-errors
await sleepAsync(chronos.seconds(1)) await sleepAsync(chronos.seconds(1))
self[].enqueueBlock( self[].enqueueBlock(
entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized, entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized,

View File

@ -302,7 +302,7 @@ template validateBeaconBlockBellatrix(
# #
# `is_merge_transition_complete(state)` tests for # `is_merge_transition_complete(state)` tests for
# `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while # `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
# shows that `state.latest_execution_payload_header` being default or not is # shows that `state.latest_execution_payload_header` being default or not is
# exactly equivalent to whether that block's execution payload is default or # exactly equivalent to whether that block's execution payload is default or
# not, so test cached block information rather than reconstructing a state. # not, so test cached block information rather than reconstructing a state.
@ -1181,7 +1181,7 @@ proc validateAggregate*(
ok((attesting_indices, sig)) ok((attesting_indices, sig))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#bls_to_execution_change # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#bls_to_execution_change
proc validateBlsToExecutionChange*( proc validateBlsToExecutionChange*(
pool: ValidatorChangePool, batchCrypto: ref BatchCrypto, pool: ValidatorChangePool, batchCrypto: ref BatchCrypto,
signed_address_change: SignedBLSToExecutionChange, signed_address_change: SignedBLSToExecutionChange,

View File

@ -209,47 +209,62 @@ proc tryForceUpdate(
finalizedSlot = forkyStore.finalized_header.beacon.slot, finalizedSlot = forkyStore.finalized_header.beacon.slot,
optimisticSlot = forkyStore.optimistic_header.beacon.slot optimisticSlot = forkyStore.optimistic_header.beacon.slot
proc doProcessObject(
self: var LightClientProcessor,
bootstrap: ForkedLightClientBootstrap,
wallTime: BeaconTime): Result[void, VerifierError] =
if bootstrap.kind == LightClientDataFork.None:
err(VerifierError.Invalid)
elif self.store[].kind > LightClientDataFork.None:
err(VerifierError.Duplicate)
else:
let trustedBlockRoot = self.getTrustedBlockRoot()
if trustedBlockRoot.isNone:
err(VerifierError.MissingParent)
else:
withForkyBootstrap(bootstrap):
when lcDataFork > LightClientDataFork.None:
let initRes = initialize_light_client_store(
trustedBlockRoot.get, forkyBootstrap, self.cfg)
if initRes.isErr:
err(initRes.error)
else:
self.store[] = ForkedLightClientStore.init(initRes.get)
ok()
else:
raiseAssert "Unreachable; bootstrap.kind was checked"
proc doProcessObject(
self: var LightClientProcessor,
update: SomeForkedLightClientUpdate,
wallTime: BeaconTime): Result[void, VerifierError] =
if update.kind == LightClientDataFork.None:
err(VerifierError.Invalid)
elif self.store[].kind == LightClientDataFork.None:
err(VerifierError.MissingParent)
else:
withForkyObject(update):
when lcDataFork > LightClientDataFork.None:
if lcDataFork > self.store[].kind:
info "Upgrading light client",
oldFork = self.store[].kind, newFork = lcDataFork
self.store[].migrateToDataFork(lcDataFork)
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
let
wallSlot = wallTime.slotOrZero()
upgradedUpdate = update.migratingToDataFork(lcDataFork)
process_light_client_update(
forkyStore, upgradedUpdate.forky(lcDataFork), wallSlot,
self.cfg, self.genesis_validators_root)
else:
raiseAssert "Unreachable; self.store[].kind was checked"
proc processObject( proc processObject(
self: var LightClientProcessor, self: var LightClientProcessor,
obj: SomeForkedLightClientObject, obj: SomeForkedLightClientObject,
wallTime: BeaconTime): Result[void, VerifierError] = wallTime: BeaconTime): Result[void, VerifierError] =
let let res = self.doProcessObject(obj, wallTime)
res = withForkyObject(obj):
when lcDataFork > LightClientDataFork.None:
when forkyObject is ForkyLightClientBootstrap:
if self.store[].kind > LightClientDataFork.None:
err(VerifierError.Duplicate)
else:
let trustedBlockRoot = self.getTrustedBlockRoot()
if trustedBlockRoot.isNone:
err(VerifierError.MissingParent)
else:
let initRes = initialize_light_client_store(
trustedBlockRoot.get, forkyObject, self.cfg)
if initRes.isErr:
err(initRes.error)
else:
self.store[] = ForkedLightClientStore.init(initRes.get)
ok()
elif forkyObject is SomeForkyLightClientUpdate:
if self.store[].kind == LightClientDataFork.None:
err(VerifierError.MissingParent)
else:
if lcDataFork > self.store[].kind:
info "Upgrading light client",
oldFork = self.store[].kind, newFork = lcDataFork
self.store[].migrateToDataFork(lcDataFork)
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
let
wallSlot = wallTime.slotOrZero()
upgradedObject = obj.migratingToDataFork(lcDataFork)
process_light_client_update(
forkyStore, upgradedObject.forky(lcDataFork), wallSlot,
self.cfg, self.genesis_validators_root)
else: raiseAssert "Unreachable"
else:
err(VerifierError.Invalid)
withForkyObject(obj): withForkyObject(obj):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:

View File

@ -20,29 +20,19 @@ export gossip_validation
logScope: logScope:
topics = "gossip_opt" topics = "gossip_opt"
const
# Maximum `blocks` to cache (not validated; deleted on new optimistic header)
maxBlocks = 16 # <= `GOSSIP_MAX_SIZE_BELLATRIX` (10 MB) each
# Minimum interval at which spam is logged
minLogInterval = chronos.seconds(5)
type type
MsgTrustedBlockProcessor* = OptimisticBlockVerifier* = proc(
proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock): Future[void] {. signedBlock: ForkedSignedBeaconBlock
async: (raises: [CancelledError]).} ): Future[void] {.async: (raises: [CancelledError]).}
OptimisticProcessor* = ref object OptimisticProcessor* = ref object
getBeaconTime: GetBeaconTimeFn getBeaconTime: GetBeaconTimeFn
optimisticVerifier: MsgTrustedBlockProcessor optimisticVerifier: OptimisticBlockVerifier
blocks: Table[Eth2Digest, ref ForkedSignedBeaconBlock]
latestOptimisticSlot: Slot
processFut: Future[void].Raising([CancelledError]) processFut: Future[void].Raising([CancelledError])
logMoment: Moment
proc initOptimisticProcessor*( proc initOptimisticProcessor*(
getBeaconTime: GetBeaconTimeFn, getBeaconTime: GetBeaconTimeFn,
optimisticVerifier: MsgTrustedBlockProcessor): OptimisticProcessor = optimisticVerifier: OptimisticBlockVerifier): OptimisticProcessor =
OptimisticProcessor( OptimisticProcessor(
getBeaconTime: getBeaconTime, getBeaconTime: getBeaconTime,
optimisticVerifier: optimisticVerifier) optimisticVerifier: optimisticVerifier)
@ -56,9 +46,6 @@ proc validateBeaconBlock(
(wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero): (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero):
return errIgnore("BeaconBlock: slot too high") return errIgnore("BeaconBlock: slot too high")
if signed_beacon_block.message.slot <= self.latestOptimisticSlot:
return errIgnore("BeaconBlock: no significant progress")
if not signed_beacon_block.message.is_execution_block(): if not signed_beacon_block.message.is_execution_block():
return errIgnore("BeaconBlock: no execution block") return errIgnore("BeaconBlock: no execution block")
@ -93,32 +80,16 @@ proc processSignedBeaconBlock*(
debug "Dropping optimistic block", error = v.error debug "Dropping optimistic block", error = v.error
return err(v.error) return err(v.error)
# Note that validation of blocks is delayed by ~4/3 slots because we have to # Only process one block at a time (backpressure)
# wait for the sync committee to sign the correct block and for that signature trace "Optimistic block validated"
# to be included in the next block. Therefore, we skip block validation here if self.processFut == nil:
# and cache the block in memory. Because there is no validation, we have to self.processFut = self.optimisticVerifier(
# mitigate against bogus blocks, mostly by bounding the caches. Assuming that ForkedSignedBeaconBlock.init(signedBlock))
# any denial-of-service attacks eventually subside, care is taken to recover.
template logWithSpamProtection(body: untyped): untyped =
block:
let now = Moment.now()
if self.logMoment + minLogInterval <= now:
logScope: minLogInterval
body
self.logMoment = now
# Store block for later verification proc handleFinishedProcess(future: pointer) =
if not self.blocks.hasKey(signedBlock.root): self.processFut = nil
# If `blocks` is full, we got spammed with multiple blocks for a slot,
# of the optimistic header advancements have been all withheld from us. self.processFut.addCallback(handleFinishedProcess)
# Whenever the optimistic header advances, old blocks are cleared,
# so we can simply ignore additional spam blocks until that happens.
if self.blocks.len >= maxBlocks:
logWithSpamProtection:
error "`blocks` full - ignoring", maxBlocks
else:
self.blocks[signedBlock.root] =
newClone(ForkedSignedBeaconBlock.init(signedBlock))
# Block validation is delegated to the sync committee and is done with delay. # Block validation is delegated to the sync committee and is done with delay.
# If we forward invalid spam blocks, we may be disconnected + IP banned, # If we forward invalid spam blocks, we may be disconnected + IP banned,
@ -127,40 +98,4 @@ proc processSignedBeaconBlock*(
# However, we are actively contributing to other topics, so some of the # However, we are actively contributing to other topics, so some of the
# negative peer score may be offset through those different topics. # negative peer score may be offset through those different topics.
# The practical impact depends on the actually deployed scoring heuristics. # The practical impact depends on the actually deployed scoring heuristics.
trace "Optimistic block cached"
return errIgnore("Validation delegated to sync committee") return errIgnore("Validation delegated to sync committee")
proc setOptimisticHeader*(
self: OptimisticProcessor, optimisticHeader: BeaconBlockHeader) =
# If irrelevant, skip processing
if optimisticHeader.slot <= self.latestOptimisticSlot:
return
self.latestOptimisticSlot = optimisticHeader.slot
# Delete blocks that are no longer of interest
let blockRoot = optimisticHeader.hash_tree_root()
var
rootsToDelete: seq[Eth2Digest]
signedBlock: ref ForkedMsgTrustedSignedBeaconBlock
for root, blck in self.blocks:
if root == blockRoot:
signedBlock = blck.asMsgTrusted()
if blck[].slot <= optimisticHeader.slot:
rootsToDelete.add root
for root in rootsToDelete:
self.blocks.del root
# Block must be known
if signedBlock == nil:
return
# If a block is already being processed, skip (backpressure)
if self.processFut != nil:
return
self.processFut = self.optimisticVerifier(signedBlock[])
proc handleFinishedProcess(future: pointer) =
self.processFut = nil
self.processFut.addCallback(handleFinishedProcess)

View File

@ -94,7 +94,7 @@ typedef struct ETHConsensusConfig ETHConsensusConfig;
* based on the given `config.yaml` file content - If successful. * based on the given `config.yaml` file content - If successful.
* @return `NULL` - If the given `config.yaml` is malformed or incompatible. * @return `NULL` - If the given `config.yaml` is malformed or incompatible.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/configs/README.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHConsensusConfig *ETHConsensusConfigCreateFromYaml(const char *configFileContent); ETHConsensusConfig *ETHConsensusConfigCreateFromYaml(const char *configFileContent);
@ -151,9 +151,9 @@ typedef struct ETHBeaconState ETHBeaconState;
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/configs/README.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHBeaconState *ETHBeaconStateCreateFromSsz( ETHBeaconState *ETHBeaconStateCreateFromSsz(
@ -325,8 +325,8 @@ typedef struct ETHLightClientStore ETHLightClientStore;
* *
* @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
* @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#weak-subjectivity-period * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHLightClientStore *ETHLightClientStoreCreateFromBootstrap( ETHLightClientStore *ETHLightClientStoreCreateFromBootstrap(
@ -579,7 +579,7 @@ typedef struct ETHLightClientHeader ETHLightClientHeader;
* *
* @return Latest finalized header. * @return Latest finalized header.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader( const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
@ -597,8 +597,8 @@ const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
* *
* @return Whether or not the next sync committee is currently known. * @return Whether or not the next sync committee is currently known.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store); bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store);

View File

@ -9,6 +9,7 @@
import import
std/[json, sequtils, times], std/[json, sequtils, times],
stew/saturation_arith,
eth/common/[eth_types_rlp, transaction], eth/common/[eth_types_rlp, transaction],
eth/keys, eth/keys,
eth/p2p/discoveryv5/random2, eth/p2p/discoveryv5/random2,
@ -77,7 +78,7 @@ proc ETHConsensusConfigCreateFromYaml(
## * `NULL` - If the given `config.yaml` is malformed or incompatible. ## * `NULL` - If the given `config.yaml` is malformed or incompatible.
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/configs/README.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
let cfg = RuntimeConfig.new() let cfg = RuntimeConfig.new()
try: try:
cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0] cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0]
@ -143,9 +144,9 @@ proc ETHBeaconStateCreateFromSsz(
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/configs/README.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
let let
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
return nil return nil
@ -328,8 +329,8 @@ proc ETHLightClientStoreCreateFromBootstrap(
## See: ## See:
## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#weak-subjectivity-period ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
let let
mediaType = MediaType.init($mediaType) mediaType = MediaType.init($mediaType)
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
@ -754,8 +755,8 @@ func ETHLightClientStoreIsNextSyncCommitteeKnown(
## * Whether or not the next sync committee is currently known. ## * Whether or not the next sync committee is currently known.
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
store[].is_next_sync_committee_known store[].is_next_sync_committee_known
func ETHLightClientStoreGetOptimisticHeader( func ETHLightClientStoreGetOptimisticHeader(
@ -795,7 +796,7 @@ func ETHLightClientStoreGetSafetyThreshold(
## * Light client store safety threshold. ## * Light client store safety threshold.
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#get_safety_threshold ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#get_safety_threshold
store[].get_safety_threshold.cint store[].get_safety_threshold.cint
proc ETHLightClientHeaderCreateCopy( proc ETHLightClientHeaderCreateCopy(
@ -1257,8 +1258,8 @@ proc ETHExecutionBlockHeaderCreateFromJson(
logsBloom: distinctBase(data.logsBloom), logsBloom: distinctBase(data.logsBloom),
difficulty: data.difficulty, difficulty: data.difficulty,
number: distinctBase(data.number), number: distinctBase(data.number),
gasLimit: distinctBase(data.gasLimit), gasLimit: GasInt.saturate distinctBase(data.gasLimit),
gasUsed: distinctBase(data.gasUsed), gasUsed: GasInt.saturate distinctBase(data.gasUsed),
timestamp: EthTime(distinctBase(data.timestamp)), timestamp: EthTime(distinctBase(data.timestamp)),
extraData: distinctBase(data.extraData), extraData: distinctBase(data.extraData),
mixHash: data.mixHash.asEth2Digest, mixHash: data.mixHash.asEth2Digest,
@ -1613,9 +1614,9 @@ proc ETHTransactionsCreateFromJson(
chainId: distinctBase(tx.chainId).u256, chainId: distinctBase(tx.chainId).u256,
`from`: ExecutionAddress(data: fromAddress), `from`: ExecutionAddress(data: fromAddress),
nonce: tx.nonce, nonce: tx.nonce,
maxPriorityFeePerGas: tx.maxPriorityFeePerGas, maxPriorityFeePerGas: tx.maxPriorityFeePerGas.uint64,
maxFeePerGas: tx.maxFeePerGas, maxFeePerGas: tx.maxFeePerGas.uint64,
gas: tx.gasLimit, gas: tx.gasLimit.uint64,
destinationType: destinationType, destinationType: destinationType,
to: ExecutionAddress(data: toAddress), to: ExecutionAddress(data: toAddress),
value: tx.value, value: tx.value,

View File

@ -342,25 +342,26 @@ proc installMessageValidators*(
for consensusFork in ConsensusFork: for consensusFork in ConsensusFork:
withLcDataFork(lcDataForkAtConsensusFork(consensusFork)): withLcDataFork(lcDataForkAtConsensusFork(consensusFork)):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
let closureScope:
contextFork = consensusFork # Avoid capturing `Deneb` (Nim 1.6) let
digest = forkDigests[].atConsensusFork(contextFork) contextFork = consensusFork
digest = forkDigests[].atConsensusFork(contextFork)
# light_client_optimistic_update # light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_finality_update # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_finality_update
lightClient.network.addValidator( lightClient.network.addValidator(
getLightClientFinalityUpdateTopic(digest), proc ( getLightClientFinalityUpdateTopic(digest), proc (
msg: lcDataFork.LightClientFinalityUpdate msg: lcDataFork.LightClientFinalityUpdate
): ValidationResult = ): ValidationResult =
validate(msg, contextFork, processLightClientFinalityUpdate)) validate(msg, contextFork, processLightClientFinalityUpdate))
# light_client_optimistic_update # light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
lightClient.network.addValidator( lightClient.network.addValidator(
getLightClientOptimisticUpdateTopic(digest), proc ( getLightClientOptimisticUpdateTopic(digest), proc (
msg: lcDataFork.LightClientOptimisticUpdate msg: lcDataFork.LightClientOptimisticUpdate
): ValidationResult = ): ValidationResult =
validate(msg, contextFork, processLightClientOptimisticUpdate)) validate(msg, contextFork, processLightClientOptimisticUpdate))
proc updateGossipStatus*( proc updateGossipStatus*(
lightClient: LightClient, slot: Slot, dagIsBehind = default(Option[bool])) = lightClient: LightClient, slot: Slot, dagIsBehind = default(Option[bool])) =

View File

@ -78,7 +78,7 @@ proc loadBootstrapFile*(bootstrapFile: string,
proc new*(T: type Eth2DiscoveryProtocol, proc new*(T: type Eth2DiscoveryProtocol,
config: BeaconNodeConf | LightClientConf, config: BeaconNodeConf | LightClientConf,
enrIp: Option[IpAddress], enrTcpPort, enrUdpPort: Option[Port], enrIp: Opt[IpAddress], enrTcpPort, enrUdpPort: Opt[Port],
pk: PrivateKey, pk: PrivateKey,
enrFields: openArray[(string, seq[byte])], rng: ref HmacDrbgContext): enrFields: openArray[(string, seq[byte])], rng: ref HmacDrbgContext):
T = T =

View File

@ -1765,7 +1765,7 @@ proc new(T: type Eth2Node,
enrForkId: ENRForkID, discoveryForkId: ENRForkID, enrForkId: ENRForkID, discoveryForkId: ENRForkID,
forkDigests: ref ForkDigests, getBeaconTime: GetBeaconTimeFn, forkDigests: ref ForkDigests, getBeaconTime: GetBeaconTimeFn,
switch: Switch, pubsub: GossipSub, switch: Switch, pubsub: GossipSub,
ip: Option[IpAddress], tcpPort, udpPort: Option[Port], ip: Opt[IpAddress], tcpPort, udpPort: Opt[Port],
privKey: keys.PrivateKey, discovery: bool, privKey: keys.PrivateKey, discovery: bool,
directPeers: DirectPeers, directPeers: DirectPeers,
rng: ref HmacDrbgContext): T {.raises: [CatchableError].} = rng: ref HmacDrbgContext): T {.raises: [CatchableError].} =
@ -2520,7 +2520,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
node.metadata.seq_number += 1 node.metadata.seq_number += 1
node.metadata.attnets = attnets node.metadata.attnets = attnets
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#attestation-subnet-subscription
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-bitfield # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
let res = node.discovery.updateRecord({ let res = node.discovery.updateRecord({
enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets) enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
@ -2533,7 +2533,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
debug "Stability subnets changed; updated ENR attnets", attnets debug "Stability subnets changed; updated ENR attnets", attnets
proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) = proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#sync-committee-subnet-stability # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
if node.metadata.syncnets == syncnets: if node.metadata.syncnets == syncnets:
return return

View File

@ -47,7 +47,6 @@ type
Eth1Network* = enum Eth1Network* = enum
mainnet mainnet
goerli
sepolia sepolia
holesky holesky
@ -304,7 +303,7 @@ elif const_preset == "mainnet":
useBakedInGenesis = Opt.some "mainnet") useBakedInGenesis = Opt.some "mainnet")
holeskyMetadata = loadCompileTimeNetworkMetadata( holeskyMetadata = loadCompileTimeNetworkMetadata(
vendorDir & "/holesky/custom_config_data", vendorDir & "/holesky/metadata",
Opt.some holesky, Opt.some holesky,
downloadGenesisFrom = Opt.some DownloadInfo( downloadGenesisFrom = Opt.some DownloadInfo(
url: "https://github.com/status-im/nimbus-eth2/releases/download/v23.9.1/holesky-genesis.ssz.sz", url: "https://github.com/status-im/nimbus-eth2/releases/download/v23.9.1/holesky-genesis.ssz.sz",

View File

@ -373,6 +373,21 @@ proc initFullNode(
func getFrontfillSlot(): Slot = func getFrontfillSlot(): Slot =
max(dag.frontfill.get(BlockId()).slot, dag.horizon) max(dag.frontfill.get(BlockId()).slot, dag.horizon)
proc isWithinWeakSubjectivityPeriod(): bool =
let
currentSlot = node.beaconClock.now().slotOrZero()
checkpoint = Checkpoint(
epoch: epoch(getStateField(node.dag.headState, slot)),
root: getStateField(node.dag.headState, latest_block_header).state_root)
is_within_weak_subjectivity_period(node.dag.cfg, currentSlot,
node.dag.headState, checkpoint)
proc eventWaiter(): Future[void] {.async: (raises: [CancelledError]).} =
await node.shutdownEvent.wait()
bnStatus = BeaconNodeStatus.Stopping
asyncSpawn eventWaiter()
let let
quarantine = newClone( quarantine = newClone(
Quarantine.init()) Quarantine.init())
@ -441,19 +456,29 @@ proc initFullNode(
blockProcessor, node.validatorMonitor, dag, attestationPool, blockProcessor, node.validatorMonitor, dag, attestationPool,
validatorChangePool, node.attachedValidators, syncCommitteeMsgPool, validatorChangePool, node.attachedValidators, syncCommitteeMsgPool,
lightClientPool, quarantine, blobQuarantine, rng, getBeaconTime, taskpool) lightClientPool, quarantine, blobQuarantine, rng, getBeaconTime, taskpool)
syncManagerFlags =
if node.config.longRangeSync != LongRangeSyncMode.Lenient:
{SyncManagerFlag.NoGenesisSync}
else:
{}
syncManager = newSyncManager[Peer, PeerId]( syncManager = newSyncManager[Peer, PeerId](
node.network.peerPool, node.network.peerPool,
dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
SyncQueueKind.Forward, getLocalHeadSlot, SyncQueueKind.Forward, getLocalHeadSlot,
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
getFrontfillSlot, dag.tail.slot, blockVerifier) getFrontfillSlot, isWithinWeakSubjectivityPeriod,
dag.tail.slot, blockVerifier,
shutdownEvent = node.shutdownEvent,
flags = syncManagerFlags)
backfiller = newSyncManager[Peer, PeerId]( backfiller = newSyncManager[Peer, PeerId](
node.network.peerPool, node.network.peerPool,
dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
SyncQueueKind.Backward, getLocalHeadSlot, SyncQueueKind.Backward, getLocalHeadSlot,
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
getFrontfillSlot, dag.backfill.slot, blockVerifier, getFrontfillSlot, isWithinWeakSubjectivityPeriod,
maxHeadAge = 0) dag.backfill.slot, blockVerifier, maxHeadAge = 0,
shutdownEvent = node.shutdownEvent,
flags = syncManagerFlags)
router = (ref MessageRouter)( router = (ref MessageRouter)(
processor: processor, processor: processor,
network: node.network) network: node.network)
@ -554,6 +579,27 @@ proc init*(T: type BeaconNode,
template cfg: auto = metadata.cfg template cfg: auto = metadata.cfg
template eth1Network: auto = metadata.eth1Network template eth1Network: auto = metadata.eth1Network
if not(isDir(config.databaseDir)):
# If the database directory is missing, we are going to use the genesis state
# to check for weak_subjectivity_period.
let
genesisState =
await fetchGenesisState(
metadata, config.genesisState, config.genesisStateUrl)
genesisTime = getStateField(genesisState[], genesis_time)
beaconClock = BeaconClock.init(genesisTime).valueOr:
fatal "Invalid genesis time in genesis state", genesisTime
quit 1
currentSlot = beaconClock.now().slotOrZero()
checkpoint = Checkpoint(
epoch: epoch(getStateField(genesisState[], slot)),
root: getStateField(genesisState[], latest_block_header).state_root)
if config.longRangeSync == LongRangeSyncMode.Light:
if not is_within_weak_subjectivity_period(metadata.cfg, currentSlot,
genesisState[], checkpoint):
fatal WeakSubjectivityLogMessage, current_slot = currentSlot
quit 1
try: try:
if config.numThreads < 0: if config.numThreads < 0:
fatal "The number of threads --numThreads cannot be negative." fatal "The number of threads --numThreads cannot be negative."
@ -885,6 +931,7 @@ proc init*(T: type BeaconNode,
beaconClock: beaconClock, beaconClock: beaconClock,
validatorMonitor: validatorMonitor, validatorMonitor: validatorMonitor,
stateTtlCache: stateTtlCache, stateTtlCache: stateTtlCache,
shutdownEvent: newAsyncEvent(),
dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init())) dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()))
node.initLightClient( node.initLightClient(
@ -1619,7 +1666,7 @@ func syncStatus(node: BeaconNode, wallSlot: Slot): string =
node.syncManager.syncStatus & optimisticSuffix & lightClientSuffix node.syncManager.syncStatus & optimisticSuffix & lightClientSuffix
elif node.backfiller.inProgress: elif node.backfiller.inProgress:
"backfill: " & node.backfiller.syncStatus "backfill: " & node.backfiller.syncStatus
elif optimistic_head: elif optimisticHead:
"synced/opt" "synced/opt"
else: else:
"synced" "synced"
@ -1768,7 +1815,8 @@ proc installMessageValidators(node: BeaconNode) =
node.network.addAsyncValidator( node.network.addAsyncValidator(
getAttestationTopic(digest, subnet_id), proc ( getAttestationTopic(digest, subnet_id), proc (
attestation: electra.Attestation attestation: electra.Attestation
): Future[ValidationResult] {.async: (raises: [CancelledError]).} = ): Future[ValidationResult] {.
async: (raises: [CancelledError]).} =
return toValidationResult( return toValidationResult(
await node.processor.processAttestation( await node.processor.processAttestation(
MsgSource.gossip, attestation, subnet_id, MsgSource.gossip, attestation, subnet_id,
@ -1780,7 +1828,8 @@ proc installMessageValidators(node: BeaconNode) =
node.network.addAsyncValidator( node.network.addAsyncValidator(
getAttestationTopic(digest, subnet_id), proc ( getAttestationTopic(digest, subnet_id), proc (
attestation: phase0.Attestation attestation: phase0.Attestation
): Future[ValidationResult] {.async: (raises: [CancelledError]).} = ): Future[ValidationResult] {.
async: (raises: [CancelledError]).} =
return toValidationResult( return toValidationResult(
await node.processor.processAttestation( await node.processor.processAttestation(
MsgSource.gossip, attestation, subnet_id, MsgSource.gossip, attestation, subnet_id,
@ -1860,7 +1909,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg))) MsgSource.gossip, msg)))
when consensusFork >= ConsensusFork.Capella: when consensusFork >= ConsensusFork.Capella:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#bls_to_execution_change # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#bls_to_execution_change
node.network.addAsyncValidator( node.network.addAsyncValidator(
getBlsToExecutionChangeTopic(digest), proc ( getBlsToExecutionChangeTopic(digest), proc (
msg: SignedBLSToExecutionChange msg: SignedBLSToExecutionChange
@ -2269,9 +2318,9 @@ proc doRecord(config: BeaconNodeConf, rng: var HmacDrbgContext) {.
let record = enr.Record.init( let record = enr.Record.init(
config.seqNumber, config.seqNumber,
netKeys.seckey.asEthKey, netKeys.seckey.asEthKey,
some(config.ipExt), Opt.some(config.ipExt),
some(config.tcpPortExt), Opt.some(config.tcpPortExt),
some(config.udpPortExt), Opt.some(config.udpPortExt),
fieldPairs).expect("Record within size limits") fieldPairs).expect("Record within size limits")
echo record.toURI() echo record.toURI()

View File

@ -107,23 +107,15 @@ programMain:
else: else:
nil nil
optimisticHandler = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock): optimisticHandler = proc(
Future[void] {.async: (raises: [CancelledError]).} = signedBlock: ForkedSignedBeaconBlock
notice "New LC optimistic block", ): Future[void] {.async: (raises: [CancelledError]).} =
opt = signedBlock.toBlockId(),
wallSlot = getBeaconTime().slotOrZero
withBlck(signedBlock): withBlck(signedBlock):
when consensusFork >= ConsensusFork.Bellatrix: when consensusFork >= ConsensusFork.Bellatrix:
if forkyBlck.message.is_execution_block: if forkyBlck.message.is_execution_block:
template payload(): auto = forkyBlck.message.body.execution_payload template payload(): auto = forkyBlck.message.body.execution_payload
if elManager != nil and not payload.block_hash.isZero: if elManager != nil and not payload.block_hash.isZero:
discard await elManager.newExecutionPayload(forkyBlck.message) discard await elManager.newExecutionPayload(forkyBlck.message)
discard await elManager.forkchoiceUpdated(
headBlockHash = payload.block_hash,
safeBlockHash = payload.block_hash, # stub value
finalizedBlockHash = ZERO_HASH,
payloadAttributes = Opt.none(consensusFork.PayloadAttributes))
else: discard else: discard
optimisticProcessor = initOptimisticProcessor( optimisticProcessor = initOptimisticProcessor(
getBeaconTime, optimisticHandler) getBeaconTime, optimisticHandler)
@ -153,26 +145,54 @@ programMain:
waitFor network.startListening() waitFor network.startListening()
waitFor network.start() waitFor network.start()
func isSynced(optimisticSlot: Slot, wallSlot: Slot): bool =
# Check whether light client has synced sufficiently close to wall slot
const maxAge = 2 * SLOTS_PER_EPOCH
optimisticSlot >= max(wallSlot, maxAge.Slot) - maxAge
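The max() clamp above keeps the subtraction from underflowing while the wall slot is still within the first maxAge slots after genesis. A minimal sketch of the same arithmetic, assuming the mainnet preset SLOTS_PER_EPOCH = 32 and using plain uint64 in place of the Slot type:

const
  slotsPerEpoch = 32'u64           # mainnet preset, stands in for SLOTS_PER_EPOCH
  maxAge = 2'u64 * slotsPerEpoch   # 64 slots, as in the helper above
func isSyncedSketch(optimisticSlot, wallSlot: uint64): bool =
  optimisticSlot >= max(wallSlot, maxAge) - maxAge
doAssert isSyncedSketch(936, 1000)      # exactly 64 slots behind: still synced
doAssert not isSyncedSketch(935, 1000)  # 65 slots behind: stale
doAssert isSyncedSketch(0, 10)          # near genesis, the clamp avoids underflow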
proc onFinalizedHeader( proc onFinalizedHeader(
lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) = lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
withForkyHeader(finalizedHeader): withForkyHeader(finalizedHeader):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
info "New LC finalized header", info "New LC finalized header",
finalized_header = shortLog(forkyHeader) finalized_header = shortLog(forkyHeader)
let let
period = forkyHeader.beacon.slot.sync_committee_period period = forkyHeader.beacon.slot.sync_committee_period
syncCommittee = lightClient.finalizedSyncCommittee.expect("Init OK") syncCommittee = lightClient.finalizedSyncCommittee.expect("Init OK")
db.putSyncCommittee(period, syncCommittee) db.putSyncCommittee(period, syncCommittee)
db.putLatestFinalizedHeader(finalizedHeader) db.putLatestFinalizedHeader(finalizedHeader)
var optimisticFcuFut: Future[(PayloadExecutionStatus, Opt[BlockHash])]
.Raising([CancelledError])
proc onOptimisticHeader( proc onOptimisticHeader(
lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) = lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
if optimisticFcuFut != nil:
return
withForkyHeader(optimisticHeader): withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
info "New LC optimistic header", logScope: optimistic_header = shortLog(forkyHeader)
optimistic_header = shortLog(forkyHeader) when lcDataFork >= LightClientDataFork.Capella:
optimisticProcessor.setOptimisticHeader(forkyHeader.beacon) let
bid = forkyHeader.beacon.toBlockId()
consensusFork = cfg.consensusForkAtEpoch(bid.slot.epoch)
blockHash = forkyHeader.execution.block_hash
info "New LC optimistic header"
if elManager == nil or blockHash.isZero or
not isSynced(bid.slot, getBeaconTime().slotOrZero()):
return
withConsensusFork(consensusFork):
when lcDataForkAtConsensusFork(consensusFork) == lcDataFork:
optimisticFcuFut = elManager.forkchoiceUpdated(
headBlockHash = blockHash,
safeBlockHash = blockHash, # stub value
finalizedBlockHash = ZERO_HASH,
payloadAttributes = Opt.none(consensusFork.PayloadAttributes))
optimisticFcuFut.addCallback do (future: pointer):
optimisticFcuFut = nil
else:
info "Ignoring new LC optimistic header until Capella"
lightClient.onFinalizedHeader = onFinalizedHeader lightClient.onFinalizedHeader = onFinalizedHeader
lightClient.onOptimisticHeader = onOptimisticHeader lightClient.onOptimisticHeader = onOptimisticHeader
@ -204,9 +224,7 @@ programMain:
let optimisticHeader = lightClient.optimisticHeader let optimisticHeader = lightClient.optimisticHeader
withForkyHeader(optimisticHeader): withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
# Check whether light client has synced sufficiently close to wall slot isSynced(forkyHeader.beacon.slot, wallSlot)
const maxAge = 2 * SLOTS_PER_EPOCH
forkyHeader.beacon.slot >= max(wallSlot, maxAge.Slot) - maxAge
else: else:
false false

View File

@ -90,7 +90,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VOLUNTARY_EXITS: MAX_VOLUNTARY_EXITS:
Base10.toString(MAX_VOLUNTARY_EXITS), Base10.toString(MAX_VOLUNTARY_EXITS),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/altair.yaml
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: INACTIVITY_PENALTY_QUOTIENT_ALTAIR:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR), Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR),
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR:
@ -106,7 +106,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
UPDATE_TIMEOUT: UPDATE_TIMEOUT:
Base10.toString(UPDATE_TIMEOUT), Base10.toString(UPDATE_TIMEOUT),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/bellatrix.yaml
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: INACTIVITY_PENALTY_QUOTIENT_BELLATRIX:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX), Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX),
MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX:
@ -122,7 +122,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_EXTRA_DATA_BYTES: MAX_EXTRA_DATA_BYTES:
Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)), Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/capella.yaml
MAX_BLS_TO_EXECUTION_CHANGES: MAX_BLS_TO_EXECUTION_CHANGES:
Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)), Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)),
MAX_WITHDRAWALS_PER_PAYLOAD: MAX_WITHDRAWALS_PER_PAYLOAD:
@ -130,7 +130,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP:
Base10.toString(uint64(MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)), Base10.toString(uint64(MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/deneb.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/deneb.yaml
FIELD_ELEMENTS_PER_BLOB: FIELD_ELEMENTS_PER_BLOB:
Base10.toString(deneb_preset.FIELD_ELEMENTS_PER_BLOB), Base10.toString(deneb_preset.FIELD_ELEMENTS_PER_BLOB),
MAX_BLOB_COMMITMENTS_PER_BLOCK: MAX_BLOB_COMMITMENTS_PER_BLOCK:

View File

@ -43,7 +43,7 @@ const
GENESIS_SLOT* = Slot(0) GENESIS_SLOT* = Slot(0)
GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT) GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#constant # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#constant
INTERVALS_PER_SLOT* = 3 INTERVALS_PER_SLOT* = 3
FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high()) FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high())
@ -139,16 +139,16 @@ const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate
aggregateSlotOffset* = TimeDiff(nanoseconds: aggregateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds: syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
syncContributionSlotOffset* = TimeDiff(nanoseconds: syncContributionSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/p2p-interface.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds: lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/p2p-interface.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds: lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
@ -188,7 +188,7 @@ func epoch*(slot: Slot): Epoch = # aka compute_epoch_at_slot
if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH
else: Epoch(slot div SLOTS_PER_EPOCH) else: Epoch(slot div SLOTS_PER_EPOCH)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#compute_slots_since_epoch_start # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start
## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`) ## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`)
(slot mod SLOTS_PER_EPOCH) (slot mod SLOTS_PER_EPOCH)
@ -196,7 +196,7 @@ func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_st
template is_epoch*(slot: Slot): bool = template is_epoch*(slot: Slot): bool =
slot.since_epoch_start == 0 slot.since_epoch_start == 0
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch
## Return the start slot of ``epoch``. ## Return the start slot of ``epoch``.
const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH) const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH)
@ -216,7 +216,7 @@ iterator slots*(epoch: Epoch): Slot =
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH: for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
yield slot yield slot
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee
template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod = template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod =
if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD
else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD) else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

View File

@ -67,7 +67,7 @@ func get_validator_from_deposit*(
effective_balance: effective_balance effective_balance: effective_balance
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-get_validator_from_deposit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-get_validator_from_deposit
func get_validator_from_deposit*( func get_validator_from_deposit*(
state: electra.BeaconState, deposit: DepositData): Validator = state: electra.BeaconState, deposit: DepositData): Validator =
Validator( Validator(
@ -86,7 +86,7 @@ func compute_activation_exit_epoch*(epoch: Epoch): Epoch =
## ``epoch`` take effect. ## ``epoch`` take effect.
epoch + 1 + MAX_SEED_LOOKAHEAD epoch + 1 + MAX_SEED_LOOKAHEAD
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_validator_churn_limit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_validator_churn_limit
func get_validator_churn_limit*( func get_validator_churn_limit*(
cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache): cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache):
uint64 = uint64 =
@ -301,7 +301,7 @@ from ./datatypes/deneb import BeaconState
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator
func get_slashing_penalty*( func get_slashing_penalty*(
state: ForkyBeaconState, validator_effective_balance: Gwei): Gwei = state: ForkyBeaconState, validator_effective_balance: Gwei): Gwei =
@ -319,7 +319,7 @@ func get_slashing_penalty*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_whistleblower_reward*( func get_whistleblower_reward*(
state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState | state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState, capella.BeaconState | deneb.BeaconState,
@ -333,7 +333,7 @@ func get_whistleblower_reward*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei = func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei =
when state is phase0.BeaconState: when state is phase0.BeaconState:
whistleblower_reward div PROPOSER_REWARD_QUOTIENT whistleblower_reward div PROPOSER_REWARD_QUOTIENT
@ -346,7 +346,7 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
proc slash_validator*( proc slash_validator*(
cfg: RuntimeConfig, state: var ForkyBeaconState, cfg: RuntimeConfig, state: var ForkyBeaconState,
slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo, slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo,
@ -419,7 +419,7 @@ func get_initial_beacon_block*(state: altair.HashedBeaconState):
altair.TrustedSignedBeaconBlock( altair.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message)) message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
func get_initial_beacon_block*(state: bellatrix.HashedBeaconState): func get_initial_beacon_block*(state: bellatrix.HashedBeaconState):
bellatrix.TrustedSignedBeaconBlock = bellatrix.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted # The genesis block is implicitly trusted
@ -431,7 +431,7 @@ func get_initial_beacon_block*(state: bellatrix.HashedBeaconState):
bellatrix.TrustedSignedBeaconBlock( bellatrix.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message)) message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#testing
func get_initial_beacon_block*(state: capella.HashedBeaconState): func get_initial_beacon_block*(state: capella.HashedBeaconState):
capella.TrustedSignedBeaconBlock = capella.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted # The genesis block is implicitly trusted
@ -589,7 +589,7 @@ iterator get_attesting_indices_iter*(state: ForkyBeaconState,
if bits[index_in_committee]: if bits[index_in_committee]:
yield validator_index yield validator_index
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#modified-get_attesting_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_attesting_indices
iterator get_attesting_indices_iter*( iterator get_attesting_indices_iter*(
state: electra.BeaconState, state: electra.BeaconState,
data: AttestationData, data: AttestationData,
@ -617,7 +617,7 @@ func get_attesting_indices*(
toSeq(get_attesting_indices_iter(state, data, aggregation_bits, cache)) toSeq(get_attesting_indices_iter(state, data, aggregation_bits, cache))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_attesting_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_attesting_indices
func get_attesting_indices*( func get_attesting_indices*(
state: ForkyBeaconState, data: AttestationData, state: ForkyBeaconState, data: AttestationData,
aggregation_bits: ElectraCommitteeValidatorsBits, committee_bits: auto, aggregation_bits: ElectraCommitteeValidatorsBits, committee_bits: auto,
@ -734,7 +734,7 @@ func check_attestation_target_epoch(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-process_attestation # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-process_attestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#modified-process_attestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/beacon-chain.md#modified-process_attestation
func check_attestation_inclusion( func check_attestation_inclusion(
consensusFork: static ConsensusFork, attestation_slot: Slot, consensusFork: static ConsensusFork, attestation_slot: Slot,
current_slot: Slot): Result[void, cstring] = current_slot: Slot): Result[void, cstring] =
@ -763,7 +763,7 @@ func check_attestation_index(
Result[CommitteeIndex, cstring] = Result[CommitteeIndex, cstring] =
check_attestation_index(data.index, committees_per_slot) check_attestation_index(data.index, committees_per_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
func get_attestation_participation_flag_indices( func get_attestation_participation_flag_indices(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState, state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState,
data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] = data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] =
@ -1122,6 +1122,7 @@ proc process_attestation*(
ok(proposer_reward) ok(proposer_reward)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee_indices # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee_indices
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices
func get_next_sync_committee_keys( func get_next_sync_committee_keys(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState | electra.BeaconState): deneb.BeaconState | electra.BeaconState):
@ -1153,42 +1154,29 @@ func get_next_sync_committee_keys(
candidate_index = active_validator_indices[shuffled_index] candidate_index = active_validator_indices[shuffled_index]
random_byte = eth2digest(hash_buffer).data[i mod 32] random_byte = eth2digest(hash_buffer).data[i mod 32]
effective_balance = state.validators[candidate_index].effective_balance effective_balance = state.validators[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= const meb =
MAX_EFFECTIVE_BALANCE.Gwei * random_byte: when typeof(state).kind >= ConsensusFork.Electra:
MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei # [Modified in Electra:EIP7251]
else:
MAX_EFFECTIVE_BALANCE.Gwei
if effective_balance * MAX_RANDOM_BYTE >= meb * random_byte:
res[index] = state.validators[candidate_index].pubkey res[index] = state.validators[candidate_index].pubkey
inc index inc index
i += 1'u64 i += 1'u64
res res
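The acceptance test above keeps a candidate with probability roughly effective_balance / meb, since random_byte is uniform over 0..255. A quick numeric check, assuming MAX_RANDOM_BYTE is 255 as in the phase0 spec and using illustrative values rather than real Gwei balances:

const maxRandomByte = 255'u64   # assumed value of MAX_RANDOM_BYTE
let
  meb = 2048'u64  # stand-in for the applicable max effective balance
  eb = 1024'u64   # stand-in for the candidate's effective balance
var accepted = 0
for randomByte in 0'u64 .. maxRandomByte:
  if eb * maxRandomByte >= meb * randomByte:
    inc accepted
doAssert accepted == 128  # accepted for random_byte in 0..127, i.e. about eb/meb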
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
func has_eth1_withdrawal_credential*(validator: Validator): bool = func has_eth1_withdrawal_credential*(validator: Validator): bool =
## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential. ## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential.
validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#is_fully_withdrawable_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-is_compounding_withdrawal_credential
func is_fully_withdrawable_validator(
validator: Validator, balance: Gwei, epoch: Epoch): bool =
## Check if ``validator`` is fully withdrawable.
has_eth1_withdrawal_credential(validator) and
validator.withdrawable_epoch <= epoch and balance > 0.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#is_partially_withdrawable_validator
func is_partially_withdrawable_validator(
validator: Validator, balance: Gwei): bool =
## Check if ``validator`` is partially withdrawable.
let
has_max_effective_balance =
validator.effective_balance == MAX_EFFECTIVE_BALANCE.Gwei
has_excess_balance = balance > MAX_EFFECTIVE_BALANCE.Gwei
has_eth1_withdrawal_credential(validator) and
has_max_effective_balance and has_excess_balance
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-is_compounding_withdrawal_credential
func is_compounding_withdrawal_credential*( func is_compounding_withdrawal_credential*(
withdrawal_credentials: Eth2Digest): bool = withdrawal_credentials: Eth2Digest): bool =
withdrawal_credentials.data[0] == COMPOUNDING_WITHDRAWAL_PREFIX withdrawal_credentials.data[0] == COMPOUNDING_WITHDRAWAL_PREFIX
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-has_compounding_withdrawal_credential # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-has_compounding_withdrawal_credential
func has_compounding_withdrawal_credential*(validator: Validator): bool = func has_compounding_withdrawal_credential*(validator: Validator): bool =
## Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal ## Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal
## credential. ## credential.
@ -1200,6 +1188,43 @@ func has_execution_withdrawal_credential*(validator: Validator): bool =
has_compounding_withdrawal_credential(validator) or has_compounding_withdrawal_credential(validator) or
has_eth1_withdrawal_credential(validator) has_eth1_withdrawal_credential(validator)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#is_fully_withdrawable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_fully_withdrawable_validator
func is_fully_withdrawable_validator(
fork: static ConsensusFork, validator: Validator, balance: Gwei,
epoch: Epoch): bool =
## Check if ``validator`` is fully withdrawable.
when fork >= ConsensusFork.Electra:
# [Modified in Electra:EIP7251]
has_execution_withdrawal_credential(validator) and
validator.withdrawable_epoch <= epoch and balance > 0.Gwei
else:
has_eth1_withdrawal_credential(validator) and
validator.withdrawable_epoch <= epoch and balance > 0.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#is_partially_withdrawable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_partially_withdrawable_validator
func is_partially_withdrawable_validator(
fork: static ConsensusFork, validator: Validator, balance: Gwei): bool =
## Check if ``validator`` is partially withdrawable.
when fork >= ConsensusFork.Electra:
# [Modified in Electra:EIP7251]
let
max_effective_balance = get_validator_max_effective_balance(validator)
has_max_effective_balance =
validator.effective_balance == max_effective_balance
has_excess_balance =
balance > max_effective_balance # [Modified in Electra:EIP7251]
has_execution_withdrawal_credential(validator) and
has_max_effective_balance and has_excess_balance
else:
let
has_max_effective_balance =
validator.effective_balance == static(MAX_EFFECTIVE_BALANCE.Gwei)
has_excess_balance = balance > static(MAX_EFFECTIVE_BALANCE.Gwei)
has_eth1_withdrawal_credential(validator) and
has_max_effective_balance and has_excess_balance
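In the Electra branch the ceiling is no longer the fixed MAX_EFFECTIVE_BALANCE but the per-validator maximum, so a compounding (0x02) validator only becomes partially withdrawable once it sits at the 2048 ETH ceiling with excess on top. A hedged sketch with the preset values (32 ETH and 2048 ETH, in Gwei) and hypothetical balances, not the spec types:

const
  minActivationBalanceGwei = 32_000_000_000'u64            # 32 ETH
  maxEffectiveBalanceElectraGwei = 2_048_000_000_000'u64   # 2048 ETH
func partiallyWithdrawableSketch(
    compounding: bool, effectiveBalance, balance: uint64): bool =
  let maxEb =
    if compounding: maxEffectiveBalanceElectraGwei
    else: minActivationBalanceGwei
  effectiveBalance == maxEb and balance > maxEb
doAssert partiallyWithdrawableSketch(
  true, 2_048_000_000_000'u64, 2_049_000_000_000'u64)
doAssert not partiallyWithdrawableSketch(
  true, 2_047_000_000_000'u64, 2_049_000_000_000'u64)  # not at the ceiling yet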
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#get_validator_max_effective_balance # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#get_validator_max_effective_balance
func get_validator_max_effective_balance(validator: Validator): Gwei = func get_validator_max_effective_balance(validator: Validator): Gwei =
## Get max effective balance for ``validator``. ## Get max effective balance for ``validator``.
@ -1237,7 +1262,7 @@ func switch_to_compounding_validator*(
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw
func get_pending_balance_to_withdraw*( func get_pending_balance_to_withdraw*(
state: Electra.BeaconState, validator_index: ValidatorIndex): Gwei = state: electra.BeaconState, validator_index: ValidatorIndex): Gwei =
var pending_balance: Gwei var pending_balance: Gwei
for withdrawal in state.pending_partial_withdrawals: for withdrawal in state.pending_partial_withdrawals:
if withdrawal.index == validator_index: if withdrawal.index == validator_index:
@ -1247,21 +1272,21 @@ func get_pending_balance_to_withdraw*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#new-get_expected_withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#new-get_expected_withdrawals
func get_expected_withdrawals*( func get_expected_withdrawals*(
state: capella.BeaconState | deneb.BeaconState | electra.BeaconState): state: capella.BeaconState | deneb.BeaconState): seq[Withdrawal] =
seq[Withdrawal] =
let let
epoch = get_current_epoch(state) epoch = get_current_epoch(state)
num_validators = lenu64(state.validators) num_validators = lenu64(state.validators)
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
var var
withdrawal_index = state.next_withdrawal_index withdrawal_index = state.next_withdrawal_index
validator_index = state.next_withdrawal_validator_index validator_index = state.next_withdrawal_validator_index
withdrawals: seq[Withdrawal] = @[] withdrawals: seq[Withdrawal] = @[]
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
for _ in 0 ..< bound: for _ in 0 ..< bound:
let let
validator = state.validators[validator_index] validator = state.validators[validator_index]
balance = state.balances[validator_index] balance = state.balances[validator_index]
if is_fully_withdrawable_validator(validator, balance, epoch): if is_fully_withdrawable_validator(
typeof(state).kind, validator, balance, epoch):
var w = Withdrawal( var w = Withdrawal(
index: withdrawal_index, index: withdrawal_index,
validator_index: validator_index, validator_index: validator_index,
@ -1269,7 +1294,8 @@ func get_expected_withdrawals*(
w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1] w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1]
withdrawals.add w withdrawals.add w
withdrawal_index = WithdrawalIndex(withdrawal_index + 1) withdrawal_index = WithdrawalIndex(withdrawal_index + 1)
elif is_partially_withdrawable_validator(validator, balance): elif is_partially_withdrawable_validator(
typeof(state).kind, validator, balance):
var w = Withdrawal( var w = Withdrawal(
index: withdrawal_index, index: withdrawal_index,
validator_index: validator_index, validator_index: validator_index,
@ -1282,6 +1308,82 @@ func get_expected_withdrawals*(
validator_index = (validator_index + 1) mod num_validators validator_index = (validator_index + 1) mod num_validators
withdrawals withdrawals
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-get_expected_withdrawals
# This partials count is used in exactly one place, while being able to treat
# the results of get_expected_withdrawals cleanly as a seq[Withdrawal] is, in
# general, valuable enough to make that the default version of this spec function.
func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):
(seq[Withdrawal], uint64) =
let epoch = get_current_epoch(state)
var
withdrawal_index = state.next_withdrawal_index
withdrawals: seq[Withdrawal] = @[]
# [New in Electra:EIP7251] Consume pending partial withdrawals
for withdrawal in state.pending_partial_withdrawals:
if withdrawal.withdrawable_epoch > epoch or
len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP:
break
let
validator = state.validators[withdrawal.index]
has_sufficient_effective_balance =
validator.effective_balance >= static(MIN_ACTIVATION_BALANCE.Gwei)
has_excess_balance =
state.balances[withdrawal.index] > static(MIN_ACTIVATION_BALANCE.Gwei)
if validator.exit_epoch == FAR_FUTURE_EPOCH and
has_sufficient_effective_balance and has_excess_balance:
let withdrawable_balance = min(
state.balances[withdrawal.index] - static(MIN_ACTIVATION_BALANCE.Gwei),
withdrawal.amount)
var w = Withdrawal(
index: withdrawal_index,
validator_index: withdrawal.index,
amount: withdrawable_balance)
w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1]
withdrawals.add w
withdrawal_index += 1
let partial_withdrawals_count = lenu64(withdrawals)
let
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
num_validators = lenu64(state.validators)
var validator_index = state.next_withdrawal_validator_index
# Sweep for remaining.
for _ in 0 ..< bound:
let
validator = state.validators[validator_index]
balance = state.balances[validator_index]
if is_fully_withdrawable_validator(
typeof(state).kind, validator, balance, epoch):
var w = Withdrawal(
index: withdrawal_index,
validator_index: validator_index,
amount: balance)
w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1]
withdrawals.add w
withdrawal_index = WithdrawalIndex(withdrawal_index + 1)
elif is_partially_withdrawable_validator(
typeof(state).kind, validator, balance):
var w = Withdrawal(
index: withdrawal_index,
validator_index: validator_index,
# [Modified in Electra:EIP7251]
amount: balance - get_validator_max_effective_balance(validator))
w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1]
withdrawals.add w
withdrawal_index = WithdrawalIndex(withdrawal_index + 1)
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
break
validator_index = (validator_index + 1) mod num_validators
(withdrawals, partial_withdrawals_count)
func get_expected_withdrawals*(state: electra.BeaconState): seq[Withdrawal] =
get_expected_withdrawals_with_partial_count(state)[0]
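In the pending-partial-withdrawal loop above, only the balance in excess of MIN_ACTIVATION_BALANCE may leave, capped by the requested amount. A small arithmetic sketch with hypothetical balances (MIN_ACTIVATION_BALANCE is 32 ETH, i.e. 32e9 Gwei, per the Electra presets):

const minActivationBalanceGwei = 32_000_000_000'u64
let
  balance = 33_500_000_000'u64   # hypothetical state.balances[withdrawal.index]
  requested = 2_000_000_000'u64  # hypothetical withdrawal.amount
  withdrawable = min(balance - minActivationBalanceGwei, requested)
doAssert withdrawable == 1_500_000_000'u64  # capped by the excess, not the request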
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee
func get_next_sync_committee*( func get_next_sync_committee*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
@ -1404,8 +1506,8 @@ proc initialize_hashed_beacon_state_from_eth1*(
cfg, eth1_block_hash, eth1_timestamp, deposits, flags)) cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
result.root = hash_tree_root(result.data) result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing
proc initialize_beacon_state_from_eth1*( proc initialize_beacon_state_from_eth1*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
@ -1757,7 +1859,7 @@ func upgrade_to_capella*(cfg: RuntimeConfig, pre: bellatrix.BeaconState):
# historical_summaries initialized to correct default automatically # historical_summaries initialized to correct default automatically
) )
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/deneb/fork.md#upgrading-the-state # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/fork.md#upgrading-the-state
func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState): func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
ref deneb.BeaconState = ref deneb.BeaconState =
let let
@ -1842,7 +1944,7 @@ func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
historical_summaries: pre.historical_summaries historical_summaries: pre.historical_summaries
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/fork.md#upgrading-the-state # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/fork.md#upgrading-the-state
func upgrade_to_electra*( func upgrade_to_electra*(
cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache): cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache):
ref electra.BeaconState = ref electra.BeaconState =
@ -1866,8 +1968,9 @@ func upgrade_to_electra*(
withdrawals_root: pre.latest_execution_payload_header.withdrawals_root, withdrawals_root: pre.latest_execution_payload_header.withdrawals_root,
blob_gas_used: 0, blob_gas_used: 0,
excess_blob_gas: 0, excess_blob_gas: 0,
deposit_receipts_root: ZERO_HASH, # [New in Electra:EIP6110] deposit_requests_root: ZERO_HASH, # [New in Electra:EIP6110]
withdrawal_requests_root: ZERO_HASH, # [New in ELectra:EIP7002] withdrawal_requests_root: ZERO_HASH, # [New in Electra:EIP7002],
consolidation_requests_root: ZERO_HASH # [New in Electra:EIP7251]
) )
var max_exit_epoch = FAR_FUTURE_EPOCH var max_exit_epoch = FAR_FUTURE_EPOCH
@ -1942,7 +2045,7 @@ func upgrade_to_electra*(
historical_summaries: pre.historical_summaries, historical_summaries: pre.historical_summaries,
# [New in Electra:EIP6110] # [New in Electra:EIP6110]
deposit_receipts_start_index: UNSET_DEPOSIT_RECEIPTS_START_INDEX, deposit_requests_start_index: UNSET_DEPOSIT_REQUESTS_START_INDEX,
# [New in Electra:EIP7251] # [New in Electra:EIP7251]
deposit_balance_to_consume: 0.Gwei, deposit_balance_to_consume: 0.Gwei,

View File

@ -51,7 +51,7 @@ const
PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] = PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] =
[uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT] [uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#misc # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#misc
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16 TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16
SYNC_COMMITTEE_SUBNET_COUNT* = 4 SYNC_COMMITTEE_SUBNET_COUNT* = 4
@ -60,7 +60,7 @@ const
# The first member (`genesis_time`) is 32, subsequent members +1 each. # The first member (`genesis_time`) is 32, subsequent members +1 each.
# If there are ever more than 32 members in `BeaconState`, indices change! # If there are ever more than 32 members in `BeaconState`, indices change!
# `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `52 * 2 + 1`. # `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `52 * 2 + 1`.
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
FINALIZED_ROOT_GINDEX* = 105.GeneralizedIndex # finalized_checkpoint > root FINALIZED_ROOT_GINDEX* = 105.GeneralizedIndex # finalized_checkpoint > root
CURRENT_SYNC_COMMITTEE_GINDEX* = 54.GeneralizedIndex # current_sync_committee CURRENT_SYNC_COMMITTEE_GINDEX* = 54.GeneralizedIndex # current_sync_committee
NEXT_SYNC_COMMITTEE_GINDEX* = 55.GeneralizedIndex # next_sync_committee NEXT_SYNC_COMMITTEE_GINDEX* = 55.GeneralizedIndex # next_sync_committee
@ -75,7 +75,7 @@ static: doAssert TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT +
type type
### New types ### New types
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#synccommitteemessage # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#synccommitteemessage
SyncCommitteeMessage* = object SyncCommitteeMessage* = object
slot*: Slot slot*: Slot
## Slot to which this contribution pertains ## Slot to which this contribution pertains
@ -89,7 +89,7 @@ type
signature*: ValidatorSig signature*: ValidatorSig
## Signature by the validator over the block root of `slot` ## Signature by the validator over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#synccommitteecontribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#synccommitteecontribution
SyncCommitteeAggregationBits* = SyncCommitteeAggregationBits* =
BitArray[SYNC_SUBCOMMITTEE_SIZE] BitArray[SYNC_SUBCOMMITTEE_SIZE]
@ -111,18 +111,18 @@ type
signature*: ValidatorSig signature*: ValidatorSig
## Signature by the validator(s) over the block root of `slot` ## Signature by the validator(s) over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#contributionandproof # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#contributionandproof
ContributionAndProof* = object ContributionAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation aggregator_index*: uint64 # `ValidatorIndex` after validation
contribution*: SyncCommitteeContribution contribution*: SyncCommitteeContribution
selection_proof*: ValidatorSig selection_proof*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#signedcontributionandproof # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#signedcontributionandproof
SignedContributionAndProof* = object SignedContributionAndProof* = object
message*: ContributionAndProof message*: ContributionAndProof
signature*: ValidatorSig signature*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#syncaggregatorselectiondata # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#syncaggregatorselectiondata
SyncAggregatorSelectionData* = object SyncAggregatorSelectionData* = object
slot*: Slot slot*: Slot
subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation
@ -138,7 +138,7 @@ type
NextSyncCommitteeBranch* = NextSyncCommitteeBranch* =
array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest] array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#lightclientheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientheader
LightClientHeader* = object LightClientHeader* = object
beacon*: BeaconBlockHeader beacon*: BeaconBlockHeader
## Beacon block header ## Beacon block header
@ -152,7 +152,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root` ## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: CurrentSyncCommitteeBranch current_sync_committee_branch*: CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientupdate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object LightClientUpdate* = object
attested_header*: LightClientHeader attested_header*: LightClientHeader
## Header attested to by the sync committee ## Header attested to by the sync committee
@ -582,7 +582,7 @@ chronicles.formatIt SyncCommitteeContribution: shortLog(it)
chronicles.formatIt ContributionAndProof: shortLog(it) chronicles.formatIt ContributionAndProof: shortLog(it)
chronicles.formatIt SignedContributionAndProof: shortLog(it) chronicles.formatIt SignedContributionAndProof: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header
func is_valid_light_client_header*( func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool = header: LightClientHeader, cfg: RuntimeConfig): bool =
true true
@ -76,7 +76,7 @@ export
tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto, tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto,
digest, presets, kzg4844 digest, presets, kzg4844
const SPEC_VERSION* = "1.5.0-alpha.2" const SPEC_VERSION* = "1.5.0-alpha.3"
## Spec version we're aiming to be compatible with, right now ## Spec version we're aiming to be compatible with, right now
const const
@ -334,7 +334,7 @@ type
withdrawable_epoch*: Epoch withdrawable_epoch*: Epoch
## When validator can withdraw funds ## When validator can withdraw funds
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#pendingattestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#pendingattestation
PendingAttestation* = object PendingAttestation* = object
aggregation_bits*: CommitteeValidatorsBits aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData data*: AttestationData
@ -343,7 +343,7 @@ type
proposer_index*: uint64 # `ValidatorIndex` after validation proposer_index*: uint64 # `ValidatorIndex` after validation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#historicalbatch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch
HistoricalBatch* = object HistoricalBatch* = object
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
@ -380,7 +380,7 @@ type
sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE] sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE]
sync_committee_signature*: TrustedSig sync_committee_signature*: TrustedSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#custom-types # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#custom-types
Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION] Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION]
ExecutionAddress* = object ExecutionAddress* = object
@ -396,8 +396,8 @@ type
address*: ExecutionAddress address*: ExecutionAddress
amount*: Gwei amount*: Gwei
# https://github.com/ethereum/consensus-specs/blob/94a0b6c581f2809aa8aca4ef7ee6fbb63f9d74e9/specs/electra/beacon-chain.md#depositreceipt # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#depositrequest
DepositReceipt* = object DepositRequest* = object
pubkey*: ValidatorPubKey pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest withdrawal_credentials*: Eth2Digest
amount*: Gwei amount*: Gwei
@ -405,18 +405,24 @@ type
index*: uint64 index*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#executionlayerwithdrawalrequest # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#executionlayerwithdrawalrequest
ExecutionLayerWithdrawalRequest* = object WithdrawalRequest* = object
source_address*: ExecutionAddress source_address*: ExecutionAddress
validator_pubkey*: ValidatorPubKey validator_pubkey*: ValidatorPubKey
amount*: Gwei amount*: Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#consolidationrequest
ConsolidationRequest* = object
source_address*: ExecutionAddress
source_pubkey*: ValidatorPubKey
target_pubkey*: ValidatorPubKey
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#blstoexecutionchange # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#blstoexecutionchange
BLSToExecutionChange* = object BLSToExecutionChange* = object
validator_index*: uint64 validator_index*: uint64
from_bls_pubkey*: ValidatorPubKey from_bls_pubkey*: ValidatorPubKey
to_execution_address*: ExecutionAddress to_execution_address*: ExecutionAddress
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#signedblstoexecutionchange # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#signedblstoexecutionchange
SignedBLSToExecutionChange* = object SignedBLSToExecutionChange* = object
message*: BLSToExecutionChange message*: BLSToExecutionChange
signature*: ValidatorSig signature*: ValidatorSig
@ -442,7 +448,7 @@ type
message*: Consolidation message*: Consolidation
signature*: TrustedSig signature*: TrustedSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#historicalsummary # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#historicalsummary
HistoricalSummary* = object HistoricalSummary* = object
# `HistoricalSummary` matches the components of the phase0 # `HistoricalSummary` matches the components of the phase0
# `HistoricalBatch` making the two hash_tree_root-compatible. # `HistoricalBatch` making the two hash_tree_root-compatible.
@ -489,7 +495,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body_root*: Eth2Digest body_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signingdata # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#signingdata
SigningData* = object SigningData* = object
object_root*: Eth2Digest object_root*: Eth2Digest
domain*: Eth2Domain domain*: Eth2Domain
@ -518,7 +524,7 @@ type
sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache] sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]
# This matches the mutable state of the Solidity deposit contract # This matches the mutable state of the Solidity deposit contract
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/solidity_deposit_contract/deposit_contract.sol # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/solidity_deposit_contract/deposit_contract.sol
DepositContractState* = object DepositContractState* = object
branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
deposit_count*: array[32, byte] # Uint256 deposit_count*: array[32, byte] # Uint256
@ -35,7 +35,7 @@ const
NEWPAYLOAD_TIMEOUT* = 8.seconds NEWPAYLOAD_TIMEOUT* = 8.seconds
type type
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#executionpayload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayload
ExecutionPayload* = object ExecutionPayload* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
@ -63,7 +63,7 @@ type
executionPayload*: ExecutionPayload executionPayload*: ExecutionPayload
blockValue*: Wei blockValue*: Wei
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#executionpayloadheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object ExecutionPayloadHeader* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
@ -87,13 +87,13 @@ type
ExecutePayload* = proc( ExecutePayload* = proc(
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].} execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/fork-choice.md#powblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/fork-choice.md#powblock
PowBlock* = object PowBlock* = object
block_hash*: Eth2Digest block_hash*: Eth2Digest
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
total_difficulty*: Eth2Digest # uint256 total_difficulty*: Eth2Digest # uint256
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconstate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
BeaconState* = object BeaconState* = object
# Versioning # Versioning
genesis_time*: uint64 genesis_time*: uint64
@ -218,7 +218,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body*: TrustedBeaconBlockBody body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object BeaconBlockBody* = object
randao_reveal*: ValidatorSig randao_reveal*: ValidatorSig
eth1_data*: Eth1Data eth1_data*: Eth1Data
@ -32,7 +32,7 @@ const
# This index is rooted in `BeaconBlockBody`. # This index is rooted in `BeaconBlockBody`.
# The first member (`randao_reveal`) is 16, subsequent members +1 each. # The first member (`randao_reveal`) is 16, subsequent members +1 each.
# If there are ever more than 16 members in `BeaconBlockBody`, indices change! # If there are ever more than 16 members in `BeaconBlockBody`, indices change!
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex # execution_payload EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex # execution_payload
type type
@ -104,7 +104,7 @@ type
## Execution payload header corresponding to `beacon.body_root` (from Capella onward) ## Execution payload header corresponding to `beacon.body_root` (from Capella onward)
execution_branch*: ExecutionBranch execution_branch*: ExecutionBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientbootstrap # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object LightClientBootstrap* = object
header*: LightClientHeader header*: LightClientHeader
## Header matching the requested beacon block root ## Header matching the requested beacon block root
@ -193,7 +193,7 @@ type
## (used to compute safety threshold) ## (used to compute safety threshold)
current_max_active_participants*: uint64 current_max_active_participants*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#beaconstate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
BeaconState* = object BeaconState* = object
# Versioning # Versioning
genesis_time*: uint64 genesis_time*: uint64
@ -643,13 +643,13 @@ func is_valid_light_client_header*(
get_subtree_index(EXECUTION_PAYLOAD_GINDEX), get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root) header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_capella*( func upgrade_lc_header_to_capella*(
pre: altair.LightClientHeader): LightClientHeader = pre: altair.LightClientHeader): LightClientHeader =
LightClientHeader( LightClientHeader(
beacon: pre.beacon) beacon: pre.beacon)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_capella*( func upgrade_lc_bootstrap_to_capella*(
pre: altair.LightClientBootstrap): LightClientBootstrap = pre: altair.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap( LightClientBootstrap(
@ -657,7 +657,7 @@ func upgrade_lc_bootstrap_to_capella*(
current_sync_committee: pre.current_sync_committee, current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch) current_sync_committee_branch: pre.current_sync_committee_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_capella*( func upgrade_lc_update_to_capella*(
pre: altair.LightClientUpdate): LightClientUpdate = pre: altair.LightClientUpdate): LightClientUpdate =
LightClientUpdate( LightClientUpdate(
@ -669,7 +669,7 @@ func upgrade_lc_update_to_capella*(
sync_aggregate: pre.sync_aggregate, sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot) signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_capella*( func upgrade_lc_finality_update_to_capella*(
pre: altair.LightClientFinalityUpdate): LightClientFinalityUpdate = pre: altair.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate( LightClientFinalityUpdate(
@ -679,7 +679,7 @@ func upgrade_lc_finality_update_to_capella*(
sync_aggregate: pre.sync_aggregate, sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot) signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_capella*( func upgrade_lc_optimistic_update_to_capella*(
pre: altair.LightClientOptimisticUpdate): LightClientOptimisticUpdate = pre: altair.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate( LightClientOptimisticUpdate(
@ -730,7 +730,7 @@ chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it) chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it) chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-the-store # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_capella*( func upgrade_lc_store_to_capella*(
pre: altair.LightClientStore): LightClientStore = pre: altair.LightClientStore): LightClientStore =
let best_valid_update = let best_valid_update =
@ -55,7 +55,7 @@ const
DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF* = DomainType([byte 0x08, 0x00, 0x00, 0x00]) DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF* = DomainType([byte 0x08, 0x00, 0x00, 0x00])
DOMAIN_CONTRIBUTION_AND_PROOF* = DomainType([byte 0x09, 0x00, 0x00, 0x00]) DOMAIN_CONTRIBUTION_AND_PROOF* = DomainType([byte 0x09, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#domain-types # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#domain-types
DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00]) DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#domains # https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#domains
@ -83,9 +83,9 @@ const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/p2p-interface.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/p2p-interface.md#configuration
MAX_REQUEST_BLOCKS_DENEB*: uint64 = 128 # TODO Make use of in request code MAX_REQUEST_BLOCKS_DENEB*: uint64 = 128 # TODO Make use of in request code
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#misc # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#misc
UNSET_DEPOSIT_RECEIPTS_START_INDEX*: uint64 = not 0'u64 UNSET_DEPOSIT_REQUESTS_START_INDEX*: uint64 = not 0'u64
FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0 FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#withdrawal-prefixes # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#withdrawal-prefixes
COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02 COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02
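Two quick checks on the constants above, as a stand-alone sketch with plain integers (no nimbus types): the renamed `UNSET_DEPOSIT_REQUESTS_START_INDEX` sentinel is simply the all-ones 64-bit value, and the compounding prefix is the single byte `0x02` at the start of a validator's withdrawal credentials.

```nim
when isMainModule:
  # `not 0'u64` flips every bit, giving the maximum uint64.
  doAssert (not 0'u64) == high(uint64)
  doAssert (not 0'u64) == 0xFFFF_FFFF_FFFF_FFFF'u64
  # The compounding withdrawal prefix occupies the first credentials byte.
  let prefix = byte 0x02
  doAssert prefix == 0x02
```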
@ -74,7 +74,7 @@ type
kzg_commitment*: KzgCommitment kzg_commitment*: KzgCommitment
versioned_hash*: string # TODO should be string; VersionedHash not distinct versioned_hash*: string # TODO should be string; VersionedHash not distinct
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blobidentifier # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/p2p-interface.md#blobidentifier
BlobIdentifier* = object BlobIdentifier* = object
block_root*: Eth2Digest block_root*: Eth2Digest
index*: BlobIndex index*: BlobIndex
@ -380,7 +380,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body*: TrustedBeaconBlockBody body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object BeaconBlockBody* = object
randao_reveal*: ValidatorSig randao_reveal*: ValidatorSig
eth1_data*: Eth1Data eth1_data*: Eth1Data
@ -464,7 +464,7 @@ type
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments # [New in Deneb] blob_kzg_commitments*: KzgCommitments # [New in Deneb]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object SignedBeaconBlock* = object
message*: BeaconBlock message*: BeaconBlock
signature*: ValidatorSig signature*: ValidatorSig
@ -604,7 +604,7 @@ func kzg_commitment_inclusion_proof_gindex*(
# This index is rooted in `BeaconBlockBody`. # This index is rooted in `BeaconBlockBody`.
# The first member (`randao_reveal`) is 16, subsequent members +1 each. # The first member (`randao_reveal`) is 16, subsequent members +1 each.
# If there are ever more than 16 members in `BeaconBlockBody`, indices change! # If there are ever more than 16 members in `BeaconBlockBody`, indices change!
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
const const
# blob_kzg_commitments # blob_kzg_commitments
BLOB_KZG_COMMITMENTS_GINDEX = BLOB_KZG_COMMITMENTS_GINDEX =
@ -624,14 +624,16 @@ func kzg_commitment_inclusion_proof_gindex*(
BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root
func get_lc_execution_root*( func get_lc_execution_root*(
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest = header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
let epoch = header.beacon.slot.epoch let epoch = header.beacon.slot.epoch
# [New in Deneb]
if epoch >= cfg.DENEB_FORK_EPOCH: if epoch >= cfg.DENEB_FORK_EPOCH:
return hash_tree_root(header.execution) return hash_tree_root(header.execution)
# [Modified in Deneb]
if epoch >= cfg.CAPELLA_FORK_EPOCH: if epoch >= cfg.CAPELLA_FORK_EPOCH:
let execution_header = capella.ExecutionPayloadHeader( let execution_header = capella.ExecutionPayloadHeader(
parent_hash: header.execution.parent_hash, parent_hash: header.execution.parent_hash,
@ -653,11 +655,12 @@ func get_lc_execution_root*(
ZERO_HASH ZERO_HASH
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header
func is_valid_light_client_header*( func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool = header: LightClientHeader, cfg: RuntimeConfig): bool =
let epoch = header.beacon.slot.epoch let epoch = header.beacon.slot.epoch
# [New in Deneb:EIP4844]
if epoch < cfg.DENEB_FORK_EPOCH: if epoch < cfg.DENEB_FORK_EPOCH:
if header.execution.blob_gas_used != 0 or if header.execution.blob_gas_used != 0 or
header.execution.excess_blob_gas != 0: header.execution.excess_blob_gas != 0:
@ -675,7 +678,7 @@ func is_valid_light_client_header*(
get_subtree_index(EXECUTION_PAYLOAD_GINDEX), get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root) header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_deneb*( func upgrade_lc_header_to_deneb*(
pre: capella.LightClientHeader): LightClientHeader = pre: capella.LightClientHeader): LightClientHeader =
LightClientHeader( LightClientHeader(
@ -700,7 +703,7 @@ func upgrade_lc_header_to_deneb*(
excess_blob_gas: 0), # [New in Deneb:EIP4844] excess_blob_gas: 0), # [New in Deneb:EIP4844]
execution_branch: pre.execution_branch) execution_branch: pre.execution_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_deneb*( func upgrade_lc_bootstrap_to_deneb*(
pre: capella.LightClientBootstrap): LightClientBootstrap = pre: capella.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap( LightClientBootstrap(
@ -708,7 +711,7 @@ func upgrade_lc_bootstrap_to_deneb*(
current_sync_committee: pre.current_sync_committee, current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch) current_sync_committee_branch: pre.current_sync_committee_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_deneb*( func upgrade_lc_update_to_deneb*(
pre: capella.LightClientUpdate): LightClientUpdate = pre: capella.LightClientUpdate): LightClientUpdate =
LightClientUpdate( LightClientUpdate(
@ -720,7 +723,7 @@ func upgrade_lc_update_to_deneb*(
sync_aggregate: pre.sync_aggregate, sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot) signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_deneb*( func upgrade_lc_finality_update_to_deneb*(
pre: capella.LightClientFinalityUpdate): LightClientFinalityUpdate = pre: capella.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate( LightClientFinalityUpdate(
@ -730,7 +733,7 @@ func upgrade_lc_finality_update_to_deneb*(
sync_aggregate: pre.sync_aggregate, sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot) signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_deneb*( func upgrade_lc_optimistic_update_to_deneb*(
pre: capella.LightClientOptimisticUpdate): LightClientOptimisticUpdate = pre: capella.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate( LightClientOptimisticUpdate(
@ -781,7 +784,7 @@ chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it) chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it) chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-the-store # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_deneb*( func upgrade_lc_store_to_deneb*(
pre: capella.LightClientStore): LightClientStore = pre: capella.LightClientStore): LightClientStore =
let best_valid_update = let best_valid_update =
@ -29,27 +29,28 @@ from stew/bitops2 import log2trunc
from stew/byteutils import to0xHex from stew/byteutils import to0xHex
from ./altair import from ./altair import
EpochParticipationFlags, InactivityScores, SyncAggregate, SyncCommittee, EpochParticipationFlags, InactivityScores, SyncAggregate, SyncCommittee,
TrustedSyncAggregate TrustedSyncAggregate, num_active_participants
from ./bellatrix import BloomLogs, ExecutionAddress, Transaction from ./bellatrix import BloomLogs, ExecutionAddress, Transaction
from ./capella import from ./capella import
HistoricalSummary, SignedBLSToExecutionChangeList, Withdrawal ExecutionBranch, HistoricalSummary, SignedBLSToExecutionChangeList,
Withdrawal, EXECUTION_PAYLOAD_GINDEX
from ./deneb import Blobs, BlobsBundle, KzgCommitments, KzgProofs from ./deneb import Blobs, BlobsBundle, KzgCommitments, KzgProofs
export json_serialization, stable, kzg4844 export json_serialization, stable, kzg4844
const const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#constants # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#constants
# All of these indices are rooted in `BeaconState`. # All of these indices are rooted in `BeaconState`.
# The first member (`genesis_time`) is 64, subsequent members +1 each. # The first member (`genesis_time`) is 64, subsequent members +1 each.
# If there are ever more than 64 members in `BeaconState`, indices change! # If there are ever more than 64 members in `BeaconState`, indices change!
# `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `84 * 2 + 1`. # `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `84 * 2 + 1`.
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
FINALIZED_ROOT_GINDEX = 169.GeneralizedIndex # finalized_checkpoint > root FINALIZED_ROOT_GINDEX* = 169.GeneralizedIndex # finalized_checkpoint > root
CURRENT_SYNC_COMMITTEE_GINDEX = 86.GeneralizedIndex # current_sync_committee CURRENT_SYNC_COMMITTEE_GINDEX* = 86.GeneralizedIndex # current_sync_committee
NEXT_SYNC_COMMITTEE_GINDEX = 87.GeneralizedIndex # next_sync_committee NEXT_SYNC_COMMITTEE_GINDEX* = 87.GeneralizedIndex # next_sync_committee
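For orientation, a stand-alone sketch of the generalized-index arithmetic these constants encode; the codebase takes `log2trunc` from `stew/bitops2`, so the helper names below are local to the example.

```nim
import std/bitops

func log2truncLocal(x: uint64): int =
  # floor(log2(x)) equals the Merkle branch depth for a generalized index
  63 - countLeadingZeroBits(x)

func subtreeIndexLocal(gindex: uint64): uint64 =
  # leaf position within its subtree, used when verifying a branch
  gindex mod (1'u64 shl log2truncLocal(gindex))

when isMainModule:
  doAssert log2truncLocal(169'u64) == 7   # Electra finality branch: 7 nodes
  doAssert subtreeIndexLocal(169'u64) == 41
  doAssert log2truncLocal(105'u64) == 6   # pre-Electra finality branch: 6 nodes
  doAssert log2truncLocal(86'u64) == 6    # sync committee branches: 6 nodes
```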
type type
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#indexedattestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#indexedattestation
IndexedAttestation* {. IndexedAttestation* {.
sszProfile: StableIndexedAttestation.} = object sszProfile: StableIndexedAttestation.} = object
attesting_indices*: attesting_indices*:
@ -79,9 +80,8 @@ type
attestation_1*: TrustedIndexedAttestation # [Modified in Electra:EIP7549] attestation_1*: TrustedIndexedAttestation # [Modified in Electra:EIP7549]
attestation_2*: TrustedIndexedAttestation # [Modified in Electra:EIP7549] attestation_2*: TrustedIndexedAttestation # [Modified in Electra:EIP7549]
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#executionpayload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#executionpayload
ExecutionPayload* {. ExecutionPayload* {.sszProfile: StableExecutionPayload.} = object
sszProfile: StableExecutionPayload.} = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
fee_recipient*: ExecutionAddress fee_recipient*: ExecutionAddress
@ -105,20 +105,22 @@ type
withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
blob_gas_used*: uint64 blob_gas_used*: uint64
excess_blob_gas*: uint64 excess_blob_gas*: uint64
deposit_receipts*: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] deposit_requests*: List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD]
## [New in Electra:EIP6110] ## [New in Electra:EIP6110]
withdrawal_requests*: withdrawal_requests*:
List[ExecutionLayerWithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD] List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]
## [New in Electra:EIP7002] ## [New in Electra:EIP7002]
consolidation_requests*:
List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD]
## [New in Electra:EIP7251]
ExecutionPayloadForSigning* = object ExecutionPayloadForSigning* = object
executionPayload*: ExecutionPayload executionPayload*: ExecutionPayload
blockValue*: Wei blockValue*: Wei
blobsBundle*: BlobsBundle blobsBundle*: BlobsBundle
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#executionpayloadheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* {. ExecutionPayloadHeader* {.sszProfile: StableExecutionPayloadHeader.} = object
sszProfile: StableExecutionPayloadHeader.} = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
fee_recipient*: ExecutionAddress fee_recipient*: ExecutionAddress
@ -140,22 +142,14 @@ type
withdrawals_root*: Eth2Digest withdrawals_root*: Eth2Digest
blob_gas_used*: uint64 blob_gas_used*: uint64
excess_blob_gas*: uint64 excess_blob_gas*: uint64
deposit_receipts_root*: Eth2Digest # [New in Electra:EIP6110] deposit_requests_root*: Eth2Digest # [New in Electra:EIP6110]
withdrawal_requests_root*: Eth2Digest # [New in Electra:EIP7002:EIP7251] withdrawal_requests_root*: Eth2Digest # [New in Electra:EIP7002:EIP7251]
consolidation_requests_root*: Eth2Digest # [New in Electra:EIP7251]
ExecutePayload* = proc( ExecutePayload* = proc(
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].} execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].}
FinalityBranch = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#aggregateandproof
array[log2trunc(FINALIZED_ROOT_GINDEX), Eth2Digest]
CurrentSyncCommitteeBranch =
array[log2trunc(CURRENT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
NextSyncCommitteeBranch =
array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregateandproof
AggregateAndProof* = object AggregateAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation aggregator_index*: uint64 # `ValidatorIndex` after validation
aggregate*: Attestation aggregate*: Attestation
@ -166,6 +160,15 @@ type
message*: AggregateAndProof message*: AggregateAndProof
signature*: ValidatorSig signature*: ValidatorSig
FinalityBranch* =
array[log2trunc(FINALIZED_ROOT_GINDEX), Eth2Digest]
CurrentSyncCommitteeBranch* =
array[log2trunc(CURRENT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
NextSyncCommitteeBranch* =
array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
LightClientHeader* = object LightClientHeader* = object
beacon*: BeaconBlockHeader beacon*: BeaconBlockHeader
@ -175,7 +178,7 @@ type
## Execution payload header corresponding to `beacon.body_root` (from Capella onward) ## Execution payload header corresponding to `beacon.body_root` (from Capella onward)
execution_branch*: capella.ExecutionBranch execution_branch*: capella.ExecutionBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientbootstrap # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object LightClientBootstrap* = object
header*: LightClientHeader header*: LightClientHeader
## Header matching the requested beacon block root ## Header matching the requested beacon block root
@ -332,7 +335,7 @@ type
historical_summaries*: historical_summaries*:
HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT] HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT]
deposit_receipts_start_index*: uint64 # [New in Electra:EIP6110] deposit_requests_start_index*: uint64 # [New in Electra:EIP6110]
deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251] deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251]
exit_balance_to_consume*: Gwei # [New in Electra:EIP7251] exit_balance_to_consume*: Gwei # [New in Electra:EIP7251]
earliest_exit_epoch*: Epoch # [New in Electra:EIP7251] earliest_exit_epoch*: Epoch # [New in Electra:EIP7251]
@ -360,7 +363,7 @@ type
data*: BeaconState data*: BeaconState
root*: Eth2Digest # hash_tree_root(data) root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#beaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose ## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to ## a new block. Once the block has been proposed, it is transmitted to
@ -417,9 +420,8 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body*: TrustedBeaconBlockBody body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#beaconblockbody
BeaconBlockBody* {. BeaconBlockBody* {.sszProfile: StableBeaconBlockBody.} = object
sszProfile: StableBeaconBlockBody.} = object
randao_reveal*: ValidatorSig randao_reveal*: ValidatorSig
eth1_data*: Eth1Data eth1_data*: Eth1Data
## Eth1 data vote ## Eth1 data vote
@ -443,8 +445,6 @@ type
execution_payload*: electra.ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] execution_payload*: electra.ExecutionPayload # [Modified in Electra:EIP6110:EIP7002]
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments blob_kzg_commitments*: KzgCommitments
consolidations*: List[SignedConsolidation, Limit MAX_CONSOLIDATIONS]
## [New in Electra:EIP7251]
SigVerifiedBeaconBlockBody* {. SigVerifiedBeaconBlockBody* {.
sszProfile: StableBeaconBlockBody.} = object sszProfile: StableBeaconBlockBody.} = object
@ -485,8 +485,6 @@ type
execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002]
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments blob_kzg_commitments*: KzgCommitments
consolidations*: List[TrustedSignedConsolidation, Limit MAX_CONSOLIDATIONS]
## [New in Electra:EIP7251]
TrustedBeaconBlockBody* {. TrustedBeaconBlockBody* {.
sszProfile: StableBeaconBlockBody.} = object sszProfile: StableBeaconBlockBody.} = object
@ -515,8 +513,6 @@ type
execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002]
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments blob_kzg_commitments*: KzgCommitments
consolidations*: List[TrustedSignedConsolidation, Limit MAX_CONSOLIDATIONS]
## [New in Electra:EIP7251]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object SignedBeaconBlock* = object
@ -556,9 +552,8 @@ type
root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block root* {.dontSerialize.}: Eth2Digest # cached root of signed beacon block
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#attestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#attestation
Attestation* {. Attestation* {.sszProfile: StableAttestation.} = object
sszProfile: StableAttestation.} = object
aggregation_bits*: ElectraCommitteeValidatorsBits aggregation_bits*: ElectraCommitteeValidatorsBits
data*: AttestationData data*: AttestationData
signature*: ValidatorSig signature*: ValidatorSig
@ -646,6 +641,233 @@ func shortLog*(v: ExecutionPayload): auto =
excess_blob_gas: $(v.excess_blob_gas) excess_blob_gas: $(v.excess_blob_gas)
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#modified-get_lc_execution_root
func get_lc_execution_root*(
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
let epoch = header.beacon.slot.epoch
# [New in Electra]
if epoch >= cfg.ELECTRA_FORK_EPOCH:
return hash_tree_root(header.execution)
# [Modified in Electra]
if epoch >= cfg.DENEB_FORK_EPOCH:
let execution_header = deneb.ExecutionPayloadHeader(
parent_hash: header.execution.parent_hash,
fee_recipient: header.execution.fee_recipient,
state_root: header.execution.state_root,
receipts_root: header.execution.receipts_root,
logs_bloom: header.execution.logs_bloom,
prev_randao: header.execution.prev_randao,
block_number: header.execution.block_number,
gas_limit: header.execution.gas_limit,
gas_used: header.execution.gas_used,
timestamp: header.execution.timestamp,
extra_data: header.execution.extra_data,
base_fee_per_gas: header.execution.base_fee_per_gas,
block_hash: header.execution.block_hash,
transactions_root: header.execution.transactions_root,
withdrawals_root: header.execution.withdrawals_root,
blob_gas_used: header.execution.blob_gas_used,
excess_blob_gas: header.execution.excess_blob_gas)
return hash_tree_root(execution_header)
if epoch >= cfg.CAPELLA_FORK_EPOCH:
let execution_header = capella.ExecutionPayloadHeader(
parent_hash: header.execution.parent_hash,
fee_recipient: header.execution.fee_recipient,
state_root: header.execution.state_root,
receipts_root: header.execution.receipts_root,
logs_bloom: header.execution.logs_bloom,
prev_randao: header.execution.prev_randao,
block_number: header.execution.block_number,
gas_limit: header.execution.gas_limit,
gas_used: header.execution.gas_used,
timestamp: header.execution.timestamp,
extra_data: header.execution.extra_data,
base_fee_per_gas: header.execution.base_fee_per_gas,
block_hash: header.execution.block_hash,
transactions_root: header.execution.transactions_root,
withdrawals_root: header.execution.withdrawals_root)
return hash_tree_root(execution_header)
ZERO_HASH
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#modified-is_valid_light_client_header
func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool =
let epoch = header.beacon.slot.epoch
# [New in Electra:EIP6110:EIP7002:EIP7251]
if epoch < cfg.ELECTRA_FORK_EPOCH:
if not header.execution.deposit_requests_root.isZero or
not header.execution.withdrawal_requests_root.isZero or
not header.execution.consolidation_requests_root.isZero:
return false
if epoch < cfg.DENEB_FORK_EPOCH:
if header.execution.blob_gas_used != 0 or
header.execution.excess_blob_gas != 0:
return false
if epoch < cfg.CAPELLA_FORK_EPOCH:
return
header.execution == default(ExecutionPayloadHeader) and
header.execution_branch == default(ExecutionBranch)
is_valid_merkle_branch(
get_lc_execution_root(header, cfg),
header.execution_branch,
log2trunc(EXECUTION_PAYLOAD_GINDEX),
get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root)
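The validity check above follows the usual fork-gating pattern: fields that only exist from a given fork onward must still be zero in headers dated before that fork. A minimal stand-alone sketch of the same pattern with local stand-in types and made-up fork epochs (not the nimbus definitions):

```nim
type ToyHeader = object
  epoch: int
  blobGasUsed: uint64            # introduced in Deneb
  depositRequestsRoot: string    # introduced in Electra; "" means unset

const
  denebEpoch = 10
  electraEpoch = 20

func isValidToyHeader(h: ToyHeader): bool =
  if h.epoch < electraEpoch and h.depositRequestsRoot.len > 0:
    return false                 # Electra-only field set before Electra
  if h.epoch < denebEpoch and h.blobGasUsed != 0:
    return false                 # Deneb-only field set before Deneb
  true

when isMainModule:
  doAssert isValidToyHeader(ToyHeader(epoch: 5))
  doAssert not isValidToyHeader(
    ToyHeader(epoch: 15, depositRequestsRoot: "0xabc"))
```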
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#normalize_merkle_branch
func normalize_merkle_branch*[N](
branch: array[N, Eth2Digest],
gindex: static GeneralizedIndex): auto =
const depth = log2trunc(gindex)
var res: array[depth, Eth2Digest]
when depth >= branch.len:
const num_extra = depth - branch.len
res[num_extra ..< depth] = branch
else:
const num_extra = branch.len - depth
for node in branch[0 ..< num_extra]:
doAssert node.isZero, "Truncation of Merkle branch cannot lose info"
res[0 ..< depth] = branch[num_extra ..< branch.len]
res
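A usage sketch of the normalization above on plain `seq[string]`, so it runs stand-alone: the Electra light-client containers deepen the finality branch from 6 to 7 nodes (gindex 105 to 169), so upgrading stored Deneb data prepends one zero node, while truncation is only legal when the dropped front nodes are zero.

```nim
func normalizeBranchToy(branch: seq[string], targetDepth: int): seq[string] =
  if targetDepth >= branch.len:
    # pad at the front with zero nodes, keep the original branch at the back
    result = newSeq[string](targetDepth - branch.len)
    for i in 0 ..< result.len: result[i] = "zero"
    result.add branch
  else:
    # only drop front nodes that carry no information
    for node in branch[0 ..< branch.len - targetDepth]:
      doAssert node == "zero", "Truncation of Merkle branch cannot lose info"
    result = branch[branch.len - targetDepth .. ^1]

when isMainModule:
  let denebFinalityBranch = @["a", "b", "c", "d", "e", "f"]   # 6 nodes
  doAssert normalizeBranchToy(denebFinalityBranch, 7) ==
    @["zero", "a", "b", "c", "d", "e", "f"]
```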
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_electra*(
pre: deneb.LightClientHeader): LightClientHeader =
LightClientHeader(
beacon: pre.beacon,
execution: ExecutionPayloadHeader(
parent_hash: pre.execution.parent_hash,
fee_recipient: pre.execution.fee_recipient,
state_root: pre.execution.state_root,
receipts_root: pre.execution.receipts_root,
logs_bloom: pre.execution.logs_bloom,
prev_randao: pre.execution.prev_randao,
block_number: pre.execution.block_number,
gas_limit: pre.execution.gas_limit,
gas_used: pre.execution.gas_used,
timestamp: pre.execution.timestamp,
extra_data: pre.execution.extra_data,
base_fee_per_gas: pre.execution.base_fee_per_gas,
block_hash: pre.execution.block_hash,
transactions_root: pre.execution.transactions_root,
withdrawals_root: pre.execution.withdrawals_root,
blob_gas_used: pre.execution.blob_gas_used,
excess_blob_gas: pre.execution.excess_blob_gas,
deposit_requests_root: ZERO_HASH, # [New in Electra:EIP6110]
withdrawal_requests_root: ZERO_HASH, # [New in Electra:EIP7002:EIP7251]
consolidation_requests_root: ZERO_HASH), # [New in Electra:EIP7251]
execution_branch: pre.execution_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_electra*(
pre: deneb.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap(
header: upgrade_lc_header_to_electra(pre.header),
current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: normalize_merkle_branch(
pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_electra*(
pre: deneb.LightClientUpdate): LightClientUpdate =
LightClientUpdate(
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
next_sync_committee: pre.next_sync_committee,
next_sync_committee_branch: normalize_merkle_branch(
pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX),
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
finality_branch: normalize_merkle_branch(
pre.finality_branch, FINALIZED_ROOT_GINDEX),
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_electra*(
pre: deneb.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate(
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
finality_branch: normalize_merkle_branch(
pre.finality_branch, FINALIZED_ROOT_GINDEX),
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_electra*(
pre: deneb.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate(
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
func shortLog*(v: LightClientHeader): auto =
(
beacon: shortLog(v.beacon),
execution: (
block_hash: v.execution.block_hash,
block_number: v.execution.block_number)
)
func shortLog*(v: LightClientBootstrap): auto =
(
header: shortLog(v.header)
)
func shortLog*(v: LightClientUpdate): auto =
(
attested: shortLog(v.attested_header),
has_next_sync_committee:
v.next_sync_committee != default(typeof(v.next_sync_committee)),
finalized: shortLog(v.finalized_header),
num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot
)
func shortLog*(v: LightClientFinalityUpdate): auto =
(
attested: shortLog(v.attested_header),
finalized: shortLog(v.finalized_header),
num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot
)
func shortLog*(v: LightClientOptimisticUpdate): auto =
(
attested: shortLog(v.attested_header),
num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot,
)
chronicles.formatIt LightClientBootstrap: shortLog(it)
chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_electra*(
pre: deneb.LightClientStore): LightClientStore =
let best_valid_update =
if pre.best_valid_update.isNone:
Opt.none(LightClientUpdate)
else:
Opt.some upgrade_lc_update_to_electra(pre.best_valid_update.get)
LightClientStore(
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
current_sync_committee: pre.current_sync_committee,
next_sync_committee: pre.next_sync_committee,
best_valid_update: best_valid_update,
optimistic_header: upgrade_lc_header_to_electra(pre.optimistic_header),
previous_max_active_participants: pre.previous_max_active_participants,
current_max_active_participants: pre.current_max_active_participants)
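The store upgrade above maps the optional `best_valid_update` through the per-fork converter. A tiny stand-alone sketch of that `Opt` pattern, assuming the `results` package already used elsewhere in the codebase, with plain ints in place of light-client updates:

```nim
import results

func upgradeToy(x: int): int = x + 1

func upgradeOptToy(pre: Opt[int]): Opt[int] =
  if pre.isNone:
    Opt.none(int)
  else:
    Opt.some upgradeToy(pre.get)

when isMainModule:
  doAssert upgradeOptToy(Opt.none(int)).isNone
  doAssert upgradeOptToy(Opt.some(41)).get == 42
```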
template asSigned*( template asSigned*(
x: SigVerifiedSignedBeaconBlock | x: SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock | MsgTrustedSignedBeaconBlock |
@ -66,10 +66,13 @@ type
withdrawals*: Opt[List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]] withdrawals*: Opt[List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]]
blob_gas_used*: Opt[uint64] blob_gas_used*: Opt[uint64]
excess_blob_gas*: Opt[uint64] excess_blob_gas*: Opt[uint64]
deposit_receipts*: Opt[List[DepositReceipt, deposit_requests*:
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD]] Opt[List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD]]
withdrawal_requests*: Opt[List[ExecutionLayerWithdrawalRequest, withdrawal_requests*:
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]] Opt[List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]]
consolidation_requests*:
Opt[List[ConsolidationRequest,
Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD]]
StableExecutionPayloadHeader* {. StableExecutionPayloadHeader* {.
sszStableContainer: MAX_EXECUTION_PAYLOAD_FIELDS.} = object sszStableContainer: MAX_EXECUTION_PAYLOAD_FIELDS.} = object
@ -94,8 +97,9 @@ type
withdrawals_root*: Opt[Eth2Digest] withdrawals_root*: Opt[Eth2Digest]
blob_gas_used*: Opt[uint64] blob_gas_used*: Opt[uint64]
excess_blob_gas*: Opt[uint64] excess_blob_gas*: Opt[uint64]
deposit_receipts_root*: Opt[Eth2Digest] deposit_requests_root*: Opt[Eth2Digest]
withdrawal_requests_root*: Opt[Eth2Digest] withdrawal_requests_root*: Opt[Eth2Digest]
consolidation_requests_root*: Opt[Eth2Digest]
StableBeaconBlockBody* {. StableBeaconBlockBody* {.
sszStableContainer: MAX_BEACON_BLOCK_BODY_FIELDS.} = object sszStableContainer: MAX_BEACON_BLOCK_BODY_FIELDS.} = object
@ -121,7 +125,6 @@ type
execution_payload*: Opt[StableExecutionPayload] execution_payload*: Opt[StableExecutionPayload]
bls_to_execution_changes*: Opt[SignedBLSToExecutionChangeList] bls_to_execution_changes*: Opt[SignedBLSToExecutionChangeList]
blob_kzg_commitments*: Opt[KzgCommitments] blob_kzg_commitments*: Opt[KzgCommitments]
consolidations*: Opt[List[SignedConsolidation, Limit MAX_CONSOLIDATIONS]]
StableBeaconState* {.sszStableContainer: MAX_BEACON_STATE_FIELDS.} = object StableBeaconState* {.sszStableContainer: MAX_BEACON_STATE_FIELDS.} = object
# Versioning # Versioning
@ -189,7 +192,7 @@ type
historical_summaries*: historical_summaries*:
Opt[HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT]] Opt[HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT]]
deposit_receipts_start_index*: Opt[uint64] deposit_requests_start_index*: Opt[uint64]
deposit_balance_to_consume*: Opt[Gwei] deposit_balance_to_consume*: Opt[Gwei]
exit_balance_to_consume*: Opt[Gwei] exit_balance_to_consume*: Opt[Gwei]
earliest_exit_epoch*: Opt[Epoch] earliest_exit_epoch*: Opt[Epoch]
@ -51,7 +51,7 @@ RestJson.useDefaultSerializationFor(
BlobSidecarInfoObject, BlobSidecarInfoObject,
BlobsBundle, BlobsBundle,
Checkpoint, Checkpoint,
Consolidation, ConsolidationRequest,
ContributionAndProof, ContributionAndProof,
DataEnclosedObject, DataEnclosedObject,
DataMetaEnclosedObject, DataMetaEnclosedObject,
@ -65,14 +65,13 @@ RestJson.useDefaultSerializationFor(
DenebSignedBlockContents, DenebSignedBlockContents,
Deposit, Deposit,
DepositData, DepositData,
DepositReceipt, DepositRequest,
DepositTreeSnapshot, DepositTreeSnapshot,
DistributedKeystoreInfo, DistributedKeystoreInfo,
ElectraSignedBlockContents, ElectraSignedBlockContents,
EmptyBody, EmptyBody,
Eth1Data, Eth1Data,
EventBeaconBlockObject, EventBeaconBlockObject,
ExecutionLayerWithdrawalRequest,
Fork, Fork,
GetBlockAttestationsResponse, GetBlockAttestationsResponse,
GetBlockHeaderResponse, GetBlockHeaderResponse,
@ -169,7 +168,6 @@ RestJson.useDefaultSerializationFor(
SetGraffitiRequest, SetGraffitiRequest,
SignedBLSToExecutionChange, SignedBLSToExecutionChange,
SignedBeaconBlockHeader, SignedBeaconBlockHeader,
SignedConsolidation,
SignedContributionAndProof, SignedContributionAndProof,
SignedValidatorRegistrationV1, SignedValidatorRegistrationV1,
SignedVoluntaryExit, SignedVoluntaryExit,
@ -194,6 +192,7 @@ RestJson.useDefaultSerializationFor(
Web3SignerSyncCommitteeMessageData, Web3SignerSyncCommitteeMessageData,
Web3SignerValidatorRegistration, Web3SignerValidatorRegistration,
Withdrawal, Withdrawal,
WithdrawalRequest,
altair.BeaconBlock, altair.BeaconBlock,
altair.BeaconBlockBody, altair.BeaconBlockBody,
altair.BeaconState, altair.BeaconState,
@ -253,6 +252,11 @@ RestJson.useDefaultSerializationFor(
electra.ExecutionPayload, electra.ExecutionPayload,
electra.ExecutionPayloadHeader, electra.ExecutionPayloadHeader,
electra.IndexedAttestation, electra.IndexedAttestation,
electra.LightClientBootstrap,
electra.LightClientFinalityUpdate,
electra.LightClientHeader,
electra.LightClientOptimisticUpdate,
electra.LightClientUpdate,
electra.SignedBeaconBlock, electra.SignedBeaconBlock,
electra.TrustedAttestation, electra.TrustedAttestation,
electra_mev.BlindedBeaconBlock, electra_mev.BlindedBeaconBlock,

View File

@ -292,7 +292,7 @@ type
RestWithdrawalPrefix* = distinct array[1, byte] RestWithdrawalPrefix* = distinct array[1, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#executionpayload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#executionpayload
RestExecutionPayload* = object RestExecutionPayload* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest

View File

@ -1325,8 +1325,10 @@ func forkVersion*(cfg: RuntimeConfig, consensusFork: ConsensusFork): Version =
func lcDataForkAtConsensusFork*( func lcDataForkAtConsensusFork*(
consensusFork: ConsensusFork): LightClientDataFork = consensusFork: ConsensusFork): LightClientDataFork =
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
if consensusFork >= ConsensusFork.Deneb: if consensusFork >= ConsensusFork.Electra:
LightClientDataFork.Electra
elif consensusFork >= ConsensusFork.Deneb:
LightClientDataFork.Deneb LightClientDataFork.Deneb
elif consensusFork >= ConsensusFork.Capella: elif consensusFork >= ConsensusFork.Capella:
LightClientDataFork.Capella LightClientDataFork.Capella
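
The Electra arm above extends the cascade by one step: any consensus fork at or after Electra now maps to Electra light-client data, while Bellatrix (which introduced no new light-client format) keeps falling through to Altair. A standalone sketch of the same cascade with toy enums (not the repository's types):

type
  ConsensusForkToy = enum Phase0, Altair, Bellatrix, Capella, Deneb, Electra
  LcDataForkToy = enum NoneLc, AltairLc, CapellaLc, DenebLc, ElectraLc

func lcDataForkAt(fork: ConsensusForkToy): LcDataForkToy =
  # highest light-client data fork activated at or before `fork`
  if fork >= Electra: ElectraLc
  elif fork >= Deneb: DenebLc
  elif fork >= Capella: CapellaLc
  elif fork >= Altair: AltairLc
  else: NoneLc

when isMainModule:
  doAssert lcDataForkAt(Bellatrix) == AltairLc  # no dedicated Bellatrix format
  doAssert lcDataForkAt(Electra) == ElectraLc
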

View File

@ -16,32 +16,42 @@ type
None = 0, # only use non-0 in DB to detect accidentally uninitialized data None = 0, # only use non-0 in DB to detect accidentally uninitialized data
Altair = 1, Altair = 1,
Capella = 2, Capella = 2,
Deneb = 3 Deneb = 3,
Electra = 4
ForkyCurrentSyncCommitteeBranch* =
altair.CurrentSyncCommitteeBranch |
electra.CurrentSyncCommitteeBranch
ForkyLightClientHeader* = ForkyLightClientHeader* =
altair.LightClientHeader | altair.LightClientHeader |
capella.LightClientHeader | capella.LightClientHeader |
deneb.LightClientHeader deneb.LightClientHeader |
electra.LightClientHeader
ForkyLightClientBootstrap* = ForkyLightClientBootstrap* =
altair.LightClientBootstrap | altair.LightClientBootstrap |
capella.LightClientBootstrap | capella.LightClientBootstrap |
deneb.LightClientBootstrap deneb.LightClientBootstrap |
electra.LightClientBootstrap
ForkyLightClientUpdate* = ForkyLightClientUpdate* =
altair.LightClientUpdate | altair.LightClientUpdate |
capella.LightClientUpdate | capella.LightClientUpdate |
deneb.LightClientUpdate deneb.LightClientUpdate |
electra.LightClientUpdate
ForkyLightClientFinalityUpdate* = ForkyLightClientFinalityUpdate* =
altair.LightClientFinalityUpdate | altair.LightClientFinalityUpdate |
capella.LightClientFinalityUpdate | capella.LightClientFinalityUpdate |
deneb.LightClientFinalityUpdate deneb.LightClientFinalityUpdate |
electra.LightClientFinalityUpdate
ForkyLightClientOptimisticUpdate* = ForkyLightClientOptimisticUpdate* =
altair.LightClientOptimisticUpdate | altair.LightClientOptimisticUpdate |
capella.LightClientOptimisticUpdate | capella.LightClientOptimisticUpdate |
deneb.LightClientOptimisticUpdate deneb.LightClientOptimisticUpdate |
electra.LightClientOptimisticUpdate
SomeForkyLightClientUpdateWithSyncCommittee* = SomeForkyLightClientUpdateWithSyncCommittee* =
ForkyLightClientUpdate ForkyLightClientUpdate
@ -62,7 +72,8 @@ type
ForkyLightClientStore* = ForkyLightClientStore* =
altair.LightClientStore | altair.LightClientStore |
capella.LightClientStore | capella.LightClientStore |
deneb.LightClientStore deneb.LightClientStore |
electra.LightClientStore
ForkedLightClientHeader* = object ForkedLightClientHeader* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -74,6 +85,8 @@ type
capellaData*: capella.LightClientHeader capellaData*: capella.LightClientHeader
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientHeader denebData*: deneb.LightClientHeader
of LightClientDataFork.Electra:
electraData*: electra.LightClientHeader
ForkedLightClientBootstrap* = object ForkedLightClientBootstrap* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -85,6 +98,8 @@ type
capellaData*: capella.LightClientBootstrap capellaData*: capella.LightClientBootstrap
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientBootstrap denebData*: deneb.LightClientBootstrap
of LightClientDataFork.Electra:
electraData*: electra.LightClientBootstrap
ForkedLightClientUpdate* = object ForkedLightClientUpdate* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -96,6 +111,8 @@ type
capellaData*: capella.LightClientUpdate capellaData*: capella.LightClientUpdate
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientUpdate denebData*: deneb.LightClientUpdate
of LightClientDataFork.Electra:
electraData*: electra.LightClientUpdate
ForkedLightClientFinalityUpdate* = object ForkedLightClientFinalityUpdate* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -107,6 +124,8 @@ type
capellaData*: capella.LightClientFinalityUpdate capellaData*: capella.LightClientFinalityUpdate
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientFinalityUpdate denebData*: deneb.LightClientFinalityUpdate
of LightClientDataFork.Electra:
electraData*: electra.LightClientFinalityUpdate
ForkedLightClientOptimisticUpdate* = object ForkedLightClientOptimisticUpdate* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -118,6 +137,8 @@ type
capellaData*: capella.LightClientOptimisticUpdate capellaData*: capella.LightClientOptimisticUpdate
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientOptimisticUpdate denebData*: deneb.LightClientOptimisticUpdate
of LightClientDataFork.Electra:
electraData*: electra.LightClientOptimisticUpdate
SomeForkedLightClientUpdateWithSyncCommittee* = SomeForkedLightClientUpdateWithSyncCommittee* =
ForkedLightClientUpdate ForkedLightClientUpdate
@ -145,11 +166,15 @@ type
capellaData*: capella.LightClientStore capellaData*: capella.LightClientStore
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientStore denebData*: deneb.LightClientStore
of LightClientDataFork.Electra:
electraData*: electra.LightClientStore
func lcDataForkAtEpoch*( func lcDataForkAtEpoch*(
cfg: RuntimeConfig, epoch: Epoch): LightClientDataFork = cfg: RuntimeConfig, epoch: Epoch): LightClientDataFork =
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
if epoch >= cfg.DENEB_FORK_EPOCH: if epoch >= cfg.ELECTRA_FORK_EPOCH:
LightClientDataFork.Electra
elif epoch >= cfg.DENEB_FORK_EPOCH:
LightClientDataFork.Deneb LightClientDataFork.Deneb
elif epoch >= cfg.CAPELLA_FORK_EPOCH: elif epoch >= cfg.CAPELLA_FORK_EPOCH:
LightClientDataFork.Capella LightClientDataFork.Capella
@ -188,8 +213,71 @@ template kind*(
deneb.LightClientStore]): LightClientDataFork = deneb.LightClientStore]): LightClientDataFork =
LightClientDataFork.Deneb LightClientDataFork.Deneb
template kind*(
x: typedesc[ # `SomeLightClientObject` doesn't work here (Nim 1.6)
electra.LightClientHeader |
electra.LightClientBootstrap |
electra.LightClientUpdate |
electra.LightClientFinalityUpdate |
electra.LightClientOptimisticUpdate |
electra.LightClientStore]): LightClientDataFork =
LightClientDataFork.Electra
template FINALIZED_ROOT_GINDEX*(
kind: static LightClientDataFork): GeneralizedIndex =
when kind >= LightClientDataFork.Electra:
electra.FINALIZED_ROOT_GINDEX
elif kind >= LightClientDataFork.Altair:
altair.FINALIZED_ROOT_GINDEX
else:
static: raiseAssert "Unreachable"
template FinalityBranch*(kind: static LightClientDataFork): auto =
when kind >= LightClientDataFork.Electra:
typedesc[electra.FinalityBranch]
elif kind >= LightClientDataFork.Altair:
typedesc[altair.FinalityBranch]
else:
static: raiseAssert "Unreachable"
template CURRENT_SYNC_COMMITTEE_GINDEX*(
kind: static LightClientDataFork): GeneralizedIndex =
when kind >= LightClientDataFork.Electra:
electra.CURRENT_SYNC_COMMITTEE_GINDEX
elif kind >= LightClientDataFork.Altair:
altair.CURRENT_SYNC_COMMITTEE_GINDEX
else:
static: raiseAssert "Unreachable"
template CurrentSyncCommitteeBranch*(kind: static LightClientDataFork): auto =
when kind >= LightClientDataFork.Electra:
typedesc[electra.CurrentSyncCommitteeBranch]
elif kind >= LightClientDataFork.Altair:
typedesc[altair.CurrentSyncCommitteeBranch]
else:
static: raiseAssert "Unreachable"
template NEXT_SYNC_COMMITTEE_GINDEX*(
kind: static LightClientDataFork): GeneralizedIndex =
when kind >= LightClientDataFork.Electra:
electra.NEXT_SYNC_COMMITTEE_GINDEX
elif kind >= LightClientDataFork.Altair:
altair.NEXT_SYNC_COMMITTEE_GINDEX
else:
static: raiseAssert "Unreachable"
template NextSyncCommitteeBranch*(kind: static LightClientDataFork): auto =
when kind >= LightClientDataFork.Electra:
typedesc[electra.NextSyncCommitteeBranch]
elif kind >= LightClientDataFork.Altair:
typedesc[altair.NextSyncCommitteeBranch]
else:
static: raiseAssert "Unreachable"
template LightClientHeader*(kind: static LightClientDataFork): auto = template LightClientHeader*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientHeader]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientHeader] typedesc[deneb.LightClientHeader]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientHeader] typedesc[capella.LightClientHeader]
@ -199,7 +287,9 @@ template LightClientHeader*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientBootstrap*(kind: static LightClientDataFork): auto = template LightClientBootstrap*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientBootstrap]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientBootstrap] typedesc[deneb.LightClientBootstrap]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientBootstrap] typedesc[capella.LightClientBootstrap]
@ -209,7 +299,9 @@ template LightClientBootstrap*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientUpdate*(kind: static LightClientDataFork): auto = template LightClientUpdate*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientUpdate]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientUpdate] typedesc[deneb.LightClientUpdate]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientUpdate] typedesc[capella.LightClientUpdate]
@ -219,7 +311,9 @@ template LightClientUpdate*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientFinalityUpdate*(kind: static LightClientDataFork): auto = template LightClientFinalityUpdate*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientFinalityUpdate]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientFinalityUpdate] typedesc[deneb.LightClientFinalityUpdate]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientFinalityUpdate] typedesc[capella.LightClientFinalityUpdate]
@ -229,7 +323,9 @@ template LightClientFinalityUpdate*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientOptimisticUpdate*(kind: static LightClientDataFork): auto = template LightClientOptimisticUpdate*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientOptimisticUpdate]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientOptimisticUpdate] typedesc[deneb.LightClientOptimisticUpdate]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientOptimisticUpdate] typedesc[capella.LightClientOptimisticUpdate]
@ -239,7 +335,9 @@ template LightClientOptimisticUpdate*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientStore*(kind: static LightClientDataFork): auto = template LightClientStore*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientStore]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientStore] typedesc[deneb.LightClientStore]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientStore] typedesc[capella.LightClientStore]
@ -298,7 +396,10 @@ template Forked*(x: typedesc[ForkyLightClientStore]): auto =
template withAll*( template withAll*(
x: typedesc[LightClientDataFork], body: untyped): untyped = x: typedesc[LightClientDataFork], body: untyped): untyped =
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
block:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
body
block: block:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
body body
@ -315,6 +416,9 @@ template withAll*(
template withLcDataFork*( template withLcDataFork*(
x: LightClientDataFork, body: untyped): untyped = x: LightClientDataFork, body: untyped): untyped =
case x case x
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
body body
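
`withLcDataFork` converts a run-time `LightClientDataFork` value into a `static` constant inside the injected body, so the body can branch with `when`; the Electra arm above simply adds one more case. A minimal standalone sketch of the idiom with a toy enum (not the real dispatcher):

type DataForkToy = enum NoneFork, AltairFork, ElectraFork

template withDataFork(x: DataForkToy, body: untyped): untyped =
  case x
  of ElectraFork:
    const dataFork {.inject, used.} = ElectraFork
    body
  of AltairFork:
    const dataFork {.inject, used.} = AltairFork
    body
  of NoneFork:
    const dataFork {.inject, used.} = NoneFork
    body

when isMainModule:
  let runtimeKind = ElectraFork     # only known at run time
  withDataFork(runtimeKind):
    when dataFork >= AltairFork:    # resolved at compile time per branch
      echo "light client data fork: ", dataFork
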
@ -331,6 +435,10 @@ template withLcDataFork*(
template withForkyHeader*( template withForkyHeader*(
x: ForkedLightClientHeader, body: untyped): untyped = x: ForkedLightClientHeader, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyHeader: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyHeader: untyped {.inject, used.} = x.denebData template forkyHeader: untyped {.inject, used.} = x.denebData
@ -350,6 +458,10 @@ template withForkyHeader*(
template withForkyBootstrap*( template withForkyBootstrap*(
x: ForkedLightClientBootstrap, body: untyped): untyped = x: ForkedLightClientBootstrap, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyBootstrap: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyBootstrap: untyped {.inject, used.} = x.denebData template forkyBootstrap: untyped {.inject, used.} = x.denebData
@ -369,6 +481,10 @@ template withForkyBootstrap*(
template withForkyUpdate*( template withForkyUpdate*(
x: ForkedLightClientUpdate, body: untyped): untyped = x: ForkedLightClientUpdate, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyUpdate: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyUpdate: untyped {.inject, used.} = x.denebData template forkyUpdate: untyped {.inject, used.} = x.denebData
@ -388,6 +504,10 @@ template withForkyUpdate*(
template withForkyFinalityUpdate*( template withForkyFinalityUpdate*(
x: ForkedLightClientFinalityUpdate, body: untyped): untyped = x: ForkedLightClientFinalityUpdate, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyFinalityUpdate: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyFinalityUpdate: untyped {.inject, used.} = x.denebData template forkyFinalityUpdate: untyped {.inject, used.} = x.denebData
@ -407,6 +527,10 @@ template withForkyFinalityUpdate*(
template withForkyOptimisticUpdate*( template withForkyOptimisticUpdate*(
x: ForkedLightClientOptimisticUpdate, body: untyped): untyped = x: ForkedLightClientOptimisticUpdate, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyOptimisticUpdate: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyOptimisticUpdate: untyped {.inject, used.} = x.denebData template forkyOptimisticUpdate: untyped {.inject, used.} = x.denebData
@ -426,6 +550,10 @@ template withForkyOptimisticUpdate*(
template withForkyObject*( template withForkyObject*(
x: SomeForkedLightClientObject, body: untyped): untyped = x: SomeForkedLightClientObject, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyObject: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyObject: untyped {.inject, used.} = x.denebData template forkyObject: untyped {.inject, used.} = x.denebData
@ -445,6 +573,10 @@ template withForkyObject*(
template withForkyStore*( template withForkyStore*(
x: ForkedLightClientStore, body: untyped): untyped = x: ForkedLightClientStore, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyStore: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyStore: untyped {.inject, used.} = x.denebData template forkyStore: untyped {.inject, used.} = x.denebData
@ -473,7 +605,9 @@ func init*(
type ResultType = typeof(forkyData).Forked type ResultType = typeof(forkyData).Forked
static: doAssert ResultType is x static: doAssert ResultType is x
const kind = typeof(forkyData).kind const kind = typeof(forkyData).kind
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
ResultType(kind: kind, electraData: forkyData)
elif kind == LightClientDataFork.Deneb:
ResultType(kind: kind, denebData: forkyData) ResultType(kind: kind, denebData: forkyData)
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
ResultType(kind: kind, capellaData: forkyData) ResultType(kind: kind, capellaData: forkyData)
@ -488,7 +622,9 @@ template forky*(
SomeForkedLightClientObject | SomeForkedLightClientObject |
ForkedLightClientStore, ForkedLightClientStore,
kind: static LightClientDataFork): untyped = kind: static LightClientDataFork): untyped =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
x.electraData
elif kind == LightClientDataFork.Deneb:
x.denebData x.denebData
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
x.capellaData x.capellaData
@ -641,7 +777,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_header_to_deneb( denebData: upgrade_lc_header_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientHeader(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_header_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
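
Each `migrateToDataFork` overload gains one more `when newKind >= Electra` step, so an object is upgraded one fork at a time (Altair, then Capella, then Deneb, then Electra) and the trailing `doAssert` still guarantees the requested kind was reached. A standalone sketch of the chained idiom on a toy variant object (three made-up forks, not the real types):

type
  ForkToy = enum A, B, C
  ForkedToy = object
    case kind: ForkToy
    of A: aData: int
    of B: bData: int
    of C: cData: int

func upgradeAToB(x: int): int = x
func upgradeBToC(x: int): int = x

func migrateToDataFork(x: var ForkedToy, newKind: static ForkToy) =
  when newKind >= B:            # upgrade A -> B first, if needed
    if x.kind == A:
      x = ForkedToy(kind: B, bData: upgradeAToB(x.aData))
  when newKind >= C:            # then B -> C
    if x.kind == B:
      x = ForkedToy(kind: C, cData: upgradeBToC(x.bData))
  doAssert x.kind == newKind    # migration only moves forward

when isMainModule:
  var x = ForkedToy(kind: A, aData: 42)
  x.migrateToDataFork(C)
  doAssert x.kind == C and x.cData == 42
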
func migrateToDataFork*( func migrateToDataFork*(
@ -676,7 +820,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_bootstrap_to_deneb( denebData: upgrade_lc_bootstrap_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientBootstrap(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_bootstrap_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -711,7 +863,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_update_to_deneb( denebData: upgrade_lc_update_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientUpdate(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_update_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -746,7 +906,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_finality_update_to_deneb( denebData: upgrade_lc_finality_update_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientFinalityUpdate(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_finality_update_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -781,7 +949,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_optimistic_update_to_deneb( denebData: upgrade_lc_optimistic_update_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientOptimisticUpdate(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_optimistic_update_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -816,7 +992,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_store_to_deneb( denebData: upgrade_lc_store_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientStore(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_store_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migratingToDataFork*[ func migratingToDataFork*[
@ -951,6 +1135,108 @@ func toDenebLightClientHeader(
execution_branch: blck.message.body.build_proof( execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_GINDEX).get) capella.EXECUTION_PAYLOAD_GINDEX).get)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/full-node.md#modified-block_to_light_client_header
func toElectraLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
): electra.LightClientHeader =
# Note that during fork transitions, `finalized_header` may still
# point to earlier forks. While Bellatrix blocks also contain an
# `ExecutionPayload` (minus `withdrawals_root`), it was not included
# in the corresponding light client data. To ensure compatibility
# with legacy data going through `upgrade_lc_header_to_capella`,
# leave out execution data.
electra.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader())
func toElectraLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock
): electra.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload
electra.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader(),
execution: electra.ExecutionPayloadHeader(
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions_root: hash_tree_root(payload.transactions),
withdrawals_root: hash_tree_root(payload.withdrawals)),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_GINDEX).get)
func toElectraLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock
): electra.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload
electra.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader(),
execution: electra.ExecutionPayloadHeader(
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions_root: hash_tree_root(payload.transactions),
withdrawals_root: hash_tree_root(payload.withdrawals),
blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_GINDEX).get)
func toElectraLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock
): electra.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload
electra.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader(),
execution: electra.ExecutionPayloadHeader(
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions_root: hash_tree_root(payload.transactions),
withdrawals_root: hash_tree_root(payload.withdrawals),
blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas,
deposit_requests_root: hash_tree_root(payload.deposit_requests),
withdrawal_requests_root: hash_tree_root(payload.withdrawal_requests),
consolidation_requests_root:
hash_tree_root(payload.consolidation_requests)),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_GINDEX).get)
func toLightClientHeader*( func toLightClientHeader*(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6) blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock | phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
@ -960,9 +1246,8 @@ func toLightClientHeader*(
deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock | deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock |
electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock, electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock,
kind: static LightClientDataFork): auto = kind: static LightClientDataFork): auto =
when blck is electra.SignedBeaconBlock or blck is electra.TrustedSignedBeaconBlock: when kind == LightClientDataFork.Electra:
debugComment "toLightClientHeader electra missing" blck.toElectraLightClientHeader()
default(deneb.LightClientHeader)
elif kind == LightClientDataFork.Deneb: elif kind == LightClientDataFork.Deneb:
blck.toDenebLightClientHeader() blck.toDenebLightClientHeader()
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
@ -990,9 +1275,13 @@ func shortLog*[
capellaData: typeof(x.capellaData.shortLog()) capellaData: typeof(x.capellaData.shortLog())
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData: typeof(x.denebData.shortLog()) denebData: typeof(x.denebData.shortLog())
of LightClientDataFork.Electra:
electraData: typeof(x.electraData.shortLog())
let xKind = x.kind # Nim 1.6.12: Using `kind: x.kind` inside case is broken let xKind = x.kind # Nim 1.6.12: Using `kind: x.kind` inside case is broken
case xKind case xKind
of LightClientDataFork.Electra:
ResultType(kind: xKind, electraData: x.electraData.shortLog())
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
ResultType(kind: xKind, denebData: x.denebData.shortLog()) ResultType(kind: xKind, denebData: x.denebData.shortLog())
of LightClientDataFork.Capella: of LightClientDataFork.Capella:

View File

@ -25,7 +25,7 @@ import
export export
eth2_merkleization, forks, rlp, ssz_codec eth2_merkleization, forks, rlp, ssz_codec
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#constants # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#constants
const ETH_TO_GWEI = 1_000_000_000.Gwei const ETH_TO_GWEI = 1_000_000_000.Gwei
func toEther*(gwei: Gwei): Ether = func toEther*(gwei: Gwei): Ether =
@ -159,7 +159,7 @@ func compute_domain*(
result[0..3] = domain_type.data result[0..3] = domain_type.data
result[4..31] = fork_data_root.data.toOpenArray(0, 27) result[4..31] = fork_data_root.data.toOpenArray(0, 27)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_domain # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_domain
func get_domain*( func get_domain*(
fork: Fork, fork: Fork,
domain_type: DomainType, domain_type: DomainType,
@ -255,7 +255,7 @@ func create_blob_sidecars*(
res.add(sidecar) res.add(sidecar)
res res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#is_sync_committee_update # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool = template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool =
when update is SomeForkyLightClientUpdateWithSyncCommittee: when update is SomeForkyLightClientUpdateWithSyncCommittee:
update.next_sync_committee_branch != update.next_sync_committee_branch !=
@ -271,7 +271,7 @@ template is_finality_update*(update: SomeForkyLightClientUpdate): bool =
else: else:
false false
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
template is_next_sync_committee_known*(store: ForkyLightClientStore): bool = template is_next_sync_committee_known*(store: ForkyLightClientStore): bool =
store.next_sync_committee != store.next_sync_committee !=
static(default(typeof(store.next_sync_committee))) static(default(typeof(store.next_sync_committee)))
@ -384,7 +384,7 @@ func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch =
func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch = func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch =
update.attested_header.beacon.slot.epoch update.attested_header.beacon.slot.epoch
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#is_merge_transition_complete # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
func is_merge_transition_complete*( func is_merge_transition_complete*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState): bool = electra.BeaconState): bool =
@ -392,7 +392,7 @@ func is_merge_transition_complete*(
default(typeof(state.latest_execution_payload_header)) default(typeof(state.latest_execution_payload_header))
state.latest_execution_payload_header != defaultExecutionPayloadHeader state.latest_execution_payload_header != defaultExecutionPayloadHeader
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/sync/optimistic.md#helpers # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#helpers
func is_execution_block*(blck: SomeForkyBeaconBlock): bool = func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
when typeof(blck).kind >= ConsensusFork.Bellatrix: when typeof(blck).kind >= ConsensusFork.Bellatrix:
const defaultExecutionPayload = const defaultExecutionPayload =
@ -401,7 +401,7 @@ func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
else: else:
false false
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#is_merge_transition_block # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_block
func is_merge_transition_block( func is_merge_transition_block(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState, electra.BeaconState,
@ -417,7 +417,7 @@ func is_merge_transition_block(
not is_merge_transition_complete(state) and not is_merge_transition_complete(state) and
body.execution_payload != defaultExecutionPayload body.execution_payload != defaultExecutionPayload
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#is_execution_enabled # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_execution_enabled
func is_execution_enabled*( func is_execution_enabled*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState, electra.BeaconState,
@ -431,7 +431,7 @@ func is_execution_enabled*(
electra.SigVerifiedBeaconBlockBody): bool = electra.SigVerifiedBeaconBlockBody): bool =
is_merge_transition_block(state, body) or is_merge_transition_complete(state) is_merge_transition_block(state, body) or is_merge_transition_complete(state)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 = func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 =
# Note: This function is unsafe with respect to overflows and underflows. # Note: This function is unsafe with respect to overflows and underflows.
let slots_since_genesis = slot - GENESIS_SLOT let slots_since_genesis = slot - GENESIS_SLOT
@ -513,9 +513,9 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
logsBloom : payload.logs_bloom.data, logsBloom : payload.logs_bloom.data,
difficulty : default(DifficultyInt), difficulty : default(DifficultyInt),
number : payload.block_number, number : payload.block_number,
gasLimit : cast[GasInt](payload.gas_limit), gasLimit : GasInt.saturate(payload.gas_limit),
gasUsed : cast[GasInt](payload.gas_used), gasUsed : GasInt.saturate(payload.gas_used),
timestamp : EthTime(int64.saturate payload.timestamp), timestamp : EthTime(payload.timestamp),
extraData : payload.extra_data.asSeq, extraData : payload.extra_data.asSeq,
mixHash : payload.prev_randao, # EIP-4399 `mixHash` -> `prevRandao` mixHash : payload.prev_randao, # EIP-4399 `mixHash` -> `prevRandao`
nonce : default(BlockNonce), nonce : default(BlockNonce),
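
The gas fields above now go through a saturating conversion instead of a raw `cast`, so an execution-payload value that does not fit the signed target clamps at its maximum rather than wrapping into a negative number. A standalone sketch of that behaviour, assuming the target is a signed 64-bit integer and writing the helper out locally (the repository takes its `saturate` from its utility libraries):

func saturate(T: typedesc[int64], x: uint64): int64 =
  # clamp instead of the two's-complement wrap-around a `cast` would produce
  if x > uint64(int64.high): int64.high
  else: int64(x)

when isMainModule:
  doAssert int64.saturate(30_000_000'u64) == 30_000_000'i64
  doAssert int64.saturate(uint64.high) == int64.high
  doAssert cast[int64](uint64.high) == -1   # the behaviour being replaced
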

View File

@ -1380,13 +1380,13 @@ proc createWallet*(kdfKind: KdfKind,
crypto: crypto, crypto: crypto,
nextAccount: nextAccount.get(0)) nextAccount: nextAccount.get(0))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#bls_withdrawal_prefix # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#bls_withdrawal_prefix
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest = func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2digest(k.toRaw()) var bytes = eth2digest(k.toRaw())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8 bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
bytes bytes
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/deposit-contract.md#withdrawal-credentials # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/deposit-contract.md#withdrawal-credentials
func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest = func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
makeWithdrawalCredentials(k.toPubKey()) makeWithdrawalCredentials(k.toPubKey())

View File

@ -15,6 +15,21 @@ import
from ../consensus_object_pools/block_pools_types import VerifierError from ../consensus_object_pools/block_pools_types import VerifierError
export block_pools_types.VerifierError export block_pools_types.VerifierError
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#is_valid_normalized_merkle_branch
func is_valid_normalized_merkle_branch[N](
leaf: Eth2Digest,
branch: array[N, Eth2Digest],
gindex: static GeneralizedIndex,
root: Eth2Digest): bool =
const
depth = log2trunc(gindex)
index = get_subtree_index(gindex)
num_extra = branch.len - depth
for i in 0 ..< num_extra:
if not branch[i].isZero:
return false
is_valid_merkle_branch(leaf, branch[num_extra .. ^1], depth, index, root)
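
`is_valid_normalized_merkle_branch` accepts a branch that is longer than the depth implied by the generalized index: the surplus entries must sit at the front and be all-zero, and only the tail is verified as an ordinary Merkle branch, which lets one fixed branch length serve gindices whose depth differs between forks. A standalone sketch of the arithmetic, with a toy 64-bit "digest" and a toy combiner standing in for Eth2Digest/SHA-256 (numbers are illustrative only):

import std/bitops

type ToyDigest = uint64

func toyCombine(a, b: ToyDigest): ToyDigest =
  # stand-in for hash(left ++ right); NOT a real hash function
  (a * 1099511628211'u64) xor (b + 0x9e3779b97f4a7c15'u64)

func log2trunc(x: uint64): int =
  63 - countLeadingZeroBits(x)          # floor(log2(x)) for x > 0

func isValidNormalizedBranch(
    leaf: ToyDigest, branch: openArray[ToyDigest],
    gindex: uint64, root: ToyDigest): bool =
  let
    depth = log2trunc(gindex)
    index = gindex - (1'u64 shl depth)  # get_subtree_index(gindex)
    numExtra = branch.len - depth
  if numExtra < 0:
    return false
  for i in 0 ..< numExtra:              # surplus entries must be zero padding
    if branch[i] != 0:
      return false
  var value = leaf                      # ordinary Merkle walk over the tail
  for i in 0 ..< depth:
    let sibling = branch[numExtra + i]
    value =
      if ((index shr i) and 1) == 1: toyCombine(sibling, value)
      else: toyCombine(value, sibling)
  value == root

when isMainModule:
  # two-leaf toy tree: leaf at gindex 2, sibling at gindex 3, root at 1;
  # the branch is padded to length 2 although the depth is only 1
  let leaf = 7'u64
  let sibling = 9'u64
  let root = toyCombine(leaf, sibling)
  doAssert isValidNormalizedBranch(leaf, [0'u64, sibling], 2'u64, root)
  doAssert not isValidNormalizedBranch(leaf, [1'u64, sibling], 2'u64, root)
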
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
func initialize_light_client_store*( func initialize_light_client_store*(
trusted_block_root: Eth2Digest, trusted_block_root: Eth2Digest,
@ -29,13 +44,15 @@ func initialize_light_client_store*(
if hash_tree_root(bootstrap.header.beacon) != trusted_block_root: if hash_tree_root(bootstrap.header.beacon) != trusted_block_root:
return ResultType.err(VerifierError.Invalid) return ResultType.err(VerifierError.Invalid)
if not is_valid_merkle_branch( withLcDataFork(lcDataForkAtConsensusFork(
hash_tree_root(bootstrap.current_sync_committee), cfg.consensusForkAtEpoch(bootstrap.header.beacon.slot.epoch))):
bootstrap.current_sync_committee_branch, when lcDataFork > LightClientDataFork.None:
log2trunc(altair.CURRENT_SYNC_COMMITTEE_GINDEX), if not is_valid_normalized_merkle_branch(
get_subtree_index(altair.CURRENT_SYNC_COMMITTEE_GINDEX), hash_tree_root(bootstrap.current_sync_committee),
bootstrap.header.beacon.state_root): bootstrap.current_sync_committee_branch,
return ResultType.err(VerifierError.Invalid) lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX,
bootstrap.header.beacon.state_root):
return ResultType.err(VerifierError.Invalid)
return ResultType.ok(typeof(bootstrap).kind.LightClientStore( return ResultType.ok(typeof(bootstrap).kind.LightClientStore(
finalized_header: bootstrap.header, finalized_header: bootstrap.header,
@ -109,13 +126,15 @@ proc validate_light_client_update*(
finalized_root.reset() finalized_root.reset()
else: else:
return err(VerifierError.Invalid) return err(VerifierError.Invalid)
if not is_valid_merkle_branch( withLcDataFork(lcDataForkAtConsensusFork(
finalized_root, cfg.consensusForkAtEpoch(update.attested_header.beacon.slot.epoch))):
update.finality_branch, when lcDataFork > LightClientDataFork.None:
log2trunc(altair.FINALIZED_ROOT_GINDEX), if not is_valid_normalized_merkle_branch(
get_subtree_index(altair.FINALIZED_ROOT_GINDEX), finalized_root,
update.attested_header.beacon.state_root): update.finality_branch,
return err(VerifierError.Invalid) lcDataFork.FINALIZED_ROOT_GINDEX,
update.attested_header.beacon.state_root):
return err(VerifierError.Invalid)
# Verify that the `next_sync_committee`, if present, actually is the # Verify that the `next_sync_committee`, if present, actually is the
# next sync committee saved in the state of the `attested_header` # next sync committee saved in the state of the `attested_header`
@ -128,13 +147,15 @@ proc validate_light_client_update*(
if attested_period == store_period and is_next_sync_committee_known: if attested_period == store_period and is_next_sync_committee_known:
if update.next_sync_committee != store.next_sync_committee: if update.next_sync_committee != store.next_sync_committee:
return err(VerifierError.UnviableFork) return err(VerifierError.UnviableFork)
if not is_valid_merkle_branch( withLcDataFork(lcDataForkAtConsensusFork(
hash_tree_root(update.next_sync_committee), cfg.consensusForkAtEpoch(update.attested_header.beacon.slot.epoch))):
update.next_sync_committee_branch, when lcDataFork > LightClientDataFork.None:
log2trunc(altair.NEXT_SYNC_COMMITTEE_GINDEX), if not is_valid_normalized_merkle_branch(
get_subtree_index(altair.NEXT_SYNC_COMMITTEE_GINDEX), hash_tree_root(update.next_sync_committee),
update.attested_header.beacon.state_root): update.next_sync_committee_branch,
return err(VerifierError.Invalid) lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX,
update.attested_header.beacon.state_root):
return err(VerifierError.Invalid)
# Verify sync committee aggregate signature # Verify sync committee aggregate signature
let sync_committee = let sync_committee =

View File

@ -44,7 +44,6 @@ type
List[SignedBLSToExecutionChange, List[SignedBLSToExecutionChange,
Limit MAX_BLS_TO_EXECUTION_CHANGES] Limit MAX_BLS_TO_EXECUTION_CHANGES]
blob_kzg_commitments*: KzgCommitments # [New in Deneb] blob_kzg_commitments*: KzgCommitments # [New in Deneb]
consolidations*: List[SignedConsolidation, Limit MAX_CONSOLIDATIONS]
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock
BlindedBeaconBlock* = object BlindedBeaconBlock* = object
@ -142,11 +141,12 @@ func toSignedBlindedBeaconBlock*(blck: electra.SignedBeaconBlock):
hash_tree_root(blck.message.body.execution_payload.transactions), hash_tree_root(blck.message.body.execution_payload.transactions),
withdrawals_root: withdrawals_root:
hash_tree_root(blck.message.body.execution_payload.withdrawals), hash_tree_root(blck.message.body.execution_payload.withdrawals),
deposit_receipts_root: hash_tree_root( deposit_requests_root: hash_tree_root(
blck.message.body.execution_payload.deposit_receipts), blck.message.body.execution_payload.deposit_requests),
withdrawal_requests_root: withdrawal_requests_root: hash_tree_root(
hash_tree_root( blck.message.body.execution_payload.withdrawal_requests),
blck.message.body.execution_payload.withdrawal_requests)), consolidation_requests_root: hash_tree_root(
blck.message.body.execution_payload.consolidation_requests)),
bls_to_execution_changes: blck.message.body.bls_to_execution_changes, bls_to_execution_changes: blck.message.body.bls_to_execution_changes,
blob_kzg_commitments: blck.message.body.blob_kzg_commitments)), blob_kzg_commitments: blck.message.body.blob_kzg_commitments)),
signature: blck.signature) signature: blck.signature)

View File

@ -14,8 +14,8 @@ import
export base export base
const const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages
topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy" topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy"
topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy" topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy"
topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy" topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy"
@ -63,7 +63,7 @@ func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string = func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages
func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string = func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix
@ -197,7 +197,7 @@ func getTargetGossipState*(
targetForks targetForks
func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] = func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#sync-committee-subnet-stability # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
if epoch.is_sync_committee_period(): if epoch.is_sync_committee_period():
return Opt.some 0'u64 return Opt.some 0'u64
let epochsBefore = let epochsBefore =
@ -216,7 +216,7 @@ func getSyncSubnets*(
if not nodeHasPubkey(pubkey): if not nodeHasPubkey(pubkey):
continue continue
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#broadcast-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-message
# The first quarter of the pubkeys map to subnet 0, the second quarter to # The first quarter of the pubkeys map to subnet 0, the second quarter to
# subnet 1, the third quarter to subnet 2 and the final quarter to subnet # subnet 1, the third quarter to subnet 2 and the final quarter to subnet
# 3. # 3.

View File

@ -787,7 +787,7 @@ proc readRuntimeConfig*(
"MAX_REQUEST_BLOB_SIDECARS" "MAX_REQUEST_BLOB_SIDECARS"
checkCompatibility BLOB_SIDECAR_SUBNET_COUNT checkCompatibility BLOB_SIDECAR_SUBNET_COUNT
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#configuration
# Isn't being used as a preset in the usual way: at any time, there's one correct value # Isn't being used as a preset in the usual way: at any time, there's one correct value
checkCompatibility PROPOSER_SCORE_BOOST checkCompatibility PROPOSER_SCORE_BOOST
checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Gnosis preset - Electra (Gnosis version not avilable yet; EF mainnet for now) # Gnosis preset - Electra (Gnosis version not avilable yet; EF mainnet for now)
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/presets/mainnet/electra.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml
const const
# Gwei values # Gwei values
# --------------------------------------------------------------- # ---------------------------------------------------------------
@ -40,12 +40,12 @@ const
# `uint64(2**3)` (= 8) # `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 MAX_ATTESTATIONS_ELECTRA*: uint64 = 8
# `uint64(2**0)` (= 1) # `uint64(2**0)` (= 1)
MAX_CONSOLIDATIONS*: uint64 = 1 MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1
# Execution # Execution
# --------------------------------------------------------------- # ---------------------------------------------------------------
# 2**13 (= 8192) receipts # 2**13 (= 8192) deposit requests
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD* = 8192 MAX_DEPOSIT_REQUESTS_PER_PAYLOAD* = 8192
# 2**4 (= 16) withdrawal requests # 2**4 (= 16) withdrawal requests
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16 MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Altair # Mainnet preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/altair.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Bellatrix # Mainnet preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/bellatrix.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Capella # Mainnet preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/capella.yaml
const const
# Max operations per block # Max operations per block
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Deneb # Mainnet preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/deneb.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/deneb.yaml
const const
# `uint64(4096)` # `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096 FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Electra preset - Electra # Electra preset - Electra
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/presets/mainnet/electra.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml
const const
# Gwei values # Gwei values
# --------------------------------------------------------------- # ---------------------------------------------------------------
@ -40,12 +40,12 @@ const
# `uint64(2**3)` (= 8) # `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 MAX_ATTESTATIONS_ELECTRA*: uint64 = 8
# `uint64(2**0)` (= 1) # `uint64(2**0)` (= 1)
MAX_CONSOLIDATIONS*: uint64 = 1 MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1
# Execution # Execution
# --------------------------------------------------------------- # ---------------------------------------------------------------
# 2**13 (= 8192) receipts # 2**13 (= 8192) deposit requests
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD* = 8192 MAX_DEPOSIT_REQUESTS_PER_PAYLOAD* = 8192
# 2**4 (= 16) withdrawal requests # 2**4 (= 16) withdrawal requests
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16 MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Altair # Minimal preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/minimal/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/altair.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Bellatrix # Minimal preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/minimal/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/bellatrix.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Capella # Minimal preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/minimal/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/capella.yaml
const const
# Max operations per block # Max operations per block
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Deneb # Minimal preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/minimal/deneb.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/deneb.yaml
const const
# `uint64(4096)` # `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096 FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096

View File

@ -40,12 +40,12 @@ const
# `uint64(2**3)` (= 8) # `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 MAX_ATTESTATIONS_ELECTRA*: uint64 = 8
# `uint64(2**0)` (= 1) # `uint64(2**0)` (= 1)
MAX_CONSOLIDATIONS*: uint64 = 1 MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1
# Execution # Execution
# --------------------------------------------------------------- # ---------------------------------------------------------------
# [customized] # [customized]
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD* = 4 MAX_DEPOSIT_REQUESTS_PER_PAYLOAD* = 4
# [customized] 2**1 (= 2) withdrawal requests # [customized] 2**1 (= 2) withdrawal requests
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 2 MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 2

View File

@ -269,7 +269,7 @@ proc verify_voluntary_exit_signature*(
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
func compute_sync_committee_message_signing_root*( func compute_sync_committee_message_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest = slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest =
@ -304,7 +304,7 @@ proc verify_sync_committee_signature*(
blsFastAggregateVerify(pubkeys, signing_root.data, signature) blsFastAggregateVerify(pubkeys, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
func compute_sync_committee_selection_proof_signing_root*( func compute_sync_committee_selection_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest = slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest =
@ -335,7 +335,7 @@ proc verify_sync_committee_selection_proof*(
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#signature # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#signature
func compute_contribution_and_proof_signing_root*( func compute_contribution_and_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
msg: ContributionAndProof): Eth2Digest = msg: ContributionAndProof): Eth2Digest =
@ -353,7 +353,7 @@ proc get_contribution_and_proof_signature*(
blsSign(privkey, signing_root.data) blsSign(privkey, signing_root.data)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
func is_sync_committee_aggregator*(signature: ValidatorSig): bool = func is_sync_committee_aggregator*(signature: ValidatorSig): bool =
let let
signatureDigest = eth2digest(signature.blob) signatureDigest = eth2digest(signature.blob)
@ -393,7 +393,7 @@ proc verify_builder_signature*(
let signing_root = compute_builder_signing_root(fork, msg) let signing_root = compute_builder_signing_root(fork, msg)
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#new-process_bls_to_execution_change # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
func compute_bls_to_execution_change_signing_root*( func compute_bls_to_execution_change_signing_root*(
genesisFork: Fork, genesis_validators_root: Eth2Digest, genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: BLSToExecutionChange): Eth2Digest = msg: BLSToExecutionChange): Eth2Digest =
@ -421,23 +421,3 @@ proc verify_bls_to_execution_change_signature*(
let signing_root = compute_bls_to_execution_change_signing_root( let signing_root = compute_bls_to_execution_change_signing_root(
genesisFork, genesis_validators_root, msg.message) genesisFork, genesis_validators_root, msg.message)
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
func compute_consolidation_signing_root(
genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: Consolidation): Eth2Digest =
# Uses genesis fork version regardless
doAssert genesisFork.current_version == genesisFork.previous_version
let domain = compute_domain(
DOMAIN_CONSOLIDATION, genesisFork.current_version,
genesis_validators_root=genesis_validators_root)
compute_signing_root(msg, domain)
proc verify_consolidation_signature*(
genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: SignedConsolidation | TrustedSignedConsolidation,
pubkeys: openArray[ValidatorPubKey]): bool =
withTrust(msg.signature):
let signing_root = compute_consolidation_signing_root(
genesisFork, genesis_validators_root, msg.message)
blsFastAggregateVerify(pubkeys, signing_root.data, msg.signature)

View File

@ -83,7 +83,7 @@ func aggregateAttesters(
# Aggregation spec requires non-empty collection # Aggregation spec requires non-empty collection
# - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04 # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
# Consensus specs require at least one attesting index in attestation # Consensus specs require at least one attesting index in attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_valid_indexed_attestation # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
return err("aggregateAttesters: no attesting indices") return err("aggregateAttesters: no attesting indices")
let let
@ -462,10 +462,6 @@ proc collectSignatureSets*(
genesis_fork, genesis_validators_root, bls_change.message, genesis_fork, genesis_validators_root, bls_change.message,
validator_pubkey, sig) validator_pubkey, sig)
block:
# 9. Consolidations
debugComment "check consolidations signatures"
ok() ok()
proc batchVerify*(verifier: var BatchVerifier, sigs: openArray[SignatureSet]): bool = proc batchVerify*(verifier: var BatchVerifier, sigs: openArray[SignatureSet]): bool =

View File

@ -361,12 +361,11 @@ func partialBeaconBlock*(
deposits: seq[Deposit], deposits: seq[Deposit],
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
execution_payload: ForkyExecutionPayloadForSigning, execution_payload: ForkyExecutionPayloadForSigning
consolidations: openArray[SignedConsolidation]
): auto = ): auto =
const consensusFork = typeof(state).kind const consensusFork = typeof(state).kind
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#preparing-for-a-beaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#preparing-for-a-beaconblock
var res = consensusFork.BeaconBlock( var res = consensusFork.BeaconBlock(
slot: state.data.slot, slot: state.data.slot,
proposer_index: proposer_index.uint64, proposer_index: proposer_index.uint64,
@ -386,7 +385,7 @@ func partialBeaconBlock*(
when consensusFork >= ConsensusFork.Altair: when consensusFork >= ConsensusFork.Altair:
res.body.sync_aggregate = sync_aggregate res.body.sync_aggregate = sync_aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/validator.md#block-proposal # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/validator.md#block-proposal
when consensusFork >= ConsensusFork.Bellatrix: when consensusFork >= ConsensusFork.Bellatrix:
res.body.execution_payload = execution_payload.executionPayload res.body.execution_payload = execution_payload.executionPayload
@ -412,8 +411,7 @@ func partialBeaconBlock*(
deposits: seq[Deposit], deposits: seq[Deposit],
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
execution_payload: ForkyExecutionPayloadForSigning, execution_payload: ForkyExecutionPayloadForSigning
consolidations: seq[SignedConsolidation],
): auto = ): auto =
const consensusFork = typeof(state).kind const consensusFork = typeof(state).kind
@ -436,10 +434,7 @@ func partialBeaconBlock*(
sync_aggregate: sync_aggregate, sync_aggregate: sync_aggregate,
execution_payload: execution_payload.executionPayload, execution_payload: execution_payload.executionPayload,
bls_to_execution_changes: validator_changes.bls_to_execution_changes, bls_to_execution_changes: validator_changes.bls_to_execution_changes,
blob_kzg_commitments: execution_payload.blobsBundle.commitments, blob_kzg_commitments: execution_payload.blobsBundle.commitments))
consolidations:
List[SignedConsolidation, Limit MAX_CONSOLIDATIONS].init(
consolidations)))
proc makeBeaconBlockWithRewards*( proc makeBeaconBlockWithRewards*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
@ -453,7 +448,6 @@ proc makeBeaconBlockWithRewards*(
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
executionPayload: ForkyExecutionPayloadForSigning, executionPayload: ForkyExecutionPayloadForSigning,
consolidations: seq[SignedConsolidation],
rollback: RollbackForkedHashedProc, rollback: RollbackForkedHashedProc,
cache: var StateCache, cache: var StateCache,
# TODO: # TODO:
@ -480,7 +474,7 @@ proc makeBeaconBlockWithRewards*(
partialBeaconBlock( partialBeaconBlock(
cfg, state.`kind Data`, proposer_index, randao_reveal, eth1_data, cfg, state.`kind Data`, proposer_index, randao_reveal, eth1_data,
graffiti, attestations, deposits, validator_changes, sync_aggregate, graffiti, attestations, deposits, validator_changes, sync_aggregate,
executionPayload, consolidations)) executionPayload))
let res = process_block( let res = process_block(
cfg, state.`kind Data`.data, blck.`kind Data`.asSigVerified(), cfg, state.`kind Data`.data, blck.`kind Data`.asSigVerified(),
@ -524,7 +518,7 @@ proc makeBeaconBlockWithRewards*(
transactions_root.get transactions_root.get
when executionPayload is electra.ExecutionPayloadForSigning: when executionPayload is electra.ExecutionPayloadForSigning:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#beaconblockbody
forkyState.data.latest_block_header.body_root = hash_tree_root( forkyState.data.latest_block_header.body_root = hash_tree_root(
[hash_tree_root(randao_reveal), [hash_tree_root(randao_reveal),
hash_tree_root(eth1_data), hash_tree_root(eth1_data),
@ -539,9 +533,7 @@ proc makeBeaconBlockWithRewards*(
hash_tree_root(sync_aggregate), hash_tree_root(sync_aggregate),
execution_payload_root.get, execution_payload_root.get,
hash_tree_root(validator_changes.bls_to_execution_changes), hash_tree_root(validator_changes.bls_to_execution_changes),
hash_tree_root(kzg_commitments.get), hash_tree_root(kzg_commitments.get)
hash_tree_root(List[SignedConsolidation, Limit MAX_CONSOLIDATIONS].init(
consolidations))
]) ])
else: else:
raiseAssert "Attempt to use non-Electra payload with post-Deneb state" raiseAssert "Attempt to use non-Electra payload with post-Deneb state"
@ -584,7 +576,6 @@ proc makeBeaconBlock*(
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
executionPayload: ForkyExecutionPayloadForSigning, executionPayload: ForkyExecutionPayloadForSigning,
consolidations: seq[SignedConsolidation],
rollback: RollbackForkedHashedProc, cache: var StateCache, rollback: RollbackForkedHashedProc, cache: var StateCache,
verificationFlags: UpdateFlags, verificationFlags: UpdateFlags,
transactions_root: Opt[Eth2Digest], transactions_root: Opt[Eth2Digest],
@ -595,7 +586,7 @@ proc makeBeaconBlock*(
? makeBeaconBlockWithRewards( ? makeBeaconBlockWithRewards(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, validator_changes, sync_aggregate, attestations, deposits, validator_changes, sync_aggregate,
executionPayload, consolidations, rollback, cache, verificationFlags, executionPayload, rollback, cache, verificationFlags,
transactions_root, execution_payload_root, kzg_commitments) transactions_root, execution_payload_root, kzg_commitments)
ok(blockAndRewards.blck) ok(blockAndRewards.blck)
@ -608,13 +599,12 @@ proc makeBeaconBlock*(
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
executionPayload: ForkyExecutionPayloadForSigning, executionPayload: ForkyExecutionPayloadForSigning,
consolidations: seq[SignedConsolidation],
rollback: RollbackForkedHashedProc, cache: var StateCache): rollback: RollbackForkedHashedProc, cache: var StateCache):
Result[ForkedBeaconBlock, cstring] = Result[ForkedBeaconBlock, cstring] =
makeBeaconBlock( makeBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, validator_changes, sync_aggregate, attestations, deposits, validator_changes, sync_aggregate,
executionPayload, consolidations, rollback, cache, executionPayload, rollback, cache,
verificationFlags = {}, transactions_root = Opt.none Eth2Digest, verificationFlags = {}, transactions_root = Opt.none Eth2Digest,
execution_payload_root = Opt.none Eth2Digest, execution_payload_root = Opt.none Eth2Digest,
kzg_commitments = Opt.none KzgCommitments) kzg_commitments = Opt.none KzgCommitments)
@ -628,14 +618,13 @@ proc makeBeaconBlock*(
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
executionPayload: ForkyExecutionPayloadForSigning, executionPayload: ForkyExecutionPayloadForSigning,
consolidations: seq[SignedConsolidation],
rollback: RollbackForkedHashedProc, rollback: RollbackForkedHashedProc,
cache: var StateCache, verificationFlags: UpdateFlags): cache: var StateCache, verificationFlags: UpdateFlags):
Result[ForkedBeaconBlock, cstring] = Result[ForkedBeaconBlock, cstring] =
makeBeaconBlock( makeBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, validator_changes, sync_aggregate, attestations, deposits, validator_changes, sync_aggregate,
executionPayload, consolidations, rollback, cache, executionPayload, rollback, cache,
verificationFlags = verificationFlags, verificationFlags = verificationFlags,
transactions_root = Opt.none Eth2Digest, transactions_root = Opt.none Eth2Digest,
execution_payload_root = Opt.none Eth2Digest, execution_payload_root = Opt.none Eth2Digest,

View File

@ -10,8 +10,8 @@
# State transition - block processing, as described in # State transition - block processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#block-processing
# #
@ -135,7 +135,7 @@ func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
(validator.activation_epoch <= epoch) and (validator.activation_epoch <= epoch) and
(epoch < validator.withdrawable_epoch) (epoch < validator.withdrawable_epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#proposer-slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#proposer-slashings
proc check_proposer_slashing*( proc check_proposer_slashing*(
state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing, state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing,
flags: UpdateFlags): flags: UpdateFlags):
@ -397,22 +397,22 @@ proc process_deposit*(
apply_deposit(cfg, state, bloom_filter, deposit.data, flags) apply_deposit(cfg, state, bloom_filter, deposit.data, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-process_deposit_receipt # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_deposit_request
func process_deposit_receipt*( func process_deposit_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
bloom_filter: var PubkeyBloomFilter, deposit_receipt: DepositReceipt, bloom_filter: var PubkeyBloomFilter, deposit_request: DepositRequest,
flags: UpdateFlags): Result[void, cstring] = flags: UpdateFlags): Result[void, cstring] =
# Set deposit receipt start index # Set deposit request start index
if state.deposit_receipts_start_index == if state.deposit_requests_start_index ==
UNSET_DEPOSIT_RECEIPTS_START_INDEX: UNSET_DEPOSIT_REQUESTS_START_INDEX:
state.deposit_receipts_start_index = deposit_receipt.index state.deposit_requests_start_index = deposit_request.index
apply_deposit( apply_deposit(
cfg, state, bloom_filter, DepositData( cfg, state, bloom_filter, DepositData(
pubkey: deposit_receipt.pubkey, pubkey: deposit_request.pubkey,
withdrawal_credentials: deposit_receipt.withdrawal_credentials, withdrawal_credentials: deposit_request.withdrawal_credentials,
amount: deposit_receipt.amount, amount: deposit_request.amount,
signature: deposit_receipt.signature), flags) signature: deposit_request.signature), flags)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#voluntary-exits # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#modified-process_voluntary_exit # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#modified-process_voluntary_exit
@ -507,13 +507,12 @@ proc process_bls_to_execution_change*(
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#new-process_execution_layer_withdrawal_request # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_withdrawal_request
func process_execution_layer_withdrawal_request*( func process_withdrawal_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
execution_layer_withdrawal_request: ExecutionLayerWithdrawalRequest, withdrawal_request: WithdrawalRequest, cache: var StateCache) =
cache: var StateCache) =
let let
amount = execution_layer_withdrawal_request.amount amount = withdrawal_request.amount
is_full_exit_request = amount == static(FULL_EXIT_REQUEST_AMOUNT.Gwei) is_full_exit_request = amount == static(FULL_EXIT_REQUEST_AMOUNT.Gwei)
# If partial withdrawal queue is full, only full exits are processed # If partial withdrawal queue is full, only full exits are processed
@ -522,7 +521,8 @@ func process_execution_layer_withdrawal_request*(
return return
let let
request_pubkey = execution_layer_withdrawal_request.validator_pubkey request_pubkey = withdrawal_request.validator_pubkey
# Verify pubkey exists
index = findValidatorIndex(state, request_pubkey).valueOr: index = findValidatorIndex(state, request_pubkey).valueOr:
return return
validator = state.validators.item(index) validator = state.validators.item(index)
@ -532,7 +532,7 @@ func process_execution_layer_withdrawal_request*(
has_correct_credential = has_execution_withdrawal_credential(validator) has_correct_credential = has_execution_withdrawal_credential(validator)
is_correct_source_address = is_correct_source_address =
validator.withdrawal_credentials.data.toOpenArray(12, 31) == validator.withdrawal_credentials.data.toOpenArray(12, 31) ==
execution_layer_withdrawal_request.source_address.data withdrawal_request.source_address.data
if not (has_correct_credential and is_correct_source_address): if not (has_correct_credential and is_correct_source_address):
return return
@ -588,67 +588,66 @@ func process_execution_layer_withdrawal_request*(
withdrawable_epoch: withdrawable_epoch, withdrawable_epoch: withdrawable_epoch,
)) ))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#consolidations # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_consolidation_request
proc process_consolidation*( proc process_consolidation_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
signed_consolidation: SignedConsolidation | TrustedSignedConsolidation, consolidation_request: ConsolidationRequest,
cache: var StateCache): Result[void, cstring] = cache: var StateCache) =
# If the pending consolidations queue is full, no consolidations are allowed # If the pending consolidations queue is full, consolidation requests are
# in the block # ignored
if not(lenu64(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT): if not(lenu64(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT):
return err("Consolidation: too many pending consolidations already") return
# If there is too little available consolidation churn limit, no # If there is too little available consolidation churn limit, consolidation
# consolidations are allowed in the block # requests are ignored
if not (get_consolidation_churn_limit(cfg, state, cache) > if not (get_consolidation_churn_limit(cfg, state, cache) >
static(MIN_ACTIVATION_BALANCE.Gwei)): static(MIN_ACTIVATION_BALANCE.Gwei)):
return err("Consolidation: insufficient available consolidation churn limit") return
let consolidation = signed_consolidation.message
# Verify that source != target, so a consolidation cannot be used as an exit.
if not(consolidation.source_index != consolidation.target_index):
return err("Consolidation: a consolidation cannot be used as an exit")
let let
source_validator = addr state.validators.mitem(consolidation.source_index) # Verify pubkeys exists
target_validator = state.validators.item(consolidation.target_index) source_index =
findValidatorIndex(state, consolidation_request.source_pubkey).valueOr:
return
target_index =
findValidatorIndex(state, consolidation_request.target_pubkey).valueOr:
return
# Verify that source != target, so a consolidation cannot be used as an exit.
if source_index == target_index:
return
let
source_validator = addr state.validators.mitem(source_index)
target_validator = state.validators.item(target_index)
# Verify source withdrawal credentials
let
has_correct_credential =
has_execution_withdrawal_credential(source_validator[])
is_correct_source_address =
source_validator.withdrawal_credentials.data.toOpenArray(12, 31) ==
consolidation_request.source_address.data
if not (has_correct_credential and is_correct_source_address):
return
# Verify that target has execution withdrawal credentials
if not has_execution_withdrawal_credential(target_validator):
return
# Verify the source and the target are active # Verify the source and the target are active
let current_epoch = get_current_epoch(state) let current_epoch = get_current_epoch(state)
if not is_active_validator(source_validator[], current_epoch): if not is_active_validator(source_validator[], current_epoch):
return err("Consolidation: source validator not active") return
if not is_active_validator(target_validator, current_epoch): if not is_active_validator(target_validator, current_epoch):
return err("Consolidation: target validator not active") return
# Verify exits for source and target have not been initiated # Verify exits for source and target have not been initiated
if not (source_validator[].exit_epoch == FAR_FUTURE_EPOCH): if source_validator[].exit_epoch != FAR_FUTURE_EPOCH:
return err("Consolidation: exit for source validator already initiated") return
if not (target_validator.exit_epoch == FAR_FUTURE_EPOCH): if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
return err("Consolidation: exit for target validator already initiated") return
# Consolidations must specify an epoch when they become valid; they are not
# valid before then
if not (current_epoch >= consolidation.epoch):
return err("Consolidation: consolidation not valid before specified epoch")
# Verify the source and the target have Execution layer withdrawal credentials
if not has_execution_withdrawal_credential(source_validator[]):
return err("Consolidation: source doesn't have execution layer withdrawal credentials")
if not has_execution_withdrawal_credential(target_validator):
return err("Consolidation: target doesn't have execution layer withdrawal credentials")
# Verify the same withdrawal address
if not (source_validator[].withdrawal_credentials.data.toOpenArray(12, 31) ==
target_validator.withdrawal_credentials.data.toOpenArray(12, 31)):
return err("Consolidation: source and target don't have same withdrawal address")
# Verify consolidation is signed by the source and the target
if not verify_consolidation_signature(
cfg.genesisFork, state.genesis_validators_root, signed_consolidation,
[source_validator[].pubkey, target_validator.pubkey]):
return err("Consolidation: invalid signature")
# Initiate source validator exit and append pending consolidation # Initiate source validator exit and append pending consolidation
source_validator[].exit_epoch = compute_consolidation_epoch_and_update_churn( source_validator[].exit_epoch = compute_consolidation_epoch_and_update_churn(
@ -657,10 +656,7 @@ proc process_consolidation*(
source_validator[].exit_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY source_validator[].exit_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
debugComment "check HashList add return value" debugComment "check HashList add return value"
discard state.pending_consolidations.add(PendingConsolidation( discard state.pending_consolidations.add(PendingConsolidation(
source_index: consolidation.source_index, source_index: source_index.uint64, target_index: target_index.uint64))
target_index: consolidation.target_index))
ok()
type type
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.5.0#/Rewards/getBlockRewards # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.5.0#/Rewards/getBlockRewards
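
The new `process_consolidation_request` above replaces the signed, block-level `Consolidation` operation with an execution-layer request that is silently ignored whenever any precondition fails: full pending queue, insufficient churn, unknown pubkeys, self-consolidation, wrong withdrawal credentials, inactive or already-exiting validators. A compressed sketch of that check chain, using toy stand-in types rather than the real nimbus-eth2 state (all names here are illustrative):

```nim
type
  MiniValidator = object
    pubkey: string
    withdrawalAddress: string     # stand-in for credentials bytes 12..31
    hasExecutionCredential: bool
    active: bool
    exiting: bool
  ConsolidationRequest = object
    sourceAddress: string
    sourcePubkey: string
    targetPubkey: string

proc findIndex(validators: seq[MiniValidator], pubkey: string): int =
  result = -1
  for i, v in validators:
    if v.pubkey == pubkey:
      return i

proc processConsolidationRequest(
    validators: seq[MiniValidator],
    pendingConsolidations: var seq[(int, int)],
    pendingLimit: int,
    churnAvailable, minActivationBalance: uint64,
    req: ConsolidationRequest) =
  # Every failed check silently drops the request, mirroring the hunk above.
  if pendingConsolidations.len >= pendingLimit: return
  if churnAvailable <= minActivationBalance: return
  let
    src = findIndex(validators, req.sourcePubkey)
    tgt = findIndex(validators, req.targetPubkey)
  if src < 0 or tgt < 0 or src == tgt: return
  let
    s = validators[src]
    t = validators[tgt]
  if not (s.hasExecutionCredential and s.withdrawalAddress == req.sourceAddress):
    return
  if not t.hasExecutionCredential: return
  if not (s.active and t.active): return
  if s.exiting or t.exiting: return
  # All checks passed: queue the consolidation for later epoch processing.
  pendingConsolidations.add((src, tgt))

when isMainModule:
  var queue: seq[(int, int)]
  let vals = @[
    MiniValidator(pubkey: "a", withdrawalAddress: "0x01",
                  hasExecutionCredential: true, active: true),
    MiniValidator(pubkey: "b", withdrawalAddress: "0x02",
                  hasExecutionCredential: true, active: true)]
  processConsolidationRequest(
    vals, queue, pendingLimit = 64,
    churnAvailable = 64_000_000_000'u64,
    minActivationBalance = 32_000_000_000'u64,
    req = ConsolidationRequest(
      sourceAddress: "0x01", sourcePubkey: "a", targetPubkey: "b"))
  doAssert queue == @[(0, 1)]
```

Dropping invalid requests instead of returning an error matches the early `return`s in the hunk above: requests originate from the execution layer, so a bad request no longer invalidates the block the way a bad `SignedConsolidation` did.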
@ -672,7 +668,7 @@ type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#operations # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#operations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#modified-process_operations # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#modified-process_operations
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#modified-process_operations # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#operations
proc process_operations( proc process_operations(
cfg: RuntimeConfig, state: var ForkyBeaconState, cfg: RuntimeConfig, state: var ForkyBeaconState,
body: SomeForkyBeaconBlockBody, base_reward_per_increment: Gwei, body: SomeForkyBeaconBlockBody, base_reward_per_increment: Gwei,
@ -683,7 +679,7 @@ proc process_operations(
# Disable former deposit mechanism once all prior deposits are processed # Disable former deposit mechanism once all prior deposits are processed
let let
eth1_deposit_index_limit = eth1_deposit_index_limit =
min(state.eth1_data.deposit_count, state.deposit_receipts_start_index) min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
req_deposits = req_deposits =
if state.eth1_deposit_index < eth1_deposit_index_limit: if state.eth1_deposit_index < eth1_deposit_index_limit:
min( min(
@ -733,17 +729,17 @@ proc process_operations(
for op in body.bls_to_execution_changes: for op in body.bls_to_execution_changes:
? process_bls_to_execution_change(cfg, state, op) ? process_bls_to_execution_change(cfg, state, op)
# [New in Electra:EIP7002:EIP7251]
when typeof(body).kind >= ConsensusFork.Electra: when typeof(body).kind >= ConsensusFork.Electra:
for op in body.execution_payload.withdrawal_requests: for op in body.execution_payload.deposit_requests:
process_execution_layer_withdrawal_request( debugComment "combine with previous Bloom filter construction"
cfg, state, op, cache)
for op in body.execution_payload.deposit_receipts:
debugComment "combine with previous bloom filter construction"
let bloom_filter = constructBloomFilter(state.validators.asSeq) let bloom_filter = constructBloomFilter(state.validators.asSeq)
? process_deposit_receipt(cfg, state, bloom_filter[], op, {}) ? process_deposit_request(cfg, state, bloom_filter[], op, {})
for op in body.consolidations: for op in body.execution_payload.withdrawal_requests:
? process_consolidation(cfg, state, op, cache) # [New in Electra:EIP7002:7251]
process_withdrawal_request(cfg, state, op, cache)
for op in body.execution_payload.consolidation_requests:
# [New in Electra:EIP7251]
process_consolidation_request(cfg, state, op, cache)
ok(operations_rewards) ok(operations_rewards)
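
The modified `process_operations` above caps legacy `Deposit` processing at `min(state.eth1_data.deposit_count, state.deposit_requests_start_index)` and then handles the Electra request types in order: deposit requests, withdrawal requests, consolidation requests. A minimal sketch of the legacy-deposit cutoff arithmetic (plain integers, names illustrative):

```nim
const UNSET_DEPOSIT_REQUESTS_START_INDEX = high(uint64)

func requiredLegacyDeposits(
    eth1DepositIndex, eth1DepositCount,
    depositRequestsStartIndex, maxDepositsPerBlock: uint64): uint64 =
  ## How many legacy Eth1-bridge deposits a block must still include.
  let limit = min(eth1DepositCount, depositRequestsStartIndex)
  if eth1DepositIndex < limit:
    min(maxDepositsPerBlock, limit - eth1DepositIndex)
  else:
    0

when isMainModule:
  # Before any deposit request has been seen, the legacy path is unchanged.
  doAssert requiredLegacyDeposits(
    10, 20, UNSET_DEPOSIT_REQUESTS_START_INDEX, 16) == 10
  # Once requests start at index 12, only legacy deposits up to that index count.
  doAssert requiredLegacyDeposits(10, 20, 12, 16) == 2
  # Past the cutoff, no legacy deposits are required any more.
  doAssert requiredLegacyDeposits(12, 20, 12, 16) == 0
```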
@ -971,7 +967,7 @@ type SomeElectraBeaconBlockBody =
electra.BeaconBlockBody | electra.SigVerifiedBeaconBlockBody | electra.BeaconBlockBody | electra.SigVerifiedBeaconBlockBody |
electra.TrustedBeaconBlockBody electra.TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#modified-process_execution_payload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-process_execution_payload
proc process_execution_payload*( proc process_execution_payload*(
state: var electra.BeaconState, body: SomeElectraBeaconBlockBody, state: var electra.BeaconState, body: SomeElectraBeaconBlockBody,
notify_new_payload: electra.ExecutePayload): Result[void, cstring] = notify_new_payload: electra.ExecutePayload): Result[void, cstring] =
@ -1018,20 +1014,33 @@ proc process_execution_payload*(
withdrawals_root: hash_tree_root(payload.withdrawals), withdrawals_root: hash_tree_root(payload.withdrawals),
blob_gas_used: payload.blob_gas_used, blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas, excess_blob_gas: payload.excess_blob_gas,
deposit_receipts_root: deposit_requests_root:
hash_tree_root(payload.deposit_receipts), # [New in Electra:EIP6110] hash_tree_root(payload.deposit_requests), # [New in Electra:EIP6110]
withdrawal_requests_root: withdrawal_requests_root:
hash_tree_root(payload.withdrawal_requests)) # [New in Electra:EIP7002:EIP7251] hash_tree_root(payload.withdrawal_requests), # [New in Electra:EIP7002:EIP7251]
consolidation_requests_root:
hash_tree_root(payload.consolidation_requests)) # [New in Electra:EIP7251]
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#new-process_withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#new-process_withdrawals
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-process_withdrawals
func process_withdrawals*( func process_withdrawals*(
state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState), state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState),
payload: capella.ExecutionPayload | deneb.ExecutionPayload | payload: capella.ExecutionPayload | deneb.ExecutionPayload |
electra.ExecutionPayload): electra.ExecutionPayload):
Result[void, cstring] = Result[void, cstring] =
let expected_withdrawals = get_expected_withdrawals(state) when typeof(state).kind >= ConsensusFork.Electra:
let (expected_withdrawals, partial_withdrawals_count) =
get_expected_withdrawals_with_partial_count(state)
# Update pending partial withdrawals [New in Electra:EIP7251]
# Moved slightly earlier to be in same when block
state.pending_partial_withdrawals =
HashList[PendingPartialWithdrawal, Limit PENDING_PARTIAL_WITHDRAWALS_LIMIT].init(
state.pending_partial_withdrawals.asSeq[partial_withdrawals_count .. ^1])
else:
let expected_withdrawals = get_expected_withdrawals(state)
if not (len(payload.withdrawals) == len(expected_withdrawals)): if not (len(payload.withdrawals) == len(expected_withdrawals)):
return err("process_withdrawals: different numbers of payload and expected withdrawals") return err("process_withdrawals: different numbers of payload and expected withdrawals")
@ -1137,7 +1146,7 @@ proc process_block*(
ok(operations_rewards) ok(operations_rewards)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095 # TODO workaround for https://github.com/nim-lang/Nim/issues/18095
type SomeBellatrixBlock = type SomeBellatrixBlock =
bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock

View File

@ -10,7 +10,7 @@
# State transition - epoch processing, as described in # State transition - epoch processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing
# #
# The entry point is `process_epoch`, which is at the bottom of this file. # The entry point is `process_epoch`, which is at the bottom of this file.
@ -535,7 +535,7 @@ func get_attestation_component_delta(
else: else:
RewardDelta(penalties: base_reward) RewardDelta(penalties: base_reward)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#components-of-attestation-deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#components-of-attestation-deltas
func get_source_delta*( func get_source_delta*(
validator: RewardStatus, validator: RewardStatus,
base_reward: Gwei, base_reward: Gwei,
@ -694,14 +694,14 @@ func get_unslashed_participating_increment*(
flag_index: TimelyFlag): uint64 = flag_index: TimelyFlag): uint64 =
info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#get_flag_index_deltas
func get_active_increments*( func get_active_increments*(
info: altair.EpochInfo | bellatrix.BeaconState): uint64 = info: altair.EpochInfo | bellatrix.BeaconState): uint64 =
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas
# Combines get_flag_index_deltas() and get_inactivity_penalty_deltas() # Combines get_flag_index_deltas() and get_inactivity_penalty_deltas()
template get_flag_and_inactivity_delta( template get_flag_and_inactivity_delta(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
@ -961,7 +961,7 @@ func process_registry_updates*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func get_adjusted_total_slashing_balance*( func get_adjusted_total_slashing_balance*(
state: ForkyBeaconState, total_balance: Gwei): Gwei = state: ForkyBeaconState, total_balance: Gwei): Gwei =
const multiplier = const multiplier =
@ -980,14 +980,14 @@ func get_adjusted_total_slashing_balance*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool = func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool =
validator.slashed and validator.slashed and
epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func get_slashing_penalty*(validator: Validator, func get_slashing_penalty*(validator: Validator,
adjusted_total_slashing_balance, adjusted_total_slashing_balance,
total_balance: Gwei): Gwei = total_balance: Gwei): Gwei =
@ -999,7 +999,7 @@ func get_slashing_penalty*(validator: Validator,
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) = func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) =
let let
epoch = get_current_epoch(state) epoch = get_current_epoch(state)
@ -1113,7 +1113,7 @@ func process_historical_roots_update*(state: var ForkyBeaconState) =
if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0: if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0:
# Equivalent to hash_tree_root(foo: HistoricalBatch), but without using # Equivalent to hash_tree_root(foo: HistoricalBatch), but without using
# significant additional stack or heap. # significant additional stack or heap.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#historicalbatch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch
# In response to https://github.com/status-im/nimbus-eth2/issues/921 # In response to https://github.com/status-im/nimbus-eth2/issues/921
if not state.historical_roots.add state.compute_historical_root(): if not state.historical_roots.add state.compute_historical_root():
raiseAssert "no more room for historical roots, so long and thanks for the fish!" raiseAssert "no more room for historical roots, so long and thanks for the fish!"
@ -1202,7 +1202,7 @@ func process_inactivity_updates*(
if pre_inactivity_score != inactivity_score: if pre_inactivity_score != inactivity_score:
state.inactivity_scores[index] = inactivity_score state.inactivity_scores[index] = inactivity_score
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#historical-summaries-updates # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#historical-summaries-updates
func process_historical_summaries_update*( func process_historical_summaries_update*(
state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState)): state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState)):
Result[void, cstring] = Result[void, cstring] =
@ -1218,25 +1218,45 @@ func process_historical_summaries_update*(
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-process_pending_balance_deposits # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_pending_balance_deposits
func process_pending_balance_deposits*( func process_pending_balance_deposits*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
cache: var StateCache): Result[void, cstring] = cache: var StateCache): Result[void, cstring] =
let let available_for_processing = state.deposit_balance_to_consume +
available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(cfg, state, cache)
get_activation_exit_churn_limit(cfg, state, cache)
var var
processed_amount = 0.Gwei processed_amount = 0.Gwei
next_deposit_index = 0.Gwei next_deposit_index = 0
deposits_to_postpone: seq[PendingBalanceDeposit]
for deposit in state.pending_balance_deposits: for deposit in state.pending_balance_deposits:
if processed_amount + deposit.amount > available_for_processing: let validator = state.validators.item(deposit.index)
break
let deposit_validator_index = ValidatorIndex.init(deposit.index).valueOr: let deposit_validator_index = ValidatorIndex.init(deposit.index).valueOr:
# TODO this function in spec doesn't really have error returns as such
return err("process_pending_balance_deposits: deposit index out of range") return err("process_pending_balance_deposits: deposit index out of range")
increase_balance(state, deposit_validator_index, deposit.amount)
processed_amount += deposit.amount # Validator is exiting, postpone the deposit until after withdrawable epoch
inc next_deposit_index if validator.exit_epoch < FAR_FUTURE_EPOCH:
if get_current_epoch(state) <= validator.withdrawable_epoch:
deposits_to_postpone.add(deposit)
# Deposited balance will never become active. Increase balance but do not
# consume churn
else:
increase_balance(state, deposit_validator_index, deposit.amount)
# Validator is not exiting, attempt to process deposit
else:
# Deposit does not fit in the churn, no more deposit processing in this
# epoch.
if processed_amount + deposit.amount > available_for_processing:
break
# Deposit fits in the churn, process it. Increase balance and consume churn.
else:
increase_balance(state, deposit_validator_index, deposit.amount)
processed_amount += deposit.amount
# Regardless of how the deposit was handled, we move on in the queue.
next_deposit_index += 1
state.pending_balance_deposits = state.pending_balance_deposits =
HashList[PendingBalanceDeposit, Limit PENDING_BALANCE_DEPOSITS_LIMIT].init( HashList[PendingBalanceDeposit, Limit PENDING_BALANCE_DEPOSITS_LIMIT].init(
@ -1248,6 +1268,10 @@ func process_pending_balance_deposits*(
state.deposit_balance_to_consume = state.deposit_balance_to_consume =
available_for_processing - processed_amount available_for_processing - processed_amount
debugComment "yet another in-theory-might-overflow-maybe things, look at these more carefully"
if len(deposits_to_postpone) > 0:
discard state.pending_balance_deposits.add deposits_to_postpone
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-process_pending_consolidations # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-process_pending_consolidations
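
The rewritten `process_pending_balance_deposits` above walks the pending-deposit queue, consumes the per-epoch activation/exit churn, credits deposits that fit, and postpones deposits whose validator is still exiting. A simplified, self-contained model of that queue discipline with plain Nim types instead of the real `electra.BeaconState` (all names and values are illustrative):

```nim
type
  PendingDeposit = object
    index: int        # validator index
    amount: uint64    # Gwei
  MiniValidator = object
    exitEpoch: uint64
    withdrawableEpoch: uint64

const FAR_FUTURE_EPOCH = high(uint64)

proc processPendingDeposits(
    balances: var seq[uint64],
    validators: seq[MiniValidator],
    pending: var seq[PendingDeposit],
    churnLimit, currentEpoch: uint64) =
  var
    processedAmount = 0'u64
    nextDepositIndex = 0
    postponed: seq[PendingDeposit]
  for dep in pending:
    let v = validators[dep.index]
    if v.exitEpoch < FAR_FUTURE_EPOCH:
      if currentEpoch <= v.withdrawableEpoch:
        # Validator is exiting: defer the deposit past its withdrawable epoch.
        postponed.add dep
      else:
        # Balance can never re-activate; credit it without consuming churn.
        balances[dep.index] += dep.amount
    else:
      if processedAmount + dep.amount > churnLimit:
        break  # churn exhausted for this epoch, stop processing
      balances[dep.index] += dep.amount
      processedAmount += dep.amount
    inc nextDepositIndex
  # Keep whatever was not reached this epoch, then re-append postponed deposits.
  var remaining: seq[PendingDeposit]
  for i in nextDepositIndex ..< pending.len:
    remaining.add pending[i]
  pending = remaining & postponed

when isMainModule:
  var balances = @[32_000_000_000'u64, 0'u64]
  let validators = @[
    MiniValidator(exitEpoch: FAR_FUTURE_EPOCH, withdrawableEpoch: FAR_FUTURE_EPOCH),
    MiniValidator(exitEpoch: 10, withdrawableEpoch: 266)]
  var pending = @[
    PendingDeposit(index: 0, amount: 1_000_000_000),
    PendingDeposit(index: 1, amount: 1_000_000_000)]
  processPendingDeposits(balances, validators, pending,
                         churnLimit = 2_000_000_000'u64, currentEpoch = 20)
  doAssert balances[0] == 33_000_000_000'u64
  doAssert pending.len == 1   # the exiting validator's deposit was postponed
```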
@ -1345,7 +1369,7 @@ func init*(
deneb.BeaconState | electra.BeaconState): T = deneb.BeaconState | electra.BeaconState): T =
init(result, state) init(result, state)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#epoch-processing
proc process_epoch*( proc process_epoch*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
state: var (altair.BeaconState | bellatrix.BeaconState), state: var (altair.BeaconState | bellatrix.BeaconState),

View File

@ -158,7 +158,7 @@ func get_shuffled_active_validator_indices*(
withState(state): withState(state):
cache.get_shuffled_active_validator_indices(forkyState.data, epoch) cache.get_shuffled_active_validator_indices(forkyState.data, epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#get_active_validator_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_active_validator_indices
func count_active_validators*(state: ForkyBeaconState, func count_active_validators*(state: ForkyBeaconState,
epoch: Epoch, epoch: Epoch,
cache: var StateCache): uint64 = cache: var StateCache): uint64 =
@ -349,6 +349,7 @@ func compute_inverted_shuffled_index*(
countdown(SHUFFLE_ROUND_COUNT.uint8 - 1, 0'u8, 1) countdown(SHUFFLE_ROUND_COUNT.uint8 - 1, 0'u8, 1)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_proposer_index # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-compute_proposer_index
template compute_proposer_index(state: ForkyBeaconState, template compute_proposer_index(state: ForkyBeaconState,
indices: openArray[ValidatorIndex], seed: Eth2Digest, indices: openArray[ValidatorIndex], seed: Eth2Digest,
unshuffleTransform: untyped): Opt[ValidatorIndex] = unshuffleTransform: untyped): Opt[ValidatorIndex] =
@ -373,8 +374,13 @@ template compute_proposer_index(state: ForkyBeaconState,
candidate_index = indices[unshuffleTransform] candidate_index = indices[unshuffleTransform]
random_byte = (eth2digest(buffer).data)[i mod 32] random_byte = (eth2digest(buffer).data)[i mod 32]
effective_balance = state.validators[candidate_index].effective_balance effective_balance = state.validators[candidate_index].effective_balance
const max_effective_balance =
when typeof(state).kind >= ConsensusFork.Electra:
MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei # [Modified in Electra:EIP7251]
else:
MAX_EFFECTIVE_BALANCE.Gwei
if effective_balance * MAX_RANDOM_BYTE >= if effective_balance * MAX_RANDOM_BYTE >=
MAX_EFFECTIVE_BALANCE.Gwei * random_byte: max_effective_balance * random_byte:
res = Opt.some(candidate_index) res = Opt.some(candidate_index)
break break
i += 1 i += 1
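
The `compute_proposer_index` change above raises the sampling ceiling to `MAX_EFFECTIVE_BALANCE_ELECTRA`, so a 32 ETH validator is now accepted on only a small fraction of draws while a max-balance validator is always accepted. A toy version of the acceptance test from the hunk (the real code derives `random_byte` from `eth2digest` over a shuffling seed; here the byte is just a parameter and the constants are the mainnet values):

```nim
const
  MAX_RANDOM_BYTE = 255'u64
  MAX_EFFECTIVE_BALANCE = 32_000_000_000'u64              # 32 ETH in Gwei
  MAX_EFFECTIVE_BALANCE_ELECTRA = 2_048_000_000_000'u64   # 2048 ETH in Gwei

func accepted(effectiveBalance, randomByte: uint64, electra: bool): bool =
  ## Acceptance test used by effective-balance-weighted proposer sampling.
  let ceiling =
    if electra: MAX_EFFECTIVE_BALANCE_ELECTRA
    else: MAX_EFFECTIVE_BALANCE
  effectiveBalance * MAX_RANDOM_BYTE >= ceiling * randomByte

when isMainModule:
  # Before Electra, a 32 ETH validator passes the test for any random byte.
  doAssert accepted(MAX_EFFECTIVE_BALANCE, 255, electra = false)
  # After Electra, the same validator is accepted only roughly 1 draw in 64.
  doAssert not accepted(MAX_EFFECTIVE_BALANCE, 255, electra = true)
  doAssert accepted(MAX_EFFECTIVE_BALANCE, 3, electra = true)
```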
@ -388,7 +394,7 @@ func compute_proposer_index(state: ForkyBeaconState,
## Return from ``indices`` a random index sampled by effective balance. ## Return from ``indices`` a random index sampled by effective balance.
compute_proposer_index(state, indices, seed, shuffled_index) compute_proposer_index(state, indices, seed, shuffled_index)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_beacon_proposer_index # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*( func get_beacon_proposer_index*(
state: ForkyBeaconState, cache: var StateCache, slot: Slot): state: ForkyBeaconState, cache: var StateCache, slot: Slot):
Opt[ValidatorIndex] = Opt[ValidatorIndex] =

View File

@ -10,10 +10,10 @@
import import
./datatypes/base, ./beaconstate, ./forks, ./helpers ./datatypes/base, ./beaconstate, ./forks, ./helpers
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#configuration
const SAFETY_DECAY* = 10'u64 const SAFETY_DECAY* = 10'u64
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period
func compute_weak_subjectivity_period( func compute_weak_subjectivity_period(
cfg: RuntimeConfig, state: ForkyBeaconState): uint64 = cfg: RuntimeConfig, state: ForkyBeaconState): uint64 =
## Returns the weak subjectivity period for the current ``state``. ## Returns the weak subjectivity period for the current ``state``.
@ -49,7 +49,7 @@ func compute_weak_subjectivity_period(
ws_period ws_period
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period
func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot, func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot,
ws_state: ForkedHashedBeaconState, ws_state: ForkedHashedBeaconState,
ws_checkpoint: Checkpoint): bool = ws_checkpoint: Checkpoint): bool =
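
The weak-subjectivity helpers referenced above decide whether a stored checkpoint state is still recent enough to sync from; the final check boils down to comparing the wall-clock epoch against the checkpoint state's epoch plus the computed period. A simplified sketch that takes the period as a parameter instead of calling the real `compute_weak_subjectivity_period` (values illustrative):

```nim
const SLOTS_PER_EPOCH = 32'u64

func isWithinWeakSubjectivityPeriod(
    currentSlot, wsStateSlot, wsPeriodEpochs: uint64): bool =
  ## True when the checkpoint state is at most `wsPeriodEpochs` epochs old.
  let
    currentEpoch = currentSlot div SLOTS_PER_EPOCH
    wsStateEpoch = wsStateSlot div SLOTS_PER_EPOCH
  currentEpoch <= wsStateEpoch + wsPeriodEpochs

when isMainModule:
  # With an (illustrative) 256-epoch period, a state 100 epochs old still passes.
  doAssert isWithinWeakSubjectivityPeriod(
    currentSlot = 356 * SLOTS_PER_EPOCH, wsStateSlot = 256 * SLOTS_PER_EPOCH,
    wsPeriodEpochs = 256)
  doAssert not isWithinWeakSubjectivityPeriod(
    currentSlot = 600 * SLOTS_PER_EPOCH, wsStateSlot = 256 * SLOTS_PER_EPOCH,
    wsPeriodEpochs = 256)
```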

View File

@ -328,7 +328,7 @@ template query[E](
): Future[bool].Raising([CancelledError]) = ): Future[bool].Raising([CancelledError]) =
self.query(e, Nothing()) self.query(e, Nothing())
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md#light-client-sync-process # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md#light-client-sync-process
proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} = proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} =
var nextSyncTaskTime = self.getBeaconTime() var nextSyncTaskTime = self.getBeaconTime()
while true: while true:

View File

@ -90,7 +90,7 @@ p2pProtocol LightClientSync(version = 1,
debug "LC bootstrap request done", peer, blockRoot debug "LC bootstrap request done", peer, blockRoot
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
proc lightClientUpdatesByRange( proc lightClientUpdatesByRange(
peer: Peer, peer: Peer,
startPeriod: SyncCommitteePeriod, startPeriod: SyncCommitteePeriod,
@ -134,7 +134,7 @@ p2pProtocol LightClientSync(version = 1,
debug "LC updates by range request done", peer, startPeriod, count, found debug "LC updates by range request done", peer, startPeriod, count, found
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
proc lightClientFinalityUpdate( proc lightClientFinalityUpdate(
peer: Peer, peer: Peer,
response: SingleChunkResponse[ForkedLightClientFinalityUpdate]) response: SingleChunkResponse[ForkedLightClientFinalityUpdate])

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
import std/[strutils, sequtils, algorithm] import std/[strutils, sequtils, algorithm]
import stew/base10, chronos, chronicles import stew/base10, chronos, chronicles, results
import import
../spec/datatypes/[phase0, altair], ../spec/datatypes/[phase0, altair],
../spec/eth2_apis/rest_types, ../spec/eth2_apis/rest_types,
@ -34,13 +34,20 @@ const
StatusExpirationTime* = chronos.minutes(2) StatusExpirationTime* = chronos.minutes(2)
## Time it takes for the peer's status information to expire. ## Time it takes for the peer's status information to expire.
WeakSubjectivityLogMessage* =
"Database state missing or too old, cannot sync - resync the client " &
"using a trusted node or allow lenient long-range syncing with the " &
"`--long-range-sync=lenient` option. See " &
"https://nimbus.guide/faq.html#what-is-long-range-sync " &
"for more information"
type type
SyncWorkerStatus* {.pure.} = enum SyncWorkerStatus* {.pure.} = enum
Sleeping, WaitingPeer, UpdatingStatus, Requesting, Downloading, Queueing, Sleeping, WaitingPeer, UpdatingStatus, Requesting, Downloading, Queueing,
Processing Processing
SyncManagerFlag* {.pure.} = enum SyncManagerFlag* {.pure.} = enum
NoMonitor NoMonitor, NoGenesisSync
SyncWorker*[A, B] = object SyncWorker*[A, B] = object
future: Future[void].Raising([CancelledError]) future: Future[void].Raising([CancelledError])
@ -52,6 +59,7 @@ type
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: uint64 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: uint64
responseTimeout: chronos.Duration responseTimeout: chronos.Duration
maxHeadAge: uint64 maxHeadAge: uint64
isWithinWeakSubjectivityPeriod: GetBoolCallback
getLocalHeadSlot: GetSlotCallback getLocalHeadSlot: GetSlotCallback
getLocalWallSlot: GetSlotCallback getLocalWallSlot: GetSlotCallback
getSafeSlot: GetSlotCallback getSafeSlot: GetSlotCallback
@ -60,6 +68,7 @@ type
progressPivot: Slot progressPivot: Slot
workers: array[SyncWorkersCount, SyncWorker[A, B]] workers: array[SyncWorkersCount, SyncWorker[A, B]]
notInSyncEvent: AsyncEvent notInSyncEvent: AsyncEvent
shutdownEvent: AsyncEvent
rangeAge: uint64 rangeAge: uint64
chunkSize: uint64 chunkSize: uint64
queue: SyncQueue[A] queue: SyncQueue[A]
@ -124,8 +133,10 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
getFinalizedSlotCb: GetSlotCallback, getFinalizedSlotCb: GetSlotCallback,
getBackfillSlotCb: GetSlotCallback, getBackfillSlotCb: GetSlotCallback,
getFrontfillSlotCb: GetSlotCallback, getFrontfillSlotCb: GetSlotCallback,
weakSubjectivityPeriodCb: GetBoolCallback,
progressPivot: Slot, progressPivot: Slot,
blockVerifier: BlockVerifier, blockVerifier: BlockVerifier,
shutdownEvent: AsyncEvent,
maxHeadAge = uint64(SLOTS_PER_EPOCH * 1), maxHeadAge = uint64(SLOTS_PER_EPOCH * 1),
chunkSize = uint64(SLOTS_PER_EPOCH), chunkSize = uint64(SLOTS_PER_EPOCH),
flags: set[SyncManagerFlag] = {}, flags: set[SyncManagerFlag] = {},
@ -143,6 +154,7 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: minEpochsForBlobSidecarsRequests, MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: minEpochsForBlobSidecarsRequests,
getLocalHeadSlot: getLocalHeadSlotCb, getLocalHeadSlot: getLocalHeadSlotCb,
getLocalWallSlot: getLocalWallSlotCb, getLocalWallSlot: getLocalWallSlotCb,
isWithinWeakSubjectivityPeriod: weakSubjectivityPeriodCb,
getSafeSlot: getSafeSlot, getSafeSlot: getSafeSlot,
getFirstSlot: getFirstSlot, getFirstSlot: getFirstSlot,
getLastSlot: getLastSlot, getLastSlot: getLastSlot,
@ -152,6 +164,7 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
blockVerifier: blockVerifier, blockVerifier: blockVerifier,
notInSyncEvent: newAsyncEvent(), notInSyncEvent: newAsyncEvent(),
direction: direction, direction: direction,
shutdownEvent: shutdownEvent,
ident: ident, ident: ident,
flags: flags flags: flags
) )
@ -566,6 +579,11 @@ proc startWorkers[A, B](man: SyncManager[A, B]) =
for i in 0 ..< len(man.workers): for i in 0 ..< len(man.workers):
man.workers[i].future = syncWorker[A, B](man, i) man.workers[i].future = syncWorker[A, B](man, i)
proc stopWorkers[A, B](man: SyncManager[A, B]) {.async: (raises: []).} =
# Cancelling all the synchronization workers.
let pending = man.workers.mapIt(it.future.cancelAndWait())
await noCancel allFutures(pending)
proc toTimeLeftString*(d: Duration): string = proc toTimeLeftString*(d: Duration): string =
if d == InfiniteDuration: if d == InfiniteDuration:
"--h--m" "--h--m"
@ -711,6 +729,14 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4) & man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4) &
"slots/s (" & map & ":" & currentSlot & ")" "slots/s (" & map & ":" & currentSlot & ")"
if (man.queue.kind == SyncQueueKind.Forward) and
(SyncManagerFlag.NoGenesisSync in man.flags):
if not(man.isWithinWeakSubjectivityPeriod()):
fatal WeakSubjectivityLogMessage, current_slot = wallSlot
await man.stopWorkers()
man.shutdownEvent.fire()
return
if man.remainingSlots() <= man.maxHeadAge: if man.remainingSlots() <= man.maxHeadAge:
man.notInSyncEvent.clear() man.notInSyncEvent.clear()
# We are marking SyncManager as not working only when we are in sync and # We are marking SyncManager as not working only when we are in sync and

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
import std/[heapqueue, tables, strutils, sequtils, math] import std/[heapqueue, tables, strutils, sequtils, math]
import stew/base10, chronos, chronicles import stew/base10, chronos, chronicles, results
import import
../spec/datatypes/[base, phase0, altair], ../spec/datatypes/[base, phase0, altair],
../spec/[helpers, forks], ../spec/[helpers, forks],
@ -24,6 +24,7 @@ logScope:
type type
GetSlotCallback* = proc(): Slot {.gcsafe, raises: [].} GetSlotCallback* = proc(): Slot {.gcsafe, raises: [].}
GetBoolCallback* = proc(): bool {.gcsafe, raises: [].}
ProcessingCallback* = proc() {.gcsafe, raises: [].} ProcessingCallback* = proc() {.gcsafe, raises: [].}
BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock, BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock,
blobs: Opt[BlobSidecars], maybeFinalized: bool): blobs: Opt[BlobSidecars], maybeFinalized: bool):

View File

@ -21,7 +21,7 @@ import
from presto import RestDecodingError from presto import RestDecodingError
const const
largeRequestsTimeout = 60.seconds # Downloading large items such as states. largeRequestsTimeout = 90.seconds # Downloading large items such as states.
smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots. smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots.
proc fetchDepositSnapshot( proc fetchDepositSnapshot(
@ -171,7 +171,7 @@ proc doTrustedNodeSync*(
let stateId = let stateId =
case syncTarget.kind case syncTarget.kind
of TrustedNodeSyncKind.TrustedBlockRoot: of TrustedNodeSyncKind.TrustedBlockRoot:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md#light-client-sync-process # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md#light-client-sync-process
const lcDataFork = LightClientDataFork.high const lcDataFork = LightClientDataFork.high
var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]] var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]]
func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) = func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) =

View File

@ -539,7 +539,6 @@ proc makeBeaconBlockForHeadAndSlot*(
slot, validator_index slot, validator_index
return err("Unable to get execution payload") return err("Unable to get execution payload")
debugComment "flesh out consolidations"
let res = makeBeaconBlockWithRewards( let res = makeBeaconBlockWithRewards(
node.dag.cfg, node.dag.cfg,
state[], state[],
@ -552,7 +551,6 @@ proc makeBeaconBlockForHeadAndSlot*(
exits, exits,
node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot), node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot),
payload, payload,
@[], # consolidations
noRollback, # Temporary state - no need for rollback noRollback, # Temporary state - no need for rollback
cache, cache,
verificationFlags = {}, verificationFlags = {},
@ -1950,8 +1948,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra
updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
# Wait 2 / 3 of the slot time to allow messages to propagate, then collect # Wait 2 / 3 of the slot time to allow messages to propagate, then collect
# the result in aggregates # the result in aggregates
static: static:

View File

@ -38,7 +38,8 @@ macro copyFields*(
# unblinded objects, and can't simply be copied. # unblinded objects, and can't simply be copied.
"transactions_root", "execution_payload", "transactions_root", "execution_payload",
"execution_payload_header", "body", "withdrawals_root", "execution_payload_header", "body", "withdrawals_root",
"deposit_receipts_root", "withdrawal_requests_root"]: "deposit_requests_root", "withdrawal_requests_root",
"consolidation_requests_root"]:
# TODO use stew/assign2 # TODO use stew/assign2
result.add newAssignment( result.add newAssignment(
newDotExpr(dst, ident(name)), newDotExpr(src, ident(name))) newDotExpr(dst, ident(name)), newDotExpr(src, ident(name)))

View File

@ -36,7 +36,7 @@ export results
# - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities # - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities
# #
# Phase 0 spec - Honest Validator - how to avoid slashing # Phase 0 spec - Honest Validator - how to avoid slashing
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#how-to-avoid-slashing # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#how-to-avoid-slashing
# #
# In-depth reading on slashing conditions # In-depth reading on slashing conditions
# #

View File

@ -767,7 +767,7 @@ proc getAggregateAndProofSignature*(v: AttachedValidator,
fork, genesis_validators_root, aggregate_and_proof) fork, genesis_validators_root, aggregate_and_proof)
await v.signData(request) await v.signData(request)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
proc getSyncCommitteeMessage*(v: AttachedValidator, proc getSyncCommitteeMessage*(v: AttachedValidator,
fork: Fork, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
@ -798,7 +798,7 @@ proc getSyncCommitteeMessage*(v: AttachedValidator,
) )
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork, proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
slot: Slot, slot: Slot,
@ -818,7 +818,7 @@ proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
) )
await v.signData(request) await v.signData(request)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork, proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
contribution_and_proof: ContributionAndProof contribution_and_proof: ContributionAndProof

View File

@ -18,8 +18,8 @@ const
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH" "Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"
versionMajor* = 24 versionMajor* = 24
versionMinor* = 5 versionMinor* = 6
versionBuild* = 1 versionBuild* = 0
versionBlob* = "stateofus" # Single word - ends up in the default graffiti versionBlob* = "stateofus" # Single word - ends up in the default graffiti

View File

@ -1,3 +1,12 @@
# beacon_chain
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import import
chronicles, chronicles/[topics_registry, timings], chronicles, chronicles/[topics_registry, timings],
confutils, confutils/std/net, confutils, confutils/std/net,
@ -6,11 +15,11 @@ import
type type
Config = object Config = object
serverIpAddress {. serverIpAddress {.
defaultValue: ValidIpAddress.init("127.0.0.1") defaultValue: static(parseIpAddress("127.0.0.1"))
defaultValueDesc: "127.0.0.1" defaultValueDesc: "127.0.0.1"
desc: "IP address of the beacon node's REST server" desc: "IP address of the beacon node's REST server"
abbr: "a" abbr: "a"
name: "address" }: ValidIpAddress name: "address" }: IpAddress
serverPort {. serverPort {.
defaultValue: 5052 defaultValue: 5052
@ -29,7 +38,7 @@ type
abbr: "n" abbr: "n"
name: "count" }: uint name: "count" }: uint
proc main = proc main() {.raises: [ConfigurationError, HttpError, OSError].} =
let config = Config.load let config = Config.load
let serverAddress = initTAddress(config.serverIpAddress, config.serverPort) let serverAddress = initTAddress(config.serverIpAddress, config.serverPort)
let client = RestClientRef.new(serverAddress) let client = RestClientRef.new(serverAddress)
@ -43,10 +52,10 @@ proc main =
info.logTime(apiName): info.logTime(apiName):
for slot in config.startSlot ..< (config.startSlot + config.requestsCount): for slot in config.startSlot ..< (config.startSlot + config.requestsCount):
let ident = StateIdent(kind: StateQueryKind.Slot, slot: slot.Slot) let ident = StateIdent(kind: StateQueryKind.Slot, slot: slot.Slot)
discard waitFor client.`apiNameIdent`(ident) discard waitFor noCancel client.`apiNameIdent`(ident)
benchmark(getStateRoot) benchmark(getStateRoot)
benchmark(getStateFork) benchmark(getStateForkPlain)
benchmark(getStateFinalityCheckpoints) benchmark(getStateFinalityCheckpoints)
benchmark(getStateValidatorBalances) benchmark(getStateValidatorBalances)

ci/Jenkinsfile vendored
View File

@ -183,5 +183,5 @@ def getAgentLabel() {
} }
def nimCommitForJob() { def nimCommitForJob() {
return JOB_NAME.contains('nimv2') ? 'upstream/version-2-0' : '' return JOB_NAME.contains('nimv2') ? 'v2.0.6' : ''
} }

View File

@ -6,7 +6,7 @@ This is a WIP document to explain the attestation flows.
It is important to distinguish attestation `validation` from attestation `verification`. It is important to distinguish attestation `validation` from attestation `verification`.
- Attestation `validation` is defined in the P2P specs. Validated attestations can be forwarded on GossipSub. - Attestation `validation` is defined in the P2P specs. Validated attestations can be forwarded on GossipSub.
- Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
- Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block. - Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block.
- https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations

View File

@ -9,7 +9,7 @@ Important distinction:
https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block. https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block.
A validated block can be forwarded on gossipsub. A validated block can be forwarded on gossipsub.
- and we distinguish `verification` which is defined in consensus specs: - and we distinguish `verification` which is defined in consensus specs:
https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#block-processing https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#block-processing
A block needs to be verified to enter fork choice, the DAG and the BeaconChainDB A block needs to be verified to enter fork choice, the DAG and the BeaconChainDB
In particular in terms of costly checks validating a block only requires checking: In particular in terms of costly checks validating a block only requires checking:

View File

@ -183,7 +183,7 @@ Each era is identified by when it ends. Thus, the genesis era is era `0`, follow
`.era` file names follow a simple convention: `<config-name>-<era-number>-<era-count>-<short-historical-root>.era`: `.era` file names follow a simple convention: `<config-name>-<era-number>-<era-count>-<short-historical-root>.era`:
* `config-name` is the `CONFIG_NAME` field of the runtime configuration (`mainnet`, `prater`, `sepolia`, `holesky`, etc) * `config-name` is the `CONFIG_NAME` field of the runtime configuration (`mainnet`, `sepolia`, `holesky`, etc)
* `era-number` is the number of the _first_ era stored in the file - for example, the genesis era file has number 0 - as a 5-digit 0-filled decimal integer * `era-number` is the number of the _first_ era stored in the file - for example, the genesis era file has number 0 - as a 5-digit 0-filled decimal integer
* `short-era-root` is the first 4 bytes of the last historical root in the _last_ state in the era file, lower-case hex-encoded (8 characters), except the genesis era which instead uses the `genesis_validators_root` field from the genesis state. * `short-era-root` is the first 4 bytes of the last historical root in the _last_ state in the era file, lower-case hex-encoded (8 characters), except the genesis era which instead uses the `genesis_validators_root` field from the genesis state.
* The root is available as `state.historical_roots[era - 1]` except for genesis, which is `state.genesis_validators_root` * The root is available as `state.historical_roots[era - 1]` except for genesis, which is `state.genesis_validators_root`
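The naming convention above can be pinned down with a small, self-contained sketch. This is not code from the repository: `eraFileName` is a hypothetical helper, and zero-padding `era-count` to five digits is an assumption made for the example.

```nim
# Minimal sketch of the `.era` file naming convention described above.
# `eraFileName` is a hypothetical helper; padding `era-count` to five digits
# is an assumption, and the short root is taken as an already-computed
# 8-character lower-case hex string.
import std/strformat

proc eraFileName(configName: string; era, eraCount: uint64;
                 shortEraRoot: string): string =
  &"{configName}-{era:05}-{eraCount:05}-{shortEraRoot}.era"

# Illustrative values only:
echo eraFileName("mainnet", 1234, 1, "8e8f9ab1")
# -> mainnet-01234-00001-8e8f9ab1.era
```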

View File

@ -104,7 +104,7 @@ The following sections explain how to do this for certain EL clients.
## Running the light client ## Running the light client
The light client starts syncing from a trusted block. The light client starts syncing from a trusted block.
This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client. This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client.
### 1. Obtaining a trusted block root ### 1. Obtaining a trusted block root
@ -178,17 +178,15 @@ INF 2022-11-21 18:03:27.984+01:00 New LC optimistic header opt
WRN 2022-11-21 18:03:31.419+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=0 new_peers=@[] current_peers=7 wanted_peers=160 WRN 2022-11-21 18:03:31.419+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=0 new_peers=@[] current_peers=7 wanted_peers=160
INF 2022-11-21 18:03:36.001+01:00 Slot start slot=1109718 epoch=34678 sync=synced peers=7 head=c5464508:1109716 finalized=c092a1d1:1109216 delay=1ms98us INF 2022-11-21 18:03:36.001+01:00 Slot start slot=1109718 epoch=34678 sync=synced peers=7 head=c5464508:1109716 finalized=c092a1d1:1109216 delay=1ms98us
INF 2022-11-21 18:03:40.012+01:00 New LC optimistic header optimistic_header="(beacon: (slot: 1109717, proposer_index: 835, parent_root: \"c5464508\", state_root: \"13f823f8\"))" INF 2022-11-21 18:03:40.012+01:00 New LC optimistic header optimistic_header="(beacon: (slot: 1109717, proposer_index: 835, parent_root: \"c5464508\", state_root: \"13f823f8\"))"
NTC 2022-11-21 18:03:40.012+01:00 New LC optimistic block opt=99ab28aa:1109717 wallSlot=1109718
WRN 2022-11-21 18:03:40.422+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=1 new_peers=@[] current_peers=7 wanted_peers=160 WRN 2022-11-21 18:03:40.422+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=1 new_peers=@[] current_peers=7 wanted_peers=160
INF 2022-11-21 18:03:48.001+01:00 Slot start slot=1109719 epoch=34678 sync=synced peers=7 head=99ab28aa:1109717 finalized=c092a1d1:1109216 delay=1ms53us INF 2022-11-21 18:03:48.001+01:00 Slot start slot=1109719 epoch=34678 sync=synced peers=7 head=99ab28aa:1109717 finalized=c092a1d1:1109216 delay=1ms53us
WRN 2022-11-21 18:03:50.205+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=0 new_peers=@[] current_peers=7 wanted_peers=160 WRN 2022-11-21 18:03:50.205+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=0 new_peers=@[] current_peers=7 wanted_peers=160
INF 2022-11-21 18:04:00.001+01:00 Slot start slot=1109720 epoch=34678 sync=synced peers=7 head=99ab28aa:1109717 finalized=c092a1d1:1109216 delay=1ms145us INF 2022-11-21 18:04:00.001+01:00 Slot start slot=1109720 epoch=34678 sync=synced peers=7 head=99ab28aa:1109717 finalized=c092a1d1:1109216 delay=1ms145us
INF 2022-11-21 18:04:03.982+01:00 New LC optimistic header optimistic_header="(beacon: (slot: 1109718, proposer_index: 1202, parent_root: \"99ab28aa\", state_root: \"7f7f88d2\"))" INF 2022-11-21 18:04:03.982+01:00 New LC optimistic header optimistic_header="(beacon: (slot: 1109718, proposer_index: 1202, parent_root: \"99ab28aa\", state_root: \"7f7f88d2\"))"
NTC 2022-11-21 18:04:03.982+01:00 New LC optimistic block opt=ab007266:1109718 wallSlot=1109720
``` ```
!!! note !!! note
The [light client protocol](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md) depends on consensus layer (CL) full nodes to serve additional data. The [light client protocol](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md) depends on consensus layer (CL) full nodes to serve additional data.
As this is a new protocol, not all implementations are supporting it yet. As this is a new protocol, not all implementations are supporting it yet.
Therefore, it may take several minutes to discover supporting peers, during which no log messages may be produced. Therefore, it may take several minutes to discover supporting peers, during which no log messages may be produced.

View File

@ -112,6 +112,7 @@ The following options are available:
--light-client-data-import-mode Which classes of light client data to import. Must be one of: none, only-new, --light-client-data-import-mode Which classes of light client data to import. Must be one of: none, only-new,
full (slow startup), on-demand (may miss validator duties) [=only-new]. full (slow startup), on-demand (may miss validator duties) [=only-new].
--light-client-data-max-periods Maximum number of sync committee periods to retain light client data. --light-client-data-max-periods Maximum number of sync committee periods to retain light client data.
--long-range-sync Enable long-range syncing (genesis sync) [=LongRangeSyncMode.Light].
--in-process-validators Disable the push model (the beacon node tells a signing process with the private --in-process-validators Disable the push model (the beacon node tells a signing process with the private
keys of the validators what to sign and when) and load the validators in the keys of the validators what to sign and when) and load the validators in the
beacon node itself [=true]. beacon node itself [=true].

View File

@ -135,7 +135,7 @@ If you are already using a threshold signing setup (e.g. based on Vouch and Dirk
The verifying Web3Signer is an experimental extension to the [Web3Signer protocol](https://consensys.github.io/web3signer/web3signer-eth2.html#tag/Signing/operation/ETH2_SIGN) which allows the remote signer to verify certain details of the signed blocks before creating a signature (for example, the signer may require the signed block to have a particular fee recipient value). The verifying Web3Signer is an experimental extension to the [Web3Signer protocol](https://consensys.github.io/web3signer/web3signer-eth2.html#tag/Signing/operation/ETH2_SIGN) which allows the remote signer to verify certain details of the signed blocks before creating a signature (for example, the signer may require the signed block to have a particular fee recipient value).
To enable this use case, the `BLOCK_V2` request type of the `/api/v1/eth2/sign/{identifier}` endpoint is extended with an additional array field named `proofs`. The array consists of objects with the properties `index`, `proof` and `value`, where `index` is an arbitrary [generalized index](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md#generalized-merkle-tree-index) of any property nested under the block body and `proof` is its corresponding Merkle proof against the block body root included in the request. The `value` property is optional and it is included only when the SSZ hash of the field included in the Merkle proof doesn't match its value. To enable this use case, the `BLOCK_V2` request type of the `/api/v1/eth2/sign/{identifier}` endpoint is extended with an additional array field named `proofs`. The array consists of objects with the properties `index`, `proof` and `value`, where `index` is an arbitrary [generalized index](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md#generalized-merkle-tree-index) of any property nested under the block body and `proof` is its corresponding Merkle proof against the block body root included in the request. The `value` property is optional and it is included only when the SSZ hash of the field included in the Merkle proof doesn't match its value.
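For illustration, here is a minimal Nim sketch of how a single entry of that `proofs` array could be assembled. The field layout follows the description above, but the placeholder values, the hex-string encoding and the choice of representing `index` as a string are assumptions, not taken from the Web3Signer or Nimbus sources.

```nim
# Hypothetical sketch of the extra `proofs` array in a verifying Web3Signer
# BLOCK_V2 request. All values are placeholders; encoding details such as
# whether `index` is a string or a number are assumptions.
import std/[json, strutils]

let proofs = %*[
  {
    # generalized index of the proven block-body field (placeholder)
    "index": "401",
    # Merkle branch from that field up to the block body root
    "proof": [
      "0x" & repeat('0', 64),
      "0x" & repeat('1', 64)
    ],
    # optional: present only when the proven leaf differs from the field's
    # own SSZ hash
    "value": "0x" & repeat('2', 64)
  }
]

echo proofs.pretty()
```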
Since the generalized index of a particular field may change in a hard-fork, in the remote keystore format the proven fields are usually specified by their name: Since the generalized index of a particular field may change in a hard-fork, in the remote keystore format the proven fields are usually specified by their name:

View File

@ -379,9 +379,9 @@ proc createEnr(rng: var HmacDrbgContext,
bootstrapEnr = enr.Record.init( bootstrapEnr = enr.Record.init(
1, # sequence number 1, # sequence number
networkKeys.seckey.asEthKey, networkKeys.seckey.asEthKey,
some(address), Opt.some(address),
some(port), Opt.some(port),
some(port), Opt.some(port),
[ [
toFieldPair(enrForkIdField, forkId), toFieldPair(enrForkIdField, forkId),
toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets)) toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets))

View File

@ -85,7 +85,7 @@ proc makeSimulationBlock(
var blck = partialBeaconBlock( var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload, @[]) attestations, deposits, exits, sync_aggregate, execution_payload)
let res = process_block( let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache) cfg, state.data, blck.asSigVerified(), verificationFlags, cache)
@ -128,7 +128,7 @@ proc makeSimulationBlock(
var blck = partialBeaconBlock( var blck = partialBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, exits, sync_aggregate, execution_payload, @[]) attestations, deposits, exits, sync_aggregate, execution_payload)
let res = process_block( let res = process_block(
cfg, state.data, blck.asSigVerified(), verificationFlags, cache) cfg, state.data, blck.asSigVerified(), verificationFlags, cache)

View File

@ -295,7 +295,6 @@ cli do(validatorsDir: string, secretsDir: string,
BeaconBlockValidatorChanges(), BeaconBlockValidatorChanges(),
syncAggregate, syncAggregate,
payload, payload,
@[], # consolidations
noRollback, noRollback,
cache).get() cache).get()

View File

@ -1,68 +0,0 @@
# beacon_chain
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
# https://notes.ethereum.org/@9AeMAlpyQYaAAyuj47BzRw/rkwW3ceVY
# Monitor traffic: socat -v TCP-LISTEN:9550,fork TCP-CONNECT:127.0.0.1:8550
import
std/options,
stew/results,
chronos,
../beacon_chain/el/el_manager
from std/os import paramCount, paramStr
from nimcrypto/utils import fromHex
from web3/engine_api_types import PayloadExecutionStatus
from ../beacon_chain/networking/network_metadata import Eth1Network
from ../beacon_chain/spec/datatypes/base import ZERO_HASH
from ../beacon_chain/spec/presets import Eth1Address, defaultRuntimeConfig
# TODO factor this out and have a version with the result of the JWT secret
# slurp for testing purposes
proc readJwtSecret(jwtSecretFile: string): Result[seq[byte], cstring] =
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/authentication.md#key-distribution
# If such a parameter is given, but the file cannot be read, or does not
# contain a hex-encoded key of 256 bits, the client SHOULD treat this as an
# error: either abort the startup, or show error and continue without
# exposing the authenticated port.
const MIN_SECRET_LEN = 32
try:
let lines = readLines(jwtSecretFile, 1)
if lines.len > 0:
# Secret JWT key is parsed in constant time using nimcrypto:
# https://github.com/cheatfate/nimcrypto/pull/44
let secret = utils.fromHex(lines[0])
if secret.len >= MIN_SECRET_LEN:
ok(secret)
else:
err("JWT secret not at least 256 bits")
else:
err("JWT secret file empty")
except IOError as exc:
err("JWT secret file could not be read from")
proc run() {.async.} =
if paramCount() < 2:
echo "args are: web3url jwtsecretfilename"
let
elManager = newClone ELManager.init(
defaultRuntimeConfig, db = nil, nil, @[paramStr(1)],
none(DepositContractSnapshot), none(Eth1Network), false,
some readJwtSecret(paramStr(2)).get)
try:
await elManager.exchangeTransitionConfiguration()
except ValueError as exc:
# Expected, since nothing here sets up the Nimbus TTD correctly
echo "exchangeTransitionConfiguration ValueError: " & exc.msg
echo "Invalid TTD errors are fine in this context"
waitFor run()

View File

@ -23,7 +23,7 @@ import
# Test utilities # Test utilities
../../testutil, ../../testblockutil ../../testutil, ../../testblockutil
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44
proc compute_aggregate_sync_committee_signature( proc compute_aggregate_sync_committee_signature(
cfg: RuntimeConfig, cfg: RuntimeConfig,
forked: ForkedHashedBeaconState, forked: ForkedHashedBeaconState,
@ -172,14 +172,15 @@ proc runTest(storeDataFork: static LightClientDataFork) =
# Sync committee signing the attested_header # Sync committee signing the attested_header
(sync_aggregate, signature_slot) = get_sync_aggregate(cfg, forked[]) (sync_aggregate, signature_slot) = get_sync_aggregate(cfg, forked[])
next_sync_committee = SyncCommittee() next_sync_committee = SyncCommittee()
next_sync_committee_branch = default(altair.NextSyncCommitteeBranch) next_sync_committee_branch =
default(storeDataFork.NextSyncCommitteeBranch)
# Ensure that finality checkpoint is genesis # Ensure that finality checkpoint is genesis
check state.finalized_checkpoint.epoch == 0 check state.finalized_checkpoint.epoch == 0
# Finality is unchanged # Finality is unchanged
let let
finality_header = default(storeDataFork.LightClientHeader) finality_header = default(storeDataFork.LightClientHeader)
finality_branch = default(altair.FinalityBranch) finality_branch = default(storeDataFork.FinalityBranch)
update = storeDataFork.LightClientUpdate( update = storeDataFork.LightClientUpdate(
attested_header: attested_header, attested_header: attested_header,
@ -228,11 +229,12 @@ proc runTest(storeDataFork: static LightClientDataFork) =
# Sync committee signing the attested_header # Sync committee signing the attested_header
(sync_aggregate, signature_slot) = get_sync_aggregate(cfg, forked[]) (sync_aggregate, signature_slot) = get_sync_aggregate(cfg, forked[])
next_sync_committee = SyncCommittee() next_sync_committee = SyncCommittee()
next_sync_committee_branch = default(altair.NextSyncCommitteeBranch) next_sync_committee_branch =
default(storeDataFork.NextSyncCommitteeBranch)
# Finality is unchanged # Finality is unchanged
finality_header = default(storeDataFork.LightClientHeader) finality_header = default(storeDataFork.LightClientHeader)
finality_branch = default(altair.FinalityBranch) finality_branch = default(storeDataFork.FinalityBranch)
update = storeDataFork.LightClientUpdate( update = storeDataFork.LightClientUpdate(
attested_header: attested_header, attested_header: attested_header,
@ -283,12 +285,13 @@ proc runTest(storeDataFork: static LightClientDataFork) =
# Sync committee is updated # Sync committee is updated
template next_sync_committee(): auto = state.next_sync_committee template next_sync_committee(): auto = state.next_sync_committee
let let
next_sync_committee_branch = next_sync_committee_branch = normalize_merkle_branch(
state.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get state.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get,
storeDataFork.NEXT_SYNC_COMMITTEE_GINDEX)
# Finality is unchanged # Finality is unchanged
finality_header = default(storeDataFork.LightClientHeader) finality_header = default(storeDataFork.LightClientHeader)
finality_branch = default(altair.FinalityBranch) finality_branch = default(storeDataFork.FinalityBranch)
update = storeDataFork.LightClientUpdate( update = storeDataFork.LightClientUpdate(
attested_header: attested_header, attested_header: attested_header,
@ -345,7 +348,8 @@ proc runTest(storeDataFork: static LightClientDataFork) =
# Updated sync_committee and finality # Updated sync_committee and finality
next_sync_committee = SyncCommittee() next_sync_committee = SyncCommittee()
next_sync_committee_branch = default(altair.NextSyncCommitteeBranch) next_sync_committee_branch =
default(storeDataFork.NextSyncCommitteeBranch)
finalized_block = blocks[SLOTS_PER_EPOCH - 1].altairData finalized_block = blocks[SLOTS_PER_EPOCH - 1].altairData
finalized_header = finalized_block.toLightClientHeader(storeDataFork) finalized_header = finalized_block.toLightClientHeader(storeDataFork)
check: check:
@ -354,7 +358,9 @@ proc runTest(storeDataFork: static LightClientDataFork) =
finalized_header.beacon.hash_tree_root() == finalized_header.beacon.hash_tree_root() ==
state.finalized_checkpoint.root state.finalized_checkpoint.root
let let
finality_branch = state.build_proof(altair.FINALIZED_ROOT_GINDEX).get finality_branch = normalize_merkle_branch(
state.build_proof(altair.FINALIZED_ROOT_GINDEX).get,
storeDataFork.FINALIZED_ROOT_GINDEX)
update = storeDataFork.LightClientUpdate( update = storeDataFork.LightClientUpdate(
attested_header: attested_header, attested_header: attested_header,

View File

@ -32,10 +32,10 @@ const
OpAttSlashingDir = OpDir/"attester_slashing" OpAttSlashingDir = OpDir/"attester_slashing"
OpBlockHeaderDir = OpDir/"block_header" OpBlockHeaderDir = OpDir/"block_header"
OpBlsToExecutionChangeDir = OpDir/"bls_to_execution_change" OpBlsToExecutionChangeDir = OpDir/"bls_to_execution_change"
OpConsolidationDir = OpDir/"consolidation" OpConsolidationRequestDir = OpDir/"consolidation_request"
OpDepositReceiptDir = OpDir/"deposit_receipt" OpDepositRequestDir = OpDir/"deposit_request"
OpDepositsDir = OpDir/"deposit" OpDepositsDir = OpDir/"deposit"
OpExecutionLayerWithdrawalRequestDir = OpDir/"execution_layer_withdrawal_request" OpWithdrawalRequestDir = OpDir/"withdrawal_request"
OpExecutionPayloadDir = OpDir/"execution_payload" OpExecutionPayloadDir = OpDir/"execution_payload"
OpProposerSlashingDir = OpDir/"proposer_slashing" OpProposerSlashingDir = OpDir/"proposer_slashing"
OpSyncAggregateDir = OpDir/"sync_aggregate" OpSyncAggregateDir = OpDir/"sync_aggregate"
@ -45,14 +45,13 @@ const
baseDescription = "EF - Electra - Operations - " baseDescription = "EF - Electra - Operations - "
var testDirs = toHashSet([ const testDirs = toHashSet([
OpAttestationsDir, OpAttSlashingDir, OpBlockHeaderDir, OpAttestationsDir, OpAttSlashingDir, OpBlockHeaderDir,
OpBlsToExecutionChangeDir, OpDepositReceiptDir, OpDepositsDir, OpBlsToExecutionChangeDir, OpConsolidationRequestDir, OpDepositRequestDir,
OpExecutionLayerWithdrawalRequestDir, OpExecutionPayloadDir, OpDepositsDir, OpWithdrawalRequestDir, OpExecutionPayloadDir,
OpProposerSlashingDir, OpSyncAggregateDir, OpVoluntaryExitDir, OpProposerSlashingDir, OpSyncAggregateDir, OpVoluntaryExitDir,
OpWithdrawalsDir]) OpWithdrawalsDir])
when const_preset == "minimal":
testDirs.incl OpConsolidationDir
doAssert toHashSet( doAssert toHashSet(
mapIt(toSeq(walkDir(OpDir, relative = false)), it.path)) == testDirs mapIt(toSeq(walkDir(OpDir, relative = false)), it.path)) == testDirs
@ -149,24 +148,19 @@ suite baseDescription & "BLS to execution change " & preset():
OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change", OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change",
applyBlsToExecutionChange, path) applyBlsToExecutionChange, path)
when const_preset == "minimal": suite baseDescription & "Consolidation Request " & preset():
suite baseDescription & "Consolidation " & preset(): proc applyConsolidationRequest(
proc applyConsolidation( preState: var electra.BeaconState,
preState: var electra.BeaconState, consolidation_request: ConsolidationRequest): Result[void, cstring] =
signed_consolidation: SignedConsolidation): var cache: StateCache
Result[void, cstring] = process_consolidation_request(
var cache: StateCache defaultRuntimeConfig, preState, consolidation_request, cache)
process_consolidation( ok()
defaultRuntimeConfig, preState, signed_consolidation, cache)
for path in walkTests(OpConsolidationDir): for path in walkTests(OpConsolidationRequestDir):
if path in [ runTest[ConsolidationRequest, typeof applyConsolidationRequest](
"invalid_exceed_pending_consolidations_limit", # apparently invalid prestate SSZ OpConsolidationRequestDir, suiteName, "Consolidation Request",
]: "consolidation_request", applyConsolidationRequest, path)
continue
runTest[SignedConsolidation, typeof applyConsolidation](
OpConsolidationDir, suiteName, "Consolidation", "consolidation",
applyConsolidation, path)
from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter
@ -182,18 +176,18 @@ suite baseDescription & "Deposit " & preset():
runTest[Deposit, typeof applyDeposit]( runTest[Deposit, typeof applyDeposit](
OpDepositsDir, suiteName, "Deposit", "deposit", applyDeposit, path) OpDepositsDir, suiteName, "Deposit", "deposit", applyDeposit, path)
suite baseDescription & "Deposit Receipt " & preset(): suite baseDescription & "Deposit Request " & preset():
func applyDepositReceipt( func applyDepositRequest(
preState: var electra.BeaconState, depositReceipt: DepositReceipt): preState: var electra.BeaconState, depositRequest: DepositRequest):
Result[void, cstring] = Result[void, cstring] =
process_deposit_receipt( process_deposit_request(
defaultRuntimeConfig, preState, defaultRuntimeConfig, preState,
constructBloomFilter(preState.validators.asSeq)[], depositReceipt, {}) constructBloomFilter(preState.validators.asSeq)[], depositRequest, {})
for path in walkTests(OpDepositReceiptDir): for path in walkTests(OpDepositRequestDir):
runTest[DepositReceipt, typeof applyDepositReceipt]( runTest[DepositRequest, typeof applyDepositRequest](
OpDepositReceiptDir, suiteName, "Deposit Receipt", "deposit_receipt", OpDepositRequestDir, suiteName, "Deposit Request", "deposit_request",
applyDepositReceipt, path) applyDepositRequest, path)
suite baseDescription & "Execution Payload " & preset(): suite baseDescription & "Execution Payload " & preset():
func makeApplyExecutionPayloadCb(path: string): auto = func makeApplyExecutionPayloadCb(path: string): auto =
@ -212,23 +206,19 @@ suite baseDescription & "Execution Payload " & preset():
OpExecutionPayloadDir, suiteName, "Execution Payload", "body", OpExecutionPayloadDir, suiteName, "Execution Payload", "body",
applyExecutionPayload, path) applyExecutionPayload, path)
suite baseDescription & "Execution Layer Withdrawal Request " & preset(): suite baseDescription & "Withdrawal Request " & preset():
func applyExecutionLayerWithdrawalRequest( func applyWithdrawalRequest(
preState: var electra.BeaconState, preState: var electra.BeaconState, withdrawalRequest: WithdrawalRequest):
executionLayerWithdrawalRequest: ExecutionLayerWithdrawalRequest):
Result[void, cstring] = Result[void, cstring] =
var cache: StateCache var cache: StateCache
process_execution_layer_withdrawal_request( process_withdrawal_request(
defaultRuntimeConfig, preState, executionLayerWithdrawalRequest, cache) defaultRuntimeConfig, preState, withdrawalRequest, cache)
ok() ok()
for path in walkTests(OpExecutionLayerWithdrawalRequestDir): for path in walkTests(OpWithdrawalRequestDir):
runTest[ExecutionLayerWithdrawalRequest, runTest[WithdrawalRequest, typeof applyWithdrawalRequest](
typeof applyExecutionLayerWithdrawalRequest]( OpWithdrawalRequestDir, suiteName, "Withdrawal Request",
OpExecutionLayerWithdrawalRequestDir, suiteName, "withdrawal_request", applyWithdrawalRequest, path)
"Execution Layer Withdrawal Request",
"execution_layer_withdrawal_request",
applyExecutionLayerWithdrawalRequest, path)
suite baseDescription & "Proposer Slashing " & preset(): suite baseDescription & "Proposer Slashing " & preset():
proc applyProposerSlashing( proc applyProposerSlashing(

View File

@ -128,16 +128,14 @@ suite "EF - Electra - SSZ consensus objects " & preset():
of "BlobSidecar": checkSSZ(BlobSidecar, path, hash) of "BlobSidecar": checkSSZ(BlobSidecar, path, hash)
of "BLSToExecutionChange": checkSSZ(BLSToExecutionChange, path, hash) of "BLSToExecutionChange": checkSSZ(BLSToExecutionChange, path, hash)
of "Checkpoint": checkSSZ(Checkpoint, path, hash) of "Checkpoint": checkSSZ(Checkpoint, path, hash)
of "Consolidation": checkSSZ(Consolidation, path, hash) of "ConsolidationRequest": checkSSZ(ConsolidationRequest, path, hash)
of "ContributionAndProof": checkSSZ(ContributionAndProof, path, hash) of "ContributionAndProof": checkSSZ(ContributionAndProof, path, hash)
of "Deposit": checkSSZ(Deposit, path, hash) of "Deposit": checkSSZ(Deposit, path, hash)
of "DepositData": checkSSZ(DepositData, path, hash) of "DepositData": checkSSZ(DepositData, path, hash)
of "DepositMessage": checkSSZ(DepositMessage, path, hash) of "DepositMessage": checkSSZ(DepositMessage, path, hash)
of "DepositReceipt": checkSSZ(DepositReceipt, path, hash) of "DepositRequest": checkSSZ(DepositRequest, path, hash)
of "Eth1Block": checkSSZ(Eth1Block, path, hash) of "Eth1Block": checkSSZ(Eth1Block, path, hash)
of "Eth1Data": checkSSZ(Eth1Data, path, hash) of "Eth1Data": checkSSZ(Eth1Data, path, hash)
of "ExecutionLayerWithdrawalRequest":
checkSSZ(ExecutionLayerWithdrawalRequest, path, hash)
of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash) of "ExecutionPayload": checkSSZ(ExecutionPayload, path, hash)
of "ExecutionPayloadHeader": of "ExecutionPayloadHeader":
checkSSZ(ExecutionPayloadHeader, path, hash) checkSSZ(ExecutionPayloadHeader, path, hash)
@ -172,7 +170,6 @@ suite "EF - Electra - SSZ consensus objects " & preset():
checkSSZ(SignedBLSToExecutionChange, path, hash) checkSSZ(SignedBLSToExecutionChange, path, hash)
of "SignedContributionAndProof": of "SignedContributionAndProof":
checkSSZ(SignedContributionAndProof, path, hash) checkSSZ(SignedContributionAndProof, path, hash)
of "SignedConsolidation": checkSSZ(SignedConsolidation, path, hash)
of "SignedVoluntaryExit": checkSSZ(SignedVoluntaryExit, path, hash) of "SignedVoluntaryExit": checkSSZ(SignedVoluntaryExit, path, hash)
of "SigningData": checkSSZ(SigningData, path, hash) of "SigningData": checkSSZ(SigningData, path, hash)
of "SyncAggregate": checkSSZ(SyncAggregate, path, hash) of "SyncAggregate": checkSSZ(SyncAggregate, path, hash)
@ -185,5 +182,6 @@ suite "EF - Electra - SSZ consensus objects " & preset():
of "Withdrawal": checkSSZ(Withdrawal, path, hash) of "Withdrawal": checkSSZ(Withdrawal, path, hash)
of "Validator": checkSSZ(Validator, path, hash) of "Validator": checkSSZ(Validator, path, hash)
of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash) of "VoluntaryExit": checkSSZ(VoluntaryExit, path, hash)
of "WithdrawalRequest": checkSSZ(WithdrawalRequest, path, hash)
else: else:
raise newException(ValueError, "Unsupported test: " & sszType) raise newException(ValueError, "Unsupported test: " & sszType)

View File

@ -39,4 +39,4 @@ proc initGenesisState*(
when isMainModule: when isMainModule:
# Smoke test # Smoke test
let state = initGenesisState(num_validators = SLOTS_PER_EPOCH) let state = initGenesisState(num_validators = SLOTS_PER_EPOCH)
doAssert state.validators.len == SLOTS_PER_EPOCH doAssert getStateField(state[], validators).lenu64 == SLOTS_PER_EPOCH

View File

@ -17,7 +17,7 @@ import
proc new(T: type Eth2DiscoveryProtocol, proc new(T: type Eth2DiscoveryProtocol,
pk: keys.PrivateKey, pk: keys.PrivateKey,
enrIp: Option[IpAddress], enrTcpPort, enrUdpPort: Option[Port], enrIp: Opt[IpAddress], enrTcpPort, enrUdpPort: Opt[Port],
bindPort: Port, bindIp: IpAddress, bindPort: Port, bindIp: IpAddress,
enrFields: openArray[(string, seq[byte])] = [], enrFields: openArray[(string, seq[byte])] = [],
rng: ref HmacDrbgContext): T = rng: ref HmacDrbgContext): T =
@ -32,7 +32,7 @@ proc generateNode(rng: ref HmacDrbgContext, port: Port,
except ValueError: except ValueError:
raiseAssert "Argument is a valid IP address" raiseAssert "Argument is a valid IP address"
Eth2DiscoveryProtocol.new(keys.PrivateKey.random(rng[]), Eth2DiscoveryProtocol.new(keys.PrivateKey.random(rng[]),
some(ip), some(port), some(port), port, ip, enrFields, rng = rng) Opt.some(ip), Opt.some(port), Opt.some(port), port, ip, enrFields, rng = rng)
# TODO: Add tests with a syncnets preference # TODO: Add tests with a syncnets preference
const noSyncnetsPreference = SyncnetBits() const noSyncnetsPreference = SyncnetBits()

Some files were not shown because too many files have changed in this diff