Merge branch 'feat/eip-7495' into feat/eip-6493

Etan Kissling 2024-08-06 13:28:29 +02:00
commit e7f0b273ca
No known key found for this signature in database
GPG Key ID: B21DA824C5A3D03D
166 changed files with 3133 additions and 1478 deletions


@@ -35,20 +35,15 @@ jobs:
 cpu: amd64
 - os: windows
 cpu: amd64
-branch: [~, upstream/version-1-6, upstream/version-2-0]
+branch: [~, upstream/version-2-0]
 exclude:
 - target:
 os: macos
-branch: upstream/version-1-6
+branch: upstream/version-2-0
 - target:
 os: windows
-branch: upstream/version-1-6
+branch: upstream/version-2-0
-- target:
-os: windows
-branch: ~
 include:
-- branch: upstream/version-1-6
-branch-short: version-1-6
 - branch: upstream/version-2-0
 branch-short: version-2-0
 nimflags-extra: --mm:refc
@@ -226,9 +221,20 @@ jobs:
 - name: Build files with isMainModule
 run: |
+executables=(
+"beacon_chain/el/deposit_contract"
+"beacon_chain/fork_choice/fork_choice"
+"beacon_chain/fork_choice/proto_array"
+"beacon_chain/networking/network_metadata_downloads"
+"beacon_chain/era_db"
+"beacon_chain/trusted_node_sync"
+"benchmarks/rest_api_benchmark"
+"tests/mocking/mock_genesis"
+)
 source env.sh
-nim c beacon_chain/era_db
+for executable in "${executables[@]}"; do
-nim c beacon_chain/trusted_node_sync
+nim c --passC:-fsyntax-only --noLinking:on -d:chronicles_log_level=TRACE "${executable}"
+done
 lint:
 name: "Lint"


@@ -7,7 +7,7 @@
 name: PR block
 on:
-pull_request:
+pull_request_target:
 branches:
 - stable


@@ -54,6 +54,7 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + sanity check Deneb blocks [Preset: mainnet] OK
 + sanity check Deneb states [Preset: mainnet] OK
 + sanity check Deneb states, reusing buffers [Preset: mainnet] OK
++ sanity check Electra blocks [Preset: mainnet] OK
 + sanity check blobs [Preset: mainnet] OK
 + sanity check genesis roundtrip [Preset: mainnet] OK
 + sanity check phase 0 blocks [Preset: mainnet] OK
@@ -62,7 +63,7 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + sanity check phase 0 states, reusing buffers [Preset: mainnet] OK
 + sanity check state diff roundtrip [Preset: mainnet] OK
 ```
-OK: 25/25 Fail: 0/25 Skip: 0/25
+OK: 26/26 Fail: 0/26 Skip: 0/26
 ## Beacon state [Preset: mainnet]
 ```diff
 + Smoke test initialize_beacon_state_from_eth1 [Preset: mainnet] OK
@@ -441,12 +442,10 @@ OK: 253/253 Fail: 0/253 Skip: 0/253
 + Testing boolean inputs - valid OK
 + Testing containers inputs - invalid - skipping BitsStruct OK
 + Testing containers inputs - valid - skipping BitsStruct OK
-+ Testing profiles inputs - valid OK
-+ Testing stablecontainers inputs - valid OK
 + Testing uints inputs - invalid OK
 + Testing uints inputs - valid OK
 ```
-OK: 12/14 Fail: 0/14 Skip: 2/14
+OK: 10/12 Fail: 0/12 Skip: 2/12
 ## EIP-4881
 ```diff
 + deposit_cases OK
@@ -1034,4 +1033,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 9/9 Fail: 0/9 Skip: 0/9
 ---TOTAL---
-OK: 689/694 Fail: 0/694 Skip: 5/694
+OK: 690/695 Fail: 0/695 Skip: 5/695


@@ -1,3 +1,33 @@
+2024-07-29 v24.7.0
+==================
+Nimbus `v24.7.0` is a `low-urgency` release with beacon API improvements and fixes.
+### Improvements
+* Add support for publishBlindedBlockV2 beacon API endpoint:
+https://github.com/status-im/nimbus-eth2/pull/6413
+* Improve block proposal rewards in the absence of pre-aggregated sync contributions:
+https://github.com/status-im/nimbus-eth2/pull/6384
+### Fixes
+* Fix SSZ decoding for beacon API publishBlock and publishBlockV2 endpoints:
+https://github.com/status-im/nimbus-eth2/pull/6408
+* Fix `statuses` parameter handling in postStateValidators beacon API endpoint:
+https://github.com/status-im/nimbus-eth2/pull/6391
+* Restore functioning Sepolia bootnodes, as previous bootnodes had gradually vanished:
+https://github.com/status-im/nimbus-eth2/pull/6421
+* Fix IP addresses returned by getNetworkIdentity beacon API endpoint:
+https://github.com/status-im/nimbus-eth2/pull/6422
+* Ensure Keymanager API fee recipient changes propagate to builder API relays:
+https://github.com/status-im/nimbus-eth2/pull/6412
 2024-06-24 v24.6.0
 ==================


@@ -2468,14 +2468,20 @@ OK: 10/10 Fail: 0/10 Skip: 0/10
 OK: 10/10 Fail: 0/10 Skip: 0/10
 ## EF - Electra - Epoch Processing - Pending balance deposits [Preset: mainnet]
 ```diff
++ Pending balance deposits - mixture_of_skipped_and_above_churn [Preset: mainnet] OK
 + Pending balance deposits - multiple_pending_deposits_above_churn [Preset: mainnet] OK
 + Pending balance deposits - multiple_pending_deposits_below_churn [Preset: mainnet] OK
++ Pending balance deposits - multiple_pending_one_skipped [Preset: mainnet] OK
++ Pending balance deposits - multiple_skipped_deposits_exiting_validators [Preset: mainnet] OK
 + Pending balance deposits - pending_deposit_balance_above_churn [Preset: mainnet] OK
 + Pending balance deposits - pending_deposit_balance_equal_churn [Preset: mainnet] OK
 + Pending balance deposits - pending_deposit_min_activation_balance [Preset: mainnet] OK
 + Pending balance deposits - pending_deposit_preexisting_churn [Preset: mainnet] OK
++ Pending balance deposits - processing_deposit_of_withdrawable_validator [Preset: mainnet] OK
++ Pending balance deposits - processing_deposit_of_withdrawable_validator_does_not_get_churn OK
++ Pending balance deposits - skipped_deposit_exiting_validator [Preset: mainnet] OK
 ```
-OK: 6/6 Fail: 0/6 Skip: 0/6
+OK: 12/12 Fail: 0/12 Skip: 0/12
 ## EF - Electra - Epoch Processing - Pending consolidations [Preset: mainnet]
 ```diff
 + Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK
@@ -2673,6 +2679,11 @@ OK: 14/14 Fail: 0/14 Skip: 0/14
 + [Valid] EF - Electra - Operations - Block Header - basic_block_header OK
 ```
 OK: 6/6 Fail: 0/6 Skip: 0/6
+## EF - Electra - Operations - Consolidation Request [Preset: mainnet]
+```diff
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_not_enough_consoli OK
+```
+OK: 1/1 Fail: 0/1 Skip: 0/1
 ## EF - Electra - Operations - Deposit [Preset: mainnet]
 ```diff
 + [Invalid] EF - Electra - Operations - Deposit - invalid_bad_merkle_proof OK
@@ -2698,45 +2709,27 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
 + [Valid] EF - Electra - Operations - Deposit - top_up__zero_balance OK
 ```
 OK: 21/21 Fail: 0/21 Skip: 0/21
-## EF - Electra - Operations - Deposit Receipt [Preset: mainnet]
+## EF - Electra - Operations - Deposit Request [Preset: mainnet]
 ```diff
-+ [Valid] EF - Electra - Operations - Deposit Receipt - correct_sig_but_forked_state OK
++ [Valid] EF - Electra - Operations - Deposit Request - correct_sig_but_forked_state OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - effective_deposit_with_genesis_for OK
++ [Valid] EF - Electra - Operations - Deposit Request - effective_deposit_with_genesis_for OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_sig_new_deposit OK
++ [Valid] EF - Electra - Operations - Deposit Request - incorrect_sig_new_deposit OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_sig_top_up OK
++ [Valid] EF - Electra - Operations - Deposit Request - incorrect_sig_top_up OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_withdrawal_credentials_t OK
++ [Valid] EF - Electra - Operations - Deposit Request - incorrect_withdrawal_credentials_t OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - ineffective_deposit_with_previous_ OK
++ [Valid] EF - Electra - Operations - Deposit Request - ineffective_deposit_with_previous_ OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - key_validate_invalid_decompression OK
++ [Valid] EF - Electra - Operations - Deposit Request - key_validate_invalid_decompression OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - key_validate_invalid_subgroup OK
++ [Valid] EF - Electra - Operations - Deposit Request - key_validate_invalid_subgroup OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_eth1_withdrawal_creden OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_eth1_withdrawal_creden OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_max OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_max OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_non_versioned_withdraw OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_non_versioned_withdraw OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_over_max OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_over_max OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_under_max OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_under_max OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - success_top_up_to_withdrawn_valida OK
++ [Valid] EF - Electra - Operations - Deposit Request - success_top_up_to_withdrawn_valida OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__less_effective_balance OK
++ [Valid] EF - Electra - Operations - Deposit Request - top_up__less_effective_balance OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__max_effective_balance OK
++ [Valid] EF - Electra - Operations - Deposit Request - top_up__max_effective_balance OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__zero_balance OK
++ [Valid] EF - Electra - Operations - Deposit Request - top_up__zero_balance OK
 ```
 OK: 17/17 Fail: 0/17 Skip: 0/17
-## EF - Electra - Operations - Execution Layer Withdrawal Request [Preset: mainnet]
-```diff
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - activation_epoc OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - incorrect_sourc OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - incorrect_withd OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - insufficient_ef OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - no_compounding_ OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - no_excess_balan OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - on_withdrawal_r OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - pending_withdra OK
-```
-OK: 14/14 Fail: 0/14 Skip: 0/14
 ## EF - Electra - Operations - Execution Payload [Preset: mainnet]
 ```diff
 + [Invalid] EF - Electra - Operations - Execution Payload - invalid_bad_everything_first_pay OK
@@ -2856,6 +2849,24 @@ OK: 26/26 Fail: 0/26 Skip: 0/26
 + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__min_churn OK
 ```
 OK: 24/24 Fail: 0/24 Skip: 0/24
+## EF - Electra - Operations - Withdrawal Request [Preset: mainnet]
+```diff
++ [Valid] EF - Electra - Operations - Withdrawal Request - activation_epoch_less_than_shar OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request_with_c OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - incorrect_source_address OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - incorrect_withdrawal_credential OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - insufficient_effective_balance OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - no_compounding_credentials OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - no_excess_balance OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - on_withdrawal_request_initiated OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_activation_e OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_so OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_wi OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_on_exit_init OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - pending_withdrawals_consume_all OK
+```
+OK: 14/14 Fail: 0/14 Skip: 0/14
 ## EF - Electra - Operations - Withdrawals [Preset: mainnet]
 ```diff
 + [Invalid] EF - Electra - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK
@@ -2982,15 +2993,14 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
 + Testing BlobIdentifier OK
 + Testing BlobSidecar OK
 + Testing Checkpoint OK
-+ Testing Consolidation OK
++ Testing ConsolidationRequest OK
 + Testing ContributionAndProof OK
 + Testing Deposit OK
 + Testing DepositData OK
 + Testing DepositMessage OK
-+ Testing DepositReceipt OK
++ Testing DepositRequest OK
 + Testing Eth1Block OK
 + Testing Eth1Data OK
-+ Testing ExecutionLayerWithdrawalRequest OK
 + Testing ExecutionPayload OK
 + Testing ExecutionPayloadHeader OK
 + Testing Fork OK
@@ -3013,7 +3023,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
 + Testing SignedBLSToExecutionChange OK
 + Testing SignedBeaconBlock OK
 + Testing SignedBeaconBlockHeader OK
-+ Testing SignedConsolidation OK
 + Testing SignedContributionAndProof OK
 + Testing SignedVoluntaryExit OK
 + Testing SigningData OK
@@ -3025,8 +3034,9 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
 + Testing Validator OK
 + Testing VoluntaryExit OK
 + Testing Withdrawal OK
++ Testing WithdrawalRequest OK
 ```
-OK: 55/55 Fail: 0/55 Skip: 0/55
+OK: 54/54 Fail: 0/54 Skip: 0/54
 ## EF - Electra - Sanity - Blocks [Preset: mainnet]
 ```diff
 + [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK
@@ -3146,6 +3156,14 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
 + EF - Electra - Transition - transition_with_random_three_quarters_participation [Preset: m OK
 ```
 OK: 25/25 Fail: 0/25 Skip: 0/25
+## EF - Electra - Unittests - Light client - Sync protocol [Preset: mainnet]
+```diff
++ process_light_client_update_finality_updated OK
++ process_light_client_update_timeout OK
++ test_process_light_client_update_at_period_boundary OK
++ test_process_light_client_update_not_timeout OK
+```
+OK: 4/4 Fail: 0/4 Skip: 0/4
 ## EF - Light client - Single merkle proof [Preset: mainnet]
 ```diff
 + Light client - Single merkle proof - mainnet/altair/light_client/single_merkle_proof/Beaco OK
@@ -3675,4 +3693,4 @@ OK: 69/88 Fail: 0/88 Skip: 19/88
 OK: 3/3 Fail: 0/3 Skip: 0/3
 ---TOTAL---
-OK: 2961/2981 Fail: 0/2981 Skip: 20/2981
+OK: 2971/2991 Fail: 0/2991 Skip: 20/2991


@@ -2579,14 +2579,20 @@ OK: 10/10 Fail: 0/10 Skip: 0/10
 OK: 12/12 Fail: 0/12 Skip: 0/12
 ## EF - Electra - Epoch Processing - Pending balance deposits [Preset: minimal]
 ```diff
++ Pending balance deposits - mixture_of_skipped_and_above_churn [Preset: minimal] OK
 + Pending balance deposits - multiple_pending_deposits_above_churn [Preset: minimal] OK
 + Pending balance deposits - multiple_pending_deposits_below_churn [Preset: minimal] OK
++ Pending balance deposits - multiple_pending_one_skipped [Preset: minimal] OK
++ Pending balance deposits - multiple_skipped_deposits_exiting_validators [Preset: minimal] OK
 + Pending balance deposits - pending_deposit_balance_above_churn [Preset: minimal] OK
 + Pending balance deposits - pending_deposit_balance_equal_churn [Preset: minimal] OK
 + Pending balance deposits - pending_deposit_min_activation_balance [Preset: minimal] OK
 + Pending balance deposits - pending_deposit_preexisting_churn [Preset: minimal] OK
++ Pending balance deposits - processing_deposit_of_withdrawable_validator [Preset: minimal] OK
++ Pending balance deposits - processing_deposit_of_withdrawable_validator_does_not_get_churn OK
++ Pending balance deposits - skipped_deposit_exiting_validator [Preset: minimal] OK
 ```
-OK: 6/6 Fail: 0/6 Skip: 0/6
+OK: 12/12 Fail: 0/12 Skip: 0/12
 ## EF - Electra - Epoch Processing - Pending consolidations [Preset: minimal]
 ```diff
 + Pending consolidations - all_consolidation_cases_together [Preset: minimal] OK
@@ -2802,29 +2808,30 @@ OK: 14/14 Fail: 0/14 Skip: 0/14
 + [Valid] EF - Electra - Operations - Block Header - basic_block_header OK
 ```
 OK: 6/6 Fail: 0/6 Skip: 0/6
-## EF - Electra - Operations - Consolidation [Preset: minimal]
+## EF - Electra - Operations - Consolidation Request [Preset: minimal]
 ```diff
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_before_specified_epoch OK
++ [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_in_curre OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_different_credentials OK
++ [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_in_new_c OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_exited_source OK
++ [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_com OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_exited_target OK
++ [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_ins OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_inactive_source OK
++ [Valid] EF - Electra - Operations - Consolidation Request - basic_consolidation_with_pre OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_inactive_target OK
++ [Valid] EF - Electra - Operations - Consolidation Request - consolidation_balance_larger OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_no_execution_withdrawal_cred OK
++ [Valid] EF - Electra - Operations - Consolidation Request - consolidation_balance_throug OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_not_enough_consolidation_chu OK
++ [Valid] EF - Electra - Operations - Consolidation Request - consolidation_churn_limit_ba OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_source_equals_target OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_exceed_pending_con OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_source_signature OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_exited_source OK
-+ [Invalid] EF - Electra - Operations - Consolidation - invalid_target_signature OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_exited_target OK
-+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_in_current_conso OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_inactive_source OK
-+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_in_new_consolida OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_inactive_target OK
-+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_with_compounding OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_incorrect_source_a OK
-+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_with_insufficien OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_no_source_executio OK
-+ [Valid] EF - Electra - Operations - Consolidation - basic_consolidation_with_preexisting OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_no_target_executio OK
-+ [Valid] EF - Electra - Operations - Consolidation - consolidation_balance_larger_than_ch OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_not_enough_consoli OK
-+ [Valid] EF - Electra - Operations - Consolidation - consolidation_balance_through_two_ch OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_source_equals_targ OK
-+ [Valid] EF - Electra - Operations - Consolidation - consolidation_churn_limit_balance OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_unknown_source_pub OK
++ [Valid] EF - Electra - Operations - Consolidation Request - incorrect_unknown_target_pub OK
 ```
-OK: 19/19 Fail: 0/19 Skip: 0/19
+OK: 20/20 Fail: 0/20 Skip: 0/20
 ## EF - Electra - Operations - Deposit [Preset: minimal]
 ```diff
 + [Invalid] EF - Electra - Operations - Deposit - invalid_bad_merkle_proof OK
@@ -2850,55 +2857,27 @@ OK: 19/19 Fail: 0/19 Skip: 0/19
 + [Valid] EF - Electra - Operations - Deposit - top_up__zero_balance OK
 ```
 OK: 21/21 Fail: 0/21 Skip: 0/21
-## EF - Electra - Operations - Deposit Receipt [Preset: minimal]
+## EF - Electra - Operations - Deposit Request [Preset: minimal]
 ```diff
-+ [Valid] EF - Electra - Operations - Deposit Receipt - correct_sig_but_forked_state OK
++ [Valid] EF - Electra - Operations - Deposit Request - correct_sig_but_forked_state OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - effective_deposit_with_genesis_for OK
++ [Valid] EF - Electra - Operations - Deposit Request - effective_deposit_with_genesis_for OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_sig_new_deposit OK
++ [Valid] EF - Electra - Operations - Deposit Request - incorrect_sig_new_deposit OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_sig_top_up OK
++ [Valid] EF - Electra - Operations - Deposit Request - incorrect_sig_top_up OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - incorrect_withdrawal_credentials_t OK
++ [Valid] EF - Electra - Operations - Deposit Request - incorrect_withdrawal_credentials_t OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - ineffective_deposit_with_previous_ OK
++ [Valid] EF - Electra - Operations - Deposit Request - ineffective_deposit_with_previous_ OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - key_validate_invalid_decompression OK
++ [Valid] EF - Electra - Operations - Deposit Request - key_validate_invalid_decompression OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - key_validate_invalid_subgroup OK
++ [Valid] EF - Electra - Operations - Deposit Request - key_validate_invalid_subgroup OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_eth1_withdrawal_creden OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_eth1_withdrawal_creden OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_max OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_max OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_non_versioned_withdraw OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_non_versioned_withdraw OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_over_max OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_over_max OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - new_deposit_under_max OK
++ [Valid] EF - Electra - Operations - Deposit Request - new_deposit_under_max OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - success_top_up_to_withdrawn_valida OK
++ [Valid] EF - Electra - Operations - Deposit Request - success_top_up_to_withdrawn_valida OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__less_effective_balance OK
++ [Valid] EF - Electra - Operations - Deposit Request - top_up__less_effective_balance OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__max_effective_balance OK
++ [Valid] EF - Electra - Operations - Deposit Request - top_up__max_effective_balance OK
-+ [Valid] EF - Electra - Operations - Deposit Receipt - top_up__zero_balance OK
++ [Valid] EF - Electra - Operations - Deposit Request - top_up__zero_balance OK
 ```
 OK: 17/17 Fail: 0/17 Skip: 0/17
-## EF - Electra - Operations - Execution Layer Withdrawal Request [Preset: minimal]
-```diff
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - activation_epoc OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_partial_w OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_partial_w OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_partial_w OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - basic_withdrawa OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - incorrect_sourc OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - incorrect_withd OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - insufficient_ef OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - no_compounding_ OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - no_excess_balan OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - on_withdrawal_r OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - partial_withdra OK
-+ [Valid] EF - Electra - Operations - Execution Layer Withdrawal Request - pending_withdra OK
-```
-OK: 24/24 Fail: 0/24 Skip: 0/24
 ## EF - Electra - Operations - Execution Payload [Preset: minimal]
 ```diff
 + [Invalid] EF - Electra - Operations - Execution Payload - invalid_bad_everything_first_pay OK
@@ -3012,6 +2991,34 @@ OK: 24/24 Fail: 0/24 Skip: 0/24
 + [Valid] EF - Electra - Operations - Voluntary Exit - success_exit_queue__scaled_churn OK
 ```
 OK: 20/20 Fail: 0/20 Skip: 0/20
+## EF - Electra - Operations - Withdrawal Request [Preset: minimal]
+```diff
++ [Valid] EF - Electra - Operations - Withdrawal Request - activation_epoch_less_than_shar OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - basic_partial_withdrawal_reques OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - basic_partial_withdrawal_reques OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - basic_partial_withdrawal_reques OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request_with_c OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - basic_withdrawal_request_with_f OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - incorrect_source_address OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - incorrect_withdrawal_credential OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - insufficient_effective_balance OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - no_compounding_credentials OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - no_excess_balance OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - on_withdrawal_request_initiated OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_activation_e OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_so OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_incorrect_wi OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_on_exit_init OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_queue_full OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - partial_withdrawal_request_with OK
++ [Valid] EF - Electra - Operations - Withdrawal Request - pending_withdrawals_consume_all OK
+```
+OK: 24/24 Fail: 0/24 Skip: 0/24
 ## EF - Electra - Operations - Withdrawals [Preset: minimal]
 ```diff
 + [Invalid] EF - Electra - Operations - Withdrawals - invalid_a_lot_fully_withdrawable_too_f OK
@@ -3139,15 +3146,14 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
 + Testing BlobIdentifier OK
 + Testing BlobSidecar OK
 + Testing Checkpoint OK
-+ Testing Consolidation OK
++ Testing ConsolidationRequest OK
 + Testing ContributionAndProof OK
 + Testing Deposit OK
 + Testing DepositData OK
 + Testing DepositMessage OK
-+ Testing DepositReceipt OK
++ Testing DepositRequest OK
 + Testing Eth1Block OK
 + Testing Eth1Data OK
-+ Testing ExecutionLayerWithdrawalRequest OK
 + Testing ExecutionPayload OK
 + Testing ExecutionPayloadHeader OK
 + Testing Fork OK
@@ -3170,7 +3176,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
 + Testing SignedBLSToExecutionChange OK
 + Testing SignedBeaconBlock OK
 + Testing SignedBeaconBlockHeader OK
-+ Testing SignedConsolidation OK
 + Testing SignedContributionAndProof OK
 + Testing SignedVoluntaryExit OK
 + Testing SigningData OK
@@ -3182,8 +3187,9 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
 + Testing Validator OK
 + Testing VoluntaryExit OK
 + Testing Withdrawal OK
++ Testing WithdrawalRequest OK
 ```
-OK: 55/55 Fail: 0/55 Skip: 0/55
+OK: 54/54 Fail: 0/54 Skip: 0/54
 ## EF - Electra - Sanity - Blocks [Preset: minimal]
 ```diff
 + [Invalid] EF - Electra - Sanity - Blocks - deposit_transition__invalid_eth1_deposits_overl OK
@@ -3315,6 +3321,14 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
 + EF - Electra - Transition - transition_with_voluntary_exit_right_before_fork [Preset: mini OK
 ```
 OK: 30/30 Fail: 0/30 Skip: 0/30
+## EF - Electra - Unittests - Light client - Sync protocol [Preset: minimal]
+```diff
++ process_light_client_update_finality_updated OK
++ process_light_client_update_timeout OK
++ test_process_light_client_update_at_period_boundary OK
++ test_process_light_client_update_not_timeout OK
+```
+OK: 4/4 Fail: 0/4 Skip: 0/4
 ## EF - Light client - Single merkle proof [Preset: minimal]
 ```diff
 + Light client - Single merkle proof - minimal/altair/light_client/single_merkle_proof/Beaco OK
@@ -4005,4 +4019,4 @@ OK: 185/207 Fail: 0/207 Skip: 22/207
 OK: 3/3 Fail: 0/3 Skip: 0/3
 ---TOTAL---
-OK: 3256/3279 Fail: 0/3279 Skip: 23/3279
+OK: 3266/3289 Fail: 0/3289 Skip: 23/3289


@@ -38,9 +38,11 @@ The [Quickstart](https://nimbus.guide/quick-start.html) in particular will help
 The [Nimbus REST api](https://nimbus.guide/rest-api.html) is now available from:
-* http://testing.mainnet.beacon-api.nimbus.team/
 * http://unstable.mainnet.beacon-api.nimbus.team/
-* http://unstable.prater.beacon-api.nimbus.team/
+* http://testing.mainnet.beacon-api.nimbus.team/
+* http://unstable.sepolia.beacon-api.nimbus.team/
+* http://testing.holesky.beacon-api.nimbus.team/
+* http://unstable.holesky.beacon-api.nimbus.team/
 Note that right now these are very much unstable testing instances. They may be unresponsive at times - so **please do not rely on them for validating**. We may also disable them at any time.


@@ -542,12 +542,22 @@ proc new*(T: type BeaconChainDB,
 "lc_deneb_headers"
 else:
 "",
+electraHeaders:
+if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
+"lc_electra_headers"
+else:
+"",
 altairCurrentBranches: "lc_altair_current_branches",
+electraCurrentBranches:
+if cfg.ELECTRA_FORK_EPOCH != FAR_FUTURE_EPOCH:
+"lc_electra_current_branches"
+else:
+"",
 altairSyncCommittees: "lc_altair_sync_committees",
 legacyAltairBestUpdates: "lc_altair_best_updates",
 bestUpdates: "lc_best_updates",
 sealedPeriods: "lc_sealed_periods")).expectDb()
-static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb
+static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
 var blobs : KvStoreRef
 if cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
@@ -1356,7 +1366,8 @@ proc containsBlock*(
 proc containsBlock*[
 X: altair.TrustedSignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock |
-capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock](
+capella.TrustedSignedBeaconBlock | deneb.TrustedSignedBeaconBlock |
+electra.TrustedSignedBeaconBlock](
 db: BeaconChainDB, key: Eth2Digest, T: type X): bool =
 db.blocks[X.kind].contains(key.data).expectDb()


@@ -130,7 +130,7 @@
 current_sync_committee*: SyncCommittee # [New in Altair]
 next_sync_committee*: SyncCommittee # [New in Altair]
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconstate
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
 # Memory-representation-equivalent to a Bellatrix BeaconState for in-place SSZ
 # reading and writing
 BellatrixBeaconStateNoImmutableValidators* = object
@@ -401,7 +401,7 @@
 historical_summaries*:
 HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT]
-deposit_receipts_start_index*: uint64 # [New in Electra:EIP6110]
+deposit_requests_start_index*: uint64 # [New in Electra:EIP6110]
 deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251]
 exit_balance_to_consume*: Gwei # [New in Electra:EIP7251]
 earliest_exit_epoch*: Epoch # [New in Electra:EIP7251]


@@ -26,15 +26,17 @@ logScope: topics = "lcdata"
 # needs to be bundled together with other data to fulfill requests.
 # Mainnet data size (all columns):
 # - Altair: ~38 KB per `SyncCommitteePeriod` (~1.0 MB per month)
-# - Capella: ~222 KB per `SyncCommitteePeriod` (~6.1 MB per month)
+# - Capella: ~221 KB per `SyncCommitteePeriod` (~6.0 MB per month)
-# - Deneb: ~230 KB per `SyncCommitteePeriod` (~6.3 MB per month)
+# - Deneb: ~225 KB per `SyncCommitteePeriod` (~6.2 MB per month)
+# - Electra: ~249 KB per `SyncCommitteePeriod` (~6.8 MB per month)
 #
-# `lc_altair_current_branches` holds Merkle proofs needed to
+# `lc_xxxxx_current_branches` holds Merkle proofs needed to
 # construct `LightClientBootstrap` objects.
 # SSZ because this data does not compress well, and because this data
 # needs to be bundled together with other data to fulfill requests.
 # Mainnet data size (all columns):
 # - Altair ... Deneb: ~42 KB per `SyncCommitteePeriod` (~1.1 MB per month)
+# - Electra: ~50 KB per `SyncCommitteePeriod` (~1.4 MB per month)
 #
 # `lc_altair_sync_committees` contains a copy of finalized sync committees.
 # They are initially populated from the main DAG (usually a fast state access).
@@ -42,7 +44,7 @@ logScope: topics = "lcdata"
 # SSZ because this data does not compress well, and because this data
 # needs to be bundled together with other data to fulfill requests.
 # Mainnet data size (all columns):
-# - Altair ... Deneb: ~32 KB per `SyncCommitteePeriod` (~0.9 MB per month)
+# - Altair ... Electra: ~24 KB per `SyncCommitteePeriod` (~0.7 MB per month)
 #
 # `lc_best_updates` holds full `LightClientUpdate` objects in SSZ form.
 # These objects are frequently queried in bulk, but there is only one per
@@ -56,9 +58,10 @@ logScope: topics = "lcdata"
 # the fork digest, because the same storage format may be used across forks.
 # SSZ storage selected due to the small size and reduced logic complexity.
 # Mainnet data size (all columns):
-# - Altair: ~33 KB per `SyncCommitteePeriod` (~0.9 MB per month)
+# - Altair: ~25 KB per `SyncCommitteePeriod` (~0.7 MB per month)
-# - Capella: ~34 KB per `SyncCommitteePeriod` (~0.9 MB per month)
+# - Capella: ~26 KB per `SyncCommitteePeriod` (~0.7 MB per month)
-# - Deneb: ~34 KB per `SyncCommitteePeriod` (~0.9 MB per month)
+# - Deneb: ~26 KB per `SyncCommitteePeriod` (~0.7 MB per month)
+# - Electra: ~27 KB per `SyncCommitteePeriod` (~0.7 MB per month)
 #
 # `lc_sealed_periods` contains the sync committee periods for which
 # full light client data was imported. Data for these periods may no longer
@@ -66,6 +69,36 @@ logScope: topics = "lcdata"
 # when restarting the program.
 # Mainnet data size (all columns):
 # - All forks: 8 bytes per `SyncCommitteePeriod` (~0.0 MB per month)
+#
+# Header computations:
+# - Altair: 256*(112+40)/1024*28/1024
+# - Capella: 256*(112+4+600+128+40)/1024*28/1024
+# 600 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32
+# - Deneb: 256*(112+4+616+128+40)/1024*28/1024
+# 616 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32+8+8
+# - Electra: 256*(112+4+712+128+40)/1024*28/1024
+# 712 = 32+20+32+32+256+32+8+8+8+8+4+32+32+32+32+32+8+8+32+32+32
+#
+# Committee branch computations:
+# - Altair: 256*(5*32+8)/1024*28/1024
+# - Electra: 256*(6*32+8)/1024*28/1024
+#
+# Finality branch computations:
+# - Altair: 256*(6*32+8)/1024*28/1024
+# - Electra: 256*(7*32+8)/1024*28/1024
+#
+# Committee computations:
+# - Altair: (24624+8)/1024*28/1024
+# 513*48 = 24624
+#
+# Aggregate computations:
+# - Altair: 112 = 512/8+48
+#
+# Update computations:
+# - Altair: (112+24624+5*32+112+6*32+112+8+9)/1024*28/1024
+# - Capella: (4+884+24624+5*32+4+884+6*32+112+8+9)/1024*28/1024
+# - Deneb: (4+900+24624+5*32+4+900+6*32+112+8+9)/1024*28/1024
+# - Electra: (4+996+24624+6*32+4+996+7*32+112+8+9)/1024*28/1024
 type
 LightClientHeaderStore = object
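The size figures in the comments above follow directly from the per-entry byte counts: kilobytes per `SyncCommitteePeriod` are 256 times the entry size divided by 1024, and the monthly figure applies the same x28/1024 factor the comments use. The short standalone Nim snippet below is an illustrative sketch only (it is not part of the commit, and the helper names `perPeriodKiB` and `perMonthMiB` are made up for this example); it reproduces the Altair and Electra header estimates.

```nim
# Illustrative sketch: re-derives two of the header size estimates above.
func perPeriodKiB(entryBytes: int): float =
  # 256 entries per `SyncCommitteePeriod`
  256 * entryBytes / 1024

func perMonthMiB(entryBytes: int): float =
  # same x28/1024 monthly factor as the comments above
  perPeriodKiB(entryBytes) * 28 / 1024

echo perPeriodKiB(112 + 40)                  # Altair:  ~38 KB per period
echo perMonthMiB(112 + 40)                   #          ~1.0 MB per month
echo perPeriodKiB(112 + 4 + 712 + 128 + 40)  # Electra: ~249 KB per period
echo perMonthMiB(112 + 4 + 712 + 128 + 40)   #          ~6.8 MB per month
```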
@@ -73,6 +106,11 @@ type
 putStmt: SqliteStmt[(array[32, byte], int64, seq[byte]), void]
 keepFromStmt: SqliteStmt[int64, void]
+BranchFork {.pure.} = enum
+None = 0,
+Altair,
+Electra
 CurrentSyncCommitteeBranchStore = object
 containsStmt: SqliteStmt[int64, int64]
 getStmt: SqliteStmt[int64, seq[byte]]
@@ -110,8 +148,8 @@ type
 ## Eth2Digest -> (Slot, LightClientHeader)
 ## Cached block headers to support longer retention than block storage.
-currentBranches: CurrentSyncCommitteeBranchStore
+currentBranches: array[BranchFork, CurrentSyncCommitteeBranchStore]
-## Slot -> altair.CurrentSyncCommitteeBranch
+## Slot -> CurrentSyncCommitteeBranch
 ## Cached data for creating future `LightClientBootstrap` instances.
 ## Key is the block slot of which the post state was used to get the data.
 ## Data stored for all finalized epoch boundary blocks.
@@ -209,12 +247,14 @@ func putHeader*[T: ForkyLightClientHeader](
 proc initCurrentBranchesStore(
 backend: SqStoreRef,
-name: string): KvResult[CurrentSyncCommitteeBranchStore] =
+name, typeName: string): KvResult[CurrentSyncCommitteeBranchStore] =
+if name == "":
+return ok CurrentSyncCommitteeBranchStore()
 if not backend.readOnly:
 ? backend.exec("""
 CREATE TABLE IF NOT EXISTS `""" & name & """` (
 `slot` INTEGER PRIMARY KEY, -- `Slot` (up through 2^63-1)
-`branch` BLOB -- `altair.CurrentSyncCommitteeBranch` (SSZ)
+`branch` BLOB -- `""" & typeName & """` (SSZ)
 );
 """)
 if not ? backend.hasTable(name):
@@ -253,40 +293,46 @@ func close(store: var CurrentSyncCommitteeBranchStore) =
 store.putStmt.disposeSafe()
 store.keepFromStmt.disposeSafe()
-func hasCurrentSyncCommitteeBranch*(
+template kind(x: typedesc[altair.CurrentSyncCommitteeBranch]): BranchFork =
+BranchFork.Altair
+template kind(x: typedesc[electra.CurrentSyncCommitteeBranch]): BranchFork =
+BranchFork.Electra
+func hasCurrentSyncCommitteeBranch*[T: ForkyCurrentSyncCommitteeBranch](
 db: LightClientDataDB, slot: Slot): bool =
 if not slot.isSupportedBySQLite or
-distinctBase(db.currentBranches.containsStmt) == nil:
+distinctBase(db.currentBranches[T.kind].containsStmt) == nil:
 return false
 var exists: int64
-for res in db.currentBranches.containsStmt.exec(slot.int64, exists):
+for res in db.currentBranches[T.kind].containsStmt.exec(slot.int64, exists):
 res.expect("SQL query OK")
 doAssert exists == 1
 return true
 false
-proc getCurrentSyncCommitteeBranch*(
+proc getCurrentSyncCommitteeBranch*[T: ForkyCurrentSyncCommitteeBranch](
-db: LightClientDataDB, slot: Slot): Opt[altair.CurrentSyncCommitteeBranch] =
+db: LightClientDataDB, slot: Slot): Opt[T] =
 if not slot.isSupportedBySQLite or
-distinctBase(db.currentBranches.getStmt) == nil:
+distinctBase(db.currentBranches[T.kind].getStmt) == nil:
-return Opt.none(altair.CurrentSyncCommitteeBranch)
+return Opt.none(T)
 var branch: seq[byte]
-for res in db.currentBranches.getStmt.exec(slot.int64, branch):
+for res in db.currentBranches[T.kind].getStmt.exec(slot.int64, branch):
 res.expect("SQL query OK")
 try:
-return ok SSZ.decode(branch, altair.CurrentSyncCommitteeBranch)
+return ok SSZ.decode(branch, T)
 except SerializationError as exc:
-error "LC data store corrupted", store = "currentBranches",
+error "LC data store corrupted", store = "currentBranches", kind = T.kind,
 slot, exc = exc.msg
-return Opt.none(altair.CurrentSyncCommitteeBranch)
+return Opt.none(T)
-func putCurrentSyncCommitteeBranch*(
+func putCurrentSyncCommitteeBranch*[T: ForkyCurrentSyncCommitteeBranch](
-db: LightClientDataDB, slot: Slot,
+db: LightClientDataDB, slot: Slot, branch: T) =
-branch: altair.CurrentSyncCommitteeBranch) =
 doAssert not db.backend.readOnly # All `stmt` are non-nil
 if not slot.isSupportedBySQLite:
 return
-let res = db.currentBranches.putStmt.exec((slot.int64, SSZ.encode(branch)))
+let res = db.currentBranches[T.kind].putStmt.exec(
+(slot.int64, SSZ.encode(branch)))
 res.expect("SQL query OK")
 proc initSyncCommitteesStore(
@@ -618,9 +664,11 @@ func keepPeriodsFrom*(
 let res = db.syncCommittees.keepFromStmt.exec(minPeriod.int64)
 res.expect("SQL query OK")
 let minSlot = min(minPeriod.start_slot, int64.high.Slot)
-block:
+for branchFork, store in db.currentBranches:
-let res = db.currentBranches.keepFromStmt.exec(minSlot.int64)
+if branchFork > BranchFork.None and
-res.expect("SQL query OK")
+distinctBase(store.keepFromStmt) != nil:
+let res = store.keepFromStmt.exec(minSlot.int64)
+res.expect("SQL query OK")
 for lcDataFork, store in db.headers:
 if lcDataFork > LightClientDataFork.None and
 distinctBase(store.keepFromStmt) != nil:
@@ -631,7 +679,9 @@ type LightClientDataDBNames* = object
 altairHeaders*: string
 capellaHeaders*: string
 denebHeaders*: string
+electraHeaders*: string
 altairCurrentBranches*: string
+electraCurrentBranches*: string
 altairSyncCommittees*: string
 legacyAltairBestUpdates*: string
 bestUpdates*: string
@@ -640,7 +690,7 @@ type LightClientDataDBNames* = object
 proc initLightClientDataDB*(
 backend: SqStoreRef,
 names: LightClientDataDBNames): KvResult[LightClientDataDB] =
-static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb
+static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
 let
 headers = [
 # LightClientDataFork.None
@@ -653,10 +703,21 @@ proc initLightClientDataDB*(
 names.capellaHeaders, "capella.LightClientHeader"),
 # LightClientDataFork.Deneb
 ? backend.initHeadersStore(
-names.denebHeaders, "deneb.LightClientHeader")
+names.denebHeaders, "deneb.LightClientHeader"),
+# LightClientDataFork.Electra
+? backend.initHeadersStore(
+names.electraHeaders, "electra.LightClientHeader"),
+]
+currentBranches = [
+# BranchFork.None
+CurrentSyncCommitteeBranchStore(),
+# BranchFork.Altair
+? backend.initCurrentBranchesStore(
+names.altairCurrentBranches, "altair.CurrentSyncCommitteeBranch"),
+# BranchFork.Electra
+? backend.initCurrentBranchesStore(
+names.electraCurrentBranches, "electra.CurrentSyncCommitteeBranch"),
 ]
-currentBranches =
-? backend.initCurrentBranchesStore(names.altairCurrentBranches)
 syncCommittees =
 ? backend.initSyncCommitteesStore(names.altairSyncCommittees)
 legacyBestUpdates =
@@ -681,7 +742,9 @@ proc close*(db: LightClientDataDB) =
 for lcDataFork in LightClientDataFork:
 if lcDataFork > LightClientDataFork.None:
 db.headers[lcDataFork].close()
-db.currentBranches.close()
+for branchFork in BranchFork:
+if branchFork > BranchFork.None:
+db.currentBranches[branchFork].close()
 db.syncCommittees.close()
 db.legacyBestUpdates.close()
 db.bestUpdates.close()
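The generic branch accessors in this file rely on small `kind` templates that map each per-fork `CurrentSyncCommitteeBranch` type onto a `BranchFork` slot, so a single implementation serves both the Altair and Electra stores. A minimal standalone sketch of that dispatch pattern is shown below; the branch types and helper names are illustrative stand-ins rather than the actual nimbus-eth2 definitions.

```nim
# Minimal sketch of the `kind`-template dispatch pattern (illustrative types).
type
  BranchFork {.pure.} = enum
    None = 0,
    Altair,
    Electra
  AltairBranch = object   # stand-in for altair.CurrentSyncCommitteeBranch
  ElectraBranch = object  # stand-in for electra.CurrentSyncCommitteeBranch

template kind(x: typedesc[AltairBranch]): BranchFork = BranchFork.Altair
template kind(x: typedesc[ElectraBranch]): BranchFork = BranchFork.Electra

const tableNames: array[BranchFork, string] = [
  BranchFork.None: "",
  BranchFork.Altair: "lc_altair_current_branches",
  BranchFork.Electra: "lc_electra_current_branches"]

proc tableFor[T: AltairBranch | ElectraBranch](x: typedesc[T]): string =
  # `T.kind` resolves at compile time and selects the matching slot.
  tableNames[T.kind]

echo tableFor(ElectraBranch)  # prints: lc_electra_current_branches
```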


@@ -27,7 +27,7 @@ type
 ## which blocks are valid - in particular, blocks are not valid if they
 ## come from the future as seen from the local clock.
 ##
-## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#fork-choice
+## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#fork-choice
 ##
 # TODO consider NTP and network-adjusted timestamps as outlined here:
 # https://ethresear.ch/t/network-adjusted-timestamps/4187


@ -106,6 +106,7 @@ type
## Number of validators that we've checked for activation ## Number of validators that we've checked for activation
processingDelay*: Opt[Duration] processingDelay*: Opt[Duration]
lastValidAttestedBlock*: Opt[BlockSlot] lastValidAttestedBlock*: Opt[BlockSlot]
shutdownEvent*: AsyncEvent
template findIt*(s: openArray, predicate: untyped): int = template findIt*(s: openArray, predicate: untyped): int =
var res = -1 var res = -1
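The new `shutdownEvent` field is an `AsyncEvent`, i.e. a level-triggered signal that other tasks can await. A minimal sketch of the idea, assuming chronos's `AsyncEvent` API (`newAsyncEvent`, `wait`, `fire`):

```nim
import chronos

let shutdownEvent = newAsyncEvent()

proc worker() {.async.} =
  await shutdownEvent.wait()   # park until shutdown is requested
  echo "shutting down"

proc main() {.async.} =
  let fut = worker()
  shutdownEvent.fire()         # another task requests shutdown
  await fut

waitFor main()
```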
View File
@ -131,6 +131,10 @@ type
url*: Uri url*: Uri
provenBlockProperties*: seq[string] # empty if this is not a verifying Web3Signer provenBlockProperties*: seq[string] # empty if this is not a verifying Web3Signer
LongRangeSyncMode* {.pure.} = enum
Light = "light",
Lenient = "lenient"
BeaconNodeConf* = object BeaconNodeConf* = object
configFile* {. configFile* {.
desc: "Loads the configuration from a TOML file" desc: "Loads the configuration from a TOML file"
@ -557,6 +561,12 @@ type
desc: "Maximum number of sync committee periods to retain light client data" desc: "Maximum number of sync committee periods to retain light client data"
name: "light-client-data-max-periods" .}: Option[uint64] name: "light-client-data-max-periods" .}: Option[uint64]
longRangeSync* {.
hidden
desc: "Enable long-range syncing (genesis sync)",
defaultValue: LongRangeSyncMode.Lenient,
name: "debug-long-range-sync".}: LongRangeSyncMode
inProcessValidators* {. inProcessValidators* {.
desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself" desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself"
defaultValue: true # the use of the nimbus_signing_process binary by default will be delayed until async I/O over stdin/stdout is developed for the child process. defaultValue: true # the use of the nimbus_signing_process binary by default will be delayed until async I/O over stdin/stdout is developed for the child process.
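`LongRangeSyncMode` above is a `{.pure.}` enum with explicit string values, which is what lets the option render and match `light`/`lenient` directly. A tiny sketch of that behaviour with the stdlib (`parseEnum` is used here only for illustration; the real option goes through the beacon node's own config parsing):

```nim
import std/strutils

type LongRangeSyncMode {.pure.} = enum
  Light = "light",
  Lenient = "lenient"

echo $LongRangeSyncMode.Lenient              # lenient
echo parseEnum[LongRangeSyncMode]("light")   # light
```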
@ -1260,8 +1270,11 @@ func completeCmdArg*(T: type WalletName, input: string): seq[string] =
return @[] return @[]
proc parseCmdArg*(T: type enr.Record, p: string): T {.raises: [ValueError].} = proc parseCmdArg*(T: type enr.Record, p: string): T {.raises: [ValueError].} =
if not fromURI(result, p): let res = enr.Record.fromURI(p)
raise newException(ValueError, "Invalid ENR") if res.isErr:
raise newException(ValueError, "Invalid ENR:" & $res.error)
res.value
func completeCmdArg*(T: type enr.Record, val: string): seq[string] = func completeCmdArg*(T: type enr.Record, val: string): seq[string] =
return @[] return @[]
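The ENR change above swaps a `bool` + out-parameter API for one returning a `Result` that carries the error, so the raised `ValueError` can include it. A self-contained sketch of the same pattern, using a hypothetical stand-in `Record`/`fromURI` and the nim-results package rather than the real `enr` module:

```nim
import std/strutils
import results

type Record = object     # stand-in for enr.Record
  uri: string

proc fromURI(T: type Record, s: string): Result[Record, string] =
  if not s.startsWith("enr:"):
    return err("missing enr: prefix")
  ok(Record(uri: s))

proc parseCmdArg(T: type Record, p: string): Record {.raises: [ValueError].} =
  let res = Record.fromURI(p)
  if res.isErr:
    raise newException(ValueError, "Invalid ENR: " & res.error)
  res.value
```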
View File
@ -1058,7 +1058,7 @@ proc getBeaconHead*(
pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck) pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck)
.get(ZERO_HASH) .get(ZERO_HASH)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/fork_choice/safe-block.md#get_safe_execution_payload_hash # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/fork_choice/safe-block.md#get_safe_execution_payload_hash
safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root() safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root()
safeBlock = pool.dag.getBlockRef(safeBlockRoot) safeBlock = pool.dag.getBlockRef(safeBlockRoot)
safeExecutionBlockHash = safeExecutionBlockHash =
View File
@ -33,11 +33,13 @@ type
CachedLightClientData* = object CachedLightClientData* = object
## Cached data from historical non-finalized states to improve speed when ## Cached data from historical non-finalized states to improve speed when
## creating future `LightClientUpdate` and `LightClientBootstrap` instances. ## creating future `LightClientUpdate` and `LightClientBootstrap` instances.
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch current_sync_committee_branch*:
next_sync_committee_branch*: altair.NextSyncCommitteeBranch LightClientDataFork.high.CurrentSyncCommitteeBranch
next_sync_committee_branch*:
LightClientDataFork.high.NextSyncCommitteeBranch
finalized_slot*: Slot finalized_slot*: Slot
finality_branch*: altair.FinalityBranch finality_branch*: LightClientDataFork.high.FinalityBranch
current_period_best_update*: ref ForkedLightClientUpdate current_period_best_update*: ref ForkedLightClientUpdate
latest_signature_slot*: Slot latest_signature_slot*: Slot
View File
@ -1178,7 +1178,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# should have `previous_version` set to `current_version` while # should have `previous_version` set to `current_version` while
# this doesn't happen to be the case in network that go through # this doesn't happen to be the case in network that go through
# regular hard-fork upgrades. See for example: # regular hard-fork upgrades. See for example:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
if stateFork.current_version != configFork.current_version: if stateFork.current_version != configFork.current_version:
error "State from database does not match network, check --network parameter", error "State from database does not match network, check --network parameter",
tail = dag.tail, headRef, stateFork, configFork tail = dag.tail, headRef, stateFork, configFork
@ -1972,7 +1972,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
prunedHeads = hlen - dag.heads.len, prunedHeads = hlen - dag.heads.len,
dagPruneDur = Moment.now() - startTick dagPruneDur = Moment.now() - startTick
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/sync/optimistic.md#helpers # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#helpers
func is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool = func is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool =
let blck = let blck =
if bid.slot <= dag.finalizedHead.slot: if bid.slot <= dag.finalizedHead.slot:
View File
@ -22,6 +22,15 @@ template nextEpochBoundarySlot(slot: Slot): Slot =
## referring to a block at given slot. ## referring to a block at given slot.
(slot + (SLOTS_PER_EPOCH - 1)).epoch.start_slot (slot + (SLOTS_PER_EPOCH - 1)).epoch.start_slot
func hasCurrentSyncCommitteeBranch(dag: ChainDAGRef, slot: Slot): bool =
let epoch = dag.cfg.consensusForkAtEpoch(slot.epoch)
withLcDataFork(lcDataForkAtConsensusFork(epoch)):
when lcDataFork > LightClientDataFork.None:
hasCurrentSyncCommitteeBranch[lcDataFork.CurrentSyncCommitteeBranch](
dag.lcDataStore.db, slot)
else:
true
proc updateExistingState( proc updateExistingState(
dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId, dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId,
save: bool, cache: var StateCache): bool = save: bool, cache: var StateCache): bool =
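`hasCurrentSyncCommitteeBranch` above uses `withLcDataFork` to lift a runtime fork value into a compile-time constant so that `when` can select fork-specific code. A compact sketch of that dispatch style, with a hypothetical enum and template in place of the real ones:

```nim
type DataFork = enum
  None, Altair, Electra

template withDataFork(x: DataFork, body: untyped): untyped =
  case x
  of DataFork.Electra:
    const dataFork {.inject.} = DataFork.Electra
    body
  of DataFork.Altair:
    const dataFork {.inject.} = DataFork.Altair
    body
  of DataFork.None:
    const dataFork {.inject.} = DataFork.None
    body

proc finalityBranchLen(fork: DataFork): int =
  withDataFork(fork):
    when dataFork >= DataFork.Electra:
      result = 7    # FINALIZED_ROOT_GINDEX is one level deeper in Electra
    elif dataFork >= DataFork.Altair:
      result = 6
    else:
      result = 0

echo finalityBranchLen(DataFork.Altair)   # 6
```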
@ -226,7 +235,7 @@ proc initLightClientBootstrapForPeriod(
bid = bsi.bid bid = bsi.bid
boundarySlot = bid.slot.nextEpochBoundarySlot boundarySlot = bid.slot.nextEpochBoundarySlot
if boundarySlot == nextBoundarySlot and bid.slot >= lowSlot and if boundarySlot == nextBoundarySlot and bid.slot >= lowSlot and
not dag.lcDataStore.db.hasCurrentSyncCommitteeBranch(bid.slot): not dag.hasCurrentSyncCommitteeBranch(bid.slot):
let bdata = dag.getExistingForkedBlock(bid).valueOr: let bdata = dag.getExistingForkedBlock(bid).valueOr:
dag.handleUnexpectedLightClientError(bid.slot) dag.handleUnexpectedLightClientError(bid.slot)
res.err() res.err()
@ -246,7 +255,7 @@ proc initLightClientBootstrapForPeriod(
forkyBlck.toLightClientHeader(lcDataFork)) forkyBlck.toLightClientHeader(lcDataFork))
dag.lcDataStore.db.putCurrentSyncCommitteeBranch( dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
bid.slot, forkyState.data.build_proof( bid.slot, forkyState.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_GINDEX).get) lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get)
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
res res
@ -393,13 +402,13 @@ proc initLightClientUpdateForPeriod(
update = ForkedLightClientUpdate.init(lcDataFork.LightClientUpdate( update = ForkedLightClientUpdate.init(lcDataFork.LightClientUpdate(
attested_header: forkyBlck.toLightClientHeader(lcDataFork), attested_header: forkyBlck.toLightClientHeader(lcDataFork),
next_sync_committee: forkyState.data.next_sync_committee, next_sync_committee: forkyState.data.next_sync_committee,
next_sync_committee_branch: next_sync_committee_branch: forkyState.data.build_proof(
forkyState.data.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get, lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX).get,
finality_branch: finality_branch:
if finalizedBid.slot != FAR_FUTURE_SLOT: if finalizedBid.slot != FAR_FUTURE_SLOT:
forkyState.data.build_proof(altair.FINALIZED_ROOT_GINDEX).get forkyState.data.build_proof(lcDataFork.FINALIZED_ROOT_GINDEX).get
else: else:
default(FinalityBranch))) default(lcDataFork.FinalityBranch)))
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
do: do:
dag.handleUnexpectedLightClientError(attestedBid.slot) dag.handleUnexpectedLightClientError(attestedBid.slot)
@ -464,17 +473,21 @@ proc cacheLightClientData(
## Cache data for a given block and its post-state to speed up creating future ## Cache data for a given block and its post-state to speed up creating future
## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this ## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this
## block and state. ## block and state.
const lcDataFork = lcDataForkAtConsensusFork(typeof(state).kind)
let let
bid = blck.toBlockId() bid = blck.toBlockId()
cachedData = CachedLightClientData( cachedData = CachedLightClientData(
current_sync_committee_branch: current_sync_committee_branch: normalize_merkle_branch(
state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_GINDEX).get, state.data.build_proof(lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get,
next_sync_committee_branch: LightClientDataFork.high.CURRENT_SYNC_COMMITTEE_GINDEX),
state.data.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get, next_sync_committee_branch: normalize_merkle_branch(
state.data.build_proof(lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX).get,
LightClientDataFork.high.NEXT_SYNC_COMMITTEE_GINDEX),
finalized_slot: finalized_slot:
state.data.finalized_checkpoint.epoch.start_slot, state.data.finalized_checkpoint.epoch.start_slot,
finality_branch: finality_branch: normalize_merkle_branch(
state.data.build_proof(altair.FINALIZED_ROOT_GINDEX).get, state.data.build_proof(lcDataFork.FINALIZED_ROOT_GINDEX).get,
LightClientDataFork.high.FINALIZED_ROOT_GINDEX),
current_period_best_update: current_period_best_update:
current_period_best_update, current_period_best_update,
latest_signature_slot: latest_signature_slot:
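`normalize_merkle_branch` appears here because cached branches are now stored at the newest fork's depth (`LightClientDataFork.high`), while the proofs are built against whatever fork the state actually has. Following the Electra light-client spec helper, a branch for a shallower generalized index is padded in front with zero digests up to the target depth. A self-contained, seq-based sketch of that (the repo uses fixed-length arrays):

```nim
import std/bitops

type Digest = array[32, byte]    # stand-in for Eth2Digest

func floorLog2(gindex: uint64): int =
  doAssert gindex > 0
  63 - countLeadingZeroBits(gindex)

func normalizeMerkleBranch(branch: seq[Digest], gindex: uint64): seq[Digest] =
  let numExtra = floorLog2(gindex) - branch.len
  doAssert numExtra >= 0, "this sketch only pads toward a deeper gindex"
  newSeq[Digest](numExtra) & branch   # zero placeholders go in front

# An Altair finality branch (6 digests, gindex 105) normalized to the
# Electra FINALIZED_ROOT_GINDEX (169) becomes 7 digests long.
let altairBranch = newSeq[Digest](6)
echo normalizeMerkleBranch(altairBranch, 169).len   # 7
```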
@ -538,15 +551,18 @@ proc assignLightClientData(
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
forkyObject.next_sync_committee = forkyObject.next_sync_committee =
next_sync_committee.get next_sync_committee.get
forkyObject.next_sync_committee_branch = forkyObject.next_sync_committee_branch = normalize_merkle_branch(
attested_data.next_sync_committee_branch attested_data.next_sync_committee_branch,
lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX)
else: else:
doAssert next_sync_committee.isNone doAssert next_sync_committee.isNone
var finalized_slot = attested_data.finalized_slot var finalized_slot = attested_data.finalized_slot
withForkyObject(obj): withForkyObject(obj):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
if finalized_slot == forkyObject.finalized_header.beacon.slot: if finalized_slot == forkyObject.finalized_header.beacon.slot:
forkyObject.finality_branch = attested_data.finality_branch forkyObject.finality_branch = normalize_merkle_branch(
attested_data.finality_branch,
lcDataFork.FINALIZED_ROOT_GINDEX)
elif finalized_slot < max(dag.tail.slot, dag.backfill.slot): elif finalized_slot < max(dag.tail.slot, dag.backfill.slot):
forkyObject.finalized_header.reset() forkyObject.finalized_header.reset()
forkyObject.finality_branch.reset() forkyObject.finality_branch.reset()
@ -564,10 +580,14 @@ proc assignLightClientData(
attested_data.finalized_slot = finalized_slot attested_data.finalized_slot = finalized_slot
dag.lcDataStore.cache.data[attested_bid] = attested_data dag.lcDataStore.cache.data[attested_bid] = attested_data
if finalized_slot == forkyObject.finalized_header.beacon.slot: if finalized_slot == forkyObject.finalized_header.beacon.slot:
forkyObject.finality_branch = attested_data.finality_branch forkyObject.finality_branch = normalize_merkle_branch(
attested_data.finality_branch,
lcDataFork.FINALIZED_ROOT_GINDEX)
elif finalized_slot == GENESIS_SLOT: elif finalized_slot == GENESIS_SLOT:
forkyObject.finalized_header.reset() forkyObject.finalized_header.reset()
forkyObject.finality_branch = attested_data.finality_branch forkyObject.finality_branch = normalize_merkle_branch(
attested_data.finality_branch,
lcDataFork.FINALIZED_ROOT_GINDEX)
else: else:
var fin_header = dag.getExistingLightClientHeader(finalized_bid) var fin_header = dag.getExistingLightClientHeader(finalized_bid)
if fin_header.kind == LightClientDataFork.None: if fin_header.kind == LightClientDataFork.None:
@ -577,7 +597,9 @@ proc assignLightClientData(
else: else:
fin_header.migrateToDataFork(lcDataFork) fin_header.migrateToDataFork(lcDataFork)
forkyObject.finalized_header = fin_header.forky(lcDataFork) forkyObject.finalized_header = fin_header.forky(lcDataFork)
forkyObject.finality_branch = attested_data.finality_branch forkyObject.finality_branch = normalize_merkle_branch(
attested_data.finality_branch,
lcDataFork.FINALIZED_ROOT_GINDEX)
withForkyObject(obj): withForkyObject(obj):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
forkyObject.sync_aggregate = sync_aggregate forkyObject.sync_aggregate = sync_aggregate
@ -701,9 +723,11 @@ proc createLightClientBootstrap(
const lcDataFork = lcDataForkAtConsensusFork(consensusFork) const lcDataFork = lcDataForkAtConsensusFork(consensusFork)
dag.lcDataStore.db.putHeader( dag.lcDataStore.db.putHeader(
forkyBlck.toLightClientHeader(lcDataFork)) forkyBlck.toLightClientHeader(lcDataFork))
dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
bid.slot, normalize_merkle_branch(
dag.getLightClientData(bid).current_sync_committee_branch,
lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX))
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
bid.slot, dag.getLightClientData(bid).current_sync_committee_branch)
ok() ok()
proc initLightClientDataCache*(dag: ChainDAGRef) = proc initLightClientDataCache*(dag: ChainDAGRef) =
@ -1014,7 +1038,7 @@ proc getLightClientBootstrap(
# Ensure `current_sync_committee_branch` is known # Ensure `current_sync_committee_branch` is known
if dag.lcDataStore.importMode == LightClientDataImportMode.OnDemand and if dag.lcDataStore.importMode == LightClientDataImportMode.OnDemand and
not dag.lcDataStore.db.hasCurrentSyncCommitteeBranch(slot): not dag.hasCurrentSyncCommitteeBranch(slot):
let let
bsi = dag.getExistingBlockIdAtSlot(slot).valueOr: bsi = dag.getExistingBlockIdAtSlot(slot).valueOr:
return default(ForkedLightClientBootstrap) return default(ForkedLightClientBootstrap)
@ -1022,13 +1046,14 @@ proc getLightClientBootstrap(
dag.withUpdatedExistingState(tmpState[], bsi) do: dag.withUpdatedExistingState(tmpState[], bsi) do:
withState(updatedState): withState(updatedState):
when consensusFork >= ConsensusFork.Altair: when consensusFork >= ConsensusFork.Altair:
const lcDataFork = lcDataForkAtConsensusFork(consensusFork)
if not dag.lcDataStore.db.hasSyncCommittee(period): if not dag.lcDataStore.db.hasSyncCommittee(period):
dag.lcDataStore.db.putSyncCommittee( dag.lcDataStore.db.putSyncCommittee(
period, forkyState.data.current_sync_committee) period, forkyState.data.current_sync_committee)
dag.lcDataStore.db.putHeader(header) dag.lcDataStore.db.putHeader(header)
dag.lcDataStore.db.putCurrentSyncCommitteeBranch( dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
slot, forkyState.data.build_proof( slot, forkyState.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_GINDEX).get) lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get)
else: raiseAssert "Unreachable" else: raiseAssert "Unreachable"
do: return default(ForkedLightClientBootstrap) do: return default(ForkedLightClientBootstrap)
@ -1050,7 +1075,8 @@ proc getLightClientBootstrap(
debug "LC bootstrap unavailable: Sync committee not cached", period debug "LC bootstrap unavailable: Sync committee not cached", period
return default(ForkedLightClientBootstrap)), return default(ForkedLightClientBootstrap)),
current_sync_committee_branch: (block: current_sync_committee_branch: (block:
dag.lcDataStore.db.getCurrentSyncCommitteeBranch(slot).valueOr: getCurrentSyncCommitteeBranch[lcDataFork.CurrentSyncCommitteeBranch](
dag.lcDataStore.db, slot).valueOr:
debug "LC bootstrap unavailable: Committee branch not cached", slot debug "LC bootstrap unavailable: Committee branch not cached", slot
return default(ForkedLightClientBootstrap)))) return default(ForkedLightClientBootstrap))))
View File
@ -53,7 +53,7 @@ iterator get_beacon_committee*(
committees_per_slot * SLOTS_PER_EPOCH committees_per_slot * SLOTS_PER_EPOCH
): yield (index_in_committee, idx) ): yield (index_in_committee, idx)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_beacon_committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee*( func get_beacon_committee*(
shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex): shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex):
seq[ValidatorIndex] = seq[ValidatorIndex] =
@ -123,21 +123,21 @@ iterator get_attesting_indices*(shufflingRef: ShufflingRef,
iterator get_attesting_indices*( iterator get_attesting_indices*(
dag: ChainDAGRef, attestation: phase0.TrustedAttestation, dag: ChainDAGRef, attestation: phase0.TrustedAttestation,
on_chain: static bool = true): ValidatorIndex = on_chain: static bool = true): ValidatorIndex =
block: # `return` is not allowed in an inline iterator block gaiBlock: # `return` is not allowed in an inline iterator
let let
slot = slot =
check_attestation_slot_target(attestation.data).valueOr: check_attestation_slot_target(attestation.data).valueOr:
warn "Invalid attestation slot in trusted attestation", warn "Invalid attestation slot in trusted attestation",
attestation = shortLog(attestation) attestation = shortLog(attestation)
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
break break gaiBlock
blck = blck =
dag.getBlockRef(attestation.data.beacon_block_root).valueOr: dag.getBlockRef(attestation.data.beacon_block_root).valueOr:
# Attestation block unknown - this is fairly common because we # Attestation block unknown - this is fairly common because we
# discard alternative histories on restart # discard alternative histories on restart
debug "Pruned block in trusted attestation", debug "Pruned block in trusted attestation",
attestation = shortLog(attestation) attestation = shortLog(attestation)
break break gaiBlock
target = target =
blck.atCheckpoint(attestation.data.target).valueOr: blck.atCheckpoint(attestation.data.target).valueOr:
# This may happen when there's no block at the epoch boundary slot # This may happen when there's no block at the epoch boundary slot
@ -148,7 +148,7 @@ iterator get_attesting_indices*(
blck = shortLog(blck), blck = shortLog(blck),
attestation = shortLog(attestation) attestation = shortLog(attestation)
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
break break gaiBlock
shufflingRef = shufflingRef =
dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr: dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
warn "Attestation shuffling not found", warn "Attestation shuffling not found",
@ -156,7 +156,7 @@ iterator get_attesting_indices*(
attestation = shortLog(attestation) attestation = shortLog(attestation)
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
break break gaiBlock
committeesPerSlot = get_committee_count_per_slot(shufflingRef) committeesPerSlot = get_committee_count_per_slot(shufflingRef)
committeeIndex = committeeIndex =
@ -166,7 +166,7 @@ iterator get_attesting_indices*(
attestation = shortLog(attestation) attestation = shortLog(attestation)
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
break break gaiBlock
for validator in get_attesting_indices( for validator in get_attesting_indices(
shufflingRef, slot, committeeIndex, attestation.aggregation_bits): shufflingRef, slot, committeeIndex, attestation.aggregation_bits):
@ -175,21 +175,21 @@ iterator get_attesting_indices*(
iterator get_attesting_indices*( iterator get_attesting_indices*(
dag: ChainDAGRef, attestation: electra.TrustedAttestation, dag: ChainDAGRef, attestation: electra.TrustedAttestation,
on_chain: static bool): ValidatorIndex = on_chain: static bool): ValidatorIndex =
block: # `return` is not allowed in an inline iterator block gaiBlock: # `return` is not allowed in an inline iterator
let let
slot = slot =
check_attestation_slot_target(attestation.data).valueOr: check_attestation_slot_target(attestation.data).valueOr:
warn "Invalid attestation slot in trusted attestation", warn "Invalid attestation slot in trusted attestation",
attestation = shortLog(attestation) attestation = shortLog(attestation)
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
break break gaiBlock
blck = blck =
dag.getBlockRef(attestation.data.beacon_block_root).valueOr: dag.getBlockRef(attestation.data.beacon_block_root).valueOr:
# Attestation block unknown - this is fairly common because we # Attestation block unknown - this is fairly common because we
# discard alternative histories on restart # discard alternative histories on restart
debug "Pruned block in trusted attestation", debug "Pruned block in trusted attestation",
attestation = shortLog(attestation) attestation = shortLog(attestation)
break break gaiBlock
target = target =
blck.atCheckpoint(attestation.data.target).valueOr: blck.atCheckpoint(attestation.data.target).valueOr:
# This may happen when there's no block at the epoch boundary slot # This may happen when there's no block at the epoch boundary slot
@ -200,7 +200,7 @@ iterator get_attesting_indices*(
blck = shortLog(blck), blck = shortLog(blck),
attestation = shortLog(attestation) attestation = shortLog(attestation)
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
break break gaiBlock
shufflingRef = shufflingRef =
dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr: dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
warn "Attestation shuffling not found", warn "Attestation shuffling not found",
@ -208,7 +208,7 @@ iterator get_attesting_indices*(
attestation = shortLog(attestation) attestation = shortLog(attestation)
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
break break gaiBlock
for validator in get_attesting_indices( for validator in get_attesting_indices(
shufflingRef, slot, attestation.committee_bits, shufflingRef, slot, attestation.committee_bits,
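The label added above exists because `return` is not allowed inside an inline iterator, so early exits are expressed with `block`/`break`; giving the block a name (`gaiBlock`) keeps those `break`s unambiguous. A minimal sketch:

```nim
iterator evenUpTo(limit: int): int =
  block scan:
    var i = 0
    while true:
      if i > limit:
        break scan    # plays the role `return` would in a proc
      if i mod 2 == 0:
        yield i
      inc i

for x in evenUpTo(6):
  echo x              # 0, 2, 4, 6
```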
View File
@ -217,15 +217,16 @@ func produceContribution*(
else: else:
false false
func addAggregateAux(bestVotes: var BestSyncSubcommitteeContributions, func addContribution(
contribution: SyncCommitteeContribution) = contributions: var BestSyncSubcommitteeContributions,
contribution: SyncCommitteeContribution) =
let let
currentBestTotalParticipants = currentBestTotalParticipants =
bestVotes.subnets[contribution.subcommittee_index].totalParticipants contributions.subnets[contribution.subcommittee_index].totalParticipants
newBestTotalParticipants = countOnes(contribution.aggregation_bits) newBestTotalParticipants = countOnes(contribution.aggregation_bits)
if newBestTotalParticipants > currentBestTotalParticipants: if newBestTotalParticipants > currentBestTotalParticipants:
bestVotes.subnets[contribution.subcommittee_index] = contributions.subnets[contribution.subcommittee_index] =
BestSyncSubcommitteeContribution( BestSyncSubcommitteeContribution(
totalParticipants: newBestTotalParticipants, totalParticipants: newBestTotalParticipants,
participationBits: contribution.aggregation_bits, participationBits: contribution.aggregation_bits,
@ -241,10 +242,10 @@ func isSeen*(
seenKey in pool.seenContributionByAuthor seenKey in pool.seenContributionByAuthor
func covers( func covers(
bestVotes: BestSyncSubcommitteeContributions, contributions: BestSyncSubcommitteeContributions,
contribution: SyncCommitteeContribution): bool = contribution: SyncCommitteeContribution): bool =
contribution.aggregation_bits.isSubsetOf( contribution.aggregation_bits.isSubsetOf(
bestVotes.subnets[contribution.subcommittee_index].participationBits) contributions.subnets[contribution.subcommittee_index].participationBits)
func covers*( func covers*(
pool: var SyncCommitteeMsgPool, pool: var SyncCommitteeMsgPool,
@ -271,22 +272,12 @@ proc addContribution(pool: var SyncCommitteeMsgPool,
pool.seenContributionByAuthor.incl seenKey pool.seenContributionByAuthor.incl seenKey
let target = pool.cfg.toSyncMsgTarget(bid, contribution.slot) let target = pool.cfg.toSyncMsgTarget(bid, contribution.slot)
if target notin pool.bestContributions: pool.bestContributions.withValue(target, contributions):
let totalParticipants = countOnes(contribution.aggregation_bits) contributions[].addContribution(contribution)
var initialBestContributions = BestSyncSubcommitteeContributions() do:
var contributions: BestSyncSubcommitteeContributions
initialBestContributions.subnets[contribution.subcommittee_index] = contributions.addContribution(contribution)
BestSyncSubcommitteeContribution( pool.bestContributions[target] = contributions
totalParticipants: totalParticipants,
participationBits: contribution.aggregation_bits,
signature: signature)
pool.bestContributions[target] = initialBestContributions
else:
try:
addAggregateAux(pool.bestContributions[target], contribution)
except KeyError:
raiseAssert "We have checked for the key upfront"
proc addContribution*(pool: var SyncCommitteeMsgPool, proc addContribution*(pool: var SyncCommitteeMsgPool,
scproof: SignedContributionAndProof, scproof: SignedContributionAndProof,
@ -334,11 +325,35 @@ proc produceSyncAggregateAux(
aggregate aggregate
proc produceSyncAggregate*( proc produceSyncAggregate*(
pool: SyncCommitteeMsgPool, pool: var SyncCommitteeMsgPool,
bid: BlockId, bid: BlockId,
signatureSlot: Slot): SyncAggregate = signatureSlot: Slot): SyncAggregate =
# Sync committee signs previous slot, relative to when new block is produced # Sync committee signs previous slot, relative to when new block is produced
let target = pool.cfg.toSyncMsgTarget(bid, max(signatureSlot, 1.Slot) - 1) let
slot = max(signatureSlot, 1.Slot) - 1
target = pool.cfg.toSyncMsgTarget(bid, slot)
var contribution {.noinit.}: SyncCommitteeContribution
pool.bestContributions.withValue(target, contributions):
for subcommitteeIdx in SyncSubcommitteeIndex:
if contributions.subnets[subcommitteeIdx].totalParticipants == 0 and
pool.produceContribution(slot, bid, subcommitteeIdx, contribution):
debug "Did not receive contribution, did aggregate locally",
target, subcommitteeIdx
contributions[].addContribution(contribution)
do:
var
contributions: BestSyncSubcommitteeContributions
didAggregate = false
for subcommitteeIdx in SyncSubcommitteeIndex:
if pool.produceContribution(slot, bid, subcommitteeIdx, contribution):
debug "Did not receive contribution, did aggregate locally",
target, subcommitteeIdx
contributions.addContribution(contribution)
didAggregate = true
if didAggregate:
pool.bestContributions[target] = contributions
if target in pool.bestContributions: if target in pool.bestContributions:
try: try:
produceSyncAggregateAux(pool.bestContributions[target]) produceSyncAggregateAux(pool.bestContributions[target])
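Both `addContribution` and `produceSyncAggregate` above now use `Table.withValue ... do:` to update an existing entry in place or insert a fresh one, with a single lookup. A small self-contained sketch of that stdlib pattern:

```nim
import std/tables

var best = initTable[string, int]()

proc record(scores: var Table[string, int], key: string, score: int) =
  scores.withValue(key, current):
    if score > current[]:    # `current` is a ptr to the stored value
      current[] = score
  do:
    scores[key] = score      # key absent: insert the first value

best.record("subnet 0", 3)
best.record("subnet 0", 5)
echo best["subnet 0"]        # 5
```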
@ -349,7 +364,7 @@ proc produceSyncAggregate*(
proc isEpochLeadTime*( proc isEpochLeadTime*(
pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool = pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#sync-committee-subnet-stability # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
# This ensures a uniform distribution without requiring additional state: # This ensures a uniform distribution without requiring additional state:
# (1/4) = 1/4, 4 slots out # (1/4) = 1/4, 4 slots out
# (3/4) * (1/3) = 1/4, 3 slots out # (3/4) * (1/3) = 1/4, 3 slots out
View File
@ -491,9 +491,11 @@ func asConsensusType*(payload: engine_api.GetPayloadV3Response):
# Both are defined as `array[N, byte]` under the hood. # Both are defined as `array[N, byte]` under the hood.
blobsBundle: deneb.BlobsBundle( blobsBundle: deneb.BlobsBundle(
commitments: KzgCommitments.init( commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(it.bytes)), payload.blobsBundle.commitments.mapIt(
kzg_abi.KzgCommitment(bytes: it.bytes))),
proofs: KzgProofs.init( proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(it.bytes)), payload.blobsBundle.proofs.mapIt(
kzg_abi.KzgProof(bytes: it.bytes))),
blobs: Blobs.init( blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes)))) payload.blobsBundle.blobs.mapIt(it.bytes))))
@ -502,21 +504,28 @@ func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
template getTransaction(tt: TypedTransaction): bellatrix.Transaction = template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase) bellatrix.Transaction.init(tt.distinctBase)
template getDepositReceipt(dr: DepositReceiptV1): DepositReceipt = template getDepositRequest(
DepositReceipt( dr: DepositRequestV1): electra.DepositRequest =
electra.DepositRequest(
pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase), pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase),
withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest, withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest,
amount: dr.amount.Gwei, amount: dr.amount.Gwei,
signature: ValidatorSig(blob: dr.signature.distinctBase), signature: ValidatorSig(blob: dr.signature.distinctBase),
index: dr.index.uint64) index: dr.index.uint64)
template getExecutionLayerWithdrawalRequest(elwr: WithdrawalRequestV1): template getWithdrawalRequest(
ExecutionLayerWithdrawalRequest = wr: WithdrawalRequestV1): electra.WithdrawalRequest =
ExecutionLayerWithdrawalRequest( electra.WithdrawalRequest(
source_address: ExecutionAddress(data: elwr.sourceAddress.distinctBase), source_address: ExecutionAddress(data: wr.sourceAddress.distinctBase),
validator_pubkey: ValidatorPubKey( validator_pubkey: ValidatorPubKey(blob: wr.validatorPubkey.distinctBase),
blob: elwr.validatorPublicKey.distinctBase), amount: wr.amount.Gwei)
amount: elwr.amount.Gwei)
template getConsolidationRequest(
cr: ConsolidationRequestV1): electra.ConsolidationRequest =
electra.ConsolidationRequest(
source_address: ExecutionAddress(data: cr.sourceAddress.distinctBase),
source_pubkey: ValidatorPubKey(blob: cr.sourcePubkey.distinctBase),
target_pubkey: ValidatorPubKey(blob: cr.targetPubkey.distinctBase))
electra.ExecutionPayload( electra.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest, parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
@ -540,14 +549,17 @@ func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)), mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64, blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64, excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64,
deposit_receipts: deposit_requests:
List[electra.DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD].init( List[electra.DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.depositRequests, it.getDepositReceipt)), mapIt(rpcExecutionPayload.depositRequests, it.getDepositRequest)),
withdrawal_requests: withdrawal_requests: List[electra.WithdrawalRequest,
List[electra.ExecutionLayerWithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD].init(
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD].init( mapIt(rpcExecutionPayload.withdrawalRequests,
mapIt(rpcExecutionPayload.withdrawalRequests, it.getWithdrawalRequest)),
it.getExecutionLayerWithdrawalRequest))) consolidation_requests: List[electra.ConsolidationRequest,
Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.consolidationRequests,
it.getConsolidationRequest)))
func asConsensusType*(payload: engine_api.GetPayloadV4Response): func asConsensusType*(payload: engine_api.GetPayloadV4Response):
electra.ExecutionPayloadForSigning = electra.ExecutionPayloadForSigning =
@ -560,9 +572,11 @@ func asConsensusType*(payload: engine_api.GetPayloadV4Response):
# Both are defined as `array[N, byte]` under the hood. # Both are defined as `array[N, byte]` under the hood.
blobsBundle: deneb.BlobsBundle( blobsBundle: deneb.BlobsBundle(
commitments: KzgCommitments.init( commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(it.bytes)), payload.blobsBundle.commitments.mapIt(
kzg_abi.KzgCommitment(bytes: it.bytes))),
proofs: KzgProofs.init( proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(it.bytes)), payload.blobsBundle.proofs.mapIt(
kzg_abi.KzgProof(bytes: it.bytes))),
blobs: Blobs.init( blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes)))) payload.blobsBundle.blobs.mapIt(it.bytes))))
@ -647,20 +661,28 @@ func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction = template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase) TypedTransaction(tt.distinctBase)
template getDepositReceipt(dr: DepositReceipt): DepositReceiptV1 = template getDepositRequest(
DepositReceiptV1( dr: electra.DepositRequest): DepositRequestV1 =
DepositRequestV1(
pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob), pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob),
withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data), withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data),
amount: dr.amount.Quantity, amount: dr.amount.Quantity,
signature: FixedBytes[RawSigSize](dr.signature.blob), signature: FixedBytes[RawSigSize](dr.signature.blob),
index: dr.index.Quantity) index: dr.index.Quantity)
template getExecutionLayerWithdrawalRequest( template getWithdrawalRequest(
elwr: ExecutionLayerWithdrawalRequest): WithdrawalRequestV1 = wr: electra.WithdrawalRequest): WithdrawalRequestV1 =
WithdrawalRequestV1( WithdrawalRequestV1(
sourceAddress: Address(elwr.source_address.data), sourceAddress: Address(wr.source_address.data),
validatorPublicKey: FixedBytes[RawPubKeySize](elwr.validator_pubkey.blob), validatorPubkey: FixedBytes[RawPubKeySize](wr.validator_pubkey.blob),
amount: elwr.amount.Quantity) amount: wr.amount.Quantity)
template getConsolidationRequest(
cr: electra.ConsolidationRequest): ConsolidationRequestV1 =
ConsolidationRequestV1(
sourceAddress: Address(cr.source_address.data),
sourcePubkey: FixedBytes[RawPubKeySize](cr.source_pubkey.blob),
targetPubkey: FixedBytes[RawPubKeySize](cr.target_pubkey.blob))
engine_api.ExecutionPayloadV4( engine_api.ExecutionPayloadV4(
parentHash: executionPayload.parent_hash.asBlockHash, parentHash: executionPayload.parent_hash.asBlockHash,
@ -682,10 +704,11 @@ func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
blobGasUsed: Quantity(executionPayload.blob_gas_used), blobGasUsed: Quantity(executionPayload.blob_gas_used),
excessBlobGas: Quantity(executionPayload.excess_blob_gas), excessBlobGas: Quantity(executionPayload.excess_blob_gas),
depositRequests: mapIt( depositRequests: mapIt(
executionPayload.deposit_receipts, it.getDepositReceipt), executionPayload.deposit_requests, it.getDepositRequest),
withdrawalRequests: withdrawalRequests: mapIt(
mapIt(executionPayload.withdrawal_requests, executionPayload.withdrawal_requests, it.getWithdrawalRequest),
it.getExecutionLayerWithdrawalRequest)) consolidationRequests: mapIt(
executionPayload.consolidation_requests, it.getConsolidationRequest))
func isConnected(connection: ELConnection): bool = func isConnected(connection: ELConnection): bool =
connection.web3.isSome connection.web3.isSome
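The conversions above follow one shape per Electra request kind: a small `getX` template converts a single element, and the payload-level code is a `mapIt` over the list, in both directions. A sketch with simplified stand-in records (not the real engine-API or SSZ `List` types):

```nim
import std/sequtils

type
  WithdrawalRequestV1 = object     # engine-API-side shape (simplified)
    sourceAddress: array[20, byte]
    validatorPubkey: array[48, byte]
    amount: uint64
  WithdrawalRequest = object       # consensus-side shape (simplified)
    source_address: array[20, byte]
    validator_pubkey: array[48, byte]
    amount: uint64

template getWithdrawalRequest(wr: WithdrawalRequestV1): WithdrawalRequest =
  WithdrawalRequest(
    source_address: wr.sourceAddress,
    validator_pubkey: wr.validatorPubkey,
    amount: wr.amount)

func asConsensus(reqs: seq[WithdrawalRequestV1]): seq[WithdrawalRequest] =
  mapIt(reqs, it.getWithdrawalRequest)

echo asConsensus(@[WithdrawalRequestV1(amount: 1_000_000_000'u64)]).len   # 1
```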
@ -1531,7 +1554,6 @@ proc exchangeConfigWithSingleEL(
# https://chainid.network/ # https://chainid.network/
expectedChain = case m.eth1Network.get expectedChain = case m.eth1Network.get
of mainnet: 1.Quantity of mainnet: 1.Quantity
of goerli: 5.Quantity
of sepolia: 11155111.Quantity of sepolia: 11155111.Quantity
of holesky: 17000.Quantity of holesky: 17000.Quantity
if expectedChain != providerChain: if expectedChain != providerChain:
View File
@ -7,7 +7,7 @@
{.push raises: [].} {.push raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/tests/core/pyspec/eth2spec/utils/merkle_minimal.py # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# Merkle tree helpers # Merkle tree helpers
# --------------------------------------------------------------- # ---------------------------------------------------------------
View File
@ -113,7 +113,7 @@ proc update_justified(
self.update_justified(dag, blck, justified.epoch) self.update_justified(dag, blck, justified.epoch)
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#update_checkpoints # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#update_checkpoints
proc update_checkpoints( proc update_checkpoints(
self: var Checkpoints, dag: ChainDAGRef, self: var Checkpoints, dag: ChainDAGRef,
checkpoints: FinalityCheckpoints): FcResult[void] = checkpoints: FinalityCheckpoints): FcResult[void] =
@ -377,7 +377,7 @@ proc get_head*(self: var ForkChoice,
self.checkpoints.justified.balances, self.checkpoints.justified.balances,
self.checkpoints.proposer_boost_root) self.checkpoints.proposer_boost_root)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/fork_choice/safe-block.md#get_safe_beacon_block_root # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/fork_choice/safe-block.md#get_safe_beacon_block_root
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest = func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
# Use most recent justified block as a stopgap # Use most recent justified block as a stopgap
self.checkpoints.justified.checkpoint.root self.checkpoints.justified.checkpoint.root
@ -502,8 +502,8 @@ when isMainModule:
for i in 0 ..< validator_count: for i in 0 ..< validator_count:
indices.add fakeHash(i), i indices.add fakeHash(i), i
votes.add default(VoteTracker) votes.add default(VoteTracker)
old_balances.add 0 old_balances.add 0.Gwei
new_balances.add 0 new_balances.add 0.Gwei
let err = deltas.compute_deltas( let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances indices, indices_offset = 0, votes, old_balances, new_balances
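The test values above gain a `.Gwei` suffix, which suggests balances now use a distinct integer type so plain integer literals no longer convert implicitly. A minimal stand-in (an assumption, not the repo's actual definition):

```nim
type Gwei = distinct uint64      # assumed stand-in definition

proc `+`(a, b: Gwei): Gwei {.borrow.}
proc `$`(a: Gwei): string {.borrow.}

var balances: seq[Gwei]
balances.add 0.Gwei              # `balances.add 0` would now be a type error
echo balances[0] + 32.Gwei       # 32
```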
View File
@ -186,7 +186,7 @@ proc storeBackfillBlock(
let blobs = blobsOpt.get() let blobs = blobsOpt.get()
let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0: if blobs.len > 0 or kzgCommits.len > 0:
let r = validate_blobs(kzgCommits, blobs.mapIt(it.blob), let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)),
blobs.mapIt(it.kzg_proof)) blobs.mapIt(it.kzg_proof))
if r.isErr(): if r.isErr():
debug "backfill blob validation failed", debug "backfill blob validation failed",
@ -545,13 +545,17 @@ proc storeBlock(
# TODO run https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#blob-kzg-commitments # TODO run https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#blob-kzg-commitments
# https://github.com/ethereum/execution-apis/blob/main/src/engine/experimental/blob-extension.md#specification # https://github.com/ethereum/execution-apis/blob/main/src/engine/experimental/blob-extension.md#specification
# "This validation MUST be instantly run in all cases even during active sync process." # "This validation MUST be instantly run in all cases even during active
# sync process."
# #
# Client software MUST validate `blockHash` value as being equivalent to # Client software MUST validate `blockHash` value as being equivalent to
# `Keccak256(RLP(ExecutionBlockHeader))` # `Keccak256(RLP(ExecutionBlockHeader))`
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification
when typeof(signedBlock).kind >= ConsensusFork.Bellatrix and typeof(signedBlock).kind <= ConsensusFork.Deneb: #
debugComment "electra can do this in principle" # This should simulate an unsynced EL, which still must perform these
# checks. This means it must be able to do so without context, beyond
# whatever data the block itself contains.
when typeof(signedBlock).kind >= ConsensusFork.Bellatrix:
template payload(): auto = signedBlock.message.body.execution_payload template payload(): auto = signedBlock.message.body.execution_payload
if signedBlock.message.is_execution_block and if signedBlock.message.is_execution_block and
payload.block_hash != payload.block_hash !=
@ -562,8 +566,6 @@ proc storeBlock(
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
self.consensusManager.quarantine[].addUnviable(signedBlock.root) self.consensusManager.quarantine[].addUnviable(signedBlock.root)
return err((VerifierError.Invalid, ProcessingStatus.completed)) return err((VerifierError.Invalid, ProcessingStatus.completed))
else:
discard
let newPayloadTick = Moment.now() let newPayloadTick = Moment.now()
@ -575,7 +577,7 @@ proc storeBlock(
let blobs = blobsOpt.get() let blobs = blobsOpt.get()
let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq let kzgCommits = signedBlock.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0: if blobs.len > 0 or kzgCommits.len > 0:
let r = validate_blobs(kzgCommits, blobs.mapIt(it.blob), let r = validate_blobs(kzgCommits, blobs.mapIt(KzgBlob(bytes: it.blob)),
blobs.mapIt(it.kzg_proof)) blobs.mapIt(it.kzg_proof))
if r.isErr(): if r.isErr():
debug "blob validation failed", debug "blob validation failed",
@ -838,7 +840,7 @@ proc processBlock(
# - MUST NOT optimistically import the block. # - MUST NOT optimistically import the block.
# - MUST NOT apply the block to the fork choice store. # - MUST NOT apply the block to the fork choice store.
# - MAY queue the block for later processing. # - MAY queue the block for later processing.
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/sync/optimistic.md#execution-engine-errors # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#execution-engine-errors
await sleepAsync(chronos.seconds(1)) await sleepAsync(chronos.seconds(1))
self[].enqueueBlock( self[].enqueueBlock(
entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized, entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized,
View File
@ -302,7 +302,7 @@ template validateBeaconBlockBellatrix(
# #
# `is_merge_transition_complete(state)` tests for # `is_merge_transition_complete(state)` tests for
# `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while # `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
# shows that `state.latest_execution_payload_header` being default or not is # shows that `state.latest_execution_payload_header` being default or not is
# exactly equivalent to whether that block's execution payload is default or # exactly equivalent to whether that block's execution payload is default or
# not, so test cached block information rather than reconstructing a state. # not, so test cached block information rather than reconstructing a state.
@ -458,7 +458,7 @@ proc validateBlobSidecar*(
# blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`. # blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`.
block: block:
let ok = verifyProof( let ok = verifyProof(
blob_sidecar.blob, KzgBlob(bytes: blob_sidecar.blob),
blob_sidecar.kzg_commitment, blob_sidecar.kzg_commitment,
blob_sidecar.kzg_proof).valueOr: blob_sidecar.kzg_proof).valueOr:
return dag.checkedReject("BlobSidecar: blob verify failed") return dag.checkedReject("BlobSidecar: blob verify failed")
@ -1132,7 +1132,6 @@ proc validateAggregate*(
Future[Result[ Future[Result[
tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig], tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig],
ValidationError]] {.async: (raises: [CancelledError]).} = ValidationError]] {.async: (raises: [CancelledError]).} =
debugComment "is not"
template aggregate_and_proof: untyped = signedAggregateAndProof.message template aggregate_and_proof: untyped = signedAggregateAndProof.message
template aggregate: untyped = aggregate_and_proof.aggregate template aggregate: untyped = aggregate_and_proof.aggregate
@ -1181,7 +1180,7 @@ proc validateAggregate*(
ok((attesting_indices, sig)) ok((attesting_indices, sig))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#bls_to_execution_change # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#bls_to_execution_change
proc validateBlsToExecutionChange*( proc validateBlsToExecutionChange*(
pool: ValidatorChangePool, batchCrypto: ref BatchCrypto, pool: ValidatorChangePool, batchCrypto: ref BatchCrypto,
signed_address_change: SignedBLSToExecutionChange, signed_address_change: SignedBLSToExecutionChange,
View File
@ -209,47 +209,62 @@ proc tryForceUpdate(
finalizedSlot = forkyStore.finalized_header.beacon.slot, finalizedSlot = forkyStore.finalized_header.beacon.slot,
optimisticSlot = forkyStore.optimistic_header.beacon.slot optimisticSlot = forkyStore.optimistic_header.beacon.slot
proc doProcessObject(
self: var LightClientProcessor,
bootstrap: ForkedLightClientBootstrap,
wallTime: BeaconTime): Result[void, VerifierError] =
if bootstrap.kind == LightClientDataFork.None:
err(VerifierError.Invalid)
elif self.store[].kind > LightClientDataFork.None:
err(VerifierError.Duplicate)
else:
let trustedBlockRoot = self.getTrustedBlockRoot()
if trustedBlockRoot.isNone:
err(VerifierError.MissingParent)
else:
withForkyBootstrap(bootstrap):
when lcDataFork > LightClientDataFork.None:
let initRes = initialize_light_client_store(
trustedBlockRoot.get, forkyBootstrap, self.cfg)
if initRes.isErr:
err(initRes.error)
else:
self.store[] = ForkedLightClientStore.init(initRes.get)
ok()
else:
raiseAssert "Unreachable; bootstrap.kind was checked"
proc doProcessObject(
self: var LightClientProcessor,
update: SomeForkedLightClientUpdate,
wallTime: BeaconTime): Result[void, VerifierError] =
if update.kind == LightClientDataFork.None:
err(VerifierError.Invalid)
elif self.store[].kind == LightClientDataFork.None:
err(VerifierError.MissingParent)
else:
withForkyObject(update):
when lcDataFork > LightClientDataFork.None:
if lcDataFork > self.store[].kind:
info "Upgrading light client",
oldFork = self.store[].kind, newFork = lcDataFork
self.store[].migrateToDataFork(lcDataFork)
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
let
wallSlot = wallTime.slotOrZero()
upgradedUpdate = update.migratingToDataFork(lcDataFork)
process_light_client_update(
forkyStore, upgradedUpdate.forky(lcDataFork), wallSlot,
self.cfg, self.genesis_validators_root)
else:
raiseAssert "Unreachable; self.store[].kind was checked"
proc processObject( proc processObject(
self: var LightClientProcessor, self: var LightClientProcessor,
obj: SomeForkedLightClientObject, obj: SomeForkedLightClientObject,
wallTime: BeaconTime): Result[void, VerifierError] = wallTime: BeaconTime): Result[void, VerifierError] =
let let res = self.doProcessObject(obj, wallTime)
res = withForkyObject(obj):
when lcDataFork > LightClientDataFork.None:
when forkyObject is ForkyLightClientBootstrap:
if self.store[].kind > LightClientDataFork.None:
err(VerifierError.Duplicate)
else:
let trustedBlockRoot = self.getTrustedBlockRoot()
if trustedBlockRoot.isNone:
err(VerifierError.MissingParent)
else:
let initRes = initialize_light_client_store(
trustedBlockRoot.get, forkyObject, self.cfg)
if initRes.isErr:
err(initRes.error)
else:
self.store[] = ForkedLightClientStore.init(initRes.get)
ok()
elif forkyObject is SomeForkyLightClientUpdate:
if self.store[].kind == LightClientDataFork.None:
err(VerifierError.MissingParent)
else:
if lcDataFork > self.store[].kind:
info "Upgrading light client",
oldFork = self.store[].kind, newFork = lcDataFork
self.store[].migrateToDataFork(lcDataFork)
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
let
wallSlot = wallTime.slotOrZero()
upgradedObject = obj.migratingToDataFork(lcDataFork)
process_light_client_update(
forkyStore, upgradedObject.forky(lcDataFork), wallSlot,
self.cfg, self.genesis_validators_root)
else: raiseAssert "Unreachable"
else:
err(VerifierError.Invalid)
withForkyObject(obj): withForkyObject(obj):
when lcDataFork > LightClientDataFork.None: when lcDataFork > LightClientDataFork.None:
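The refactor above splits one large `processObject` into per-kind `doProcessObject` overloads, so bootstrap and update handling each read top-to-bottom and the entry point only forwards. A simplified sketch of that shape with hypothetical types:

```nim
type
  Bootstrap = object
    root: string
  Update = object
    slot: int
  Store = object
    initialized: bool
    slot: int

proc doProcessObject(store: var Store, b: Bootstrap): bool =
  if store.initialized:
    return false       # duplicate bootstrap
  store.initialized = true
  true

proc doProcessObject(store: var Store, u: Update): bool =
  if not store.initialized:
    return false       # missing parent: need a bootstrap first
  store.slot = max(store.slot, u.slot)
  true

proc processObject(store: var Store, obj: Bootstrap | Update): bool =
  store.doProcessObject(obj)

var s: Store
echo s.processObject(Bootstrap(root: "0x00"))   # true
echo s.processObject(Update(slot: 7))           # true
```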
View File
@ -63,7 +63,7 @@ typedef struct ETHRandomNumber ETHRandomNumber;
* @return `NULL` - If an error occurred. * @return `NULL` - If an error occurred.
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHRandomNumber *ETHRandomNumberCreate(void); ETHRandomNumber *_Nullable ETHRandomNumberCreate(void);
/** /**
* Destroys a cryptographically secure random number generator. * Destroys a cryptographically secure random number generator.
@ -94,10 +94,10 @@ typedef struct ETHConsensusConfig ETHConsensusConfig;
* based on the given `config.yaml` file content - If successful. * based on the given `config.yaml` file content - If successful.
* @return `NULL` - If the given `config.yaml` is malformed or incompatible. * @return `NULL` - If the given `config.yaml` is malformed or incompatible.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/configs/README.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHConsensusConfig *ETHConsensusConfigCreateFromYaml(const char *configFileContent); ETHConsensusConfig *_Nullable ETHConsensusConfigCreateFromYaml(const char *configFileContent);
/** /**
* Destroys an Ethereum Consensus Layer network configuration. * Destroys an Ethereum Consensus Layer network configuration.
@ -151,12 +151,12 @@ typedef struct ETHBeaconState ETHBeaconState;
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/configs/README.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHBeaconState *ETHBeaconStateCreateFromSsz( ETHBeaconState *_Nullable ETHBeaconStateCreateFromSsz(
const ETHConsensusConfig *cfg, const ETHConsensusConfig *cfg,
const char *consensusVersion, const char *consensusVersion,
const void *sszBytes, const void *sszBytes,
@ -251,7 +251,7 @@ typedef struct ETHBeaconClock ETHBeaconClock;
* NULL if the state contained an invalid time. * NULL if the state contained an invalid time.
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHBeaconClock *ETHBeaconClockCreateFromState( ETHBeaconClock *_Nullable ETHBeaconClockCreateFromState(
const ETHConsensusConfig *cfg, const ETHBeaconState *state); const ETHConsensusConfig *cfg, const ETHBeaconState *state);
/** /**
@ -325,11 +325,11 @@ typedef struct ETHLightClientStore ETHLightClientStore;
* *
* @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
* @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#weak-subjectivity-period * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHLightClientStore *ETHLightClientStoreCreateFromBootstrap( ETHLightClientStore *_Nullable ETHLightClientStoreCreateFromBootstrap(
const ETHConsensusConfig *cfg, const ETHConsensusConfig *cfg,
const ETHRoot *trustedBlockRoot, const ETHRoot *trustedBlockRoot,
const char *mediaType, const char *mediaType,
@ -579,7 +579,7 @@ typedef struct ETHLightClientHeader ETHLightClientHeader;
* *
* @return Latest finalized header. * @return Latest finalized header.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader( const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
@ -597,8 +597,8 @@ const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
* *
* @return Whether or not the next sync committee is currently known. * @return Whether or not the next sync committee is currently known.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store); bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store);
@ -1040,7 +1040,7 @@ typedef struct ETHExecutionBlockHeader ETHExecutionBlockHeader;
* @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbyhash * @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbyhash
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHExecutionBlockHeader *ETHExecutionBlockHeaderCreateFromJson( ETHExecutionBlockHeader *_Nullable ETHExecutionBlockHeaderCreateFromJson(
const ETHRoot *executionHash, const ETHRoot *executionHash,
const char *blockHeaderJson); const char *blockHeaderJson);
@ -1129,7 +1129,7 @@ typedef struct ETHTransactions ETHTransactions;
* @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbyhash * @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_getblockbyhash
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHTransactions *ETHTransactionsCreateFromJson( ETHTransactions *_Nullable ETHTransactionsCreateFromJson(
const ETHRoot *_Nullable transactionsRoot, const ETHRoot *_Nullable transactionsRoot,
const char *transactionsJson); const char *transactionsJson);
@ -1588,7 +1588,7 @@ typedef struct ETHReceipts ETHReceipts;
* @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionreceipt * @see https://ethereum.org/en/developers/docs/apis/json-rpc/#eth_gettransactionreceipt
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHReceipts *ETHReceiptsCreateFromJson( ETHReceipts *_Nullable ETHReceiptsCreateFromJson(
const ETHRoot *_Nullable receiptsRoot, const ETHRoot *_Nullable receiptsRoot,
const char *receiptsJson, const char *receiptsJson,
const ETHTransactions *transactions); const ETHTransactions *transactions);

View File

@ -9,7 +9,6 @@
import import
std/[json, sequtils, times], std/[json, sequtils, times],
stew/saturation_arith,
eth/common/[eth_types_rlp, transaction], eth/common/[eth_types_rlp, transaction],
eth/keys, eth/keys,
eth/p2p/discoveryv5/random2, eth/p2p/discoveryv5/random2,
@ -79,7 +78,7 @@ proc ETHConsensusConfigCreateFromYaml(
## * `NULL` - If the given `config.yaml` is malformed or incompatible. ## * `NULL` - If the given `config.yaml` is malformed or incompatible.
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/configs/README.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
let cfg = RuntimeConfig.new() let cfg = RuntimeConfig.new()
try: try:
cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0] cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0]
@ -145,9 +144,9 @@ proc ETHBeaconStateCreateFromSsz(
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/configs/README.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
let let
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
return nil return nil
@ -330,8 +329,8 @@ proc ETHLightClientStoreCreateFromBootstrap(
## See: ## See:
## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#weak-subjectivity-period ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
let let
mediaType = MediaType.init($mediaType) mediaType = MediaType.init($mediaType)
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
@ -756,8 +755,8 @@ func ETHLightClientStoreIsNextSyncCommitteeKnown(
## * Whether or not the next sync committee is currently known. ## * Whether or not the next sync committee is currently known.
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
store[].is_next_sync_committee_known store[].is_next_sync_committee_known
func ETHLightClientStoreGetOptimisticHeader( func ETHLightClientStoreGetOptimisticHeader(
@ -797,7 +796,7 @@ func ETHLightClientStoreGetSafetyThreshold(
## * Light client store safety threshold. ## * Light client store safety threshold.
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#get_safety_threshold ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#get_safety_threshold
store[].get_safety_threshold.cint store[].get_safety_threshold.cint
proc ETHLightClientHeaderCreateCopy( proc ETHLightClientHeaderCreateCopy(
@ -1243,10 +1242,8 @@ proc ETHExecutionBlockHeaderCreateFromJson(
# Construct block header # Construct block header
static: # `GasInt` is signed. We only use it for hashing. static: # `GasInt` is signed. We only use it for hashing.
doAssert sizeof(int64) == sizeof(data.gasLimit) doAssert sizeof(uint64) == sizeof(data.gasLimit)
doAssert sizeof(int64) == sizeof(data.gasUsed) doAssert sizeof(uint64) == sizeof(data.gasUsed)
if distinctBase(data.timestamp) > int64.high.uint64:
return nil
if data.nonce.isNone: if data.nonce.isNone:
return nil return nil
let blockHeader = ExecutionBlockHeader( let blockHeader = ExecutionBlockHeader(
@ -1259,8 +1256,8 @@ proc ETHExecutionBlockHeaderCreateFromJson(
logsBloom: distinctBase(data.logsBloom), logsBloom: distinctBase(data.logsBloom),
difficulty: data.difficulty, difficulty: data.difficulty,
number: distinctBase(data.number), number: distinctBase(data.number),
gasLimit: GasInt.saturate distinctBase(data.gasLimit), gasLimit: distinctBase(data.gasLimit),
gasUsed: GasInt.saturate distinctBase(data.gasUsed), gasUsed: distinctBase(data.gasUsed),
timestamp: EthTime(distinctBase(data.timestamp)), timestamp: EthTime(distinctBase(data.timestamp)),
extraData: distinctBase(data.extraData), extraData: distinctBase(data.extraData),
mixHash: data.mixHash.asEth2Digest, mixHash: data.mixHash.asEth2Digest,
@ -1323,7 +1320,7 @@ proc ETHExecutionBlockHeaderCreateFromJson(
var tr = initHexaryTrie(newMemoryDB()) var tr = initHexaryTrie(newMemoryDB())
for i, wd in wds: for i, wd in wds:
try: try:
tr.put(rlp.encode(i), wd.bytes) tr.put(rlp.encode(i.uint), wd.bytes)
except RlpError: except RlpError:
raiseAssert "Unreachable" raiseAssert "Unreachable"
if tr.rootHash() != data.withdrawalsRoot.get.asEth2Digest: if tr.rootHash() != data.withdrawalsRoot.get.asEth2Digest:
@ -1505,25 +1502,15 @@ proc ETHTransactionsCreateFromJson(
# Construct transaction # Construct transaction
static: static:
doAssert sizeof(uint64) == sizeof(ChainId) doAssert sizeof(uint64) == sizeof(ChainId)
doAssert sizeof(int64) == sizeof(data.gasPrice) doAssert sizeof(uint64) == sizeof(data.gas)
doAssert sizeof(int64) == sizeof(data.maxPriorityFeePerGas.get) doAssert sizeof(uint64) == sizeof(data.gasPrice)
doAssert sizeof(uint64) == sizeof(data.maxPriorityFeePerGas.get)
doAssert sizeof(UInt256) == sizeof(data.maxFeePerBlobGas.get) doAssert sizeof(UInt256) == sizeof(data.maxFeePerBlobGas.get)
if distinctBase(data.chainId.get(0.Quantity)) > distinctBase(ChainId.high): if distinctBase(data.chainId.get(0.Quantity)) > distinctBase(ChainId.high):
return nil return nil
if distinctBase(data.gasPrice) > int64.high.uint64:
return nil
if distinctBase(data.maxFeePerGas.get(0.Quantity)) > int64.high.uint64:
return nil
if distinctBase(data.maxPriorityFeePerGas.get(0.Quantity)) >
int64.high.uint64:
return nil
if data.maxFeePerBlobGas.get(0.u256) > if data.maxFeePerBlobGas.get(0.u256) >
uint64.high.u256: uint64.high.u256:
return nil return nil
if distinctBase(data.gas) > int64.high.uint64:
return nil
if distinctBase(data.v) > int64.high.uint64:
return nil
if data.yParity.isSome: if data.yParity.isSome:
# This is not always included, but if it is, make sure it's correct # This is not always included, but if it is, make sure it's correct
let yParity = data.yParity.get let yParity = data.yParity.get
@ -1563,7 +1550,7 @@ proc ETHTransactionsCreateFromJson(
ExecutionHash256(data: distinctBase(it))) ExecutionHash256(data: distinctBase(it)))
else: else:
@[], @[],
V: data.v.uint64, V: distinctBase(data.v),
R: data.r, R: data.r,
S: data.s) S: data.s)
rlpBytes = rlpBytes =
@ -1749,7 +1736,7 @@ proc ETHTransactionsCreateFromJson(
var tr = initHexaryTrie(newMemoryDB()) var tr = initHexaryTrie(newMemoryDB())
for i, transaction in txs: for i, transaction in txs:
try: try:
tr.put(rlp.encode(i), distinctBase(transaction.bytes)) tr.put(rlp.encode(i.uint), distinctBase(transaction.bytes))
except RlpError: except RlpError:
raiseAssert "Unreachable" raiseAssert "Unreachable"
if tr.rootHash() != transactionsRoot[]: if tr.rootHash() != transactionsRoot[]:
@ -2429,7 +2416,7 @@ proc ETHReceiptsCreateFromJson(
var tr = initHexaryTrie(newMemoryDB()) var tr = initHexaryTrie(newMemoryDB())
for i, rec in recs: for i, rec in recs:
try: try:
tr.put(rlp.encode(i), rec.bytes) tr.put(rlp.encode(i.uint), rec.bytes)
except RlpError: except RlpError:
raiseAssert "Unreachable" raiseAssert "Unreachable"
if tr.rootHash() != receiptsRoot[]: if tr.rootHash() != receiptsRoot[]:
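The three hunks above share one pattern: rebuild a hexary trie keyed by the RLP-encoded (now explicitly unsigned) list index and compare its root against the corresponding header field. A condensed sketch of that pattern, as a standalone helper that is not part of the diff, assuming the same nim-eth trie/RLP modules the surrounding code uses:
```nim
import eth/rlp, eth/trie/[db, hexary]

proc listRoot(items: openArray[seq[byte]]): KeccakHash =
  ## Merkle-Patricia root of an index-keyed list; withdrawals, transactions
  ## and receipts verification all use this shape.
  var tr = initHexaryTrie(newMemoryDB())
  for i, item in items:
    tr.put(rlp.encode(i.uint), item)  # key = RLP(unsigned index), value = raw bytes
  tr.rootHash()
```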

View File

@ -0,0 +1,142 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import stew/base10
import std/tables
import libp2p/[multiaddress, multicodec, peerstore]
type
Eth2Agent* {.pure.} = enum
Unknown,
Nimbus,
Lighthouse,
Prysm,
Teku,
Lodestar,
Grandine
func `$`*(a: Eth2Agent): string =
case a
of Eth2Agent.Unknown:
"pending/unknown"
of Eth2Agent.Nimbus:
"nimbus"
of Eth2Agent.Lighthouse:
"lighthouse"
of Eth2Agent.Prysm:
"prysm"
of Eth2Agent.Teku:
"teku"
of Eth2Agent.Lodestar:
"lodestar"
of Eth2Agent.Grandine:
"grandine"
const
  # Lighthouse errors can be found here
# https://github.com/sigp/lighthouse/blob/5fdd3b39bb8150d1ea8622e42e0166ed46af7693/beacon_node/lighthouse_network/src/rpc/methods.rs#L171
LighthouseErrors = [
(128'u64, "Unable to verify network"),
(129'u64, "The node has too many connected peers"),
(250'u64, "Peer score is too low"),
(251'u64, "The peer is banned"),
(252'u64, "The IP address the peer is using is banned"),
].toTable()
  # Prysm errors can be found here
# https://github.com/prysmaticlabs/prysm/blob/7a394062e1054d73014e793819cb9cf0d20ff2e3/beacon-chain/p2p/types/rpc_goodbye_codes.go#L12
PrysmErrors = [
(128'u64, "Unable to verify network"),
(129'u64, "The node has too many connected peers"),
(250'u64, "Peer score is too low"),
(251'u64, "The peer is banned")
].toTable()
  # Lodestar errors can be found here
# https://github.com/ChainSafe/lodestar/blob/7280234bea66b49da3900b916a1b54c4666e4173/packages/beacon-node/src/constants/network.ts#L20
LodestarErrors = [
(128'u64, "Unable to verify network"),
(129'u64, "The node has too many connected peers"),
(250'u64, "Peer score is too low"),
(251'u64, "The peer is banned")
].toTable()
  # Teku errors can be found here
# https://github.com/Consensys/teku/blob/a3f7ebc75f24ec942286b0c1ae192e411f84aa7e/ethereum/spec/src/main/java/tech/pegasys/teku/spec/datastructures/networking/libp2p/rpc/GoodbyeMessage.java#L42
TekuErrors = [
(128'u64, "Unable to verify network"),
(129'u64, "The node has too many connected peers"),
(130'u64, "Too many requests from the peer")
].toTable()
  # Nimbus errors can be found here
# https://github.com/status-im/nimbus-eth2/blob/9b6b42c8f9792e657397bb3669a80b57da470c04/beacon_chain/networking/eth2_network.nim#L176
NimbusErrors = [
(237'u64, "Peer score is too low")
].toTable()
  # Grandine errors can be found here
# https://github.com/grandinetech/eth2_libp2p/blob/63a0c5e662847b86b1d5617478e39bccd39df0a9/src/rpc/methods.rs#L246
GrandineErrors = [
(128'u64, "Unable to verify network"),
(129'u64, "The node has too many connected peers"),
(250'u64, "Peer score is too low"),
(251'u64, "The peer is banned"),
(252'u64, "The IP address the peer is using is banned"),
].toTable()
  # This is a combination of all the errors; we need it when the remote agent
  # is not identified yet.
UnknownErrors = [
(128'u64, "Unable to verify network"),
(129'u64, "The node has too many connected peers"),
(130'u64, "Too many requests from the peer"),
(237'u64, "Peer score is too low"),
(250'u64, "Peer score is too low"),
(251'u64, "The peer is banned"),
(252'u64, "The IP address the peer is using is banned"),
].toTable()
func disconnectReasonName*(agent: Eth2Agent, code: uint64): string =
if code < 128'u64:
case code
of 0'u64:
"Unknown error (0)"
of 1'u64:
"Client shutdown (1)"
of 2'u64:
"Irrelevant network (2)"
of 3'u64:
"Fault or error (3)"
else:
let
scode = " (" & Base10.toString(code) & ")"
defaultMessage = "Disconnected"
defaultMessage & scode
else:
let
scode = " (" & Base10.toString(code) & ")"
defaultMessage = "Disconnected"
case agent
of Eth2Agent.Unknown:
UnknownErrors.getOrDefault(code, defaultMessage) & scode
of Eth2Agent.Nimbus:
NimbusErrors.getOrDefault(code, defaultMessage) & scode
of Eth2Agent.Lighthouse:
LighthouseErrors.getOrDefault(code, defaultMessage) & scode
of Eth2Agent.Prysm:
PrysmErrors.getOrDefault(code, defaultMessage) & scode
of Eth2Agent.Teku:
TekuErrors.getOrDefault(code, defaultMessage) & scode
of Eth2Agent.Lodestar:
LodestarErrors.getOrDefault(code, defaultMessage) & scode
of Eth2Agent.Grandine:
GrandineErrors.getOrDefault(code, defaultMessage) & scode
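To make the lookup behaviour concrete, a small usage sketch; the expected strings are derived from the tables and fallback logic above, and it assumes this new file is importable as `eth2_agents` (as the later `eth2_network` import list suggests):
```nim
import ./eth2_agents

doAssert disconnectReasonName(Eth2Agent.Unknown, 1'u64) ==
  "Client shutdown (1)"
doAssert disconnectReasonName(Eth2Agent.Lighthouse, 250'u64) ==
  "Peer score is too low (250)"
doAssert disconnectReasonName(Eth2Agent.Teku, 130'u64) ==
  "Too many requests from the peer (130)"
# A code unknown to the identified client falls back to the generic message.
doAssert disconnectReasonName(Eth2Agent.Nimbus, 130'u64) ==
  "Disconnected (130)"
```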

View File

@ -25,13 +25,13 @@ type
Eth2DiscoveryId* = NodeId Eth2DiscoveryId* = NodeId
func parseBootstrapAddress*(address: string): func parseBootstrapAddress*(address: string):
Result[enr.Record, cstring] = Result[enr.Record, string] =
let lowerCaseAddress = toLowerAscii(address) let lowerCaseAddress = toLowerAscii(address)
if lowerCaseAddress.startsWith("enr:"): if lowerCaseAddress.startsWith("enr:"):
var enrRec: enr.Record let res = enr.Record.fromURI(address)
if enrRec.fromURI(address): if res.isOk():
return ok enrRec return ok res.value
return err "Invalid ENR bootstrap record" return err "Invalid bootstrap ENR: " & $res.error
elif lowerCaseAddress.startsWith("enode:"): elif lowerCaseAddress.startsWith("enode:"):
return err "ENode bootstrap addresses are not supported" return err "ENode bootstrap addresses are not supported"
else: else:
@ -78,7 +78,7 @@ proc loadBootstrapFile*(bootstrapFile: string,
proc new*(T: type Eth2DiscoveryProtocol, proc new*(T: type Eth2DiscoveryProtocol,
config: BeaconNodeConf | LightClientConf, config: BeaconNodeConf | LightClientConf,
enrIp: Option[IpAddress], enrTcpPort, enrUdpPort: Option[Port], enrIp: Opt[IpAddress], enrTcpPort, enrUdpPort: Opt[Port],
pk: PrivateKey, pk: PrivateKey,
enrFields: openArray[(string, seq[byte])], rng: ref HmacDrbgContext): enrFields: openArray[(string, seq[byte])], rng: ref HmacDrbgContext):
T = T =

View File

@ -23,18 +23,20 @@ import
libp2p/protocols/pubsub/[ libp2p/protocols/pubsub/[
pubsub, gossipsub, rpc/message, rpc/messages, peertable, pubsubpeer], pubsub, gossipsub, rpc/message, rpc/messages, peertable, pubsubpeer],
libp2p/stream/connection, libp2p/stream/connection,
libp2p/services/wildcardresolverservice,
eth/[keys, async_utils], eth/[keys, async_utils],
eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2], eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2],
".."/[version, conf, beacon_clock, conf_light_client], ".."/[version, conf, beacon_clock, conf_light_client],
../spec/datatypes/[phase0, altair, bellatrix], ../spec/datatypes/[phase0, altair, bellatrix],
../spec/[eth2_ssz_serialization, network, helpers, forks], ../spec/[eth2_ssz_serialization, network, helpers, forks],
../validators/keystore_management, ../validators/keystore_management,
"."/[eth2_discovery, eth2_protocol_dsl, libp2p_json_serialization, peer_pool, peer_scores] "."/[eth2_discovery, eth2_protocol_dsl, eth2_agents,
libp2p_json_serialization, peer_pool, peer_scores]
export export
tables, chronos, ratelimit, version, multiaddress, peerinfo, p2pProtocol, tables, chronos, ratelimit, version, multiaddress, peerinfo, p2pProtocol,
connection, libp2p_json_serialization, eth2_ssz_serialization, results, connection, libp2p_json_serialization, eth2_ssz_serialization, results,
eth2_discovery, peer_pool, peer_scores eth2_discovery, peer_pool, peer_scores, eth2_agents
logScope: logScope:
topics = "networking" topics = "networking"
@ -81,6 +83,7 @@ type
rng*: ref HmacDrbgContext rng*: ref HmacDrbgContext
peers*: Table[PeerId, Peer] peers*: Table[PeerId, Peer]
directPeers*: DirectPeers directPeers*: DirectPeers
announcedAddresses*: seq[MultiAddress]
validTopics: HashSet[string] validTopics: HashSet[string]
peerPingerHeartbeatFut: Future[void].Raising([CancelledError]) peerPingerHeartbeatFut: Future[void].Raising([CancelledError])
peerTrimmerHeartbeatFut: Future[void].Raising([CancelledError]) peerTrimmerHeartbeatFut: Future[void].Raising([CancelledError])
@ -96,6 +99,7 @@ type
Peer* = ref object Peer* = ref object
network*: Eth2Node network*: Eth2Node
peerId*: PeerId peerId*: PeerId
remoteAgent*: Eth2Agent
discoveryId*: Eth2DiscoveryId discoveryId*: Eth2DiscoveryId
connectionState*: ConnectionState connectionState*: ConnectionState
protocolStates*: seq[RootRef] protocolStates*: seq[RootRef]
@ -336,6 +340,31 @@ func shortProtocolId(protocolId: string): string =
protocolId.high protocolId.high
protocolId[start..ends] protocolId[start..ends]
proc updateAgent*(peer: Peer) =
let
agent = toLowerAscii(peer.network.switch.peerStore[AgentBook][peer.peerId])
# proto = peer.network.switch.peerStore[ProtoVersionBook][peer.peerId]
if "nimbus" in agent:
peer.remoteAgent = Eth2Agent.Nimbus
elif "lighthouse" in agent:
peer.remoteAgent = Eth2Agent.Lighthouse
elif "teku" in agent:
peer.remoteAgent = Eth2Agent.Teku
elif "lodestar" in agent:
peer.remoteAgent = Eth2Agent.Lodestar
elif "prysm" in agent:
peer.remoteAgent = Eth2Agent.Prysm
elif "grandine" in agent:
peer.remoteAgent = Eth2Agent.Grandine
else:
peer.remoteAgent = Eth2Agent.Unknown
proc getRemoteAgent*(peer: Peer): Eth2Agent =
if peer.remoteAgent == Eth2Agent.Unknown:
peer.updateAgent()
peer.remoteAgent
proc openStream(node: Eth2Node, proc openStream(node: Eth2Node,
peer: Peer, peer: Peer,
protocolId: string): Future[NetRes[Connection]] protocolId: string): Future[NetRes[Connection]]
@ -1388,7 +1417,7 @@ proc connectWorker(node: Eth2Node, index: int) {.async: (raises: [CancelledError
node.connTable.excl(remotePeerAddr.peerId) node.connTable.excl(remotePeerAddr.peerId)
proc toPeerAddr(node: Node): Result[PeerAddr, cstring] = proc toPeerAddr(node: Node): Result[PeerAddr, cstring] =
let nodeRecord = ? node.record.toTypedRecord() let nodeRecord = TypedRecord.fromRecord(node.record)
let peerAddr = ? nodeRecord.toPeerAddr(tcpProtocol) let peerAddr = ? nodeRecord.toPeerAddr(tcpProtocol)
ok(peerAddr) ok(peerAddr)
@ -1765,9 +1794,9 @@ proc new(T: type Eth2Node,
enrForkId: ENRForkID, discoveryForkId: ENRForkID, enrForkId: ENRForkID, discoveryForkId: ENRForkID,
forkDigests: ref ForkDigests, getBeaconTime: GetBeaconTimeFn, forkDigests: ref ForkDigests, getBeaconTime: GetBeaconTimeFn,
switch: Switch, pubsub: GossipSub, switch: Switch, pubsub: GossipSub,
ip: Option[IpAddress], tcpPort, udpPort: Option[Port], ip: Opt[IpAddress], tcpPort, udpPort: Opt[Port],
privKey: keys.PrivateKey, discovery: bool, privKey: keys.PrivateKey, discovery: bool,
directPeers: DirectPeers, directPeers: DirectPeers, announcedAddresses: openArray[MultiAddress],
rng: ref HmacDrbgContext): T {.raises: [CatchableError].} = rng: ref HmacDrbgContext): T {.raises: [CatchableError].} =
when not defined(local_testnet): when not defined(local_testnet):
let let
@ -1811,6 +1840,7 @@ proc new(T: type Eth2Node,
connectTimeout: connectTimeout, connectTimeout: connectTimeout,
seenThreshold: seenThreshold, seenThreshold: seenThreshold,
directPeers: directPeers, directPeers: directPeers,
announcedAddresses: @announcedAddresses,
quota: TokenBucket.new(maxGlobalQuota, fullReplenishTime) quota: TokenBucket.new(maxGlobalQuota, fullReplenishTime)
) )
@ -1879,11 +1909,9 @@ proc start*(node: Eth2Node) {.async: (raises: [CancelledError]).} =
notice "Discovery disabled; trying bootstrap nodes", notice "Discovery disabled; trying bootstrap nodes",
nodes = node.discovery.bootstrapRecords.len nodes = node.discovery.bootstrapRecords.len
for enr in node.discovery.bootstrapRecords: for enr in node.discovery.bootstrapRecords:
let tr = enr.toTypedRecord() let pa = TypedRecord.fromRecord(enr).toPeerAddr(tcpProtocol)
if tr.isOk(): if pa.isOk():
let pa = tr.get().toPeerAddr(tcpProtocol) await node.connQueue.addLast(pa.get())
if pa.isOk():
await node.connQueue.addLast(pa.get())
node.peerPingerHeartbeatFut = node.peerPingerHeartbeat() node.peerPingerHeartbeatFut = node.peerPingerHeartbeat()
node.peerTrimmerHeartbeatFut = node.peerTrimmerHeartbeat() node.peerTrimmerHeartbeatFut = node.peerTrimmerHeartbeat()
@ -2223,6 +2251,8 @@ func gossipId(
proc newBeaconSwitch(config: BeaconNodeConf | LightClientConf, proc newBeaconSwitch(config: BeaconNodeConf | LightClientConf,
seckey: PrivateKey, address: MultiAddress, seckey: PrivateKey, address: MultiAddress,
rng: ref HmacDrbgContext): Switch {.raises: [CatchableError].} = rng: ref HmacDrbgContext): Switch {.raises: [CatchableError].} =
let service: Service = WildcardAddressResolverService.new()
var sb = var sb =
if config.enableYamux: if config.enableYamux:
SwitchBuilder.new().withYamux() SwitchBuilder.new().withYamux()
@ -2239,6 +2269,7 @@ proc newBeaconSwitch(config: BeaconNodeConf | LightClientConf,
.withMaxConnections(config.maxPeers) .withMaxConnections(config.maxPeers)
.withAgentVersion(config.agentString) .withAgentVersion(config.agentString)
.withTcpTransport({ServerFlags.ReuseAddr}) .withTcpTransport({ServerFlags.ReuseAddr})
.withServices(@[service])
.build() .build()
proc createEth2Node*(rng: ref HmacDrbgContext, proc createEth2Node*(rng: ref HmacDrbgContext,
@ -2272,7 +2303,10 @@ proc createEth2Node*(rng: ref HmacDrbgContext,
let (peerId, address) = let (peerId, address) =
if s.startsWith("enr:"): if s.startsWith("enr:"):
let let
typedEnr = parseBootstrapAddress(s).get().toTypedRecord().get() enr = parseBootstrapAddress(s).valueOr:
fatal "Failed to parse bootstrap address", enr=s
quit 1
typedEnr = TypedRecord.fromRecord(enr)
peerAddress = toPeerAddr(typedEnr, tcpProtocol).get() peerAddress = toPeerAddr(typedEnr, tcpProtocol).get()
(peerAddress.peerId, peerAddress.addrs[0]) (peerAddress.peerId, peerAddress.addrs[0])
elif s.startsWith("/"): elif s.startsWith("/"):
@ -2359,7 +2393,8 @@ proc createEth2Node*(rng: ref HmacDrbgContext,
let node = Eth2Node.new( let node = Eth2Node.new(
config, cfg, enrForkId, discoveryForkId, forkDigests, getBeaconTime, switch, pubsub, extIp, config, cfg, enrForkId, discoveryForkId, forkDigests, getBeaconTime, switch, pubsub, extIp,
extTcpPort, extUdpPort, netKeys.seckey.asEthKey, extTcpPort, extUdpPort, netKeys.seckey.asEthKey,
discovery = config.discv5Enabled, directPeers, rng = rng) discovery = config.discv5Enabled, directPeers, announcedAddresses,
rng = rng)
node.pubsub.subscriptionValidator = node.pubsub.subscriptionValidator =
proc(topic: string): bool {.gcsafe, raises: [].} = proc(topic: string): bool {.gcsafe, raises: [].} =
@ -2520,7 +2555,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
node.metadata.seq_number += 1 node.metadata.seq_number += 1
node.metadata.attnets = attnets node.metadata.attnets = attnets
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#attestation-subnet-subscription
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-bitfield # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
let res = node.discovery.updateRecord({ let res = node.discovery.updateRecord({
enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets) enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
@ -2533,7 +2568,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
debug "Stability subnets changed; updated ENR attnets", attnets debug "Stability subnets changed; updated ENR attnets", attnets
proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) = proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#sync-committee-subnet-stability # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
if node.metadata.syncnets == syncnets: if node.metadata.syncnets == syncnets:
return return
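A standalone sketch of the substring matching performed by `updateAgent` above; `classifyAgent` is a hypothetical helper, not part of the diff, kept in the same precedence order:
```nim
import std/strutils

func classifyAgent(agentString: string): Eth2Agent =
  ## Map a libp2p agent string to a known client, falling back to Unknown.
  let agent = agentString.toLowerAscii()
  if "nimbus" in agent: Eth2Agent.Nimbus
  elif "lighthouse" in agent: Eth2Agent.Lighthouse
  elif "teku" in agent: Eth2Agent.Teku
  elif "lodestar" in agent: Eth2Agent.Lodestar
  elif "prysm" in agent: Eth2Agent.Prysm
  elif "grandine" in agent: Eth2Agent.Grandine
  else: Eth2Agent.Unknown

doAssert classifyAgent("Lighthouse/v5.1.3/x86_64-linux") == Eth2Agent.Lighthouse
doAssert classifyAgent("") == Eth2Agent.Unknown
```
The cached `getRemoteAgent` wrapper only re-runs this matching while the result is still `Unknown`, so the agent book is consulted at most until a known client string appears.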

View File

@ -47,7 +47,6 @@ type
Eth1Network* = enum Eth1Network* = enum
mainnet mainnet
goerli
sepolia sepolia
holesky holesky
@ -295,7 +294,7 @@ elif const_preset == "mainnet":
vendorDir & "/mainnet/metadata/genesis.ssz") vendorDir & "/mainnet/metadata/genesis.ssz")
sepoliaGenesis* = slurp( sepoliaGenesis* = slurp(
vendorDir & "/sepolia/bepolia/genesis.ssz") vendorDir & "/sepolia/metadata/genesis.ssz")
const const
mainnetMetadata = loadCompileTimeNetworkMetadata( mainnetMetadata = loadCompileTimeNetworkMetadata(
@ -304,14 +303,14 @@ elif const_preset == "mainnet":
useBakedInGenesis = Opt.some "mainnet") useBakedInGenesis = Opt.some "mainnet")
holeskyMetadata = loadCompileTimeNetworkMetadata( holeskyMetadata = loadCompileTimeNetworkMetadata(
vendorDir & "/holesky/custom_config_data", vendorDir & "/holesky/metadata",
Opt.some holesky, Opt.some holesky,
downloadGenesisFrom = Opt.some DownloadInfo( downloadGenesisFrom = Opt.some DownloadInfo(
url: "https://github.com/status-im/nimbus-eth2/releases/download/v23.9.1/holesky-genesis.ssz.sz", url: "https://github.com/status-im/nimbus-eth2/releases/download/v23.9.1/holesky-genesis.ssz.sz",
digest: Eth2Digest.fromHex "0x0ea3f6f9515823b59c863454675fefcd1d8b4f2dbe454db166206a41fda060a0")) digest: Eth2Digest.fromHex "0x0ea3f6f9515823b59c863454675fefcd1d8b4f2dbe454db166206a41fda060a0"))
sepoliaMetadata = loadCompileTimeNetworkMetadata( sepoliaMetadata = loadCompileTimeNetworkMetadata(
vendorDir & "/sepolia/bepolia", vendorDir & "/sepolia/metadata",
Opt.some sepolia, Opt.some sepolia,
useBakedInGenesis = Opt.some "sepolia") useBakedInGenesis = Opt.some "sepolia")
@ -344,7 +343,7 @@ proc getMetadataForNetwork*(networkName: string): Eth2NetworkMetadata =
quit 1 quit 1
if networkName in ["goerli", "prater"]: if networkName in ["goerli", "prater"]:
warn "Goerli is deprecated and will stop being supported; https://blog.ethereum.org/2023/11/30/goerli-lts-update suggests migrating to Holesky or Sepolia" warn "Goerli is deprecated and unsupported; https://blog.ethereum.org/2023/11/30/goerli-lts-update suggests migrating to Holesky or Sepolia"
let metadata = let metadata =
when const_preset == "gnosis": when const_preset == "gnosis":

View File

@ -36,7 +36,7 @@ cdecl(eth2_mainnet_genesis_size):
.quad eth2_mainnet_genesis_end - eth2_mainnet_genesis_data .quad eth2_mainnet_genesis_end - eth2_mainnet_genesis_data
eth2_sepolia_genesis_data: eth2_sepolia_genesis_data:
.incbin "sepolia/bepolia/genesis.ssz" .incbin "sepolia/metadata/genesis.ssz"
eth2_sepolia_genesis_end: eth2_sepolia_genesis_end:
.global cdecl(eth2_sepolia_genesis_size) .global cdecl(eth2_sepolia_genesis_size)
.p2align 3 .p2align 3

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
import import
chronicles, chronicles, stew/base10, metrics,
../spec/network, ../spec/network,
".."/[beacon_clock], ".."/[beacon_clock],
../networking/eth2_network, ../networking/eth2_network,
@ -37,6 +37,9 @@ type
statusLastTime: chronos.Moment statusLastTime: chronos.Moment
statusMsg: StatusMsg statusMsg: StatusMsg
declareCounter nbc_disconnects_count,
"Number disconnected peers", labels = ["agent", "reason"]
func shortLog*(s: StatusMsg): auto = func shortLog*(s: StatusMsg): auto =
( (
forkDigest: s.forkDigest, forkDigest: s.forkDigest,
@ -47,13 +50,6 @@ func shortLog*(s: StatusMsg): auto =
) )
chronicles.formatIt(StatusMsg): shortLog(it) chronicles.formatIt(StatusMsg): shortLog(it)
func disconnectReasonName(reason: uint64): string =
# haha, nim doesn't support uint64 in `case`!
if reason == uint64(ClientShutDown): "Client shutdown"
elif reason == uint64(IrrelevantNetwork): "Irrelevant network"
elif reason == uint64(FaultOrError): "Fault or error"
else: "Disconnected (" & $reason & ")"
func forkDigestAtEpoch(state: PeerSyncNetworkState, func forkDigestAtEpoch(state: PeerSyncNetworkState,
epoch: Epoch): ForkDigest = epoch: Epoch): ForkDigest =
state.forkDigests[].atEpoch(epoch, state.cfg) state.forkDigests[].atEpoch(epoch, state.cfg)
@ -131,9 +127,9 @@ p2pProtocol PeerSync(version = 1,
networkState = PeerSyncNetworkState, networkState = PeerSyncNetworkState,
peerState = PeerSyncPeerState): peerState = PeerSyncPeerState):
onPeerConnected do (peer: Peer, incoming: bool) {.async: (raises: [CancelledError]).}: onPeerConnected do (peer: Peer, incoming: bool) {.
debug "Peer connected", async: (raises: [CancelledError]).}:
peer, peerId = shortLog(peer.peerId), incoming debug "Peer connected", peer, peerId = shortLog(peer.peerId), incoming
# Per the eth2 protocol, whoever dials must send a status message when # Per the eth2 protocol, whoever dials must send a status message when
# connected for the first time, but because of how libp2p works, there may # connected for the first time, but because of how libp2p works, there may
# be a race between incoming and outgoing connections and disconnects that # be a race between incoming and outgoing connections and disconnects that
@ -152,6 +148,7 @@ p2pProtocol PeerSync(version = 1,
if theirStatus.isOk: if theirStatus.isOk:
discard await peer.handleStatus(peer.networkState, theirStatus.get()) discard await peer.handleStatus(peer.networkState, theirStatus.get())
peer.updateAgent()
else: else:
debug "Status response not received in time", debug "Status response not received in time",
peer, errorKind = theirStatus.error.kind peer, errorKind = theirStatus.error.kind
@ -179,9 +176,13 @@ p2pProtocol PeerSync(version = 1,
{.libp2pProtocol("metadata", 2).} = {.libp2pProtocol("metadata", 2).} =
peer.network.metadata peer.network.metadata
proc goodbye(peer: Peer, reason: uint64) proc goodbye(peer: Peer, reason: uint64) {.
{.async, libp2pProtocol("goodbye", 1).} = async, libp2pProtocol("goodbye", 1).} =
debug "Received Goodbye message", reason = disconnectReasonName(reason), peer let remoteAgent = peer.getRemoteAgent()
nbc_disconnects_count.inc(1, [$remoteAgent, Base10.toString(reason)])
debug "Received Goodbye message",
reason = disconnectReasonName(remoteAgent, reason),
remote_agent = $remoteAgent, peer
proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) = proc setStatusMsg(peer: Peer, statusMsg: StatusMsg) =
debug "Peer status", peer, statusMsg debug "Peer status", peer, statusMsg

View File

@ -373,6 +373,21 @@ proc initFullNode(
func getFrontfillSlot(): Slot = func getFrontfillSlot(): Slot =
max(dag.frontfill.get(BlockId()).slot, dag.horizon) max(dag.frontfill.get(BlockId()).slot, dag.horizon)
proc isWithinWeakSubjectivityPeriod(): bool =
let
currentSlot = node.beaconClock.now().slotOrZero()
checkpoint = Checkpoint(
epoch: epoch(getStateField(node.dag.headState, slot)),
root: getStateField(node.dag.headState, latest_block_header).state_root)
is_within_weak_subjectivity_period(node.dag.cfg, currentSlot,
node.dag.headState, checkpoint)
proc eventWaiter(): Future[void] {.async: (raises: [CancelledError]).} =
await node.shutdownEvent.wait()
bnStatus = BeaconNodeStatus.Stopping
asyncSpawn eventWaiter()
let let
quarantine = newClone( quarantine = newClone(
Quarantine.init()) Quarantine.init())
@ -441,19 +456,29 @@ proc initFullNode(
blockProcessor, node.validatorMonitor, dag, attestationPool, blockProcessor, node.validatorMonitor, dag, attestationPool,
validatorChangePool, node.attachedValidators, syncCommitteeMsgPool, validatorChangePool, node.attachedValidators, syncCommitteeMsgPool,
lightClientPool, quarantine, blobQuarantine, rng, getBeaconTime, taskpool) lightClientPool, quarantine, blobQuarantine, rng, getBeaconTime, taskpool)
syncManagerFlags =
if node.config.longRangeSync != LongRangeSyncMode.Lenient:
{SyncManagerFlag.NoGenesisSync}
else:
{}
syncManager = newSyncManager[Peer, PeerId]( syncManager = newSyncManager[Peer, PeerId](
node.network.peerPool, node.network.peerPool,
dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
SyncQueueKind.Forward, getLocalHeadSlot, SyncQueueKind.Forward, getLocalHeadSlot,
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
getFrontfillSlot, dag.tail.slot, blockVerifier) getFrontfillSlot, isWithinWeakSubjectivityPeriod,
dag.tail.slot, blockVerifier,
shutdownEvent = node.shutdownEvent,
flags = syncManagerFlags)
backfiller = newSyncManager[Peer, PeerId]( backfiller = newSyncManager[Peer, PeerId](
node.network.peerPool, node.network.peerPool,
dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
SyncQueueKind.Backward, getLocalHeadSlot, SyncQueueKind.Backward, getLocalHeadSlot,
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot, getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
getFrontfillSlot, dag.backfill.slot, blockVerifier, getFrontfillSlot, isWithinWeakSubjectivityPeriod,
maxHeadAge = 0) dag.backfill.slot, blockVerifier, maxHeadAge = 0,
shutdownEvent = node.shutdownEvent,
flags = syncManagerFlags)
router = (ref MessageRouter)( router = (ref MessageRouter)(
processor: processor, processor: processor,
network: node.network) network: node.network)
@ -554,6 +579,27 @@ proc init*(T: type BeaconNode,
template cfg: auto = metadata.cfg template cfg: auto = metadata.cfg
template eth1Network: auto = metadata.eth1Network template eth1Network: auto = metadata.eth1Network
if not(isDir(config.databaseDir)):
    # If the database directory is missing, we are going to use the genesis
    # state to check for the weak subjectivity period.
let
genesisState =
await fetchGenesisState(
metadata, config.genesisState, config.genesisStateUrl)
genesisTime = getStateField(genesisState[], genesis_time)
beaconClock = BeaconClock.init(genesisTime).valueOr:
fatal "Invalid genesis time in genesis state", genesisTime
quit 1
currentSlot = beaconClock.now().slotOrZero()
checkpoint = Checkpoint(
epoch: epoch(getStateField(genesisState[], slot)),
root: getStateField(genesisState[], latest_block_header).state_root)
if config.longRangeSync == LongRangeSyncMode.Light:
if not is_within_weak_subjectivity_period(metadata.cfg, currentSlot,
genesisState[], checkpoint):
fatal WeakSubjectivityLogMessage, current_slot = currentSlot
quit 1
try: try:
if config.numThreads < 0: if config.numThreads < 0:
fatal "The number of threads --numThreads cannot be negative." fatal "The number of threads --numThreads cannot be negative."
@ -780,6 +826,7 @@ proc init*(T: type BeaconNode,
RestServerRef.init(config.restAddress, config.restPort, RestServerRef.init(config.restAddress, config.restPort,
config.restAllowedOrigin, config.restAllowedOrigin,
validateBeaconApiQueries, validateBeaconApiQueries,
nimbusAgentStr,
config) config)
else: else:
nil nil
@ -885,6 +932,7 @@ proc init*(T: type BeaconNode,
beaconClock: beaconClock, beaconClock: beaconClock,
validatorMonitor: validatorMonitor, validatorMonitor: validatorMonitor,
stateTtlCache: stateTtlCache, stateTtlCache: stateTtlCache,
shutdownEvent: newAsyncEvent(),
dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init())) dynamicFeeRecipientsStore: newClone(DynamicFeeRecipientsStore.init()))
node.initLightClient( node.initLightClient(
@ -1862,7 +1910,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg))) MsgSource.gossip, msg)))
when consensusFork >= ConsensusFork.Capella: when consensusFork >= ConsensusFork.Capella:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#bls_to_execution_change # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#bls_to_execution_change
node.network.addAsyncValidator( node.network.addAsyncValidator(
getBlsToExecutionChangeTopic(digest), proc ( getBlsToExecutionChangeTopic(digest), proc (
msg: SignedBLSToExecutionChange msg: SignedBLSToExecutionChange
@ -2271,9 +2319,9 @@ proc doRecord(config: BeaconNodeConf, rng: var HmacDrbgContext) {.
let record = enr.Record.init( let record = enr.Record.init(
config.seqNumber, config.seqNumber,
netKeys.seckey.asEthKey, netKeys.seckey.asEthKey,
some(config.ipExt), Opt.some(config.ipExt),
some(config.tcpPortExt), Opt.some(config.tcpPortExt),
some(config.udpPortExt), Opt.some(config.udpPortExt),
fieldPairs).expect("Record within size limits") fieldPairs).expect("Record within size limits")
echo record.toURI() echo record.toURI()

View File

@ -357,6 +357,7 @@ proc init*(T: type RestServerRef,
port: Port, port: Port,
allowedOrigin: Option[string], allowedOrigin: Option[string],
validateFn: PatternCallback, validateFn: PatternCallback,
ident: string,
config: AnyConf): T = config: AnyConf): T =
let let
address = initTAddress(ip, port) address = initTAddress(ip, port)
@ -375,6 +376,7 @@ proc init*(T: type RestServerRef,
let res = RestServerRef.new(RestRouter.init(validateFn, allowedOrigin), let res = RestServerRef.new(RestRouter.init(validateFn, allowedOrigin),
address, serverFlags = serverFlags, address, serverFlags = serverFlags,
serverIdent = ident,
httpHeadersTimeout = headersTimeout, httpHeadersTimeout = headersTimeout,
maxHeadersSize = maxHeadersSize, maxHeadersSize = maxHeadersSize,
maxRequestBodySize = maxRequestBodySize, maxRequestBodySize = maxRequestBodySize,
@ -428,11 +430,13 @@ proc initKeymanagerServer*(
RestServerRef.init(config.keymanagerAddress, config.keymanagerPort, RestServerRef.init(config.keymanagerAddress, config.keymanagerPort,
config.keymanagerAllowedOrigin, config.keymanagerAllowedOrigin,
validateKeymanagerApiQueries, validateKeymanagerApiQueries,
nimbusAgentStr,
config) config)
else: else:
RestServerRef.init(config.keymanagerAddress, config.keymanagerPort, RestServerRef.init(config.keymanagerAddress, config.keymanagerPort,
config.keymanagerAllowedOrigin, config.keymanagerAllowedOrigin,
validateKeymanagerApiQueries, validateKeymanagerApiQueries,
nimbusAgentStr,
config) config)
else: else:
nil nil

View File

@ -17,6 +17,10 @@ import
const const
PREGENESIS_EPOCHS_COUNT = 1 PREGENESIS_EPOCHS_COUNT = 1
declareGauge validator_client_node_counts,
"Number of connected beacon nodes and their status",
labels = ["status"]
proc initGenesis(vc: ValidatorClientRef): Future[RestGenesis] {.async.} = proc initGenesis(vc: ValidatorClientRef): Future[RestGenesis] {.async.} =
info "Initializing genesis", nodes_count = len(vc.beaconNodes) info "Initializing genesis", nodes_count = len(vc.beaconNodes)
var nodes = vc.beaconNodes var nodes = vc.beaconNodes
@ -214,19 +218,24 @@ proc runVCSlotLoop(vc: ValidatorClientRef) {.async.} =
vc.processingDelay = Opt.some(nanoseconds(delay.nanoseconds)) vc.processingDelay = Opt.some(nanoseconds(delay.nanoseconds))
let
counts = vc.getNodeCounts()
# Good nodes are nodes which can be used for ALL the requests.
goodNodes = counts.data[int(RestBeaconNodeStatus.Synced)]
      # Viable nodes are nodes which can be used for only SOME of the requests.
viableNodes = counts.data[int(RestBeaconNodeStatus.OptSynced)] +
counts.data[int(RestBeaconNodeStatus.NotSynced)] +
counts.data[int(RestBeaconNodeStatus.Compatible)]
# Bad nodes are nodes which can't be used at all.
badNodes = counts.data[int(RestBeaconNodeStatus.Offline)] +
counts.data[int(RestBeaconNodeStatus.Online)] +
counts.data[int(RestBeaconNodeStatus.Incompatible)]
validator_client_node_counts.set(int64(goodNodes), ["good"])
validator_client_node_counts.set(int64(viableNodes), ["viable"])
validator_client_node_counts.set(int64(badNodes), ["bad"])
if len(vc.beaconNodes) > 1: if len(vc.beaconNodes) > 1:
let
counts = vc.getNodeCounts()
# Good nodes are nodes which can be used for ALL the requests.
goodNodes = counts.data[int(RestBeaconNodeStatus.Synced)]
        # Viable nodes are nodes which can be used for only SOME of the requests.
viableNodes = counts.data[int(RestBeaconNodeStatus.OptSynced)] +
counts.data[int(RestBeaconNodeStatus.NotSynced)] +
counts.data[int(RestBeaconNodeStatus.Compatible)]
# Bad nodes are nodes which can't be used at all.
badNodes = counts.data[int(RestBeaconNodeStatus.Offline)] +
counts.data[int(RestBeaconNodeStatus.Online)] +
counts.data[int(RestBeaconNodeStatus.Incompatible)]
info "Slot start", info "Slot start",
slot = shortLog(wallSlot), slot = shortLog(wallSlot),
epoch = shortLog(wallSlot.epoch()), epoch = shortLog(wallSlot.epoch()),
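For reference, the gauge above exports one series per status bucket; a minimal sketch with hypothetical counts, using the same `set` call signature as the diff:
```nim
# Hypothetical snapshot: 2 fully synced nodes, 1 partially usable, 0 unusable.
validator_client_node_counts.set(int64(2), ["good"])    # ...{status="good"} 2
validator_client_node_counts.set(int64(1), ["viable"])  # ...{status="viable"} 1
validator_client_node_counts.set(int64(0), ["bad"])     # ...{status="bad"} 0
```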

View File

@ -425,7 +425,11 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
Http400, InvalidRequestBodyError, $error) Http400, InvalidRequestBodyError, $error)
let let
ids = request.ids.valueOr: @[] ids = request.ids.valueOr: @[]
filter = request.status.valueOr: AllValidatorFilterKinds filter =
if request.status.isNone() or len(request.status.get) == 0:
AllValidatorFilterKinds
else:
request.status.get
(ids, filter) (ids, filter)
sid = state_id.valueOr: sid = state_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidStateIdValueError, return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
@ -1102,6 +1106,89 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
RestApiResponse.jsonMsgResponse(BlockValidationSuccess) RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
# https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlindedBlockV2
router.api(MethodPost, "/eth/v2/beacon/blinded_blocks") do (
broadcast_validation: Option[BroadcastValidationType],
contentBody: Option[ContentBody]) -> RestApiResponse:
if contentBody.isNone():
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
let
currentEpochFork =
node.dag.cfg.consensusForkAtEpoch(node.currentSlot().epoch())
version = request.headers.getString("eth-consensus-version")
validation =
if broadcast_validation.isNone():
BroadcastValidationType.Gossip
else:
let res = broadcast_validation.get().valueOr:
return RestApiResponse.jsonError(Http400,
InvalidBroadcastValidationType)
# TODO (cheatfate): support 'consensus' and
# 'consensus_and_equivocation' broadcast_validation types.
if res != BroadcastValidationType.Gossip:
return RestApiResponse.jsonError(Http500,
"Only `gossip` broadcast_validation option supported")
res
body = contentBody.get()
if (body.contentType == OctetStreamMediaType) and
(currentEpochFork.toString != version):
return RestApiResponse.jsonError(Http400, BlockIncorrectFork)
withConsensusFork(currentEpochFork):
# TODO (cheatfate): handle broadcast_validation flag
when consensusFork >= ConsensusFork.Deneb:
let
restBlock = decodeBodyJsonOrSsz(
consensusFork.SignedBlindedBeaconBlock, body).valueOr:
return RestApiResponse.jsonError(error)
payloadBuilderClient = node.getPayloadBuilderClient(
restBlock.message.proposer_index).valueOr:
return RestApiResponse.jsonError(
Http400, "Unable to initialize payload builder client: " & $error)
res = await node.unblindAndRouteBlockMEV(
payloadBuilderClient, restBlock)
if res.isErr():
return RestApiResponse.jsonError(
Http500, InternalServerError, $res.error)
if res.get().isNone():
return RestApiResponse.jsonError(Http202, BlockValidationError)
return RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
elif consensusFork >= ConsensusFork.Bellatrix:
return RestApiResponse.jsonError(
Http400, $consensusFork & " builder API unsupported")
else:
# Pre-Bellatrix, this endpoint will accept a `SignedBeaconBlock`.
#
# This is mostly the same as /eth/v1/beacon/blocks for phase 0 and
# altair.
var
restBlock = decodeBody(
RestPublishedSignedBeaconBlock, body, version).valueOr:
return RestApiResponse.jsonError(error)
forked = ForkedSignedBeaconBlock(restBlock)
if forked.kind != node.dag.cfg.consensusForkAtEpoch(
getForkedBlockField(forked, slot).epoch):
return RestApiResponse.jsonError(Http400, InvalidBlockObjectError)
let res = withBlck(forked):
forkyBlck.root = hash_tree_root(forkyBlck.message)
await node.router.routeSignedBeaconBlock(
forkyBlck, Opt.none(seq[BlobSidecar]),
checkValidator = true)
if res.isErr():
return RestApiResponse.jsonError(
Http503, BeaconNodeInSyncError, $res.error)
elif res.get().isNone():
return RestApiResponse.jsonError(Http202, BlockValidationError)
RestApiResponse.jsonMsgResponse(BlockValidationSuccess)
# https://ethereum.github.io/beacon-APIs/#/Beacon/getBlock # https://ethereum.github.io/beacon-APIs/#/Beacon/getBlock
router.api2(MethodGet, "/eth/v1/beacon/blocks/{block_id}") do ( router.api2(MethodGet, "/eth/v1/beacon/blocks/{block_id}") do (
block_id: BlockIdent) -> RestApiResponse: block_id: BlockIdent) -> RestApiResponse:

View File

@ -90,7 +90,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VOLUNTARY_EXITS: MAX_VOLUNTARY_EXITS:
Base10.toString(MAX_VOLUNTARY_EXITS), Base10.toString(MAX_VOLUNTARY_EXITS),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/altair.yaml
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: INACTIVITY_PENALTY_QUOTIENT_ALTAIR:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR), Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR),
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR:
@ -106,7 +106,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
UPDATE_TIMEOUT: UPDATE_TIMEOUT:
Base10.toString(UPDATE_TIMEOUT), Base10.toString(UPDATE_TIMEOUT),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/bellatrix.yaml
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: INACTIVITY_PENALTY_QUOTIENT_BELLATRIX:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX), Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX),
MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX:
@ -122,7 +122,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_EXTRA_DATA_BYTES: MAX_EXTRA_DATA_BYTES:
Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)), Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/capella.yaml
MAX_BLS_TO_EXECUTION_CHANGES: MAX_BLS_TO_EXECUTION_CHANGES:
Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)), Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)),
MAX_WITHDRAWALS_PER_PAYLOAD: MAX_WITHDRAWALS_PER_PAYLOAD:
@ -130,7 +130,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP:
Base10.toString(uint64(MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)), Base10.toString(uint64(MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/deneb.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/deneb.yaml
FIELD_ELEMENTS_PER_BLOB: FIELD_ELEMENTS_PER_BLOB:
Base10.toString(deneb_preset.FIELD_ELEMENTS_PER_BLOB), Base10.toString(deneb_preset.FIELD_ELEMENTS_PER_BLOB),
MAX_BLOB_COMMITMENTS_PER_BLOCK: MAX_BLOB_COMMITMENTS_PER_BLOCK:

View File

@ -106,65 +106,51 @@ proc getLastSeenAddress(node: BeaconNode, id: PeerId): string =
$addrs[len(addrs) - 1] $addrs[len(addrs) - 1]
else: else:
"" ""
proc getDiscoveryAddresses(node: BeaconNode): Option[seq[string]] = proc getDiscoveryAddresses(node: BeaconNode): seq[string] =
let restr = node.network.enrRecord().toTypedRecord() let
if restr.isErr(): typedRec = TypedRecord.fromRecord(node.network.enrRecord())
return none[seq[string]]() peerAddr = typedRec.toPeerAddr(udpProtocol).valueOr:
let respa = restr.get().toPeerAddr(udpProtocol) return default(seq[string])
if respa.isErr(): maddress = MultiAddress.init(multiCodec("p2p"), peerAddr.peerId).valueOr:
return none[seq[string]]() return default(seq[string])
let pa = respa.get()
let mpa = MultiAddress.init(multiCodec("p2p"), pa.peerId)
if mpa.isErr():
return none[seq[string]]()
var addresses = newSeqOfCap[string](len(pa.addrs))
for item in pa.addrs:
let resa = concat(item, mpa.get())
if resa.isOk():
addresses.add($(resa.get()))
return some(addresses)
proc getP2PAddresses(node: BeaconNode): Option[seq[string]] = var addresses: seq[string]
let pinfo = node.network.switch.peerInfo for item in peerAddr.addrs:
let mpa = MultiAddress.init(multiCodec("p2p"), pinfo.peerId) let res = concat(item, maddress)
if mpa.isErr(): if res.isOk():
return none[seq[string]]() addresses.add($(res.get()))
var addresses = newSeqOfCap[string](len(pinfo.addrs)) addresses
proc getP2PAddresses(node: BeaconNode): seq[string] =
let
pinfo = node.network.switch.peerInfo
maddress = MultiAddress.init(multiCodec("p2p"), pinfo.peerId).valueOr:
return default(seq[string])
var addresses: seq[string]
for item in node.network.announcedAddresses:
let res = concat(item, maddress)
if res.isOk():
addresses.add($(res.get()))
for item in pinfo.addrs: for item in pinfo.addrs:
let resa = concat(item, mpa.get()) let res = concat(item, maddress)
if resa.isOk(): if res.isOk():
addresses.add($(resa.get())) addresses.add($(res.get()))
return some(addresses) addresses
proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) = proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
let let
cachedVersion = cachedVersion =
RestApiResponse.prepareJsonResponse((version: "Nimbus/" & fullVersionStr)) RestApiResponse.prepareJsonResponse((version: nimbusAgentStr))
# https://ethereum.github.io/beacon-APIs/#/Node/getNetworkIdentity # https://ethereum.github.io/beacon-APIs/#/Node/getNetworkIdentity
router.api2(MethodGet, "/eth/v1/node/identity") do () -> RestApiResponse: router.api2(MethodGet, "/eth/v1/node/identity") do () -> RestApiResponse:
let discoveryAddresses =
block:
let res = node.getDiscoveryAddresses()
if res.isSome():
res.get()
else:
newSeq[string](0)
let p2pAddresses =
block:
let res = node.getP2PAddresses()
if res.isSome():
res.get()
else:
newSeq[string]()
RestApiResponse.jsonResponse( RestApiResponse.jsonResponse(
( (
peer_id: $node.network.peerId(), peer_id: $node.network.peerId(),
enr: node.network.enrRecord().toURI(), enr: node.network.enrRecord().toURI(),
p2p_addresses: p2pAddresses, p2p_addresses: node.getP2PAddresses(),
discovery_addresses: discoveryAddresses, discovery_addresses: node.getDiscoveryAddresses(),
metadata: ( metadata: (
seq_number: node.network.metadata.seq_number, seq_number: node.network.metadata.seq_number,
syncnets: to0xHex(node.network.metadata.syncnets.bytes), syncnets: to0xHex(node.network.metadata.syncnets.bytes),
@ -297,4 +283,4 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
Http206 Http206
else: else:
Http200 Http200
RestApiResponse.response("", status, contentType = "") RestApiResponse.response(status)

View File

@ -1102,7 +1102,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
numUpdatedFeeRecipients = numUpdated, numUpdatedFeeRecipients = numUpdated,
numRefreshedFeeRecipients = numRefreshed numRefreshedFeeRecipients = numRefreshed
RestApiResponse.response("", Http200, "text/plain") RestApiResponse.response(Http200)
# https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator # https://ethereum.github.io/beacon-APIs/#/Validator/registerValidator
# https://github.com/ethereum/beacon-APIs/blob/v2.3.0/apis/validator/register_validator.yaml # https://github.com/ethereum/beacon-APIs/blob/v2.3.0/apis/validator/register_validator.yaml
@ -1129,7 +1129,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
node.externalBuilderRegistrations[signedValidatorRegistration.message.pubkey] = node.externalBuilderRegistrations[signedValidatorRegistration.message.pubkey] =
signedValidatorRegistration signedValidatorRegistration
RestApiResponse.response("", Http200, "text/plain") RestApiResponse.response(Http200)
# https://ethereum.github.io/beacon-APIs/#/Validator/getLiveness # https://ethereum.github.io/beacon-APIs/#/Validator/getLiveness
router.api2(MethodPost, "/eth/v1/validator/liveness/{epoch}") do ( router.api2(MethodPost, "/eth/v1/validator/liveness/{epoch}") do (

View File

@ -43,7 +43,7 @@ const
GENESIS_SLOT* = Slot(0) GENESIS_SLOT* = Slot(0)
GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT) GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#constant # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#constant
INTERVALS_PER_SLOT* = 3 INTERVALS_PER_SLOT* = 3
FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high()) FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high())
@ -139,16 +139,16 @@ const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate
aggregateSlotOffset* = TimeDiff(nanoseconds: aggregateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds: syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
syncContributionSlotOffset* = TimeDiff(nanoseconds: syncContributionSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/p2p-interface.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds: lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/p2p-interface.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds: lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
@ -188,7 +188,7 @@ func epoch*(slot: Slot): Epoch = # aka compute_epoch_at_slot
if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH
else: Epoch(slot div SLOTS_PER_EPOCH) else: Epoch(slot div SLOTS_PER_EPOCH)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#compute_slots_since_epoch_start # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start
## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`) ## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`)
(slot mod SLOTS_PER_EPOCH) (slot mod SLOTS_PER_EPOCH)
@ -196,7 +196,7 @@ func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_st
template is_epoch*(slot: Slot): bool = template is_epoch*(slot: Slot): bool =
slot.since_epoch_start == 0 slot.since_epoch_start == 0
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch
## Return the start slot of ``epoch``. ## Return the start slot of ``epoch``.
const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH) const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH)
@ -216,7 +216,7 @@ iterator slots*(epoch: Epoch): Slot =
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH: for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
yield slot yield slot
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee
template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod = template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod =
if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD
else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD) else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

View File

@ -67,7 +67,7 @@ func get_validator_from_deposit*(
effective_balance: effective_balance effective_balance: effective_balance
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-get_validator_from_deposit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-get_validator_from_deposit
func get_validator_from_deposit*( func get_validator_from_deposit*(
state: electra.BeaconState, deposit: DepositData): Validator = state: electra.BeaconState, deposit: DepositData): Validator =
Validator( Validator(
@ -86,7 +86,7 @@ func compute_activation_exit_epoch*(epoch: Epoch): Epoch =
## ``epoch`` take effect. ## ``epoch`` take effect.
epoch + 1 + MAX_SEED_LOOKAHEAD epoch + 1 + MAX_SEED_LOOKAHEAD
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_validator_churn_limit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_validator_churn_limit
func get_validator_churn_limit*( func get_validator_churn_limit*(
cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache): cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache):
uint64 = uint64 =
@ -301,7 +301,7 @@ from ./datatypes/deneb import BeaconState
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator
func get_slashing_penalty*( func get_slashing_penalty*(
state: ForkyBeaconState, validator_effective_balance: Gwei): Gwei = state: ForkyBeaconState, validator_effective_balance: Gwei): Gwei =
@ -319,7 +319,7 @@ func get_slashing_penalty*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_whistleblower_reward*( func get_whistleblower_reward*(
state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState | state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState, capella.BeaconState | deneb.BeaconState,
@ -333,7 +333,7 @@ func get_whistleblower_reward*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei = func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei =
when state is phase0.BeaconState: when state is phase0.BeaconState:
whistleblower_reward div PROPOSER_REWARD_QUOTIENT whistleblower_reward div PROPOSER_REWARD_QUOTIENT
@ -346,7 +346,7 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
proc slash_validator*( proc slash_validator*(
cfg: RuntimeConfig, state: var ForkyBeaconState, cfg: RuntimeConfig, state: var ForkyBeaconState,
slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo, slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo,
@ -419,7 +419,7 @@ func get_initial_beacon_block*(state: altair.HashedBeaconState):
altair.TrustedSignedBeaconBlock( altair.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message)) message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
func get_initial_beacon_block*(state: bellatrix.HashedBeaconState): func get_initial_beacon_block*(state: bellatrix.HashedBeaconState):
bellatrix.TrustedSignedBeaconBlock = bellatrix.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted # The genesis block is implicitly trusted
@ -431,7 +431,7 @@ func get_initial_beacon_block*(state: bellatrix.HashedBeaconState):
bellatrix.TrustedSignedBeaconBlock( bellatrix.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message)) message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#testing
func get_initial_beacon_block*(state: capella.HashedBeaconState): func get_initial_beacon_block*(state: capella.HashedBeaconState):
capella.TrustedSignedBeaconBlock = capella.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted # The genesis block is implicitly trusted
@ -515,10 +515,17 @@ template get_total_balance(
max(EFFECTIVE_BALANCE_INCREMENT.Gwei, res) max(EFFECTIVE_BALANCE_INCREMENT.Gwei, res)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_eligible_for_activation_queue
func is_eligible_for_activation_queue*(validator: Validator): bool = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_eligible_for_activation_queue
func is_eligible_for_activation_queue*(
fork: static ConsensusFork, validator: Validator): bool =
## Check if ``validator`` is eligible to be placed into the activation queue. ## Check if ``validator`` is eligible to be placed into the activation queue.
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and when fork <= ConsensusFork.Deneb:
validator.effective_balance == MAX_EFFECTIVE_BALANCE.Gwei validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
validator.effective_balance == MAX_EFFECTIVE_BALANCE.Gwei
else:
# [Modified in Electra:EIP7251]
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
validator.effective_balance >= MIN_ACTIVATION_BALANCE.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_eligible_for_activation # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_eligible_for_activation
func is_eligible_for_activation*( func is_eligible_for_activation*(
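The reworked is_eligible_for_activation_queue above replaces the strict equality against MAX_EFFECTIVE_BALANCE with a >= MIN_ACTIVATION_BALANCE check from Electra onward. A minimal standalone Nim sketch of that difference, assuming hard-coded Gwei constants and a toy validator type rather than the repo's definitions:

# Toy sketch of the eligibility rule (hard-coded Gwei constants,
# simplified validator type; not the repo's definitions).
const
  FarFutureEpoch = high(uint64)
  MaxEffectiveBalance = 32_000_000_000'u64   # 32 ETH, pre-Electra cap
  MinActivationBalance = 32_000_000_000'u64  # 32 ETH, EIP-7251 floor

type ToyValidator = object
  activationEligibilityEpoch: uint64
  effectiveBalance: uint64

func eligiblePreElectra(v: ToyValidator): bool =
  v.activationEligibilityEpoch == FarFutureEpoch and
    v.effectiveBalance == MaxEffectiveBalance

func eligibleElectra(v: ToyValidator): bool =
  v.activationEligibilityEpoch == FarFutureEpoch and
    v.effectiveBalance >= MinActivationBalance

when isMainModule:
  # An effective balance above 32 ETH (possible with compounding
  # credentials) fails the old equality check but passes in Electra.
  let v = ToyValidator(
    activationEligibilityEpoch: FarFutureEpoch,
    effectiveBalance: 40_000_000_000'u64)
  doAssert not v.eligiblePreElectra
  doAssert v.eligibleElectra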
@ -589,7 +596,7 @@ iterator get_attesting_indices_iter*(state: ForkyBeaconState,
if bits[index_in_committee]: if bits[index_in_committee]:
yield validator_index yield validator_index
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#modified-get_attesting_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_attesting_indices
iterator get_attesting_indices_iter*( iterator get_attesting_indices_iter*(
state: electra.BeaconState, state: electra.BeaconState,
data: AttestationData, data: AttestationData,
@ -617,7 +624,7 @@ func get_attesting_indices*(
toSeq(get_attesting_indices_iter(state, data, aggregation_bits, cache)) toSeq(get_attesting_indices_iter(state, data, aggregation_bits, cache))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_attesting_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_attesting_indices
func get_attesting_indices*( func get_attesting_indices*(
state: ForkyBeaconState, data: AttestationData, state: ForkyBeaconState, data: AttestationData,
aggregation_bits: ElectraCommitteeValidatorsBits, committee_bits: auto, aggregation_bits: ElectraCommitteeValidatorsBits, committee_bits: auto,
@ -734,7 +741,7 @@ func check_attestation_target_epoch(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-process_attestation # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-process_attestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#modified-process_attestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/beacon-chain.md#modified-process_attestation
func check_attestation_inclusion( func check_attestation_inclusion(
consensusFork: static ConsensusFork, attestation_slot: Slot, consensusFork: static ConsensusFork, attestation_slot: Slot,
current_slot: Slot): Result[void, cstring] = current_slot: Slot): Result[void, cstring] =
@ -763,7 +770,7 @@ func check_attestation_index(
Result[CommitteeIndex, cstring] = Result[CommitteeIndex, cstring] =
check_attestation_index(data.index, committees_per_slot) check_attestation_index(data.index, committees_per_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
func get_attestation_participation_flag_indices( func get_attestation_participation_flag_indices(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState, state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState,
data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] = data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] =
@ -1122,6 +1129,7 @@ proc process_attestation*(
ok(proposer_reward) ok(proposer_reward)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee_indices # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee_indices
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices
func get_next_sync_committee_keys( func get_next_sync_committee_keys(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState | electra.BeaconState): deneb.BeaconState | electra.BeaconState):
@ -1153,42 +1161,29 @@ func get_next_sync_committee_keys(
candidate_index = active_validator_indices[shuffled_index] candidate_index = active_validator_indices[shuffled_index]
random_byte = eth2digest(hash_buffer).data[i mod 32] random_byte = eth2digest(hash_buffer).data[i mod 32]
effective_balance = state.validators[candidate_index].effective_balance effective_balance = state.validators[candidate_index].effective_balance
if effective_balance * MAX_RANDOM_BYTE >= const meb =
MAX_EFFECTIVE_BALANCE.Gwei * random_byte: when typeof(state).kind >= ConsensusFork.Electra:
MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei # [Modified in Electra:EIP7251]
else:
MAX_EFFECTIVE_BALANCE.Gwei
if effective_balance * MAX_RANDOM_BYTE >= meb * random_byte:
res[index] = state.validators[candidate_index].pubkey res[index] = state.validators[candidate_index].pubkey
inc index inc index
i += 1'u64 i += 1'u64
res res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
func has_eth1_withdrawal_credential*(validator: Validator): bool = func has_eth1_withdrawal_credential*(validator: Validator): bool =
## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential. ## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential.
validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#is_fully_withdrawable_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-is_compounding_withdrawal_credential
func is_fully_withdrawable_validator(
validator: Validator, balance: Gwei, epoch: Epoch): bool =
## Check if ``validator`` is fully withdrawable.
has_eth1_withdrawal_credential(validator) and
validator.withdrawable_epoch <= epoch and balance > 0.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#is_partially_withdrawable_validator
func is_partially_withdrawable_validator(
validator: Validator, balance: Gwei): bool =
## Check if ``validator`` is partially withdrawable.
let
has_max_effective_balance =
validator.effective_balance == MAX_EFFECTIVE_BALANCE.Gwei
has_excess_balance = balance > MAX_EFFECTIVE_BALANCE.Gwei
has_eth1_withdrawal_credential(validator) and
has_max_effective_balance and has_excess_balance
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-is_compounding_withdrawal_credential
func is_compounding_withdrawal_credential*( func is_compounding_withdrawal_credential*(
withdrawal_credentials: Eth2Digest): bool = withdrawal_credentials: Eth2Digest): bool =
withdrawal_credentials.data[0] == COMPOUNDING_WITHDRAWAL_PREFIX withdrawal_credentials.data[0] == COMPOUNDING_WITHDRAWAL_PREFIX
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-has_compounding_withdrawal_credential # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-has_compounding_withdrawal_credential
func has_compounding_withdrawal_credential*(validator: Validator): bool = func has_compounding_withdrawal_credential*(validator: Validator): bool =
## Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal ## Check if ``validator`` has an 0x02 prefixed "compounding" withdrawal
## credential. ## credential.
@ -1200,6 +1195,43 @@ func has_execution_withdrawal_credential*(validator: Validator): bool =
has_compounding_withdrawal_credential(validator) or has_compounding_withdrawal_credential(validator) or
has_eth1_withdrawal_credential(validator) has_eth1_withdrawal_credential(validator)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#is_fully_withdrawable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_fully_withdrawable_validator
func is_fully_withdrawable_validator(
fork: static ConsensusFork, validator: Validator, balance: Gwei,
epoch: Epoch): bool =
## Check if ``validator`` is fully withdrawable.
when fork >= ConsensusFork.Electra:
# [Modified in Electra:EIP7251]
has_execution_withdrawal_credential(validator) and
validator.withdrawable_epoch <= epoch and balance > 0.Gwei
else:
has_eth1_withdrawal_credential(validator) and
validator.withdrawable_epoch <= epoch and balance > 0.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#is_partially_withdrawable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_partially_withdrawable_validator
func is_partially_withdrawable_validator(
fork: static ConsensusFork, validator: Validator, balance: Gwei): bool =
## Check if ``validator`` is partially withdrawable.
when fork >= ConsensusFork.Electra:
# [Modified in Electra:EIP7251]
let
max_effective_balance = get_validator_max_effective_balance(validator)
has_max_effective_balance =
validator.effective_balance == max_effective_balance
has_excess_balance =
balance > max_effective_balance # [Modified in Electra:EIP7251]
has_execution_withdrawal_credential(validator) and
has_max_effective_balance and has_excess_balance
else:
let
has_max_effective_balance =
validator.effective_balance == static(MAX_EFFECTIVE_BALANCE.Gwei)
has_excess_balance = balance > static(MAX_EFFECTIVE_BALANCE.Gwei)
has_eth1_withdrawal_credential(validator) and
has_max_effective_balance and has_excess_balance
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#get_validator_max_effective_balance # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#get_validator_max_effective_balance
func get_validator_max_effective_balance(validator: Validator): Gwei = func get_validator_max_effective_balance(validator: Validator): Gwei =
## Get max effective balance for ``validator``. ## Get max effective balance for ``validator``.
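The fork-parameterised helpers above make partial withdrawability depend on the balance ceiling implied by the withdrawal credential. A minimal standalone sketch of that rule, assuming the credential prefix stands in for the full has_execution_withdrawal_credential check and using hard-coded Gwei constants (toy names, not the repo's):

# Toy sketch of Electra partial withdrawability: the ceiling depends on
# the credential prefix (0x01 "eth1" -> 32 ETH, 0x02 "compounding" ->
# 2048 ETH); a validator is partially withdrawable only when its
# effective balance sits at that ceiling and its balance exceeds it.
const
  MinActivationBalance = 32_000_000_000'u64          # Gwei
  MaxEffectiveBalanceElectra = 2048_000_000_000'u64  # Gwei
  CompoundingPrefix = 0x02'u8

func maxEffectiveBalance(credentialPrefix: uint8): uint64 =
  if credentialPrefix == CompoundingPrefix:
    MaxEffectiveBalanceElectra
  else:
    MinActivationBalance

func partiallyWithdrawable(
    credentialPrefix: uint8, effectiveBalance, balance: uint64): bool =
  let ceiling = maxEffectiveBalance(credentialPrefix)
  effectiveBalance == ceiling and balance > ceiling

when isMainModule:
  # 0x01 credential at 32 ETH effective with 33 ETH balance: withdrawable.
  doAssert partiallyWithdrawable(0x01'u8, 32_000_000_000'u64, 33_000_000_000'u64)
  # 0x02 credential with the same balances: not withdrawable (ceiling is 2048 ETH).
  doAssert not partiallyWithdrawable(0x02'u8, 32_000_000_000'u64, 33_000_000_000'u64)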
@ -1237,7 +1269,7 @@ func switch_to_compounding_validator*(
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-get_pending_balance_to_withdraw
func get_pending_balance_to_withdraw*( func get_pending_balance_to_withdraw*(
state: Electra.BeaconState, validator_index: ValidatorIndex): Gwei = state: electra.BeaconState, validator_index: ValidatorIndex): Gwei =
var pending_balance: Gwei var pending_balance: Gwei
for withdrawal in state.pending_partial_withdrawals: for withdrawal in state.pending_partial_withdrawals:
if withdrawal.index == validator_index: if withdrawal.index == validator_index:
@ -1247,21 +1279,21 @@ func get_pending_balance_to_withdraw*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#new-get_expected_withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#new-get_expected_withdrawals
func get_expected_withdrawals*( func get_expected_withdrawals*(
state: capella.BeaconState | deneb.BeaconState | electra.BeaconState): state: capella.BeaconState | deneb.BeaconState): seq[Withdrawal] =
seq[Withdrawal] =
let let
epoch = get_current_epoch(state) epoch = get_current_epoch(state)
num_validators = lenu64(state.validators) num_validators = lenu64(state.validators)
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
var var
withdrawal_index = state.next_withdrawal_index withdrawal_index = state.next_withdrawal_index
validator_index = state.next_withdrawal_validator_index validator_index = state.next_withdrawal_validator_index
withdrawals: seq[Withdrawal] = @[] withdrawals: seq[Withdrawal] = @[]
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
for _ in 0 ..< bound: for _ in 0 ..< bound:
let let
validator = state.validators[validator_index] validator = state.validators[validator_index]
balance = state.balances[validator_index] balance = state.balances[validator_index]
if is_fully_withdrawable_validator(validator, balance, epoch): if is_fully_withdrawable_validator(
typeof(state).kind, validator, balance, epoch):
var w = Withdrawal( var w = Withdrawal(
index: withdrawal_index, index: withdrawal_index,
validator_index: validator_index, validator_index: validator_index,
@ -1269,7 +1301,8 @@ func get_expected_withdrawals*(
w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1] w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1]
withdrawals.add w withdrawals.add w
withdrawal_index = WithdrawalIndex(withdrawal_index + 1) withdrawal_index = WithdrawalIndex(withdrawal_index + 1)
elif is_partially_withdrawable_validator(validator, balance): elif is_partially_withdrawable_validator(
typeof(state).kind, validator, balance):
var w = Withdrawal( var w = Withdrawal(
index: withdrawal_index, index: withdrawal_index,
validator_index: validator_index, validator_index: validator_index,
@ -1282,6 +1315,82 @@ func get_expected_withdrawals*(
validator_index = (validator_index + 1) mod num_validators validator_index = (validator_index + 1) mod num_validators
withdrawals withdrawals
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-get_expected_withdrawals
# This partials count is used in exactly one place, while in general being able
# to cleanly treat the results of get_expected_withdrawals as a seq[Withdrawal]
# is valuable enough to make that the default version of this spec function.
func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):
(seq[Withdrawal], uint64) =
let epoch = get_current_epoch(state)
var
withdrawal_index = state.next_withdrawal_index
withdrawals: seq[Withdrawal] = @[]
# [New in Electra:EIP7251] Consume pending partial withdrawals
for withdrawal in state.pending_partial_withdrawals:
if withdrawal.withdrawable_epoch > epoch or
len(withdrawals) == MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP:
break
let
validator = state.validators[withdrawal.index]
has_sufficient_effective_balance =
validator.effective_balance >= static(MIN_ACTIVATION_BALANCE.Gwei)
has_excess_balance =
state.balances[withdrawal.index] > static(MIN_ACTIVATION_BALANCE.Gwei)
if validator.exit_epoch == FAR_FUTURE_EPOCH and
has_sufficient_effective_balance and has_excess_balance:
let withdrawable_balance = min(
state.balances[withdrawal.index] - static(MIN_ACTIVATION_BALANCE.Gwei),
withdrawal.amount)
var w = Withdrawal(
index: withdrawal_index,
validator_index: withdrawal.index,
amount: withdrawable_balance)
w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1]
withdrawals.add w
withdrawal_index += 1
let partial_withdrawals_count = lenu64(withdrawals)
let
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
num_validators = lenu64(state.validators)
var validator_index = state.next_withdrawal_validator_index
# Sweep for remaining.
for _ in 0 ..< bound:
let
validator = state.validators[validator_index]
balance = state.balances[validator_index]
if is_fully_withdrawable_validator(
typeof(state).kind, validator, balance, epoch):
var w = Withdrawal(
index: withdrawal_index,
validator_index: validator_index,
amount: balance)
w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1]
withdrawals.add w
withdrawal_index = WithdrawalIndex(withdrawal_index + 1)
elif is_partially_withdrawable_validator(
typeof(state).kind, validator, balance):
var w = Withdrawal(
index: withdrawal_index,
validator_index: validator_index,
# [Modified in Electra:EIP7251]
amount: balance - get_validator_max_effective_balance(validator))
w.address.data[0..19] = validator.withdrawal_credentials.data[12..^1]
withdrawals.add w
withdrawal_index = WithdrawalIndex(withdrawal_index + 1)
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
break
validator_index = (validator_index + 1) mod num_validators
(withdrawals, partial_withdrawals_count)
func get_expected_withdrawals*(state: electra.BeaconState): seq[Withdrawal] =
get_expected_withdrawals_with_partial_count(state)[0]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee
func get_next_sync_committee*( func get_next_sync_committee*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
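The comment introducing get_expected_withdrawals_with_partial_count above explains why the partial-withdrawals count is returned alongside the plain seq[Withdrawal]. The pending-partial consumption it performs can be sketched as a standalone toy model, assuming a hypothetical helper name and hard-coded Gwei constants:

# Toy model of the pending-partial rule: a request is honoured only while
# the validator has not initiated exit, keeps at least
# MIN_ACTIVATION_BALANCE of effective balance, and has an actual balance
# above that floor; the payout is capped so the balance never drops below
# the floor.
const
  FarFutureEpoch = high(uint64)
  MinActivationBalance = 32_000_000_000'u64  # Gwei

func partialWithdrawalAmount(
    exitEpoch, effectiveBalance, balance, requested: uint64): uint64 =
  if exitEpoch == FarFutureEpoch and
      effectiveBalance >= MinActivationBalance and
      balance > MinActivationBalance:
    min(balance - MinActivationBalance, requested)
  else:
    0'u64

when isMainModule:
  # 40 ETH balance, 5 ETH requested: the full 5 ETH can be withdrawn.
  doAssert partialWithdrawalAmount(
    FarFutureEpoch, 32_000_000_000'u64, 40_000_000_000'u64,
    5_000_000_000'u64) == 5_000_000_000'u64
  # 33 ETH balance, 5 ETH requested: only the 1 ETH above the floor is paid.
  doAssert partialWithdrawalAmount(
    FarFutureEpoch, 32_000_000_000'u64, 33_000_000_000'u64,
    5_000_000_000'u64) == 1_000_000_000'u64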
@ -1404,8 +1513,8 @@ proc initialize_hashed_beacon_state_from_eth1*(
cfg, eth1_block_hash, eth1_timestamp, deposits, flags)) cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
result.root = hash_tree_root(result.data) result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing
proc initialize_beacon_state_from_eth1*( proc initialize_beacon_state_from_eth1*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
@ -1757,7 +1866,7 @@ func upgrade_to_capella*(cfg: RuntimeConfig, pre: bellatrix.BeaconState):
# historical_summaries initialized to correct default automatically # historical_summaries initialized to correct default automatically
) )
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/deneb/fork.md#upgrading-the-state # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/fork.md#upgrading-the-state
func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState): func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
ref deneb.BeaconState = ref deneb.BeaconState =
let let
@ -1842,7 +1951,7 @@ func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
historical_summaries: pre.historical_summaries historical_summaries: pre.historical_summaries
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/fork.md#upgrading-the-state # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/fork.md#upgrading-the-state
func upgrade_to_electra*( func upgrade_to_electra*(
cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache): cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache):
ref electra.BeaconState = ref electra.BeaconState =
@ -1866,8 +1975,9 @@ func upgrade_to_electra*(
withdrawals_root: pre.latest_execution_payload_header.withdrawals_root, withdrawals_root: pre.latest_execution_payload_header.withdrawals_root,
blob_gas_used: 0, blob_gas_used: 0,
excess_blob_gas: 0, excess_blob_gas: 0,
deposit_receipts_root: ZERO_HASH, # [New in Electra:EIP6110] deposit_requests_root: ZERO_HASH, # [New in Electra:EIP6110]
withdrawal_requests_root: ZERO_HASH, # [New in ELectra:EIP7002] withdrawal_requests_root: ZERO_HASH, # [New in Electra:EIP7002],
consolidation_requests_root: ZERO_HASH # [New in Electra:EIP7251]
) )
var max_exit_epoch = FAR_FUTURE_EPOCH var max_exit_epoch = FAR_FUTURE_EPOCH
@ -1942,7 +2052,7 @@ func upgrade_to_electra*(
historical_summaries: pre.historical_summaries, historical_summaries: pre.historical_summaries,
# [New in Electra:EIP6110] # [New in Electra:EIP6110]
deposit_receipts_start_index: UNSET_DEPOSIT_RECEIPTS_START_INDEX, deposit_requests_start_index: UNSET_DEPOSIT_REQUESTS_START_INDEX,
# [New in Electra:EIP7251] # [New in Electra:EIP7251]
deposit_balance_to_consume: 0.Gwei, deposit_balance_to_consume: 0.Gwei,

View File

@ -51,7 +51,7 @@ const
PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] = PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] =
[uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT] [uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#misc # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#misc
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16 TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16
SYNC_COMMITTEE_SUBNET_COUNT* = 4 SYNC_COMMITTEE_SUBNET_COUNT* = 4
@ -60,7 +60,7 @@ const
# The first member (`genesis_time`) is 32, subsequent members +1 each. # The first member (`genesis_time`) is 32, subsequent members +1 each.
# If there are ever more than 32 members in `BeaconState`, indices change! # If there are ever more than 32 members in `BeaconState`, indices change!
# `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `52 * 2 + 1`. # `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `52 * 2 + 1`.
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
FINALIZED_ROOT_GINDEX* = 105.GeneralizedIndex # finalized_checkpoint > root FINALIZED_ROOT_GINDEX* = 105.GeneralizedIndex # finalized_checkpoint > root
CURRENT_SYNC_COMMITTEE_GINDEX* = 54.GeneralizedIndex # current_sync_committee CURRENT_SYNC_COMMITTEE_GINDEX* = 54.GeneralizedIndex # current_sync_committee
NEXT_SYNC_COMMITTEE_GINDEX* = 55.GeneralizedIndex # next_sync_committee NEXT_SYNC_COMMITTEE_GINDEX* = 55.GeneralizedIndex # next_sync_committee
@ -98,7 +98,7 @@ type
pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey] pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey]
aggregate_pubkey*: ValidatorPubKey aggregate_pubkey*: ValidatorPubKey
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#synccommitteemessage # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#synccommitteemessage
SyncCommitteeMessage* = object SyncCommitteeMessage* = object
slot*: Slot slot*: Slot
## Slot to which this contribution pertains ## Slot to which this contribution pertains
@ -112,7 +112,7 @@ type
signature*: ValidatorSig signature*: ValidatorSig
## Signature by the validator over the block root of `slot` ## Signature by the validator over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#synccommitteecontribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#synccommitteecontribution
SyncCommitteeAggregationBits* = SyncCommitteeAggregationBits* =
BitArray[SYNC_SUBCOMMITTEE_SIZE] BitArray[SYNC_SUBCOMMITTEE_SIZE]
@ -134,18 +134,18 @@ type
signature*: ValidatorSig signature*: ValidatorSig
## Signature by the validator(s) over the block root of `slot` ## Signature by the validator(s) over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#contributionandproof # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#contributionandproof
ContributionAndProof* = object ContributionAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation aggregator_index*: uint64 # `ValidatorIndex` after validation
contribution*: SyncCommitteeContribution contribution*: SyncCommitteeContribution
selection_proof*: ValidatorSig selection_proof*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#signedcontributionandproof # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#signedcontributionandproof
SignedContributionAndProof* = object SignedContributionAndProof* = object
message*: ContributionAndProof message*: ContributionAndProof
signature*: ValidatorSig signature*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#syncaggregatorselectiondata # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#syncaggregatorselectiondata
SyncAggregatorSelectionData* = object SyncAggregatorSelectionData* = object
slot*: Slot slot*: Slot
subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation
@ -161,7 +161,7 @@ type
NextSyncCommitteeBranch* = NextSyncCommitteeBranch* =
array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest] array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#lightclientheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientheader
LightClientHeader* = object LightClientHeader* = object
beacon*: BeaconBlockHeader beacon*: BeaconBlockHeader
## Beacon block header ## Beacon block header
@ -175,7 +175,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root` ## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: CurrentSyncCommitteeBranch current_sync_committee_branch*: CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientupdate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object LightClientUpdate* = object
attested_header*: LightClientHeader attested_header*: LightClientHeader
## Header attested to by the sync committee ## Header attested to by the sync committee
@ -665,7 +665,7 @@ chronicles.formatIt SyncCommitteeContribution: shortLog(it)
chronicles.formatIt ContributionAndProof: shortLog(it) chronicles.formatIt ContributionAndProof: shortLog(it)
chronicles.formatIt SignedContributionAndProof: shortLog(it) chronicles.formatIt SignedContributionAndProof: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header
func is_valid_light_client_header*( func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool = header: LightClientHeader, cfg: RuntimeConfig): bool =
true true
@ -684,7 +684,7 @@ func shortLog*(v: LightClientUpdate): auto =
( (
attested: shortLog(v.attested_header), attested: shortLog(v.attested_header),
has_next_sync_committee: has_next_sync_committee:
v.next_sync_committee != default(typeof(v.next_sync_committee)), v.next_sync_committee != static(default(typeof(v.next_sync_committee))),
finalized: shortLog(v.finalized_header), finalized: shortLog(v.finalized_header),
num_active_participants: v.sync_aggregate.num_active_participants, num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot signature_slot: v.signature_slot

View File

@ -74,7 +74,7 @@ export
tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto, tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto,
digest, presets digest, presets
const SPEC_VERSION* = "1.5.0-alpha.2" const SPEC_VERSION* = "1.5.0-alpha.3"
## Spec version we're aiming to be compatible with, right now ## Spec version we're aiming to be compatible with, right now
const const
@ -326,7 +326,7 @@ type
withdrawable_epoch*: Epoch withdrawable_epoch*: Epoch
## When validator can withdraw funds ## When validator can withdraw funds
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#pendingattestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#pendingattestation
PendingAttestation* = object PendingAttestation* = object
aggregation_bits*: CommitteeValidatorsBits aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData data*: AttestationData
@ -335,7 +335,7 @@ type
proposer_index*: uint64 # `ValidatorIndex` after validation proposer_index*: uint64 # `ValidatorIndex` after validation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#historicalbatch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch
HistoricalBatch* = object HistoricalBatch* = object
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
@ -371,7 +371,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body_root*: Eth2Digest body_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signingdata # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#signingdata
SigningData* = object SigningData* = object
object_root*: Eth2Digest object_root*: Eth2Digest
domain*: Eth2Domain domain*: Eth2Domain
@ -400,7 +400,7 @@ type
sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache] sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]
# This matches the mutable state of the Solidity deposit contract # This matches the mutable state of the Solidity deposit contract
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/solidity_deposit_contract/deposit_contract.sol # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/solidity_deposit_contract/deposit_contract.sol
DepositContractState* = object DepositContractState* = object
branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
deposit_count*: array[32, byte] # Uint256 deposit_count*: array[32, byte] # Uint256

View File

@ -35,7 +35,7 @@ const
NEWPAYLOAD_TIMEOUT* = 8.seconds NEWPAYLOAD_TIMEOUT* = 8.seconds
type type
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#custom-types # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#custom-types
Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION] Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION]
ExecutionAddress* = object ExecutionAddress* = object
@ -44,7 +44,7 @@ type
BloomLogs* = object BloomLogs* = object
data*: array[BYTES_PER_LOGS_BLOOM, byte] data*: array[BYTES_PER_LOGS_BLOOM, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#executionpayload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayload
ExecutionPayload* = object ExecutionPayload* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
@ -72,7 +72,7 @@ type
executionPayload*: ExecutionPayload executionPayload*: ExecutionPayload
blockValue*: Wei blockValue*: Wei
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#executionpayloadheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object ExecutionPayloadHeader* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
@ -96,13 +96,13 @@ type
ExecutePayload* = proc( ExecutePayload* = proc(
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].} execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/fork-choice.md#powblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/fork-choice.md#powblock
PowBlock* = object PowBlock* = object
block_hash*: Eth2Digest block_hash*: Eth2Digest
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
total_difficulty*: Eth2Digest # uint256 total_difficulty*: Eth2Digest # uint256
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconstate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
BeaconState* = object BeaconState* = object
# Versioning # Versioning
genesis_time*: uint64 genesis_time*: uint64
@ -227,7 +227,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body*: TrustedBeaconBlockBody body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object BeaconBlockBody* = object
randao_reveal*: ValidatorSig randao_reveal*: ValidatorSig
eth1_data*: Eth1Data eth1_data*: Eth1Data

View File

@ -32,7 +32,7 @@ const
# This index is rooted in `BeaconBlockBody`. # This index is rooted in `BeaconBlockBody`.
# The first member (`randao_reveal`) is 16, subsequent members +1 each. # The first member (`randao_reveal`) is 16, subsequent members +1 each.
# If there are ever more than 16 members in `BeaconBlockBody`, indices change! # If there are ever more than 16 members in `BeaconBlockBody`, indices change!
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex # execution_payload EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex # execution_payload
type type
@ -52,12 +52,12 @@ type
from_bls_pubkey*: ValidatorPubKey from_bls_pubkey*: ValidatorPubKey
to_execution_address*: ExecutionAddress to_execution_address*: ExecutionAddress
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#signedblstoexecutionchange # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#signedblstoexecutionchange
SignedBLSToExecutionChange* = object SignedBLSToExecutionChange* = object
message*: BLSToExecutionChange message*: BLSToExecutionChange
signature*: ValidatorSig signature*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#historicalsummary # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#historicalsummary
HistoricalSummary* = object HistoricalSummary* = object
# `HistoricalSummary` matches the components of the phase0 # `HistoricalSummary` matches the components of the phase0
# `HistoricalBatch` making the two hash_tree_root-compatible. # `HistoricalBatch` making the two hash_tree_root-compatible.
@ -132,7 +132,7 @@ type
## Execution payload header corresponding to `beacon.body_root` (from Capella onward) ## Execution payload header corresponding to `beacon.body_root` (from Capella onward)
execution_branch*: ExecutionBranch execution_branch*: ExecutionBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientbootstrap # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object LightClientBootstrap* = object
header*: LightClientHeader header*: LightClientHeader
## Header matching the requested beacon block root ## Header matching the requested beacon block root
@ -221,7 +221,7 @@ type
## (used to compute safety threshold) ## (used to compute safety threshold)
current_max_active_participants*: uint64 current_max_active_participants*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#beaconstate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
BeaconState* = object BeaconState* = object
# Versioning # Versioning
genesis_time*: uint64 genesis_time*: uint64
@ -674,8 +674,8 @@ func is_valid_light_client_header*(
if epoch < cfg.CAPELLA_FORK_EPOCH: if epoch < cfg.CAPELLA_FORK_EPOCH:
return return
header.execution == default(ExecutionPayloadHeader) and header.execution == static(default(ExecutionPayloadHeader)) and
header.execution_branch == default(ExecutionBranch) header.execution_branch == static(default(ExecutionBranch))
is_valid_merkle_branch( is_valid_merkle_branch(
get_lc_execution_root(header, cfg), get_lc_execution_root(header, cfg),
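Several hunks in this commit wrap default(...) comparisons in static(...), which guarantees the zeroed comparison value is computed at compile time rather than rebuilt on each call. A small self-contained sketch of the idiom, assuming a toy type:

type Big = object
  data: array[256, byte]

func isUnset(x: Big): bool =
  # static() forces the default value to be evaluated at compile time,
  # so the comparison uses a pre-built constant instead of constructing
  # a zeroed Big every time the function runs.
  x == static(default(Big))

when isMainModule:
  var b: Big
  doAssert b.isUnset
  b.data[0] = 1
  doAssert not b.isUnset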
@ -684,13 +684,13 @@ func is_valid_light_client_header*(
get_subtree_index(EXECUTION_PAYLOAD_GINDEX), get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root) header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_capella*( func upgrade_lc_header_to_capella*(
pre: altair.LightClientHeader): LightClientHeader = pre: altair.LightClientHeader): LightClientHeader =
LightClientHeader( LightClientHeader(
beacon: pre.beacon) beacon: pre.beacon)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_capella*( func upgrade_lc_bootstrap_to_capella*(
pre: altair.LightClientBootstrap): LightClientBootstrap = pre: altair.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap( LightClientBootstrap(
@ -698,7 +698,7 @@ func upgrade_lc_bootstrap_to_capella*(
current_sync_committee: pre.current_sync_committee, current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch) current_sync_committee_branch: pre.current_sync_committee_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_capella*( func upgrade_lc_update_to_capella*(
pre: altair.LightClientUpdate): LightClientUpdate = pre: altair.LightClientUpdate): LightClientUpdate =
LightClientUpdate( LightClientUpdate(
@ -710,7 +710,7 @@ func upgrade_lc_update_to_capella*(
sync_aggregate: pre.sync_aggregate, sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot) signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_capella*( func upgrade_lc_finality_update_to_capella*(
pre: altair.LightClientFinalityUpdate): LightClientFinalityUpdate = pre: altair.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate( LightClientFinalityUpdate(
@ -720,7 +720,7 @@ func upgrade_lc_finality_update_to_capella*(
sync_aggregate: pre.sync_aggregate, sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot) signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_capella*( func upgrade_lc_optimistic_update_to_capella*(
pre: altair.LightClientOptimisticUpdate): LightClientOptimisticUpdate = pre: altair.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate( LightClientOptimisticUpdate(
@ -745,7 +745,7 @@ func shortLog*(v: LightClientUpdate): auto =
( (
attested: shortLog(v.attested_header), attested: shortLog(v.attested_header),
has_next_sync_committee: has_next_sync_committee:
v.next_sync_committee != default(typeof(v.next_sync_committee)), v.next_sync_committee != static(default(typeof(v.next_sync_committee))),
finalized: shortLog(v.finalized_header), finalized: shortLog(v.finalized_header),
num_active_participants: v.sync_aggregate.num_active_participants, num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot signature_slot: v.signature_slot
@ -771,7 +771,7 @@ chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it) chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it) chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/light-client/fork.md#upgrading-the-store # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_capella*( func upgrade_lc_store_to_capella*(
pre: altair.LightClientStore): LightClientStore = pre: altair.LightClientStore): LightClientStore =
let best_valid_update = let best_valid_update =

View File

@ -55,7 +55,7 @@ const
DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF* = DomainType([byte 0x08, 0x00, 0x00, 0x00]) DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF* = DomainType([byte 0x08, 0x00, 0x00, 0x00])
DOMAIN_CONTRIBUTION_AND_PROOF* = DomainType([byte 0x09, 0x00, 0x00, 0x00]) DOMAIN_CONTRIBUTION_AND_PROOF* = DomainType([byte 0x09, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#domain-types # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#domain-types
DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00]) DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#domains # https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#domains
@ -83,9 +83,9 @@ const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/p2p-interface.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/p2p-interface.md#configuration
MAX_REQUEST_BLOCKS_DENEB*: uint64 = 128 # TODO Make use of in request code MAX_REQUEST_BLOCKS_DENEB*: uint64 = 128 # TODO Make use of in request code
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#misc # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#misc
UNSET_DEPOSIT_RECEIPTS_START_INDEX*: uint64 = not 0'u64 UNSET_DEPOSIT_REQUESTS_START_INDEX*: uint64 = not 0'u64
FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0 FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#withdrawal-prefixes # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#withdrawal-prefixes
COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02 COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02

View File

@ -76,7 +76,7 @@ type
kzg_commitment*: KzgCommitment kzg_commitment*: KzgCommitment
versioned_hash*: string # TODO should be string; VersionedHash not distinct versioned_hash*: string # TODO should be string; VersionedHash not distinct
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blobidentifier # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/p2p-interface.md#blobidentifier
BlobIdentifier* = object BlobIdentifier* = object
block_root*: Eth2Digest block_root*: Eth2Digest
index*: BlobIndex index*: BlobIndex
@ -382,7 +382,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body*: TrustedBeaconBlockBody body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object BeaconBlockBody* = object
randao_reveal*: ValidatorSig randao_reveal*: ValidatorSig
eth1_data*: Eth1Data eth1_data*: Eth1Data
@ -466,7 +466,7 @@ type
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments # [New in Deneb] blob_kzg_commitments*: KzgCommitments # [New in Deneb]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object SignedBeaconBlock* = object
message*: BeaconBlock message*: BeaconBlock
signature*: ValidatorSig signature*: ValidatorSig
@ -528,7 +528,7 @@ func initHashedBeaconState*(s: BeaconState): HashedBeaconState =
HashedBeaconState(data: s) HashedBeaconState(data: s)
func shortLog*(v: KzgCommitment | KzgProof): auto = func shortLog*(v: KzgCommitment | KzgProof): auto =
to0xHex(v) to0xHex(v.bytes)
func shortLog*(v: Blob): auto = func shortLog*(v: Blob): auto =
to0xHex(v.toOpenArray(0, 31)) to0xHex(v.toOpenArray(0, 31))
@ -606,7 +606,7 @@ func kzg_commitment_inclusion_proof_gindex*(
# This index is rooted in `BeaconBlockBody`. # This index is rooted in `BeaconBlockBody`.
# The first member (`randao_reveal`) is 16, subsequent members +1 each. # The first member (`randao_reveal`) is 16, subsequent members +1 each.
# If there are ever more than 16 members in `BeaconBlockBody`, indices change! # If there are ever more than 16 members in `BeaconBlockBody`, indices change!
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
const const
# blob_kzg_commitments # blob_kzg_commitments
BLOB_KZG_COMMITMENTS_GINDEX = BLOB_KZG_COMMITMENTS_GINDEX =
@ -626,14 +626,16 @@ func kzg_commitment_inclusion_proof_gindex*(
BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index
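The comment above implies a small piece of arithmetic that can be checked in isolation: assuming the Deneb `BeaconBlockBody` layout from the spec, `blob_kzg_commitments` is the twelfth member (0-based position 11), so with the first member at generalized index 16 its root sits at 16 + 11 = 27. A throwaway check in plain Nim (constant names here are illustrative, not the project's):

const
  FIRST_BODY_FIELD_GINDEX = 16        # `randao_reveal`, per the comment above
  BLOB_KZG_COMMITMENTS_POSITION = 11  # 0-based field position, assumed Deneb layout
doAssert FIRST_BODY_FIELD_GINDEX + BLOB_KZG_COMMITMENTS_POSITION == 27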
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root
func get_lc_execution_root*( func get_lc_execution_root*(
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest = header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
let epoch = header.beacon.slot.epoch let epoch = header.beacon.slot.epoch
# [New in Deneb]
if epoch >= cfg.DENEB_FORK_EPOCH: if epoch >= cfg.DENEB_FORK_EPOCH:
return hash_tree_root(header.execution) return hash_tree_root(header.execution)
# [Modified in Deneb]
if epoch >= cfg.CAPELLA_FORK_EPOCH: if epoch >= cfg.CAPELLA_FORK_EPOCH:
let execution_header = capella.ExecutionPayloadHeader( let execution_header = capella.ExecutionPayloadHeader(
parent_hash: header.execution.parent_hash, parent_hash: header.execution.parent_hash,
@ -655,11 +657,12 @@ func get_lc_execution_root*(
ZERO_HASH ZERO_HASH
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header
func is_valid_light_client_header*( func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool = header: LightClientHeader, cfg: RuntimeConfig): bool =
let epoch = header.beacon.slot.epoch let epoch = header.beacon.slot.epoch
# [New in Deneb:EIP4844]
if epoch < cfg.DENEB_FORK_EPOCH: if epoch < cfg.DENEB_FORK_EPOCH:
if header.execution.blob_gas_used != 0 or if header.execution.blob_gas_used != 0 or
header.execution.excess_blob_gas != 0: header.execution.excess_blob_gas != 0:
@ -667,8 +670,8 @@ func is_valid_light_client_header*(
if epoch < cfg.CAPELLA_FORK_EPOCH: if epoch < cfg.CAPELLA_FORK_EPOCH:
return return
header.execution == default(ExecutionPayloadHeader) and header.execution == static(default(ExecutionPayloadHeader)) and
header.execution_branch == default(ExecutionBranch) header.execution_branch == static(default(ExecutionBranch))
is_valid_merkle_branch( is_valid_merkle_branch(
get_lc_execution_root(header, cfg), get_lc_execution_root(header, cfg),
@ -677,7 +680,7 @@ func is_valid_light_client_header*(
get_subtree_index(EXECUTION_PAYLOAD_GINDEX), get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root) header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_deneb*( func upgrade_lc_header_to_deneb*(
pre: capella.LightClientHeader): LightClientHeader = pre: capella.LightClientHeader): LightClientHeader =
LightClientHeader( LightClientHeader(
@ -702,7 +705,7 @@ func upgrade_lc_header_to_deneb*(
excess_blob_gas: 0), # [New in Deneb:EIP4844] excess_blob_gas: 0), # [New in Deneb:EIP4844]
execution_branch: pre.execution_branch) execution_branch: pre.execution_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_deneb*( func upgrade_lc_bootstrap_to_deneb*(
pre: capella.LightClientBootstrap): LightClientBootstrap = pre: capella.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap( LightClientBootstrap(
@ -710,7 +713,7 @@ func upgrade_lc_bootstrap_to_deneb*(
current_sync_committee: pre.current_sync_committee, current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch) current_sync_committee_branch: pre.current_sync_committee_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_deneb*( func upgrade_lc_update_to_deneb*(
pre: capella.LightClientUpdate): LightClientUpdate = pre: capella.LightClientUpdate): LightClientUpdate =
LightClientUpdate( LightClientUpdate(
@ -722,7 +725,7 @@ func upgrade_lc_update_to_deneb*(
sync_aggregate: pre.sync_aggregate, sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot) signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_deneb*( func upgrade_lc_finality_update_to_deneb*(
pre: capella.LightClientFinalityUpdate): LightClientFinalityUpdate = pre: capella.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate( LightClientFinalityUpdate(
@ -732,7 +735,7 @@ func upgrade_lc_finality_update_to_deneb*(
sync_aggregate: pre.sync_aggregate, sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot) signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_deneb*( func upgrade_lc_optimistic_update_to_deneb*(
pre: capella.LightClientOptimisticUpdate): LightClientOptimisticUpdate = pre: capella.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate( LightClientOptimisticUpdate(
@ -757,7 +760,7 @@ func shortLog*(v: LightClientUpdate): auto =
( (
attested: shortLog(v.attested_header), attested: shortLog(v.attested_header),
has_next_sync_committee: has_next_sync_committee:
v.next_sync_committee != default(typeof(v.next_sync_committee)), v.next_sync_committee != static(default(typeof(v.next_sync_committee))),
finalized: shortLog(v.finalized_header), finalized: shortLog(v.finalized_header),
num_active_participants: v.sync_aggregate.num_active_participants, num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot signature_slot: v.signature_slot
@ -783,7 +786,7 @@ chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it) chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it) chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/deneb/light-client/fork.md#upgrading-the-store # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_deneb*( func upgrade_lc_store_to_deneb*(
pre: capella.LightClientStore): LightClientStore = pre: capella.LightClientStore): LightClientStore =
let best_valid_update = let best_valid_update =
View File
@ -29,35 +29,36 @@ from stew/bitops2 import log2trunc
from stew/byteutils import to0xHex from stew/byteutils import to0xHex
from ./altair import from ./altair import
EpochParticipationFlags, InactivityScores, SyncAggregate, SyncCommittee, EpochParticipationFlags, InactivityScores, SyncAggregate, SyncCommittee,
TrustedSyncAggregate TrustedSyncAggregate, num_active_participants
from ./bellatrix import BloomLogs, ExecutionAddress, Transaction from ./bellatrix import BloomLogs, ExecutionAddress, Transaction
from ./capella import from ./capella import
HistoricalSummary, SignedBLSToExecutionChangeList, Withdrawal ExecutionBranch, HistoricalSummary, SignedBLSToExecutionChangeList,
Withdrawal, EXECUTION_PAYLOAD_GINDEX
from ./deneb import Blobs, BlobsBundle, KzgCommitments, KzgProofs from ./deneb import Blobs, BlobsBundle, KzgCommitments, KzgProofs
export json_serialization, base, kzg4844 export json_serialization, base, kzg4844
const const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#constants # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#constants
# All of these indices are rooted in `BeaconState`. # All of these indices are rooted in `BeaconState`.
# The first member (`genesis_time`) is 64, subsequent members +1 each. # The first member (`genesis_time`) is 64, subsequent members +1 each.
# If there are ever more than 64 members in `BeaconState`, indices change! # If there are ever more than 64 members in `BeaconState`, indices change!
# `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `84 * 2 + 1`. # `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `84 * 2 + 1`.
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
FINALIZED_ROOT_GINDEX = 169.GeneralizedIndex # finalized_checkpoint > root FINALIZED_ROOT_GINDEX* = 169.GeneralizedIndex # finalized_checkpoint > root
CURRENT_SYNC_COMMITTEE_GINDEX = 86.GeneralizedIndex # current_sync_committee CURRENT_SYNC_COMMITTEE_GINDEX* = 86.GeneralizedIndex # current_sync_committee
NEXT_SYNC_COMMITTEE_GINDEX = 87.GeneralizedIndex # next_sync_committee NEXT_SYNC_COMMITTEE_GINDEX* = 87.GeneralizedIndex # next_sync_committee
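These constants follow directly from the rule in the comments, assuming the Altair `BeaconState` field order from the spec (`finalized_checkpoint` at 0-based position 20, `current_sync_committee` at 22, `next_sync_committee` at 23, and `Checkpoint.root` as the second of two fields). A throwaway check in plain Nim:

const FIRST_STATE_FIELD_GINDEX = 64  # `genesis_time`, per the comment above
doAssert FIRST_STATE_FIELD_GINDEX + 22 == 86              # current_sync_committee
doAssert FIRST_STATE_FIELD_GINDEX + 23 == 87              # next_sync_committee
doAssert (FIRST_STATE_FIELD_GINDEX + 20) * 2 + 1 == 169   # finalized_checkpoint > root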
type type
# https://github.com/ethereum/consensus-specs/blob/94a0b6c581f2809aa8aca4ef7ee6fbb63f9d74e9/specs/electra/beacon-chain.md#depositreceipt # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#depositrequest
DepositReceipt* = object DepositRequest* = object
pubkey*: ValidatorPubKey pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest withdrawal_credentials*: Eth2Digest
amount*: Gwei amount*: Gwei
signature*: ValidatorSig signature*: ValidatorSig
index*: uint64 index*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#indexedattestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#indexedattestation
IndexedAttestation* = object IndexedAttestation* = object
attesting_indices*: attesting_indices*:
List[uint64, Limit MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT] List[uint64, Limit MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT]
@ -85,7 +86,7 @@ type
attestation_1*: TrustedIndexedAttestation # [Modified in Electra:EIP7549] attestation_1*: TrustedIndexedAttestation # [Modified in Electra:EIP7549]
attestation_2*: TrustedIndexedAttestation # [Modified in Electra:EIP7549] attestation_2*: TrustedIndexedAttestation # [Modified in Electra:EIP7549]
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#executionpayload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#executionpayload
ExecutionPayload* = object ExecutionPayload* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
@ -110,18 +111,21 @@ type
withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
blob_gas_used*: uint64 blob_gas_used*: uint64
excess_blob_gas*: uint64 excess_blob_gas*: uint64
deposit_receipts*: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] deposit_requests*: List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD]
## [New in Electra:EIP6110] ## [New in Electra:EIP6110]
withdrawal_requests*: withdrawal_requests*:
List[ExecutionLayerWithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD] List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]
## [New in Electra:EIP6110] ## [New in Electra:EIP7002:EIP7251]
consolidation_requests*:
List[ConsolidationRequest, Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD]
## [New in Electra:EIP7251]
ExecutionPayloadForSigning* = object ExecutionPayloadForSigning* = object
executionPayload*: ExecutionPayload executionPayload*: ExecutionPayload
blockValue*: Wei blockValue*: Wei
blobsBundle*: BlobsBundle blobsBundle*: BlobsBundle
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#executionpayloadheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object ExecutionPayloadHeader* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
@ -144,8 +148,9 @@ type
withdrawals_root*: Eth2Digest withdrawals_root*: Eth2Digest
blob_gas_used*: uint64 blob_gas_used*: uint64
excess_blob_gas*: uint64 excess_blob_gas*: uint64
deposit_receipts_root*: Eth2Digest # [New in Electra:EIP6110] deposit_requests_root*: Eth2Digest # [New in Electra:EIP6110]
withdrawal_requests_root*: Eth2Digest # [New in Electra:EIP7002:EIP7251] withdrawal_requests_root*: Eth2Digest # [New in Electra:EIP7002:EIP7251]
consolidation_requests_root*: Eth2Digest # [New in Electra:EIP7251]
ExecutePayload* = proc( ExecutePayload* = proc(
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].} execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].}
@ -162,41 +167,23 @@ type
withdrawable_epoch*: Epoch withdrawable_epoch*: Epoch
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#executionlayerwithdrawalrequest # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#executionlayerwithdrawalrequest
ExecutionLayerWithdrawalRequest* = object WithdrawalRequest* = object
source_address*: ExecutionAddress source_address*: ExecutionAddress
validator_pubkey*: ValidatorPubKey validator_pubkey*: ValidatorPubKey
amount*: Gwei amount*: Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#consolidation
Consolidation* = object
source_index*: uint64
target_index*: uint64
epoch*: Epoch
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#signedconsolidation
SignedConsolidation* = object
message*: Consolidation
signature*: ValidatorSig
TrustedSignedConsolidation* = object
message*: Consolidation
signature*: TrustedSig
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#pendingconsolidation # https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#pendingconsolidation
PendingConsolidation* = object PendingConsolidation* = object
source_index*: uint64 source_index*: uint64
target_index*: uint64 target_index*: uint64
FinalityBranch = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#consolidationrequest
array[log2trunc(FINALIZED_ROOT_GINDEX), Eth2Digest] ConsolidationRequest* = object
source_address*: ExecutionAddress
source_pubkey*: ValidatorPubKey
target_pubkey*: ValidatorPubKey
CurrentSyncCommitteeBranch = # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#aggregateandproof
array[log2trunc(CURRENT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
NextSyncCommitteeBranch =
array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregateandproof
AggregateAndProof* = object AggregateAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation aggregator_index*: uint64 # `ValidatorIndex` after validation
aggregate*: Attestation aggregate*: Attestation
@ -207,6 +194,15 @@ type
message*: AggregateAndProof message*: AggregateAndProof
signature*: ValidatorSig signature*: ValidatorSig
FinalityBranch* =
array[log2trunc(FINALIZED_ROOT_GINDEX), Eth2Digest]
CurrentSyncCommitteeBranch* =
array[log2trunc(CURRENT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
NextSyncCommitteeBranch* =
array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
LightClientHeader* = object LightClientHeader* = object
beacon*: BeaconBlockHeader beacon*: BeaconBlockHeader
@ -216,7 +212,7 @@ type
## Execution payload header corresponding to `beacon.body_root` (from Capella onward) ## Execution payload header corresponding to `beacon.body_root` (from Capella onward)
execution_branch*: capella.ExecutionBranch execution_branch*: capella.ExecutionBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientbootstrap # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object LightClientBootstrap* = object
header*: LightClientHeader header*: LightClientHeader
## Header matching the requested beacon block root ## Header matching the requested beacon block root
@ -372,7 +368,7 @@ type
historical_summaries*: historical_summaries*:
HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT] HashList[HistoricalSummary, Limit HISTORICAL_ROOTS_LIMIT]
deposit_receipts_start_index*: uint64 # [New in Electra:EIP6110] deposit_requests_start_index*: uint64 # [New in Electra:EIP6110]
deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251] deposit_balance_to_consume*: Gwei # [New in Electra:EIP7251]
exit_balance_to_consume*: Gwei # [New in Electra:EIP7251] exit_balance_to_consume*: Gwei # [New in Electra:EIP7251]
earliest_exit_epoch*: Epoch # [New in Electra:EIP7251] earliest_exit_epoch*: Epoch # [New in Electra:EIP7251]
@ -400,7 +396,7 @@ type
data*: BeaconState data*: BeaconState
root*: Eth2Digest # hash_tree_root(data) root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#beaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose ## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to ## a new block. Once the block has been proposed, it is transmitted to
@ -457,7 +453,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body*: TrustedBeaconBlockBody body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object BeaconBlockBody* = object
randao_reveal*: ValidatorSig randao_reveal*: ValidatorSig
eth1_data*: Eth1Data eth1_data*: Eth1Data
@ -482,8 +478,6 @@ type
execution_payload*: electra.ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] execution_payload*: electra.ExecutionPayload # [Modified in Electra:EIP6110:EIP7002]
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments blob_kzg_commitments*: KzgCommitments
consolidations*: List[SignedConsolidation, Limit MAX_CONSOLIDATIONS]
## [New in Electra:EIP7251]
SigVerifiedBeaconBlockBody* = object SigVerifiedBeaconBlockBody* = object
## A BeaconBlock body with signatures verified ## A BeaconBlock body with signatures verified
@ -523,8 +517,6 @@ type
execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002]
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments blob_kzg_commitments*: KzgCommitments
consolidations*: List[TrustedSignedConsolidation, Limit MAX_CONSOLIDATIONS]
## [New in Electra:EIP7251]
TrustedBeaconBlockBody* = object TrustedBeaconBlockBody* = object
## A full verified block ## A full verified block
@ -552,8 +544,6 @@ type
execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002] execution_payload*: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002]
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments blob_kzg_commitments*: KzgCommitments
consolidations*: List[TrustedSignedConsolidation, Limit MAX_CONSOLIDATIONS]
## [New in Electra:EIP7251]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object SignedBeaconBlock* = object
@ -598,12 +588,12 @@ type
AttestationCommitteeBits* = BitArray[MAX_COMMITTEES_PER_SLOT.int] AttestationCommitteeBits* = BitArray[MAX_COMMITTEES_PER_SLOT.int]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#attestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#attestation
Attestation* = object Attestation* = object
aggregation_bits*: ElectraCommitteeValidatorsBits aggregation_bits*: ElectraCommitteeValidatorsBits
data*: AttestationData data*: AttestationData
committee_bits*: AttestationCommitteeBits # [New in Electra:EIP7549]
signature*: ValidatorSig signature*: ValidatorSig
committee_bits*: AttestationCommitteeBits # [New in Electra:EIP7549]
TrustedAttestation* = object TrustedAttestation* = object
# The Trusted version, at the moment, implies that the cryptographic signature was checked. # The Trusted version, at the moment, implies that the cryptographic signature was checked.
@ -611,8 +601,8 @@ type
# Currently the code MUST verify the state transition as soon as the signature is verified # Currently the code MUST verify the state transition as soon as the signature is verified
aggregation_bits*: ElectraCommitteeValidatorsBits aggregation_bits*: ElectraCommitteeValidatorsBits
data*: AttestationData data*: AttestationData
committee_bits*: AttestationCommitteeBits # [New in Electra:EIP7549]
signature*: TrustedSig signature*: TrustedSig
committee_bits*: AttestationCommitteeBits # [New in Electra:EIP7549]
SomeSignedBeaconBlock* = SomeSignedBeaconBlock* =
SignedBeaconBlock | SignedBeaconBlock |
@ -686,6 +676,233 @@ func shortLog*(v: ExecutionPayload): auto =
excess_blob_gas: $(v.excess_blob_gas) excess_blob_gas: $(v.excess_blob_gas)
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#modified-get_lc_execution_root
func get_lc_execution_root*(
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
let epoch = header.beacon.slot.epoch
# [New in Electra]
if epoch >= cfg.ELECTRA_FORK_EPOCH:
return hash_tree_root(header.execution)
# [Modified in Electra]
if epoch >= cfg.DENEB_FORK_EPOCH:
let execution_header = deneb.ExecutionPayloadHeader(
parent_hash: header.execution.parent_hash,
fee_recipient: header.execution.fee_recipient,
state_root: header.execution.state_root,
receipts_root: header.execution.receipts_root,
logs_bloom: header.execution.logs_bloom,
prev_randao: header.execution.prev_randao,
block_number: header.execution.block_number,
gas_limit: header.execution.gas_limit,
gas_used: header.execution.gas_used,
timestamp: header.execution.timestamp,
extra_data: header.execution.extra_data,
base_fee_per_gas: header.execution.base_fee_per_gas,
block_hash: header.execution.block_hash,
transactions_root: header.execution.transactions_root,
withdrawals_root: header.execution.withdrawals_root,
blob_gas_used: header.execution.blob_gas_used,
excess_blob_gas: header.execution.excess_blob_gas)
return hash_tree_root(execution_header)
if epoch >= cfg.CAPELLA_FORK_EPOCH:
let execution_header = capella.ExecutionPayloadHeader(
parent_hash: header.execution.parent_hash,
fee_recipient: header.execution.fee_recipient,
state_root: header.execution.state_root,
receipts_root: header.execution.receipts_root,
logs_bloom: header.execution.logs_bloom,
prev_randao: header.execution.prev_randao,
block_number: header.execution.block_number,
gas_limit: header.execution.gas_limit,
gas_used: header.execution.gas_used,
timestamp: header.execution.timestamp,
extra_data: header.execution.extra_data,
base_fee_per_gas: header.execution.base_fee_per_gas,
block_hash: header.execution.block_hash,
transactions_root: header.execution.transactions_root,
withdrawals_root: header.execution.withdrawals_root)
return hash_tree_root(execution_header)
ZERO_HASH
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#modified-is_valid_light_client_header
func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool =
let epoch = header.beacon.slot.epoch
# [New in Electra:EIP6110:EIP7002:EIP7251]
if epoch < cfg.ELECTRA_FORK_EPOCH:
if not header.execution.deposit_requests_root.isZero or
not header.execution.withdrawal_requests_root.isZero or
not header.execution.consolidation_requests_root.isZero:
return false
if epoch < cfg.DENEB_FORK_EPOCH:
if header.execution.blob_gas_used != 0 or
header.execution.excess_blob_gas != 0:
return false
if epoch < cfg.CAPELLA_FORK_EPOCH:
return
header.execution == static(default(ExecutionPayloadHeader)) and
header.execution_branch == static(default(ExecutionBranch))
is_valid_merkle_branch(
get_lc_execution_root(header, cfg),
header.execution_branch,
log2trunc(EXECUTION_PAYLOAD_GINDEX),
get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#normalize_merkle_branch
func normalize_merkle_branch*[N](
branch: array[N, Eth2Digest],
gindex: static GeneralizedIndex): auto =
const depth = log2trunc(gindex)
var res: array[depth, Eth2Digest]
when depth >= branch.len:
const num_extra = depth - branch.len
res[num_extra ..< depth] = branch
else:
const num_extra = branch.len - depth
for node in branch[0 ..< num_extra]:
doAssert node.isZero, "Truncation of Merkle branch cannot lose info"
res[0 ..< depth] = branch[num_extra ..< branch.len]
res
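To make the padding/truncation behaviour above concrete, here is a standalone sketch on toy data, using plain strings instead of the project's `Eth2Digest` arrays (the proc name and the "0x00" placeholder are illustrative only): when the target depth is larger, zero nodes are prepended; when it is smaller, the leading nodes are dropped and must be zero so the truncation cannot lose information.

import std/sequtils

proc normalize(branch: seq[string], depth: int): seq[string] =
  ## Mirrors the shape of `normalize_merkle_branch` on toy data.
  if depth >= branch.len:
    # New zero nodes go at the front; existing nodes keep their order at the end.
    result = newSeqWith(depth - branch.len, "0x00") & branch
  else:
    let numExtra = branch.len - depth
    for node in branch[0 ..< numExtra]:
      doAssert node == "0x00", "Truncation of Merkle branch cannot lose info"
    result = branch[numExtra .. ^1]

doAssert normalize(@["a", "b"], 4) == @["0x00", "0x00", "a", "b"]
doAssert normalize(@["0x00", "a", "b"], 2) == @["a", "b"]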
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_electra*(
pre: deneb.LightClientHeader): LightClientHeader =
LightClientHeader(
beacon: pre.beacon,
execution: ExecutionPayloadHeader(
parent_hash: pre.execution.parent_hash,
fee_recipient: pre.execution.fee_recipient,
state_root: pre.execution.state_root,
receipts_root: pre.execution.receipts_root,
logs_bloom: pre.execution.logs_bloom,
prev_randao: pre.execution.prev_randao,
block_number: pre.execution.block_number,
gas_limit: pre.execution.gas_limit,
gas_used: pre.execution.gas_used,
timestamp: pre.execution.timestamp,
extra_data: pre.execution.extra_data,
base_fee_per_gas: pre.execution.base_fee_per_gas,
block_hash: pre.execution.block_hash,
transactions_root: pre.execution.transactions_root,
withdrawals_root: pre.execution.withdrawals_root,
blob_gas_used: pre.execution.blob_gas_used,
excess_blob_gas: pre.execution.excess_blob_gas,
deposit_requests_root: ZERO_HASH, # [New in Electra:EIP6110]
withdrawal_requests_root: ZERO_HASH, # [New in Electra:EIP7002:EIP7251]
consolidation_requests_root: ZERO_HASH), # [New in Electra:EIP7251]
execution_branch: pre.execution_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_electra*(
pre: deneb.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap(
header: upgrade_lc_header_to_electra(pre.header),
current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: normalize_merkle_branch(
pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_electra*(
pre: deneb.LightClientUpdate): LightClientUpdate =
LightClientUpdate(
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
next_sync_committee: pre.next_sync_committee,
next_sync_committee_branch: normalize_merkle_branch(
pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX),
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
finality_branch: normalize_merkle_branch(
pre.finality_branch, FINALIZED_ROOT_GINDEX),
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_electra*(
pre: deneb.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate(
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
finality_branch: normalize_merkle_branch(
pre.finality_branch, FINALIZED_ROOT_GINDEX),
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_electra*(
pre: deneb.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate(
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
func shortLog*(v: LightClientHeader): auto =
(
beacon: shortLog(v.beacon),
execution: (
block_hash: v.execution.block_hash,
block_number: v.execution.block_number)
)
func shortLog*(v: LightClientBootstrap): auto =
(
header: shortLog(v.header)
)
func shortLog*(v: LightClientUpdate): auto =
(
attested: shortLog(v.attested_header),
has_next_sync_committee:
v.next_sync_committee != static(default(typeof(v.next_sync_committee))),
finalized: shortLog(v.finalized_header),
num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot
)
func shortLog*(v: LightClientFinalityUpdate): auto =
(
attested: shortLog(v.attested_header),
finalized: shortLog(v.finalized_header),
num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot
)
func shortLog*(v: LightClientOptimisticUpdate): auto =
(
attested: shortLog(v.attested_header),
num_active_participants: v.sync_aggregate.num_active_participants,
signature_slot: v.signature_slot,
)
chronicles.formatIt LightClientBootstrap: shortLog(it)
chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_electra*(
pre: deneb.LightClientStore): LightClientStore =
let best_valid_update =
if pre.best_valid_update.isNone:
Opt.none(LightClientUpdate)
else:
Opt.some upgrade_lc_update_to_electra(pre.best_valid_update.get)
LightClientStore(
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
current_sync_committee: pre.current_sync_committee,
next_sync_committee: pre.next_sync_committee,
best_valid_update: best_valid_update,
optimistic_header: upgrade_lc_header_to_electra(pre.optimistic_header),
previous_max_active_participants: pre.previous_max_active_participants,
current_max_active_participants: pre.current_max_active_participants)
template asSigned*( template asSigned*(
x: SigVerifiedSignedBeaconBlock | x: SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock | MsgTrustedSignedBeaconBlock |
View File
@ -51,7 +51,7 @@ RestJson.useDefaultSerializationFor(
BlobSidecarInfoObject, BlobSidecarInfoObject,
BlobsBundle, BlobsBundle,
Checkpoint, Checkpoint,
Consolidation, ConsolidationRequest,
ContributionAndProof, ContributionAndProof,
DataEnclosedObject, DataEnclosedObject,
DataMetaEnclosedObject, DataMetaEnclosedObject,
@ -65,14 +65,13 @@ RestJson.useDefaultSerializationFor(
DenebSignedBlockContents, DenebSignedBlockContents,
Deposit, Deposit,
DepositData, DepositData,
DepositReceipt, DepositRequest,
DepositTreeSnapshot, DepositTreeSnapshot,
DistributedKeystoreInfo, DistributedKeystoreInfo,
ElectraSignedBlockContents, ElectraSignedBlockContents,
EmptyBody, EmptyBody,
Eth1Data, Eth1Data,
EventBeaconBlockObject, EventBeaconBlockObject,
ExecutionLayerWithdrawalRequest,
Fork, Fork,
GetBlockAttestationsResponse, GetBlockAttestationsResponse,
GetBlockHeaderResponse, GetBlockHeaderResponse,
@ -169,7 +168,6 @@ RestJson.useDefaultSerializationFor(
SetGraffitiRequest, SetGraffitiRequest,
SignedBLSToExecutionChange, SignedBLSToExecutionChange,
SignedBeaconBlockHeader, SignedBeaconBlockHeader,
SignedConsolidation,
SignedContributionAndProof, SignedContributionAndProof,
SignedValidatorRegistrationV1, SignedValidatorRegistrationV1,
SignedVoluntaryExit, SignedVoluntaryExit,
@ -194,6 +192,7 @@ RestJson.useDefaultSerializationFor(
Web3SignerSyncCommitteeMessageData, Web3SignerSyncCommitteeMessageData,
Web3SignerValidatorRegistration, Web3SignerValidatorRegistration,
Withdrawal, Withdrawal,
WithdrawalRequest,
altair.BeaconBlock, altair.BeaconBlock,
altair.BeaconBlockBody, altair.BeaconBlockBody,
altair.BeaconState, altair.BeaconState,
@ -253,6 +252,11 @@ RestJson.useDefaultSerializationFor(
electra.ExecutionPayload, electra.ExecutionPayload,
electra.ExecutionPayloadHeader, electra.ExecutionPayloadHeader,
electra.IndexedAttestation, electra.IndexedAttestation,
electra.LightClientBootstrap,
electra.LightClientFinalityUpdate,
electra.LightClientHeader,
electra.LightClientOptimisticUpdate,
electra.LightClientUpdate,
electra.SignedBeaconBlock, electra.SignedBeaconBlock,
electra.TrustedAttestation, electra.TrustedAttestation,
electra_mev.BlindedBeaconBlock, electra_mev.BlindedBeaconBlock,
@ -1358,7 +1362,7 @@ proc readValue*(reader: var JsonReader[RestJson],
value: var (KzgCommitment|KzgProof)) {. value: var (KzgCommitment|KzgProof)) {.
raises: [IOError, SerializationError].} = raises: [IOError, SerializationError].} =
try: try:
hexToByteArray(reader.readValue(string), distinctBase(value)) hexToByteArray(reader.readValue(string), distinctBase(value.bytes))
except ValueError: except ValueError:
raiseUnexpectedValue(reader, raiseUnexpectedValue(reader,
"KzgCommitment value should be a valid hex string") "KzgCommitment value should be a valid hex string")
@ -1366,7 +1370,7 @@ proc readValue*(reader: var JsonReader[RestJson],
proc writeValue*( proc writeValue*(
writer: var JsonWriter[RestJson], value: KzgCommitment | KzgProof writer: var JsonWriter[RestJson], value: KzgCommitment | KzgProof
) {.raises: [IOError].} = ) {.raises: [IOError].} =
writeValue(writer, hexOriginal(distinctBase(value))) writeValue(writer, hexOriginal(distinctBase(value.bytes)))
## GraffitiBytes ## GraffitiBytes
proc writeValue*( proc writeValue*(
@ -3532,7 +3536,9 @@ proc decodeBody*(
of ConsensusFork.Phase0: of ConsensusFork.Phase0:
let blck = let blck =
try: try:
SSZ.decode(body.data, phase0.SignedBeaconBlock) var res = SSZ.decode(body.data, phase0.SignedBeaconBlock)
res.root = hash_tree_root(res.message)
res
except SerializationError as exc: except SerializationError as exc:
return err(RestErrorMessage.init(Http400, UnableDecodeError, return err(RestErrorMessage.init(Http400, UnableDecodeError,
[version, exc.formatMsg("<data>")])) [version, exc.formatMsg("<data>")]))
@ -3544,7 +3550,9 @@ proc decodeBody*(
of ConsensusFork.Altair: of ConsensusFork.Altair:
let blck = let blck =
try: try:
SSZ.decode(body.data, altair.SignedBeaconBlock) var res = SSZ.decode(body.data, altair.SignedBeaconBlock)
res.root = hash_tree_root(res.message)
res
except SerializationError as exc: except SerializationError as exc:
return err(RestErrorMessage.init(Http400, UnableDecodeError, return err(RestErrorMessage.init(Http400, UnableDecodeError,
[version, exc.formatMsg("<data>")])) [version, exc.formatMsg("<data>")]))
@ -3556,7 +3564,9 @@ proc decodeBody*(
of ConsensusFork.Bellatrix: of ConsensusFork.Bellatrix:
let blck = let blck =
try: try:
SSZ.decode(body.data, bellatrix.SignedBeaconBlock) var res = SSZ.decode(body.data, bellatrix.SignedBeaconBlock)
res.root = hash_tree_root(res.message)
res
except SerializationError as exc: except SerializationError as exc:
return err(RestErrorMessage.init(Http400, UnableDecodeError, return err(RestErrorMessage.init(Http400, UnableDecodeError,
[version, exc.formatMsg("<data>")])) [version, exc.formatMsg("<data>")]))
@ -3568,7 +3578,9 @@ proc decodeBody*(
of ConsensusFork.Capella: of ConsensusFork.Capella:
let blck = let blck =
try: try:
SSZ.decode(body.data, capella.SignedBeaconBlock) var res = SSZ.decode(body.data, capella.SignedBeaconBlock)
res.root = hash_tree_root(res.message)
res
except SerializationError as exc: except SerializationError as exc:
return err(RestErrorMessage.init(Http400, UnableDecodeError, return err(RestErrorMessage.init(Http400, UnableDecodeError,
[version, exc.formatMsg("<data>")])) [version, exc.formatMsg("<data>")]))
@ -3580,7 +3592,9 @@ proc decodeBody*(
of ConsensusFork.Deneb: of ConsensusFork.Deneb:
let blckContents = let blckContents =
try: try:
SSZ.decode(body.data, DenebSignedBlockContents) var res = SSZ.decode(body.data, DenebSignedBlockContents)
res.signed_block.root = hash_tree_root(res.signed_block.message)
res
except SerializationError as exc: except SerializationError as exc:
return err(RestErrorMessage.init(Http400, UnableDecodeError, return err(RestErrorMessage.init(Http400, UnableDecodeError,
[version, exc.formatMsg("<data>")])) [version, exc.formatMsg("<data>")]))
@ -3592,7 +3606,9 @@ proc decodeBody*(
of ConsensusFork.Electra: of ConsensusFork.Electra:
let blckContents = let blckContents =
try: try:
SSZ.decode(body.data, ElectraSignedBlockContents) var res = SSZ.decode(body.data, ElectraSignedBlockContents)
res.signed_block.root = hash_tree_root(res.signed_block.message)
res
except SerializationError as exc: except SerializationError as exc:
return err(RestErrorMessage.init(Http400, UnableDecodeError, return err(RestErrorMessage.init(Http400, UnableDecodeError,
[version, exc.formatMsg("<data>")])) [version, exc.formatMsg("<data>")]))
View File
@ -292,7 +292,7 @@ type
RestWithdrawalPrefix* = distinct array[1, byte] RestWithdrawalPrefix* = distinct array[1, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#executionpayload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#executionpayload
RestExecutionPayload* = object RestExecutionPayload* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
View File
@ -1325,8 +1325,10 @@ func forkVersion*(cfg: RuntimeConfig, consensusFork: ConsensusFork): Version =
func lcDataForkAtConsensusFork*( func lcDataForkAtConsensusFork*(
consensusFork: ConsensusFork): LightClientDataFork = consensusFork: ConsensusFork): LightClientDataFork =
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
if consensusFork >= ConsensusFork.Deneb: if consensusFork >= ConsensusFork.Electra:
LightClientDataFork.Electra
elif consensusFork >= ConsensusFork.Deneb:
LightClientDataFork.Deneb LightClientDataFork.Deneb
elif consensusFork >= ConsensusFork.Capella: elif consensusFork >= ConsensusFork.Capella:
LightClientDataFork.Capella LightClientDataFork.Capella
View File
@ -16,32 +16,42 @@ type
None = 0, # only use non-0 in DB to detect accidentally uninitialized data None = 0, # only use non-0 in DB to detect accidentally uninitialized data
Altair = 1, Altair = 1,
Capella = 2, Capella = 2,
Deneb = 3 Deneb = 3,
Electra = 4
ForkyCurrentSyncCommitteeBranch* =
altair.CurrentSyncCommitteeBranch |
electra.CurrentSyncCommitteeBranch
ForkyLightClientHeader* = ForkyLightClientHeader* =
altair.LightClientHeader | altair.LightClientHeader |
capella.LightClientHeader | capella.LightClientHeader |
deneb.LightClientHeader deneb.LightClientHeader |
electra.LightClientHeader
ForkyLightClientBootstrap* = ForkyLightClientBootstrap* =
altair.LightClientBootstrap | altair.LightClientBootstrap |
capella.LightClientBootstrap | capella.LightClientBootstrap |
deneb.LightClientBootstrap deneb.LightClientBootstrap |
electra.LightClientBootstrap
ForkyLightClientUpdate* = ForkyLightClientUpdate* =
altair.LightClientUpdate | altair.LightClientUpdate |
capella.LightClientUpdate | capella.LightClientUpdate |
deneb.LightClientUpdate deneb.LightClientUpdate |
electra.LightClientUpdate
ForkyLightClientFinalityUpdate* = ForkyLightClientFinalityUpdate* =
altair.LightClientFinalityUpdate | altair.LightClientFinalityUpdate |
capella.LightClientFinalityUpdate | capella.LightClientFinalityUpdate |
deneb.LightClientFinalityUpdate deneb.LightClientFinalityUpdate |
electra.LightClientFinalityUpdate
ForkyLightClientOptimisticUpdate* = ForkyLightClientOptimisticUpdate* =
altair.LightClientOptimisticUpdate | altair.LightClientOptimisticUpdate |
capella.LightClientOptimisticUpdate | capella.LightClientOptimisticUpdate |
deneb.LightClientOptimisticUpdate deneb.LightClientOptimisticUpdate |
electra.LightClientOptimisticUpdate
SomeForkyLightClientUpdateWithSyncCommittee* = SomeForkyLightClientUpdateWithSyncCommittee* =
ForkyLightClientUpdate ForkyLightClientUpdate
@ -62,7 +72,8 @@ type
ForkyLightClientStore* = ForkyLightClientStore* =
altair.LightClientStore | altair.LightClientStore |
capella.LightClientStore | capella.LightClientStore |
deneb.LightClientStore deneb.LightClientStore |
electra.LightClientStore
ForkedLightClientHeader* = object ForkedLightClientHeader* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -74,6 +85,8 @@ type
capellaData*: capella.LightClientHeader capellaData*: capella.LightClientHeader
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientHeader denebData*: deneb.LightClientHeader
of LightClientDataFork.Electra:
electraData*: electra.LightClientHeader
ForkedLightClientBootstrap* = object ForkedLightClientBootstrap* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -85,6 +98,8 @@ type
capellaData*: capella.LightClientBootstrap capellaData*: capella.LightClientBootstrap
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientBootstrap denebData*: deneb.LightClientBootstrap
of LightClientDataFork.Electra:
electraData*: electra.LightClientBootstrap
ForkedLightClientUpdate* = object ForkedLightClientUpdate* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -96,6 +111,8 @@ type
capellaData*: capella.LightClientUpdate capellaData*: capella.LightClientUpdate
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientUpdate denebData*: deneb.LightClientUpdate
of LightClientDataFork.Electra:
electraData*: electra.LightClientUpdate
ForkedLightClientFinalityUpdate* = object ForkedLightClientFinalityUpdate* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -107,6 +124,8 @@ type
capellaData*: capella.LightClientFinalityUpdate capellaData*: capella.LightClientFinalityUpdate
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientFinalityUpdate denebData*: deneb.LightClientFinalityUpdate
of LightClientDataFork.Electra:
electraData*: electra.LightClientFinalityUpdate
ForkedLightClientOptimisticUpdate* = object ForkedLightClientOptimisticUpdate* = object
case kind*: LightClientDataFork case kind*: LightClientDataFork
@ -118,6 +137,8 @@ type
capellaData*: capella.LightClientOptimisticUpdate capellaData*: capella.LightClientOptimisticUpdate
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientOptimisticUpdate denebData*: deneb.LightClientOptimisticUpdate
of LightClientDataFork.Electra:
electraData*: electra.LightClientOptimisticUpdate
SomeForkedLightClientUpdateWithSyncCommittee* = SomeForkedLightClientUpdateWithSyncCommittee* =
ForkedLightClientUpdate ForkedLightClientUpdate
@ -145,11 +166,15 @@ type
capellaData*: capella.LightClientStore capellaData*: capella.LightClientStore
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData*: deneb.LightClientStore denebData*: deneb.LightClientStore
of LightClientDataFork.Electra:
electraData*: electra.LightClientStore
func lcDataForkAtEpoch*( func lcDataForkAtEpoch*(
cfg: RuntimeConfig, epoch: Epoch): LightClientDataFork = cfg: RuntimeConfig, epoch: Epoch): LightClientDataFork =
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
if epoch >= cfg.DENEB_FORK_EPOCH: if epoch >= cfg.ELECTRA_FORK_EPOCH:
LightClientDataFork.Electra
elif epoch >= cfg.DENEB_FORK_EPOCH:
LightClientDataFork.Deneb LightClientDataFork.Deneb
elif epoch >= cfg.CAPELLA_FORK_EPOCH: elif epoch >= cfg.CAPELLA_FORK_EPOCH:
LightClientDataFork.Capella LightClientDataFork.Capella
@ -159,7 +184,8 @@ func lcDataForkAtEpoch*(
LightClientDataFork.None LightClientDataFork.None
template kind*( template kind*(
x: typedesc[ # `SomeLightClientObject` doesn't work here (Nim 1.6) # `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
x: typedesc[
altair.LightClientHeader | altair.LightClientHeader |
altair.LightClientBootstrap | altair.LightClientBootstrap |
altair.LightClientUpdate | altair.LightClientUpdate |
@ -169,7 +195,8 @@ template kind*(
LightClientDataFork.Altair LightClientDataFork.Altair
template kind*( template kind*(
x: typedesc[ # `SomeLightClientObject` doesn't work here (Nim 1.6) # `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
x: typedesc[
capella.LightClientHeader | capella.LightClientHeader |
capella.LightClientBootstrap | capella.LightClientBootstrap |
capella.LightClientUpdate | capella.LightClientUpdate |
@ -179,7 +206,8 @@ template kind*(
LightClientDataFork.Capella LightClientDataFork.Capella
template kind*( template kind*(
x: typedesc[ # `SomeLightClientObject` doesn't work here (Nim 1.6) # `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
x: typedesc[
deneb.LightClientHeader | deneb.LightClientHeader |
deneb.LightClientBootstrap | deneb.LightClientBootstrap |
deneb.LightClientUpdate | deneb.LightClientUpdate |
@ -188,8 +216,72 @@ template kind*(
deneb.LightClientStore]): LightClientDataFork = deneb.LightClientStore]): LightClientDataFork =
LightClientDataFork.Deneb LightClientDataFork.Deneb
template kind*(
# `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
x: typedesc[
electra.LightClientHeader |
electra.LightClientBootstrap |
electra.LightClientUpdate |
electra.LightClientFinalityUpdate |
electra.LightClientOptimisticUpdate |
electra.LightClientStore]): LightClientDataFork =
LightClientDataFork.Electra
template FINALIZED_ROOT_GINDEX*(
kind: static LightClientDataFork): GeneralizedIndex =
when kind >= LightClientDataFork.Electra:
electra.FINALIZED_ROOT_GINDEX
elif kind >= LightClientDataFork.Altair:
altair.FINALIZED_ROOT_GINDEX
else:
static: raiseAssert "Unreachable"
template FinalityBranch*(kind: static LightClientDataFork): auto =
when kind >= LightClientDataFork.Electra:
typedesc[electra.FinalityBranch]
elif kind >= LightClientDataFork.Altair:
typedesc[altair.FinalityBranch]
else:
static: raiseAssert "Unreachable"
template CURRENT_SYNC_COMMITTEE_GINDEX*(
kind: static LightClientDataFork): GeneralizedIndex =
when kind >= LightClientDataFork.Electra:
electra.CURRENT_SYNC_COMMITTEE_GINDEX
elif kind >= LightClientDataFork.Altair:
altair.CURRENT_SYNC_COMMITTEE_GINDEX
else:
static: raiseAssert "Unreachable"
template CurrentSyncCommitteeBranch*(kind: static LightClientDataFork): auto =
when kind >= LightClientDataFork.Electra:
typedesc[electra.CurrentSyncCommitteeBranch]
elif kind >= LightClientDataFork.Altair:
typedesc[altair.CurrentSyncCommitteeBranch]
else:
static: raiseAssert "Unreachable"
template NEXT_SYNC_COMMITTEE_GINDEX*(
kind: static LightClientDataFork): GeneralizedIndex =
when kind >= LightClientDataFork.Electra:
electra.NEXT_SYNC_COMMITTEE_GINDEX
elif kind >= LightClientDataFork.Altair:
altair.NEXT_SYNC_COMMITTEE_GINDEX
else:
static: raiseAssert "Unreachable"
template NextSyncCommitteeBranch*(kind: static LightClientDataFork): auto =
when kind >= LightClientDataFork.Electra:
typedesc[electra.NextSyncCommitteeBranch]
elif kind >= LightClientDataFork.Altair:
typedesc[altair.NextSyncCommitteeBranch]
else:
static: raiseAssert "Unreachable"
template LightClientHeader*(kind: static LightClientDataFork): auto = template LightClientHeader*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientHeader]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientHeader] typedesc[deneb.LightClientHeader]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientHeader] typedesc[capella.LightClientHeader]
@ -199,7 +291,9 @@ template LightClientHeader*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientBootstrap*(kind: static LightClientDataFork): auto = template LightClientBootstrap*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientBootstrap]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientBootstrap] typedesc[deneb.LightClientBootstrap]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientBootstrap] typedesc[capella.LightClientBootstrap]
@ -209,7 +303,9 @@ template LightClientBootstrap*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientUpdate*(kind: static LightClientDataFork): auto = template LightClientUpdate*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientUpdate]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientUpdate] typedesc[deneb.LightClientUpdate]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientUpdate] typedesc[capella.LightClientUpdate]
@ -219,7 +315,9 @@ template LightClientUpdate*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientFinalityUpdate*(kind: static LightClientDataFork): auto = template LightClientFinalityUpdate*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientFinalityUpdate]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientFinalityUpdate] typedesc[deneb.LightClientFinalityUpdate]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientFinalityUpdate] typedesc[capella.LightClientFinalityUpdate]
@ -229,7 +327,9 @@ template LightClientFinalityUpdate*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientOptimisticUpdate*(kind: static LightClientDataFork): auto = template LightClientOptimisticUpdate*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientOptimisticUpdate]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientOptimisticUpdate] typedesc[deneb.LightClientOptimisticUpdate]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientOptimisticUpdate] typedesc[capella.LightClientOptimisticUpdate]
@ -239,7 +339,9 @@ template LightClientOptimisticUpdate*(kind: static LightClientDataFork): auto =
static: raiseAssert "Unreachable" static: raiseAssert "Unreachable"
template LightClientStore*(kind: static LightClientDataFork): auto = template LightClientStore*(kind: static LightClientDataFork): auto =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
typedesc[electra.LightClientStore]
elif kind == LightClientDataFork.Deneb:
typedesc[deneb.LightClientStore] typedesc[deneb.LightClientStore]
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
typedesc[capella.LightClientStore] typedesc[capella.LightClientStore]
@ -298,7 +400,10 @@ template Forked*(x: typedesc[ForkyLightClientStore]): auto =
template withAll*( template withAll*(
x: typedesc[LightClientDataFork], body: untyped): untyped = x: typedesc[LightClientDataFork], body: untyped): untyped =
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
block:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
body
block: block:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
body body
@ -315,6 +420,9 @@ template withAll*(
template withLcDataFork*( template withLcDataFork*(
x: LightClientDataFork, body: untyped): untyped = x: LightClientDataFork, body: untyped): untyped =
case x case x
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
body body
@ -331,6 +439,10 @@ template withLcDataFork*(
template withForkyHeader*( template withForkyHeader*(
x: ForkedLightClientHeader, body: untyped): untyped = x: ForkedLightClientHeader, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyHeader: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyHeader: untyped {.inject, used.} = x.denebData template forkyHeader: untyped {.inject, used.} = x.denebData
@ -350,6 +462,10 @@ template withForkyHeader*(
template withForkyBootstrap*( template withForkyBootstrap*(
x: ForkedLightClientBootstrap, body: untyped): untyped = x: ForkedLightClientBootstrap, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyBootstrap: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyBootstrap: untyped {.inject, used.} = x.denebData template forkyBootstrap: untyped {.inject, used.} = x.denebData
@ -369,6 +485,10 @@ template withForkyBootstrap*(
template withForkyUpdate*( template withForkyUpdate*(
x: ForkedLightClientUpdate, body: untyped): untyped = x: ForkedLightClientUpdate, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyUpdate: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyUpdate: untyped {.inject, used.} = x.denebData template forkyUpdate: untyped {.inject, used.} = x.denebData
@ -388,6 +508,10 @@ template withForkyUpdate*(
template withForkyFinalityUpdate*( template withForkyFinalityUpdate*(
x: ForkedLightClientFinalityUpdate, body: untyped): untyped = x: ForkedLightClientFinalityUpdate, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyFinalityUpdate: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyFinalityUpdate: untyped {.inject, used.} = x.denebData template forkyFinalityUpdate: untyped {.inject, used.} = x.denebData
@ -407,6 +531,10 @@ template withForkyFinalityUpdate*(
template withForkyOptimisticUpdate*( template withForkyOptimisticUpdate*(
x: ForkedLightClientOptimisticUpdate, body: untyped): untyped = x: ForkedLightClientOptimisticUpdate, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyOptimisticUpdate: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyOptimisticUpdate: untyped {.inject, used.} = x.denebData template forkyOptimisticUpdate: untyped {.inject, used.} = x.denebData
@ -426,6 +554,10 @@ template withForkyOptimisticUpdate*(
template withForkyObject*( template withForkyObject*(
x: SomeForkedLightClientObject, body: untyped): untyped = x: SomeForkedLightClientObject, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyObject: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyObject: untyped {.inject, used.} = x.denebData template forkyObject: untyped {.inject, used.} = x.denebData
@ -445,6 +577,10 @@ template withForkyObject*(
template withForkyStore*( template withForkyStore*(
x: ForkedLightClientStore, body: untyped): untyped = x: ForkedLightClientStore, body: untyped): untyped =
case x.kind case x.kind
of LightClientDataFork.Electra:
const lcDataFork {.inject, used.} = LightClientDataFork.Electra
template forkyStore: untyped {.inject, used.} = x.electraData
body
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
const lcDataFork {.inject, used.} = LightClientDataFork.Deneb const lcDataFork {.inject, used.} = LightClientDataFork.Deneb
template forkyStore: untyped {.inject, used.} = x.denebData template forkyStore: untyped {.inject, used.} = x.denebData
@ -473,7 +609,9 @@ func init*(
type ResultType = typeof(forkyData).Forked type ResultType = typeof(forkyData).Forked
static: doAssert ResultType is x static: doAssert ResultType is x
const kind = typeof(forkyData).kind const kind = typeof(forkyData).kind
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
ResultType(kind: kind, electraData: forkyData)
elif kind == LightClientDataFork.Deneb:
ResultType(kind: kind, denebData: forkyData) ResultType(kind: kind, denebData: forkyData)
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
ResultType(kind: kind, capellaData: forkyData) ResultType(kind: kind, capellaData: forkyData)
@ -488,7 +626,9 @@ template forky*(
SomeForkedLightClientObject | SomeForkedLightClientObject |
ForkedLightClientStore, ForkedLightClientStore,
kind: static LightClientDataFork): untyped = kind: static LightClientDataFork): untyped =
when kind == LightClientDataFork.Deneb: when kind == LightClientDataFork.Electra:
x.electraData
elif kind == LightClientDataFork.Deneb:
x.denebData x.denebData
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
x.capellaData x.capellaData
@ -641,7 +781,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_header_to_deneb( denebData: upgrade_lc_header_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientHeader(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_header_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -676,7 +824,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_bootstrap_to_deneb( denebData: upgrade_lc_bootstrap_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientBootstrap(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_bootstrap_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -711,7 +867,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_update_to_deneb( denebData: upgrade_lc_update_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientUpdate(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_update_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -746,7 +910,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_finality_update_to_deneb( denebData: upgrade_lc_finality_update_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientFinalityUpdate(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_finality_update_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -781,7 +953,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_optimistic_update_to_deneb( denebData: upgrade_lc_optimistic_update_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientOptimisticUpdate(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_optimistic_update_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migrateToDataFork*( func migrateToDataFork*(
@ -816,7 +996,15 @@ func migrateToDataFork*(
denebData: upgrade_lc_store_to_deneb( denebData: upgrade_lc_store_to_deneb(
x.forky(LightClientDataFork.Capella))) x.forky(LightClientDataFork.Capella)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Deneb # Upgrade to Electra
when newKind >= LightClientDataFork.Electra:
if x.kind == LightClientDataFork.Deneb:
x = ForkedLightClientStore(
kind: LightClientDataFork.Electra,
electraData: upgrade_lc_store_to_electra(
x.forky(LightClientDataFork.Deneb)))
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
doAssert x.kind == newKind doAssert x.kind == newKind
func migratingToDataFork*[ func migratingToDataFork*[
@ -831,7 +1019,8 @@ func migratingToDataFork*[
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/full-node.md#block_to_light_client_header # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/full-node.md#block_to_light_client_header
func toAltairLightClientHeader( func toAltairLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6) # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock | phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock | altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
@ -841,7 +1030,8 @@ func toAltairLightClientHeader(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/full-node.md#modified-block_to_light_client_header # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/full-node.md#modified-block_to_light_client_header
func toCapellaLightClientHeader( func toCapellaLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6) # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock | phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock | altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
@ -856,7 +1046,8 @@ func toCapellaLightClientHeader(
beacon: blck.message.toBeaconBlockHeader()) beacon: blck.message.toBeaconBlockHeader())
func toCapellaLightClientHeader( func toCapellaLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6) # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock
): capella.LightClientHeader = ): capella.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload template payload: untyped = blck.message.body.execution_payload
@ -883,7 +1074,8 @@ func toCapellaLightClientHeader(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.0/specs/deneb/light-client/full-node.md#modified-block_to_light_client_header # https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.0/specs/deneb/light-client/full-node.md#modified-block_to_light_client_header
func toDenebLightClientHeader( func toDenebLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6) # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock | phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock | altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
@ -898,7 +1090,8 @@ func toDenebLightClientHeader(
beacon: blck.message.toBeaconBlockHeader()) beacon: blck.message.toBeaconBlockHeader())
func toDenebLightClientHeader( func toDenebLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6) # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock
): deneb.LightClientHeader = ): deneb.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload template payload: untyped = blck.message.body.execution_payload
@ -924,7 +1117,8 @@ func toDenebLightClientHeader(
capella.EXECUTION_PAYLOAD_GINDEX).get) capella.EXECUTION_PAYLOAD_GINDEX).get)
func toDenebLightClientHeader( func toDenebLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6) # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock
): deneb.LightClientHeader = ): deneb.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload template payload: untyped = blck.message.body.execution_payload
@ -951,8 +1145,115 @@ func toDenebLightClientHeader(
execution_branch: blck.message.body.build_proof( execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_GINDEX).get) capella.EXECUTION_PAYLOAD_GINDEX).get)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/full-node.md#modified-block_to_light_client_header
func toElectraLightClientHeader(
# `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock
): electra.LightClientHeader =
# Note that during fork transitions, `finalized_header` may still
# point to earlier forks. While Bellatrix blocks also contain an
# `ExecutionPayload` (minus `withdrawals_root`), it was not included
# in the corresponding light client data. To ensure compatibility
# with legacy data going through `upgrade_lc_header_to_capella`,
# leave out execution data.
electra.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader())
func toElectraLightClientHeader(
# `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
capella.SignedBeaconBlock | capella.TrustedSignedBeaconBlock
): electra.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload
electra.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader(),
execution: electra.ExecutionPayloadHeader(
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions_root: hash_tree_root(payload.transactions),
withdrawals_root: hash_tree_root(payload.withdrawals)),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_GINDEX).get)
func toElectraLightClientHeader(
# `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock
): electra.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload
electra.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader(),
execution: electra.ExecutionPayloadHeader(
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions_root: hash_tree_root(payload.transactions),
withdrawals_root: hash_tree_root(payload.withdrawals),
blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_GINDEX).get)
func toElectraLightClientHeader(
# `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock
): electra.LightClientHeader =
template payload: untyped = blck.message.body.execution_payload
electra.LightClientHeader(
beacon: blck.message.toBeaconBlockHeader(),
execution: electra.ExecutionPayloadHeader(
parent_hash: payload.parent_hash,
fee_recipient: payload.fee_recipient,
state_root: payload.state_root,
receipts_root: payload.receipts_root,
logs_bloom: payload.logs_bloom,
prev_randao: payload.prev_randao,
block_number: payload.block_number,
gas_limit: payload.gas_limit,
gas_used: payload.gas_used,
timestamp: payload.timestamp,
extra_data: payload.extra_data,
base_fee_per_gas: payload.base_fee_per_gas,
block_hash: payload.block_hash,
transactions_root: hash_tree_root(payload.transactions),
withdrawals_root: hash_tree_root(payload.withdrawals),
blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas,
deposit_requests_root: hash_tree_root(payload.deposit_requests),
withdrawal_requests_root: hash_tree_root(payload.withdrawal_requests),
consolidation_requests_root:
hash_tree_root(payload.consolidation_requests)),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_GINDEX).get)
func toLightClientHeader*( func toLightClientHeader*(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6) # `SomeSignedBeaconBlock`: https://github.com/nim-lang/Nim/issues/18095
blck:
phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock | phase0.SignedBeaconBlock | phase0.TrustedSignedBeaconBlock |
altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock | altair.SignedBeaconBlock | altair.TrustedSignedBeaconBlock |
bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock | bellatrix.SignedBeaconBlock | bellatrix.TrustedSignedBeaconBlock |
@ -960,9 +1261,8 @@ func toLightClientHeader*(
deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock | deneb.SignedBeaconBlock | deneb.TrustedSignedBeaconBlock |
electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock, electra.SignedBeaconBlock | electra.TrustedSignedBeaconBlock,
kind: static LightClientDataFork): auto = kind: static LightClientDataFork): auto =
when blck is electra.SignedBeaconBlock or blck is electra.TrustedSignedBeaconBlock: when kind == LightClientDataFork.Electra:
debugComment "toLightClientHeader electra missing" blck.toElectraLightClientHeader()
default(deneb.LightClientHeader)
elif kind == LightClientDataFork.Deneb: elif kind == LightClientDataFork.Deneb:
blck.toDenebLightClientHeader() blck.toDenebLightClientHeader()
elif kind == LightClientDataFork.Capella: elif kind == LightClientDataFork.Capella:
@ -990,9 +1290,13 @@ func shortLog*[
capellaData: typeof(x.capellaData.shortLog()) capellaData: typeof(x.capellaData.shortLog())
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
denebData: typeof(x.denebData.shortLog()) denebData: typeof(x.denebData.shortLog())
of LightClientDataFork.Electra:
electraData: typeof(x.electraData.shortLog())
let xKind = x.kind # Nim 1.6.12: Using `kind: x.kind` inside case is broken let xKind = x.kind # https://github.com/nim-lang/Nim/issues/23762
case xKind case xKind
of LightClientDataFork.Electra:
ResultType(kind: xKind, electraData: x.electraData.shortLog())
of LightClientDataFork.Deneb: of LightClientDataFork.Deneb:
ResultType(kind: xKind, denebData: x.denebData.shortLog()) ResultType(kind: xKind, denebData: x.denebData.shortLog())
of LightClientDataFork.Capella: of LightClientDataFork.Capella:
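The `withForky*` templates above all extend the same dispatch pattern with an Electra branch: a `case` over the fork enum that injects a `lcDataFork` constant and a `forky*` accessor template into the caller's scope. Below is a minimal, self-contained sketch of that pattern; `DataFork`, `ForkedHeader` and the simplified `withForkyHeader` are illustrative stand-ins, not the real light client definitions.

```nim
# Minimal sketch of the fork-dispatch pattern; the types and the template
# are stand-ins for illustration, not the real definitions.
type
  DataFork = enum
    None, Deneb, Electra

  ForkedHeader = object
    case kind: DataFork
    of None: discard
    of Deneb: denebData: string
    of Electra: electraData: string

template withForkyHeader(x: ForkedHeader, body: untyped): untyped =
  case x.kind
  of DataFork.Electra:
    const lcDataFork {.inject, used.} = DataFork.Electra
    template forkyHeader: untyped {.inject, used.} = x.electraData
    body
  of DataFork.Deneb:
    const lcDataFork {.inject, used.} = DataFork.Deneb
    template forkyHeader: untyped {.inject, used.} = x.denebData
    body
  of DataFork.None:
    const lcDataFork {.inject, used.} = DataFork.None
    body

when isMainModule:
  let hdr = ForkedHeader(kind: DataFork.Electra, electraData: "electra header")
  withForkyHeader(hdr):
    # The injected constant lets callers gate fork-specific fields at
    # compile time, as the real code does with `when lcDataFork >= ...`.
    when lcDataFork >= DataFork.Deneb:
      echo lcDataFork, ": ", forkyHeader
```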
View File
@ -11,7 +11,7 @@
import import
# Status libraries # Status libraries
stew/[byteutils, endians2, objects, saturation_arith], stew/[byteutils, endians2, objects],
chronicles, chronicles,
eth/common/[eth_types, eth_types_rlp], eth/common/[eth_types, eth_types_rlp],
eth/rlp, eth/trie/[db, hexary], eth/rlp, eth/trie/[db, hexary],
@ -25,7 +25,7 @@ import
export export
eth2_merkleization, forks, rlp, ssz_codec eth2_merkleization, forks, rlp, ssz_codec
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#constants # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#constants
const ETH_TO_GWEI = 1_000_000_000.Gwei const ETH_TO_GWEI = 1_000_000_000.Gwei
func toEther*(gwei: Gwei): Ether = func toEther*(gwei: Gwei): Ether =
@ -39,6 +39,9 @@ type
ExecutionTransaction* = eth_types.Transaction ExecutionTransaction* = eth_types.Transaction
ExecutionReceipt* = eth_types.Receipt ExecutionReceipt* = eth_types.Receipt
ExecutionWithdrawal* = eth_types.Withdrawal ExecutionWithdrawal* = eth_types.Withdrawal
ExecutionDepositRequest* = eth_types.DepositRequest
ExecutionWithdrawalRequest* = eth_types.WithdrawalRequest
ExecutionConsolidationRequest* = eth_types.ConsolidationRequest
ExecutionBlockHeader* = eth_types.BlockHeader ExecutionBlockHeader* = eth_types.BlockHeader
FinalityCheckpoints* = object FinalityCheckpoints* = object
@ -159,7 +162,7 @@ func compute_domain*(
result[0..3] = domain_type.data result[0..3] = domain_type.data
result[4..31] = fork_data_root.data.toOpenArray(0, 27) result[4..31] = fork_data_root.data.toOpenArray(0, 27)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#get_domain # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_domain
func get_domain*( func get_domain*(
fork: Fork, fork: Fork,
domain_type: DomainType, domain_type: DomainType,
@ -255,7 +258,7 @@ func create_blob_sidecars*(
res.add(sidecar) res.add(sidecar)
res res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#is_sync_committee_update # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool = template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool =
when update is SomeForkyLightClientUpdateWithSyncCommittee: when update is SomeForkyLightClientUpdateWithSyncCommittee:
update.next_sync_committee_branch != update.next_sync_committee_branch !=
@ -271,7 +274,7 @@ template is_finality_update*(update: SomeForkyLightClientUpdate): bool =
else: else:
false false
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
template is_next_sync_committee_known*(store: ForkyLightClientStore): bool = template is_next_sync_committee_known*(store: ForkyLightClientStore): bool =
store.next_sync_committee != store.next_sync_committee !=
static(default(typeof(store.next_sync_committee))) static(default(typeof(store.next_sync_committee)))
@ -384,7 +387,7 @@ func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch =
func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch = func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch =
update.attested_header.beacon.slot.epoch update.attested_header.beacon.slot.epoch
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#is_merge_transition_complete # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
func is_merge_transition_complete*( func is_merge_transition_complete*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState): bool = electra.BeaconState): bool =
@ -392,7 +395,7 @@ func is_merge_transition_complete*(
default(typeof(state.latest_execution_payload_header)) default(typeof(state.latest_execution_payload_header))
state.latest_execution_payload_header != defaultExecutionPayloadHeader state.latest_execution_payload_header != defaultExecutionPayloadHeader
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/sync/optimistic.md#helpers # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#helpers
func is_execution_block*(blck: SomeForkyBeaconBlock): bool = func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
when typeof(blck).kind >= ConsensusFork.Bellatrix: when typeof(blck).kind >= ConsensusFork.Bellatrix:
const defaultExecutionPayload = const defaultExecutionPayload =
@ -401,7 +404,7 @@ func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
else: else:
false false
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#is_merge_transition_block # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_block
func is_merge_transition_block( func is_merge_transition_block(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState, electra.BeaconState,
@ -417,7 +420,7 @@ func is_merge_transition_block(
not is_merge_transition_complete(state) and not is_merge_transition_complete(state) and
body.execution_payload != defaultExecutionPayload body.execution_payload != defaultExecutionPayload
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#is_execution_enabled # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_execution_enabled
func is_execution_enabled*( func is_execution_enabled*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState, electra.BeaconState,
@ -431,7 +434,7 @@ func is_execution_enabled*(
electra.SigVerifiedBeaconBlockBody): bool = electra.SigVerifiedBeaconBlockBody): bool =
is_merge_transition_block(state, body) or is_merge_transition_complete(state) is_merge_transition_block(state, body) or is_merge_transition_complete(state)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 = func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 =
# Note: This function is unsafe with respect to overflows and underflows. # Note: This function is unsafe with respect to overflows and underflows.
let slots_since_genesis = slot - GENESIS_SLOT let slots_since_genesis = slot - GENESIS_SLOT
@ -445,9 +448,10 @@ proc computeTransactionsTrieRoot*(
var tr = initHexaryTrie(newMemoryDB()) var tr = initHexaryTrie(newMemoryDB())
for i, transaction in payload.transactions: for i, transaction in payload.transactions:
try: try:
tr.put(rlp.encode(i), distinctBase(transaction)) # Already RLP encoded # Transactions are already RLP encoded
tr.put(rlp.encode(i.uint), distinctBase(transaction))
except RlpError as exc: except RlpError as exc:
doAssert false, "HexaryTrie.put failed: " & $exc.msg raiseAssert "HexaryTrie.put failed: " & $exc.msg
tr.rootHash() tr.rootHash()
func toExecutionWithdrawal*( func toExecutionWithdrawal*(
@ -468,9 +472,77 @@ proc computeWithdrawalsTrieRoot*(
var tr = initHexaryTrie(newMemoryDB()) var tr = initHexaryTrie(newMemoryDB())
for i, withdrawal in payload.withdrawals: for i, withdrawal in payload.withdrawals:
try: try:
tr.put(rlp.encode(i), rlp.encode(toExecutionWithdrawal(withdrawal))) tr.put(rlp.encode(i.uint), rlp.encode(toExecutionWithdrawal(withdrawal)))
except RlpError as exc: except RlpError as exc:
doAssert false, "HexaryTrie.put failed: " & $exc.msg raiseAssert "HexaryTrie.put failed: " & $exc.msg
tr.rootHash()
func toExecutionDepositRequest*(
request: electra.DepositRequest): ExecutionDepositRequest =
ExecutionDepositRequest(
pubkey: request.pubkey.blob,
withdrawalCredentials: request.withdrawal_credentials.data,
amount: distinctBase(request.amount),
signature: request.signature.blob,
index: request.index)
func toExecutionWithdrawalRequest*(
request: electra.WithdrawalRequest): ExecutionWithdrawalRequest =
ExecutionWithdrawalRequest(
sourceAddress: request.source_address.data,
validatorPubkey: request.validator_pubkey.blob,
amount: distinctBase(request.amount))
func toExecutionConsolidationRequest*(
request: electra.ConsolidationRequest): ExecutionConsolidationRequest =
ExecutionConsolidationRequest(
sourceAddress: request.source_address.data,
sourcePubkey: request.source_pubkey.blob,
targetPubkey: request.target_pubkey.blob)
# https://eips.ethereum.org/EIPS/eip-7685
proc computeRequestsTrieRoot*(
payload: electra.ExecutionPayload): ExecutionHash256 =
if payload.deposit_requests.len == 0 and
payload.withdrawal_requests.len == 0 and
payload.consolidation_requests.len == 0:
return EMPTY_ROOT_HASH
var
tr = initHexaryTrie(newMemoryDB())
i = 0'u64
static:
doAssert DEPOSIT_REQUEST_TYPE < WITHDRAWAL_REQUEST_TYPE
doAssert WITHDRAWAL_REQUEST_TYPE < CONSOLIDATION_REQUEST_TYPE
# EIP-6110
for request in payload.deposit_requests:
try:
tr.put(rlp.encode(i.uint), rlp.encode(
toExecutionDepositRequest(request)))
except RlpError as exc:
raiseAssert "HexaryTree.put failed: " & $exc.msg
inc i
# EIP-7002
for request in payload.withdrawal_requests:
try:
tr.put(rlp.encode(i.uint), rlp.encode(
toExecutionWithdrawalRequest(request)))
except RlpError as exc:
raiseAssert "HexaryTree.put failed: " & $exc.msg
inc i
# EIP-7251
for request in payload.consolidation_requests:
try:
tr.put(rlp.encode(i.uint), rlp.encode(
toExecutionConsolidationRequest(request)))
except RlpError as exc:
raiseAssert "HexaryTree.put failed: " & $exc.msg
inc i
tr.rootHash() tr.rootHash()
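`computeRequestsTrieRoot` above keys all three request lists into a single trie with one running index, in ascending request-type order (deposit, then withdrawal, then consolidation requests), which is what the `static` asserts guard. The toy sketch below shows only that shared-index ordering; `MockPayload` and `requestKeys` are hypothetical names, and the real function additionally RLP-encodes each request into a `HexaryTrie`.

```nim
# Toy illustration of the shared running index only; no RLP, no trie.
type MockPayload = object
  deposit_requests: seq[string]
  withdrawal_requests: seq[string]
  consolidation_requests: seq[string]

func requestKeys(payload: MockPayload): seq[(uint64, string)] =
  var i = 0'u64
  for r in payload.deposit_requests:        # EIP-6110
    result.add((i, r)); inc i
  for r in payload.withdrawal_requests:     # EIP-7002
    result.add((i, r)); inc i
  for r in payload.consolidation_requests:  # EIP-7251
    result.add((i, r)); inc i

when isMainModule:
  let p = MockPayload(
    deposit_requests: @["d0"],
    withdrawal_requests: @["w0", "w1"],
    consolidation_requests: @["c0"])
  doAssert requestKeys(p) ==
    @[(0'u64, "d0"), (1'u64, "w0"), (2'u64, "w1"), (3'u64, "c0")]
```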
proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader = proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
@ -502,6 +574,11 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
Opt.some ExecutionHash256(data: blck.parent_root.data) Opt.some ExecutionHash256(data: blck.parent_root.data)
else: else:
Opt.none(ExecutionHash256) Opt.none(ExecutionHash256)
requestsRoot =
when typeof(payload).kind >= ConsensusFork.Electra:
Opt.some payload.computeRequestsTrieRoot()
else:
Opt.none(ExecutionHash256)
ExecutionBlockHeader( ExecutionBlockHeader(
parentHash : payload.parent_hash, parentHash : payload.parent_hash,
@ -513,8 +590,8 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
logsBloom : payload.logs_bloom.data, logsBloom : payload.logs_bloom.data,
difficulty : default(DifficultyInt), difficulty : default(DifficultyInt),
number : payload.block_number, number : payload.block_number,
gasLimit : GasInt.saturate(payload.gas_limit), gasLimit : payload.gas_limit,
gasUsed : GasInt.saturate(payload.gas_used), gasUsed : payload.gas_used,
timestamp : EthTime(payload.timestamp), timestamp : EthTime(payload.timestamp),
extraData : payload.extra_data.asSeq, extraData : payload.extra_data.asSeq,
mixHash : payload.prev_randao, # EIP-4399 `mixHash` -> `prevRandao` mixHash : payload.prev_randao, # EIP-4399 `mixHash` -> `prevRandao`
@ -523,7 +600,8 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
withdrawalsRoot : withdrawalsRoot, withdrawalsRoot : withdrawalsRoot,
blobGasUsed : blobGasUsed, # EIP-4844 blobGasUsed : blobGasUsed, # EIP-4844
excessBlobGas : excessBlobGas, # EIP-4844 excessBlobGas : excessBlobGas, # EIP-4844
parentBeaconBlockRoot : parentBeaconBlockRoot) # EIP-4788 parentBeaconBlockRoot : parentBeaconBlockRoot, # EIP-4788
requestsRoot : requestsRoot) # EIP-7685
proc compute_execution_block_hash*(blck: ForkyBeaconBlock): Eth2Digest = proc compute_execution_block_hash*(blck: ForkyBeaconBlock): Eth2Digest =
rlpHash blockToBlockHeader(blck) rlpHash blockToBlockHeader(blck)
View File
@ -1380,13 +1380,13 @@ proc createWallet*(kdfKind: KdfKind,
crypto: crypto, crypto: crypto,
nextAccount: nextAccount.get(0)) nextAccount: nextAccount.get(0))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#bls_withdrawal_prefix # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#bls_withdrawal_prefix
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest = func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2digest(k.toRaw()) var bytes = eth2digest(k.toRaw())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8 bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
bytes bytes
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/deposit-contract.md#withdrawal-credentials # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/deposit-contract.md#withdrawal-credentials
func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest = func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
makeWithdrawalCredentials(k.toPubKey()) makeWithdrawalCredentials(k.toPubKey())
View File
@ -15,6 +15,21 @@ import
from ../consensus_object_pools/block_pools_types import VerifierError from ../consensus_object_pools/block_pools_types import VerifierError
export block_pools_types.VerifierError export block_pools_types.VerifierError
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/sync-protocol.md#is_valid_normalized_merkle_branch
func is_valid_normalized_merkle_branch[N](
leaf: Eth2Digest,
branch: array[N, Eth2Digest],
gindex: static GeneralizedIndex,
root: Eth2Digest): bool =
const
depth = log2trunc(gindex)
index = get_subtree_index(gindex)
num_extra = branch.len - depth
for i in 0 ..< num_extra:
if not branch[i].isZero:
return false
is_valid_merkle_branch(leaf, branch[num_extra .. ^1], depth, index, root)
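`is_valid_normalized_merkle_branch` tolerates branches longer than the depth implied by the fork-specific generalized index, provided the extra leading entries are zero hashes, and then runs the ordinary proof at `depth = log2trunc(gindex)`. A small self-contained sketch of that index arithmetic follows; the gindex value is an illustrative Altair constant, and `log2trunc`/`getSubtreeIndex` are local reimplementations rather than the project's helpers.

```nim
# Illustrative only: how depth, subtree index and the number of "extra"
# branch entries are derived from a generalized index.
import std/bitops

func log2trunc(x: uint64): int =
  63 - countLeadingZeroBits(x)

func getSubtreeIndex(gindex: uint64): uint64 =
  gindex - (1'u64 shl log2trunc(gindex))

when isMainModule:
  let gindex = 55'u64            # e.g. Altair's NEXT_SYNC_COMMITTEE_GINDEX
  doAssert log2trunc(gindex) == 5         # proof depth
  doAssert getSubtreeIndex(gindex) == 23  # leaf position at that depth
  # A 6-element branch for this gindex has one extra leading entry, which
  # must be a zero hash for the normalized check to pass.
  const branchLen = 6
  doAssert branchLen - log2trunc(gindex) == 1
```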
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
func initialize_light_client_store*( func initialize_light_client_store*(
trusted_block_root: Eth2Digest, trusted_block_root: Eth2Digest,
@ -29,13 +44,15 @@ func initialize_light_client_store*(
if hash_tree_root(bootstrap.header.beacon) != trusted_block_root: if hash_tree_root(bootstrap.header.beacon) != trusted_block_root:
return ResultType.err(VerifierError.Invalid) return ResultType.err(VerifierError.Invalid)
if not is_valid_merkle_branch( withLcDataFork(lcDataForkAtConsensusFork(
hash_tree_root(bootstrap.current_sync_committee), cfg.consensusForkAtEpoch(bootstrap.header.beacon.slot.epoch))):
bootstrap.current_sync_committee_branch, when lcDataFork > LightClientDataFork.None:
log2trunc(altair.CURRENT_SYNC_COMMITTEE_GINDEX), if not is_valid_normalized_merkle_branch(
get_subtree_index(altair.CURRENT_SYNC_COMMITTEE_GINDEX), hash_tree_root(bootstrap.current_sync_committee),
bootstrap.header.beacon.state_root): bootstrap.current_sync_committee_branch,
return ResultType.err(VerifierError.Invalid) lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX,
bootstrap.header.beacon.state_root):
return ResultType.err(VerifierError.Invalid)
return ResultType.ok(typeof(bootstrap).kind.LightClientStore( return ResultType.ok(typeof(bootstrap).kind.LightClientStore(
finalized_header: bootstrap.header, finalized_header: bootstrap.header,
@ -109,13 +126,15 @@ proc validate_light_client_update*(
finalized_root.reset() finalized_root.reset()
else: else:
return err(VerifierError.Invalid) return err(VerifierError.Invalid)
if not is_valid_merkle_branch( withLcDataFork(lcDataForkAtConsensusFork(
finalized_root, cfg.consensusForkAtEpoch(update.attested_header.beacon.slot.epoch))):
update.finality_branch, when lcDataFork > LightClientDataFork.None:
log2trunc(altair.FINALIZED_ROOT_GINDEX), if not is_valid_normalized_merkle_branch(
get_subtree_index(altair.FINALIZED_ROOT_GINDEX), finalized_root,
update.attested_header.beacon.state_root): update.finality_branch,
return err(VerifierError.Invalid) lcDataFork.FINALIZED_ROOT_GINDEX,
update.attested_header.beacon.state_root):
return err(VerifierError.Invalid)
# Verify that the `next_sync_committee`, if present, actually is the # Verify that the `next_sync_committee`, if present, actually is the
# next sync committee saved in the state of the `attested_header` # next sync committee saved in the state of the `attested_header`
@ -128,13 +147,15 @@ proc validate_light_client_update*(
if attested_period == store_period and is_next_sync_committee_known: if attested_period == store_period and is_next_sync_committee_known:
if update.next_sync_committee != store.next_sync_committee: if update.next_sync_committee != store.next_sync_committee:
return err(VerifierError.UnviableFork) return err(VerifierError.UnviableFork)
if not is_valid_merkle_branch( withLcDataFork(lcDataForkAtConsensusFork(
hash_tree_root(update.next_sync_committee), cfg.consensusForkAtEpoch(update.attested_header.beacon.slot.epoch))):
update.next_sync_committee_branch, when lcDataFork > LightClientDataFork.None:
log2trunc(altair.NEXT_SYNC_COMMITTEE_GINDEX), if not is_valid_normalized_merkle_branch(
get_subtree_index(altair.NEXT_SYNC_COMMITTEE_GINDEX), hash_tree_root(update.next_sync_committee),
update.attested_header.beacon.state_root): update.next_sync_committee_branch,
return err(VerifierError.Invalid) lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX,
update.attested_header.beacon.state_root):
return err(VerifierError.Invalid)
# Verify sync committee aggregate signature # Verify sync committee aggregate signature
let sync_committee = let sync_committee =
View File
@ -44,7 +44,6 @@ type
List[SignedBLSToExecutionChange, List[SignedBLSToExecutionChange,
Limit MAX_BLS_TO_EXECUTION_CHANGES] Limit MAX_BLS_TO_EXECUTION_CHANGES]
blob_kzg_commitments*: KzgCommitments # [New in Deneb] blob_kzg_commitments*: KzgCommitments # [New in Deneb]
consolidations*: List[SignedConsolidation, Limit MAX_CONSOLIDATIONS]
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#blindedbeaconblock
BlindedBeaconBlock* = object BlindedBeaconBlock* = object
@ -142,11 +141,12 @@ func toSignedBlindedBeaconBlock*(blck: electra.SignedBeaconBlock):
hash_tree_root(blck.message.body.execution_payload.transactions), hash_tree_root(blck.message.body.execution_payload.transactions),
withdrawals_root: withdrawals_root:
hash_tree_root(blck.message.body.execution_payload.withdrawals), hash_tree_root(blck.message.body.execution_payload.withdrawals),
deposit_receipts_root: hash_tree_root( deposit_requests_root: hash_tree_root(
blck.message.body.execution_payload.deposit_receipts), blck.message.body.execution_payload.deposit_requests),
withdrawal_requests_root: withdrawal_requests_root: hash_tree_root(
hash_tree_root( blck.message.body.execution_payload.withdrawal_requests),
blck.message.body.execution_payload.withdrawal_requests)), consolidation_requests_root: hash_tree_root(
blck.message.body.execution_payload.consolidation_requests)),
bls_to_execution_changes: blck.message.body.bls_to_execution_changes, bls_to_execution_changes: blck.message.body.bls_to_execution_changes,
blob_kzg_commitments: blck.message.body.blob_kzg_commitments)), blob_kzg_commitments: blck.message.body.blob_kzg_commitments)),
signature: blck.signature) signature: blck.signature)
View File
@ -14,8 +14,8 @@ import
export base export base
const const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages
topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy" topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy"
topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy" topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy"
topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy" topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy"
@ -63,7 +63,7 @@ func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string = func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages
func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string = func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix
@ -197,7 +197,7 @@ func getTargetGossipState*(
targetForks targetForks
func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] = func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#sync-committee-subnet-stability # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
if epoch.is_sync_committee_period(): if epoch.is_sync_committee_period():
return Opt.some 0'u64 return Opt.some 0'u64
let epochsBefore = let epochsBefore =
@ -216,7 +216,7 @@ func getSyncSubnets*(
if not nodeHasPubkey(pubkey): if not nodeHasPubkey(pubkey):
continue continue
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#broadcast-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-message
# The first quarter of the pubkeys map to subnet 0, the second quarter to # The first quarter of the pubkeys map to subnet 0, the second quarter to
# subnet 1, the third quarter to subnet 2 and the final quarter to subnet # subnet 1, the third quarter to subnet 2 and the final quarter to subnet
# 3. # 3.
View File
@ -787,7 +787,7 @@ proc readRuntimeConfig*(
"MAX_REQUEST_BLOB_SIDECARS" "MAX_REQUEST_BLOB_SIDECARS"
checkCompatibility BLOB_SIDECAR_SUBNET_COUNT checkCompatibility BLOB_SIDECAR_SUBNET_COUNT
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/fork-choice.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#configuration
# Isn't being used as a preset in the usual way: at any time, there's one correct value # Isn't being used as a preset in the usual way: at any time, there's one correct value
checkCompatibility PROPOSER_SCORE_BOOST checkCompatibility PROPOSER_SCORE_BOOST
checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Gnosis preset - Electra (Gnosis version not available yet; EF mainnet for now) # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/presets/mainnet/electra.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml
const const
# Gwei values # Gwei values
# --------------------------------------------------------------- # ---------------------------------------------------------------
@ -40,12 +40,12 @@ const
# `uint64(2**3)` (= 8) # `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 MAX_ATTESTATIONS_ELECTRA*: uint64 = 8
# `uint64(2**0)` (= 1) # `uint64(2**0)` (= 1)
MAX_CONSOLIDATIONS*: uint64 = 1 MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1
# Execution # Execution
# --------------------------------------------------------------- # ---------------------------------------------------------------
# 2**13 (= 8192) receipts # 2**13 (= 8192) deposit requests
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD* = 8192 MAX_DEPOSIT_REQUESTS_PER_PAYLOAD* = 8192
# 2**4 (= 16) withdrawal requests # 2**4 (= 16) withdrawal requests
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16 MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16
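The renamed Electra limits above are documented as powers of two in the comments; a trivial standalone check of that arithmetic:

```nim
# Sanity check of the power-of-two values quoted in the preset comments.
doAssert 1'u64 shl 13 == 8192  # MAX_DEPOSIT_REQUESTS_PER_PAYLOAD
doAssert 1'u64 shl 4 == 16     # MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD
doAssert 1'u64 shl 0 == 1      # MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD
```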
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Altair # Mainnet preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/altair.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Bellatrix # Mainnet preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/bellatrix.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Capella # Mainnet preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/capella.yaml
const const
# Max operations per block # Max operations per block
# --------------------------------------------------------------- # ---------------------------------------------------------------
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Deneb # Mainnet preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/mainnet/deneb.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/deneb.yaml
const const
# `uint64(4096)` # `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096 FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Electra preset - Electra # Electra preset - Electra
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/presets/mainnet/electra.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml
const const
# Gwei values # Gwei values
# --------------------------------------------------------------- # ---------------------------------------------------------------
@ -40,12 +40,12 @@ const
# `uint64(2**3)` (= 8) # `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 MAX_ATTESTATIONS_ELECTRA*: uint64 = 8
# `uint64(2**0)` (= 1) # `uint64(2**0)` (= 1)
MAX_CONSOLIDATIONS*: uint64 = 1 MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1
# Execution # Execution
# --------------------------------------------------------------- # ---------------------------------------------------------------
# 2**13 (= 8192) receipts # 2**13 (= 8192) deposit requests
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD* = 8192 MAX_DEPOSIT_REQUESTS_PER_PAYLOAD* = 8192
# 2**4 (= 16) withdrawal requests # 2**4 (= 16) withdrawal requests
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16 MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 16
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Altair # Minimal preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/minimal/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/altair.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Bellatrix # Minimal preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/minimal/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/bellatrix.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Capella # Minimal preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/minimal/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/capella.yaml
const const
# Max operations per block # Max operations per block
# --------------------------------------------------------------- # ---------------------------------------------------------------
View File
@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Deneb # Minimal preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/presets/minimal/deneb.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/deneb.yaml
const const
# `uint64(4096)` # `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096 FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096
View File
@ -40,12 +40,12 @@ const
# `uint64(2**3)` (= 8) # `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA*: uint64 = 8 MAX_ATTESTATIONS_ELECTRA*: uint64 = 8
# `uint64(2**0)` (= 1) # `uint64(2**0)` (= 1)
MAX_CONSOLIDATIONS*: uint64 = 1 MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD*: uint64 = 1
# Execution # Execution
# --------------------------------------------------------------- # ---------------------------------------------------------------
# [customized] # [customized]
MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD* = 4 MAX_DEPOSIT_REQUESTS_PER_PAYLOAD* = 4
# [customized] 2**1 (= 2) withdrawal requests # [customized] 2**1 (= 2) withdrawal requests
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 2 MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD* = 2
View File
@ -269,7 +269,7 @@ proc verify_voluntary_exit_signature*(
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
func compute_sync_committee_message_signing_root*( func compute_sync_committee_message_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest = slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest =
@ -304,7 +304,7 @@ proc verify_sync_committee_signature*(
blsFastAggregateVerify(pubkeys, signing_root.data, signature) blsFastAggregateVerify(pubkeys, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
func compute_sync_committee_selection_proof_signing_root*( func compute_sync_committee_selection_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest = slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest =
@ -335,7 +335,7 @@ proc verify_sync_committee_selection_proof*(
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#signature # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#signature
func compute_contribution_and_proof_signing_root*( func compute_contribution_and_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
msg: ContributionAndProof): Eth2Digest = msg: ContributionAndProof): Eth2Digest =
@ -353,7 +353,7 @@ proc get_contribution_and_proof_signature*(
blsSign(privkey, signing_root.data) blsSign(privkey, signing_root.data)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
func is_sync_committee_aggregator*(signature: ValidatorSig): bool = func is_sync_committee_aggregator*(signature: ValidatorSig): bool =
let let
signatureDigest = eth2digest(signature.blob) signatureDigest = eth2digest(signature.blob)
@ -393,7 +393,7 @@ proc verify_builder_signature*(
let signing_root = compute_builder_signing_root(fork, msg) let signing_root = compute_builder_signing_root(fork, msg)
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#new-process_bls_to_execution_change # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
func compute_bls_to_execution_change_signing_root*( func compute_bls_to_execution_change_signing_root*(
genesisFork: Fork, genesis_validators_root: Eth2Digest, genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: BLSToExecutionChange): Eth2Digest = msg: BLSToExecutionChange): Eth2Digest =
@ -421,23 +421,3 @@ proc verify_bls_to_execution_change_signature*(
let signing_root = compute_bls_to_execution_change_signing_root( let signing_root = compute_bls_to_execution_change_signing_root(
genesisFork, genesis_validators_root, msg.message) genesisFork, genesis_validators_root, msg.message)
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
func compute_consolidation_signing_root(
genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: Consolidation): Eth2Digest =
# Uses genesis fork version regardless
doAssert genesisFork.current_version == genesisFork.previous_version
let domain = compute_domain(
DOMAIN_CONSOLIDATION, genesisFork.current_version,
genesis_validators_root=genesis_validators_root)
compute_signing_root(msg, domain)
proc verify_consolidation_signature*(
genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: SignedConsolidation | TrustedSignedConsolidation,
pubkeys: openArray[ValidatorPubKey]): bool =
withTrust(msg.signature):
let signing_root = compute_consolidation_signing_root(
genesisFork, genesis_validators_root, msg.message)
blsFastAggregateVerify(pubkeys, signing_root.data, msg.signature)
View File
@ -83,7 +83,7 @@ func aggregateAttesters(
# Aggregation spec requires non-empty collection # Aggregation spec requires non-empty collection
# - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04 # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
# Consensus specs require at least one attesting index in attestation # Consensus specs require at least one attesting index in attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_valid_indexed_attestation # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
return err("aggregateAttesters: no attesting indices") return err("aggregateAttesters: no attesting indices")
let let
@ -462,10 +462,6 @@ proc collectSignatureSets*(
genesis_fork, genesis_validators_root, bls_change.message, genesis_fork, genesis_validators_root, bls_change.message,
validator_pubkey, sig) validator_pubkey, sig)
block:
# 9. Consolidations
debugComment "check consolidations signatures"
ok() ok()
proc batchVerify*(verifier: var BatchVerifier, sigs: openArray[SignatureSet]): bool = proc batchVerify*(verifier: var BatchVerifier, sigs: openArray[SignatureSet]): bool =

View File

@ -361,12 +361,11 @@ func partialBeaconBlock*(
deposits: seq[Deposit], deposits: seq[Deposit],
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
execution_payload: ForkyExecutionPayloadForSigning, execution_payload: ForkyExecutionPayloadForSigning
consolidations: openArray[SignedConsolidation]
): auto = ): auto =
const consensusFork = typeof(state).kind const consensusFork = typeof(state).kind
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#preparing-for-a-beaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#preparing-for-a-beaconblock
var res = consensusFork.BeaconBlock( var res = consensusFork.BeaconBlock(
slot: state.data.slot, slot: state.data.slot,
proposer_index: proposer_index.uint64, proposer_index: proposer_index.uint64,
@ -386,7 +385,7 @@ func partialBeaconBlock*(
when consensusFork >= ConsensusFork.Altair: when consensusFork >= ConsensusFork.Altair:
res.body.sync_aggregate = sync_aggregate res.body.sync_aggregate = sync_aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/validator.md#block-proposal # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/validator.md#block-proposal
when consensusFork >= ConsensusFork.Bellatrix: when consensusFork >= ConsensusFork.Bellatrix:
res.body.execution_payload = execution_payload.executionPayload res.body.execution_payload = execution_payload.executionPayload
@ -412,8 +411,7 @@ func partialBeaconBlock*(
deposits: seq[Deposit], deposits: seq[Deposit],
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
execution_payload: ForkyExecutionPayloadForSigning, execution_payload: ForkyExecutionPayloadForSigning
consolidations: seq[SignedConsolidation],
): auto = ): auto =
const consensusFork = typeof(state).kind const consensusFork = typeof(state).kind
@ -436,10 +434,7 @@ func partialBeaconBlock*(
sync_aggregate: sync_aggregate, sync_aggregate: sync_aggregate,
execution_payload: execution_payload.executionPayload, execution_payload: execution_payload.executionPayload,
bls_to_execution_changes: validator_changes.bls_to_execution_changes, bls_to_execution_changes: validator_changes.bls_to_execution_changes,
blob_kzg_commitments: execution_payload.blobsBundle.commitments, blob_kzg_commitments: execution_payload.blobsBundle.commitments))
consolidations:
List[SignedConsolidation, Limit MAX_CONSOLIDATIONS].init(
consolidations)))
proc makeBeaconBlockWithRewards*( proc makeBeaconBlockWithRewards*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
@ -453,7 +448,6 @@ proc makeBeaconBlockWithRewards*(
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
executionPayload: ForkyExecutionPayloadForSigning, executionPayload: ForkyExecutionPayloadForSigning,
consolidations: seq[SignedConsolidation],
rollback: RollbackForkedHashedProc, rollback: RollbackForkedHashedProc,
cache: var StateCache, cache: var StateCache,
# TODO: # TODO:
@ -480,7 +474,7 @@ proc makeBeaconBlockWithRewards*(
partialBeaconBlock( partialBeaconBlock(
cfg, state.`kind Data`, proposer_index, randao_reveal, eth1_data, cfg, state.`kind Data`, proposer_index, randao_reveal, eth1_data,
graffiti, attestations, deposits, validator_changes, sync_aggregate, graffiti, attestations, deposits, validator_changes, sync_aggregate,
executionPayload, consolidations)) executionPayload))
let res = process_block( let res = process_block(
cfg, state.`kind Data`.data, blck.`kind Data`.asSigVerified(), cfg, state.`kind Data`.data, blck.`kind Data`.asSigVerified(),
@ -524,7 +518,7 @@ proc makeBeaconBlockWithRewards*(
transactions_root.get transactions_root.get
when executionPayload is electra.ExecutionPayloadForSigning: when executionPayload is electra.ExecutionPayloadForSigning:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#beaconblockbody
forkyState.data.latest_block_header.body_root = hash_tree_root( forkyState.data.latest_block_header.body_root = hash_tree_root(
[hash_tree_root(randao_reveal), [hash_tree_root(randao_reveal),
hash_tree_root(eth1_data), hash_tree_root(eth1_data),
@ -539,9 +533,7 @@ proc makeBeaconBlockWithRewards*(
hash_tree_root(sync_aggregate), hash_tree_root(sync_aggregate),
execution_payload_root.get, execution_payload_root.get,
hash_tree_root(validator_changes.bls_to_execution_changes), hash_tree_root(validator_changes.bls_to_execution_changes),
hash_tree_root(kzg_commitments.get), hash_tree_root(kzg_commitments.get)
hash_tree_root(List[SignedConsolidation, Limit MAX_CONSOLIDATIONS].init(
consolidations))
]) ])
else: else:
raiseAssert "Attempt to use non-Electra payload with post-Deneb state" raiseAssert "Attempt to use non-Electra payload with post-Deneb state"
@ -584,7 +576,6 @@ proc makeBeaconBlock*(
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
executionPayload: ForkyExecutionPayloadForSigning, executionPayload: ForkyExecutionPayloadForSigning,
consolidations: seq[SignedConsolidation],
rollback: RollbackForkedHashedProc, cache: var StateCache, rollback: RollbackForkedHashedProc, cache: var StateCache,
verificationFlags: UpdateFlags, verificationFlags: UpdateFlags,
transactions_root: Opt[Eth2Digest], transactions_root: Opt[Eth2Digest],
@ -595,7 +586,7 @@ proc makeBeaconBlock*(
? makeBeaconBlockWithRewards( ? makeBeaconBlockWithRewards(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, validator_changes, sync_aggregate, attestations, deposits, validator_changes, sync_aggregate,
executionPayload, consolidations, rollback, cache, verificationFlags, executionPayload, rollback, cache, verificationFlags,
transactions_root, execution_payload_root, kzg_commitments) transactions_root, execution_payload_root, kzg_commitments)
ok(blockAndRewards.blck) ok(blockAndRewards.blck)
@ -608,13 +599,12 @@ proc makeBeaconBlock*(
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
executionPayload: ForkyExecutionPayloadForSigning, executionPayload: ForkyExecutionPayloadForSigning,
consolidations: seq[SignedConsolidation],
rollback: RollbackForkedHashedProc, cache: var StateCache): rollback: RollbackForkedHashedProc, cache: var StateCache):
Result[ForkedBeaconBlock, cstring] = Result[ForkedBeaconBlock, cstring] =
makeBeaconBlock( makeBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, validator_changes, sync_aggregate, attestations, deposits, validator_changes, sync_aggregate,
executionPayload, consolidations, rollback, cache, executionPayload, rollback, cache,
verificationFlags = {}, transactions_root = Opt.none Eth2Digest, verificationFlags = {}, transactions_root = Opt.none Eth2Digest,
execution_payload_root = Opt.none Eth2Digest, execution_payload_root = Opt.none Eth2Digest,
kzg_commitments = Opt.none KzgCommitments) kzg_commitments = Opt.none KzgCommitments)
@ -628,14 +618,13 @@ proc makeBeaconBlock*(
validator_changes: BeaconBlockValidatorChanges, validator_changes: BeaconBlockValidatorChanges,
sync_aggregate: SyncAggregate, sync_aggregate: SyncAggregate,
executionPayload: ForkyExecutionPayloadForSigning, executionPayload: ForkyExecutionPayloadForSigning,
consolidations: seq[SignedConsolidation],
rollback: RollbackForkedHashedProc, rollback: RollbackForkedHashedProc,
cache: var StateCache, verificationFlags: UpdateFlags): cache: var StateCache, verificationFlags: UpdateFlags):
Result[ForkedBeaconBlock, cstring] = Result[ForkedBeaconBlock, cstring] =
makeBeaconBlock( makeBeaconBlock(
cfg, state, proposer_index, randao_reveal, eth1_data, graffiti, cfg, state, proposer_index, randao_reveal, eth1_data, graffiti,
attestations, deposits, validator_changes, sync_aggregate, attestations, deposits, validator_changes, sync_aggregate,
executionPayload, consolidations, rollback, cache, executionPayload, rollback, cache,
verificationFlags = verificationFlags, verificationFlags = verificationFlags,
transactions_root = Opt.none Eth2Digest, transactions_root = Opt.none Eth2Digest,
execution_payload_root = Opt.none Eth2Digest, execution_payload_root = Opt.none Eth2Digest,

View File

@ -10,8 +10,8 @@
# State transition - block processing, as described in # State transition - block processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#block-processing
# #
@ -135,7 +135,7 @@ func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
(validator.activation_epoch <= epoch) and (validator.activation_epoch <= epoch) and
(epoch < validator.withdrawable_epoch) (epoch < validator.withdrawable_epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#proposer-slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#proposer-slashings
proc check_proposer_slashing*( proc check_proposer_slashing*(
state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing, state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing,
flags: UpdateFlags): flags: UpdateFlags):
@ -397,22 +397,22 @@ proc process_deposit*(
apply_deposit(cfg, state, bloom_filter, deposit.data, flags) apply_deposit(cfg, state, bloom_filter, deposit.data, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-process_deposit_receipt # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_deposit_request
func process_deposit_receipt*( func process_deposit_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
bloom_filter: var PubkeyBloomFilter, deposit_receipt: DepositReceipt, bloom_filter: var PubkeyBloomFilter, deposit_request: DepositRequest,
flags: UpdateFlags): Result[void, cstring] = flags: UpdateFlags): Result[void, cstring] =
# Set deposit receipt start index # Set deposit request start index
if state.deposit_receipts_start_index == if state.deposit_requests_start_index ==
UNSET_DEPOSIT_RECEIPTS_START_INDEX: UNSET_DEPOSIT_REQUESTS_START_INDEX:
state.deposit_receipts_start_index = deposit_receipt.index state.deposit_requests_start_index = deposit_request.index
apply_deposit( apply_deposit(
cfg, state, bloom_filter, DepositData( cfg, state, bloom_filter, DepositData(
pubkey: deposit_receipt.pubkey, pubkey: deposit_request.pubkey,
withdrawal_credentials: deposit_receipt.withdrawal_credentials, withdrawal_credentials: deposit_request.withdrawal_credentials,
amount: deposit_receipt.amount, amount: deposit_request.amount,
signature: deposit_receipt.signature), flags) signature: deposit_request.signature), flags)
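Renamed from process_deposit_receipt, process_deposit_request records where execution-layer deposit requests begin and then reuses the ordinary deposit path. A rough Python sketch of that flow; apply_deposit is passed in rather than imported, and the UNSET sentinel is assumed to be 2**64 - 1 as in the Electra spec:

```python
UNSET_DEPOSIT_REQUESTS_START_INDEX = 2**64 - 1  # sentinel, assumed per the Electra spec

def process_deposit_request(state, deposit_request, apply_deposit) -> None:
    # Record where execution-layer deposit requests start so the legacy Eth1
    # deposit mechanism can be switched off once it catches up (see the
    # eth1_deposit_index_limit computation later in this diff).
    if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
        state.deposit_requests_start_index = deposit_request.index
    # Funnel the request through the same path as legacy deposits.
    apply_deposit(
        state,
        pubkey=deposit_request.pubkey,
        withdrawal_credentials=deposit_request.withdrawal_credentials,
        amount=deposit_request.amount,
        signature=deposit_request.signature,
    )
```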
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#voluntary-exits # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#modified-process_voluntary_exit # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#modified-process_voluntary_exit
@ -507,13 +507,12 @@ proc process_bls_to_execution_change*(
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/electra/beacon-chain.md#new-process_execution_layer_withdrawal_request # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_withdrawal_request
func process_execution_layer_withdrawal_request*( func process_withdrawal_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
execution_layer_withdrawal_request: ExecutionLayerWithdrawalRequest, withdrawal_request: WithdrawalRequest, cache: var StateCache) =
cache: var StateCache) =
let let
amount = execution_layer_withdrawal_request.amount amount = withdrawal_request.amount
is_full_exit_request = amount == static(FULL_EXIT_REQUEST_AMOUNT.Gwei) is_full_exit_request = amount == static(FULL_EXIT_REQUEST_AMOUNT.Gwei)
# If partial withdrawal queue is full, only full exits are processed # If partial withdrawal queue is full, only full exits are processed
@ -522,7 +521,8 @@ func process_execution_layer_withdrawal_request*(
return return
let let
request_pubkey = execution_layer_withdrawal_request.validator_pubkey request_pubkey = withdrawal_request.validator_pubkey
# Verify pubkey exists
index = findValidatorIndex(state, request_pubkey).valueOr: index = findValidatorIndex(state, request_pubkey).valueOr:
return return
validator = state.validators.item(index) validator = state.validators.item(index)
@ -532,7 +532,7 @@ func process_execution_layer_withdrawal_request*(
has_correct_credential = has_execution_withdrawal_credential(validator) has_correct_credential = has_execution_withdrawal_credential(validator)
is_correct_source_address = is_correct_source_address =
validator.withdrawal_credentials.data.toOpenArray(12, 31) == validator.withdrawal_credentials.data.toOpenArray(12, 31) ==
execution_layer_withdrawal_request.source_address.data withdrawal_request.source_address.data
if not (has_correct_credential and is_correct_source_address): if not (has_correct_credential and is_correct_source_address):
return return
@ -588,67 +588,66 @@ func process_execution_layer_withdrawal_request*(
withdrawable_epoch: withdrawable_epoch, withdrawable_epoch: withdrawable_epoch,
)) ))
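process_withdrawal_request (formerly process_execution_layer_withdrawal_request) validates an EIP-7002 request and silently ignores anything that does not pass, instead of failing the block. A condensed Python sketch of the visible validation gate; find_validator_index and has_execution_withdrawal_credential are injected stand-ins, and FULL_EXIT_REQUEST_AMOUNT is assumed to be 0 Gwei per EIP-7002:

```python
FULL_EXIT_REQUEST_AMOUNT = 0  # Gwei; assumed per EIP-7002

def process_withdrawal_request(state, request, find_validator_index,
                               has_execution_withdrawal_credential) -> None:
    # Invalid or currently unprocessable requests are ignored rather than
    # treated as block errors.
    is_full_exit_request = request.amount == FULL_EXIT_REQUEST_AMOUNT
    # If the partial-withdrawal queue is full, only full exits are processed
    # (that queue check is elided here).
    index = find_validator_index(state, request.validator_pubkey)
    if index is None:
        return
    validator = state.validators[index]
    # The request's source address must match bytes 12..31 of the validator's
    # withdrawal credentials, and those credentials must be execution-type.
    has_correct_credential = has_execution_withdrawal_credential(validator)
    is_correct_source_address = (
        validator.withdrawal_credentials[12:] == request.source_address)
    if not (has_correct_credential and is_correct_source_address):
        return
    # ... full exits (is_full_exit_request) vs partial withdrawals continue as in the diff ...
```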
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#consolidations # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_consolidation_request
proc process_consolidation*( proc process_consolidation_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
signed_consolidation: SignedConsolidation | TrustedSignedConsolidation, consolidation_request: ConsolidationRequest,
cache: var StateCache): Result[void, cstring] = cache: var StateCache) =
# If the pending consolidations queue is full, no consolidations are allowed # If the pending consolidations queue is full, consolidation requests are
# in the block # ignored
if not(lenu64(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT): if not(lenu64(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT):
return err("Consolidation: too many pending consolidations already") return
# If there is too little available consolidation churn limit, no # If there is too little available consolidation churn limit, consolidation
# consolidations are allowed in the block # requests are ignored
if not (get_consolidation_churn_limit(cfg, state, cache) > if not (get_consolidation_churn_limit(cfg, state, cache) >
static(MIN_ACTIVATION_BALANCE.Gwei)): static(MIN_ACTIVATION_BALANCE.Gwei)):
return err("Consolidation: insufficient available consolidation churn limit") return
let consolidation = signed_consolidation.message
# Verify that source != target, so a consolidation cannot be used as an exit.
if not(consolidation.source_index != consolidation.target_index):
return err("Consolidation: a consolidation cannot be used as an exit")
let let
source_validator = addr state.validators.mitem(consolidation.source_index) # Verify pubkeys exists
target_validator = state.validators.item(consolidation.target_index) source_index =
findValidatorIndex(state, consolidation_request.source_pubkey).valueOr:
return
target_index =
findValidatorIndex(state, consolidation_request.target_pubkey).valueOr:
return
# Verify that source != target, so a consolidation cannot be used as an exit.
if source_index == target_index:
return
let
source_validator = addr state.validators.mitem(source_index)
target_validator = state.validators.item(target_index)
# Verify source withdrawal credentials
let
has_correct_credential =
has_execution_withdrawal_credential(source_validator[])
is_correct_source_address =
source_validator.withdrawal_credentials.data.toOpenArray(12, 31) ==
consolidation_request.source_address.data
if not (has_correct_credential and is_correct_source_address):
return
# Verify that target has execution withdrawal credentials
if not has_execution_withdrawal_credential(target_validator):
return
# Verify the source and the target are active # Verify the source and the target are active
let current_epoch = get_current_epoch(state) let current_epoch = get_current_epoch(state)
if not is_active_validator(source_validator[], current_epoch): if not is_active_validator(source_validator[], current_epoch):
return err("Consolidation: source validator not active") return
if not is_active_validator(target_validator, current_epoch): if not is_active_validator(target_validator, current_epoch):
return err("Consolidation: target validator not active") return
# Verify exits for source and target have not been initiated # Verify exits for source and target have not been initiated
if not (source_validator[].exit_epoch == FAR_FUTURE_EPOCH): if source_validator[].exit_epoch != FAR_FUTURE_EPOCH:
return err("Consolidation: exit for source validator already initiated") return
if not (target_validator.exit_epoch == FAR_FUTURE_EPOCH): if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
return err("Consolidation: exit for target validator already initiated") return
# Consolidations must specify an epoch when they become valid; they are not
# valid before then
if not (current_epoch >= consolidation.epoch):
return err("Consolidation: consolidation not valid before specified epoch")
# Verify the source and the target have Execution layer withdrawal credentials
if not has_execution_withdrawal_credential(source_validator[]):
return err("Consolidation: source doesn't have execution layer withdrawal credentials")
if not has_execution_withdrawal_credential(target_validator):
return err("Consolidation: target doesn't have execution layer withdrawal credentials")
# Verify the same withdrawal address
if not (source_validator[].withdrawal_credentials.data.toOpenArray(12, 31) ==
target_validator.withdrawal_credentials.data.toOpenArray(12, 31)):
return err("Consolidation: source and target don't have same withdrawal address")
# Verify consolidation is signed by the source and the target
if not verify_consolidation_signature(
cfg.genesisFork, state.genesis_validators_root, signed_consolidation,
[source_validator[].pubkey, target_validator.pubkey]):
return err("Consolidation: invalid signature")
# Initiate source validator exit and append pending consolidation # Initiate source validator exit and append pending consolidation
source_validator[].exit_epoch = compute_consolidation_epoch_and_update_churn( source_validator[].exit_epoch = compute_consolidation_epoch_and_update_churn(
@ -657,10 +656,7 @@ proc process_consolidation*(
source_validator[].exit_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY source_validator[].exit_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
debugComment "check HashList add return value" debugComment "check HashList add return value"
discard state.pending_consolidations.add(PendingConsolidation( discard state.pending_consolidations.add(PendingConsolidation(
source_index: consolidation.source_index, source_index: source_index.uint64, target_index: target_index.uint64))
target_index: consolidation.target_index))
ok()
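The reworked process_consolidation_request drops the signed, block-level Consolidation flow entirely: both pubkeys must resolve to known validators, source and target must differ, the source's withdrawal credentials must match the request's source address, both validators need execution credentials, must be active, and must not have an exit initiated; any failed check simply drops the request. A compact Python sketch of that gate; the helper parameters (pending_limit, churn_available, find_validator_index, initiate_exit, ...) are assumptions mirroring the diff, not the repo's API:

```python
from dataclasses import dataclass

@dataclass
class PendingConsolidation:
    source_index: int
    target_index: int

def process_consolidation_request(state, request, *, pending_limit, churn_available,
                                  find_validator_index, has_execution_withdrawal_credential,
                                  is_active_validator, current_epoch, far_future_epoch,
                                  initiate_exit) -> None:
    # A full queue or insufficient consolidation churn silently drops the request.
    if len(state.pending_consolidations) >= pending_limit or not churn_available:
        return
    source_index = find_validator_index(state, request.source_pubkey)
    target_index = find_validator_index(state, request.target_pubkey)
    # Both pubkeys must be known, and a consolidation may not be used as an exit.
    if source_index is None or target_index is None or source_index == target_index:
        return
    source = state.validators[source_index]
    target = state.validators[target_index]
    # Source credentials must be execution-type and match the request's source
    # address; the target only needs execution-type credentials.
    if not (has_execution_withdrawal_credential(source)
            and source.withdrawal_credentials[12:] == request.source_address
            and has_execution_withdrawal_credential(target)):
        return
    # Both must be active and must not already have an exit initiated.
    if not (is_active_validator(source, current_epoch)
            and is_active_validator(target, current_epoch)):
        return
    if source.exit_epoch != far_future_epoch or target.exit_epoch != far_future_epoch:
        return
    # Initiate the source's exit and queue the consolidation for epoch processing.
    initiate_exit(state, source_index)
    state.pending_consolidations.append(
        PendingConsolidation(source_index=source_index, target_index=target_index))
```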
type type
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.5.0#/Rewards/getBlockRewards # https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.5.0#/Rewards/getBlockRewards
@ -672,7 +668,7 @@ type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#operations # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#operations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#modified-process_operations # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#modified-process_operations
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#modified-process_operations # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#operations
proc process_operations( proc process_operations(
cfg: RuntimeConfig, state: var ForkyBeaconState, cfg: RuntimeConfig, state: var ForkyBeaconState,
body: SomeForkyBeaconBlockBody, base_reward_per_increment: Gwei, body: SomeForkyBeaconBlockBody, base_reward_per_increment: Gwei,
@ -683,7 +679,7 @@ proc process_operations(
# Disable former deposit mechanism once all prior deposits are processed # Disable former deposit mechanism once all prior deposits are processed
let let
eth1_deposit_index_limit = eth1_deposit_index_limit =
min(state.eth1_data.deposit_count, state.deposit_receipts_start_index) min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
req_deposits = req_deposits =
if state.eth1_deposit_index < eth1_deposit_index_limit: if state.eth1_deposit_index < eth1_deposit_index_limit:
min( min(
@ -733,17 +729,17 @@ proc process_operations(
for op in body.bls_to_execution_changes: for op in body.bls_to_execution_changes:
? process_bls_to_execution_change(cfg, state, op) ? process_bls_to_execution_change(cfg, state, op)
# [New in Electra:EIP7002:EIP7251]
when typeof(body).kind >= ConsensusFork.Electra: when typeof(body).kind >= ConsensusFork.Electra:
for op in body.execution_payload.withdrawal_requests: for op in body.execution_payload.deposit_requests:
process_execution_layer_withdrawal_request( debugComment "combine with previous Bloom filter construction"
cfg, state, op, cache)
for op in body.execution_payload.deposit_receipts:
debugComment "combine with previous bloom filter construction"
let bloom_filter = constructBloomFilter(state.validators.asSeq) let bloom_filter = constructBloomFilter(state.validators.asSeq)
? process_deposit_receipt(cfg, state, bloom_filter[], op, {}) ? process_deposit_request(cfg, state, bloom_filter[], op, {})
for op in body.consolidations: for op in body.execution_payload.withdrawal_requests:
? process_consolidation(cfg, state, op, cache) # [New in Electra:EIP7002:7251]
process_withdrawal_request(cfg, state, op, cache)
for op in body.execution_payload.consolidation_requests:
# [New in Electra:EIP7251]
process_consolidation_request(cfg, state, op, cache)
ok(operations_rewards) ok(operations_rewards)
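The Electra branch of process_operations now iterates three request lists taken from the execution payload, in a fixed order: deposit requests (EIP-6110), withdrawal requests (EIP-7002/7251), then consolidation requests (EIP-7251); the old body.consolidations loop is gone. A short Python sketch of that ordering; process_electra_requests is a made-up wrapper name and the processing functions are passed in as parameters:

```python
def process_electra_requests(cfg, state, body, cache, *,
                             construct_bloom_filter,
                             process_deposit_request,
                             process_withdrawal_request,
                             process_consolidation_request):
    # Deposit requests first. The diff rebuilds the pubkey Bloom filter for each
    # request and leaves a debugComment to merge it with the one built earlier
    # for legacy deposits.
    for op in body.execution_payload.deposit_requests:        # [New in Electra:EIP6110]
        bloom_filter = construct_bloom_filter(state.validators)
        process_deposit_request(cfg, state, bloom_filter, op)  # errors propagate ('?' in the diff)
    # Withdrawal and consolidation requests ignore invalid entries instead of
    # failing the block, so nothing is returned from them.
    for op in body.execution_payload.withdrawal_requests:     # [New in Electra:EIP7002:EIP7251]
        process_withdrawal_request(cfg, state, op, cache)
    for op in body.execution_payload.consolidation_requests:  # [New in Electra:EIP7251]
        process_consolidation_request(cfg, state, op, cache)
```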
@ -971,7 +967,7 @@ type SomeElectraBeaconBlockBody =
electra.BeaconBlockBody | electra.SigVerifiedBeaconBlockBody | electra.BeaconBlockBody | electra.SigVerifiedBeaconBlockBody |
electra.TrustedBeaconBlockBody electra.TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#modified-process_execution_payload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-process_execution_payload
proc process_execution_payload*( proc process_execution_payload*(
state: var electra.BeaconState, body: SomeElectraBeaconBlockBody, state: var electra.BeaconState, body: SomeElectraBeaconBlockBody,
notify_new_payload: electra.ExecutePayload): Result[void, cstring] = notify_new_payload: electra.ExecutePayload): Result[void, cstring] =
@ -1018,20 +1014,33 @@ proc process_execution_payload*(
withdrawals_root: hash_tree_root(payload.withdrawals), withdrawals_root: hash_tree_root(payload.withdrawals),
blob_gas_used: payload.blob_gas_used, blob_gas_used: payload.blob_gas_used,
excess_blob_gas: payload.excess_blob_gas, excess_blob_gas: payload.excess_blob_gas,
deposit_receipts_root: deposit_requests_root:
hash_tree_root(payload.deposit_receipts), # [New in Electra:EIP6110] hash_tree_root(payload.deposit_requests), # [New in Electra:EIP6110]
withdrawal_requests_root: withdrawal_requests_root:
hash_tree_root(payload.withdrawal_requests)) # [New in Electra:EIP7002:EIP7251] hash_tree_root(payload.withdrawal_requests), # [New in Electra:EIP7002:EIP7251]
consolidation_requests_root:
hash_tree_root(payload.consolidation_requests)) # [New in Electra:EIP7251]
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/beacon-chain.md#new-process_withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#new-process_withdrawals
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-process_withdrawals
func process_withdrawals*( func process_withdrawals*(
state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState), state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState),
payload: capella.ExecutionPayload | deneb.ExecutionPayload | payload: capella.ExecutionPayload | deneb.ExecutionPayload |
electra.ExecutionPayload): electra.ExecutionPayload):
Result[void, cstring] = Result[void, cstring] =
let expected_withdrawals = get_expected_withdrawals(state) when typeof(state).kind >= ConsensusFork.Electra:
let (expected_withdrawals, partial_withdrawals_count) =
get_expected_withdrawals_with_partial_count(state)
# Update pending partial withdrawals [New in Electra:EIP7251]
# Moved slightly earlier so that it sits in the same `when` branch
state.pending_partial_withdrawals =
HashList[PendingPartialWithdrawal, Limit PENDING_PARTIAL_WITHDRAWALS_LIMIT].init(
state.pending_partial_withdrawals.asSeq[partial_withdrawals_count .. ^1])
else:
let expected_withdrawals = get_expected_withdrawals(state)
if not (len(payload.withdrawals) == len(expected_withdrawals)): if not (len(payload.withdrawals) == len(expected_withdrawals)):
return err("process_withdrawals: different numbers of payload and expected withdrawals") return err("process_withdrawals: different numbers of payload and expected withdrawals")
@ -1076,7 +1085,7 @@ func kzg_commitment_to_versioned_hash*(
var res: VersionedHash var res: VersionedHash
res[0] = VERSIONED_HASH_VERSION_KZG res[0] = VERSIONED_HASH_VERSION_KZG
res[1 .. 31] = eth2digest(kzg_commitment).data.toOpenArray(1, 31) res[1 .. 31] = eth2digest(kzg_commitment.bytes).data.toOpenArray(1, 31)
res res
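The only change to kzg_commitment_to_versioned_hash is hashing the commitment's bytes explicitly; the shape of the computation stays the same: byte 0 is the KZG version tag and bytes 1..31 are the tail of the SHA-256 of the commitment. An equivalent Python one-liner (the 0x01 version tag follows EIP-4844):

```python
from hashlib import sha256

VERSIONED_HASH_VERSION_KZG = b"\x01"  # version tag, per EIP-4844

def kzg_commitment_to_versioned_hash(commitment: bytes) -> bytes:
    # First byte is the version tag; the rest is the tail of sha256(commitment).
    return VERSIONED_HASH_VERSION_KZG + sha256(commitment).digest()[1:]
```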
proc validate_blobs*( proc validate_blobs*(
@ -1137,7 +1146,7 @@ proc process_block*(
ok(operations_rewards) ok(operations_rewards)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095 # TODO workaround for https://github.com/nim-lang/Nim/issues/18095
type SomeBellatrixBlock = type SomeBellatrixBlock =
bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock

View File

@ -10,7 +10,7 @@
# State transition - epoch processing, as described in # State transition - epoch processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing
# #
# The entry point is `process_epoch`, which is at the bottom of this file. # The entry point is `process_epoch`, which is at the bottom of this file.
@ -535,7 +535,7 @@ func get_attestation_component_delta(
else: else:
RewardDelta(penalties: base_reward) RewardDelta(penalties: base_reward)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#components-of-attestation-deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#components-of-attestation-deltas
func get_source_delta*( func get_source_delta*(
validator: RewardStatus, validator: RewardStatus,
base_reward: Gwei, base_reward: Gwei,
@ -694,26 +694,24 @@ func get_unslashed_participating_increment*(
flag_index: TimelyFlag): uint64 = flag_index: TimelyFlag): uint64 =
info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#get_flag_index_deltas
func get_active_increments*( func get_active_increments*(
info: altair.EpochInfo | bellatrix.BeaconState): uint64 = info: altair.EpochInfo | bellatrix.BeaconState): uint64 =
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas
# Combines get_flag_index_deltas() and get_inactivity_penalty_deltas() # Combines get_flag_index_deltas() and get_inactivity_penalty_deltas()
template get_flag_and_inactivity_delta( template get_flag_and_inactivity_delta(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState | electra.BeaconState, deneb.BeaconState | electra.BeaconState,
base_reward_per_increment: Gwei, finality_delay: uint64, base_reward_per_increment: Gwei, finality_delay: uint64,
previous_epoch: Epoch, previous_epoch: Epoch, active_increments: uint64,
active_increments: uint64,
penalty_denominator: uint64, penalty_denominator: uint64,
epoch_participation: ptr EpochParticipationFlags, epoch_participation: ptr EpochParticipationFlags,
participating_increments: array[3, uint64], participating_increments: array[3, uint64], info: var altair.EpochInfo,
info: var altair.EpochInfo, vidx: ValidatorIndex, inactivity_score: uint64
vidx: ValidatorIndex
): (ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei) = ): (ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei) =
let let
base_reward = get_base_reward_increment(state, vidx, base_reward_per_increment) base_reward = get_base_reward_increment(state, vidx, base_reward_per_increment)
@ -751,7 +749,7 @@ template get_flag_and_inactivity_delta(
0.Gwei 0.Gwei
else: else:
let penalty_numerator = let penalty_numerator =
state.validators[vidx].effective_balance * state.inactivity_scores[vidx] state.validators[vidx].effective_balance * inactivity_score
penalty_numerator div penalty_denominator penalty_numerator div penalty_denominator
(vidx, reward(TIMELY_SOURCE_FLAG_INDEX), (vidx, reward(TIMELY_SOURCE_FLAG_INDEX),
@ -804,7 +802,46 @@ iterator get_flag_and_inactivity_deltas*(
yield get_flag_and_inactivity_delta( yield get_flag_and_inactivity_delta(
state, base_reward_per_increment, finality_delay, previous_epoch, state, base_reward_per_increment, finality_delay, previous_epoch,
active_increments, penalty_denominator, epoch_participation, active_increments, penalty_denominator, epoch_participation,
participating_increments, info, vidx) participating_increments, info, vidx, state.inactivity_scores[vidx])
func get_flag_and_inactivity_delta_for_validator(
cfg: RuntimeConfig,
state: deneb.BeaconState | electra.BeaconState,
base_reward_per_increment: Gwei, info: var altair.EpochInfo,
finality_delay: uint64, vidx: ValidatorIndex, inactivity_score: Gwei):
Opt[(ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei)] =
## Return the rewards and penalties for a single validator by scanning its
## participation flags across all timely flags.
const INACTIVITY_PENALTY_QUOTIENT =
when state is altair.BeaconState:
INACTIVITY_PENALTY_QUOTIENT_ALTAIR
else:
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX
static: doAssert ord(high(TimelyFlag)) == 2
let
previous_epoch = get_previous_epoch(state)
active_increments = get_active_increments(info)
penalty_denominator =
cfg.INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT
epoch_participation =
if previous_epoch == get_current_epoch(state):
unsafeAddr state.current_epoch_participation
else:
unsafeAddr state.previous_epoch_participation
participating_increments = [
get_unslashed_participating_increment(info, TIMELY_SOURCE_FLAG_INDEX),
get_unslashed_participating_increment(info, TIMELY_TARGET_FLAG_INDEX),
get_unslashed_participating_increment(info, TIMELY_HEAD_FLAG_INDEX)]
if not is_eligible_validator(info.validators[vidx]):
return Opt.none((ValidatorIndex, Gwei, Gwei, Gwei, Gwei, Gwei, Gwei))
Opt.some get_flag_and_inactivity_delta(
state, base_reward_per_increment, finality_delay, previous_epoch,
active_increments, penalty_denominator, epoch_participation,
participating_increments, info, vidx, inactivity_score.uint64)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1
func process_rewards_and_penalties*( func process_rewards_and_penalties*(
@ -895,7 +932,8 @@ func process_registry_updates*(
var maybe_exit_queue_info: Opt[ExitQueueInfo] var maybe_exit_queue_info: Opt[ExitQueueInfo]
for vidx in state.validators.vindices: for vidx in state.validators.vindices:
if is_eligible_for_activation_queue(state.validators.item(vidx)): if is_eligible_for_activation_queue(
typeof(state).kind, state.validators.item(vidx)):
state.validators.mitem(vidx).activation_eligibility_epoch = state.validators.mitem(vidx).activation_eligibility_epoch =
get_current_epoch(state) + 1 get_current_epoch(state) + 1
@ -940,7 +978,7 @@ func process_registry_updates*(
# Process activation eligibility and ejections # Process activation eligibility and ejections
for index in 0 ..< state.validators.len: for index in 0 ..< state.validators.len:
let validator = state.validators.item(index) let validator = state.validators.item(index)
if is_eligible_for_activation_queue(validator): if is_eligible_for_activation_queue(typeof(state).kind, validator):
# Usually not too many at once, so do this individually # Usually not too many at once, so do this individually
state.validators.mitem(index).activation_eligibility_epoch = state.validators.mitem(index).activation_eligibility_epoch =
get_current_epoch(state) + 1 get_current_epoch(state) + 1
@ -961,7 +999,7 @@ func process_registry_updates*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func get_adjusted_total_slashing_balance*( func get_adjusted_total_slashing_balance*(
state: ForkyBeaconState, total_balance: Gwei): Gwei = state: ForkyBeaconState, total_balance: Gwei): Gwei =
const multiplier = const multiplier =
@ -980,14 +1018,14 @@ func get_adjusted_total_slashing_balance*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool = func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool =
validator.slashed and validator.slashed and
epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func get_slashing_penalty*(validator: Validator, func get_slashing_penalty*(validator: Validator,
adjusted_total_slashing_balance, adjusted_total_slashing_balance,
total_balance: Gwei): Gwei = total_balance: Gwei): Gwei =
@ -999,7 +1037,23 @@ func get_slashing_penalty*(validator: Validator,
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
func get_slashing(
state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei =
# For efficiency reasons, it doesn't make sense to have process_slashings use
# this per-validator index version, but keep them parallel otherwise.
let
epoch = get_current_epoch(state)
adjusted_total_slashing_balance = get_adjusted_total_slashing_balance(
state, total_balance)
let validator = unsafeAddr state.validators.item(vidx)
if slashing_penalty_applies(validator[], epoch):
get_slashing_penalty(
validator[], adjusted_total_slashing_balance, total_balance)
else:
0.Gwei
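The new get_slashing computes, for one validator, the same penalty that process_slashings applies in bulk: if the penalty applies at the current epoch, the validator's effective balance is scaled by adjusted_total_slashing_balance / total_balance in whole effective-balance increments. A worked Python version of that formula, assuming the mainnet EFFECTIVE_BALANCE_INCREMENT of 1 ETH in Gwei:

```python
EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000  # 1 ETH in Gwei (mainnet preset)

def get_slashing_penalty(effective_balance: int,
                         adjusted_total_slashing_balance: int,
                         total_balance: int) -> int:
    # Phase0-style integer arithmetic in whole effective-balance increments.
    increments = effective_balance // EFFECTIVE_BALANCE_INCREMENT
    penalty_numerator = increments * adjusted_total_slashing_balance
    return penalty_numerator // total_balance * EFFECTIVE_BALANCE_INCREMENT

def get_slashing(validator, epoch: int, adjusted_total_slashing_balance: int,
                 total_balance: int, slashing_penalty_applies) -> int:
    # Per-validator counterpart of process_slashings, as added in the diff.
    if slashing_penalty_applies(validator, epoch):
        return get_slashing_penalty(validator.effective_balance,
                                    adjusted_total_slashing_balance, total_balance)
    return 0
```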
func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) = func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) =
let let
epoch = get_current_epoch(state) epoch = get_current_epoch(state)
@ -1113,7 +1167,7 @@ func process_historical_roots_update*(state: var ForkyBeaconState) =
if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0: if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0:
# Equivalent to hash_tree_root(foo: HistoricalBatch), but without using # Equivalent to hash_tree_root(foo: HistoricalBatch), but without using
# significant additional stack or heap. # significant additional stack or heap.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#historicalbatch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch
# In response to https://github.com/status-im/nimbus-eth2/issues/921 # In response to https://github.com/status-im/nimbus-eth2/issues/921
if not state.historical_roots.add state.compute_historical_root(): if not state.historical_roots.add state.compute_historical_root():
raiseAssert "no more room for historical roots, so long and thanks for the fish!" raiseAssert "no more room for historical roots, so long and thanks for the fish!"
@ -1164,7 +1218,7 @@ template compute_inactivity_update(
# TODO activeness already checked; remove redundant checks between # TODO activeness already checked; remove redundant checks between
# is_active_validator and is_unslashed_participating_index # is_active_validator and is_unslashed_participating_index
if is_unslashed_participating_index( if is_unslashed_participating_index(
state, TIMELY_TARGET_FLAG_INDEX, previous_epoch, index.ValidatorIndex): state, TIMELY_TARGET_FLAG_INDEX, previous_epoch, index):
inactivity_score -= min(1'u64, inactivity_score) inactivity_score -= min(1'u64, inactivity_score)
else: else:
inactivity_score += cfg.INACTIVITY_SCORE_BIAS inactivity_score += cfg.INACTIVITY_SCORE_BIAS
@ -1195,6 +1249,7 @@ func process_inactivity_updates*(
let let
pre_inactivity_score = state.inactivity_scores.asSeq()[index] pre_inactivity_score = state.inactivity_scores.asSeq()[index]
index = index.ValidatorIndex # intentional shadowing
inactivity_score = inactivity_score =
compute_inactivity_update(cfg, state, info, pre_inactivity_score) compute_inactivity_update(cfg, state, info, pre_inactivity_score)
@ -1202,7 +1257,7 @@ func process_inactivity_updates*(
if pre_inactivity_score != inactivity_score: if pre_inactivity_score != inactivity_score:
state.inactivity_scores[index] = inactivity_score state.inactivity_scores[index] = inactivity_score
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#historical-summaries-updates # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#historical-summaries-updates
func process_historical_summaries_update*( func process_historical_summaries_update*(
state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState)): state: var (capella.BeaconState | deneb.BeaconState | electra.BeaconState)):
Result[void, cstring] = Result[void, cstring] =
@ -1218,25 +1273,45 @@ func process_historical_summaries_update*(
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-process_pending_balance_deposits # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_pending_balance_deposits
func process_pending_balance_deposits*( func process_pending_balance_deposits*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
cache: var StateCache): Result[void, cstring] = cache: var StateCache): Result[void, cstring] =
let let available_for_processing = state.deposit_balance_to_consume +
available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(cfg, state, cache)
get_activation_exit_churn_limit(cfg, state, cache)
var var
processed_amount = 0.Gwei processed_amount = 0.Gwei
next_deposit_index = 0.Gwei next_deposit_index = 0
deposits_to_postpone: seq[PendingBalanceDeposit]
for deposit in state.pending_balance_deposits: for deposit in state.pending_balance_deposits:
if processed_amount + deposit.amount > available_for_processing: let validator = state.validators.item(deposit.index)
break
let deposit_validator_index = ValidatorIndex.init(deposit.index).valueOr: let deposit_validator_index = ValidatorIndex.init(deposit.index).valueOr:
# TODO this function in spec doesn't really have error returns as such
return err("process_pending_balance_deposits: deposit index out of range") return err("process_pending_balance_deposits: deposit index out of range")
increase_balance(state, deposit_validator_index, deposit.amount)
processed_amount += deposit.amount # Validator is exiting, postpone the deposit until after withdrawable epoch
inc next_deposit_index if validator.exit_epoch < FAR_FUTURE_EPOCH:
if get_current_epoch(state) <= validator.withdrawable_epoch:
deposits_to_postpone.add(deposit)
# Deposited balance will never become active. Increase balance but do not
# consume churn
else:
increase_balance(state, deposit_validator_index, deposit.amount)
# Validator is not exiting, attempt to process deposit
else:
# Deposit does not fit in the churn, no more deposit processing in this
# epoch.
if processed_amount + deposit.amount > available_for_processing:
break
# Deposit fits in the churn, process it. Increase balance and consume churn.
else:
increase_balance(state, deposit_validator_index, deposit.amount)
processed_amount += deposit.amount
# Regardless of how the deposit was handled, we move on in the queue.
next_deposit_index += 1
state.pending_balance_deposits = state.pending_balance_deposits =
HashList[PendingBalanceDeposit, Limit PENDING_BALANCE_DEPOSITS_LIMIT].init( HashList[PendingBalanceDeposit, Limit PENDING_BALANCE_DEPOSITS_LIMIT].init(
@ -1248,6 +1323,10 @@ func process_pending_balance_deposits*(
state.deposit_balance_to_consume = state.deposit_balance_to_consume =
available_for_processing - processed_amount available_for_processing - processed_amount
debugComment "yet another in-theory-might-overflow-maybe things, look at these more carefully"
if len(deposits_to_postpone) > 0:
discard state.pending_balance_deposits.add deposits_to_postpone
ok() ok()
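The rewritten process_pending_balance_deposits walks the queue in order: deposits for a validator with an exit initiated are postponed while the validator is still within its withdrawable epoch and credited without consuming churn afterwards; all other deposits consume churn, and processing stops at the first one that no longer fits. Postponed deposits are re-appended at the end. A Python sketch of that branching, with balances and constants handled abstractly (increase_balance and far_future_epoch are parameters, not the repo's API):

```python
def process_pending_balance_deposits(state, available_for_processing: int,
                                     current_epoch: int, far_future_epoch: int,
                                     increase_balance):
    processed_amount = 0     # churn actually consumed
    next_deposit_index = 0   # how many queue entries were handled
    deposits_to_postpone = []
    for deposit in state.pending_balance_deposits:
        validator = state.validators[deposit.index]
        if validator.exit_epoch < far_future_epoch:
            # Exit initiated: postpone until after the withdrawable epoch;
            # afterwards the balance is credited without consuming churn.
            if current_epoch <= validator.withdrawable_epoch:
                deposits_to_postpone.append(deposit)
            else:
                increase_balance(state, deposit.index, deposit.amount)
        else:
            # Stop at the first deposit that does not fit in the churn.
            if processed_amount + deposit.amount > available_for_processing:
                break
            increase_balance(state, deposit.index, deposit.amount)
            processed_amount += deposit.amount
        # Regardless of how the deposit was handled, move on in the queue.
        next_deposit_index += 1
    # Drop the handled prefix, keep the unhandled tail, re-queue postponed deposits.
    state.pending_balance_deposits = (
        state.pending_balance_deposits[next_deposit_index:] + deposits_to_postpone)
    # deposit_balance_to_consume is then updated from the leftover churn, as in
    # the unchanged lines that follow in the diff.
```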
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-process_pending_consolidations # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#new-process_pending_consolidations
@ -1345,7 +1424,7 @@ func init*(
deneb.BeaconState | electra.BeaconState): T = deneb.BeaconState | electra.BeaconState): T =
init(result, state) init(result, state)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#epoch-processing
proc process_epoch*( proc process_epoch*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
state: var (altair.BeaconState | bellatrix.BeaconState), state: var (altair.BeaconState | bellatrix.BeaconState),
@ -1483,3 +1562,108 @@ proc process_epoch*(
process_sync_committee_updates(state) process_sync_committee_updates(state)
ok() ok()
proc get_validator_balance_after_epoch*(
cfg: RuntimeConfig,
state: deneb.BeaconState | electra.BeaconState,
flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo,
index: ValidatorIndex): Gwei =
# Run a subset of process_epoch() which affects an individual validator,
# without modifying state itself
info.init(state) # TODO avoid quadratic aspects here
# Can't use process_justification_and_finalization(), but use its helper
# function. Used to calculate inactivity_score.
let jf_info =
# process_justification_and_finalization() skips first two epochs
if get_current_epoch(state) <= GENESIS_EPOCH + 1:
JustificationAndFinalizationInfo(
previous_justified_checkpoint: state.previous_justified_checkpoint,
current_justified_checkpoint: state.current_justified_checkpoint,
finalized_checkpoint: state.finalized_checkpoint,
justification_bits: state.justification_bits)
else:
weigh_justification_and_finalization(
state, info.balances.current_epoch,
info.balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX],
info.balances.current_epoch_TIMELY_TARGET, flags)
# Used as part of process_rewards_and_penalties
let inactivity_score =
# process_inactivity_updates skips GENESIS_EPOCH and ineligible validators
if get_current_epoch(state) == GENESIS_EPOCH or
not is_eligible_validator(info.validators[index]):
0.Gwei
else:
let
finality_delay =
get_previous_epoch(state) - jf_info.finalized_checkpoint.epoch
not_in_inactivity_leak = not is_in_inactivity_leak(finality_delay)
pre_inactivity_score = state.inactivity_scores.asSeq()[index]
# This is a template which uses not_in_inactivity_leak and index
compute_inactivity_update(cfg, state, info, pre_inactivity_score).Gwei
# process_rewards_and_penalties for a single validator
let reward_and_penalties_balance = block:
# process_rewards_and_penalties doesn't run at GENESIS_EPOCH
if get_current_epoch(state) == GENESIS_EPOCH:
state.balances.item(index)
else:
let
total_active_balance = info.balances.current_epoch
base_reward_per_increment = get_base_reward_per_increment(
total_active_balance)
finality_delay = get_finality_delay(state)
var balance = state.balances.item(index)
let maybeDelta = get_flag_and_inactivity_delta_for_validator(
cfg, state, base_reward_per_increment, info, finality_delay, index,
inactivity_score)
if maybeDelta.isOk:
# Can't use isErrOr in generics
let (validator_index, reward0, reward1, reward2, penalty0, penalty1, penalty2) =
maybeDelta.get
info.validators[validator_index].delta.rewards += reward0 + reward1 + reward2
info.validators[validator_index].delta.penalties += penalty0 + penalty1 + penalty2
increase_balance(balance, info.validators[index].delta.rewards)
decrease_balance(balance, info.validators[index].delta.penalties)
balance
# The two directly balance-changing operations, from Altair through Deneb,
# are these. The rest is necessary to look past a single epoch transition,
# but that's not the use case here.
var post_epoch_balance = reward_and_penalties_balance
decrease_balance(
post_epoch_balance,
get_slashing(state, info.balances.current_epoch, index))
# Electra adds process_pending_balance_deposits to the list of potential
# balance-changing epoch operations. If this turns out to be empirically too
# expensive, it should probably be cached so that the 16+ invocations of this
# function (each time withdrawals are calculated, for example) don't repeat
# the work. Limits do exist on how large this structure can get, though.
when type(state).kind >= ConsensusFork.Electra:
let available_for_processing = state.deposit_balance_to_consume +
get_activation_exit_churn_limit(cfg, state, cache)
var processed_amount = 0.Gwei
for deposit in state.pending_balance_deposits:
let
validator = state.validators.item(deposit.index)
deposit_validator_index = ValidatorIndex.init(deposit.index).valueOr:
break
# Validator is exiting, postpone the deposit until after withdrawable epoch
if validator.exit_epoch < FAR_FUTURE_EPOCH:
if not(get_current_epoch(state) <= validator.withdrawable_epoch) and
deposit_validator_index == index:
increase_balance(post_epoch_balance, deposit.amount)
# Validator is not exiting, attempt to process deposit
else:
if not(processed_amount + deposit.amount > available_for_processing):
if deposit_validator_index == index:
increase_balance(post_epoch_balance, deposit.amount)
processed_amount += deposit.amount
post_epoch_balance

View File

@ -158,7 +158,7 @@ func get_shuffled_active_validator_indices*(
withState(state): withState(state):
cache.get_shuffled_active_validator_indices(forkyState.data, epoch) cache.get_shuffled_active_validator_indices(forkyState.data, epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#get_active_validator_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_active_validator_indices
func count_active_validators*(state: ForkyBeaconState, func count_active_validators*(state: ForkyBeaconState,
epoch: Epoch, epoch: Epoch,
cache: var StateCache): uint64 = cache: var StateCache): uint64 =
@ -349,6 +349,7 @@ func compute_inverted_shuffled_index*(
countdown(SHUFFLE_ROUND_COUNT.uint8 - 1, 0'u8, 1) countdown(SHUFFLE_ROUND_COUNT.uint8 - 1, 0'u8, 1)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_proposer_index # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#compute_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-compute_proposer_index
template compute_proposer_index(state: ForkyBeaconState, template compute_proposer_index(state: ForkyBeaconState,
indices: openArray[ValidatorIndex], seed: Eth2Digest, indices: openArray[ValidatorIndex], seed: Eth2Digest,
unshuffleTransform: untyped): Opt[ValidatorIndex] = unshuffleTransform: untyped): Opt[ValidatorIndex] =
@ -373,8 +374,13 @@ template compute_proposer_index(state: ForkyBeaconState,
candidate_index = indices[unshuffleTransform] candidate_index = indices[unshuffleTransform]
random_byte = (eth2digest(buffer).data)[i mod 32] random_byte = (eth2digest(buffer).data)[i mod 32]
effective_balance = state.validators[candidate_index].effective_balance effective_balance = state.validators[candidate_index].effective_balance
const max_effective_balance =
when typeof(state).kind >= ConsensusFork.Electra:
MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei # [Modified in Electra:EIP7251]
else:
MAX_EFFECTIVE_BALANCE.Gwei
if effective_balance * MAX_RANDOM_BYTE >= if effective_balance * MAX_RANDOM_BYTE >=
MAX_EFFECTIVE_BALANCE.Gwei * random_byte: max_effective_balance * random_byte:
res = Opt.some(candidate_index) res = Opt.some(candidate_index)
break break
i += 1 i += 1
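compute_proposer_index keeps the single-byte acceptance test but raises the balance cap to MAX_EFFECTIVE_BALANCE_ELECTRA on Electra states, so validators with consolidated (larger) effective balances are proportionally more likely to be accepted in any given round. A sketch of that acceptance test; the constants are the mainnet values assumed here (32 ETH and 2048 ETH in Gwei, MAX_RANDOM_BYTE = 255):

```python
MAX_RANDOM_BYTE = 2**8 - 1
MAX_EFFECTIVE_BALANCE = 32_000_000_000             # 32 ETH in Gwei
MAX_EFFECTIVE_BALANCE_ELECTRA = 2_048_000_000_000  # 2048 ETH in Gwei (EIP-7251)

def accept_proposer_candidate(effective_balance: int, random_byte: int,
                              electra: bool) -> bool:
    # A candidate is accepted with probability effective_balance / cap, using a
    # single random byte derived from the shuffling seed; raising the cap in
    # Electra keeps the sampling proportional for consolidated validators.
    cap = MAX_EFFECTIVE_BALANCE_ELECTRA if electra else MAX_EFFECTIVE_BALANCE
    return effective_balance * MAX_RANDOM_BYTE >= cap * random_byte
```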
@ -388,7 +394,7 @@ func compute_proposer_index(state: ForkyBeaconState,
## Return from ``indices`` a random index sampled by effective balance. ## Return from ``indices`` a random index sampled by effective balance.
compute_proposer_index(state, indices, seed, shuffled_index) compute_proposer_index(state, indices, seed, shuffled_index)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#get_beacon_proposer_index # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*( func get_beacon_proposer_index*(
state: ForkyBeaconState, cache: var StateCache, slot: Slot): state: ForkyBeaconState, cache: var StateCache, slot: Slot):
Opt[ValidatorIndex] = Opt[ValidatorIndex] =
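The hunk above only swaps the balance cap used in the acceptance test. A rough standalone sketch of that sampling loop, with illustrative constants and std/random standing in for the spec's hash-derived bytes:

```nim
import std/random

const
  MaxEffectiveBalance = 32_000_000_000'u64            # pre-Electra cap, Gwei
  MaxEffectiveBalanceElectra = 2_048_000_000_000'u64  # Electra cap (EIP-7251), Gwei
  MaxRandomByte = 255'u64

proc sampleProposer(effectiveBalances: seq[uint64]; rng: var Rand;
                    electra: bool): int =
  # A candidate is accepted with probability ~effective_balance / cap, so
  # raising the cap in Electra lets consolidated validators win more often.
  let cap = if electra: MaxEffectiveBalanceElectra else: MaxEffectiveBalance
  while true:
    let
      candidate = rng.rand(effectiveBalances.high)
      randomByte = rng.rand(255).uint64
    if effectiveBalances[candidate] * MaxRandomByte >= cap * randomByte:
      return candidate

when isMainModule:
  var rng = initRand(1234)
  let balances = @[32_000_000_000'u64, 2_048_000_000_000'u64]
  echo sampleProposer(balances, rng, electra = true)
```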

View File

@ -10,10 +10,10 @@
import import
./datatypes/base, ./beaconstate, ./forks, ./helpers ./datatypes/base, ./beaconstate, ./forks, ./helpers
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#configuration
const SAFETY_DECAY* = 10'u64 const SAFETY_DECAY* = 10'u64
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period
func compute_weak_subjectivity_period( func compute_weak_subjectivity_period(
cfg: RuntimeConfig, state: ForkyBeaconState): uint64 = cfg: RuntimeConfig, state: ForkyBeaconState): uint64 =
## Returns the weak subjectivity period for the current ``state``. ## Returns the weak subjectivity period for the current ``state``.
@ -49,7 +49,7 @@ func compute_weak_subjectivity_period(
ws_period ws_period
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period
func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot, func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot,
ws_state: ForkedHashedBeaconState, ws_state: ForkedHashedBeaconState,
ws_checkpoint: Checkpoint): bool = ws_checkpoint: Checkpoint): bool =
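For reference, the condition these helpers feed into reduces to an epoch comparison. A minimal sketch with stand-in constants (the real period is derived from validator count, balances and churn):

```nim
const SlotsPerEpoch = 32'u64

func epochAt(slot: uint64): uint64 =
  slot div SlotsPerEpoch

func isWithinWsPeriod(currentSlot, wsStateSlot, wsPeriodEpochs: uint64): bool =
  # Still within the weak subjectivity period if the wall clock has not
  # advanced more than wsPeriodEpochs past the checkpoint state's epoch.
  epochAt(currentSlot) <= epochAt(wsStateSlot) + wsPeriodEpochs

when isMainModule:
  # ~2 weeks at 12 s slots and 32-slot epochs is about 3150 epochs.
  echo isWithinWsPeriod(currentSlot = 9_000_000'u64,
                        wsStateSlot = 8_950_000'u64,
                        wsPeriodEpochs = 3150'u64)  # true
```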

View File

@ -328,7 +328,7 @@ template query[E](
): Future[bool].Raising([CancelledError]) = ): Future[bool].Raising([CancelledError]) =
self.query(e, Nothing()) self.query(e, Nothing())
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md#light-client-sync-process # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md#light-client-sync-process
proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} = proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} =
var nextSyncTaskTime = self.getBeaconTime() var nextSyncTaskTime = self.getBeaconTime()
while true: while true:

View File

@ -90,7 +90,7 @@ p2pProtocol LightClientSync(version = 1,
debug "LC bootstrap request done", peer, blockRoot debug "LC bootstrap request done", peer, blockRoot
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
proc lightClientUpdatesByRange( proc lightClientUpdatesByRange(
peer: Peer, peer: Peer,
startPeriod: SyncCommitteePeriod, startPeriod: SyncCommitteePeriod,
@ -134,7 +134,7 @@ p2pProtocol LightClientSync(version = 1,
debug "LC updates by range request done", peer, startPeriod, count, found debug "LC updates by range request done", peer, startPeriod, count, found
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
proc lightClientFinalityUpdate( proc lightClientFinalityUpdate(
peer: Peer, peer: Peer,
response: SingleChunkResponse[ForkedLightClientFinalityUpdate]) response: SingleChunkResponse[ForkedLightClientFinalityUpdate])

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
import std/[strutils, sequtils, algorithm] import std/[strutils, sequtils, algorithm]
import stew/base10, chronos, chronicles import stew/base10, chronos, chronicles, results
import import
../spec/datatypes/[phase0, altair], ../spec/datatypes/[phase0, altair],
../spec/eth2_apis/rest_types, ../spec/eth2_apis/rest_types,
@ -34,13 +34,20 @@ const
StatusExpirationTime* = chronos.minutes(2) StatusExpirationTime* = chronos.minutes(2)
## Time it takes for the peer's status information to expire. ## Time it takes for the peer's status information to expire.
WeakSubjectivityLogMessage* =
"Database state missing or too old, cannot sync - resync the client " &
"using a trusted node or allow lenient long-range syncing with the " &
"`--long-range-sync=lenient` option. See " &
"https://nimbus.guide/faq.html#what-is-long-range-sync " &
"for more information"
type type
SyncWorkerStatus* {.pure.} = enum SyncWorkerStatus* {.pure.} = enum
Sleeping, WaitingPeer, UpdatingStatus, Requesting, Downloading, Queueing, Sleeping, WaitingPeer, UpdatingStatus, Requesting, Downloading, Queueing,
Processing Processing
SyncManagerFlag* {.pure.} = enum SyncManagerFlag* {.pure.} = enum
NoMonitor NoMonitor, NoGenesisSync
SyncWorker*[A, B] = object SyncWorker*[A, B] = object
future: Future[void].Raising([CancelledError]) future: Future[void].Raising([CancelledError])
@ -52,6 +59,7 @@ type
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: uint64 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: uint64
responseTimeout: chronos.Duration responseTimeout: chronos.Duration
maxHeadAge: uint64 maxHeadAge: uint64
isWithinWeakSubjectivityPeriod: GetBoolCallback
getLocalHeadSlot: GetSlotCallback getLocalHeadSlot: GetSlotCallback
getLocalWallSlot: GetSlotCallback getLocalWallSlot: GetSlotCallback
getSafeSlot: GetSlotCallback getSafeSlot: GetSlotCallback
@ -60,6 +68,7 @@ type
progressPivot: Slot progressPivot: Slot
workers: array[SyncWorkersCount, SyncWorker[A, B]] workers: array[SyncWorkersCount, SyncWorker[A, B]]
notInSyncEvent: AsyncEvent notInSyncEvent: AsyncEvent
shutdownEvent: AsyncEvent
rangeAge: uint64 rangeAge: uint64
chunkSize: uint64 chunkSize: uint64
queue: SyncQueue[A] queue: SyncQueue[A]
@ -124,8 +133,10 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
getFinalizedSlotCb: GetSlotCallback, getFinalizedSlotCb: GetSlotCallback,
getBackfillSlotCb: GetSlotCallback, getBackfillSlotCb: GetSlotCallback,
getFrontfillSlotCb: GetSlotCallback, getFrontfillSlotCb: GetSlotCallback,
weakSubjectivityPeriodCb: GetBoolCallback,
progressPivot: Slot, progressPivot: Slot,
blockVerifier: BlockVerifier, blockVerifier: BlockVerifier,
shutdownEvent: AsyncEvent,
maxHeadAge = uint64(SLOTS_PER_EPOCH * 1), maxHeadAge = uint64(SLOTS_PER_EPOCH * 1),
chunkSize = uint64(SLOTS_PER_EPOCH), chunkSize = uint64(SLOTS_PER_EPOCH),
flags: set[SyncManagerFlag] = {}, flags: set[SyncManagerFlag] = {},
@ -143,6 +154,7 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: minEpochsForBlobSidecarsRequests, MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: minEpochsForBlobSidecarsRequests,
getLocalHeadSlot: getLocalHeadSlotCb, getLocalHeadSlot: getLocalHeadSlotCb,
getLocalWallSlot: getLocalWallSlotCb, getLocalWallSlot: getLocalWallSlotCb,
isWithinWeakSubjectivityPeriod: weakSubjectivityPeriodCb,
getSafeSlot: getSafeSlot, getSafeSlot: getSafeSlot,
getFirstSlot: getFirstSlot, getFirstSlot: getFirstSlot,
getLastSlot: getLastSlot, getLastSlot: getLastSlot,
@ -152,6 +164,7 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
blockVerifier: blockVerifier, blockVerifier: blockVerifier,
notInSyncEvent: newAsyncEvent(), notInSyncEvent: newAsyncEvent(),
direction: direction, direction: direction,
shutdownEvent: shutdownEvent,
ident: ident, ident: ident,
flags: flags flags: flags
) )
@ -566,6 +579,11 @@ proc startWorkers[A, B](man: SyncManager[A, B]) =
for i in 0 ..< len(man.workers): for i in 0 ..< len(man.workers):
man.workers[i].future = syncWorker[A, B](man, i) man.workers[i].future = syncWorker[A, B](man, i)
proc stopWorkers[A, B](man: SyncManager[A, B]) {.async: (raises: []).} =
# Cancelling all the synchronization workers.
let pending = man.workers.mapIt(it.future.cancelAndWait())
await noCancel allFutures(pending)
proc toTimeLeftString*(d: Duration): string = proc toTimeLeftString*(d: Duration): string =
if d == InfiniteDuration: if d == InfiniteDuration:
"--h--m" "--h--m"
@ -711,6 +729,14 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4) & man.avgSyncSpeed.formatBiggestFloat(ffDecimal, 4) &
"slots/s (" & map & ":" & currentSlot & ")" "slots/s (" & map & ":" & currentSlot & ")"
if (man.queue.kind == SyncQueueKind.Forward) and
(SyncManagerFlag.NoGenesisSync in man.flags):
if not(man.isWithinWeakSubjectivityPeriod()):
fatal WeakSubjectivityLogMessage, current_slot = wallSlot
await man.stopWorkers()
man.shutdownEvent.fire()
return
if man.remainingSlots() <= man.maxHeadAge: if man.remainingSlots() <= man.maxHeadAge:
man.notInSyncEvent.clear() man.notInSyncEvent.clear()
# We are marking SyncManager as not working only when we are in sync and # We are marking SyncManager as not working only when we are in sync and
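Taken together, the new pieces (the NoGenesisSync flag, the isWithinWeakSubjectivityPeriod callback and the shutdownEvent) boil down to a single decision at the top of the sync loop. A plain-Nim sketch with illustrative names, chronos futures and logging omitted:

```nim
type SyncDecision = enum
  sdContinue   # keep syncing forward
  sdShutdown   # log WeakSubjectivityLogMessage, stop workers, fire shutdownEvent

proc genesisSyncDecision(forwardSync, noGenesisSync: bool;
                         isWithinWsPeriod: proc(): bool): SyncDecision =
  # Forward sync from a state outside the weak subjectivity period is refused
  # unless lenient long-range syncing was explicitly allowed by the operator.
  if forwardSync and noGenesisSync and not isWithinWsPeriod():
    sdShutdown
  else:
    sdContinue

when isMainModule:
  echo genesisSyncDecision(true, true, proc(): bool = false)   # sdShutdown
  echo genesisSyncDecision(true, false, proc(): bool = false)  # sdContinue
```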

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
import std/[heapqueue, tables, strutils, sequtils, math] import std/[heapqueue, tables, strutils, sequtils, math]
import stew/base10, chronos, chronicles import stew/base10, chronos, chronicles, results
import import
../spec/datatypes/[base, phase0, altair], ../spec/datatypes/[base, phase0, altair],
../spec/[helpers, forks], ../spec/[helpers, forks],
@ -24,6 +24,7 @@ logScope:
type type
GetSlotCallback* = proc(): Slot {.gcsafe, raises: [].} GetSlotCallback* = proc(): Slot {.gcsafe, raises: [].}
GetBoolCallback* = proc(): bool {.gcsafe, raises: [].}
ProcessingCallback* = proc() {.gcsafe, raises: [].} ProcessingCallback* = proc() {.gcsafe, raises: [].}
BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock, BlockVerifier* = proc(signedBlock: ForkedSignedBeaconBlock,
blobs: Opt[BlobSidecars], maybeFinalized: bool): blobs: Opt[BlobSidecars], maybeFinalized: bool):

View File

@ -21,7 +21,7 @@ import
from presto import RestDecodingError from presto import RestDecodingError
const const
largeRequestsTimeout = 60.seconds # Downloading large items such as states. largeRequestsTimeout = 90.seconds # Downloading large items such as states.
smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots. smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots.
proc fetchDepositSnapshot( proc fetchDepositSnapshot(
@ -171,7 +171,7 @@ proc doTrustedNodeSync*(
let stateId = let stateId =
case syncTarget.kind case syncTarget.kind
of TrustedNodeSyncKind.TrustedBlockRoot: of TrustedNodeSyncKind.TrustedBlockRoot:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/light-client.md#light-client-sync-process # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md#light-client-sync-process
const lcDataFork = LightClientDataFork.high const lcDataFork = LightClientDataFork.high
var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]] var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]]
func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) = func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) =

View File

@ -539,7 +539,6 @@ proc makeBeaconBlockForHeadAndSlot*(
slot, validator_index slot, validator_index
return err("Unable to get execution payload") return err("Unable to get execution payload")
debugComment "flesh out consolidations"
let res = makeBeaconBlockWithRewards( let res = makeBeaconBlockWithRewards(
node.dag.cfg, node.dag.cfg,
state[], state[],
@ -552,7 +551,6 @@ proc makeBeaconBlockForHeadAndSlot*(
exits, exits,
node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot), node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot),
payload, payload,
@[], # consolidations
noRollback, # Temporary state - no need for rollback noRollback, # Temporary state - no need for rollback
cache, cache,
verificationFlags = {}, verificationFlags = {},
@ -1751,8 +1749,10 @@ proc registerValidatorsPerBuilder(
validatorRegistrations.add @[validatorRegistration] validatorRegistrations.add @[validatorRegistration]
# First, check for VC-added keys; cheaper because provided pre-signed # First, check for VC-added keys; cheaper because provided pre-signed
# See issue #5599: currently VC have no way to provide BN with per-validator builders per the specs, so we have to # See issue #5599: currently VC have no way to provide BN with per-validator
# resort to use the BN fallback default (--payload-builder-url value, obtained by calling getPayloadBuilderAddress) # builders per the specs, so we have to resort to use the BN fallback
# default (--payload-builder-url value, obtained by calling
# getPayloadBuilderAddress)
var nonExitedVcPubkeys: HashSet[ValidatorPubKey] var nonExitedVcPubkeys: HashSet[ValidatorPubKey]
if node.externalBuilderRegistrations.len > 0 and if node.externalBuilderRegistrations.len > 0 and
payloadBuilderAddress == node.config.getPayloadBuilderAddress.value: payloadBuilderAddress == node.config.getPayloadBuilderAddress.value:
@ -1966,8 +1966,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra
updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
# Wait 2 / 3 of the slot time to allow messages to propagate, then collect # Wait 2 / 3 of the slot time to allow messages to propagate, then collect
# the result in aggregates # the result in aggregates
static: static:

View File

@ -1481,6 +1481,7 @@ proc removeFeeRecipientFile*(host: KeymanagerHost,
if fileExists(path): if fileExists(path):
io2.removeFile(path).isOkOr: io2.removeFile(path).isOkOr:
return err($uint(error) & " " & ioErrorMsg(error)) return err($uint(error) & " " & ioErrorMsg(error))
host.validatorPool[].invalidateValidatorRegistration(pubkey)
ok() ok()
proc removeGasLimitFile*(host: KeymanagerHost, proc removeGasLimitFile*(host: KeymanagerHost,
@ -1499,15 +1500,22 @@ proc removeGraffitiFile*(host: KeymanagerHost,
return err($uint(error) & " " & ioErrorMsg(error)) return err($uint(error) & " " & ioErrorMsg(error))
ok() ok()
proc setFeeRecipient*(host: KeymanagerHost, pubkey: ValidatorPubKey, feeRecipient: Eth1Address): Result[void, string] = proc setFeeRecipient*(
host: KeymanagerHost, pubkey: ValidatorPubKey, feeRecipient: Eth1Address):
Result[void, string] =
let validatorKeystoreDir = host.validatorKeystoreDir(pubkey) let validatorKeystoreDir = host.validatorKeystoreDir(pubkey)
? secureCreatePath(validatorKeystoreDir).mapErr(proc(e: auto): string = ? secureCreatePath(validatorKeystoreDir).mapErr(proc(e: auto): string =
"Could not create wallet directory [" & validatorKeystoreDir & "]: " & $e) "Could not create wallet directory [" & validatorKeystoreDir & "]: " & $e)
io2.writeFile(validatorKeystoreDir / FeeRecipientFilename, $feeRecipient) let res = io2.writeFile(
validatorKeystoreDir / FeeRecipientFilename, $feeRecipient)
.mapErr(proc(e: auto): string = "Failed to write fee recipient file: " & $e) .mapErr(proc(e: auto): string = "Failed to write fee recipient file: " & $e)
if res.isOk:
host.validatorPool[].invalidateValidatorRegistration(pubkey)
res
proc setGasLimit*(host: KeymanagerHost, proc setGasLimit*(host: KeymanagerHost,
pubkey: ValidatorPubKey, pubkey: ValidatorPubKey,
gasLimit: uint64): Result[void, string] = gasLimit: uint64): Result[void, string] =

View File

@ -117,8 +117,10 @@ proc routeSignedBeaconBlock*(
let blobs = blobsOpt.get() let blobs = blobsOpt.get()
let kzgCommits = blck.message.body.blob_kzg_commitments.asSeq let kzgCommits = blck.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0: if blobs.len > 0 or kzgCommits.len > 0:
let res = validate_blobs(kzgCommits, blobs.mapIt(it.blob), let res = validate_blobs(
blobs.mapIt(it.kzg_proof)) kzgCommits,
blobs.mapIt(KzgBlob(bytes: it.blob)),
blobs.mapIt(it.kzg_proof))
if res.isErr(): if res.isErr():
warn "blobs failed validation", warn "blobs failed validation",
blockRoot = shortLog(blck.root), blockRoot = shortLog(blck.root),
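The change here is a type adaptation: the updated KZG bindings wrap raw blob bytes in an object with a `bytes` field, so call sites map each sidecar's `blob` into that wrapper before validation. A tiny sketch of the mapping, using simplified stand-in types rather than the real binding definitions:

```nim
import std/sequtils

type
  BlobBytes = array[4, byte]   # stand-in; real blobs are far larger
  KzgBlob = object
    bytes: BlobBytes
  BlobSidecar = object
    blob: BlobBytes

let sidecars = @[BlobSidecar(blob: [1'u8, 2, 3, 4]),
                 BlobSidecar(blob: [5'u8, 6, 7, 8])]
# Equivalent of `blobs.mapIt(KzgBlob(bytes: it.blob))` in the hunk above.
let wrapped = sidecars.mapIt(KzgBlob(bytes: it.blob))
echo wrapped.len  # 2
```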

View File

@ -38,7 +38,8 @@ macro copyFields*(
# unblinded objects, and can't simply be copied. # unblinded objects, and can't simply be copied.
"transactions_root", "execution_payload", "transactions_root", "execution_payload",
"execution_payload_header", "body", "withdrawals_root", "execution_payload_header", "body", "withdrawals_root",
"deposit_receipts_root", "withdrawal_requests_root"]: "deposit_requests_root", "withdrawal_requests_root",
"consolidation_requests_root"]:
# TODO use stew/assign2 # TODO use stew/assign2
result.add newAssignment( result.add newAssignment(
newDotExpr(dst, ident(name)), newDotExpr(src, ident(name))) newDotExpr(dst, ident(name)), newDotExpr(src, ident(name)))
@ -128,7 +129,7 @@ proc unblindAndRouteBlockMEV*(
bundle.data.blobs_bundle.commitments: bundle.data.blobs_bundle.commitments:
return err("unblinded blobs bundle has unexpected commitments") return err("unblinded blobs bundle has unexpected commitments")
let ok = verifyProofs( let ok = verifyProofs(
asSeq blobs_bundle.blobs, blobs_bundle.blobs.mapIt(KzgBlob(bytes: it)),
asSeq blobs_bundle.commitments, asSeq blobs_bundle.commitments,
asSeq blobs_bundle.proofs).valueOr: asSeq blobs_bundle.proofs).valueOr:
return err("unblinded blobs bundle fails verification") return err("unblinded blobs bundle fails verification")

View File

@ -36,7 +36,7 @@ export results
# - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities # - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities
# #
# Phase 0 spec - Honest Validator - how to avoid slashing # Phase 0 spec - Honest Validator - how to avoid slashing
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#how-to-avoid-slashing # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#how-to-avoid-slashing
# #
# In-depth reading on slashing conditions # In-depth reading on slashing conditions
# #

View File

@ -288,6 +288,15 @@ proc updateValidator*(pool: var ValidatorPool,
validator.activationEpoch = activationEpoch validator.activationEpoch = activationEpoch
func invalidateValidatorRegistration*(
pool: var ValidatorPool, pubkey: ValidatorPubKey) =
# When the per-validator fee recipient changes via keymanager, the builder
# API validator registration needs to be recomputed. This will happen when
# registrations are next sent, but ensure here that it will happen rather
# than relying on a now-outdated, cached validator registration.
pool.getValidator(pubkey).isErrOr:
value.externalBuilderRegistration.reset()
proc close*(pool: var ValidatorPool) = proc close*(pool: var ValidatorPool) =
## Unlock and close all validator keystore's files managed by ``pool``. ## Unlock and close all validator keystore's files managed by ``pool``.
for validator in pool.validators.values(): for validator in pool.validators.values():
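Together with the keymanager change above (setFeeRecipient invalidating the registration only after a successful file write), this is a cache-invalidation pattern. A simplified sketch with stand-in types:

```nim
import std/[options, tables]

type
  Registration = object
    feeRecipient: string
  ValidatorEntry = object
    cachedRegistration: Option[Registration]

proc invalidateRegistration(pool: var Table[string, ValidatorEntry];
                            pubkey: string) =
  # Drop the cached builder registration so the next registration round
  # re-signs it with the freshly configured fee recipient.
  if pubkey in pool:
    pool[pubkey].cachedRegistration = none(Registration)

when isMainModule:
  var pool = initTable[string, ValidatorEntry]()
  pool["0xvalidator"] = ValidatorEntry(
    cachedRegistration: some(Registration(feeRecipient: "0xold")))
  # Invalidate only after the fee-recipient file write actually succeeded.
  invalidateRegistration(pool, "0xvalidator")
  echo pool["0xvalidator"].cachedRegistration.isNone  # true
```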
@ -767,7 +776,7 @@ proc getAggregateAndProofSignature*(v: AttachedValidator,
fork, genesis_validators_root, aggregate_and_proof) fork, genesis_validators_root, aggregate_and_proof)
await v.signData(request) await v.signData(request)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
proc getSyncCommitteeMessage*(v: AttachedValidator, proc getSyncCommitteeMessage*(v: AttachedValidator,
fork: Fork, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
@ -798,7 +807,7 @@ proc getSyncCommitteeMessage*(v: AttachedValidator,
) )
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork, proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
slot: Slot, slot: Slot,
@ -818,7 +827,7 @@ proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
) )
await v.signData(request) await v.signData(request)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork, proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
contribution_and_proof: ContributionAndProof contribution_and_proof: ContributionAndProof

View File

@ -18,7 +18,7 @@ const
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH" "Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"
versionMajor* = 24 versionMajor* = 24
versionMinor* = 6 versionMinor* = 7
versionBuild* = 0 versionBuild* = 0
versionBlob* = "stateofus" # Single word - ends up in the default graffiti versionBlob* = "stateofus" # Single word - ends up in the default graffiti
@ -51,6 +51,8 @@ const
fullVersionStr* = "v" & versionAsStr & "-" & gitRevision & "-" & versionBlob fullVersionStr* = "v" & versionAsStr & "-" & gitRevision & "-" & versionBlob
nimbusAgentStr* = "Nimbus/" & fullVersionStr
func getNimGitHash*(): string = func getNimGitHash*(): string =
const gitPrefix = "git hash: " const gitPrefix = "git hash: "
let tmp = splitLines(nimFullBanner) let tmp = splitLines(nimFullBanner)
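The new nimbusAgentStr is a derived constant; with illustrative values in place of the real build metadata, it composes like this:

```nim
const
  versionAsStr = "24.7.0"   # illustrative, not the actual build values
  gitRevision = "deadbeef"
  versionBlob = "stateofus"
  fullVersionStr = "v" & versionAsStr & "-" & gitRevision & "-" & versionBlob
  nimbusAgentStr = "Nimbus/" & fullVersionStr

when isMainModule:
  echo nimbusAgentStr  # Nimbus/v24.7.0-deadbeef-stateofus
```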

View File

@ -1,3 +1,12 @@
# beacon_chain
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import import
chronicles, chronicles/[topics_registry, timings], chronicles, chronicles/[topics_registry, timings],
confutils, confutils/std/net, confutils, confutils/std/net,
@ -6,11 +15,11 @@ import
type type
Config = object Config = object
serverIpAddress {. serverIpAddress {.
defaultValue: ValidIpAddress.init("127.0.0.1") defaultValue: static(parseIpAddress("127.0.0.1"))
defaultValueDesc: "127.0.0.1" defaultValueDesc: "127.0.0.1"
desc: "IP address of the beacon node's REST server" desc: "IP address of the beacon node's REST server"
abbr: "a" abbr: "a"
name: "address" }: ValidIpAddress name: "address" }: IpAddress
serverPort {. serverPort {.
defaultValue: 5052 defaultValue: 5052
@ -29,7 +38,7 @@ type
abbr: "n" abbr: "n"
name: "count" }: uint name: "count" }: uint
proc main = proc main() {.raises: [ConfigurationError, HttpError, OSError].} =
let config = Config.load let config = Config.load
let serverAddress = initTAddress(config.serverIpAddress, config.serverPort) let serverAddress = initTAddress(config.serverIpAddress, config.serverPort)
let client = RestClientRef.new(serverAddress) let client = RestClientRef.new(serverAddress)
@ -43,10 +52,10 @@ proc main =
info.logTime(apiName): info.logTime(apiName):
for slot in config.startSlot ..< (config.startSlot + config.requestsCount): for slot in config.startSlot ..< (config.startSlot + config.requestsCount):
let ident = StateIdent(kind: StateQueryKind.Slot, slot: slot.Slot) let ident = StateIdent(kind: StateQueryKind.Slot, slot: slot.Slot)
discard waitFor client.`apiNameIdent`(ident) discard waitFor noCancel client.`apiNameIdent`(ident)
benchmark(getStateRoot) benchmark(getStateRoot)
benchmark(getStateFork) benchmark(getStateForkPlain)
benchmark(getStateFinalityCheckpoints) benchmark(getStateFinalityCheckpoints)
benchmark(getStateValidatorBalances) benchmark(getStateValidatorBalances)
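The benchmark's config switch from ValidIpAddress to std/net's IpAddress mainly changes how the default address is produced. A minimal standalone sketch of the new default-value expression, outside of confutils:

```nim
import std/net

# parseIpAddress comes from std/net and yields an IpAddress, which is what the
# benchmark's `--address` option now defaults to.
let defaultAddress = parseIpAddress("127.0.0.1")
echo defaultAddress         # 127.0.0.1
echo defaultAddress.family  # IPv4
```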

ci/Jenkinsfile
View File

@ -183,5 +183,5 @@ def getAgentLabel() {
} }
def nimCommitForJob() { def nimCommitForJob() {
return JOB_NAME.contains('nimv2') ? 'upstream/version-2-0' : '' return JOB_NAME.contains('nimv2') ? 'v2.0.6' : ''
} }

View File

@ -187,9 +187,6 @@ switch("warning", "CaseTransition:off")
# do its (N)RVO pass: https://github.com/nim-lang/RFCs/issues/230 # do its (N)RVO pass: https://github.com/nim-lang/RFCs/issues/230
switch("warning", "ObservableStores:off") switch("warning", "ObservableStores:off")
# Too many false positives for "Warning: method has lock level <unknown>, but another method has 0 [LockLevel]"
switch("warning", "LockLevel:off")
# Too many right now to read compiler output. Warnings are legitimate, but # Too many right now to read compiler output. Warnings are legitimate, but
# should be fixed out-of-band of `unstable` branch. # should be fixed out-of-band of `unstable` branch.
switch("warning", "BareExcept:off") switch("warning", "BareExcept:off")
@ -218,7 +215,8 @@ put("server.always", "-fno-lto")
put("assembly.always", "-fno-lto") put("assembly.always", "-fno-lto")
# Secp256k1 # Secp256k1
put("secp256k1.always", "-fno-lto") # -fomit-frame-pointer for https://github.com/status-im/nimbus-eth2/issues/6324
put("secp256k1.always", "-fno-lto -fomit-frame-pointer")
# BearSSL - only RNGs # BearSSL - only RNGs
put("aesctr_drbg.always", "-fno-lto") put("aesctr_drbg.always", "-fno-lto")

View File

@ -6,7 +6,7 @@ This is a WIP document to explain the attestation flows.
It is important to distinguish attestation `validation` from attestation `verification`. It is important to distinguish attestation `validation` from attestation `verification`.
- Attestation `validation` is defined in the P2P specs. Validated attestations can be forwarded on GossipSub. - Attestation `validation` is defined in the P2P specs. Validated attestations can be forwarded on GossipSub.
- Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
- Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block. - Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block.
- https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations

View File

@ -9,7 +9,7 @@ Important distinction:
https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block. https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block.
A validated block can be forwarded on gossipsub. A validated block can be forwarded on gossipsub.
- and we distinguish `verification` which is defined in consensus specs: - and we distinguish `verification` which is defined in consensus specs:
https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#block-processing https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#block-processing
A block needs to be verified to enter fork choice, the DAG and the BeaconChainDB A block needs to be verified to enter fork choice, the DAG and the BeaconChainDB
In particular in terms of costly checks validating a block only requires checking: In particular in terms of costly checks validating a block only requires checking:

View File

@ -183,7 +183,7 @@ Each era is identified by when it ends. Thus, the genesis era is era `0`, follow
`.era` file names follow a simple convention: `<config-name>-<era-number>-<era-count>-<short-historical-root>.era`: `.era` file names follow a simple convention: `<config-name>-<era-number>-<era-count>-<short-historical-root>.era`:
* `config-name` is the `CONFIG_NAME` field of the runtime configuration (`mainnet`, `prater`, `sepolia`, `holesky`, etc) * `config-name` is the `CONFIG_NAME` field of the runtime configuration (`mainnet`, `sepolia`, `holesky`, etc)
* `era-number` is the number of the _first_ era stored in the file - for example, the genesis era file has number 0 - as a 5-digit 0-filled decimal integer * `era-number` is the number of the _first_ era stored in the file - for example, the genesis era file has number 0 - as a 5-digit 0-filled decimal integer
* `short-era-root` is the first 4 bytes of the last historical root in the _last_ state in the era file, lower-case hex-encoded (8 characters), except the genesis era which instead uses the `genesis_validators_root` field from the genesis state. * `short-era-root` is the first 4 bytes of the last historical root in the _last_ state in the era file, lower-case hex-encoded (8 characters), except the genesis era which instead uses the `genesis_validators_root` field from the genesis state.
* The root is available as `state.historical_roots[era - 1]` except for genesis, which is `state.genesis_validators_root` * The root is available as `state.historical_roots[era - 1]` except for genesis, which is `state.genesis_validators_root`
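Building on the naming convention above, a hypothetical helper that renders such a name (the era-count padding and the example root bytes are assumptions for illustration, not taken from the text):

```nim
import std/[sequtils, strformat, strutils]

func eraFileName(configName: string; eraNumber, eraCount: int;
                 shortRoot: array[4, byte]): string =
  # <config-name>-<era-number>-<era-count>-<short-root>.era, numbers zero-padded
  # to 5 digits and the root rendered as 8 lower-case hex characters.
  let hexRoot = shortRoot.mapIt(it.toHex(2).toLowerAscii).join()
  &"{configName}-{eraNumber:05}-{eraCount:05}-{hexRoot}.era"

when isMainModule:
  # Example root prefix only; real files use the roots described above.
  echo eraFileName("mainnet", 0, 1, [0x4b'u8, 0x36, 0x3d, 0xb9])
  # mainnet-00000-00001-4b363db9.era
```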

View File

@ -66,7 +66,7 @@ watchdog==2.1.9
# via mkdocs # via mkdocs
wheel==0.38.1 wheel==0.38.1
# via pip-tools # via pip-tools
zipp==3.8.1 zipp==3.19.1
# via importlib-metadata # via importlib-metadata
# The following packages are considered to be unsafe in a requirements file: # The following packages are considered to be unsafe in a requirements file:

View File

@ -104,7 +104,7 @@ The following sections explain how to do this for certain EL clients.
## Running the light client ## Running the light client
The light client starts syncing from a trusted block. The light client starts syncing from a trusted block.
This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client. This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client.
### 1. Obtaining a trusted block root ### 1. Obtaining a trusted block root
@ -186,7 +186,7 @@ INF 2022-11-21 18:04:03.982+01:00 New LC optimistic header opt
``` ```
!!! note !!! note
The [light client protocol](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/altair/light-client/sync-protocol.md) depends on consensus layer (CL) full nodes to serve additional data. The [light client protocol](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md) depends on consensus layer (CL) full nodes to serve additional data.
As this is a new protocol, not all implementations are supporting it yet. As this is a new protocol, not all implementations are supporting it yet.
Therefore, it may take several minutes to discover supporting peers, during which no log messages may be produced. Therefore, it may take several minutes to discover supporting peers, during which no log messages may be produced.

View File

@ -135,7 +135,7 @@ If you are already using a threshold signing setup (e.g. based on Vouch and Dirk
The verifying Web3Signer is an experimental extension to the [Web3Signer protocol](https://consensys.github.io/web3signer/web3signer-eth2.html#tag/Signing/operation/ETH2_SIGN) which allows the remote signer to verify certain details of the signed blocks before creating a signature (for example, the signer may require the signed block to have a particular fee recipient value). The verifying Web3Signer is an experimental extension to the [Web3Signer protocol](https://consensys.github.io/web3signer/web3signer-eth2.html#tag/Signing/operation/ETH2_SIGN) which allows the remote signer to verify certain details of the signed blocks before creating a signature (for example, the signer may require the signed block to have a particular fee recipient value).
To enable this use case, the `BLOCK_V2` request type of the `/api/v1/eth2/sign/{identifier}` endpoint is extended with an additional array field named `proofs`. The array consists of objects with the properties `index`, `proof` and `value`, where `index` is an arbitrary [generalized index](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/ssz/merkle-proofs.md#generalized-merkle-tree-index) of any property nested under the block body and `proof` is its corresponding Merkle proof against the block body root included in the request. The `value` property is optional and it is included only when the SSZ hash of the field included in the Merkle proof doesn't match its value. To enable this use case, the `BLOCK_V2` request type of the `/api/v1/eth2/sign/{identifier}` endpoint is extended with an additional array field named `proofs`. The array consists of objects with the properties `index`, `proof` and `value`, where `index` is an arbitrary [generalized index](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md#generalized-merkle-tree-index) of any property nested under the block body and `proof` is its corresponding Merkle proof against the block body root included in the request. The `value` property is optional and it is included only when the SSZ hash of the field included in the Merkle proof doesn't match its value.
Since the generalized index of a particular field may change in a hard-fork, in the remote keystore format the proven fields are usually specified by their name: Since the generalized index of a particular field may change in a hard-fork, in the remote keystore format the proven fields are usually specified by their name:

View File

@ -383,6 +383,7 @@ proc cmdDumpState(conf: DbConf) =
bellatrixState = (ref bellatrix.HashedBeaconState)() bellatrixState = (ref bellatrix.HashedBeaconState)()
capellaState = (ref capella.HashedBeaconState)() capellaState = (ref capella.HashedBeaconState)()
denebState = (ref deneb.HashedBeaconState)() denebState = (ref deneb.HashedBeaconState)()
electraState = (ref electra.HashedBeaconState)()
for stateRoot in conf.stateRoot: for stateRoot in conf.stateRoot:
if shouldShutDown: quit QuitSuccess if shouldShutDown: quit QuitSuccess
@ -401,6 +402,7 @@ proc cmdDumpState(conf: DbConf) =
doit(bellatrixState[]) doit(bellatrixState[])
doit(capellaState[]) doit(capellaState[])
doit(denebState[]) doit(denebState[])
doit(electraState[])
echo "Couldn't load ", stateRoot echo "Couldn't load ", stateRoot

View File

@ -379,9 +379,9 @@ proc createEnr(rng: var HmacDrbgContext,
bootstrapEnr = enr.Record.init( bootstrapEnr = enr.Record.init(
1, # sequence number 1, # sequence number
networkKeys.seckey.asEthKey, networkKeys.seckey.asEthKey,
some(address), Opt.some(address),
some(port), Opt.some(port),
some(port), Opt.some(port),
[ [
toFieldPair(enrForkIdField, forkId), toFieldPair(enrForkIdField, forkId),
toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets)) toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets))

Some files were not shown because too many files have changed in this diff.