Merge branch 'stable' into feat/eip-7495

commit 9248ce02e2
@@ -0,0 +1,9 @@
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#   http://www.apache.org/licenses/LICENSE-2.0)
+# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#   http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or distributed except
+# according to those terms.
+vendor/*
@@ -35,18 +35,8 @@ jobs:
             cpu: amd64
           - os: windows
             cpu: amd64
-        branch: [~, upstream/version-2-0]
+        branch: [~]
-        exclude:
-          - target:
-              os: macos
-            branch: upstream/version-2-0
-          - target:
-              os: windows
-            branch: upstream/version-2-0
         include:
-          - branch: upstream/version-2-0
-            branch-short: version-2-0
-            nimflags-extra: --mm:refc
           - target:
               os: linux
             builder: ['self-hosted','ubuntu-22.04']
@@ -88,10 +88,10 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 1/1 Fail: 0/1 Skip: 0/1
 ## Blinded block conversions
 ```diff
-+ Bellatrix toSignedBlindedBlock OK
++ Bellatrix toSignedBlindedBeaconBlock OK
-+ Capella toSignedBlindedBlock OK
++ Capella toSignedBlindedBeaconBlock OK
-+ Deneb toSignedBlindedBlock OK
++ Deneb toSignedBlindedBeaconBlock OK
-+ Electra toSignedBlindedBlock OK
++ Electra toSignedBlindedBeaconBlock OK
 ```
 OK: 4/4 Fail: 0/4 Skip: 0/4
 ## Block pool altair processing [Preset: mainnet]
@@ -464,16 +464,20 @@ OK: 5/5 Fail: 0/5 Skip: 0/5
 + URL parsing OK
 ```
 OK: 5/5 Fail: 0/5 Skip: 0/5
-## Eth1 monitor
+## Engine API conversions
 ```diff
-+ Deposits chain OK
-+ Rewrite URLs OK
 + Roundtrip engine RPC V1 and bellatrix ExecutionPayload representations OK
 + Roundtrip engine RPC V2 and capella ExecutionPayload representations OK
 + Roundtrip engine RPC V3 and deneb ExecutionPayload representations OK
 + Roundtrip engine RPC V4 and electra ExecutionPayload representations OK
 ```
-OK: 6/6 Fail: 0/6 Skip: 0/6
+OK: 4/4 Fail: 0/4 Skip: 0/4
+## Eth1 monitor
+```diff
++ Deposits chain OK
++ Rewrite URLs OK
+```
+OK: 2/2 Fail: 0/2 Skip: 0/2
 ## Eth2 specific discovery tests
 ```diff
 + Invalid attnets field OK
@@ -833,9 +837,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 ## Spec helpers
 ```diff
 + build_proof - BeaconState OK
++ hypergeom_cdf OK
 + integer_squareroot OK
 ```
-OK: 2/2 Fail: 0/2 Skip: 0/2
+OK: 3/3 Fail: 0/3 Skip: 0/3
 ## Specific field types
 ```diff
 + root update OK
@@ -932,10 +937,10 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
 + Dynamic validator set: updateDynamicValidators() test OK
 ```
 OK: 4/4 Fail: 0/4 Skip: 0/4
-## ValidatorPubKey Bloom filter
+## ValidatorPubKey bucket sort
 ```diff
-+ incremental construction with no false positives/negatives OK
++ incremental construction OK
-+ one-shot construction with no false positives/negatives OK
++ one-shot construction OK
 ```
 OK: 2/2 Fail: 0/2 Skip: 0/2
 ## Zero signature sanity checks
@@ -1033,4 +1038,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 9/9 Fail: 0/9 Skip: 0/9

 ---TOTAL---
-OK: 690/695 Fail: 0/695 Skip: 5/695
+OK: 691/696 Fail: 0/696 Skip: 5/696
CHANGELOG.md
@@ -1,3 +1,36 @@
+2024-08-29 v24.8.0
+==================
+
+Nimbus `v24.8.0` is a `low-urgency` release with beacon API improvements and fixes.
+
+### Improvements
+
+* Increase speed of processing blocks with deposits by 25%:
+  https://github.com/status-im/nimbus-eth2/pull/6469
+
+* Avoid running light client sync in background when node is synced:
+  https://github.com/status-im/nimbus-eth2/pull/6505
+
+* Add additional Sepolia bootnode:
+  https://github.com/status-im/nimbus-eth2/pull/6490
+
+### Fixes
+
+* Add timeouts to failed execution layer requests:
+  https://github.com/status-im/nimbus-eth2/pull/6441
+
+* Use correct fork digest when broadcasting blob sidecars, sync committee, and sync contribution messages:
+  https://github.com/status-im/nimbus-eth2/pull/6440
+
+* Fix Holesky genesis state being downloaded multiple times:
+  https://github.com/status-im/nimbus-eth2/pull/6452
+
+* Check blob versioned hashes when optimistic syncing:
+  https://github.com/status-im/nimbus-eth2/pull/6501
+
+* Increase trusted node sync state downloading timeout to 120 seconds:
+  https://github.com/status-im/nimbus-eth2/pull/6487
+
 2024-07-29 v24.7.0
 ==================

@@ -2487,9 +2487,12 @@ OK: 12/12 Fail: 0/12 Skip: 0/12
 + Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK
 + Pending consolidations - basic_pending_consolidation [Preset: mainnet] OK
 + Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: mainnet] OK
++ Pending consolidations - pending_consolidation_compounding_creds [Preset: mainnet] OK
++ Pending consolidations - pending_consolidation_future_epoch [Preset: mainnet] OK
++ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: mainnet] OK
 + Pending consolidations - skip_consolidation_when_source_slashed [Preset: mainnet] OK
 ```
-OK: 4/4 Fail: 0/4 Skip: 0/4
+OK: 7/7 Fail: 0/7 Skip: 0/7
 ## EF - Electra - Epoch Processing - RANDAO mixes reset [Preset: mainnet]
 ```diff
 + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK
@@ -2561,13 +2564,15 @@ OK: 5/5 Fail: 0/5 Skip: 0/5
 + EF - Electra - Fork - electra_fork_random_low_balances [Preset: mainnet] OK
 + EF - Electra - Fork - electra_fork_random_misc_balances [Preset: mainnet] OK
 + EF - Electra - Fork - fork_base_state [Preset: mainnet] OK
++ EF - Electra - Fork - fork_has_compounding_withdrawal_credential [Preset: mainnet] OK
 + EF - Electra - Fork - fork_many_next_epoch [Preset: mainnet] OK
 + EF - Electra - Fork - fork_next_epoch [Preset: mainnet] OK
 + EF - Electra - Fork - fork_next_epoch_with_block [Preset: mainnet] OK
++ EF - Electra - Fork - fork_pre_activation [Preset: mainnet] OK
 + EF - Electra - Fork - fork_random_low_balances [Preset: mainnet] OK
 + EF - Electra - Fork - fork_random_misc_balances [Preset: mainnet] OK
 ```
-OK: 12/12 Fail: 0/12 Skip: 0/12
+OK: 14/14 Fail: 0/14 Skip: 0/14
 ## EF - Electra - Operations - Attestation [Preset: mainnet]
 ```diff
 + [Invalid] EF - Electra - Operations - Attestation - invalid_after_max_inclusion_slot OK
@@ -3180,8 +3185,12 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + Light client - Single merkle proof - mainnet/deneb/light_client/single_merkle_proof/Beacon OK
 + Light client - Single merkle proof - mainnet/deneb/light_client/single_merkle_proof/Beacon OK
 + Light client - Single merkle proof - mainnet/deneb/light_client/single_merkle_proof/Beacon OK
++ Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK
++ Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK
++ Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK
++ Light client - Single merkle proof - mainnet/electra/light_client/single_merkle_proof/Beac OK
 ```
-OK: 14/14 Fail: 0/14 Skip: 0/14
+OK: 18/18 Fail: 0/18 Skip: 0/18
 ## EF - Merkle proof [Preset: mainnet]
 ```diff
   Merkle proof - Single merkle proof - eip7594 Skip
@@ -3189,8 +3198,12 @@ OK: 14/14 Fail: 0/14 Skip: 0/14
 + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK
 + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK
 + Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK
++ Merkle proof - Single merkle proof - mainnet/electra/merkle_proof/single_merkle_proof/Beac OK
++ Merkle proof - Single merkle proof - mainnet/electra/merkle_proof/single_merkle_proof/Beac OK
++ Merkle proof - Single merkle proof - mainnet/electra/merkle_proof/single_merkle_proof/Beac OK
++ Merkle proof - Single merkle proof - mainnet/electra/merkle_proof/single_merkle_proof/Beac OK
 ```
-OK: 4/5 Fail: 0/5 Skip: 1/5
+OK: 8/9 Fail: 0/9 Skip: 1/9
 ## EF - Phase 0 - Epoch Processing - Effective balance updates [Preset: mainnet]
 ```diff
 + Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK
@@ -3693,4 +3706,4 @@ OK: 69/88 Fail: 0/88 Skip: 19/88
 OK: 3/3 Fail: 0/3 Skip: 0/3

 ---TOTAL---
-OK: 2971/2991 Fail: 0/2991 Skip: 20/2991
+OK: 2984/3004 Fail: 0/3004 Skip: 20/3004
@@ -2598,9 +2598,12 @@ OK: 12/12 Fail: 0/12 Skip: 0/12
 + Pending consolidations - all_consolidation_cases_together [Preset: minimal] OK
 + Pending consolidations - basic_pending_consolidation [Preset: minimal] OK
 + Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: minimal] OK
++ Pending consolidations - pending_consolidation_compounding_creds [Preset: minimal] OK
++ Pending consolidations - pending_consolidation_future_epoch [Preset: minimal] OK
++ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: minimal] OK
 + Pending consolidations - skip_consolidation_when_source_slashed [Preset: minimal] OK
 ```
-OK: 4/4 Fail: 0/4 Skip: 0/4
+OK: 7/7 Fail: 0/7 Skip: 0/7
 ## EF - Electra - Epoch Processing - RANDAO mixes reset [Preset: minimal]
 ```diff
 + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK
@@ -2689,14 +2692,16 @@ OK: 5/5 Fail: 0/5 Skip: 0/5
 + EF - Electra - Fork - electra_fork_random_low_balances [Preset: minimal] OK
 + EF - Electra - Fork - electra_fork_random_misc_balances [Preset: minimal] OK
 + EF - Electra - Fork - fork_base_state [Preset: minimal] OK
++ EF - Electra - Fork - fork_has_compounding_withdrawal_credential [Preset: minimal] OK
 + EF - Electra - Fork - fork_many_next_epoch [Preset: minimal] OK
 + EF - Electra - Fork - fork_next_epoch [Preset: minimal] OK
 + EF - Electra - Fork - fork_next_epoch_with_block [Preset: minimal] OK
++ EF - Electra - Fork - fork_pre_activation [Preset: minimal] OK
 + EF - Electra - Fork - fork_random_large_validator_set [Preset: minimal] OK
 + EF - Electra - Fork - fork_random_low_balances [Preset: minimal] OK
 + EF - Electra - Fork - fork_random_misc_balances [Preset: minimal] OK
 ```
-OK: 14/14 Fail: 0/14 Skip: 0/14
+OK: 16/16 Fail: 0/16 Skip: 0/16
 ## EF - Electra - Operations - Attestation [Preset: minimal]
 ```diff
 + [Invalid] EF - Electra - Operations - Attestation - invalid_after_max_inclusion_slot OK
@@ -3345,40 +3350,55 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + Light client - Single merkle proof - minimal/deneb/light_client/single_merkle_proof/Beacon OK
 + Light client - Single merkle proof - minimal/deneb/light_client/single_merkle_proof/Beacon OK
 + Light client - Single merkle proof - minimal/deneb/light_client/single_merkle_proof/Beacon OK
++ Light client - Single merkle proof - minimal/electra/light_client/single_merkle_proof/Beac OK
++ Light client - Single merkle proof - minimal/electra/light_client/single_merkle_proof/Beac OK
++ Light client - Single merkle proof - minimal/electra/light_client/single_merkle_proof/Beac OK
++ Light client - Single merkle proof - minimal/electra/light_client/single_merkle_proof/Beac OK
 ```
-OK: 14/14 Fail: 0/14 Skip: 0/14
+OK: 18/18 Fail: 0/18 Skip: 0/18
 ## EF - Light client - Sync [Preset: minimal]
 ```diff
 + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/advance_finality_witho OK
 + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/capella_store_with_leg OK
 + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/deneb_store_with_legac OK
++ Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/electra_store_with_leg OK
 + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/light_client_sync OK
 + Light client - Sync - minimal/altair/light_client/sync/pyspec_tests/supply_sync_committee_ OK
 + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/advance_finality_wi OK
 + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/capella_deneb_fork OK
++ Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/capella_electra_for OK
 + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/capella_fork OK
 + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/capella_store_with_ OK
 + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/deneb_store_with_le OK
++ Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/electra_store_with_ OK
 + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/light_client_sync OK
 + Light client - Sync - minimal/bellatrix/light_client/sync/pyspec_tests/supply_sync_committ OK
 + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/advance_finality_with OK
++ Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/deneb_electra_fork OK
 + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/deneb_fork OK
 + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/deneb_store_with_lega OK
++ Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/electra_store_with_le OK
 + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/light_client_sync OK
 + Light client - Sync - minimal/capella/light_client/sync/pyspec_tests/supply_sync_committee OK
 + Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/advance_finality_withou OK
++ Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/electra_fork OK
++ Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/electra_store_with_lega OK
 + Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/light_client_sync OK
 + Light client - Sync - minimal/deneb/light_client/sync/pyspec_tests/supply_sync_committee_f OK
++ Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/advance_finality_with OK
++ Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/light_client_sync OK
++ Light client - Sync - minimal/electra/light_client/sync/pyspec_tests/supply_sync_committee OK
 ```
-OK: 20/20 Fail: 0/20 Skip: 0/20
+OK: 30/30 Fail: 0/30 Skip: 0/30
 ## EF - Light client - Update ranking [Preset: minimal]
 ```diff
 + Light client - Update ranking - minimal/altair/light_client/update_ranking/pyspec_tests/up OK
 + Light client - Update ranking - minimal/bellatrix/light_client/update_ranking/pyspec_tests OK
 + Light client - Update ranking - minimal/capella/light_client/update_ranking/pyspec_tests/u OK
 + Light client - Update ranking - minimal/deneb/light_client/update_ranking/pyspec_tests/upd OK
++ Light client - Update ranking - minimal/electra/light_client/update_ranking/pyspec_tests/u OK
 ```
-OK: 4/4 Fail: 0/4 Skip: 0/4
+OK: 5/5 Fail: 0/5 Skip: 0/5
 ## EF - Merkle proof [Preset: minimal]
 ```diff
   Merkle proof - Single merkle proof - eip7594 Skip
@@ -3386,8 +3406,12 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK
 + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK
 + Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK
++ Merkle proof - Single merkle proof - minimal/electra/merkle_proof/single_merkle_proof/Beac OK
++ Merkle proof - Single merkle proof - minimal/electra/merkle_proof/single_merkle_proof/Beac OK
++ Merkle proof - Single merkle proof - minimal/electra/merkle_proof/single_merkle_proof/Beac OK
++ Merkle proof - Single merkle proof - minimal/electra/merkle_proof/single_merkle_proof/Beac OK
 ```
-OK: 4/5 Fail: 0/5 Skip: 1/5
+OK: 8/9 Fail: 0/9 Skip: 1/9
 ## EF - Phase 0 - Epoch Processing - Effective balance updates [Preset: minimal]
 ```diff
 + Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK
@@ -4019,4 +4043,4 @@ OK: 185/207 Fail: 0/207 Skip: 22/207
 OK: 3/3 Fail: 0/3 Skip: 0/3

 ---TOTAL---
-OK: 3266/3289 Fail: 0/3289 Skip: 23/3289
+OK: 3290/3313 Fail: 0/3313 Skip: 23/3313
@@ -0,0 +1,47 @@
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed under either of
+# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#   http://www.apache.org/licenses/LICENSE-2.0)
+# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#   http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or distributed except
+# according to those terms.
+
+FROM debian:testing-slim AS build
+
+SHELL ["/bin/bash", "-c"]
+
+RUN apt-get clean && apt update \
+    && apt -y install build-essential git-lfs
+
+RUN ldd --version ldd
+
+ADD . /root/nimbus-eth2
+
+RUN cd /root/nimbus-eth2 \
+    && make -j$(nproc) update \
+    && make -j$(nproc) V=1 NIMFLAGS="-d:const_preset=mainnet -d:disableMarchNative" LOG_LEVEL=TRACE nimbus_beacon_node
+
+
+# --------------------------------- #
+# Starting new image to reduce size #
+# --------------------------------- #
+FROM debian:testing-slim as deploy
+
+SHELL ["/bin/bash", "-c"]
+RUN apt-get clean && apt update \
+    && apt -y install build-essential
+RUN apt update && apt -y upgrade
+
+RUN ldd --version ldd
+
+RUN rm -rf /home/user/nimbus-eth2/build/nimbus_beacon_node
+
+# "COPY" creates new image layers, so we cram all we can into one command
+COPY --from=build /root/nimbus-eth2/build/nimbus_beacon_node /home/user/nimbus-eth2/build/nimbus_beacon_node
+
+ENV PATH="/home/user/nimbus-eth2/build:${PATH}"
+ENTRYPOINT ["nimbus_beacon_node"]
+WORKDIR /home/user/nimbus-eth2/build
+
+STOPSIGNAL SIGINT
@@ -130,7 +130,7 @@ type
     current_sync_committee*: SyncCommittee # [New in Altair]
     next_sync_committee*: SyncCommittee # [New in Altair]

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconstate
 # Memory-representation-equivalent to a Bellatrix BeaconState for in-place SSZ
 # reading and writing
 BellatrixBeaconStateNoImmutableValidators* = object
@@ -27,7 +27,7 @@ type
     ## which blocks are valid - in particular, blocks are not valid if they
     ## come from the future as seen from the local clock.
     ##
-    ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#fork-choice
+    ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#fork-choice
     ##
     # TODO consider NTP and network-adjusted timestamps as outlined here:
     # https://ethresear.ch/t/network-adjusted-timestamps/4187
@@ -52,9 +52,15 @@ proc initLightClient*(
     optimisticProcessor = initOptimisticProcessor(
       getBeaconTime, optimisticHandler)

+    shouldInhibitSync = func(): bool =
+      if node.syncManager != nil:
+        not node.syncManager.inProgress # No LC sync needed if DAG is in sync
+      else:
+        false
     lightClient = createLightClient(
       node.network, rng, config, cfg, forkDigests, getBeaconTime,
-      genesis_validators_root, LightClientFinalizationMode.Strict)
+      genesis_validators_root, LightClientFinalizationMode.Strict,
+      shouldInhibitSync = shouldInhibitSync)

   if config.syncLightClient:
     proc onOptimisticHeader(
@@ -1,49 +0,0 @@
-# beacon_chain
-# Copyright (c) 2024 Status Research & Development GmbH
-# Licensed and distributed under either of
-#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
-#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
-# at your option. This file may not be copied, modified, or distributed except according to those terms.
-
-{.push raises: [].}
-
-import "."/spec/crypto
-
-from stew/bitops2 import getBit, setBit
-from "."/spec/datatypes/base import Validator, pubkey
-from "."/spec/helpers import bytes_to_uint32
-
-const
-  # https://hur.st/bloomfilter/?n=4M&p=&m=8MiB&k=
-  pubkeyBloomFilterScale = 23 # 21 too small, 22 borderline, 24 also ok
-
-type
-  PubkeyBloomFilter* = object
-    data: array[1 shl pubkeyBloomFilterScale, byte]
-
-iterator bloomFilterHashes(pubkey: ValidatorPubKey): auto =
-  const pubkeyBloomFilterMask = (1 shl pubkeyBloomFilterScale) - 1
-  for r in countup(0'u32, 20'u32, 4'u32):
-    # ValidatorPubKeys have fairly uniform entropy; using enough hash
-    # functions also reduces risk of low-entropy portions
-    yield pubkey.blob.toOpenArray(r, r+3).bytes_to_uint32 and
-      pubkeyBloomFilterMask
-
-template incl*(bloomFilter: var PubkeyBloomFilter, pubkey: ValidatorPubKey) =
-  for bloomFilterHash in bloomFilterHashes(pubkey):
-    setBit(bloomFilter.data, bloomFilterHash)
-
-func constructBloomFilter*(x: openArray[Validator]): auto =
-  let res = new PubkeyBloomFilter
-  for m in x:
-    incl(res[], m.pubkey)
-  res
-
-func mightContain*(
-    bloomFilter: PubkeyBloomFilter, pubkey: ValidatorPubKey): bool =
-  # Might return false positive, but never false negative
-  for bloomFilterHash in bloomFilterHashes(pubkey):
-    if not getBit(bloomFilter.data, bloomFilterHash):
-      return false
-
-  true
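For reference, the sizing note in the module removed above (the hur.st calculator link with n = 4M keys and m = 8 MiB, and the remark that a scale of 21 is "too small") follows the standard Bloom-filter false-positive estimate. The worked figures below are only an illustration of that formula — k = 6 corresponds to the six 4-byte indices the deleted iterator derives — and are not taken from the repository or its benchmarks.

```latex
% Textbook Bloom-filter false-positive estimate (illustrative; not from the repo)
% m filter bits, k hash functions, n inserted keys:
p \approx \left(1 - e^{-kn/m}\right)^{k}
% e.g. m = 2^{26}\ \text{bits (8 MiB)},\ k = 6,\ n = 4\times 10^{6}
% gives p \approx \left(1 - e^{-0.36}\right)^{6} \approx 7\times 10^{-4}
```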
@@ -32,8 +32,6 @@ import

 from std/os import getHomeDir, parentDir, `/`
 from std/strutils import parseBiggestUInt, replace
-from fork_choice/fork_choice_types
-  import ForkChoiceVersion
 from consensus_object_pools/block_pools_types_light_client
   import LightClientDataImportMode
@@ -676,12 +674,6 @@ type
       desc: "Bandwidth estimate for the node (bits per second)"
       name: "debug-bandwidth-estimate" .}: Option[Natural]

-    forkChoiceVersion* {.
-      hidden
-      desc: "Forkchoice version to use. " &
-            "Must be one of: stable"
-      name: "debug-forkchoice-version" .}: Option[ForkChoiceVersion]
-
   of BNStartUpCmd.wallets:
     case walletsCmd* {.command.}: WalletsCmd
     of WalletsCmd.create:
@@ -104,7 +104,6 @@ declareGauge attestation_pool_block_attestation_packing_time,

 proc init*(T: type AttestationPool, dag: ChainDAGRef,
            quarantine: ref Quarantine,
-           forkChoiceVersion = ForkChoiceVersion.Stable,
            onAttestation: OnPhase0AttestationCallback = nil,
            onElectraAttestation: OnElectraAttestationCallback = nil): T =
   ## Initialize an AttestationPool from the dag `headState`
@@ -113,7 +112,7 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef,
   let finalizedEpochRef = dag.getFinalizedEpochRef()

   var forkChoice = ForkChoice.init(
-    finalizedEpochRef, dag.finalizedHead.blck, forkChoiceVersion)
+    finalizedEpochRef, dag.finalizedHead.blck)

   # Feed fork choice with unfinalized history - during startup, block pool only
   # keeps track of a single history so we just need to follow it
@@ -200,6 +199,7 @@ proc addForkChoiceVotes(
     error "Couldn't add attestation to fork choice, bug?", err = v.error()

 func candidateIdx(pool: AttestationPool, slot: Slot): Opt[int] =
+  static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len
   if slot >= pool.startingSlot and
       slot < (pool.startingSlot + pool.phase0Candidates.lenu64):
     Opt.some(int(slot mod pool.phase0Candidates.lenu64))
@@ -210,8 +210,8 @@ proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) =
   if wallSlot + 1 < pool.phase0Candidates.lenu64:
     return # Genesis

-  let
-    newStartingSlot = wallSlot + 1 - pool.phase0Candidates.lenu64
+  static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len
+  let newStartingSlot = wallSlot + 1 - pool.phase0Candidates.lenu64

   if newStartingSlot < pool.startingSlot:
     error "Current slot older than attestation pool view, clock reset?",
@@ -224,10 +224,12 @@ proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) =
   if newStartingSlot - pool.startingSlot >= pool.phase0Candidates.lenu64():
     # In case many slots passed since the last update, avoid iterating over
     # the same indices over and over
-    pool.phase0Candidates = default(type(pool.phase0Candidates))
+    pool.phase0Candidates.reset()
+    pool.electraCandidates.reset()
   else:
     for i in pool.startingSlot..newStartingSlot:
       pool.phase0Candidates[i.uint64 mod pool.phase0Candidates.lenu64].reset()
+      pool.electraCandidates[i.uint64 mod pool.electraCandidates.lenu64].reset()

   pool.startingSlot = newStartingSlot

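For context on the comment above about avoiding repeated iteration when many slots have passed: the pool keeps its phase0 and electra candidate tables in fixed-size rings indexed by `slot mod len` (see `candidateIdx`), and advancing the window only clears entries that fell out of range. The sketch below restates that windowing idea in isolation; the type and names are invented for illustration and are not nimbus-eth2 API.

```nim
# Illustrative only: a fixed-size per-slot ring like the candidate tables above.
# All names here are made up; this is not nimbus-eth2 code.
type
  SlotRing*[N: static int, T] = object
    startingSlot: uint64   # oldest slot currently covered by the ring
    slots: array[N, T]     # entry for slot `s` lives at index `s mod N`

func index*[N, T](r: SlotRing[N, T], slot: uint64): int =
  int(slot mod uint64(N))

proc advance*[N, T](r: var SlotRing[N, T], wallSlot: uint64) =
  ## Move the window forward so it ends at `wallSlot`, clearing only the
  ## entries that fall out of range (cf. `updateCurrent` above).
  if wallSlot + 1 < uint64(N):
    return                                  # window not yet full ("genesis")
  let newStart = wallSlot + 1 - uint64(N)
  if newStart <= r.startingSlot:
    return                                  # nothing expired
  if newStart - r.startingSlot >= uint64(N):
    reset(r.slots)                          # every entry is stale
  else:
    for s in r.startingSlot ..< newStart:
      reset(r.slots[r.index(s)])            # drop only the expired slots
  r.startingSlot = newStart
```

A lookup for a slot inside the window is then simply `ring.slots[ring.index(slot)]`, matching the `slot mod pool.phase0Candidates.lenu64` expression in `candidateIdx`.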
@@ -507,6 +509,7 @@ func covers*(
   if candidateIdx.isNone:
     return false

+  debugComment "foo"
   # needs to know more than attestationdata now
   #let attestation_data_root = hash_tree_root(data)
   #pool.electraCandidates[candidateIdx.get()].withValue(attestation_data_root, entry):
@@ -651,7 +654,8 @@ func score(
 proc check_attestation_compatible*(
     dag: ChainDAGRef,
     state: ForkyHashedBeaconState,
-    attestation: SomeAttestation | electra.Attestation | electra.TrustedAttestation): Result[void, cstring] =
+    attestation: SomeAttestation | electra.Attestation |
+      electra.TrustedAttestation): Result[void, cstring] =
   let
     targetEpoch = attestation.data.target.epoch
     compatibleRoot = state.dependent_root(targetEpoch.get_previous_epoch)
@@ -29,7 +29,8 @@ type
     block_root*: Eth2Digest
     indices*: seq[BlobIndex]

-  OnBlobSidecarCallback = proc(data: BlobSidecar) {.gcsafe, raises: [].}
+  OnBlobSidecarCallback = proc(
+    data: BlobSidecarInfoObject) {.gcsafe, raises: [].}

 func shortLog*(x: seq[BlobIndex]): string =
   "<" & x.mapIt($it).join(", ") & ">"
@@ -1178,7 +1178,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
     # should have `previous_version` set to `current_version` while
     # this doesn't happen to be the case in network that go through
     # regular hard-fork upgrades. See for example:
-    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
+    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#testing
     if stateFork.current_version != configFork.current_version:
       error "State from database does not match network, check --network parameter",
         tail = dag.tail, headRef, stateFork, configFork
@@ -1972,7 +1972,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
     prunedHeads = hlen - dag.heads.len,
     dagPruneDur = Moment.now() - startTick

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#helpers
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/sync/optimistic.md#helpers
 func is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool =
   let blck =
     if bid.slot <= dag.finalizedHead.slot:
@@ -255,7 +255,7 @@ proc initLightClientBootstrapForPeriod(
           forkyBlck.toLightClientHeader(lcDataFork))
         dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
           bid.slot, forkyState.data.build_proof(
-            lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get)
+            lcDataFork.current_sync_committee_gindex).get)
       else: raiseAssert "Unreachable"
   res

@@ -403,10 +403,10 @@ proc initLightClientUpdateForPeriod(
           attested_header: forkyBlck.toLightClientHeader(lcDataFork),
           next_sync_committee: forkyState.data.next_sync_committee,
           next_sync_committee_branch: forkyState.data.build_proof(
-            lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX).get,
+            lcDataFork.next_sync_committee_gindex).get,
           finality_branch:
             if finalizedBid.slot != FAR_FUTURE_SLOT:
-              forkyState.data.build_proof(lcDataFork.FINALIZED_ROOT_GINDEX).get
+              forkyState.data.build_proof(lcDataFork.finalized_root_gindex).get
             else:
               default(lcDataFork.FinalityBranch)))
       else: raiseAssert "Unreachable"
@@ -478,16 +478,16 @@ proc cacheLightClientData(
     bid = blck.toBlockId()
     cachedData = CachedLightClientData(
       current_sync_committee_branch: normalize_merkle_branch(
-        state.data.build_proof(lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get,
-        LightClientDataFork.high.CURRENT_SYNC_COMMITTEE_GINDEX),
+        state.data.build_proof(lcDataFork.current_sync_committee_gindex).get,
+        LightClientDataFork.high.current_sync_committee_gindex),
       next_sync_committee_branch: normalize_merkle_branch(
-        state.data.build_proof(lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX).get,
-        LightClientDataFork.high.NEXT_SYNC_COMMITTEE_GINDEX),
+        state.data.build_proof(lcDataFork.next_sync_committee_gindex).get,
+        LightClientDataFork.high.next_sync_committee_gindex),
       finalized_slot:
         state.data.finalized_checkpoint.epoch.start_slot,
       finality_branch: normalize_merkle_branch(
-        state.data.build_proof(lcDataFork.FINALIZED_ROOT_GINDEX).get,
-        LightClientDataFork.high.FINALIZED_ROOT_GINDEX),
+        state.data.build_proof(lcDataFork.finalized_root_gindex).get,
+        LightClientDataFork.high.finalized_root_gindex),
       current_period_best_update:
         current_period_best_update,
       latest_signature_slot:
@@ -553,7 +553,7 @@ proc assignLightClientData(
         next_sync_committee.get
       forkyObject.next_sync_committee_branch = normalize_merkle_branch(
         attested_data.next_sync_committee_branch,
-        lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX)
+        lcDataFork.next_sync_committee_gindex)
     else:
       doAssert next_sync_committee.isNone
   var finalized_slot = attested_data.finalized_slot
@@ -562,7 +562,7 @@ proc assignLightClientData(
       if finalized_slot == forkyObject.finalized_header.beacon.slot:
         forkyObject.finality_branch = normalize_merkle_branch(
           attested_data.finality_branch,
-          lcDataFork.FINALIZED_ROOT_GINDEX)
+          lcDataFork.finalized_root_gindex)
       elif finalized_slot < max(dag.tail.slot, dag.backfill.slot):
         forkyObject.finalized_header.reset()
         forkyObject.finality_branch.reset()
@@ -582,12 +582,12 @@ proc assignLightClientData(
       if finalized_slot == forkyObject.finalized_header.beacon.slot:
         forkyObject.finality_branch = normalize_merkle_branch(
           attested_data.finality_branch,
-          lcDataFork.FINALIZED_ROOT_GINDEX)
+          lcDataFork.finalized_root_gindex)
       elif finalized_slot == GENESIS_SLOT:
         forkyObject.finalized_header.reset()
         forkyObject.finality_branch = normalize_merkle_branch(
           attested_data.finality_branch,
-          lcDataFork.FINALIZED_ROOT_GINDEX)
+          lcDataFork.finalized_root_gindex)
       else:
         var fin_header = dag.getExistingLightClientHeader(finalized_bid)
         if fin_header.kind == LightClientDataFork.None:
@@ -599,7 +599,7 @@ proc assignLightClientData(
           forkyObject.finalized_header = fin_header.forky(lcDataFork)
           forkyObject.finality_branch = normalize_merkle_branch(
             attested_data.finality_branch,
-            lcDataFork.FINALIZED_ROOT_GINDEX)
+            lcDataFork.finalized_root_gindex)
   withForkyObject(obj):
     when lcDataFork > LightClientDataFork.None:
       forkyObject.sync_aggregate = sync_aggregate
@@ -726,7 +726,7 @@ proc createLightClientBootstrap(
     dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
       bid.slot, normalize_merkle_branch(
         dag.getLightClientData(bid).current_sync_committee_branch,
-        lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX))
+        lcDataFork.current_sync_committee_gindex))
   else: raiseAssert "Unreachable"
   ok()

@@ -1053,7 +1053,7 @@ proc getLightClientBootstrap(
       dag.lcDataStore.db.putHeader(header)
       dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
         slot, forkyState.data.build_proof(
-          lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX).get)
+          lcDataFork.current_sync_committee_gindex).get)
     else: raiseAssert "Unreachable"
   do: return default(ForkedLightClientBootstrap)

@@ -15,6 +15,7 @@ import
   ../beacon_clock,
   ./common_tools

+from ../el/engine_api_conversions import asBlockHash
 from ../spec/beaconstate import
   get_expected_withdrawals, has_eth1_withdrawal_credential
 from ../spec/datatypes/capella import Withdrawal
@@ -53,7 +53,7 @@ iterator get_beacon_committee*(
       committees_per_slot * SLOTS_PER_EPOCH
     ): yield (index_in_committee, idx)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_committee
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_beacon_committee
 func get_beacon_committee*(
     shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex):
     seq[ValidatorIndex] =
@@ -364,7 +364,7 @@ proc produceSyncAggregate*(

 proc isEpochLeadTime*(
     pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool =
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#sync-committee-subnet-stability
   # This ensures a uniform distribution without requiring additional state:
   # (1/4) = 1/4, 4 slots out
   # (3/4) * (1/3) = 1/4, 3 slots out
@@ -20,7 +20,7 @@ import
   ../spec/[eth2_merkleization, forks],
   ../networking/network_metadata,
   ".."/beacon_node_status,
-  "."/[eth1_chain, el_conf]
+  "."/[el_conf, engine_api_conversions, eth1_chain]

 from std/times import getTime, inSeconds, initTime, `-`
 from ../spec/engine_authentication import getSignedIatToken
@@ -40,6 +40,12 @@ type
   Int64LeBytes = DynamicBytes[8, 8]
   WithoutTimeout* = distinct int

+  SomeEnginePayloadWithValue =
+    BellatrixExecutionPayloadWithValue |
+    GetPayloadV2Response |
+    GetPayloadV3Response |
+    GetPayloadV4Response
+
 contract(DepositContract):
   proc deposit(pubkey: PubKeyBytes,
                withdrawalCredentials: WithdrawalCredentialsBytes,
@@ -198,16 +204,6 @@
                merkleTreeIndex: Int64LeBytes,
                j: JsonNode) {.gcsafe, raises: [].}

-  BellatrixExecutionPayloadWithValue* = object
-    executionPayload*: ExecutionPayloadV1
-    blockValue*: UInt256
-
-  SomeEnginePayloadWithValue =
-    BellatrixExecutionPayloadWithValue |
-    GetPayloadV2Response |
-    GetPayloadV3Response |
-    GetPayloadV4Response
-
 declareCounter failed_web3_requests,
   "Failed web3 requests"

@@ -376,340 +372,6 @@ template eth1ChainBlocks*(m: ELManager): Deque[Eth1Block] =
 # doAssert SECONDS_PER_ETH1_BLOCK * cfg.ETH1_FOLLOW_DISTANCE < GENESIS_DELAY,
 # "Invalid configuration: GENESIS_DELAY is set too low"

-func asConsensusWithdrawal(w: WithdrawalV1): capella.Withdrawal =
-  capella.Withdrawal(
-    index: w.index.uint64,
-    validator_index: w.validatorIndex.uint64,
-    address: ExecutionAddress(data: w.address.distinctBase),
-    amount: Gwei w.amount)
-
-func asEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 =
-  WithdrawalV1(
-    index: Quantity(w.index),
-    validatorIndex: Quantity(w.validator_index),
-    address: Address(w.address.data),
-    amount: Quantity(w.amount))
-
-func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1):
-    bellatrix.ExecutionPayload =
-  template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
-    bellatrix.Transaction.init(tt.distinctBase)
-
-  bellatrix.ExecutionPayload(
-    parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
-    feeRecipient:
-      ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
-    state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
-    receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
-    logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
-    prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
-    block_number: rpcExecutionPayload.blockNumber.uint64,
-    gas_limit: rpcExecutionPayload.gasLimit.uint64,
-    gas_used: rpcExecutionPayload.gasUsed.uint64,
-    timestamp: rpcExecutionPayload.timestamp.uint64,
-    extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
-    base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
-    block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
-    transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
-      mapIt(rpcExecutionPayload.transactions, it.getTransaction)))
-
-func asConsensusType*(payloadWithValue: BellatrixExecutionPayloadWithValue):
-    bellatrix.ExecutionPayloadForSigning =
-  bellatrix.ExecutionPayloadForSigning(
-    executionPayload: payloadWithValue.executionPayload.asConsensusType,
-    blockValue: payloadWithValue.blockValue)
-
-template maybeDeref[T](o: Opt[T]): T = o.get
-template maybeDeref[V](v: V): V = v
-
-func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1OrV2|ExecutionPayloadV2):
-    capella.ExecutionPayload =
-  template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
-    bellatrix.Transaction.init(tt.distinctBase)
-
-  capella.ExecutionPayload(
-    parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
-    feeRecipient:
-      ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
-    state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
-    receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
-    logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
-    prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
-    block_number: rpcExecutionPayload.blockNumber.uint64,
-    gas_limit: rpcExecutionPayload.gasLimit.uint64,
-    gas_used: rpcExecutionPayload.gasUsed.uint64,
-    timestamp: rpcExecutionPayload.timestamp.uint64,
-    extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
-    base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
-    block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
-    transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
-      mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
-    withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
-      mapIt(maybeDeref rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)))
-
-func asConsensusType*(payloadWithValue: engine_api.GetPayloadV2Response):
-    capella.ExecutionPayloadForSigning =
-  capella.ExecutionPayloadForSigning(
-    executionPayload: payloadWithValue.executionPayload.asConsensusType,
-    blockValue: payloadWithValue.blockValue)
-
-func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV3):
-    deneb.ExecutionPayload =
-  template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
-    bellatrix.Transaction.init(tt.distinctBase)
-
-  deneb.ExecutionPayload(
-    parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
-    feeRecipient:
-      ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
-    state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
-    receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
-    logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
-    prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
-    block_number: rpcExecutionPayload.blockNumber.uint64,
-    gas_limit: rpcExecutionPayload.gasLimit.uint64,
-    gas_used: rpcExecutionPayload.gasUsed.uint64,
-    timestamp: rpcExecutionPayload.timestamp.uint64,
-    extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
-    base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
-    block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
-    transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
-      mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
-    withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
|
|
||||||
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
|
|
||||||
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
|
|
||||||
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64)
|
|
||||||
|
|
||||||
func asConsensusType*(payload: engine_api.GetPayloadV3Response):
|
|
||||||
deneb.ExecutionPayloadForSigning =
|
|
||||||
deneb.ExecutionPayloadForSigning(
|
|
||||||
executionPayload: payload.executionPayload.asConsensusType,
|
|
||||||
blockValue: payload.blockValue,
|
|
||||||
# TODO
|
|
||||||
# The `mapIt` calls below are necessary only because we use different distinct
|
|
||||||
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
|
|
||||||
# Both are defined as `array[N, byte]` under the hood.
|
|
||||||
blobsBundle: deneb.BlobsBundle(
|
|
||||||
commitments: KzgCommitments.init(
|
|
||||||
payload.blobsBundle.commitments.mapIt(
|
|
||||||
kzg_abi.KzgCommitment(bytes: it.bytes))),
|
|
||||||
proofs: KzgProofs.init(
|
|
||||||
payload.blobsBundle.proofs.mapIt(
|
|
||||||
kzg_abi.KzgProof(bytes: it.bytes))),
|
|
||||||
blobs: Blobs.init(
|
|
||||||
payload.blobsBundle.blobs.mapIt(it.bytes))))
|
|
||||||
|
|
||||||
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
|
|
||||||
electra.ExecutionPayload =
|
|
||||||
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
|
|
||||||
bellatrix.Transaction.init(tt.distinctBase)
|
|
||||||
|
|
||||||
template getDepositRequest(
|
|
||||||
dr: DepositRequestV1): electra.DepositRequest =
|
|
||||||
electra.DepositRequest(
|
|
||||||
pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase),
|
|
||||||
withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest,
|
|
||||||
amount: dr.amount.Gwei,
|
|
||||||
signature: ValidatorSig(blob: dr.signature.distinctBase),
|
|
||||||
index: dr.index.uint64)
|
|
||||||
|
|
||||||
template getWithdrawalRequest(
|
|
||||||
wr: WithdrawalRequestV1): electra.WithdrawalRequest =
|
|
||||||
electra.WithdrawalRequest(
|
|
||||||
source_address: ExecutionAddress(data: wr.sourceAddress.distinctBase),
|
|
||||||
validator_pubkey: ValidatorPubKey(blob: wr.validatorPubkey.distinctBase),
|
|
||||||
amount: wr.amount.Gwei)
|
|
||||||
|
|
||||||
template getConsolidationRequest(
|
|
||||||
cr: ConsolidationRequestV1): electra.ConsolidationRequest =
|
|
||||||
electra.ConsolidationRequest(
|
|
||||||
source_address: ExecutionAddress(data: cr.sourceAddress.distinctBase),
|
|
||||||
source_pubkey: ValidatorPubKey(blob: cr.sourcePubkey.distinctBase),
|
|
||||||
target_pubkey: ValidatorPubKey(blob: cr.targetPubkey.distinctBase))
|
|
||||||
|
|
||||||
electra.ExecutionPayload(
|
|
||||||
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
|
|
||||||
feeRecipient:
|
|
||||||
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
|
|
||||||
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
|
|
||||||
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
|
|
||||||
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
|
|
||||||
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
|
|
||||||
block_number: rpcExecutionPayload.blockNumber.uint64,
|
|
||||||
gas_limit: rpcExecutionPayload.gasLimit.uint64,
|
|
||||||
gas_used: rpcExecutionPayload.gasUsed.uint64,
|
|
||||||
timestamp: rpcExecutionPayload.timestamp.uint64,
|
|
||||||
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(
|
|
||||||
rpcExecutionPayload.extraData.bytes),
|
|
||||||
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
|
|
||||||
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
|
|
||||||
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
|
|
||||||
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
|
|
||||||
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
|
|
||||||
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
|
|
||||||
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
|
|
||||||
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64,
|
|
||||||
deposit_requests:
|
|
||||||
List[electra.DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD].init(
|
|
||||||
mapIt(rpcExecutionPayload.depositRequests, it.getDepositRequest)),
|
|
||||||
withdrawal_requests: List[electra.WithdrawalRequest,
|
|
||||||
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD].init(
|
|
||||||
mapIt(rpcExecutionPayload.withdrawalRequests,
|
|
||||||
it.getWithdrawalRequest)),
|
|
||||||
consolidation_requests: List[electra.ConsolidationRequest,
|
|
||||||
Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD].init(
|
|
||||||
mapIt(rpcExecutionPayload.consolidationRequests,
|
|
||||||
it.getConsolidationRequest)))
|
|
||||||
|
|
||||||
func asConsensusType*(payload: engine_api.GetPayloadV4Response):
|
|
||||||
electra.ExecutionPayloadForSigning =
|
|
||||||
electra.ExecutionPayloadForSigning(
|
|
||||||
executionPayload: payload.executionPayload.asConsensusType,
|
|
||||||
blockValue: payload.blockValue,
|
|
||||||
# TODO
|
|
||||||
# The `mapIt` calls below are necessary only because we use different distinct
|
|
||||||
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
|
|
||||||
# Both are defined as `array[N, byte]` under the hood.
|
|
||||||
blobsBundle: deneb.BlobsBundle(
|
|
||||||
commitments: KzgCommitments.init(
|
|
||||||
payload.blobsBundle.commitments.mapIt(
|
|
||||||
kzg_abi.KzgCommitment(bytes: it.bytes))),
|
|
||||||
proofs: KzgProofs.init(
|
|
||||||
payload.blobsBundle.proofs.mapIt(
|
|
||||||
kzg_abi.KzgProof(bytes: it.bytes))),
|
|
||||||
blobs: Blobs.init(
|
|
||||||
payload.blobsBundle.blobs.mapIt(it.bytes))))
|
|
||||||
|
|
||||||
func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
|
|
||||||
ExecutionPayloadV1 =
|
|
||||||
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
|
|
||||||
TypedTransaction(tt.distinctBase)
|
|
||||||
|
|
||||||
engine_api.ExecutionPayloadV1(
|
|
||||||
parentHash: executionPayload.parent_hash.asBlockHash,
|
|
||||||
feeRecipient: Address(executionPayload.fee_recipient.data),
|
|
||||||
stateRoot: executionPayload.state_root.asBlockHash,
|
|
||||||
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
|
||||||
logsBloom:
|
|
||||||
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
|
|
||||||
prevRandao: executionPayload.prev_randao.asBlockHash,
|
|
||||||
blockNumber: Quantity(executionPayload.block_number),
|
|
||||||
gasLimit: Quantity(executionPayload.gas_limit),
|
|
||||||
gasUsed: Quantity(executionPayload.gas_used),
|
|
||||||
timestamp: Quantity(executionPayload.timestamp),
|
|
||||||
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
|
|
||||||
baseFeePerGas: executionPayload.base_fee_per_gas,
|
|
||||||
blockHash: executionPayload.block_hash.asBlockHash,
|
|
||||||
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction))
|
|
||||||
|
|
||||||
template toEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 =
|
|
||||||
WithdrawalV1(
|
|
||||||
index: Quantity(w.index),
|
|
||||||
validatorIndex: Quantity(w.validator_index),
|
|
||||||
address: Address(w.address.data),
|
|
||||||
amount: Quantity(w.amount))
|
|
||||||
|
|
||||||
func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload):
|
|
||||||
ExecutionPayloadV2 =
|
|
||||||
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
|
|
||||||
TypedTransaction(tt.distinctBase)
|
|
||||||
engine_api.ExecutionPayloadV2(
|
|
||||||
parentHash: executionPayload.parent_hash.asBlockHash,
|
|
||||||
feeRecipient: Address(executionPayload.fee_recipient.data),
|
|
||||||
stateRoot: executionPayload.state_root.asBlockHash,
|
|
||||||
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
|
||||||
logsBloom:
|
|
||||||
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
|
|
||||||
prevRandao: executionPayload.prev_randao.asBlockHash,
|
|
||||||
blockNumber: Quantity(executionPayload.block_number),
|
|
||||||
gasLimit: Quantity(executionPayload.gas_limit),
|
|
||||||
gasUsed: Quantity(executionPayload.gas_used),
|
|
||||||
timestamp: Quantity(executionPayload.timestamp),
|
|
||||||
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
|
|
||||||
baseFeePerGas: executionPayload.base_fee_per_gas,
|
|
||||||
blockHash: executionPayload.block_hash.asBlockHash,
|
|
||||||
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
|
|
||||||
withdrawals: mapIt(executionPayload.withdrawals, it.toEngineWithdrawal))
|
|
||||||
|
|
||||||
func asEngineExecutionPayload*(executionPayload: deneb.ExecutionPayload):
|
|
||||||
ExecutionPayloadV3 =
|
|
||||||
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
|
|
||||||
TypedTransaction(tt.distinctBase)
|
|
||||||
|
|
||||||
engine_api.ExecutionPayloadV3(
|
|
||||||
parentHash: executionPayload.parent_hash.asBlockHash,
|
|
||||||
feeRecipient: Address(executionPayload.fee_recipient.data),
|
|
||||||
stateRoot: executionPayload.state_root.asBlockHash,
|
|
||||||
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
|
||||||
logsBloom:
|
|
||||||
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
|
|
||||||
prevRandao: executionPayload.prev_randao.asBlockHash,
|
|
||||||
blockNumber: Quantity(executionPayload.block_number),
|
|
||||||
gasLimit: Quantity(executionPayload.gas_limit),
|
|
||||||
gasUsed: Quantity(executionPayload.gas_used),
|
|
||||||
timestamp: Quantity(executionPayload.timestamp),
|
|
||||||
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
|
|
||||||
baseFeePerGas: executionPayload.base_fee_per_gas,
|
|
||||||
blockHash: executionPayload.block_hash.asBlockHash,
|
|
||||||
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
|
|
||||||
withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
|
|
||||||
blobGasUsed: Quantity(executionPayload.blob_gas_used),
|
|
||||||
excessBlobGas: Quantity(executionPayload.excess_blob_gas))
|
|
||||||
|
|
||||||
func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
|
|
||||||
ExecutionPayloadV4 =
|
|
||||||
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
|
|
||||||
TypedTransaction(tt.distinctBase)
|
|
||||||
|
|
||||||
template getDepositRequest(
|
|
||||||
dr: electra.DepositRequest): DepositRequestV1 =
|
|
||||||
DepositRequestV1(
|
|
||||||
pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob),
|
|
||||||
withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data),
|
|
||||||
amount: dr.amount.Quantity,
|
|
||||||
signature: FixedBytes[RawSigSize](dr.signature.blob),
|
|
||||||
index: dr.index.Quantity)
|
|
||||||
|
|
||||||
template getWithdrawalRequest(
|
|
||||||
wr: electra.WithdrawalRequest): WithdrawalRequestV1 =
|
|
||||||
WithdrawalRequestV1(
|
|
||||||
sourceAddress: Address(wr.source_address.data),
|
|
||||||
validatorPubkey: FixedBytes[RawPubKeySize](wr.validator_pubkey.blob),
|
|
||||||
amount: wr.amount.Quantity)
|
|
||||||
|
|
||||||
template getConsolidationRequest(
|
|
||||||
cr: electra.ConsolidationRequest): ConsolidationRequestV1 =
|
|
||||||
ConsolidationRequestV1(
|
|
||||||
sourceAddress: Address(cr.source_address.data),
|
|
||||||
sourcePubkey: FixedBytes[RawPubKeySize](cr.source_pubkey.blob),
|
|
||||||
targetPubkey: FixedBytes[RawPubKeySize](cr.target_pubkey.blob))
|
|
||||||
|
|
||||||
engine_api.ExecutionPayloadV4(
|
|
||||||
parentHash: executionPayload.parent_hash.asBlockHash,
|
|
||||||
feeRecipient: Address(executionPayload.fee_recipient.data),
|
|
||||||
stateRoot: executionPayload.state_root.asBlockHash,
|
|
||||||
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
|
||||||
logsBloom:
|
|
||||||
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
|
|
||||||
prevRandao: executionPayload.prev_randao.asBlockHash,
|
|
||||||
blockNumber: Quantity(executionPayload.block_number),
|
|
||||||
gasLimit: Quantity(executionPayload.gas_limit),
|
|
||||||
gasUsed: Quantity(executionPayload.gas_used),
|
|
||||||
timestamp: Quantity(executionPayload.timestamp),
|
|
||||||
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
|
|
||||||
baseFeePerGas: executionPayload.base_fee_per_gas,
|
|
||||||
blockHash: executionPayload.block_hash.asBlockHash,
|
|
||||||
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
|
|
||||||
withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
|
|
||||||
blobGasUsed: Quantity(executionPayload.blob_gas_used),
|
|
||||||
excessBlobGas: Quantity(executionPayload.excess_blob_gas),
|
|
||||||
depositRequests: mapIt(
|
|
||||||
executionPayload.deposit_requests, it.getDepositRequest),
|
|
||||||
withdrawalRequests: mapIt(
|
|
||||||
executionPayload.withdrawal_requests, it.getWithdrawalRequest),
|
|
||||||
consolidationRequests: mapIt(
|
|
||||||
executionPayload.consolidation_requests, it.getConsolidationRequest))
|
|
||||||
|
|
||||||
func isConnected(connection: ELConnection): bool =
|
func isConnected(connection: ELConnection): bool =
|
||||||
connection.web3.isSome
|
connection.web3.isSome
|
||||||
|
|
||||||
|
@ -1359,6 +1021,14 @@ proc sendNewPayload*(
|
||||||
if len(pendingRequests) == 0:
|
if len(pendingRequests) == 0:
|
||||||
# All requests failed; we will keep retrying until the deadline
|
# All requests failed; we will keep retrying until the deadline
|
||||||
# is reached.
|
# is reached.
|
||||||
|
|
||||||
|
# To avoid continuous spam of requests when the EL node is offline, we
|
||||||
|
# sleep before the next attempt for
|
||||||
|
# (NEWPAYLOAD_TIMEOUT / 4), i.e. 2.seconds.
|
||||||
|
let timeout =
|
||||||
|
chronos.nanoseconds(NEWPAYLOAD_TIMEOUT.nanoseconds div 4)
|
||||||
|
await sleepAsync(timeout)
|
||||||
|
|
||||||
break mainLoop
|
break mainLoop
|
||||||
|
|
||||||
proc forkchoiceUpdatedForSingleEL(
|
proc forkchoiceUpdatedForSingleEL(
|
||||||
|
@ -1532,6 +1202,14 @@ proc forkchoiceUpdated*(
|
||||||
if len(pendingRequests) == 0:
|
if len(pendingRequests) == 0:
|
||||||
# All requests failed; we will keep retrying until the deadline
|
# All requests failed; we will keep retrying until the deadline
|
||||||
# is reached.
|
# is reached.
|
||||||
|
|
||||||
|
# To avoid continuous spam of requests when the EL node is offline, we
|
||||||
|
# sleep before the next attempt for
|
||||||
|
# (FORKCHOICEUPDATED_TIMEOUT / 4), i.e. 2.seconds.
|
||||||
|
let timeout =
|
||||||
|
chronos.nanoseconds(FORKCHOICEUPDATED_TIMEOUT.nanoseconds div 4)
|
||||||
|
await sleepAsync(timeout)
|
||||||
|
|
||||||
break mainLoop
|
break mainLoop
|
||||||
|
|
||||||
# TODO can't be defined within exchangeConfigWithSingleEL
|
# TODO can't be defined within exchangeConfigWithSingleEL
|
||||||
|
|
|
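Both retry loops above (sendNewPayload and forkchoiceUpdated) add the same back-off: once every EL request has failed, sleep for a quarter of the relevant engine-API timeout before the next round, until the overall deadline expires. Below is a minimal, self-contained sketch of that pattern, assuming chronos; `attemptAll` and `REQUEST_TIMEOUT` are hypothetical stand-ins for the per-connection request fan-out and for NEWPAYLOAD_TIMEOUT / FORKCHOICEUPDATED_TIMEOUT.

```nim
import chronos

const REQUEST_TIMEOUT = 8.seconds   # assumed value, mirroring the engine API timeouts

proc attemptAll(): Future[bool] {.async.} =
  # Hypothetical stand-in: fire the request on every EL connection and
  # report whether at least one request is still pending.
  return false

proc retryUntilDeadline() {.async.} =
  let deadline = sleepAsync(REQUEST_TIMEOUT)
  while not deadline.finished:
    if await attemptAll():
      break
    # All requests failed: back off for a quarter of the timeout
    # (2 seconds for an 8-second timeout) before the next attempt.
    await sleepAsync(chronos.nanoseconds(REQUEST_TIMEOUT.nanoseconds div 4))

when isMainModule:
  waitFor retryUntilDeadline()
```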
@ -0,0 +1,359 @@
|
||||||
|
# beacon_chain
|
||||||
|
# Copyright (c) 2024 Status Research & Development GmbH
|
||||||
|
# Licensed and distributed under either of
|
||||||
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||||
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||||
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||||
|
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
|
import
|
||||||
|
../spec/datatypes/[bellatrix, capella, deneb, electra],
|
||||||
|
web3/[engine_api, engine_api_types]
|
||||||
|
|
||||||
|
from std/sequtils import mapIt
|
||||||
|
|
||||||
|
type
|
||||||
|
BellatrixExecutionPayloadWithValue* = object
|
||||||
|
executionPayload*: ExecutionPayloadV1
|
||||||
|
blockValue*: UInt256
|
||||||
|
|
||||||
|
func asEth2Digest*(x: BlockHash): Eth2Digest =
|
||||||
|
Eth2Digest(data: array[32, byte](x))
|
||||||
|
|
||||||
|
template asBlockHash*(x: Eth2Digest): BlockHash =
|
||||||
|
BlockHash(x.data)
|
||||||
|
|
||||||
|
func asConsensusWithdrawal*(w: WithdrawalV1): capella.Withdrawal =
|
||||||
|
capella.Withdrawal(
|
||||||
|
index: w.index.uint64,
|
||||||
|
validator_index: w.validatorIndex.uint64,
|
||||||
|
address: ExecutionAddress(data: w.address.distinctBase),
|
||||||
|
amount: Gwei w.amount)
|
||||||
|
|
||||||
|
func asEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 =
|
||||||
|
WithdrawalV1(
|
||||||
|
index: Quantity(w.index),
|
||||||
|
validatorIndex: Quantity(w.validator_index),
|
||||||
|
address: Address(w.address.data),
|
||||||
|
amount: Quantity(w.amount))
|
||||||
|
|
||||||
|
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1):
|
||||||
|
bellatrix.ExecutionPayload =
|
||||||
|
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
|
||||||
|
bellatrix.Transaction.init(tt.distinctBase)
|
||||||
|
|
||||||
|
bellatrix.ExecutionPayload(
|
||||||
|
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
|
||||||
|
feeRecipient:
|
||||||
|
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
|
||||||
|
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
|
||||||
|
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
|
||||||
|
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
|
||||||
|
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
|
||||||
|
block_number: rpcExecutionPayload.blockNumber.uint64,
|
||||||
|
gas_limit: rpcExecutionPayload.gasLimit.uint64,
|
||||||
|
gas_used: rpcExecutionPayload.gasUsed.uint64,
|
||||||
|
timestamp: rpcExecutionPayload.timestamp.uint64,
|
||||||
|
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
|
||||||
|
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
|
||||||
|
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
|
||||||
|
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.transactions, it.getTransaction)))
|
||||||
|
|
||||||
|
func asConsensusType*(payloadWithValue: BellatrixExecutionPayloadWithValue):
|
||||||
|
bellatrix.ExecutionPayloadForSigning =
|
||||||
|
bellatrix.ExecutionPayloadForSigning(
|
||||||
|
executionPayload: payloadWithValue.executionPayload.asConsensusType,
|
||||||
|
blockValue: payloadWithValue.blockValue)
|
||||||
|
|
||||||
|
template maybeDeref*[T](o: Opt[T]): T = o.get
|
||||||
|
template maybeDeref*[V](v: V): V = v
|
||||||
|
|
||||||
|
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1OrV2|ExecutionPayloadV2):
|
||||||
|
capella.ExecutionPayload =
|
||||||
|
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
|
||||||
|
bellatrix.Transaction.init(tt.distinctBase)
|
||||||
|
|
||||||
|
capella.ExecutionPayload(
|
||||||
|
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
|
||||||
|
feeRecipient:
|
||||||
|
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
|
||||||
|
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
|
||||||
|
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
|
||||||
|
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
|
||||||
|
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
|
||||||
|
block_number: rpcExecutionPayload.blockNumber.uint64,
|
||||||
|
gas_limit: rpcExecutionPayload.gasLimit.uint64,
|
||||||
|
gas_used: rpcExecutionPayload.gasUsed.uint64,
|
||||||
|
timestamp: rpcExecutionPayload.timestamp.uint64,
|
||||||
|
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
|
||||||
|
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
|
||||||
|
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
|
||||||
|
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
|
||||||
|
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
|
||||||
|
mapIt(maybeDeref rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)))
|
||||||
|
|
||||||
|
func asConsensusType*(payloadWithValue: engine_api.GetPayloadV2Response):
|
||||||
|
capella.ExecutionPayloadForSigning =
|
||||||
|
capella.ExecutionPayloadForSigning(
|
||||||
|
executionPayload: payloadWithValue.executionPayload.asConsensusType,
|
||||||
|
blockValue: payloadWithValue.blockValue)
|
||||||
|
|
||||||
|
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV3):
|
||||||
|
deneb.ExecutionPayload =
|
||||||
|
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
|
||||||
|
bellatrix.Transaction.init(tt.distinctBase)
|
||||||
|
|
||||||
|
deneb.ExecutionPayload(
|
||||||
|
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
|
||||||
|
feeRecipient:
|
||||||
|
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
|
||||||
|
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
|
||||||
|
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
|
||||||
|
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
|
||||||
|
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
|
||||||
|
block_number: rpcExecutionPayload.blockNumber.uint64,
|
||||||
|
gas_limit: rpcExecutionPayload.gasLimit.uint64,
|
||||||
|
gas_used: rpcExecutionPayload.gasUsed.uint64,
|
||||||
|
timestamp: rpcExecutionPayload.timestamp.uint64,
|
||||||
|
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
|
||||||
|
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
|
||||||
|
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
|
||||||
|
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
|
||||||
|
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
|
||||||
|
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
|
||||||
|
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64)
|
||||||
|
|
||||||
|
func asConsensusType*(payload: engine_api.GetPayloadV3Response):
|
||||||
|
deneb.ExecutionPayloadForSigning =
|
||||||
|
deneb.ExecutionPayloadForSigning(
|
||||||
|
executionPayload: payload.executionPayload.asConsensusType,
|
||||||
|
blockValue: payload.blockValue,
|
||||||
|
# TODO
|
||||||
|
# The `mapIt` calls below are necessary only because we use different distinct
|
||||||
|
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
|
||||||
|
# Both are defined as `array[N, byte]` under the hood.
|
||||||
|
blobsBundle: deneb.BlobsBundle(
|
||||||
|
commitments: KzgCommitments.init(
|
||||||
|
payload.blobsBundle.commitments.mapIt(
|
||||||
|
kzg_abi.KzgCommitment(bytes: it.bytes))),
|
||||||
|
proofs: KzgProofs.init(
|
||||||
|
payload.blobsBundle.proofs.mapIt(
|
||||||
|
kzg_abi.KzgProof(bytes: it.bytes))),
|
||||||
|
blobs: Blobs.init(
|
||||||
|
payload.blobsBundle.blobs.mapIt(it.bytes))))
|
||||||
|
|
||||||
|
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
|
||||||
|
electra.ExecutionPayload =
|
||||||
|
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
|
||||||
|
bellatrix.Transaction.init(tt.distinctBase)
|
||||||
|
|
||||||
|
template getDepositRequest(
|
||||||
|
dr: DepositRequestV1): electra.DepositRequest =
|
||||||
|
electra.DepositRequest(
|
||||||
|
pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase),
|
||||||
|
withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest,
|
||||||
|
amount: dr.amount.Gwei,
|
||||||
|
signature: ValidatorSig(blob: dr.signature.distinctBase),
|
||||||
|
index: dr.index.uint64)
|
||||||
|
|
||||||
|
template getWithdrawalRequest(
|
||||||
|
wr: WithdrawalRequestV1): electra.WithdrawalRequest =
|
||||||
|
electra.WithdrawalRequest(
|
||||||
|
source_address: ExecutionAddress(data: wr.sourceAddress.distinctBase),
|
||||||
|
validator_pubkey: ValidatorPubKey(blob: wr.validatorPubkey.distinctBase),
|
||||||
|
amount: wr.amount.Gwei)
|
||||||
|
|
||||||
|
template getConsolidationRequest(
|
||||||
|
cr: ConsolidationRequestV1): electra.ConsolidationRequest =
|
||||||
|
electra.ConsolidationRequest(
|
||||||
|
source_address: ExecutionAddress(data: cr.sourceAddress.distinctBase),
|
||||||
|
source_pubkey: ValidatorPubKey(blob: cr.sourcePubkey.distinctBase),
|
||||||
|
target_pubkey: ValidatorPubKey(blob: cr.targetPubkey.distinctBase))
|
||||||
|
|
||||||
|
electra.ExecutionPayload(
|
||||||
|
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
|
||||||
|
feeRecipient:
|
||||||
|
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
|
||||||
|
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
|
||||||
|
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
|
||||||
|
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
|
||||||
|
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
|
||||||
|
block_number: rpcExecutionPayload.blockNumber.uint64,
|
||||||
|
gas_limit: rpcExecutionPayload.gasLimit.uint64,
|
||||||
|
gas_used: rpcExecutionPayload.gasUsed.uint64,
|
||||||
|
timestamp: rpcExecutionPayload.timestamp.uint64,
|
||||||
|
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(
|
||||||
|
rpcExecutionPayload.extraData.bytes),
|
||||||
|
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
|
||||||
|
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
|
||||||
|
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
|
||||||
|
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
|
||||||
|
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
|
||||||
|
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64,
|
||||||
|
deposit_requests:
|
||||||
|
List[electra.DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.depositRequests, it.getDepositRequest)),
|
||||||
|
withdrawal_requests: List[electra.WithdrawalRequest,
|
||||||
|
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.withdrawalRequests,
|
||||||
|
it.getWithdrawalRequest)),
|
||||||
|
consolidation_requests: List[electra.ConsolidationRequest,
|
||||||
|
Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD].init(
|
||||||
|
mapIt(rpcExecutionPayload.consolidationRequests,
|
||||||
|
it.getConsolidationRequest)))
|
||||||
|
|
||||||
|
func asConsensusType*(payload: engine_api.GetPayloadV4Response):
|
||||||
|
electra.ExecutionPayloadForSigning =
|
||||||
|
electra.ExecutionPayloadForSigning(
|
||||||
|
executionPayload: payload.executionPayload.asConsensusType,
|
||||||
|
blockValue: payload.blockValue,
|
||||||
|
# TODO
|
||||||
|
# The `mapIt` calls below are necessary only because we use different distinct
|
||||||
|
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
|
||||||
|
# Both are defined as `array[N, byte]` under the hood.
|
||||||
|
blobsBundle: deneb.BlobsBundle(
|
||||||
|
commitments: KzgCommitments.init(
|
||||||
|
payload.blobsBundle.commitments.mapIt(
|
||||||
|
kzg_abi.KzgCommitment(bytes: it.bytes))),
|
||||||
|
proofs: KzgProofs.init(
|
||||||
|
payload.blobsBundle.proofs.mapIt(
|
||||||
|
kzg_abi.KzgProof(bytes: it.bytes))),
|
||||||
|
blobs: Blobs.init(
|
||||||
|
payload.blobsBundle.blobs.mapIt(it.bytes))))
|
||||||
|
|
||||||
|
func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
|
||||||
|
ExecutionPayloadV1 =
|
||||||
|
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
|
||||||
|
TypedTransaction(tt.distinctBase)
|
||||||
|
|
||||||
|
engine_api.ExecutionPayloadV1(
|
||||||
|
parentHash: executionPayload.parent_hash.asBlockHash,
|
||||||
|
feeRecipient: Address(executionPayload.fee_recipient.data),
|
||||||
|
stateRoot: executionPayload.state_root.asBlockHash,
|
||||||
|
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
||||||
|
logsBloom:
|
||||||
|
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
|
||||||
|
prevRandao: executionPayload.prev_randao.asBlockHash,
|
||||||
|
blockNumber: Quantity(executionPayload.block_number),
|
||||||
|
gasLimit: Quantity(executionPayload.gas_limit),
|
||||||
|
gasUsed: Quantity(executionPayload.gas_used),
|
||||||
|
timestamp: Quantity(executionPayload.timestamp),
|
||||||
|
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
|
||||||
|
baseFeePerGas: executionPayload.base_fee_per_gas,
|
||||||
|
blockHash: executionPayload.block_hash.asBlockHash,
|
||||||
|
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction))
|
||||||
|
|
||||||
|
template toEngineWithdrawal*(w: capella.Withdrawal): WithdrawalV1 =
|
||||||
|
WithdrawalV1(
|
||||||
|
index: Quantity(w.index),
|
||||||
|
validatorIndex: Quantity(w.validator_index),
|
||||||
|
address: Address(w.address.data),
|
||||||
|
amount: Quantity(w.amount))
|
||||||
|
|
||||||
|
func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload):
|
||||||
|
ExecutionPayloadV2 =
|
||||||
|
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
|
||||||
|
TypedTransaction(tt.distinctBase)
|
||||||
|
engine_api.ExecutionPayloadV2(
|
||||||
|
parentHash: executionPayload.parent_hash.asBlockHash,
|
||||||
|
feeRecipient: Address(executionPayload.fee_recipient.data),
|
||||||
|
stateRoot: executionPayload.state_root.asBlockHash,
|
||||||
|
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
||||||
|
logsBloom:
|
||||||
|
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
|
||||||
|
prevRandao: executionPayload.prev_randao.asBlockHash,
|
||||||
|
blockNumber: Quantity(executionPayload.block_number),
|
||||||
|
gasLimit: Quantity(executionPayload.gas_limit),
|
||||||
|
gasUsed: Quantity(executionPayload.gas_used),
|
||||||
|
timestamp: Quantity(executionPayload.timestamp),
|
||||||
|
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
|
||||||
|
baseFeePerGas: executionPayload.base_fee_per_gas,
|
||||||
|
blockHash: executionPayload.block_hash.asBlockHash,
|
||||||
|
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
|
||||||
|
withdrawals: mapIt(executionPayload.withdrawals, it.toEngineWithdrawal))
|
||||||
|
|
||||||
|
func asEngineExecutionPayload*(executionPayload: deneb.ExecutionPayload):
|
||||||
|
ExecutionPayloadV3 =
|
||||||
|
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
|
||||||
|
TypedTransaction(tt.distinctBase)
|
||||||
|
|
||||||
|
engine_api.ExecutionPayloadV3(
|
||||||
|
parentHash: executionPayload.parent_hash.asBlockHash,
|
||||||
|
feeRecipient: Address(executionPayload.fee_recipient.data),
|
||||||
|
stateRoot: executionPayload.state_root.asBlockHash,
|
||||||
|
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
||||||
|
logsBloom:
|
||||||
|
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
|
||||||
|
prevRandao: executionPayload.prev_randao.asBlockHash,
|
||||||
|
blockNumber: Quantity(executionPayload.block_number),
|
||||||
|
gasLimit: Quantity(executionPayload.gas_limit),
|
||||||
|
gasUsed: Quantity(executionPayload.gas_used),
|
||||||
|
timestamp: Quantity(executionPayload.timestamp),
|
||||||
|
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
|
||||||
|
baseFeePerGas: executionPayload.base_fee_per_gas,
|
||||||
|
blockHash: executionPayload.block_hash.asBlockHash,
|
||||||
|
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
|
||||||
|
withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
|
||||||
|
blobGasUsed: Quantity(executionPayload.blob_gas_used),
|
||||||
|
excessBlobGas: Quantity(executionPayload.excess_blob_gas))
|
||||||
|
|
||||||
|
func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
|
||||||
|
ExecutionPayloadV4 =
|
||||||
|
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
|
||||||
|
TypedTransaction(tt.distinctBase)
|
||||||
|
|
||||||
|
template getDepositRequest(
|
||||||
|
dr: electra.DepositRequest): DepositRequestV1 =
|
||||||
|
DepositRequestV1(
|
||||||
|
pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob),
|
||||||
|
withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data),
|
||||||
|
amount: dr.amount.Quantity,
|
||||||
|
signature: FixedBytes[RawSigSize](dr.signature.blob),
|
||||||
|
index: dr.index.Quantity)
|
||||||
|
|
||||||
|
template getWithdrawalRequest(
|
||||||
|
wr: electra.WithdrawalRequest): WithdrawalRequestV1 =
|
||||||
|
WithdrawalRequestV1(
|
||||||
|
sourceAddress: Address(wr.source_address.data),
|
||||||
|
validatorPubkey: FixedBytes[RawPubKeySize](wr.validator_pubkey.blob),
|
||||||
|
amount: wr.amount.Quantity)
|
||||||
|
|
||||||
|
template getConsolidationRequest(
|
||||||
|
cr: electra.ConsolidationRequest): ConsolidationRequestV1 =
|
||||||
|
ConsolidationRequestV1(
|
||||||
|
sourceAddress: Address(cr.source_address.data),
|
||||||
|
sourcePubkey: FixedBytes[RawPubKeySize](cr.source_pubkey.blob),
|
||||||
|
targetPubkey: FixedBytes[RawPubKeySize](cr.target_pubkey.blob))
|
||||||
|
|
||||||
|
engine_api.ExecutionPayloadV4(
|
||||||
|
parentHash: executionPayload.parent_hash.asBlockHash,
|
||||||
|
feeRecipient: Address(executionPayload.fee_recipient.data),
|
||||||
|
stateRoot: executionPayload.state_root.asBlockHash,
|
||||||
|
receiptsRoot: executionPayload.receipts_root.asBlockHash,
|
||||||
|
logsBloom:
|
||||||
|
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
|
||||||
|
prevRandao: executionPayload.prev_randao.asBlockHash,
|
||||||
|
blockNumber: Quantity(executionPayload.block_number),
|
||||||
|
gasLimit: Quantity(executionPayload.gas_limit),
|
||||||
|
gasUsed: Quantity(executionPayload.gas_used),
|
||||||
|
timestamp: Quantity(executionPayload.timestamp),
|
||||||
|
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
|
||||||
|
baseFeePerGas: executionPayload.base_fee_per_gas,
|
||||||
|
blockHash: executionPayload.block_hash.asBlockHash,
|
||||||
|
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
|
||||||
|
withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
|
||||||
|
blobGasUsed: Quantity(executionPayload.blob_gas_used),
|
||||||
|
excessBlobGas: Quantity(executionPayload.excess_blob_gas),
|
||||||
|
depositRequests: mapIt(
|
||||||
|
executionPayload.deposit_requests, it.getDepositRequest),
|
||||||
|
withdrawalRequests: mapIt(
|
||||||
|
executionPayload.withdrawal_requests, it.getWithdrawalRequest),
|
||||||
|
consolidationRequests: mapIt(
|
||||||
|
executionPayload.consolidation_requests, it.getConsolidationRequest))
|
|
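The new module above collects the Engine API to consensus-spec conversions in one place. Below is a minimal usage sketch of the round-trip these helpers are expected to preserve; the import paths are assumptions (only the module name `engine_api_conversions` appears in the diff), and the payload is a default-constructed one rather than a realistic block.

```nim
import beacon_chain/el/engine_api_conversions   # path assumed; module name taken from the diff
import beacon_chain/spec/datatypes/bellatrix

let
  original  = bellatrix.ExecutionPayload()        # default/empty payload
  asEngine  = original.asEngineExecutionPayload   # consensus type -> engine RPC V1
  roundTrip = asEngine.asConsensusType            # engine RPC V1 -> consensus type

# Spot-check that the round trip preserves a couple of fields.
doAssert roundTrip.block_number == original.block_number
doAssert roundTrip.block_hash == original.block_hash
```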
@ -16,6 +16,8 @@ import
|
||||||
web3/[conversions, eth_api_types],
|
web3/[conversions, eth_api_types],
|
||||||
./merkle_minimal
|
./merkle_minimal
|
||||||
|
|
||||||
|
from ./engine_api_conversions import asBlockHash, asEth2Digest
|
||||||
|
|
||||||
export beacon_chain_db, deques, digest, base, forks
|
export beacon_chain_db, deques, digest, base, forks
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
|
@ -80,12 +82,6 @@ type
|
||||||
deposits*: seq[Deposit]
|
deposits*: seq[Deposit]
|
||||||
hasMissingDeposits*: bool
|
hasMissingDeposits*: bool
|
||||||
|
|
||||||
func asEth2Digest*(x: BlockHash): Eth2Digest =
|
|
||||||
Eth2Digest(data: array[32, byte](x))
|
|
||||||
|
|
||||||
template asBlockHash*(x: Eth2Digest): BlockHash =
|
|
||||||
BlockHash(x.data)
|
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#get_eth1_data
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#get_eth1_data
|
||||||
func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 =
|
func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 =
|
||||||
genesis_time + slot * SECONDS_PER_SLOT
|
genesis_time + slot * SECONDS_PER_SLOT
|
||||||
|
@ -115,7 +111,7 @@ template findBlock(chain: Eth1Chain, eth1Data: Eth1Data): Eth1Block =
|
||||||
|
|
||||||
func makeSuccessorWithoutDeposits*(existingBlock: Eth1Block,
|
func makeSuccessorWithoutDeposits*(existingBlock: Eth1Block,
|
||||||
successor: BlockObject): Eth1Block =
|
successor: BlockObject): Eth1Block =
|
||||||
result = Eth1Block(
|
Eth1Block(
|
||||||
hash: successor.hash.asEth2Digest,
|
hash: successor.hash.asEth2Digest,
|
||||||
number: Eth1BlockNumber successor.number,
|
number: Eth1BlockNumber successor.number,
|
||||||
timestamp: Eth1BlockTimestamp successor.timestamp)
|
timestamp: Eth1BlockTimestamp successor.timestamp)
|
||||||
|
|
|
@ -7,7 +7,7 @@
|
||||||
|
|
||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
|
||||||
|
|
||||||
# Merkle tree helpers
|
# Merkle tree helpers
|
||||||
# ---------------------------------------------------------------
|
# ---------------------------------------------------------------
|
||||||
|
|
|
@ -49,13 +49,11 @@ func compute_deltas(
|
||||||
logScope: topics = "fork_choice"
|
logScope: topics = "fork_choice"
|
||||||
|
|
||||||
func init*(
|
func init*(
|
||||||
T: type ForkChoiceBackend, checkpoints: FinalityCheckpoints,
|
T: type ForkChoiceBackend, checkpoints: FinalityCheckpoints): T =
|
||||||
version: ForkChoiceVersion): T =
|
T(proto_array: ProtoArray.init(checkpoints))
|
||||||
T(proto_array: ProtoArray.init(checkpoints, version))
|
|
||||||
|
|
||||||
proc init*(
|
proc init*(
|
||||||
T: type ForkChoice, epochRef: EpochRef, blck: BlockRef,
|
T: type ForkChoice, epochRef: EpochRef, blck: BlockRef): T =
|
||||||
version: ForkChoiceVersion): T =
|
|
||||||
## Initialize a fork choice context for a finalized state - in the finalized
|
## Initialize a fork choice context for a finalized state - in the finalized
|
||||||
## state, the justified and finalized checkpoints are the same, so only one
|
## state, the justified and finalized checkpoints are the same, so only one
|
||||||
## is used here
|
## is used here
|
||||||
|
@ -67,10 +65,8 @@ proc init*(
|
||||||
backend: ForkChoiceBackend.init(
|
backend: ForkChoiceBackend.init(
|
||||||
FinalityCheckpoints(
|
FinalityCheckpoints(
|
||||||
justified: checkpoint,
|
justified: checkpoint,
|
||||||
finalized: checkpoint),
|
finalized: checkpoint)),
|
||||||
version),
|
|
||||||
checkpoints: Checkpoints(
|
checkpoints: Checkpoints(
|
||||||
version: version,
|
|
||||||
justified: BalanceCheckpoint(
|
justified: BalanceCheckpoint(
|
||||||
checkpoint: checkpoint,
|
checkpoint: checkpoint,
|
||||||
total_active_balance: epochRef.total_active_balance,
|
total_active_balance: epochRef.total_active_balance,
|
||||||
|
@ -113,7 +109,7 @@ proc update_justified(
|
||||||
self.update_justified(dag, blck, justified.epoch)
|
self.update_justified(dag, blck, justified.epoch)
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#update_checkpoints
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#update_checkpoints
|
||||||
proc update_checkpoints(
|
proc update_checkpoints(
|
||||||
self: var Checkpoints, dag: ChainDAGRef,
|
self: var Checkpoints, dag: ChainDAGRef,
|
||||||
checkpoints: FinalityCheckpoints): FcResult[void] =
|
checkpoints: FinalityCheckpoints): FcResult[void] =
|
||||||
|
@ -377,7 +373,7 @@ proc get_head*(self: var ForkChoice,
|
||||||
self.checkpoints.justified.balances,
|
self.checkpoints.justified.balances,
|
||||||
self.checkpoints.proposer_boost_root)
|
self.checkpoints.proposer_boost_root)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/fork_choice/safe-block.md#get_safe_beacon_block_root
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/fork_choice/safe-block.md#get_safe_beacon_block_root
|
||||||
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
|
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
|
||||||
# Use most recent justified block as a stopgap
|
# Use most recent justified block as a stopgap
|
||||||
self.checkpoints.justified.checkpoint.root
|
self.checkpoints.justified.checkpoint.root
|
||||||
|
|
|
@ -29,14 +29,6 @@ import
|
||||||
# ----------------------------------------------------------------------
|
# ----------------------------------------------------------------------
|
||||||
|
|
||||||
type
|
type
|
||||||
ForkChoiceVersion* {.pure.} = enum
|
|
||||||
## Controls which version of fork choice to run.
|
|
||||||
Stable = "stable"
|
|
||||||
## Use current version from stable Ethereum consensus specifications
|
|
||||||
Pr3431 = "pr3431"
|
|
||||||
## https://github.com/ethereum/consensus-specs/pull/3431
|
|
||||||
## https://github.com/ethereum/consensus-specs/issues/3466
|
|
||||||
|
|
||||||
fcKind* = enum
|
fcKind* = enum
|
||||||
## Fork Choice Error Kinds
|
## Fork Choice Error Kinds
|
||||||
fcFinalizedNodeUnknown
|
fcFinalizedNodeUnknown
|
||||||
|
@ -96,7 +88,6 @@ type
|
||||||
## Subtracted from logical index to get the physical index
|
## Subtracted from logical index to get the physical index
|
||||||
|
|
||||||
ProtoArray* = object
|
ProtoArray* = object
|
||||||
version*: ForkChoiceVersion
|
|
||||||
currentEpoch*: Epoch
|
currentEpoch*: Epoch
|
||||||
checkpoints*: FinalityCheckpoints
|
checkpoints*: FinalityCheckpoints
|
||||||
nodes*: ProtoNodes
|
nodes*: ProtoNodes
|
||||||
|
@ -121,7 +112,6 @@ type
|
||||||
balances*: seq[Gwei]
|
balances*: seq[Gwei]
|
||||||
|
|
||||||
Checkpoints* = object
|
Checkpoints* = object
|
||||||
version*: ForkChoiceVersion
|
|
||||||
time*: BeaconTime
|
time*: BeaconTime
|
||||||
justified*: BalanceCheckpoint
|
justified*: BalanceCheckpoint
|
||||||
finalized*: Checkpoint
|
finalized*: Checkpoint
|
||||||
|
|
|
@ -90,8 +90,7 @@ func nodeLeadsToViableHead(
|
||||||
# ----------------------------------------------------------------------
|
# ----------------------------------------------------------------------
|
||||||
|
|
||||||
func init*(
|
func init*(
|
||||||
T: type ProtoArray, checkpoints: FinalityCheckpoints,
|
T: type ProtoArray, checkpoints: FinalityCheckpoints): T =
|
||||||
version: ForkChoiceVersion): T =
|
|
||||||
let node = ProtoNode(
|
let node = ProtoNode(
|
||||||
bid: BlockId(
|
bid: BlockId(
|
||||||
slot: checkpoints.finalized.epoch.start_slot,
|
slot: checkpoints.finalized.epoch.start_slot,
|
||||||
|
@ -103,8 +102,7 @@ func init*(
|
||||||
bestChild: none(int),
|
bestChild: none(int),
|
||||||
bestDescendant: none(int))
|
bestDescendant: none(int))
|
||||||
|
|
||||||
T(version: version,
|
T(checkpoints: checkpoints,
|
||||||
checkpoints: checkpoints,
|
|
||||||
nodes: ProtoNodes(buf: @[node], offset: 0),
|
nodes: ProtoNodes(buf: @[node], offset: 0),
|
||||||
indices: {node.bid.root: 0}.toTable())
|
indices: {node.bid.root: 0}.toTable())
|
||||||
|
|
||||||
|
@ -536,23 +534,10 @@ func nodeIsViableForHead(
|
||||||
node.checkpoints.justified.epoch == self.checkpoints.justified.epoch
|
node.checkpoints.justified.epoch == self.checkpoints.justified.epoch
|
||||||
|
|
||||||
if not correctJustified:
|
if not correctJustified:
|
||||||
case self.version
|
# The voting source should be either at the same height as the store's
|
||||||
of ForkChoiceVersion.Stable:
|
# justified checkpoint or not more than two epochs ago
|
||||||
# If the previous epoch is justified, the block should be pulled-up.
|
correctJustified =
|
||||||
# In this case, check that unrealized justification is higher than the
|
node.checkpoints.justified.epoch + 2 >= self.currentEpoch
|
||||||
# store and that the voting source is not more than two epochs ago
|
|
||||||
if self.isPreviousEpochJustified and
|
|
||||||
node.bid.slot.epoch == self.currentEpoch:
|
|
||||||
let unrealized =
|
|
||||||
self.currentEpochTips.getOrDefault(nodeIdx, node.checkpoints)
|
|
||||||
correctJustified =
|
|
||||||
unrealized.justified.epoch >= self.checkpoints.justified.epoch and
|
|
||||||
node.checkpoints.justified.epoch + 2 >= self.currentEpoch
|
|
||||||
of ForkChoiceVersion.Pr3431:
|
|
||||||
# The voting source should be either at the same height as the store's
|
|
||||||
# justified checkpoint or not more than two epochs ago
|
|
||||||
correctJustified =
|
|
||||||
node.checkpoints.justified.epoch + 2 >= self.currentEpoch
|
|
||||||
|
|
||||||
return
|
return
|
||||||
if not correctJustified:
|
if not correctJustified:
|
||||||
|
|
|
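The branch that remains above reduces the voting-source check to a single rule: the node's justified checkpoint must either match the store's justified checkpoint or be at most two epochs behind the current epoch. A self-contained sketch of that rule, with `Epoch` replaced by a plain integer stand-in:

```nim
type Epoch = uint64   # stand-in for the real Epoch type

func votingSourceOk(nodeJustified, storeJustified, currentEpoch: Epoch): bool =
  # Same justified checkpoint as the store, or a voting source that is
  # not more than two epochs behind the current epoch.
  nodeJustified == storeJustified or nodeJustified + 2 >= currentEpoch

doAssert votingSourceOk(10, 12, 12)       # two epochs old: still viable
doAssert not votingSourceOk(9, 12, 12)    # three epochs old: no longer viable
```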
@ -99,7 +99,7 @@ type
|
||||||
Batch* = object
|
Batch* = object
|
||||||
## A batch represents up to BatchedCryptoSize non-aggregated signatures
|
## A batch represents up to BatchedCryptoSize non-aggregated signatures
|
||||||
created: Moment
|
created: Moment
|
||||||
sigsets: seq[SignatureSet]
|
multiSets: Table[array[32, byte], MultiSignatureSet]
|
||||||
items: seq[BatchItem]
|
items: seq[BatchItem]
|
||||||
|
|
||||||
VerifierItem = object
|
VerifierItem = object
|
||||||
|
@ -198,7 +198,7 @@ proc complete(batchCrypto: var BatchCrypto, batch: var Batch, ok: bool) =
|
||||||
|
|
||||||
batchCrypto.counts.batches += 1
|
batchCrypto.counts.batches += 1
|
||||||
batchCrypto.counts.signatures += batch.items.len()
|
batchCrypto.counts.signatures += batch.items.len()
|
||||||
batchCrypto.counts.aggregates += batch.sigsets.len()
|
batchCrypto.counts.aggregates += batch.multiSets.len()
|
||||||
|
|
||||||
if batchCrypto.counts.batches >= 256:
|
if batchCrypto.counts.batches >= 256:
|
||||||
# Not too often, so as not to overwhelm our metrics
|
# Not too often, so as not to overwhelm our metrics
|
||||||
|
@ -208,16 +208,6 @@ proc complete(batchCrypto: var BatchCrypto, batch: var Batch, ok: bool) =
|
||||||
|
|
||||||
reset(batchCrypto.counts)
|
reset(batchCrypto.counts)
|
||||||
|
|
||||||
func combine(a: var Signature, b: Signature) =
|
|
||||||
var tmp = AggregateSignature.init(CookedSig(a))
|
|
||||||
tmp.aggregate(b)
|
|
||||||
a = Signature(tmp.finish())
|
|
||||||
|
|
||||||
func combine(a: var PublicKey, b: PublicKey) =
|
|
||||||
var tmp = AggregatePublicKey.init(CookedPubKey(a))
|
|
||||||
tmp.aggregate(b)
|
|
||||||
a = PublicKey(tmp.finish())
|
|
||||||
|
|
||||||
proc batchVerifyTask(task: ptr BatchTask) {.nimcall.} =
|
proc batchVerifyTask(task: ptr BatchTask) {.nimcall.} =
|
||||||
# Task suitable for running in taskpools - look, no GC!
|
# Task suitable for running in taskpools - look, no GC!
|
||||||
let
|
let
|
||||||
|
@ -237,12 +227,29 @@ proc spawnBatchVerifyTask(tp: Taskpool, task: ptr BatchTask) =
|
||||||
# Possibly related to: https://github.com/nim-lang/Nim/issues/22305
|
# Possibly related to: https://github.com/nim-lang/Nim/issues/22305
|
||||||
tp.spawn batchVerifyTask(task)
|
tp.spawn batchVerifyTask(task)
|
||||||
|
|
||||||
proc batchVerifyAsync*(
|
func combine(
|
||||||
verifier: ref BatchVerifier, signal: ThreadSignalPtr,
|
multiSet: MultiSignatureSet,
|
||||||
|
verifier: ref BatchVerifier): SignatureSet =
|
||||||
|
var secureRandomBytes: array[32, byte]
|
||||||
|
verifier[].rng[].generate(secureRandomBytes)
|
||||||
|
multiSet.combine(secureRandomBytes)
|
||||||
|
|
||||||
|
func combineAll(
|
||||||
|
multiSets: Table[array[32, byte], MultiSignatureSet],
|
||||||
|
verifier: ref BatchVerifier): seq[SignatureSet] =
|
||||||
|
var sigsets = newSeqOfCap[SignatureSet](multiSets.len)
|
||||||
|
for multiSet in multiSets.values():
|
||||||
|
sigsets.add multiSet.combine(verifier)
|
||||||
|
sigsets
|
||||||
|
|
||||||
|
proc batchVerifyAsync(
|
||||||
|
verifier: ref BatchVerifier,
|
||||||
|
signal: ThreadSignalPtr,
|
||||||
batch: ref Batch): Future[bool] {.async: (raises: [CancelledError]).} =
|
batch: ref Batch): Future[bool] {.async: (raises: [CancelledError]).} =
|
||||||
|
let sigsets = batch[].multiSets.combineAll(verifier)
|
||||||
var task = BatchTask(
|
var task = BatchTask(
|
||||||
setsPtr: makeUncheckedArray(baseAddr batch[].sigsets),
|
setsPtr: makeUncheckedArray(baseAddr sigsets),
|
||||||
numSets: batch[].sigsets.len,
|
numSets: sigsets.len,
|
||||||
taskpool: verifier[].taskpool,
|
taskpool: verifier[].taskpool,
|
||||||
cache: addr verifier[].sigVerifCache,
|
cache: addr verifier[].sigVerifCache,
|
||||||
signal: signal,
|
signal: signal,
|
||||||
|
@ -264,18 +271,18 @@ proc batchVerifyAsync*(
|
||||||
task.ok.load()
|
task.ok.load()
|
||||||
|
|
||||||
proc processBatch(
|
proc processBatch(
|
||||||
batchCrypto: ref BatchCrypto, batch: ref Batch,
|
batchCrypto: ref BatchCrypto,
|
||||||
verifier: ref BatchVerifier, signal: ThreadSignalPtr) {.async: (raises: [CancelledError]).} =
|
batch: ref Batch,
|
||||||
let
|
verifier: ref BatchVerifier,
|
||||||
numSets = batch[].sigsets.len()
|
signal: ThreadSignalPtr) {.async: (raises: [CancelledError]).} =
|
||||||
|
let numSets = batch[].multiSets.len
|
||||||
|
|
||||||
if numSets == 0:
|
if numSets == 0:
|
||||||
# Nothing to do in this batch; this can happen when a batch is created without
|
# Nothing to do in this batch; this can happen when a batch is created without
|
||||||
# any signatures successfully added to it
|
# any signatures successfully added to it
|
||||||
return
|
return
|
||||||
|
|
||||||
let
|
let startTick = Moment.now()
|
||||||
startTick = Moment.now()
|
|
||||||
|
|
||||||
# If the hardware is too slow to keep up or an event caused a temporary
|
# If the hardware is too slow to keep up or an event caused a temporary
|
||||||
# buildup of signature verification tasks, the batch will be dropped so as to
|
# buildup of signature verification tasks, the batch will be dropped so as to
|
||||||
|
@ -300,13 +307,19 @@ proc processBatch(
|
||||||
# may not be beneficial to use batch verification:
|
# may not be beneficial to use batch verification:
|
||||||
# https://github.com/status-im/nim-blscurve/blob/3956f63dd0ed5d7939f6195ee09e4c5c1ace9001/blscurve/bls_batch_verifier.nim#L390
|
# https://github.com/status-im/nim-blscurve/blob/3956f63dd0ed5d7939f6195ee09e4c5c1ace9001/blscurve/bls_batch_verifier.nim#L390
|
||||||
if numSets == 1:
|
if numSets == 1:
|
||||||
blsVerify(batch[].sigsets[0])
|
var r: bool
|
||||||
|
for multiSet in batch[].multiSets.values():
|
||||||
|
r = blsVerify(multiSet.combine(verifier))
|
||||||
|
break
|
||||||
|
r
|
||||||
elif batchCrypto[].taskpool.numThreads > 1 and numSets > 3:
|
elif batchCrypto[].taskpool.numThreads > 1 and numSets > 3:
|
||||||
await batchVerifyAsync(verifier, signal, batch)
|
await batchVerifyAsync(verifier, signal, batch)
|
||||||
else:
|
else:
|
||||||
let secureRandomBytes = verifier[].rng[].generate(array[32, byte])
|
let secureRandomBytes = verifier[].rng[].generate(array[32, byte])
|
||||||
batchVerifySerial(
|
batchVerifySerial(
|
||||||
verifier[].sigVerifCache, batch.sigsets, secureRandomBytes)
|
verifier[].sigVerifCache,
|
||||||
|
batch.multiSets.combineAll(verifier),
|
||||||
|
secureRandomBytes)
|
||||||
|
|
||||||
trace "batch crypto - finished",
|
trace "batch crypto - finished",
|
||||||
numSets, items = batch[].items.len(), ok,
|
numSets, items = batch[].items.len(), ok,
|
||||||
|
@ -366,18 +379,10 @@ proc verifySoon(
|
||||||
batch = batchCrypto[].getBatch()
|
batch = batchCrypto[].getBatch()
|
||||||
fut = newFuture[BatchResult](name)
|
fut = newFuture[BatchResult](name)
|
||||||
|
|
||||||
var found = false
|
batch[].multiSets.withValue(sigset.message, multiSet):
|
||||||
# Find existing signature sets with the same message - if we can verify an
|
multiSet[].add sigset
|
||||||
# aggregate instead of several signatures, that is _much_ faster
|
do:
|
||||||
for item in batch[].sigsets.mitems():
|
batch[].multiSets[sigset.message] = MultiSignatureSet.init sigset
|
||||||
if item.message == sigset.message:
|
|
||||||
item.signature.combine(sigset.signature)
|
|
||||||
item.pubkey.combine(sigset.pubkey)
|
|
||||||
found = true
|
|
||||||
break
|
|
||||||
|
|
||||||
if not found:
|
|
||||||
batch[].sigsets.add sigset
|
|
||||||
|
|
||||||
# We need to keep the "original" sigset to allow verifying each signature
|
# We need to keep the "original" sigset to allow verifying each signature
|
||||||
# one by one in the case the combined operation fails
|
# one by one in the case the combined operation fails
|
||||||
|
|
|
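The verifySoon change above replaces the linear scan over `sigsets` with a table keyed by the 32-byte signing root, so every signature over the same message lands in one `MultiSignatureSet` and can later be collapsed into a single randomly-blinded aggregate. Below is a sketch of just the grouping step; importing `SignatureSet`/`MultiSignatureSet` from blscurve is an assumption about where those types live.

```nim
import std/tables
import blscurve   # assumed home of SignatureSet and MultiSignatureSet

proc groupByMessage(sigsets: openArray[SignatureSet]):
    Table[array[32, byte], MultiSignatureSet] =
  ## Group signature sets by message so that same-message signatures can be
  ## verified as one aggregate instead of several individual signatures.
  for sigset in sigsets:
    result.withValue(sigset.message, multiSet):
      multiSet[].add sigset                                     # existing group: extend it
    do:
      result[sigset.message] = MultiSignatureSet.init sigset    # first set for this message
```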
@ -9,11 +9,11 @@
 import
   chronicles, chronos, metrics,
-  ../spec/[forks, signatures, signatures_batch],
+  ../spec/[forks, helpers_el, signatures, signatures_batch],
   ../sszdump

 from std/deques import Deque, addLast, contains, initDeque, items, len, shrink
-from std/sequtils import mapIt
+from std/sequtils import anyIt, mapIt
 from ../consensus_object_pools/consensus_manager import
   ConsensusManager, checkNextProposer, optimisticExecutionBlockHash,
   runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead,
@ -541,31 +541,41 @@ proc storeBlock(

   if NewPayloadStatus.noResponse == payloadStatus:
     # When the execution layer is not available to verify the payload, we do the
-    # required check on the CL side instead and proceed as if the EL was syncing
-    # TODO run https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#blob-kzg-commitments
-    # https://github.com/ethereum/execution-apis/blob/main/src/engine/experimental/blob-extension.md#specification
-    # "This validation MUST be instantly run in all cases even during active
-    # sync process."
-    #
-    # Client software MUST validate `blockHash` value as being equivalent to
-    # `Keccak256(RLP(ExecutionBlockHeader))`
-    # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification
-    #
-    # This should simulate an unsynced EL, which still must perform these
-    # checks. This means it must be able to do so without context, beyond
-    # whatever data the block itself contains.
+    # required checks on the CL instead and proceed as if the EL was syncing
+    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#verify_and_notify_new_payload
+    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload
     when typeof(signedBlock).kind >= ConsensusFork.Bellatrix:
-      template payload(): auto = signedBlock.message.body.execution_payload
-      if signedBlock.message.is_execution_block and
-          payload.block_hash !=
+      if signedBlock.message.is_execution_block:
+        template payload(): auto = signedBlock.message.body.execution_payload
+
+        template returnWithError(msg: string, extraMsg = ""): untyped =
+          if extraMsg != "":
+            debug msg, reason = extraMsg, executionPayload = shortLog(payload)
+          else:
+            debug msg, executionPayload = shortLog(payload)
+          self[].dumpInvalidBlock(signedBlock)
+          doAssert strictVerification notin dag.updateFlags
+          self.consensusManager.quarantine[].addUnviable(signedBlock.root)
+          return err((VerifierError.Invalid, ProcessingStatus.completed))
+
+        if payload.transactions.anyIt(it.len == 0):
+          returnWithError "Execution block contains zero length transactions"
+
+        if payload.block_hash !=
             signedBlock.message.compute_execution_block_hash():
-        debug "Execution block hash validation failed",
-          execution_payload = shortLog(payload)
-        self[].dumpInvalidBlock(signedBlock)
-        doAssert strictVerification notin dag.updateFlags
-        self.consensusManager.quarantine[].addUnviable(signedBlock.root)
-        return err((VerifierError.Invalid, ProcessingStatus.completed))
+          returnWithError "Execution block hash validation failed"
+
+        # [New in Deneb:EIP4844]
+        when typeof(signedBlock).kind >= ConsensusFork.Deneb:
+          let blobsRes = signedBlock.message.is_valid_versioned_hashes
+          if blobsRes.isErr:
+            returnWithError "Blob versioned hashes invalid", blobsRes.error
+        else:
+          # If there are EIP-4844 (type 3) transactions in the payload with
+          # versioned hashes, the transactions would be rejected by the EL
+          # based on payload timestamp (only allowed post Deneb);
+          # There are no `blob_kzg_commitments` before Deneb to compare against
+          discard

     let newPayloadTick = Moment.now()
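When the EL gives no response, the block processor now runs the `verify_and_notify_new_payload` sanity checks on the CL side: reject payloads containing zero-length transactions, reject a `block_hash` that does not match the hash recomputed from the payload, and (post-Deneb) check the blob versioned hashes. Below is a hedged sketch of that ordering only, using stand-in types rather than the real `ForkySignedBeaconBlock` and spec helpers.

```nim
type
  StubPayload = object
    blockHash: string            # stand-in for payload.block_hash
    transactions: seq[seq[byte]]
  StubBlock = object
    payload: StubPayload
    expectedHash: string         # stand-in for compute_execution_block_hash()

proc checkWithoutEl(blck: StubBlock): (bool, string) =
  ## Same ordering as the diff: zero-length transactions first, then block hash.
  for tx in blck.payload.transactions:
    if tx.len == 0:
      return (false, "Execution block contains zero length transactions")
  if blck.payload.blockHash != blck.expectedHash:
    return (false, "Execution block hash validation failed")
  (true, "")

when isMainModule:
  let good = StubBlock(
    payload: StubPayload(blockHash: "0xabc", transactions: @[@[byte 1, 2]]),
    expectedHash: "0xabc")
  doAssert checkWithoutEl(good) == (true, "")
  var bad = good
  bad.payload.transactions.add newSeq[byte]()  # a zero-length transaction is rejected
  doAssert not checkWithoutEl(bad)[0]
```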
@ -840,7 +850,7 @@ proc processBlock(
     # - MUST NOT optimistically import the block.
     # - MUST NOT apply the block to the fork choice store.
     # - MAY queue the block for later processing.
-    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#execution-engine-errors
+    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/sync/optimistic.md#execution-engine-errors
     await sleepAsync(chronos.seconds(1))
     self[].enqueueBlock(
       entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized,
@ -11,6 +11,7 @@ import
   # Status
   chronicles, chronos, metrics,
   results,
+  stew/byteutils,
   # Internals
   ../spec/[
     beaconstate, state_transition_block, forks, helpers, network, signatures],
@ -302,7 +303,7 @@ template validateBeaconBlockBellatrix(
   #
   # `is_merge_transition_complete(state)` tests for
   # `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#block-processing
   # shows that `state.latest_execution_payload_header` being default or not is
   # exactly equivalent to whether that block's execution payload is default or
   # not, so test cached block information rather than reconstructing a state.
@ -467,7 +468,13 @@ proc validateBlobSidecar*(

   # Send notification about new blob sidecar via callback
   if not(isNil(blobQuarantine.onBlobSidecarCallback)):
-    blobQuarantine.onBlobSidecarCallback(blob_sidecar)
+    blobQuarantine.onBlobSidecarCallback BlobSidecarInfoObject(
+      block_root: hash_tree_root(blob_sidecar.signed_block_header.message),
+      index: blob_sidecar.index,
+      slot: blob_sidecar.signed_block_header.message.slot,
+      kzg_commitment: blob_sidecar.kzg_commitment,
+      versioned_hash:
+        blob_sidecar.kzg_commitment.kzg_commitment_to_versioned_hash.to0xHex())

   ok()
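The callback now receives a `BlobSidecarInfoObject` whose `versioned_hash` is derived from the KZG commitment. Per EIP-4844 that derivation is sha256 of the commitment with the first byte replaced by the version byte `0x01`. The sketch below assumes the nimcrypto package (already a project dependency) for sha256 and uses a local `toHex0x` helper rather than the project's own hex routines.

```nim
import std/strutils
import nimcrypto/[hash, sha2]

const VERSIONED_HASH_VERSION_KZG = byte 0x01

proc kzgCommitmentToVersionedHash(commitment: openArray[byte]): array[32, byte] =
  ## EIP-4844: sha256(commitment) with the first byte replaced by the version byte.
  let digest = sha256.digest(commitment)
  result = digest.data
  result[0] = VERSIONED_HASH_VERSION_KZG

proc toHex0x(h: openArray[byte]): string =
  # Local helper for display purposes only.
  result = "0x"
  for b in h:
    result.add b.toHex(2).toLowerAscii()

when isMainModule:
  var commitment: array[48, byte]   # a KZG commitment is 48 bytes
  echo kzgCommitmentToVersionedHash(commitment).toHex0x()
```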
@ -1180,7 +1187,7 @@ proc validateAggregate*(

   ok((attesting_indices, sig))

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#bls_to_execution_change
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/p2p-interface.md#bls_to_execution_change
 proc validateBlsToExecutionChange*(
     pool: ValidatorChangePool, batchCrypto: ref BatchCrypto,
     signed_address_change: SignedBLSToExecutionChange,
@ -94,7 +94,7 @@ typedef struct ETHConsensusConfig ETHConsensusConfig;
 * based on the given `config.yaml` file content - If successful.
 * @return `NULL` - If the given `config.yaml` is malformed or incompatible.
 *
- * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
+ * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/configs/README.md
 */
 ETH_RESULT_USE_CHECK
 ETHConsensusConfig *_Nullable ETHConsensusConfigCreateFromYaml(const char *configFileContent);
@ -151,9 +151,9 @@ typedef struct ETHBeaconState ETHBeaconState;
 *
 * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
 * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate
- * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
+ * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconstate
 * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
- * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
+ * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/configs/README.md
 */
 ETH_RESULT_USE_CHECK
 ETHBeaconState *_Nullable ETHBeaconStateCreateFromSsz(
@ -325,8 +325,8 @@ typedef struct ETHLightClientStore ETHLightClientStore;
 *
 * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
 * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
- * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
- * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
+ * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md
+ * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
 */
 ETH_RESULT_USE_CHECK
 ETHLightClientStore *_Nullable ETHLightClientStoreCreateFromBootstrap(
@ -579,7 +579,7 @@ typedef struct ETHLightClientHeader ETHLightClientHeader;
 *
 * @return Latest finalized header.
 *
- * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
+ * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
 */
 ETH_RESULT_USE_CHECK
 const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
@ -598,7 +598,7 @@ const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
 * @return Whether or not the next sync committee is currently known.
 *
 * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
- * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
+ * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md
 */
 ETH_RESULT_USE_CHECK
 bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store);
@ -695,7 +695,7 @@ typedef struct ETHBeaconBlockHeader ETHBeaconBlockHeader;
 *
 * @return Beacon block header.
 *
- * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#beaconblockheader
+ * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#beaconblockheader
 */
 ETH_RESULT_USE_CHECK
 const ETHBeaconBlockHeader *ETHLightClientHeaderGetBeacon(
@ -17,7 +17,7 @@ import
   json_rpc/jsonmarshal,
   secp256k1,
   web3/[engine_api_types, eth_api_types, conversions],
-  ../el/eth1_chain,
+  ../el/[engine_api_conversions, eth1_chain],
   ../spec/eth2_apis/[eth2_rest_serialization, rest_light_client_calls],
   ../spec/[helpers, light_client_sync],
   ../sync/light_client_sync_helpers,
@ -77,7 +77,7 @@ proc ETHConsensusConfigCreateFromYaml(
   ## * `NULL` - If the given `config.yaml` is malformed or incompatible.
   ##
   ## See:
-  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
+  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/configs/README.md
   let cfg = RuntimeConfig.new()
   try:
     cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0]
@ -143,9 +143,9 @@ proc ETHBeaconStateCreateFromSsz(
   ## See:
   ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
   ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate
-  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
+  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconstate
   ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
-  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md
+  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/configs/README.md
   let
     consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
       return nil
@ -328,8 +328,8 @@ proc ETHLightClientStoreCreateFromBootstrap(
   ## See:
   ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
   ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
-  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
-  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
+  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md
+  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
   let
     mediaType = MediaType.init($mediaType)
     consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
@ -755,7 +755,7 @@ func ETHLightClientStoreIsNextSyncCommitteeKnown(
   ##
   ## See:
   ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
-  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md
+  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md
   store[].is_next_sync_committee_known

 func ETHLightClientStoreGetOptimisticHeader(
@ -841,7 +841,7 @@ proc ETHLightClientHeaderCopyBeaconRoot(
   ## * Pointer to a copy of the given header's beacon block root.
   ##
   ## See:
-  ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#hash_tree_root
+  ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#hash_tree_root
   discard cfg # Future-proof against new fields, see `get_lc_execution_root`.
   let root = Eth2Digest.new()
   root[] = header[].beacon.hash_tree_root()
@ -86,7 +86,8 @@ proc createLightClient(
     getBeaconTime: GetBeaconTimeFn,
     genesis_validators_root: Eth2Digest,
     finalizationMode: LightClientFinalizationMode,
-    strictVerification = false
+    strictVerification = false,
+    shouldInhibitSync: light_client_manager.GetBoolCallback = nil
 ): LightClient =
   let lightClient = LightClient(
     network: network,
@ -177,7 +178,8 @@ proc createLightClient(
     lightClient.network, rng, getTrustedBlockRoot,
     bootstrapVerifier, updateVerifier, finalityVerifier, optimisticVerifier,
     isLightClientStoreInitialized, isNextSyncCommitteeKnown,
-    getFinalizedPeriod, getOptimisticPeriod, getBeaconTime)
+    getFinalizedPeriod, getOptimisticPeriod, getBeaconTime,
+    shouldInhibitSync = shouldInhibitSync)

   lightClient.gossipState = {}
@ -191,13 +193,15 @@ proc createLightClient*(
     forkDigests: ref ForkDigests,
     getBeaconTime: GetBeaconTimeFn,
     genesis_validators_root: Eth2Digest,
-    finalizationMode: LightClientFinalizationMode
+    finalizationMode: LightClientFinalizationMode,
+    shouldInhibitSync: light_client_manager.GetBoolCallback = nil
 ): LightClient =
   createLightClient(
     network, rng,
     config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,
     cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode,
-    strictVerification = config.strictVerification)
+    strictVerification = config.strictVerification,
+    shouldInhibitSync = shouldInhibitSync)

 proc createLightClient*(
     network: Eth2Node,
@ -207,12 +211,14 @@ proc createLightClient*(
     forkDigests: ref ForkDigests,
     getBeaconTime: GetBeaconTimeFn,
     genesis_validators_root: Eth2Digest,
-    finalizationMode: LightClientFinalizationMode
+    finalizationMode: LightClientFinalizationMode,
+    shouldInhibitSync: light_client_manager.GetBoolCallback = nil
 ): LightClient =
   createLightClient(
     network, rng,
     dumpEnabled = false, dumpDirInvalid = ".", dumpDirIncoming = ".",
-    cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode)
+    cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode,
+    shouldInhibitSync = shouldInhibitSync)

 proc start*(lightClient: LightClient) =
   notice "Starting light client",
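`createLightClient` and its public overloads now accept an optional `shouldInhibitSync` callback, defaulting to `nil`, which is forwarded unchanged to the light client manager so the embedding application can pause syncing. A minimal sketch of that optional-callback pattern, with illustrative proc names rather than the real manager API, follows.

```nim
type
  GetBoolCallback = proc(): bool {.gcsafe, raises: [].}

proc syncStep(shouldInhibitSync: GetBoolCallback = nil) =
  # nil means "never inhibit"; otherwise the caller is asked before each round.
  if not shouldInhibitSync.isNil and shouldInhibitSync():
    echo "sync inhibited by caller"
  else:
    echo "syncing"

proc syncStepWithDefaults(shouldInhibitSync: GetBoolCallback = nil) =
  # Wrapper overloads only forward the parameter, like the createLightClient overloads.
  syncStep(shouldInhibitSync = shouldInhibitSync)

when isMainModule:
  syncStepWithDefaults()                                         # syncing
  syncStepWithDefaults(shouldInhibitSync = proc(): bool = true)  # inhibited
```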
@ -9,7 +9,6 @@
 import stew/base10
 import std/tables
-import libp2p/[multiaddress, multicodec, peerstore]

 type
   Eth2Agent* {.pure.} = enum
@ -176,7 +176,7 @@ type
   MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [].}
   MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [].}

-  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#goodbye
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#goodbye
   DisconnectionReason* = enum
     # might see other values on the wire!
     ClientShutDown = 1
@ -2555,8 +2555,8 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
   node.metadata.seq_number += 1
   node.metadata.attnets = attnets

-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#attestation-subnet-subscription
-  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
   let res = node.discovery.updateRecord({
     enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
   })
@ -2568,7 +2568,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
   debug "Stability subnets changed; updated ENR attnets", attnets

 proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#sync-committee-subnet-stability
   if node.metadata.syncnets == syncnets:
     return

@ -2694,23 +2694,28 @@ proc broadcastBlobSidecar*(
     node: Eth2Node, subnet_id: BlobId, blob: deneb.BlobSidecar):
     Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
   let
-    forkPrefix = node.forkDigestAtEpoch(node.getWallEpoch)
-    topic = getBlobSidecarTopic(forkPrefix, subnet_id)
+    contextEpoch = blob.signed_block_header.message.slot.epoch
+    topic = getBlobSidecarTopic(
+      node.forkDigestAtEpoch(contextEpoch), subnet_id)
   node.broadcast(topic, blob)

 proc broadcastSyncCommitteeMessage*(
     node: Eth2Node, msg: SyncCommitteeMessage,
     subcommitteeIdx: SyncSubcommitteeIndex):
     Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
-  let topic = getSyncCommitteeTopic(
-    node.forkDigestAtEpoch(node.getWallEpoch), subcommitteeIdx)
+  let
+    contextEpoch = msg.slot.epoch
+    topic = getSyncCommitteeTopic(
+      node.forkDigestAtEpoch(contextEpoch), subcommitteeIdx)
   node.broadcast(topic, msg)

 proc broadcastSignedContributionAndProof*(
     node: Eth2Node, msg: SignedContributionAndProof):
     Future[SendResult] {.async: (raises: [CancelledError], raw: true).} =
-  let topic = getSyncCommitteeContributionAndProofTopic(
-    node.forkDigestAtEpoch(node.getWallEpoch))
+  let
+    contextEpoch = msg.message.contribution.slot.epoch
+    topic = getSyncCommitteeContributionAndProofTopic(
+      node.forkDigestAtEpoch(contextEpoch))
   node.broadcast(topic, msg)

 proc broadcastLightClientFinalityUpdate*(
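The broadcast procs above now select the gossip fork digest from the epoch of the message's own slot (`contextEpoch`) rather than the wall-clock epoch, so a message produced just before a fork boundary is still published on the topic of the fork it belongs to. A small self-contained sketch of the idea, with made-up digests and an assumed fork epoch:

```nim
const SLOTS_PER_EPOCH = 32'u64

type Epoch = distinct uint64

func epoch(slot: uint64): Epoch = Epoch(slot div SLOTS_PER_EPOCH)

func forkDigestAtEpoch(e: Epoch, denebEpoch: uint64): string =
  # Stand-in for node.forkDigestAtEpoch: the digest changes at fork boundaries.
  if uint64(e) >= denebEpoch: "deneb-digest" else: "capella-digest"

when isMainModule:
  let
    denebEpoch = 100'u64
    msgSlot = 99'u64 * SLOTS_PER_EPOCH + 5   # a message from just before the fork
  # Even if the wall clock is already past the fork, the topic must match the
  # epoch the message belongs to, so the digest is derived from msgSlot.
  doAssert forkDigestAtEpoch(epoch(msgSlot), denebEpoch) == "capella-digest"
```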
@ -293,15 +293,8 @@ proc initFullNode(
       node.eventBus.propSlashQueue.emit(data)
     proc onAttesterSlashingAdded(data: phase0.AttesterSlashing) =
       node.eventBus.attSlashQueue.emit(data)
-    proc onBlobSidecarAdded(data: BlobSidecar) =
-      node.eventBus.blobSidecarQueue.emit(
-        BlobSidecarInfoObject(
-          block_root: hash_tree_root(data.signed_block_header.message),
-          index: data.index,
-          slot: data.signed_block_header.message.slot,
-          kzg_commitment: data.kzg_commitment,
-          versioned_hash:
-            data.kzg_commitment.kzg_commitment_to_versioned_hash.to0xHex))
+    proc onBlobSidecarAdded(data: BlobSidecarInfoObject) =
+      node.eventBus.blobSidecarQueue.emit(data)
     proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) =
       let optimistic =
         if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH:
@ -392,7 +385,7 @@ proc initFullNode(
     quarantine = newClone(
       Quarantine.init())
     attestationPool = newClone(AttestationPool.init(
-      dag, quarantine, config.forkChoiceVersion.get, onAttestationReceived))
+      dag, quarantine, onAttestationReceived))
     syncCommitteeMsgPool = newClone(
       SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution))
     lightClientPool = newClone(
@ -574,7 +567,9 @@ proc init*(T: type BeaconNode,
           config: BeaconNodeConf,
           metadata: Eth2NetworkMetadata): Future[BeaconNode]
          {.async.} =
-  var taskpool: TaskPoolPtr
+  var
+    taskpool: TaskPoolPtr
+    genesisState: ref ForkedHashedBeaconState = nil

   template cfg: auto = metadata.cfg
   template eth1Network: auto = metadata.eth1Network
@ -582,10 +577,10 @@ proc init*(T: type BeaconNode,
   if not(isDir(config.databaseDir)):
     # If database directory missing, we going to use genesis state to check
    # for weak_subjectivity_period.
+    genesisState =
+      await fetchGenesisState(
+        metadata, config.genesisState, config.genesisStateUrl)
     let
-      genesisState =
-        await fetchGenesisState(
-          metadata, config.genesisState, config.genesisStateUrl)
       genesisTime = getStateField(genesisState[], genesis_time)
       beaconClock = BeaconClock.init(genesisTime).valueOr:
         fatal "Invalid genesis time in genesis state", genesisTime
@ -640,15 +635,15 @@ proc init*(T: type BeaconNode,
     db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false)

   if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr:
-    var genesisState: ref ForkedHashedBeaconState
     let trustedBlockRoot =
       if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome:
         config.trustedBlockRoot
       elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH:
         # Sync can be bootstrapped from the genesis block root
-        genesisState = await fetchGenesisState(
-          metadata, config.genesisState, config.genesisStateUrl)
-        if genesisState != nil:
+        if genesisState.isNil:
+          genesisState = await fetchGenesisState(
+            metadata, config.genesisState, config.genesisStateUrl)
+        if not genesisState.isNil:
           let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root
           notice "Neither `--trusted-block-root` nor `--trusted-state-root` " &
             "provided with `--external-beacon-api-url`, " &
@ -669,7 +664,7 @@ proc init*(T: type BeaconNode,
         trustedBlockRoot = config.trustedBlockRoot,
         trustedStateRoot = config.trustedStateRoot
     else:
-      if genesisState == nil:
+      if genesisState.isNil:
         genesisState = await fetchGenesisState(
           metadata, config.genesisState, config.genesisStateUrl)
       await db.doRunTrustedNodeSync(
@ -735,15 +730,18 @@ proc init*(T: type BeaconNode,
   var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot

   if not ChainDAGRef.isInitialized(db).isOk():
-    let genesisState =
-      if checkpointState != nil and
+    genesisState =
+      if not checkpointState.isNil and
          getStateField(checkpointState[], slot) == 0:
         checkpointState
       else:
-        await fetchGenesisState(
-          metadata, config.genesisState, config.genesisStateUrl)
+        if genesisState.isNil:
+          await fetchGenesisState(
+            metadata, config.genesisState, config.genesisStateUrl)
+        else:
+          genesisState

-    if genesisState == nil and checkpointState == nil:
+    if genesisState.isNil and checkpointState.isNil:
       fatal "No database and no genesis snapshot found. Please supply a genesis.ssz " &
         "with the network configuration"
       quit 1
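`BeaconNode.init` now keeps a single `genesisState` variable for the whole proc and only calls `fetchGenesisState` when it is still nil, instead of re-downloading the state separately in each branch. The sketch below illustrates only that fetch-once pattern, with stub types and an illustrative fetch counter.

```nim
type StubState = ref object
  genesisTime: uint64

var fetches = 0

proc fetchGenesisStateStub(): StubState =
  inc fetches
  StubState(genesisTime: 1606824023'u64)

proc getOrFetch(cached: var StubState): StubState =
  # Reuse an already-fetched state instead of downloading it again.
  if cached.isNil:
    cached = fetchGenesisStateStub()
  cached

when isMainModule:
  var genesisState: StubState = nil
  discard getOrFetch(genesisState)   # e.g. while checking weak subjectivity
  discard getOrFetch(genesisState)   # e.g. again when initialising the DAG
  doAssert fetches == 1
```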
@ -1246,8 +1244,6 @@ proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) =
     for validator in node.attachedValidators[]:
       validator.doppelgangerChecked(epoch - 1)

-from ./spec/state_transition_epoch import effective_balance_might_update
-
 proc maybeUpdateActionTrackerNextEpoch(
     node: BeaconNode, forkyState: ForkyHashedBeaconState, nextEpoch: Epoch) =
   if node.consensusManager[].actionTracker.needsUpdate(
@ -1793,7 +1789,7 @@ proc installMessageValidators(node: BeaconNode) =
       let digest = forkDigests[].atConsensusFork(consensusFork)

       # beacon_block
-      # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_block
+      # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#beacon_block
       node.network.addValidator(
         getBeaconBlocksTopic(digest), proc (
           signedBlock: consensusFork.SignedBeaconBlock
@ -1910,7 +1906,7 @@ proc installMessageValidators(node: BeaconNode) =
             MsgSource.gossip, msg)))

       when consensusFork >= ConsensusFork.Capella:
-        # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#bls_to_execution_change
+        # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/p2p-interface.md#bls_to_execution_change
         node.network.addAsyncValidator(
           getBlsToExecutionChangeTopic(digest), proc (
             msg: SignedBLSToExecutionChange
@ -2253,8 +2249,6 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai
   # works
   for node in metadata.bootstrapNodes:
     config.bootstrapNodes.add node
-  if config.forkChoiceVersion.isNone:
-    config.forkChoiceVersion = some(ForkChoiceVersion.Pr3431)

   ## Ctrl+C handling
   proc controlCHandler() {.noconv.} =
@ -43,6 +43,8 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
         Base10.toString(MIN_DEPOSIT_AMOUNT),
       MAX_EFFECTIVE_BALANCE:
         Base10.toString(MAX_EFFECTIVE_BALANCE),
+      MAX_EFFECTIVE_BALANCE_ELECTRA:
+        Base10.toString(static(MAX_EFFECTIVE_BALANCE_ELECTRA.uint64)),
       EFFECTIVE_BALANCE_INCREMENT:
         Base10.toString(EFFECTIVE_BALANCE_INCREMENT),
       MIN_ATTESTATION_INCLUSION_DELAY:
@ -90,7 +92,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
       MAX_VOLUNTARY_EXITS:
         Base10.toString(MAX_VOLUNTARY_EXITS),

-      # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/altair.yaml
+      # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/altair.yaml
       INACTIVITY_PENALTY_QUOTIENT_ALTAIR:
         Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR),
       MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR:
@ -106,7 +108,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
       UPDATE_TIMEOUT:
         Base10.toString(UPDATE_TIMEOUT),

-      # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/bellatrix.yaml
+      # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/bellatrix.yaml
       INACTIVITY_PENALTY_QUOTIENT_BELLATRIX:
         Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX),
       MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX:
@ -122,7 +124,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
       MAX_EXTRA_DATA_BYTES:
         Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)),

-      # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/capella.yaml
+      # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/capella.yaml
       MAX_BLS_TO_EXECUTION_CHANGES:
         Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)),
       MAX_WITHDRAWALS_PER_PAYLOAD:
@ -175,6 +177,10 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
         "0x" & $cfg.DENEB_FORK_VERSION,
       DENEB_FORK_EPOCH:
         Base10.toString(uint64(cfg.DENEB_FORK_EPOCH)),
+      ELECTRA_FORK_VERSION:
+        "0x" & $cfg.ELECTRA_FORK_VERSION,
+      ELECTRA_FORK_EPOCH:
+        Base10.toString(uint64(cfg.ELECTRA_FORK_EPOCH)),
       SECONDS_PER_SLOT:
         Base10.toString(SECONDS_PER_SLOT),
       SECONDS_PER_ETH1_BLOCK:
@ -90,8 +90,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
     var response = GetForkChoiceResponse(
       justified_checkpoint: forkChoice.checkpoints.justified.checkpoint,
       finalized_checkpoint: forkChoice.checkpoints.finalized,
-      extra_data: RestExtraData(
-        version: some($forkChoice.backend.proto_array.version)))
+      extra_data: RestExtraData())

     for item in forkChoice.backend.proto_array:
       let
@ -43,7 +43,7 @@ const
   GENESIS_SLOT* = Slot(0)
   GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT)

-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#constant
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#constant
   INTERVALS_PER_SLOT* = 3

   FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high())
@ -139,16 +139,16 @@ const
   # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate
   aggregateSlotOffset* = TimeDiff(nanoseconds:
     NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#prepare-sync-committee-message
   syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds:
     NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#broadcast-sync-committee-contribution
   syncContributionSlotOffset* = TimeDiff(nanoseconds:
     NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#sync-committee
   lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds:
     NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#sync-committee
   lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds:
     NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
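The slot offsets above are fixed fractions of a slot: with `INTERVALS_PER_SLOT = 3`, the aggregate and sync-contribution offsets land two thirds of the way into the slot and the sync-message and light-client offsets one third of the way in. A worked check of that arithmetic, assuming mainnet's 12-second slots:

```nim
const
  NANOSECONDS_PER_SLOT = 12_000_000_000'i64   # assumes a 12 s mainnet slot
  INTERVALS_PER_SLOT = 3

let
  aggregateSlotOffset = NANOSECONDS_PER_SLOT * 2 div INTERVALS_PER_SLOT
  syncCommitteeMessageSlotOffset = NANOSECONDS_PER_SLOT div INTERVALS_PER_SLOT

when isMainModule:
  doAssert aggregateSlotOffset == 8_000_000_000'i64             # 8 s into the slot
  doAssert syncCommitteeMessageSlotOffset == 4_000_000_000'i64  # 4 s into the slot
```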
@ -188,7 +188,7 @@ func epoch*(slot: Slot): Epoch = # aka compute_epoch_at_slot
   if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH
   else: Epoch(slot div SLOTS_PER_EPOCH)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
 func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start
   ## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`)
   (slot mod SLOTS_PER_EPOCH)
@ -196,7 +196,7 @@ func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_st
 template is_epoch*(slot: Slot): bool =
   slot.since_epoch_start == 0

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
 func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch
   ## Return the start slot of ``epoch``.
   const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH)
@ -216,7 +216,7 @@ iterator slots*(epoch: Epoch): Slot =
   for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
     yield slot

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#sync-committee
 template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod =
   if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD
   else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
@ -86,7 +86,7 @@ func compute_activation_exit_epoch*(epoch: Epoch): Epoch =
   ## ``epoch`` take effect.
   epoch + 1 + MAX_SEED_LOOKAHEAD

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_validator_churn_limit
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_validator_churn_limit
 func get_validator_churn_limit*(
     cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache):
     uint64 =
@ -96,7 +96,7 @@ func get_validator_churn_limit*(
     count_active_validators(
       state, state.get_current_epoch(), cache) div cfg.CHURN_LIMIT_QUOTIENT)

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#new-get_validator_activation_churn_limit
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/beacon-chain.md#new-get_validator_activation_churn_limit
 func get_validator_activation_churn_limit*(
     cfg: RuntimeConfig, state: deneb.BeaconState | electra.BeaconState,
     cache: var StateCache): uint64 =
@ -270,7 +270,7 @@ func compute_consolidation_epoch_and_update_churn*(

   state.earliest_consolidation_epoch

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated--initiate_validator_exit
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-initiate_validator_exit
 func initiate_validator_exit*(
     cfg: RuntimeConfig, state: var electra.BeaconState,
     index: ValidatorIndex, exit_queue_info: ExitQueueInfo,
@ -301,7 +301,7 @@ from ./datatypes/deneb import BeaconState

 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-slash_validator
 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator
 func get_slashing_penalty*(
     state: ForkyBeaconState, validator_effective_balance: Gwei): Gwei =
@ -319,21 +319,21 @@ func get_slashing_penalty*(

 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-slash_validator
 func get_whistleblower_reward*(
     state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState |
            capella.BeaconState | deneb.BeaconState,
     validator_effective_balance: Gwei): Gwei =
   validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-slash_validator
 func get_whistleblower_reward*(
     state: electra.BeaconState, validator_effective_balance: Gwei): Gwei =
   validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA

 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-slash_validator
 func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei =
   when state is phase0.BeaconState:
     whistleblower_reward div PROPOSER_REWARD_QUOTIENT
@ -346,7 +346,7 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G

 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-slash_validator
 proc slash_validator*(
     cfg: RuntimeConfig, state: var ForkyBeaconState,
     slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo,
@ -407,7 +407,7 @@ func get_initial_beacon_block*(state: phase0.HashedBeaconState):
   phase0.TrustedSignedBeaconBlock(
     message: message, root: hash_tree_root(message))

-# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors
 func get_initial_beacon_block*(state: altair.HashedBeaconState):
     altair.TrustedSignedBeaconBlock =
   # The genesis block is implicitly trusted
@ -419,7 +419,7 @@ func get_initial_beacon_block*(state: altair.HashedBeaconState):
   altair.TrustedSignedBeaconBlock(
     message: message, root: hash_tree_root(message))

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#testing
 func get_initial_beacon_block*(state: bellatrix.HashedBeaconState):
     bellatrix.TrustedSignedBeaconBlock =
   # The genesis block is implicitly trusted
@ -624,7 +624,7 @@ func get_attesting_indices*(

   toSeq(get_attesting_indices_iter(state, data, aggregation_bits, cache))

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_attesting_indices
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_attesting_indices
 func get_attesting_indices*(
     state: ForkyBeaconState, data: AttestationData,
     aggregation_bits: ElectraCommitteeValidatorsBits, committee_bits: auto,
@ -770,7 +770,7 @@ func check_attestation_index(
     Result[CommitteeIndex, cstring] =
   check_attestation_index(data.index, committees_per_slot)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
 func get_attestation_participation_flag_indices(
     state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState,
     data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] =
@ -1128,7 +1128,7 @@ proc process_attestation*(

   ok(proposer_reward)

 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee_indices
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#get_next_sync_committee_indices
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices
|
||||||
func get_next_sync_committee_keys(
|
func get_next_sync_committee_keys(
|
||||||
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
|
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
|
||||||
|
@ -1173,7 +1173,7 @@ func get_next_sync_committee_keys(
|
||||||
i += 1'u64
|
i += 1'u64
|
||||||
res
|
res
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
|
||||||
func has_eth1_withdrawal_credential*(validator: Validator): bool =
|
func has_eth1_withdrawal_credential*(validator: Validator): bool =
|
||||||
## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential.
|
## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential.
|
||||||
validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
|
validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
|
||||||
|
@ -1195,7 +1195,7 @@ func has_execution_withdrawal_credential*(validator: Validator): bool =
|
||||||
has_compounding_withdrawal_credential(validator) or
|
has_compounding_withdrawal_credential(validator) or
|
||||||
has_eth1_withdrawal_credential(validator)
|
has_eth1_withdrawal_credential(validator)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#is_fully_withdrawable_validator
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/beacon-chain.md#is_fully_withdrawable_validator
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_fully_withdrawable_validator
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_fully_withdrawable_validator
|
||||||
func is_fully_withdrawable_validator(
|
func is_fully_withdrawable_validator(
|
||||||
fork: static ConsensusFork, validator: Validator, balance: Gwei,
|
fork: static ConsensusFork, validator: Validator, balance: Gwei,
|
||||||
|
@ -1277,21 +1277,60 @@ func get_pending_balance_to_withdraw*(
|
||||||
|
|
||||||
pending_balance
|
pending_balance
|
||||||

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#effective-balances-updates
template effective_balance_might_update*(
    balance: Gwei, effective_balance: Gwei): bool =
  const
    HYSTERESIS_INCREMENT =
      EFFECTIVE_BALANCE_INCREMENT.Gwei div HYSTERESIS_QUOTIENT
    DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
    UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
  balance + DOWNWARD_THRESHOLD < effective_balance or
    effective_balance + UPWARD_THRESHOLD < balance

# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#effective-balances-updates
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#updated-process_effective_balance_updates
template get_effective_balance_update*(
    consensusFork: static ConsensusFork, balance: Gwei,
    effective_balance: Gwei, vidx: uint64): Gwei =
  when consensusFork <= ConsensusFork.Deneb:
    min(
      balance - balance mod EFFECTIVE_BALANCE_INCREMENT.Gwei,
      MAX_EFFECTIVE_BALANCE.Gwei)
  else:
    debugComment "amortize validator read access"
    let effective_balance_limit =
      if has_compounding_withdrawal_credential(state.validators.item(vidx)):
        MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei
      else:
        MIN_ACTIVATION_BALANCE.Gwei
    min(
      balance - balance mod EFFECTIVE_BALANCE_INCREMENT.Gwei,
      effective_balance_limit)

template get_updated_effective_balance*(
    consensusFork: static ConsensusFork, balance: Gwei,
    effective_balance: Gwei, vidx: uint64): Gwei =
  if effective_balance_might_update(balance, effective_balance):
    get_effective_balance_update(consensusFork, balance, effective_balance, vidx)
  else:
    balance

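# Standalone sketch (not from the diff) of the hysteresis rule above, with the
# mainnet preset values assumed locally (EFFECTIVE_BALANCE_INCREMENT = 1 ETH,
# HYSTERESIS_QUOTIENT = 4, HYSTERESIS_DOWNWARD_MULTIPLIER = 1,
# HYSTERESIS_UPWARD_MULTIPLIER = 5): the effective balance only moves once the
# actual balance drifts more than 0.25 ETH below or 1.25 ETH above it.
when isMainModule:
  const
    GWEI_PER_ETH = 1_000_000_000'u64
    EFFECTIVE_BALANCE_INCREMENT = 1'u64 * GWEI_PER_ETH
    HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT div 4  # HYSTERESIS_QUOTIENT
    DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * 1             # downward multiplier
    UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * 5               # upward multiplier

  func mightUpdate(balance, effective_balance: uint64): bool =
    balance + DOWNWARD_THRESHOLD < effective_balance or
      effective_balance + UPWARD_THRESHOLD < balance

  let effective = 32'u64 * GWEI_PER_ETH
  doAssert not mightUpdate(31_800_000_000'u64, effective)  # within 0.25 ETH band
  doAssert mightUpdate(31_700_000_000'u64, effective)      # dropped past the band
  doAssert not mightUpdate(33_200_000_000'u64, effective)  # within 1.25 ETH band
  doAssert mightUpdate(33_300_000_000'u64, effective)      # rose past the band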
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#new-get_expected_withdrawals
func get_expected_withdrawals*(
template get_expected_withdrawals_aux*(
    state: capella.BeaconState | deneb.BeaconState): seq[Withdrawal] =
    state: capella.BeaconState | deneb.BeaconState, epoch: Epoch,
    fetch_balance: untyped): seq[Withdrawal] =
  let
    epoch = get_current_epoch(state)
    num_validators = lenu64(state.validators)
    bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
  var
    withdrawal_index = state.next_withdrawal_index
    validator_index = state.next_withdrawal_validator_index
    validator_index {.inject.} = state.next_withdrawal_validator_index
    withdrawals: seq[Withdrawal] = @[]
  for _ in 0 ..< bound:
    let
      validator = state.validators[validator_index]
      balance = state.balances[validator_index]
      balance = fetch_balance
    if is_fully_withdrawable_validator(
        typeof(state).kind, validator, balance, epoch):
      var w = Withdrawal(
@ -1315,13 +1354,20 @@ func get_expected_withdrawals*(
    validator_index = (validator_index + 1) mod num_validators
  withdrawals

func get_expected_withdrawals*(
    state: capella.BeaconState | deneb.BeaconState): seq[Withdrawal] =
  get_expected_withdrawals_aux(state, get_current_epoch(state)) do:
    state.balances[validator_index]

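# Aside (illustrative, not from the diff): the `fetch_balance: untyped`
# parameter together with `validator_index {.inject.}` is the usual Nim pattern
# for letting the caller supply the balance-lookup expression, which the
# template re-evaluates with `validator_index` in scope. A minimal toy version
# of the same pattern, with made-up names:
when isMainModule:
  template sumOver(n, fetch_value: untyped): int =
    var acc = 0
    var idx {.inject.} = 0
    while idx < n:
      acc += fetch_value   # caller's expression, sees the injected `idx`
      inc idx
    acc

  let squares = @[0, 1, 4, 9, 16]
  # The caller's expression references `idx`, which only exists inside the
  # template expansion; the `do:` form used above passes it the same way.
  doAssert sumOver(squares.len, squares[idx]) == 30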
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-get_expected_withdrawals
# This partials count is used in exactly one place, while in general being able
# to cleanly treat the results of get_expected_withdrawals as a seq[Withdrawal]
# is valuable enough to make that the default version of this spec function.
func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):
template get_expected_withdrawals_with_partial_count_aux*(
    state: electra.BeaconState, epoch: Epoch, fetch_balance: untyped):
    (seq[Withdrawal], uint64) =
  let epoch = get_current_epoch(state)
  doAssert epoch - get_current_epoch(state) in [0'u64, 1'u64]

  var
    withdrawal_index = state.next_withdrawal_index
    withdrawals: seq[Withdrawal] = @[]
@ -1333,16 +1379,31 @@ func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):
      break

    let
      validator = state.validators[withdrawal.index]
      validator = state.validators.item(withdrawal.index)

      # Keep a uniform variable name available for injected code
      validator_index {.inject.} = withdrawal.index

      # Here, can't use the pre-stored effective balance because this template
      # might be called on the next slot and therefore next epoch, after which
      # the effective balance might have updated.
      effective_balance_at_slot =
        if epoch == get_current_epoch(state):
          validator.effective_balance
        else:
          get_updated_effective_balance(
            typeof(state).kind, fetch_balance, validator.effective_balance,
            validator_index)

      has_sufficient_effective_balance =
        validator.effective_balance >= static(MIN_ACTIVATION_BALANCE.Gwei)
        effective_balance_at_slot >= static(MIN_ACTIVATION_BALANCE.Gwei)
      has_excess_balance =
      has_excess_balance = fetch_balance > static(MIN_ACTIVATION_BALANCE.Gwei)
        state.balances[withdrawal.index] > static(MIN_ACTIVATION_BALANCE.Gwei)
    if validator.exit_epoch == FAR_FUTURE_EPOCH and
        has_sufficient_effective_balance and has_excess_balance:
      let withdrawable_balance = min(
      let
        state.balances[withdrawal.index] - static(MIN_ACTIVATION_BALANCE.Gwei),
        withdrawable_balance = min(
        withdrawal.amount)
          fetch_balance - static(MIN_ACTIVATION_BALANCE.Gwei),
          withdrawal.amount)
      var w = Withdrawal(
        index: withdrawal_index,
        validator_index: withdrawal.index,
@ -1356,13 +1417,13 @@ func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):
  let
    bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
    num_validators = lenu64(state.validators)
  var validator_index = state.next_withdrawal_validator_index
  var validator_index {.inject.} = state.next_withdrawal_validator_index

  # Sweep for remaining.
  for _ in 0 ..< bound:
    let
      validator = state.validators[validator_index]
      validator = state.validators.item(validator_index)
      balance = state.balances[validator_index]
      balance = fetch_balance
    if is_fully_withdrawable_validator(
        typeof(state).kind, validator, balance, epoch):
      var w = Withdrawal(
@ -1388,6 +1449,12 @@ func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):

  (withdrawals, partial_withdrawals_count)

template get_expected_withdrawals_with_partial_count*(
    state: electra.BeaconState): (seq[Withdrawal], uint64) =
  get_expected_withdrawals_with_partial_count_aux(
    state, get_current_epoch(state)) do:
    state.balances.item(validator_index)

func get_expected_withdrawals*(state: electra.BeaconState): seq[Withdrawal] =
  get_expected_withdrawals_with_partial_count(state)[0]

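# Worked example (illustrative only) of the partial-withdrawal clamp used in
# the loop above: the amount actually withdrawn is the requested amount capped
# by whatever the validator holds above MIN_ACTIVATION_BALANCE (32 ETH on
# mainnet, assumed here as a local constant).
when isMainModule:
  const
    GWEI_PER_ETH = 1_000_000_000'u64
    MIN_ACTIVATION_BALANCE = 32'u64 * GWEI_PER_ETH
  func withdrawableBalance(balance, requested: uint64): uint64 =
    # Assumes the caller already checked balance > MIN_ACTIVATION_BALANCE,
    # as the loop above does via `has_excess_balance`.
    min(balance - MIN_ACTIVATION_BALANCE, requested)
  # Validator with 40 ETH asking to withdraw 10 ETH: only 8 ETH is available.
  doAssert withdrawableBalance(40'u64 * GWEI_PER_ETH, 10'u64 * GWEI_PER_ETH) ==
    8'u64 * GWEI_PER_ETH
  # Validator with 40 ETH asking for 5 ETH: the full request fits.
  doAssert withdrawableBalance(40'u64 * GWEI_PER_ETH, 5'u64 * GWEI_PER_ETH) ==
    5'u64 * GWEI_PER_ETH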
|
@ -1513,7 +1580,7 @@ proc initialize_hashed_beacon_state_from_eth1*(
|
||||||
cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
|
cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
|
||||||
result.root = hash_tree_root(result.data)
|
result.root = hash_tree_root(result.data)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#testing
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#testing
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#testing
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing
|
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing
|
||||||
proc initialize_beacon_state_from_eth1*(
|
proc initialize_beacon_state_from_eth1*(
|
||||||
|
@ -1866,7 +1933,7 @@ func upgrade_to_capella*(cfg: RuntimeConfig, pre: bellatrix.BeaconState):
|
||||||
# historical_summaries initialized to correct default automatically
|
# historical_summaries initialized to correct default automatically
|
||||||
)
|
)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/fork.md#upgrading-the-state
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/fork.md#upgrading-the-state
|
||||||
func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
|
func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
|
||||||
ref deneb.BeaconState =
|
ref deneb.BeaconState =
|
||||||
let
|
let
|
||||||
|
@ -1951,7 +2018,7 @@ func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
|
||||||
historical_summaries: pre.historical_summaries
|
historical_summaries: pre.historical_summaries
|
||||||
)
|
)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/fork.md#upgrading-the-state
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/fork.md#upgrading-the-state
|
||||||
func upgrade_to_electra*(
|
func upgrade_to_electra*(
|
||||||
cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache):
|
cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache):
|
||||||
ref electra.BeaconState =
|
ref electra.BeaconState =
|
||||||
|
|
|
@ -245,14 +245,14 @@ proc blsVerify*(
|
||||||
# Guard against invalid signature blobs that fail to parse
|
# Guard against invalid signature blobs that fail to parse
|
||||||
parsedSig.isSome() and blsVerify(pubkey, message, parsedSig.get())
|
parsedSig.isSome() and blsVerify(pubkey, message, parsedSig.get())
|
||||||
|
|
||||||
func blsVerify*(sigSet: SignatureSet): bool =
|
func blsVerify*(sigset: SignatureSet): bool =
|
||||||
## Unbatched verification
|
## Unbatched verification
|
||||||
## of 1 SignatureSet
|
## of 1 SignatureSet
|
||||||
## tuple[pubkey: blscurve.PublicKey, message: array[32, byte], blscurve.signature: Signature]
|
## tuple[pubkey: blscurve.PublicKey, message: array[32, byte], blscurve.signature: Signature]
|
||||||
verify(
|
verify(
|
||||||
sigSet.pubkey,
|
sigset.pubkey,
|
||||||
sigSet.message,
|
sigset.message,
|
||||||
sigSet.signature
|
sigset.signature
|
||||||
)
|
)
|
||||||
|
|
||||||
func blsSign*(privkey: ValidatorPrivKey, message: openArray[byte]): CookedSig =
|
func blsSign*(privkey: ValidatorPrivKey, message: openArray[byte]): CookedSig =
|
||||||
|
|
|
@ -51,7 +51,7 @@ const
|
||||||
PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] =
|
PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] =
|
||||||
[uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]
|
[uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#misc
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#misc
|
||||||
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16
|
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16
|
||||||
SYNC_COMMITTEE_SUBNET_COUNT* = 4
|
SYNC_COMMITTEE_SUBNET_COUNT* = 4
|
||||||
|
|
||||||
|
@ -61,9 +61,12 @@ const
|
||||||
  # If there are ever more than 32 members in `BeaconState`, indices change!
  # `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `52 * 2 + 1`.
  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
  FINALIZED_ROOT_GINDEX* = 105.GeneralizedIndex # finalized_checkpoint > root
  CURRENT_SYNC_COMMITTEE_GINDEX* = 54.GeneralizedIndex # current_sync_committee
  NEXT_SYNC_COMMITTEE_GINDEX* = 55.GeneralizedIndex # next_sync_committee
  # finalized_checkpoint > root
  FINALIZED_ROOT_GINDEX* = 105.GeneralizedIndex
  # current_sync_committee
  CURRENT_SYNC_COMMITTEE_GINDEX* = 54.GeneralizedIndex
  # next_sync_committee
  NEXT_SYNC_COMMITTEE_GINDEX* = 55.GeneralizedIndex

  SYNC_SUBCOMMITTEE_SIZE* = SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT
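# Sanity-check sketch (not part of the diff) for where these numbers come from:
# with `BeaconState` padded to 32 leaves, field i sits at generalized index
# 32 + i; `finalized_checkpoint` is field 20, and `root` is the second of
# `Checkpoint`'s two fields, hence the "52 * 2 + 1" noted above. The field
# positions here are assumptions consistent with the constants in this file.
when isMainModule:
  func fieldGindex(treeWidth, fieldIndex: uint64): uint64 =
    treeWidth + fieldIndex
  let finalizedCheckpointGindex = fieldGindex(32, 20)   # 52
  doAssert finalizedCheckpointGindex * 2 + 1 == 105     # finalized_checkpoint.root
  doAssert fieldGindex(32, 22) == 54                    # current_sync_committee
  doAssert fieldGindex(32, 23) == 55                    # next_sync_committee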
|
|
||||||
|
@ -98,7 +101,7 @@ type
|
||||||
pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey]
|
pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey]
|
||||||
aggregate_pubkey*: ValidatorPubKey
|
aggregate_pubkey*: ValidatorPubKey
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#synccommitteemessage
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#synccommitteemessage
|
||||||
SyncCommitteeMessage* = object
|
SyncCommitteeMessage* = object
|
||||||
slot*: Slot
|
slot*: Slot
|
||||||
## Slot to which this contribution pertains
|
## Slot to which this contribution pertains
|
||||||
|
@ -112,7 +115,7 @@ type
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||
## Signature by the validator over the block root of `slot`
|
## Signature by the validator over the block root of `slot`
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#synccommitteecontribution
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#synccommitteecontribution
|
||||||
SyncCommitteeAggregationBits* =
|
SyncCommitteeAggregationBits* =
|
||||||
BitArray[SYNC_SUBCOMMITTEE_SIZE]
|
BitArray[SYNC_SUBCOMMITTEE_SIZE]
|
||||||
|
|
||||||
|
@ -134,18 +137,18 @@ type
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||
## Signature by the validator(s) over the block root of `slot`
|
## Signature by the validator(s) over the block root of `slot`
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#contributionandproof
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#contributionandproof
|
||||||
ContributionAndProof* = object
|
ContributionAndProof* = object
|
||||||
aggregator_index*: uint64 # `ValidatorIndex` after validation
|
aggregator_index*: uint64 # `ValidatorIndex` after validation
|
||||||
contribution*: SyncCommitteeContribution
|
contribution*: SyncCommitteeContribution
|
||||||
selection_proof*: ValidatorSig
|
selection_proof*: ValidatorSig
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#signedcontributionandproof
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#signedcontributionandproof
|
||||||
SignedContributionAndProof* = object
|
SignedContributionAndProof* = object
|
||||||
message*: ContributionAndProof
|
message*: ContributionAndProof
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#syncaggregatorselectiondata
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#syncaggregatorselectiondata
|
||||||
SyncAggregatorSelectionData* = object
|
SyncAggregatorSelectionData* = object
|
||||||
slot*: Slot
|
slot*: Slot
|
||||||
subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation
|
subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation
|
||||||
|
|
|
@ -74,7 +74,7 @@ export
|
||||||
tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto,
|
tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto,
|
||||||
digest, presets
|
digest, presets
|
||||||

const SPEC_VERSION* = "1.5.0-alpha.3"
const SPEC_VERSION* = "1.5.0-alpha.5"
  ## Spec version we're aiming to be compatible with, right now

const
|
const
|
||||||
|
@ -304,7 +304,7 @@ type
|
||||||
HashedValidatorPubKey* = object
|
HashedValidatorPubKey* = object
|
||||||
value*: ptr HashedValidatorPubKeyItem
|
value*: ptr HashedValidatorPubKeyItem
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#validator
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#validator
|
||||||
Validator* = object
|
Validator* = object
|
||||||
pubkeyData*{.serializedFieldName: "pubkey".}: HashedValidatorPubKey
|
pubkeyData*{.serializedFieldName: "pubkey".}: HashedValidatorPubKey
|
||||||
|
|
||||||
|
@ -326,7 +326,7 @@ type
|
||||||
withdrawable_epoch*: Epoch
|
withdrawable_epoch*: Epoch
|
||||||
## When validator can withdraw funds
|
## When validator can withdraw funds
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#pendingattestation
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#pendingattestation
|
||||||
PendingAttestation* = object
|
PendingAttestation* = object
|
||||||
aggregation_bits*: CommitteeValidatorsBits
|
aggregation_bits*: CommitteeValidatorsBits
|
||||||
data*: AttestationData
|
data*: AttestationData
|
||||||
|
@ -335,7 +335,7 @@ type
|
||||||
|
|
||||||
proposer_index*: uint64 # `ValidatorIndex` after validation
|
proposer_index*: uint64 # `ValidatorIndex` after validation
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#historicalbatch
|
||||||
HistoricalBatch* = object
|
HistoricalBatch* = object
|
||||||
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
|
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
|
||||||
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
|
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
|
||||||
|
@ -363,7 +363,7 @@ type
|
||||||
message*: VoluntaryExit
|
message*: VoluntaryExit
|
||||||
signature*: TrustedSig
|
signature*: TrustedSig
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#beaconblockheader
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#beaconblockheader
|
||||||
BeaconBlockHeader* = object
|
BeaconBlockHeader* = object
|
||||||
slot*: Slot
|
slot*: Slot
|
||||||
proposer_index*: uint64 # `ValidatorIndex` after validation
|
proposer_index*: uint64 # `ValidatorIndex` after validation
|
||||||
|
@ -371,7 +371,7 @@ type
|
||||||
state_root*: Eth2Digest
|
state_root*: Eth2Digest
|
||||||
body_root*: Eth2Digest
|
body_root*: Eth2Digest
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#signingdata
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#signingdata
|
||||||
SigningData* = object
|
SigningData* = object
|
||||||
object_root*: Eth2Digest
|
object_root*: Eth2Digest
|
||||||
domain*: Eth2Domain
|
domain*: Eth2Domain
|
||||||
|
@ -400,7 +400,7 @@ type
|
||||||
sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]
|
sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]
|
||||||
|
|
||||||
# This matches the mutable state of the Solidity deposit contract
|
# This matches the mutable state of the Solidity deposit contract
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/solidity_deposit_contract/deposit_contract.sol
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/solidity_deposit_contract/deposit_contract.sol
|
||||||
DepositContractState* = object
|
DepositContractState* = object
|
||||||
branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
|
branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
|
||||||
deposit_count*: array[32, byte] # Uint256
|
deposit_count*: array[32, byte] # Uint256
|
||||||
|
|
|
@ -35,7 +35,7 @@ const
|
||||||
NEWPAYLOAD_TIMEOUT* = 8.seconds
|
NEWPAYLOAD_TIMEOUT* = 8.seconds
|
||||||
|
|
||||||
type
|
type
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#custom-types
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#custom-types
|
||||||
Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION]
|
Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION]
|
||||||
|
|
||||||
ExecutionAddress* = object
|
ExecutionAddress* = object
|
||||||
|
@ -44,7 +44,7 @@ type
|
||||||
BloomLogs* = object
|
BloomLogs* = object
|
||||||
data*: array[BYTES_PER_LOGS_BLOOM, byte]
|
data*: array[BYTES_PER_LOGS_BLOOM, byte]
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayload
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#executionpayload
|
||||||
ExecutionPayload* = object
|
ExecutionPayload* = object
|
||||||
# Execution block header fields
|
# Execution block header fields
|
||||||
parent_hash*: Eth2Digest
|
parent_hash*: Eth2Digest
|
||||||
|
@ -72,7 +72,7 @@ type
|
||||||
executionPayload*: ExecutionPayload
|
executionPayload*: ExecutionPayload
|
||||||
blockValue*: Wei
|
blockValue*: Wei
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayloadheader
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#executionpayloadheader
|
||||||
ExecutionPayloadHeader* = object
|
ExecutionPayloadHeader* = object
|
||||||
# Execution block header fields
|
# Execution block header fields
|
||||||
parent_hash*: Eth2Digest
|
parent_hash*: Eth2Digest
|
||||||
|
@ -102,7 +102,7 @@ type
|
||||||
parent_hash*: Eth2Digest
|
parent_hash*: Eth2Digest
|
||||||
total_difficulty*: Eth2Digest # uint256
|
total_difficulty*: Eth2Digest # uint256
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconstate
|
||||||
BeaconState* = object
|
BeaconState* = object
|
||||||
# Versioning
|
# Versioning
|
||||||
genesis_time*: uint64
|
genesis_time*: uint64
|
||||||
|
@ -227,7 +227,7 @@ type
|
||||||
state_root*: Eth2Digest
|
state_root*: Eth2Digest
|
||||||
body*: TrustedBeaconBlockBody
|
body*: TrustedBeaconBlockBody
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconblockbody
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconblockbody
|
||||||
BeaconBlockBody* = object
|
BeaconBlockBody* = object
|
||||||
randao_reveal*: ValidatorSig
|
randao_reveal*: ValidatorSig
|
||||||
eth1_data*: Eth1Data
|
eth1_data*: Eth1Data
|
||||||
|
|
|
@ -32,8 +32,9 @@ const
|
||||||
# This index is rooted in `BeaconBlockBody`.
|
# This index is rooted in `BeaconBlockBody`.
|
||||||
# The first member (`randao_reveal`) is 16, subsequent members +1 each.
|
# The first member (`randao_reveal`) is 16, subsequent members +1 each.
|
||||||
# If there are ever more than 16 members in `BeaconBlockBody`, indices change!
|
# If there are ever more than 16 members in `BeaconBlockBody`, indices change!
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/ssz/merkle-proofs.md
|
||||||
EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex # execution_payload
|
# execution_payload
|
||||||
|
EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex
|
||||||
|
|
||||||
type
|
type
|
||||||
SignedBLSToExecutionChangeList* =
|
SignedBLSToExecutionChangeList* =
|
||||||
|
@ -123,7 +124,7 @@ type
|
||||||
ExecutionBranch* =
|
ExecutionBranch* =
|
||||||
array[log2trunc(EXECUTION_PAYLOAD_GINDEX), Eth2Digest]
|
array[log2trunc(EXECUTION_PAYLOAD_GINDEX), Eth2Digest]
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
|
||||||
LightClientHeader* = object
|
LightClientHeader* = object
|
||||||
beacon*: BeaconBlockHeader
|
beacon*: BeaconBlockHeader
|
||||||
## Beacon block header
|
## Beacon block header
|
||||||
|
@ -357,7 +358,7 @@ type
|
||||||
state_root*: Eth2Digest
|
state_root*: Eth2Digest
|
||||||
body*: TrustedBeaconBlockBody
|
body*: TrustedBeaconBlockBody
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#beaconblockbody
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/beacon-chain.md#beaconblockbody
|
||||||
BeaconBlockBody* = object
|
BeaconBlockBody* = object
|
||||||
randao_reveal*: ValidatorSig
|
randao_reveal*: ValidatorSig
|
||||||
eth1_data*: Eth1Data
|
eth1_data*: Eth1Data
|
||||||
|
@ -698,7 +699,7 @@ func upgrade_lc_bootstrap_to_capella*(
|
||||||
current_sync_committee: pre.current_sync_committee,
|
current_sync_committee: pre.current_sync_committee,
|
||||||
current_sync_committee_branch: pre.current_sync_committee_branch)
|
current_sync_committee_branch: pre.current_sync_committee_branch)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/light-client/fork.md#upgrading-light-client-data
|
||||||
func upgrade_lc_update_to_capella*(
|
func upgrade_lc_update_to_capella*(
|
||||||
pre: altair.LightClientUpdate): LightClientUpdate =
|
pre: altair.LightClientUpdate): LightClientUpdate =
|
||||||
LightClientUpdate(
|
LightClientUpdate(
|
||||||
|
|
|
@ -87,5 +87,5 @@ const
|
||||||
UNSET_DEPOSIT_REQUESTS_START_INDEX*: uint64 = not 0'u64
|
UNSET_DEPOSIT_REQUESTS_START_INDEX*: uint64 = not 0'u64
|
||||||
FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0
|
FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#withdrawal-prefixes
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#withdrawal-prefixes
|
||||||
COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02
|
COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02
|
||||||
|
|
|
@ -76,7 +76,7 @@ type
|
||||||
kzg_commitment*: KzgCommitment
|
kzg_commitment*: KzgCommitment
|
||||||
versioned_hash*: string # TODO should be string; VersionedHash not distinct
|
versioned_hash*: string # TODO should be string; VersionedHash not distinct
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/p2p-interface.md#blobidentifier
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/p2p-interface.md#blobidentifier
|
||||||
BlobIdentifier* = object
|
BlobIdentifier* = object
|
||||||
block_root*: Eth2Digest
|
block_root*: Eth2Digest
|
||||||
index*: BlobIndex
|
index*: BlobIndex
|
||||||
|
@ -167,7 +167,7 @@ type
|
||||||
## Current sync committee corresponding to `header.beacon.state_root`
|
## Current sync committee corresponding to `header.beacon.state_root`
|
||||||
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch
|
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientupdate
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientupdate
|
||||||
LightClientUpdate* = object
|
LightClientUpdate* = object
|
||||||
attested_header*: LightClientHeader
|
attested_header*: LightClientHeader
|
||||||
## Header attested to by the sync committee
|
## Header attested to by the sync committee
|
||||||
|
@ -466,7 +466,7 @@ type
|
||||||
bls_to_execution_changes*: SignedBLSToExecutionChangeList
|
bls_to_execution_changes*: SignedBLSToExecutionChangeList
|
||||||
blob_kzg_commitments*: KzgCommitments # [New in Deneb]
|
blob_kzg_commitments*: KzgCommitments # [New in Deneb]
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#signedbeaconblock
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#signedbeaconblock
|
||||||
SignedBeaconBlock* = object
|
SignedBeaconBlock* = object
|
||||||
message*: BeaconBlock
|
message*: BeaconBlock
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||
|
@ -626,7 +626,7 @@ func kzg_commitment_inclusion_proof_gindex*(
|
||||||
|
|
||||||
BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index
|
BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root
|
||||||
func get_lc_execution_root*(
|
func get_lc_execution_root*(
|
||||||
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
|
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
|
||||||
let epoch = header.beacon.slot.epoch
|
let epoch = header.beacon.slot.epoch
|
||||||
|
@ -657,7 +657,7 @@ func get_lc_execution_root*(
|
||||||
|
|
||||||
ZERO_HASH
|
ZERO_HASH
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header
|
||||||
func is_valid_light_client_header*(
|
func is_valid_light_client_header*(
|
||||||
header: LightClientHeader, cfg: RuntimeConfig): bool =
|
header: LightClientHeader, cfg: RuntimeConfig): bool =
|
||||||
let epoch = header.beacon.slot.epoch
|
let epoch = header.beacon.slot.epoch
|
||||||
|
|
|
@ -45,9 +45,12 @@ const
|
||||||
  # If there are ever more than 64 members in `BeaconState`, indices change!
  # `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `84 * 2 + 1`.
  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md
  FINALIZED_ROOT_GINDEX* = 169.GeneralizedIndex # finalized_checkpoint > root
  CURRENT_SYNC_COMMITTEE_GINDEX* = 86.GeneralizedIndex # current_sync_committee
  NEXT_SYNC_COMMITTEE_GINDEX* = 87.GeneralizedIndex # next_sync_committee
  # finalized_checkpoint > root
  FINALIZED_ROOT_GINDEX_ELECTRA* = 169.GeneralizedIndex
  # current_sync_committee
  CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA* = 86.GeneralizedIndex
  # next_sync_committee
  NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA* = 87.GeneralizedIndex

type
|
type
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#depositrequest
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#depositrequest
|
||||||
|
@ -155,7 +158,7 @@ type
|
||||||
ExecutePayload* = proc(
|
ExecutePayload* = proc(
|
||||||
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].}
|
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].}
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#depositreceipt
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#pendingbalancedeposit
|
||||||
PendingBalanceDeposit* = object
|
PendingBalanceDeposit* = object
|
||||||
index*: uint64
|
index*: uint64
|
||||||
amount*: Gwei
|
amount*: Gwei
|
||||||
|
@ -183,7 +186,7 @@ type
|
||||||
source_pubkey*: ValidatorPubKey
|
source_pubkey*: ValidatorPubKey
|
||||||
target_pubkey*: ValidatorPubKey
|
target_pubkey*: ValidatorPubKey
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#aggregateandproof
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#aggregateandproof
|
||||||
AggregateAndProof* = object
|
AggregateAndProof* = object
|
||||||
aggregator_index*: uint64 # `ValidatorIndex` after validation
|
aggregator_index*: uint64 # `ValidatorIndex` after validation
|
||||||
aggregate*: Attestation
|
aggregate*: Attestation
|
||||||
|
@ -195,13 +198,13 @@ type
|
||||||
signature*: ValidatorSig
|
signature*: ValidatorSig
|
||||||

  FinalityBranch* =
    array[log2trunc(FINALIZED_ROOT_GINDEX), Eth2Digest]
    array[log2trunc(FINALIZED_ROOT_GINDEX_ELECTRA), Eth2Digest]

  CurrentSyncCommitteeBranch* =
    array[log2trunc(CURRENT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
    array[log2trunc(CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA), Eth2Digest]

  NextSyncCommitteeBranch* =
    array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
    array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA), Eth2Digest]

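# Quick check (illustrative, assuming `log2trunc` is a floor-log2): the branch
# length equals the depth of the generalized index, so moving the finalized
# root from gindex 105 to 169 grows the Merkle proof from 6 to 7 hashes.
when isMainModule:
  func floorLog2(x: uint64): int =
    var v = x
    while v > 1:
      v = v shr 1
      inc result
  doAssert floorLog2(105) == 6   # pre-Electra FinalityBranch length
  doAssert floorLog2(169) == 7   # Electra FinalityBranch length
  doAssert floorLog2(54) == 5 and floorLog2(86) == 6  # sync committee branches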
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
|
||||||
LightClientHeader* = object
|
LightClientHeader* = object
|
||||||
|
@ -240,7 +243,7 @@ type
|
||||||
signature_slot*: Slot
|
signature_slot*: Slot
|
||||||
## Slot at which the aggregate signature was created (untrusted)
|
## Slot at which the aggregate signature was created (untrusted)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
|
||||||
LightClientFinalityUpdate* = object
|
LightClientFinalityUpdate* = object
|
||||||
# Header attested to by the sync committee
|
# Header attested to by the sync committee
|
||||||
attested_header*: LightClientHeader
|
attested_header*: LightClientHeader
|
||||||
|
@ -396,7 +399,7 @@ type
|
||||||
data*: BeaconState
|
data*: BeaconState
|
||||||
root*: Eth2Digest # hash_tree_root(data)
|
root*: Eth2Digest # hash_tree_root(data)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#beaconblock
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#beaconblock
|
||||||
BeaconBlock* = object
|
BeaconBlock* = object
|
||||||
  ## For each slot, a proposer is chosen from the validator pool to propose
  ## a new block. Once the block has been proposed, it is transmitted to
||||||
|
@ -795,7 +798,7 @@ func upgrade_lc_header_to_electra*(
|
||||||
      transactions_root: pre.execution.transactions_root,
      withdrawals_root: pre.execution.withdrawals_root,
      blob_gas_used: pre.execution.blob_gas_used,
      excess_blob_gas: pre.execution.blob_gas_used,
      excess_blob_gas: pre.execution.excess_blob_gas,
      deposit_requests_root: ZERO_HASH, # [New in Electra:EIP6110]
      withdrawal_requests_root: ZERO_HASH, # [New in Electra:EIP7002:EIP7251]
      consolidation_requests_root: ZERO_HASH), # [New in Electra:EIP7251]
|
@ -808,7 +811,7 @@ func upgrade_lc_bootstrap_to_electra*(
|
||||||
header: upgrade_lc_header_to_electra(pre.header),
|
header: upgrade_lc_header_to_electra(pre.header),
|
||||||
current_sync_committee: pre.current_sync_committee,
|
current_sync_committee: pre.current_sync_committee,
|
||||||
current_sync_committee_branch: normalize_merkle_branch(
|
current_sync_committee_branch: normalize_merkle_branch(
|
||||||
pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX))
|
pre.current_sync_committee_branch, CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA))
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/light-client/fork.md#upgrading-light-client-data
|
||||||
func upgrade_lc_update_to_electra*(
|
func upgrade_lc_update_to_electra*(
|
||||||
|
@ -817,10 +820,10 @@ func upgrade_lc_update_to_electra*(
|
||||||
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
|
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
|
||||||
next_sync_committee: pre.next_sync_committee,
|
next_sync_committee: pre.next_sync_committee,
|
||||||
next_sync_committee_branch: normalize_merkle_branch(
|
next_sync_committee_branch: normalize_merkle_branch(
|
||||||
pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX),
|
pre.next_sync_committee_branch, NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA),
|
||||||
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
|
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
|
||||||
finality_branch: normalize_merkle_branch(
|
finality_branch: normalize_merkle_branch(
|
||||||
pre.finality_branch, FINALIZED_ROOT_GINDEX),
|
pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA),
|
||||||
sync_aggregate: pre.sync_aggregate,
|
sync_aggregate: pre.sync_aggregate,
|
||||||
signature_slot: pre.signature_slot)
|
signature_slot: pre.signature_slot)
|
||||||
|
|
||||||
|
@ -831,7 +834,7 @@ func upgrade_lc_finality_update_to_electra*(
|
||||||
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
|
attested_header: upgrade_lc_header_to_electra(pre.attested_header),
|
||||||
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
|
finalized_header: upgrade_lc_header_to_electra(pre.finalized_header),
|
||||||
finality_branch: normalize_merkle_branch(
|
finality_branch: normalize_merkle_branch(
|
||||||
pre.finality_branch, FINALIZED_ROOT_GINDEX),
|
pre.finality_branch, FINALIZED_ROOT_GINDEX_ELECTRA),
|
||||||
sync_aggregate: pre.sync_aggregate,
|
sync_aggregate: pre.sync_aggregate,
|
||||||
signature_slot: pre.signature_slot)
|
signature_slot: pre.signature_slot)
|
||||||
|
|
||||||
|
|
|
@ -601,7 +601,7 @@ type
|
||||||
extra_data*: Option[RestNodeExtraData]
|
extra_data*: Option[RestNodeExtraData]
|
||||||
|
|
||||||
RestExtraData* = object
|
RestExtraData* = object
|
||||||
version*: Option[string]
|
discard
|
||||||
|
|
||||||
GetForkChoiceResponse* = object
|
GetForkChoiceResponse* = object
|
||||||
justified_checkpoint*: Checkpoint
|
justified_checkpoint*: Checkpoint
|
||||||
|
|
|
@ -169,20 +169,6 @@ type
|
||||||
of LightClientDataFork.Electra:
|
of LightClientDataFork.Electra:
|
||||||
electraData*: electra.LightClientStore
|
electraData*: electra.LightClientStore
|
||||||
|
|
||||||
func lcDataForkAtEpoch*(
|
|
||||||
cfg: RuntimeConfig, epoch: Epoch): LightClientDataFork =
|
|
||||||
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
|
|
||||||
if epoch >= cfg.ELECTRA_FORK_EPOCH:
|
|
||||||
LightClientDataFork.Electra
|
|
||||||
elif epoch >= cfg.DENEB_FORK_EPOCH:
|
|
||||||
LightClientDataFork.Deneb
|
|
||||||
elif epoch >= cfg.CAPELLA_FORK_EPOCH:
|
|
||||||
LightClientDataFork.Capella
|
|
||||||
elif epoch >= cfg.ALTAIR_FORK_EPOCH:
|
|
||||||
LightClientDataFork.Altair
|
|
||||||
else:
|
|
||||||
LightClientDataFork.None
|
|
||||||
|
|
||||||
template kind*(
|
template kind*(
|
||||||
# `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
|
# `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
|
||||||
x: typedesc[
|
x: typedesc[
|
||||||
|
@ -227,12 +213,12 @@ template kind*(
|
||||||
electra.LightClientStore]): LightClientDataFork =
|
electra.LightClientStore]): LightClientDataFork =
|
||||||
LightClientDataFork.Electra
|
LightClientDataFork.Electra
|
||||||
|
|
||||||
template FINALIZED_ROOT_GINDEX*(
|
template finalized_root_gindex*(
|
||||||
kind: static LightClientDataFork): GeneralizedIndex =
|
kind: static LightClientDataFork): GeneralizedIndex =
|
||||||
when kind >= LightClientDataFork.Electra:
|
when kind >= LightClientDataFork.Electra:
|
||||||
electra.FINALIZED_ROOT_GINDEX
|
FINALIZED_ROOT_GINDEX_ELECTRA
|
||||||
elif kind >= LightClientDataFork.Altair:
|
elif kind >= LightClientDataFork.Altair:
|
||||||
altair.FINALIZED_ROOT_GINDEX
|
FINALIZED_ROOT_GINDEX
|
||||||
else:
|
else:
|
||||||
static: raiseAssert "Unreachable"
|
static: raiseAssert "Unreachable"
|
||||||
|
|
||||||
|
@ -244,12 +230,12 @@ template FinalityBranch*(kind: static LightClientDataFork): auto =
|
||||||
else:
|
else:
|
||||||
static: raiseAssert "Unreachable"
|
static: raiseAssert "Unreachable"
|
||||||
|
|
||||||
template CURRENT_SYNC_COMMITTEE_GINDEX*(
|
template current_sync_committee_gindex*(
|
||||||
kind: static LightClientDataFork): GeneralizedIndex =
|
kind: static LightClientDataFork): GeneralizedIndex =
|
||||||
when kind >= LightClientDataFork.Electra:
|
when kind >= LightClientDataFork.Electra:
|
||||||
electra.CURRENT_SYNC_COMMITTEE_GINDEX
|
CURRENT_SYNC_COMMITTEE_GINDEX_ELECTRA
|
||||||
elif kind >= LightClientDataFork.Altair:
|
elif kind >= LightClientDataFork.Altair:
|
||||||
altair.CURRENT_SYNC_COMMITTEE_GINDEX
|
CURRENT_SYNC_COMMITTEE_GINDEX
|
||||||
else:
|
else:
|
||||||
static: raiseAssert "Unreachable"
|
static: raiseAssert "Unreachable"
|
||||||
|
|
||||||
|
@ -261,12 +247,12 @@ template CurrentSyncCommitteeBranch*(kind: static LightClientDataFork): auto =
|
||||||
else:
|
else:
|
||||||
static: raiseAssert "Unreachable"
|
static: raiseAssert "Unreachable"
|
||||||
|
|
||||||
template NEXT_SYNC_COMMITTEE_GINDEX*(
|
template next_sync_committee_gindex*(
|
||||||
kind: static LightClientDataFork): GeneralizedIndex =
|
kind: static LightClientDataFork): GeneralizedIndex =
|
||||||
when kind >= LightClientDataFork.Electra:
|
when kind >= LightClientDataFork.Electra:
|
||||||
electra.NEXT_SYNC_COMMITTEE_GINDEX
|
NEXT_SYNC_COMMITTEE_GINDEX_ELECTRA
|
||||||
elif kind >= LightClientDataFork.Altair:
|
elif kind >= LightClientDataFork.Altair:
|
||||||
altair.NEXT_SYNC_COMMITTEE_GINDEX
|
NEXT_SYNC_COMMITTEE_GINDEX
|
||||||
else:
|
else:
|
||||||
static: raiseAssert "Unreachable"
|
static: raiseAssert "Unreachable"
|
||||||
|
|
||||||
|
|
|
@ -25,7 +25,7 @@ import
|
||||||
export
|
export
|
||||||
eth2_merkleization, forks, rlp, ssz_codec
|
eth2_merkleization, forks, rlp, ssz_codec
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#constants
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#constants
const ETH_TO_GWEI = 1_000_000_000.Gwei

func toEther*(gwei: Gwei): Ether =

@@ -162,7 +162,7 @@ func compute_domain*(
  result[0..3] = domain_type.data
  result[4..31] = fork_data_root.data.toOpenArray(0, 27)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_domain
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_domain
func get_domain*(
    fork: Fork,
    domain_type: DomainType,

@@ -258,7 +258,7 @@ func create_blob_sidecars*(
    res.add(sidecar)
  res

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool =
  when update is SomeForkyLightClientUpdateWithSyncCommittee:
    update.next_sync_committee_branch !=

@@ -387,7 +387,7 @@ func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch =
func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch =
  update.attested_header.beacon.slot.epoch

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
func is_merge_transition_complete*(
    state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
           electra.BeaconState): bool =

@@ -395,7 +395,7 @@ func is_merge_transition_complete*(
      default(typeof(state.latest_execution_payload_header))
  state.latest_execution_payload_header != defaultExecutionPayloadHeader

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#helpers
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/sync/optimistic.md#helpers
func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
  when typeof(blck).kind >= ConsensusFork.Bellatrix:
    const defaultExecutionPayload =

@@ -404,7 +404,7 @@ func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
  else:
    false

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_block
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#is_merge_transition_block
func is_merge_transition_block(
    state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
           electra.BeaconState,

@@ -420,7 +420,7 @@ func is_merge_transition_block(
  not is_merge_transition_complete(state) and
    body.execution_payload != defaultExecutionPayload

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_execution_enabled
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#is_execution_enabled
func is_execution_enabled*(
    state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
           electra.BeaconState,

@@ -434,7 +434,7 @@ func is_execution_enabled*(
      electra.SigVerifiedBeaconBlockBody): bool =
  is_merge_transition_block(state, body) or is_merge_transition_complete(state)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 =
  # Note: This function is unsafe with respect to overflows and underflows.
  let slots_since_genesis = slot - GENESIS_SLOT

@@ -605,3 +605,26 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =

proc compute_execution_block_hash*(blck: ForkyBeaconBlock): Eth2Digest =
  rlpHash blockToBlockHeader(blck)
+
+from std/math import exp, ln
+from std/sequtils import foldl
+
+func ln_binomial(n, k: int): float64 =
+  if k > n:
+    low(float64)
+  else:
+    template ln_factorial(n: int): float64 =
+      (2 .. n).foldl(a + ln(b.float64), 0.0)
+    ln_factorial(n) - ln_factorial(k) - ln_factorial(n - k)
+
+func hypergeom_cdf*(k: int, population: int, successes: int, draws: int):
+    float64 =
+  if k < draws + successes - population:
+    0.0
+  elif k >= min(successes, draws):
+    1.0
+  else:
+    let ln_denom = ln_binomial(population, draws)
+    (0 .. k).foldl(a + exp(
+      ln_binomial(successes, b) +
+      ln_binomial(population - successes, draws - b) - ln_denom), 0.0)
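The new `hypergeom_cdf` helper evaluates the hypergeometric CDF in log-space so the binomial coefficients never overflow. As a reference for what the folds above compute (a sketch of the underlying math, not part of the diff itself), with N = population, K = successes and n = draws:

```latex
P(X \le k) = \sum_{i=0}^{k} \frac{\binom{K}{i}\,\binom{N-K}{n-i}}{\binom{N}{n}},
\qquad
\ln\binom{n}{k} = \ln n! - \ln k! - \ln (n-k)!
```

Each term is evaluated as exp(ln C(K, i) + ln C(N-K, n-i) - ln C(N, n)), which is exactly the `ln_binomial`/`foldl` combination in the added code.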
@@ -0,0 +1,46 @@
+# beacon_chain
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [].}
+
+import
+  std/typetraits,
+  eth/common/eth_types_rlp,
+  "."/[helpers, state_transition_block]
+
+func readExecutionTransaction(
+    txBytes: bellatrix.Transaction): Result[ExecutionTransaction, string] =
+  # Nim 2.0.8: `rlp.decode(distinctBase(txBytes), ExecutionTransaction)`
+  # uses the generic `read` from `rlp.nim` instead of the specific `read`
+  # from `eth_types_rlp.nim`, leading to compilation error.
+  # Doing this in two steps works around this resolution order issue.
+  var rlp = rlpFromBytes(distinctBase(txBytes))
+  try:
+    ok rlp.read(ExecutionTransaction)
+  except RlpError as exc:
+    err("Invalid transaction: " & exc.msg)
+
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.4/specs/deneb/beacon-chain.md#is_valid_versioned_hashes
+func is_valid_versioned_hashes*(blck: ForkyBeaconBlock): Result[void, string] =
+  static: doAssert typeof(blck).kind >= ConsensusFork.Deneb
+  template transactions: untyped = blck.body.execution_payload.transactions
+  template commitments: untyped = blck.body.blob_kzg_commitments
+
+  var i = 0
+  for txBytes in transactions:
+    if txBytes.len == 0 or txBytes[0] != TxEip4844.byte:
+      continue  # Only blob transactions may have blobs
+    let tx = ? txBytes.readExecutionTransaction()
+    for vHash in tx.versionedHashes:
+      if commitments.len <= i:
+        return err("Extra blobs without matching `blob_kzg_commitments`")
+      if vHash.data != kzg_commitment_to_versioned_hash(commitments[i]):
+        return err("Invalid `blob_versioned_hash` at index " & $i)
+      inc i
+  if i != commitments.len:
+    return err("Extra `blob_kzg_commitments` without matching blobs")
+  ok()
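For reference, the versioned hash that `kzg_commitment_to_versioned_hash` compares against is the SHA-256 of the KZG commitment with its first byte replaced by the KZG version tag. A minimal standalone sketch of that mapping (an assumed simplification for illustration, not the helper used in the new file):

```nim
import nimcrypto/sha2

const VERSIONED_HASH_VERSION_KZG = 0x01'u8

# versioned_hash = VERSIONED_HASH_VERSION_KZG || sha256(commitment)[1..31]
func toVersionedHash(commitment: openArray[byte]): array[32, byte] =
  let digest = sha256.digest(commitment)
  result = digest.data
  result[0] = VERSIONED_HASH_VERSION_KZG
```

With that relation, checking a blob transaction reduces to recomputing the hash of each listed commitment and comparing it against the `blob_versioned_hash` entries carried in the transaction, which is what the loop above does index by index.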
@@ -1380,13 +1380,13 @@ proc createWallet*(kdfKind: KdfKind,
    crypto: crypto,
    nextAccount: nextAccount.get(0))

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#bls_withdrawal_prefix
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#bls_withdrawal_prefix
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
  var bytes = eth2digest(k.toRaw())
  bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
  bytes

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/deposit-contract.md#withdrawal-credentials
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/deposit-contract.md#withdrawal-credentials
func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
  makeWithdrawalCredentials(k.toPubKey())
@@ -50,7 +50,7 @@ func initialize_light_client_store*(
  if not is_valid_normalized_merkle_branch(
      hash_tree_root(bootstrap.current_sync_committee),
      bootstrap.current_sync_committee_branch,
-      lcDataFork.CURRENT_SYNC_COMMITTEE_GINDEX,
+      lcDataFork.current_sync_committee_gindex,
      bootstrap.header.beacon.state_root):
    return ResultType.err(VerifierError.Invalid)

@@ -132,7 +132,7 @@ proc validate_light_client_update*(
    if not is_valid_normalized_merkle_branch(
        finalized_root,
        update.finality_branch,
-        lcDataFork.FINALIZED_ROOT_GINDEX,
+        lcDataFork.finalized_root_gindex,
        update.attested_header.beacon.state_root):
      return err(VerifierError.Invalid)

@@ -153,7 +153,7 @@ proc validate_light_client_update*(
    if not is_valid_normalized_merkle_branch(
        hash_tree_root(update.next_sync_committee),
        update.next_sync_committee_branch,
-        lcDataFork.NEXT_SYNC_COMMITTEE_GINDEX,
+        lcDataFork.next_sync_committee_gindex,
        update.attested_header.beacon.state_root):
      return err(VerifierError.Invalid)
@@ -14,8 +14,8 @@ import
export base

const
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#topics-and-messages
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#topics-and-messages
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/p2p-interface.md#topics-and-messages
  topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy"
  topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy"
  topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy"

@@ -27,7 +27,7 @@ const
  # The spec now includes this as a bare uint64 as `RESP_TIMEOUT`
  RESP_TIMEOUT_DUR* = RESP_TIMEOUT.int64.seconds

-  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#configuration
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#configuration
  MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128

  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#configuration

@@ -63,7 +63,7 @@ func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
  eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/p2p-interface.md#topics-and-messages
func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string =
  eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix

@@ -197,7 +197,7 @@ func getTargetGossipState*(
  targetForks

func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] =
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#sync-committee-subnet-stability
  if epoch.is_sync_committee_period():
    return Opt.some 0'u64
  let epochsBefore =

@@ -216,7 +216,7 @@ func getSyncSubnets*(
    if not nodeHasPubkey(pubkey):
      continue

-    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-message
+    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#broadcast-sync-committee-message
    # The first quarter of the pubkeys map to subnet 0, the second quarter to
    # subnet 1, the third quarter to subnet 2 and the final quarter to subnet
    # 3.
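The quarter-based mapping described in the comment above amounts to dividing a validator's position in the sync committee by the per-subnet slice size. A standalone sketch of that mapping (illustrative only, assuming the mainnet constants SYNC_COMMITTEE_SIZE = 512 and SYNC_COMMITTEE_SUBNET_COUNT = 4, which are not restated in this diff):

```nim
const
  SYNC_COMMITTEE_SIZE = 512
  SYNC_COMMITTEE_SUBNET_COUNT = 4

# Positions 0..127 map to subnet 0, 128..255 to subnet 1, and so on.
func syncSubnetFor(committeePosition: int): int =
  committeePosition div (SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT)

assert syncSubnetFor(130) == 1
```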
@@ -787,7 +787,7 @@ proc readRuntimeConfig*(
      "MAX_REQUEST_BLOB_SIDECARS"
    checkCompatibility BLOB_SIDECAR_SUBNET_COUNT

-    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#configuration
+    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#configuration
    # Isn't being used as a preset in the usual way: at any time, there's one correct value
    checkCompatibility PROPOSER_SCORE_BOOST
    checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD
@@ -8,7 +8,7 @@
{.push raises: [].}

# Gnosis preset - Electra (Gnosis version not avilable yet; EF mainnet for now)
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/electra.yaml
const
  # Gwei values
  # ---------------------------------------------------------------

@@ -8,7 +8,7 @@
{.push raises: [].}

# Mainnet preset - Altair
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/altair.yaml
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/altair.yaml
const
  # Updated penalty values
  # ---------------------------------------------------------------

@@ -8,7 +8,7 @@
{.push raises: [].}

# Mainnet preset - Bellatrix
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/bellatrix.yaml
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/bellatrix.yaml
const
  # Updated penalty values
  # ---------------------------------------------------------------

@@ -8,7 +8,7 @@
{.push raises: [].}

# Mainnet preset - Capella
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/capella.yaml
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/capella.yaml
const
  # Max operations per block
  # ---------------------------------------------------------------

@@ -8,7 +8,7 @@
{.push raises: [].}

# Electra preset - Electra
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/electra.yaml
const
  # Gwei values
  # ---------------------------------------------------------------

@@ -8,7 +8,7 @@
{.push raises: [].}

# Minimal preset - Altair
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/altair.yaml
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/minimal/altair.yaml
const
  # Updated penalty values
  # ---------------------------------------------------------------

@@ -8,7 +8,7 @@
{.push raises: [].}

# Minimal preset - Bellatrix
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/bellatrix.yaml
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/minimal/bellatrix.yaml
const
  # Updated penalty values
  # ---------------------------------------------------------------

@@ -8,7 +8,7 @@
{.push raises: [].}

# Minimal preset - Capella
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/capella.yaml
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/minimal/capella.yaml
const
  # Max operations per block
  # ---------------------------------------------------------------
@@ -143,7 +143,7 @@ func compute_attestation_signing_root*(
    fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
  compute_signing_root(attestation_data, domain)

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregate-signature
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#aggregate-signature
func get_attestation_signature*(
    fork: Fork, genesis_validators_root: Eth2Digest,
    attestation_data: AttestationData,

@@ -269,7 +269,7 @@ proc verify_voluntary_exit_signature*(

  blsVerify(pubkey, signing_root.data, signature)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#prepare-sync-committee-message
func compute_sync_committee_message_signing_root*(
    fork: Fork, genesis_validators_root: Eth2Digest,
    slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest =

@@ -304,7 +304,7 @@ proc verify_sync_committee_signature*(

  blsFastAggregateVerify(pubkeys, signing_root.data, signature)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#aggregation-selection
func compute_sync_committee_selection_proof_signing_root*(
    fork: Fork, genesis_validators_root: Eth2Digest,
    slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest =

@@ -335,7 +335,7 @@ proc verify_sync_committee_selection_proof*(

  blsVerify(pubkey, signing_root.data, signature)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#signature
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#signature
func compute_contribution_and_proof_signing_root*(
    fork: Fork, genesis_validators_root: Eth2Digest,
    msg: ContributionAndProof): Eth2Digest =

@@ -353,7 +353,7 @@ proc get_contribution_and_proof_signature*(

  blsSign(privkey, signing_root.data)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#aggregation-selection
func is_sync_committee_aggregator*(signature: ValidatorSig): bool =
  let
    signatureDigest = eth2digest(signature.blob)
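As context for `is_sync_committee_aggregator`, the linked aggregation-selection section defines the check roughly as hashing the selection-proof signature and testing a modulus (constants assumed from the spec: SYNC_COMMITTEE_SIZE = 512, SYNC_COMMITTEE_SUBNET_COUNT = 4, TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE = 16):

```latex
\operatorname{bytes\_to\_uint64}\bigl(\operatorname{hash}(\sigma)[0{:}8]\bigr) \bmod
\max\!\Bigl(1,\ \tfrac{\mathrm{SYNC\_COMMITTEE\_SIZE}}{\mathrm{SYNC\_COMMITTEE\_SUBNET\_COUNT}\cdot\mathrm{TARGET\_AGGREGATORS\_PER\_SYNC\_SUBCOMMITTEE}}\Bigr) = 0
```

With the mainnet values the modulus is 512 / (4 * 16) = 8, so roughly one in eight selection proofs marks its validator as an aggregator for its subcommittee.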
@@ -83,7 +83,7 @@ func aggregateAttesters(
    # Aggregation spec requires non-empty collection
    # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
    # Consensus specs require at least one attesting index in attestation
-    # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
+    # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
    return err("aggregateAttesters: no attesting indices")

  let

@@ -365,7 +365,7 @@ func partialBeaconBlock*(
): auto =
  const consensusFork = typeof(state).kind

-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#preparing-for-a-beaconblock
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#preparing-for-a-beaconblock
  var res = consensusFork.BeaconBlock(
    slot: state.data.slot,
    proposer_index: proposer_index.uint64,
@@ -10,7 +10,7 @@
# State transition - block processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#block-processing

@@ -82,7 +82,7 @@ func `xor`[T: array](a, b: T): T =
  for i in 0..<result.len:
    result[i] = a[i] xor b[i]

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#randao
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#randao
proc process_randao(
    state: var ForkyBeaconState, body: SomeForkyBeaconBlockBody,
    flags: UpdateFlags, cache: var StateCache): Result[void, cstring] =

@@ -135,7 +135,7 @@ func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
    (validator.activation_epoch <= epoch) and
    (epoch < validator.withdrawable_epoch)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#proposer-slashings
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#proposer-slashings
proc check_proposer_slashing*(
    state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing,
    flags: UpdateFlags):

@@ -275,48 +275,20 @@ proc process_attester_slashing*(

  ok((proposer_reward, cur_exit_queue_info))

-func findValidatorIndex*(state: ForkyBeaconState, pubkey: ValidatorPubKey):
-    Opt[ValidatorIndex] =
-  # This linear scan is unfortunate, but should be fairly fast as we do a simple
-  # byte comparison of the key. The alternative would be to build a Table, but
-  # given that each block can hold no more than 16 deposits, it's slower to
-  # build the table and use it for lookups than to scan it like this.
-  # Once we have a reusable, long-lived cache, this should be revisited
-  #
-  # For deposit processing purposes, two broad cases exist, either
-  #
-  # (a) someone has deposited all 32 required ETH as a single transaction,
-  #     in which case the index doesn't yet exist so the search order does
-  #     not matter so long as it's generally in an order memory controller
-  #     prefetching can predict; or
-  #
-  # (b) the deposit has been split into multiple parts, typically not far
-  #     apart from each other, such that on average one would expect this
-  #     validator index to be nearer the maximal than minimal index.
-  #
-  # countdown() infinite-loops if the lower bound with uint32 is 0, so
-  # shift indices by 1, which avoids triggering unsigned wraparound.
-  for vidx in countdown(state.validators.len.uint32, 1):
-    if state.validators.asSeq[vidx - 1].pubkey == pubkey:
-      return Opt[ValidatorIndex].ok((vidx - 1).ValidatorIndex)
-
-from ".."/bloomfilter import
-  PubkeyBloomFilter, constructBloomFilter, incl, mightContain
+from ".."/validator_bucket_sort import
+  BucketSortedValidators, add, findValidatorIndex, sortValidatorBuckets

# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/phase0/beacon-chain.md#deposits
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated--apply_deposit
proc apply_deposit(
    cfg: RuntimeConfig, state: var ForkyBeaconState,
-    bloom_filter: var PubkeyBloomFilter, deposit_data: DepositData,
-    flags: UpdateFlags): Result[void, cstring] =
+    bucketSortedValidators: var BucketSortedValidators,
+    deposit_data: DepositData, flags: UpdateFlags): Result[void, cstring] =
  let
    pubkey = deposit_data.pubkey
    amount = deposit_data.amount
-    index =
-      if bloom_filter.mightContain(pubkey):
-        findValidatorIndex(state, pubkey)
-      else:
-        Opt.none(ValidatorIndex)
+    index = findValidatorIndex(
+      state.validators.asSeq, bucketSortedValidators, pubkey)

  if index.isSome():
    # Increase balance by deposit amount
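This hunk replaces the previous bloom-filter-plus-reverse-linear-scan lookup with a bucket-sorted view of the validator registry, so repeated pubkey lookups during deposit and Electra request processing stay sub-linear. The sketch below only illustrates the general idea (hypothetical names and types, not the actual `validator_bucket_sort` module): group validator indices by the leading byte of the pubkey and scan a single bucket per lookup.

```nim
type
  Pubkey = array[48, byte]        # placeholder for ValidatorPubKey
  Buckets = array[256, seq[int]]  # validator indices grouped by leading byte

func sortBuckets(pubkeys: openArray[Pubkey]): Buckets =
  # One pass over the registry builds all buckets.
  for i, pk in pubkeys:
    result[pk[0].int].add i

func findIndex(buckets: Buckets, pubkeys: openArray[Pubkey], pk: Pubkey): int =
  # Scans roughly len(pubkeys) / 256 entries instead of the whole registry.
  for i in buckets[pk[0].int]:
    if pubkeys[i] == pk:
      return i
  -1
```

In the diff, the bucket-sorted structure is built once per block (and only when the block actually carries deposits or Electra execution-layer requests) and is then threaded through `apply_deposit`, `process_withdrawal_request` and `process_consolidation_request`, as the following hunks show.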
@@ -358,14 +330,15 @@ proc apply_deposit(
      return err("apply_deposit: too many validators (current_epoch_participation)")
    if not state.inactivity_scores.add(0'u64):
      return err("apply_deposit: too many validators (inactivity_scores)")
+    let new_vidx = state.validators.lenu64 - 1
    when typeof(state).kind >= ConsensusFork.Electra:
      debugComment "check hashlist add return"

      # [New in Electra:EIP7251]
      discard state.pending_balance_deposits.add PendingBalanceDeposit(
-        index: state.validators.lenu64 - 1, amount: amount)
+        index: new_vidx, amount: amount)
    doAssert state.validators.len == state.balances.len
-    bloom_filter.incl pubkey
+    bucketSortedValidators.add new_vidx.ValidatorIndex
  else:
    # Deposits may come with invalid signatures - in that case, they are not
    # turned into a validator but still get processed to keep the deposit

@@ -375,10 +348,11 @@ proc apply_deposit(

  ok()

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/phase0/beacon-chain.md#deposits
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#deposits
proc process_deposit*(
    cfg: RuntimeConfig, state: var ForkyBeaconState,
-    bloom_filter: var PubkeyBloomFilter, deposit: Deposit, flags: UpdateFlags):
+    bucketSortedValidators: var BucketSortedValidators,
+    deposit: Deposit, flags: UpdateFlags):
    Result[void, cstring] =
  ## Process an Eth1 deposit, registering a validator or increasing its balance.

@@ -395,12 +369,13 @@ proc process_deposit*(
  # Deposits must be processed in order
  state.eth1_deposit_index += 1

-  apply_deposit(cfg, state, bloom_filter, deposit.data, flags)
+  apply_deposit(cfg, state, bucketSortedValidators, deposit.data, flags)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_deposit_request
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#new-process_deposit_request
func process_deposit_request*(
    cfg: RuntimeConfig, state: var electra.BeaconState,
-    bloom_filter: var PubkeyBloomFilter, deposit_request: DepositRequest,
+    bucketSortedValidators: var BucketSortedValidators,
+    deposit_request: DepositRequest,
    flags: UpdateFlags): Result[void, cstring] =
  # Set deposit request start index
  if state.deposit_requests_start_index ==

@@ -408,7 +383,7 @@ func process_deposit_request*(
    state.deposit_requests_start_index = deposit_request.index

  apply_deposit(
-    cfg, state, bloom_filter, DepositData(
+    cfg, state, bucketSortedValidators, DepositData(
      pubkey: deposit_request.pubkey,
      withdrawal_credentials: deposit_request.withdrawal_credentials,
      amount: deposit_request.amount,

@@ -510,6 +485,7 @@ proc process_bls_to_execution_change*(
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_withdrawal_request
func process_withdrawal_request*(
    cfg: RuntimeConfig, state: var electra.BeaconState,
+    bucketSortedValidators: BucketSortedValidators,
    withdrawal_request: WithdrawalRequest, cache: var StateCache) =
  let
    amount = withdrawal_request.amount

@@ -523,7 +499,9 @@ func process_withdrawal_request*(
  let
    request_pubkey = withdrawal_request.validator_pubkey
    # Verify pubkey exists
-    index = findValidatorIndex(state, request_pubkey).valueOr:
+    index = findValidatorIndex(
+        state.validators.asSeq, bucketSortedValidators,
+        request_pubkey).valueOr:
      return
    validator = state.validators.item(index)

@@ -591,6 +569,7 @@ func process_withdrawal_request*(
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_consolidation_request
proc process_consolidation_request*(
    cfg: RuntimeConfig, state: var electra.BeaconState,
+    bucketSortedValidators: BucketSortedValidators,
    consolidation_request: ConsolidationRequest,
    cache: var StateCache) =
  # If the pending consolidations queue is full, consolidation requests are

@@ -606,11 +585,14 @@ proc process_consolidation_request*(

  let
    # Verify pubkeys exists
-    source_index =
-      findValidatorIndex(state, consolidation_request.source_pubkey).valueOr:
+    source_index = findValidatorIndex(
+        state.validators.asSeq, bucketSortedValidators,
+        consolidation_request.source_pubkey).valueOr:
      return
    target_index =
-      findValidatorIndex(state, consolidation_request.target_pubkey).valueOr:
+      findValidatorIndex(
+        state.validators.asSeq, bucketSortedValidators,
+        consolidation_request.target_pubkey).valueOr:
      return

  # Verify that source != target, so a consolidation cannot be used as an exit.

@@ -698,12 +680,26 @@ proc process_operations(

  # It costs a full validator set scan to construct these values; only do so if
  # there will be some kind of exit.
-  var exit_queue_info =
-    if body.proposer_slashings.len + body.attester_slashings.len +
-        body.voluntary_exits.len > 0:
-      get_state_exit_queue_info(state)
-    else:
-      default(ExitQueueInfo) # not used
+  # TODO Electra doesn't use exit_queue_info, don't calculate
+  var
+    exit_queue_info =
+      if body.proposer_slashings.len + body.attester_slashings.len +
+          body.voluntary_exits.len > 0:
+        get_state_exit_queue_info(state)
+      else:
+        default(ExitQueueInfo) # not used
+    bsv_use =
+      when typeof(body).kind >= ConsensusFork.Electra:
+        body.deposits.len + body.execution_payload.deposit_requests.len +
+          body.execution_payload.withdrawal_requests.len +
+          body.execution_payload.consolidation_requests.len > 0
+      else:
+        body.deposits.len > 0
+    bsv =
+      if bsv_use:
+        sortValidatorBuckets(state.validators.asSeq)
+      else:
+        nil # this is a logic error, effectively assert

  for op in body.proposer_slashings:
    let (proposer_slashing_reward, new_exit_queue_info) =

@@ -718,10 +714,8 @@ proc process_operations(
  for op in body.attestations:
    operations_rewards.attestations +=
      ? process_attestation(state, op, flags, base_reward_per_increment, cache)
-  if body.deposits.len > 0:
-    let bloom_filter = constructBloomFilter(state.validators.asSeq)
-    for op in body.deposits:
-      ? process_deposit(cfg, state, bloom_filter[], op, flags)
+  for op in body.deposits:
+    ? process_deposit(cfg, state, bsv[], op, flags)
  for op in body.voluntary_exits:
    exit_queue_info = ? process_voluntary_exit(
      cfg, state, op, flags, exit_queue_info, cache)

@@ -731,15 +725,13 @@ proc process_operations(

  when typeof(body).kind >= ConsensusFork.Electra:
    for op in body.execution_payload.deposit_requests:
-      debugComment "combine with previous Bloom filter construction"
-      let bloom_filter = constructBloomFilter(state.validators.asSeq)
-      ? process_deposit_request(cfg, state, bloom_filter[], op, {})
+      ? process_deposit_request(cfg, state, bsv[], op, {})
    for op in body.execution_payload.withdrawal_requests:
      # [New in Electra:EIP7002:7251]
-      process_withdrawal_request(cfg, state, op, cache)
+      process_withdrawal_request(cfg, state, bsv[], op, cache)
    for op in body.execution_payload.consolidation_requests:
      # [New in Electra:EIP7251]
-      process_consolidation_request(cfg, state, op, cache)
+      process_consolidation_request(cfg, state, bsv[], op, cache)

  ok(operations_rewards)

@@ -756,11 +748,11 @@ func get_participant_reward*(total_active_balance: Gwei): Gwei =
      WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH
  max_participant_rewards div SYNC_COMMITTEE_SIZE

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#sync-aggregate-processing
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#sync-aggregate-processing
func get_proposer_reward*(participant_reward: Gwei): Gwei =
  participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#sync-aggregate-processing
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#sync-aggregate-processing
proc process_sync_aggregate*(
    state: var (altair.BeaconState | bellatrix.BeaconState |
                capella.BeaconState | deneb.BeaconState | electra.BeaconState),
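As a worked check of the two sync-aggregate reward helpers above, with the mainnet incentivization weights PROPOSER_WEIGHT = 8 and WEIGHT_DENOMINATOR = 64 (values assumed from the linked spec, not restated in this diff):

```latex
\text{proposer\_reward}
 = \text{participant\_reward} \cdot \frac{\mathrm{PROPOSER\_WEIGHT}}{\mathrm{WEIGHT\_DENOMINATOR} - \mathrm{PROPOSER\_WEIGHT}}
 = \frac{8}{56}\,\text{participant\_reward}
 = \frac{\text{participant\_reward}}{7}
```

so for every Gwei credited to a sync-committee participant, the block proposer receives an additional one seventh of that amount.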
@@ -1117,7 +1109,7 @@ proc process_block*(

  ok(? process_operations(cfg, state, blck.body, 0.Gwei, flags, cache))

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# copy of datatypes/altair.nim
type SomeAltairBlock =

@@ -1146,7 +1138,7 @@ proc process_block*(

  ok(operations_rewards)

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
type SomeBellatrixBlock =
  bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock
@ -10,7 +10,7 @@
|
||||||
# State transition - epoch processing, as described in
|
# State transition - epoch processing, as described in
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#epoch-processing
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#epoch-processing
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#epoch-processing
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#epoch-processing
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing
|
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing
|
||||||
#
|
#
|
||||||
# The entry point is `process_epoch`, which is at the bottom of this file.
|
# The entry point is `process_epoch`, which is at the bottom of this file.
|
||||||
|
@ -535,7 +535,7 @@ func get_attestation_component_delta(
|
||||||
else:
|
else:
|
||||||
RewardDelta(penalties: base_reward)
|
RewardDelta(penalties: base_reward)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#components-of-attestation-deltas
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#components-of-attestation-deltas
|
||||||
func get_source_delta*(
|
func get_source_delta*(
|
||||||
validator: RewardStatus,
|
validator: RewardStatus,
|
||||||
base_reward: Gwei,
|
base_reward: Gwei,
|
||||||
|
@ -694,14 +694,14 @@ func get_unslashed_participating_increment*(
|
||||||
flag_index: TimelyFlag): uint64 =
|
flag_index: TimelyFlag): uint64 =
|
||||||
info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei
|
info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#get_flag_index_deltas
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#get_flag_index_deltas
|
||||||
func get_active_increments*(
|
func get_active_increments*(
|
||||||
info: altair.EpochInfo | bellatrix.BeaconState): uint64 =
|
info: altair.EpochInfo | bellatrix.BeaconState): uint64 =
|
||||||
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei
|
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas
|
||||||
# Combines get_flag_index_deltas() and get_inactivity_penalty_deltas()
|
# Combines get_flag_index_deltas() and get_inactivity_penalty_deltas()
|
||||||
template get_flag_and_inactivity_delta(
|
template get_flag_and_inactivity_delta(
|
||||||
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
|
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
|
||||||
|
@ -843,7 +843,7 @@ func get_flag_and_inactivity_delta_for_validator(
|
||||||
active_increments, penalty_denominator, epoch_participation,
|
active_increments, penalty_denominator, epoch_participation,
|
||||||
participating_increments, info, vidx, inactivity_score.uint64)
|
participating_increments, info, vidx, inactivity_score.uint64)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#rewards-and-penalties-1
|
||||||
func process_rewards_and_penalties*(
|
func process_rewards_and_penalties*(
|
||||||
state: var phase0.BeaconState, info: var phase0.EpochInfo) =
|
state: var phase0.BeaconState, info: var phase0.EpochInfo) =
|
||||||
# No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
|
# No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
|
||||||
|
@ -866,7 +866,7 @@ func process_rewards_and_penalties*(
|
||||||
decrease_balance(balance, v.delta.penalties)
|
decrease_balance(balance, v.delta.penalties)
|
||||||
state.balances.asSeq()[idx] = balance
|
state.balances.asSeq()[idx] = balance
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#rewards-and-penalties
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#rewards-and-penalties
|
||||||
func process_rewards_and_penalties*(
|
func process_rewards_and_penalties*(
|
||||||
cfg: RuntimeConfig,
|
cfg: RuntimeConfig,
|
||||||
state: var (altair.BeaconState | bellatrix.BeaconState |
|
state: var (altair.BeaconState | bellatrix.BeaconState |
|
||||||
|
@ -902,7 +902,7 @@ func process_rewards_and_penalties*(
|
||||||
|
|
||||||
from std/heapqueue import HeapQueue, `[]`, len, push, replace
|
from std/heapqueue import HeapQueue, `[]`, len, push, replace
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#registry-updates
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#registry-updates
|
||||||
func process_registry_updates*(
|
func process_registry_updates*(
|
||||||
cfg: RuntimeConfig,
|
cfg: RuntimeConfig,
|
||||||
state: var (phase0.BeaconState | altair.BeaconState |
|
state: var (phase0.BeaconState | altair.BeaconState |
|
||||||
|
@ -971,7 +971,7 @@ func process_registry_updates*(
|
||||||
|
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated--process_registry_updates
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-process_registry_updates
|
||||||
func process_registry_updates*(
|
func process_registry_updates*(
|
||||||
cfg: RuntimeConfig, state: var electra.BeaconState, cache: var StateCache):
|
cfg: RuntimeConfig, state: var electra.BeaconState, cache: var StateCache):
|
||||||
Result[void, cstring] =
|
Result[void, cstring] =
|
||||||
|
@ -999,7 +999,7 @@ func process_registry_updates*(
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#slashings
|
||||||
func get_adjusted_total_slashing_balance*(
|
func get_adjusted_total_slashing_balance*(
|
||||||
state: ForkyBeaconState, total_balance: Gwei): Gwei =
|
state: ForkyBeaconState, total_balance: Gwei): Gwei =
|
||||||
const multiplier =
|
const multiplier =
|
||||||
|
@ -1018,14 +1018,14 @@ func get_adjusted_total_slashing_balance*(
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#slashings
|
||||||
func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool =
|
func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool =
|
||||||
validator.slashed and
|
validator.slashed and
|
||||||
epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch
|
epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#slashings
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#slashings
|
||||||
func get_slashing_penalty*(validator: Validator,
|
func get_slashing_penalty*(validator: Validator,
|
||||||
adjusted_total_slashing_balance,
|
adjusted_total_slashing_balance,
|
||||||
total_balance: Gwei): Gwei =
|
total_balance: Gwei): Gwei =
|
||||||
|
@ -1036,8 +1036,8 @@ func get_slashing_penalty*(validator: Validator,
|
||||||
penalty_numerator div total_balance * increment
|
penalty_numerator div total_balance * increment
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#slashings
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#slashings
|
||||||
func get_slashing(
|
func get_slashing(
|
||||||
state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei =
|
state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei =
|
||||||
# For efficiency reasons, it doesn't make sense to have process_slashings use
|
# For efficiency reasons, it doesn't make sense to have process_slashings use
|
||||||
|
@@ -1075,61 +1075,18 @@ func process_eth1_data_reset*(state: var ForkyBeaconState) =
   if next_epoch mod EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
     state.eth1_data_votes = default(type state.eth1_data_votes)

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#effective-balances-updates
-template effective_balance_might_update*(
-    balance: Gwei, effective_balance: Gwei): bool =
-  const
-    HYSTERESIS_INCREMENT =
-      EFFECTIVE_BALANCE_INCREMENT.Gwei div HYSTERESIS_QUOTIENT
-    DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
-    UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
-  balance + DOWNWARD_THRESHOLD < effective_balance or
-    effective_balance + UPWARD_THRESHOLD < balance
-
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#effective-balances-updates
-func process_effective_balance_updates*(
-    state: var (phase0.BeaconState | altair.BeaconState |
-                bellatrix.BeaconState | capella.BeaconState |
-                deneb.BeaconState)) =
-  # Update effective balances with hysteresis
-  for vidx in state.validators.vindices:
-    let
-      balance = state.balances.item(vidx)
-      effective_balance = state.validators.item(vidx).effective_balance
-    if effective_balance_might_update(balance, effective_balance):
-      let new_effective_balance =
-        min(
-          balance - balance mod EFFECTIVE_BALANCE_INCREMENT.Gwei,
-          MAX_EFFECTIVE_BALANCE.Gwei)
-      # Protect against unnecessary cache invalidation
-      if new_effective_balance != effective_balance:
-        state.validators.mitem(vidx).effective_balance = new_effective_balance
-
 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#updated-process_effective_balance_updates
-func process_effective_balance_updates*(state: var electra.BeaconState) =
+func process_effective_balance_updates*(state: var ForkyBeaconState) =
   # Update effective balances with hysteresis
   for vidx in state.validators.vindices:
     let
       balance = state.balances.item(vidx)
       effective_balance = state.validators.item(vidx).effective_balance

     if effective_balance_might_update(balance, effective_balance):
-      debugComment "amortize validator read access"
-      # Wrapping MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei and
-      # MIN_ACTIVATION_BALANCE.Gwei in static() results
-      # in
-      # beacon_chain/spec/state_transition_epoch.nim(1067, 20) Error: expected: ':', but got: '('
-      # even though it'd be better to statically verify safety
-      let
-        effective_balance_limit =
-          if has_compounding_withdrawal_credential(
-              state.validators.item(vidx)):
-            MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei
-          else:
-            MIN_ACTIVATION_BALANCE.Gwei
-        new_effective_balance =
-          min(
-            balance - balance mod EFFECTIVE_BALANCE_INCREMENT.Gwei,
-            effective_balance_limit)
+      let new_effective_balance = get_effective_balance_update(
+        typeof(state).kind, balance, effective_balance, vidx.distinctBase)
       # Protect against unnecessary cache invalidation
       if new_effective_balance != effective_balance:
         state.validators.mitem(vidx).effective_balance = new_effective_balance
|
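The hysteresis check removed here (and still called by the merged function, so the `effective_balance_might_update` / `get_effective_balance_update` helpers presumably live in another module now) only triggers an effective-balance update when the actual balance leaves a band around the current effective balance. Under the mainnet preset (EFFECTIVE_BALANCE_INCREMENT = 1 ETH, HYSTERESIS_QUOTIENT = 4, downward/upward multipliers 1 and 5) that band is 0.25 ETH below and 1.25 ETH above. A stand-alone illustrative sketch of just the band check, in plain `uint64` Gwei:

```nim
# Illustrative sketch of the hysteresis band (mainnet preset constants).
const
  EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000'u64   # 1 ETH in Gwei
  HYSTERESIS_QUOTIENT = 4'u64
  HYSTERESIS_DOWNWARD_MULTIPLIER = 1'u64
  HYSTERESIS_UPWARD_MULTIPLIER = 5'u64
  HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT div HYSTERESIS_QUOTIENT
  DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER  # 0.25 ETH
  UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER      # 1.25 ETH

func mightUpdate(balance, effectiveBalance: uint64): bool =
  balance + DOWNWARD_THRESHOLD < effectiveBalance or
    effectiveBalance + UPWARD_THRESHOLD < balance

when isMainModule:
  # 31.8 ETH actual vs 32 ETH effective: inside the band, no update
  doAssert not mightUpdate(31_800_000_000'u64, 32_000_000_000'u64)
  # 31.7 ETH actual vs 32 ETH effective: below the 0.25 ETH band, update
  doAssert mightUpdate(31_700_000_000'u64, 32_000_000_000'u64)
```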
@@ -1167,7 +1124,7 @@ func process_historical_roots_update*(state: var ForkyBeaconState) =
   if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0:
     # Equivalent to hash_tree_root(foo: HistoricalBatch), but without using
     # significant additional stack or heap.
-    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch
+    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#historicalbatch
     # In response to https://github.com/status-im/nimbus-eth2/issues/921
     if not state.historical_roots.add state.compute_historical_root():
       raiseAssert "no more room for historical roots, so long and thanks for the fish!"
|
@@ -1277,8 +1234,10 @@ func process_historical_summaries_update*(
 func process_pending_balance_deposits*(
     cfg: RuntimeConfig, state: var electra.BeaconState,
     cache: var StateCache): Result[void, cstring] =
-  let available_for_processing = state.deposit_balance_to_consume +
-    get_activation_exit_churn_limit(cfg, state, cache)
+  let
+    next_epoch = get_current_epoch(state) + 1
+    available_for_processing = state.deposit_balance_to_consume +
+      get_activation_exit_churn_limit(cfg, state, cache)
   var
     processed_amount = 0.Gwei
     next_deposit_index = 0
|
@@ -1293,7 +1252,7 @@ func process_pending_balance_deposits*(

       # Validator is exiting, postpone the deposit until after withdrawable epoch
       if validator.exit_epoch < FAR_FUTURE_EPOCH:
-        if get_current_epoch(state) <= validator.withdrawable_epoch:
+        if next_epoch <= validator.withdrawable_epoch:
           deposits_to_postpone.add(deposit)
         # Deposited balance will never become active. Increase balance but do not
         # consume churn
|
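Pending deposits are consumed against an epoch-level churn budget (`deposit_balance_to_consume` plus the activation-exit churn limit), and with the change above the postponement test for exiting validators is now made against the next epoch rather than the current one. A toy, self-contained sketch of the budget loop only (all names and types here are illustrative, not the chain's):

```nim
# Toy model of churn-limited deposit processing (illustrative names only).
type PendingDeposit = object
  amount: uint64          # Gwei

proc consumeDeposits(deposits: seq[PendingDeposit],
                     availableForProcessing: uint64): (seq[PendingDeposit], uint64) =
  ## Returns the deposits that fit in this epoch's churn budget and the total
  ## amount consumed; anything beyond the budget stays queued for later epochs.
  var
    consumed: seq[PendingDeposit]
    processedAmount = 0'u64
  for deposit in deposits:
    if processedAmount + deposit.amount > availableForProcessing:
      break                       # budget exhausted; keep the rest pending
    processedAmount += deposit.amount
    consumed.add deposit
  (consumed, processedAmount)

when isMainModule:
  let queue = @[PendingDeposit(amount: 32_000_000_000'u64),
                PendingDeposit(amount: 32_000_000_000'u64),
                PendingDeposit(amount: 32_000_000_000'u64)]
  let (done, spent) = consumeDeposits(queue, 70_000_000_000'u64)
  doAssert done.len == 2 and spent == 64_000_000_000'u64
```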
@@ -1333,6 +1292,7 @@ func process_pending_balance_deposits*(
 func process_pending_consolidations*(
     cfg: RuntimeConfig, state: var electra.BeaconState):
     Result[void, cstring] =
+  let next_epoch = get_current_epoch(state) + 1
   var next_pending_consolidation = 0
   for pending_consolidation in state.pending_consolidations:
     let source_validator =
|
@@ -1340,7 +1300,7 @@ func process_pending_consolidations*(
     if source_validator.slashed:
       next_pending_consolidation += 1
       continue
-    if source_validator.withdrawable_epoch > get_current_epoch(state):
+    if source_validator.withdrawable_epoch > next_epoch:
       break

     let
|
@ -1424,7 +1384,7 @@ func init*(
|
||||||
deneb.BeaconState | electra.BeaconState): T =
|
deneb.BeaconState | electra.BeaconState): T =
|
||||||
init(result, state)
|
init(result, state)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#epoch-processing
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#epoch-processing
|
||||||
proc process_epoch*(
|
proc process_epoch*(
|
||||||
cfg: RuntimeConfig,
|
cfg: RuntimeConfig,
|
||||||
state: var (altair.BeaconState | bellatrix.BeaconState),
|
state: var (altair.BeaconState | bellatrix.BeaconState),
|
||||||
|
@ -1451,7 +1411,7 @@ proc process_epoch*(
|
||||||
|
|
||||||
process_inactivity_updates(cfg, state, info) # [New in Altair]
|
process_inactivity_updates(cfg, state, info) # [New in Altair]
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#rewards-and-penalties
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#rewards-and-penalties
|
||||||
process_rewards_and_penalties(cfg, state, info) # [Modified in Altair]
|
process_rewards_and_penalties(cfg, state, info) # [Modified in Altair]
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#registry-updates
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#registry-updates
|
||||||
|
@ -1503,7 +1463,7 @@ proc process_epoch*(
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#registry-updates
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#registry-updates
|
||||||
? process_registry_updates(cfg, state, cache)
|
? process_registry_updates(cfg, state, cache)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#slashings
|
||||||
process_slashings(state, info.balances.current_epoch)
|
process_slashings(state, info.balances.current_epoch)
|
||||||
|
|
||||||
process_eth1_data_reset(state)
|
process_eth1_data_reset(state)
|
||||||
|
@ -1526,7 +1486,7 @@ proc process_epoch*(
|
||||||
|
|
||||||
info.init(state)
|
info.init(state)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#justification-and-finalization
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#justification-and-finalization
|
||||||
process_justification_and_finalization(state, info.balances, flags)
|
process_justification_and_finalization(state, info.balances, flags)
|
||||||
|
|
||||||
# state.slot hasn't been incremented yet.
|
# state.slot hasn't been incremented yet.
|
||||||
|
@@ -1564,9 +1524,8 @@ proc process_epoch*(
   ok()

 proc get_validator_balance_after_epoch*(
-    cfg: RuntimeConfig,
-    state: deneb.BeaconState | electra.BeaconState,
-    flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo,
+    cfg: RuntimeConfig, state: deneb.BeaconState | electra.BeaconState,
+    cache: var StateCache, info: var altair.EpochInfo,
     index: ValidatorIndex): Gwei =
   # Run a subset of process_epoch() which affects an individual validator,
   # without modifying state itself
|
@@ -1586,7 +1545,7 @@ proc get_validator_balance_after_epoch*(
   weigh_justification_and_finalization(
     state, info.balances.current_epoch,
     info.balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX],
-    info.balances.current_epoch_TIMELY_TARGET, flags)
+    info.balances.current_epoch_TIMELY_TARGET, {})

   # Used as part of process_rewards_and_penalties
   let inactivity_score =
|
@@ -1667,3 +1626,21 @@ proc get_validator_balance_after_epoch*(
       processed_amount += deposit.amount

   post_epoch_balance
+
+proc get_next_slot_expected_withdrawals*(
+    cfg: RuntimeConfig, state: deneb.BeaconState, cache: var StateCache,
+    info: var altair.EpochInfo): seq[Withdrawal] =
+  get_expected_withdrawals_aux(state, (state.slot + 1).epoch) do:
+    # validator_index is defined by an injected symbol within the template
+    get_validator_balance_after_epoch(
+      cfg, state, cache, info, validator_index.ValidatorIndex)
+
+proc get_next_slot_expected_withdrawals*(
+    cfg: RuntimeConfig, state: electra.BeaconState, cache: var StateCache,
+    info: var altair.EpochInfo): seq[Withdrawal] =
+  let (res, _) = get_expected_withdrawals_with_partial_count_aux(
+      state, (state.slot + 1).epoch) do:
+    # validator_index is defined by an injected symbol within the template
+    get_validator_balance_after_epoch(
+      cfg, state, cache, info, validator_index.ValidatorIndex)
+  res
|
@ -158,7 +158,7 @@ func get_shuffled_active_validator_indices*(
|
||||||
withState(state):
|
withState(state):
|
||||||
cache.get_shuffled_active_validator_indices(forkyState.data, epoch)
|
cache.get_shuffled_active_validator_indices(forkyState.data, epoch)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_active_validator_indices
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_active_validator_indices
|
||||||
func count_active_validators*(state: ForkyBeaconState,
|
func count_active_validators*(state: ForkyBeaconState,
|
||||||
epoch: Epoch,
|
epoch: Epoch,
|
||||||
cache: var StateCache): uint64 =
|
cache: var StateCache): uint64 =
|
||||||
|
@ -394,7 +394,7 @@ func compute_proposer_index(state: ForkyBeaconState,
|
||||||
## Return from ``indices`` a random index sampled by effective balance.
|
## Return from ``indices`` a random index sampled by effective balance.
|
||||||
compute_proposer_index(state, indices, seed, shuffled_index)
|
compute_proposer_index(state, indices, seed, shuffled_index)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_beacon_proposer_index
|
||||||
func get_beacon_proposer_index*(
|
func get_beacon_proposer_index*(
|
||||||
state: ForkyBeaconState, cache: var StateCache, slot: Slot):
|
state: ForkyBeaconState, cache: var StateCache, slot: Slot):
|
||||||
Opt[ValidatorIndex] =
|
Opt[ValidatorIndex] =
|
||||||
|
|
|
@ -10,10 +10,10 @@
|
||||||
import
|
import
|
||||||
./datatypes/base, ./beaconstate, ./forks, ./helpers
|
./datatypes/base, ./beaconstate, ./forks, ./helpers
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#configuration
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#configuration
|
||||||
const SAFETY_DECAY* = 10'u64
|
const SAFETY_DECAY* = 10'u64
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period
|
||||||
func compute_weak_subjectivity_period(
|
func compute_weak_subjectivity_period(
|
||||||
cfg: RuntimeConfig, state: ForkyBeaconState): uint64 =
|
cfg: RuntimeConfig, state: ForkyBeaconState): uint64 =
|
||||||
## Returns the weak subjectivity period for the current ``state``.
|
## Returns the weak subjectivity period for the current ``state``.
|
||||||
|
@ -49,7 +49,7 @@ func compute_weak_subjectivity_period(
|
||||||
|
|
||||||
ws_period
|
ws_period
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period
|
||||||
func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot,
|
func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot,
|
||||||
ws_state: ForkedHashedBeaconState,
|
ws_state: ForkedHashedBeaconState,
|
||||||
ws_checkpoint: Checkpoint): bool =
|
ws_checkpoint: Checkpoint): bool =
|
||||||
|
|
|
@@ -65,6 +65,7 @@ type
     getFinalizedPeriod: GetSyncCommitteePeriodCallback
     getOptimisticPeriod: GetSyncCommitteePeriodCallback
     getBeaconTime: GetBeaconTimeFn
+    shouldInhibitSync: GetBoolCallback
     loopFuture: Future[void].Raising([CancelledError])

 func init*(
|
@@ -80,7 +81,8 @@ func init*(
     isNextSyncCommitteeKnown: GetBoolCallback,
     getFinalizedPeriod: GetSyncCommitteePeriodCallback,
     getOptimisticPeriod: GetSyncCommitteePeriodCallback,
-    getBeaconTime: GetBeaconTimeFn
+    getBeaconTime: GetBeaconTimeFn,
+    shouldInhibitSync: GetBoolCallback = nil
 ): LightClientManager =
   ## Initialize light client manager.
   LightClientManager(
|
@@ -95,8 +97,8 @@ func init*(
     isNextSyncCommitteeKnown: isNextSyncCommitteeKnown,
     getFinalizedPeriod: getFinalizedPeriod,
     getOptimisticPeriod: getOptimisticPeriod,
-    getBeaconTime: getBeaconTime
-  )
+    getBeaconTime: getBeaconTime,
+    shouldInhibitSync: shouldInhibitSync)

 proc isGossipSupported*(
     self: LightClientManager,
|
@@ -328,13 +330,14 @@ template query[E](
 ): Future[bool].Raising([CancelledError]) =
   self.query(e, Nothing())

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md#light-client-sync-process
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md#light-client-sync-process
 proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} =
   var nextSyncTaskTime = self.getBeaconTime()
   while true:
     # Periodically wake and check for changes
     let wallTime = self.getBeaconTime()
     if wallTime < nextSyncTaskTime or
+        (self.shouldInhibitSync != nil and self.shouldInhibitSync()) or
         self.network.peerPool.lenAvailable < 1:
       await sleepAsync(chronos.seconds(2))
       continue
|
@ -90,7 +90,7 @@ p2pProtocol LightClientSync(version = 1,
|
||||||
|
|
||||||
debug "LC bootstrap request done", peer, blockRoot
|
debug "LC bootstrap request done", peer, blockRoot
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
|
||||||
proc lightClientUpdatesByRange(
|
proc lightClientUpdatesByRange(
|
||||||
peer: Peer,
|
peer: Peer,
|
||||||
startPeriod: SyncCommitteePeriod,
|
startPeriod: SyncCommitteePeriod,
|
||||||
|
@ -134,7 +134,7 @@ p2pProtocol LightClientSync(version = 1,
|
||||||
|
|
||||||
debug "LC updates by range request done", peer, startPeriod, count, found
|
debug "LC updates by range request done", peer, startPeriod, count, found
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
|
||||||
proc lightClientFinalityUpdate(
|
proc lightClientFinalityUpdate(
|
||||||
peer: Peer,
|
peer: Peer,
|
||||||
response: SingleChunkResponse[ForkedLightClientFinalityUpdate])
|
response: SingleChunkResponse[ForkedLightClientFinalityUpdate])
|
||||||
|
@ -160,7 +160,7 @@ p2pProtocol LightClientSync(version = 1,
|
||||||
|
|
||||||
debug "LC finality update request done", peer
|
debug "LC finality update request done", peer
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
|
||||||
proc lightClientOptimisticUpdate(
|
proc lightClientOptimisticUpdate(
|
||||||
peer: Peer,
|
peer: Peer,
|
||||||
response: SingleChunkResponse[ForkedLightClientOptimisticUpdate])
|
response: SingleChunkResponse[ForkedLightClientOptimisticUpdate])
|
||||||
|
|
|
@ -44,50 +44,19 @@ proc readChunkPayload*(
|
||||||
var contextBytes: ForkDigest
|
var contextBytes: ForkDigest
|
||||||
try:
|
try:
|
||||||
await conn.readExactly(addr contextBytes, sizeof contextBytes)
|
await conn.readExactly(addr contextBytes, sizeof contextBytes)
|
||||||
except CancelledError as exc:
|
|
||||||
raise exc
|
|
||||||
except CatchableError:
|
except CatchableError:
|
||||||
return neterr UnexpectedEOF
|
return neterr UnexpectedEOF
|
||||||
|
let contextFork =
|
||||||
|
peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr:
|
||||||
|
return neterr InvalidContextBytes
|
||||||
|
|
||||||
static: doAssert ConsensusFork.high == ConsensusFork.Electra
|
withConsensusFork(contextFork):
|
||||||
if contextBytes == peer.network.forkDigests.phase0:
|
let res = await readChunkPayload(
|
||||||
let res = await readChunkPayload(conn, peer, phase0.SignedBeaconBlock)
|
conn, peer, consensusFork.SignedBeaconBlock)
|
||||||
if res.isOk:
|
if res.isOk:
|
||||||
return ok newClone(ForkedSignedBeaconBlock.init(res.get))
|
return ok newClone(ForkedSignedBeaconBlock.init(res.get))
|
||||||
else:
|
else:
|
||||||
return err(res.error)
|
return err(res.error)
|
||||||
elif contextBytes == peer.network.forkDigests.altair:
|
|
||||||
let res = await readChunkPayload(conn, peer, altair.SignedBeaconBlock)
|
|
||||||
if res.isOk:
|
|
||||||
return ok newClone(ForkedSignedBeaconBlock.init(res.get))
|
|
||||||
else:
|
|
||||||
return err(res.error)
|
|
||||||
elif contextBytes == peer.network.forkDigests.bellatrix:
|
|
||||||
let res = await readChunkPayload(conn, peer, bellatrix.SignedBeaconBlock)
|
|
||||||
if res.isOk:
|
|
||||||
return ok newClone(ForkedSignedBeaconBlock.init(res.get))
|
|
||||||
else:
|
|
||||||
return err(res.error)
|
|
||||||
elif contextBytes == peer.network.forkDigests.capella:
|
|
||||||
let res = await readChunkPayload(conn, peer, capella.SignedBeaconBlock)
|
|
||||||
if res.isOk:
|
|
||||||
return ok newClone(ForkedSignedBeaconBlock.init(res.get))
|
|
||||||
else:
|
|
||||||
return err(res.error)
|
|
||||||
elif contextBytes == peer.network.forkDigests.deneb:
|
|
||||||
let res = await readChunkPayload(conn, peer, deneb.SignedBeaconBlock)
|
|
||||||
if res.isOk:
|
|
||||||
return ok newClone(ForkedSignedBeaconBlock.init(res.get))
|
|
||||||
else:
|
|
||||||
return err(res.error)
|
|
||||||
elif contextBytes == peer.network.forkDigests.electra:
|
|
||||||
let res = await readChunkPayload(conn, peer, electra.SignedBeaconBlock)
|
|
||||||
if res.isOk:
|
|
||||||
return ok newClone(ForkedSignedBeaconBlock.init(res.get))
|
|
||||||
else:
|
|
||||||
return err(res.error)
|
|
||||||
else:
|
|
||||||
return neterr InvalidContextBytes
|
|
||||||
|
|
||||||
proc readChunkPayload*(
|
proc readChunkPayload*(
|
||||||
conn: Connection, peer: Peer, MsgType: type (ref BlobSidecar)):
|
conn: Connection, peer: Peer, MsgType: type (ref BlobSidecar)):
|
||||||
|
@@ -95,19 +64,21 @@ proc readChunkPayload*(
   var contextBytes: ForkDigest
   try:
     await conn.readExactly(addr contextBytes, sizeof contextBytes)
-  except CancelledError as exc:
-    raise exc
   except CatchableError:
     return neterr UnexpectedEOF
+  let contextFork =
+    peer.network.forkDigests[].consensusForkForDigest(contextBytes).valueOr:
+      return neterr InvalidContextBytes

-  if contextBytes == peer.network.forkDigests.deneb:
-    let res = await readChunkPayload(conn, peer, BlobSidecar)
-    if res.isOk:
-      return ok newClone(res.get)
+  withConsensusFork(contextFork):
+    when consensusFork >= ConsensusFork.Deneb:
+      let res = await readChunkPayload(conn, peer, BlobSidecar)
+      if res.isOk:
+        return ok newClone(res.get)
+      else:
+        return err(res.error)
     else:
-      return err(res.error)
-  else:
-    return neterr InvalidContextBytes
+      return neterr InvalidContextBytes

 {.pop.} # TODO fix p2p macro for raises
|
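Both hunks above replace a hand-written chain of `elif contextBytes == peer.network.forkDigests.<fork>` comparisons with a single lookup (`consensusForkForDigest`) followed by one fork-generic dispatch (`withConsensusFork`). The stand-alone sketch below shows the same lookup-then-dispatch shape with plain placeholder types; `Fork`, `Digest`, `digestFor` and `describe` are illustrative only and not the real `ForkDigests`/`withConsensusFork` machinery.

```nim
# Sketch of "map context bytes to a fork once, then dispatch" (placeholder types).
import std/options

type
  Fork = enum
    Phase0, Altair, Bellatrix, Capella, Deneb, Electra
  Digest = array[4, byte]

func digestFor(f: Fork): Digest =
  [byte f.ord, 0, 0, 0]                     # placeholder digests

func forkForDigest(d: Digest): Option[Fork] =
  for f in Fork.low .. Fork.high:
    if digestFor(f) == d:
      return some f
  none Fork

proc describe(d: Digest): string =
  let forkOpt = forkForDigest(d)
  if forkOpt.isNone:
    "invalid context bytes"                 # mirrors `neterr InvalidContextBytes`
  elif forkOpt.get >= Deneb:
    "block with blob sidecars"              # single dispatch point, no elif chain
  else:
    "pre-Deneb block"

when isMainModule:
  doAssert describe(digestFor(Deneb)) == "block with blob sidecars"
  doAssert describe([byte 0xff, 0xff, 0xff, 0xff]) == "invalid context bytes"
```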
|
@ -21,7 +21,7 @@ import
|
||||||
 from presto import RestDecodingError

 const
-  largeRequestsTimeout = 90.seconds # Downloading large items such as states.
+  largeRequestsTimeout = 120.seconds # Downloading large items such as states.
   smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots.

 proc fetchDepositSnapshot(
|
@ -171,7 +171,7 @@ proc doTrustedNodeSync*(
|
||||||
let stateId =
|
let stateId =
|
||||||
case syncTarget.kind
|
case syncTarget.kind
|
||||||
of TrustedNodeSyncKind.TrustedBlockRoot:
|
of TrustedNodeSyncKind.TrustedBlockRoot:
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md#light-client-sync-process
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md#light-client-sync-process
|
||||||
const lcDataFork = LightClientDataFork.high
|
const lcDataFork = LightClientDataFork.high
|
||||||
var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]]
|
var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]]
|
||||||
func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) =
|
func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) =
|
||||||
|
|
|
@@ -0,0 +1,90 @@
+# beacon_chain
+# Copyright (c) 2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [].}
+
+import std/typetraits
+import "."/spec/crypto
+from "."/spec/datatypes/base import Validator, ValidatorIndex, pubkey, `==`
+
+const
+  BUCKET_BITS = 9 # >= 13 gets slow to construct
+  NUM_BUCKETS = 1 shl BUCKET_BITS
+
+type
+  # `newSeqUninitialized` requires its type to be SomeNumber
+  IntValidatorIndex = distinctBase ValidatorIndex
+
+  BucketSortedValidators* = object
+    bucketSorted*: seq[IntValidatorIndex]
+    bucketUpperBounds: array[NUM_BUCKETS, uint] # avoids over/underflow checks
+    extraItems*: seq[ValidatorIndex]
+
+template getBucketNumber(h: ValidatorPubKey): uint =
+  # This assumes https://en.wikipedia.org/wiki/Avalanche_effect for uniform
+  # distribution across pubkeys. ValidatorPubKey specifically satisfies this
+  # criterion. If required, can look at more input bytes, but ultimately it
+  # doesn't affect correctness, only speed.
+
+  # Otherwise need more than 2 bytes of input
+  static: doAssert BUCKET_BITS <= 16
+
+  const BUCKET_MASK = (NUM_BUCKETS - 1)
+  ((h.blob[0] * 256 + h.blob[1]) and BUCKET_MASK)
+
+func sortValidatorBuckets*(validators: openArray[Validator]):
+    ref BucketSortedValidators {.noinline.} =
+  var bucketSizes: array[NUM_BUCKETS, uint]
+  for validator in validators:
+    inc bucketSizes[getBucketNumber(validator.pubkey)]
+
+  var
+    bucketInsertPositions: array[NUM_BUCKETS, uint]
+    accum: uint
+  for i, s in bucketSizes:
+    accum += s
+    bucketInsertPositions[i] = accum
+  doAssert accum == validators.len.uint
+  let res = (ref BucketSortedValidators)(
+    bucketSorted: newSeqUninitialized[IntValidatorIndex](validators.len),
+    bucketUpperBounds: bucketInsertPositions)
+
+  for i, validator in validators:
+    let insertPos =
+      addr bucketInsertPositions[getBucketNumber(validator.pubkey)]
+    dec insertPos[]
+    res.bucketSorted[insertPos[]] = i.IntValidatorIndex
+
+  doAssert bucketInsertPositions[0] == 0
+  for i in 1 ..< NUM_BUCKETS:
+    doAssert res.bucketUpperBounds[i - 1] == bucketInsertPositions[i]
+
+  res
+
+func add*(
+    bucketSortedValidators: var BucketSortedValidators,
+    validatorIndex: ValidatorIndex) =
+  bucketSortedValidators.extraItems.add validatorIndex
+
+func findValidatorIndex*(
+    validators: openArray[Validator], bsv: BucketSortedValidators,
+    pubkey: ValidatorPubKey): Opt[ValidatorIndex] =
+  for validatorIndex in bsv.extraItems:
+    if validators[validatorIndex.distinctBase].pubkey == pubkey:
+      return Opt.some validatorIndex
+  let
+    bucketNumber = getBucketNumber(pubkey)
+    lowerBounds =
+      if bucketNumber == 0:
+        0'u
+      else:
+        bsv.bucketUpperBounds[bucketNumber - 1]
+
+  for i in lowerBounds ..< bsv.bucketUpperBounds[bucketNumber]:
+    if validators[bsv.bucketSorted[i]].pubkey == pubkey:
+      return Opt.some bsv.bucketSorted[i].ValidatorIndex
+  Opt.none ValidatorIndex
|
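The new module is a counting sort keyed on the first two pubkey bytes: `sortValidatorBuckets` builds the bucket layout once (sizes, prefix-sum upper bounds, backwards fill), and `findValidatorIndex` then scans only one bucket plus any `extraItems` added later, as in the `collectFromDeposits` change later in this diff. Below is a stand-alone sketch of the same counting-sort-then-lookup idea on plain integers; the bucket count, key function and all names are illustrative only.

```nim
# Counting-sort bucket index on plain ints, mirroring the module's approach:
# sizes -> prefix sums (upper bounds) -> fill backwards -> per-bucket lookup.
const NumBuckets = 8

func bucketOf(key: int): int = key mod NumBuckets   # stand-in for getBucketNumber

proc buildBuckets(keys: seq[int]): (seq[int], array[NumBuckets, int]) =
  var sizes: array[NumBuckets, int]
  for k in keys:
    inc sizes[bucketOf(k)]
  var upper: array[NumBuckets, int]                  # exclusive upper bound per bucket
  var acc = 0
  for i, s in sizes:
    acc += s
    upper[i] = acc
  var sorted = newSeq[int](keys.len)
  var insertPos = upper                              # copy; walks each bucket backwards
  for idx, k in keys:
    dec insertPos[bucketOf(k)]
    sorted[insertPos[bucketOf(k)]] = idx             # store the original index, like bucketSorted
  (sorted, upper)

proc findIndex(keys: seq[int], sorted: seq[int],
               upper: array[NumBuckets, int], key: int): int =
  let b = bucketOf(key)
  let lo = if b == 0: 0 else: upper[b - 1]
  for i in lo ..< upper[b]:                          # only this bucket is scanned
    if keys[sorted[i]] == key:
      return sorted[i]
  -1

when isMainModule:
  let keys = @[42, 7, 19, 8, 15]
  let (sorted, upper) = buildBuckets(keys)
  doAssert findIndex(keys, sorted, upper, 19) == 2   # 19 sits at original index 2
  doAssert findIndex(keys, sorted, upper, 99) == -1
```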
@ -443,8 +443,8 @@ proc getExecutionPayload(
|
||||||
feeRecipient = $feeRecipient
|
feeRecipient = $feeRecipient
|
||||||
|
|
||||||
node.elManager.getPayload(
|
node.elManager.getPayload(
|
||||||
PayloadType, beaconHead.blck.bid.root, executionHead, latestSafe,
|
PayloadType, beaconHead.blck.bid.root, executionHead, latestSafe,
|
||||||
latestFinalized, timestamp, random, feeRecipient, withdrawals)
|
latestFinalized, timestamp, random, feeRecipient, withdrawals)
|
||||||
|
|
||||||
# BlockRewards has issues resolving somehow otherwise
|
# BlockRewards has issues resolving somehow otherwise
|
||||||
import ".."/spec/state_transition_block
|
import ".."/spec/state_transition_block
|
||||||
|
@ -1966,8 +1966,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra
|
||||||
|
|
||||||
updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers
|
updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#broadcast-aggregate
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#broadcast-aggregate
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#broadcast-sync-committee-contribution
|
||||||
# Wait 2 / 3 of the slot time to allow messages to propagate, then collect
|
# Wait 2 / 3 of the slot time to allow messages to propagate, then collect
|
||||||
# the result in aggregates
|
# the result in aggregates
|
||||||
static:
|
static:
|
||||||
|
|
|
@ -209,7 +209,7 @@ template withContext*(db: SlashingProtectionDB, body: untyped): untyped =
|
||||||
index: ValidatorIndex,
|
index: ValidatorIndex,
|
||||||
validator: ValidatorPubKey,
|
validator: ValidatorPubKey,
|
||||||
source, target: Epoch,
|
source, target: Epoch,
|
||||||
attestation_signing_root: Eth2Digest): Result[void, BadVote] =
|
attestation_signing_root: Eth2Digest): Result[void, BadVote] {.redefine.} =
|
||||||
registerAttestationInContextV2(Opt.some(index), validator, source, target, attestation_signing_root)
|
registerAttestationInContextV2(Opt.some(index), validator, source, target, attestation_signing_root)
|
||||||
block:
|
block:
|
||||||
body
|
body
|
||||||
|
|
|
@ -36,7 +36,7 @@ export results
|
||||||
# - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities
|
# - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities
|
||||||
#
|
#
|
||||||
# Phase 0 spec - Honest Validator - how to avoid slashing
|
# Phase 0 spec - Honest Validator - how to avoid slashing
|
||||||
# - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#how-to-avoid-slashing
|
# - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#how-to-avoid-slashing
|
||||||
#
|
#
|
||||||
# In-depth reading on slashing conditions
|
# In-depth reading on slashing conditions
|
||||||
#
|
#
|
||||||
|
|
|
@ -776,7 +776,7 @@ proc getAggregateAndProofSignature*(v: AttachedValidator,
|
||||||
fork, genesis_validators_root, aggregate_and_proof)
|
fork, genesis_validators_root, aggregate_and_proof)
|
||||||
await v.signData(request)
|
await v.signData(request)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#prepare-sync-committee-message
|
||||||
proc getSyncCommitteeMessage*(v: AttachedValidator,
|
proc getSyncCommitteeMessage*(v: AttachedValidator,
|
||||||
fork: Fork,
|
fork: Fork,
|
||||||
genesis_validators_root: Eth2Digest,
|
genesis_validators_root: Eth2Digest,
|
||||||
|
@ -807,7 +807,7 @@ proc getSyncCommitteeMessage*(v: AttachedValidator,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#aggregation-selection
|
||||||
proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
|
proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
|
||||||
genesis_validators_root: Eth2Digest,
|
genesis_validators_root: Eth2Digest,
|
||||||
slot: Slot,
|
slot: Slot,
|
||||||
|
@ -827,7 +827,7 @@ proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
|
||||||
)
|
)
|
||||||
await v.signData(request)
|
await v.signData(request)
|
||||||
|
|
||||||
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#broadcast-sync-committee-contribution
|
||||||
proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork,
|
proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork,
|
||||||
genesis_validators_root: Eth2Digest,
|
genesis_validators_root: Eth2Digest,
|
||||||
contribution_and_proof: ContributionAndProof
|
contribution_and_proof: ContributionAndProof
|
||||||
|
|
|
@ -18,7 +18,7 @@ const
|
||||||
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"
|
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"
|
||||||
|
|
||||||
versionMajor* = 24
|
versionMajor* = 24
|
||||||
versionMinor* = 7
|
versionMinor* = 8
|
||||||
versionBuild* = 0
|
versionBuild* = 0
|
||||||
|
|
||||||
versionBlob* = "stateofus" # Single word - ends up in the default graffiti
|
versionBlob* = "stateofus" # Single word - ends up in the default graffiti
|
||||||
|
|
|
@ -6,7 +6,7 @@
|
||||||
* * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
* * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||||
* at your option. This file may not be copied, modified, or distributed except according to those terms.
|
* at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||||
*/
|
*/
|
||||||
library 'status-jenkins-lib@v1.8.14'
|
library 'status-jenkins-lib@v1.9.2'
|
||||||
|
|
||||||
pipeline {
|
pipeline {
|
||||||
/* This way we run the same Jenkinsfile on different platforms. */
|
/* This way we run the same Jenkinsfile on different platforms. */
|
||||||
|
|
|
@ -1,85 +0,0 @@
|
||||||
#!/usr/bin/env groovy
|
|
||||||
/* beacon_chain
|
|
||||||
* Copyright (c) 2019-2024 Status Research & Development GmbH
|
|
||||||
* Licensed and distributed under either of
|
|
||||||
* * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
||||||
* * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
||||||
* at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
*/
|
|
||||||
library 'status-jenkins-lib@nix/flake-build'
|
|
||||||
|
|
||||||
pipeline {
|
|
||||||
/* This way we run the same Jenkinsfile on different platforms. */
|
|
||||||
agent { label params.AGENT_LABEL }
|
|
||||||
|
|
||||||
parameters {
|
|
||||||
string(
|
|
||||||
name: 'AGENT_LABEL',
|
|
||||||
description: 'Label for targetted CI slave host: linux/macos',
|
|
||||||
defaultValue: params.AGENT_LABEL ?: getAgentLabel(),
|
|
||||||
)
|
|
||||||
choice(
|
|
||||||
name: 'VERBOSITY',
|
|
||||||
description: 'Value for the V make flag to increase log verbosity',
|
|
||||||
choices: [0, 1, 2]
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
options {
|
|
||||||
timestamps()
|
|
||||||
ansiColor('xterm')
|
|
||||||
/* This also includes wait time in the queue. */
|
|
||||||
timeout(time: 1, unit: 'HOURS')
|
|
||||||
/* Limit builds retained. */
|
|
||||||
buildDiscarder(logRotator(
|
|
||||||
numToKeepStr: '5',
|
|
||||||
daysToKeepStr: '30',
|
|
||||||
))
|
|
||||||
/* Abort old builds for non-main branches. */
|
|
||||||
disableConcurrentBuilds(
|
|
||||||
abortPrevious: !isMainBranch()
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
stages {
|
|
||||||
stage('Beacon Node') {
|
|
||||||
steps { script {
|
|
||||||
nix.flake('beacon_node')
|
|
||||||
} }
|
|
||||||
}
|
|
||||||
|
|
||||||
stage('Version check') {
|
|
||||||
steps { script {
|
|
||||||
sh 'result/bin/nimbus_beacon_node --version'
|
|
||||||
} }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
post {
|
|
||||||
always {
|
|
||||||
cleanWs(
|
|
||||||
disableDeferredWipeout: true,
|
|
||||||
deleteDirs: true
|
|
||||||
)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
def isMainBranch() {
|
|
||||||
return ['stable', 'testing', 'unstable'].contains(env.BRANCH_NAME)
|
|
||||||
}
|
|
||||||
|
|
||||||
/* This allows us to use one Jenkinsfile and run
|
|
||||||
* jobs on different platforms based on job name. */
|
|
||||||
def getAgentLabel() {
|
|
||||||
if (params.AGENT_LABEL) { return params.AGENT_LABEL }
|
|
||||||
/* We extract the name of the job from currentThread because
|
|
||||||
* before an agent is picket env is not available. */
|
|
||||||
def tokens = Thread.currentThread().getName().split('/')
|
|
||||||
def labels = []
|
|
||||||
/* Check if the job path contains any of the valid labels. */
|
|
||||||
['linux', 'macos', 'x86_64', 'aarch64', 'arm64'].each {
|
|
||||||
if (tokens.contains(it)) { labels.add(it) }
|
|
||||||
}
|
|
||||||
return labels.join(' && ')
|
|
||||||
}
|
|
|
@ -0,0 +1 @@
|
||||||
|
nix.Jenkinsfile
|
|
@ -0,0 +1,85 @@
|
||||||
|
#!/usr/bin/env groovy
|
||||||
|
/* beacon_chain
|
||||||
|
* Copyright (c) 2019-2024 Status Research & Development GmbH
|
||||||
|
* Licensed and distributed under either of
|
||||||
|
* * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||||
|
* * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||||
|
* at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||||
|
*/
|
||||||
|
library 'status-jenkins-lib@v1.9.2'
|
||||||
|
|
||||||
|
pipeline {
|
||||||
|
/* This way we run the same Jenkinsfile on different platforms. */
|
||||||
|
agent { label params.AGENT_LABEL }
|
||||||
|
|
||||||
|
parameters {
|
||||||
|
string(
|
||||||
|
name: 'AGENT_LABEL',
|
||||||
|
description: 'Label for targetted CI slave host: linux/macos',
|
||||||
|
defaultValue: params.AGENT_LABEL ?: getAgentLabel(),
|
||||||
|
)
|
||||||
|
choice(
|
||||||
|
name: 'VERBOSITY',
|
||||||
|
description: 'Value for the V make flag to increase log verbosity',
|
||||||
|
choices: [0, 1, 2]
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
options {
|
||||||
|
timestamps()
|
||||||
|
ansiColor('xterm')
|
||||||
|
/* This also includes wait time in the queue. */
|
||||||
|
timeout(time: 1, unit: 'HOURS')
|
||||||
|
/* Limit builds retained. */
|
||||||
|
buildDiscarder(logRotator(
|
||||||
|
numToKeepStr: '5',
|
||||||
|
daysToKeepStr: '30',
|
||||||
|
))
|
||||||
|
/* Abort old builds for non-main branches. */
|
||||||
|
disableConcurrentBuilds(
|
||||||
|
abortPrevious: !isMainBranch()
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
stages {
|
||||||
|
stage('Beacon Node') {
|
||||||
|
steps { script {
|
||||||
|
nix.flake('beacon_node')
|
||||||
|
} }
|
||||||
|
}
|
||||||
|
|
||||||
|
stage('Version check') {
|
||||||
|
steps { script {
|
||||||
|
sh 'result/bin/nimbus_beacon_node --version'
|
||||||
|
} }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
post {
|
||||||
|
always {
|
||||||
|
cleanWs(
|
||||||
|
disableDeferredWipeout: true,
|
||||||
|
deleteDirs: true
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
def isMainBranch() {
|
||||||
|
return ['stable', 'testing', 'unstable'].contains(env.BRANCH_NAME)
|
||||||
|
}
|
||||||
|
|
||||||
|
/* This allows us to use one Jenkinsfile and run
|
||||||
|
* jobs on different platforms based on job name. */
|
||||||
|
def getAgentLabel() {
|
||||||
|
if (params.AGENT_LABEL) { return params.AGENT_LABEL }
|
||||||
|
/* We extract the name of the job from currentThread because
|
||||||
|
* before an agent is picket env is not available. */
|
||||||
|
def tokens = Thread.currentThread().getName().split('/')
|
||||||
|
def labels = []
|
||||||
|
/* Check if the job path contains any of the valid labels. */
|
||||||
|
['linux', 'macos', 'x86_64', 'aarch64', 'arm64'].each {
|
||||||
|
if (tokens.contains(it)) { labels.add(it) }
|
||||||
|
}
|
||||||
|
return labels.join(' && ')
|
||||||
|
}
|
|
@ -120,6 +120,11 @@ elif defined(macosx) and defined(arm64):
|
||||||
# Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758
|
# Apple's Clang can't handle "-march=native" on M1: https://github.com/status-im/nimbus-eth2/issues/2758
|
||||||
switch("passC", "-mcpu=apple-m1")
|
switch("passC", "-mcpu=apple-m1")
|
||||||
switch("passL", "-mcpu=apple-m1")
|
switch("passL", "-mcpu=apple-m1")
|
||||||
|
elif defined(riscv64):
|
||||||
|
# riscv64 needs specification of ISA with extensions. 'gc' is widely supported
|
||||||
|
# and seems to be the minimum extensions needed to build.
|
||||||
|
switch("passC", "-march=rv64gc")
|
||||||
|
switch("passL", "-march=rv64gc")
|
||||||
else:
|
else:
|
||||||
switch("passC", "-march=native")
|
switch("passC", "-march=native")
|
||||||
switch("passL", "-march=native")
|
switch("passL", "-march=native")
|
||||||
|
|
|
@ -6,7 +6,7 @@ This is a WIP document to explain the attestation flows.
|
||||||
|
|
||||||
It is important to distinguish attestation `validation` from attestation `verification`.
|
It is important to distinguish attestation `validation` from attestation `verification`.
|
||||||
- Attestation `validation` is defined in the P2P specs. Validated attestations can be forwarded on GossipSub.
|
- Attestation `validation` is defined in the P2P specs. Validated attestations can be forwarded on GossipSub.
|
||||||
- Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
|
- Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
|
||||||
- Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
|
- Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
|
||||||
- Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block.
|
- Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block.
|
||||||
- https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations
|
- https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations
|
||||||
|
|
|
@ -9,7 +9,7 @@ Important distinction:
|
||||||
https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block.
|
https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block.
|
||||||
A validated block can be forwarded on gossipsub.
|
A validated block can be forwarded on gossipsub.
|
||||||
- and we distinguish `verification` which is defined in consensus specs:
|
- and we distinguish `verification` which is defined in consensus specs:
|
||||||
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#block-processing
|
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#block-processing
|
||||||
A block needs to be verified to enter fork choice, the DAG and the BeaconChainDB
|
A block needs to be verified to enter fork choice, the DAG and the BeaconChainDB
|
||||||
|
|
||||||
In particular in terms of costly checks validating a block only requires checking:
|
In particular in terms of costly checks validating a block only requires checking:
|
||||||
|
|
|
@ -104,7 +104,7 @@ The following sections explain how to do this for certain EL clients.
|
||||||
## Running the light client
|
## Running the light client
|
||||||
|
|
||||||
The light client starts syncing from a trusted block.
|
The light client starts syncing from a trusted block.
|
||||||
This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client.
|
This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client.
|
||||||
|
|
||||||
### 1. Obtaining a trusted block root
|
### 1. Obtaining a trusted block root
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,5 @@
|
||||||
|
{
|
||||||
|
"opRetro": {
|
||||||
|
"projectId": "0xe346264e87202b47f1057eb0b0fcaa0ea7f83e14507ca4585a91a5d94e0e92c0"
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,31 @@
|
||||||
|
# Copyright (c) 2024 Status Research & Development GmbH
|
||||||
|
# Licensed under either of
|
||||||
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||||
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||||
|
# http://opensource.org/licenses/MIT)
|
||||||
|
# at your option. This file may not be copied, modified, or distributed except
|
||||||
|
# according to those terms.
|
||||||
|
|
||||||
|
participants_matrix:
|
||||||
|
el:
|
||||||
|
- el_type: geth
|
||||||
|
- el_type: nethermind
|
||||||
|
- el_type: erigon
|
||||||
|
cl:
|
||||||
|
- cl_type: nimbus
|
||||||
|
cl_image: <image-placeholder>
|
||||||
|
- cl_type: lighthouse
|
||||||
|
- cl_type: prysm
|
||||||
|
additional_services:
|
||||||
|
- tx_spammer
|
||||||
|
- assertoor
|
||||||
|
- beacon_metrics_gazer
|
||||||
|
mev_type: null
|
||||||
|
assertoor_params:
|
||||||
|
image: "ethpandaops/assertoor:master"
|
||||||
|
run_stability_check: true
|
||||||
|
run_block_proposal_check: true
|
||||||
|
run_transaction_test: true
|
||||||
|
run_blob_transaction_test: true
|
||||||
|
run_opcodes_transaction_test: true
|
|
@ -406,6 +406,9 @@ func collectFromAttestations(
|
||||||
rewardsAndPenalties[index].inclusion_delay =
|
rewardsAndPenalties[index].inclusion_delay =
|
||||||
some(inclusionDelay.uint64)
|
some(inclusionDelay.uint64)
|
||||||
|
|
||||||
|
from ".."/beacon_chain/validator_bucket_sort import
|
||||||
|
findValidatorIndex, sortValidatorBuckets
|
||||||
|
|
||||||
proc collectFromDeposits(
|
proc collectFromDeposits(
|
||||||
rewardsAndPenalties: var seq[RewardsAndPenalties],
|
rewardsAndPenalties: var seq[RewardsAndPenalties],
|
||||||
forkedState: ForkedHashedBeaconState,
|
forkedState: ForkedHashedBeaconState,
|
||||||
|
@ -414,9 +417,12 @@ proc collectFromDeposits(
|
||||||
cfg: RuntimeConfig) =
|
cfg: RuntimeConfig) =
|
||||||
withStateAndBlck(forkedState, forkedBlock):
|
withStateAndBlck(forkedState, forkedBlock):
|
||||||
for deposit in forkyBlck.message.body.deposits:
|
for deposit in forkyBlck.message.body.deposits:
|
||||||
let pubkey = deposit.data.pubkey
|
let
|
||||||
let amount = deposit.data.amount
|
pubkey = deposit.data.pubkey
|
||||||
var index = findValidatorIndex(forkyState.data, pubkey)
|
amount = deposit.data.amount
|
||||||
|
var index = findValidatorIndex(
|
||||||
|
forkyState.data.validators.asSeq, sortValidatorBuckets(
|
||||||
|
forkyState.data.validators.asSeq)[], pubkey)
|
||||||
if index.isNone:
|
if index.isNone:
|
||||||
if pubkey in pubkeyToIndex:
|
if pubkey in pubkeyToIndex:
|
||||||
try:
|
try:
|
||||||
|
|
|
@ -24,6 +24,7 @@ import
|
||||||
from std/os import changeFileExt, fileExists
|
from std/os import changeFileExt, fileExists
|
||||||
from std/sequtils import mapIt, toSeq
|
from std/sequtils import mapIt, toSeq
|
||||||
from std/times import toUnix
|
from std/times import toUnix
|
||||||
|
from ../beacon_chain/el/engine_api_conversions import asEth2Digest
|
||||||
from ../beacon_chain/spec/beaconstate import initialize_beacon_state_from_eth1
|
from ../beacon_chain/spec/beaconstate import initialize_beacon_state_from_eth1
|
||||||
from ../tests/mocking/mock_genesis import mockEth1BlockHash
|
from ../tests/mocking/mock_genesis import mockEth1BlockHash
|
||||||
|
|
||||||
|
|
|
@ -4012,7 +4012,7 @@
|
||||||
"response": {
|
"response": {
|
||||||
"status": {"operator": "equals", "value": "200"},
|
"status": {"operator": "equals", "value": "200"},
|
||||||
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
|
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
|
||||||
"body": [{"operator": "jstructcmps", "start": ["data"], "value": {"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","GOSSIP_MAX_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","MAX_CHUNK_SIZE":"","TTFB_TIMEOUT":"","RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO":"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY
_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":""}}]
"body": [{"operator": "jstructcmps", "start": ["data"], "value": {"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","MAX_EFFECTIVE_BALANCE_ELECTRA":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","ELECTRA_FORK_VERSION":"","ELECTRA_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","GOSSIP_MAX_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","MAX_CHUNK_SIZE":"","TTFB_TIMEOUT":"","RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO":"","DOMAIN_DEPOSIT":"","DOMAIN_V
OLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":""}}]
}
},
{
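The updated expectation above extends the structure check for the standard `/eth/v1/config/spec` Beacon API response with the Electra-era keys (`MAX_EFFECTIVE_BALANCE_ELECTRA`, `ELECTRA_FORK_VERSION`, `ELECTRA_FORK_EPOCH`). As an informal spot check outside the test harness, a query along these lines can confirm a running node already reports those keys; the node URL and port are assumptions about a local setup, not something this diff specifies.

```bash
#!/bin/bash
# Sketch only: check that a locally running beacon node exposes the Electra
# config keys the updated REST test now expects. NODE_URL is an assumption
# (Nimbus' default REST port is used here).
set -euo pipefail

NODE_URL="${NODE_URL:-http://127.0.0.1:5052}"

curl -s "${NODE_URL}/eth/v1/config/spec" \
  | jq '.data | {MAX_EFFECTIVE_BALANCE_ELECTRA, ELECTRA_FORK_VERSION, ELECTRA_FORK_EPOCH}'
```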
@@ -141,14 +141,14 @@ func nfuzz_block_header(input: openArray[byte], xoutput: ptr byte,
  decodeAndProcess(BlockHeaderInput):
    process_block_header(data.state, data.beaconBlock.message, flags, cache).isOk

-from ".."/beacon_chain/bloomfilter import constructBloomFilter
+from ".."/beacon_chain/validator_bucket_sort import sortValidatorBuckets

proc nfuzz_deposit(input: openArray[byte], xoutput: ptr byte,
    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError].} =
  decodeAndProcess(DepositInput):
    process_deposit(
      getRuntimeConfig(some "mainnet"), data.state,
-      constructBloomFilter(data.state.validators.asSeq)[], data.deposit,
+      sortValidatorBuckets(data.state.validators.asSeq)[], data.deposit,
      flags).isOk

proc nfuzz_proposer_slashing(input: openArray[byte], xoutput: ptr byte,
@@ -8,5 +8,5 @@ in pkgs.fetchFromGitHub {
  repo = "checksums";
  rev = tools.findKeyValue "^ +ChecksumsStableCommit = \"([a-f0-9]+)\"$" sourceFile;
  # WARNING: Requires manual updates when Nim compiler version changes.
-  hash = "sha256-AIiMBqLcGJCTkINHfJ2dN3ogitU7Za9Z9Sv9zjKeOQk=";
+  hash = "sha256-RB2IXs2xcfYHhV9d7l1mtHW51mtsrqrYRapSoTikvHw=";
}
@@ -7,7 +7,7 @@ let
in {
  findKeyValue = regex: sourceFile:
    let
-      linesFrom = sourceFile: splitString "\n" (fileContents sourceFile);
+      linesFrom = file: splitString "\n" (fileContents file);
      matching = regex: lines: map (line: match regex line) lines;
      extractMatch = matches: last (flatten (remove null matches));
    in
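The `findKeyValue` helper above returns the last capture of the given regex across the lines of a source file; it is used here to pin the `checksums` checkout to whatever `ChecksumsStableCommit` the build system references. A rough shell equivalent, handy for checking the value by hand, might look like the following; the source-file path is a placeholder, since the diff does not show it.

```bash
#!/bin/bash
# Illustrative only: extract ChecksumsStableCommit the same way the Nix helper
# does (last match of the capture group wins). The file argument is a
# hypothetical path, not taken from this diff.
set -euo pipefail

SOURCE_FILE="${1:?usage: $0 <file defining ChecksumsStableCommit>}"

sed -nE 's/^ +ChecksumsStableCommit = "([a-f0-9]+)"$/\1/p' "$SOURCE_FILE" | tail -n 1
```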
@@ -0,0 +1,252 @@
#!/bin/bash
set -euo pipefail
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

# ------------------------------------------------
# Inputs on how to run checks
# ------------------------------------------------
echo
printf "Do you want to run the checks in terminal or visit the assertoor URL? (terminal/url) "
read reply

echo
printf "Build new changes (yes/no)? "
read use_previous_image

# ------------------------------------------------
# Installation Checks
# ------------------------------------------------

# Checking for docker installation
echo "Checking docker installation"
if command -v docker &> /dev/null; then
  echo "Docker installation found"
else
  echo "Docker installation not found. Please install docker."
  exit 1
fi

echo "Checking kurtosis installation"
if command -v kurtosis &> /dev/null; then
  echo "Kurtosis installation found"
else
  echo "Kurtosis installation not found. Installing kurtosis"
  echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
  sudo apt-get update
  sudo apt-get install -y kurtosis
fi

# Install jq if not installed already
if [ "$(which jq)" != "" ];
then
  echo "jq is already installed"
else
  echo "jq is not installed. Installing jq"
  sudo apt-get install -y jq
fi

new_cl_image="localtestnet"

# ------------------------------------------------
# Build the Docker Image
# ------------------------------------------------
if [[ "$use_previous_image" == "no" ]]; then
  echo "Using the previously built Docker image"
  echo
  echo -n "Please enter the docker image name (default: localtestnet) "
  read -r cl_image
  if [[ "$cl_image" == "" ]]; then
    new_cl_image="localtestnet"
  else
    new_cl_image=$cl_image
  fi
else
  echo "Starting the Docker Build!"
  # Build the docker Image
  sudo docker build . -t localtestnet
  # The new el_image value
  new_cl_image="localtestnet"
fi


# ------------------------------------------------
# Run the Kurtosis Tests
# ------------------------------------------------

# Use sed to replace the el_image value in the file
cat kurtosis-network-params.yml | envsubst > assertoor.yaml
sed -i "s/cl_image: .*/cl_image: $new_cl_image/" assertoor.yaml

sudo kurtosis run \
  --enclave nimbus-localtestnet \
  github.com/ethpandaops/ethereum-package \
  --args-file assertoor.yaml

enclave_dump=$(kurtosis enclave inspect nimbus-localtestnet)
assertoor_url=$(echo "$enclave_dump" | grep assertoor | grep http | sed 's/.*\(http:\/\/[0-9.:]\+\).*/\1/')

# ------------------------------------------------
# Remove Generated File
# ------------------------------------------------
rm assertoor.yaml

# Check the user's input and respond accordingly
if [[ "$reply" == "url" ]]; then
  echo "You chose to visit the assertoor URL."
  echo "Assertoor Checks Please Visit -> ${assertoor_url}"
  echo "Please visit the URL to check the status of the tests"
  echo "The kurtosis enclave needs to be cleared, after the tests are done. Please run the following command ----- sudo kurtosis enclave rm -f nimbus-localtestnet"
else
  echo "Running the checks over terminal"


  # ------------------------------------------------
  # Check for Test Status
  # ------------------------------------------------
  YELLOW='\033[1;33m'
  GRAY='\033[0;37m'
  GREEN='\033[0;32m'
  RED='\033[0;31m'
  NC='\033[0m'

  # print assertor logs
  assertoor_container=$(docker container list | grep assertoor | sed 's/^\([^ ]\+\) .*$/\1/')
  docker logs -f "$assertoor_container" &

  # helper to fetch task status for specific test id
  get_tasks_status() {
    tasks=$(curl -s "${assertoor_url}"/api/v1/test_run/"$1" | jq -c ".data.tasks[] | {index, parent_index, name, title, status, result}")
    declare -A task_graph_map
    task_graph_map[0]=""

    while read task; do
      task_id=$(echo "$task" | jq -r ".index")
      task_parent=$(echo "$task" | jq -r ".parent_index")
      task_name=$(echo "$task" | jq -r ".name")
      task_title=$(echo "$task" | jq -r ".title")
      task_status=$(echo "$task" | jq -r ".status")
      task_result=$(echo "$task" | jq -r ".result")

      task_graph="${task_graph_map[$task_parent]}"
      task_graph_map[$task_id]="$task_graph |"
      if [ ! -z "$task_graph" ]; then
        task_graph="${task_graph}- "
      fi

      if [ "$task_status" == "pending" ]; then
        task_status="${GRAY}pending ${NC}"
      elif [ "$task_status" == "running" ]; then
        task_status="${YELLOW}running ${NC}"
      elif [ "$task_status" == "complete" ]; then
        task_status="${GREEN}complete${NC}"
      fi

      if [ "$task_result" == "none" ]; then
        task_result="${GRAY}none ${NC}"
      elif [ "$task_result" == "success" ]; then
        task_result="${GREEN}success${NC}"
      elif [ "$task_result" == "failure" ]; then
        task_result="${RED}failure${NC}"
      fi

      echo -e " $(printf '%-4s' "$task_id")\t$task_status\t$task_result\t$(printf '%-50s' "$task_graph$task_name") \t$task_title"
    done <<< $(echo "$tasks")
  }

  # poll & check test status
  final_test_result=""
  failed_test_id=""
  while true
  do
    pending_tests=0
    failed_tests=0
    total_tests=0
    running_test=""

    status_lines=()
    task_lines=""
    status_lines+=("$(date +'%Y-%m-%d %H:%M:%S') Test Status:")

    tests=$(curl -s "${assertoor_url}"/api/v1/test_runs | jq -c ".data[] | {run_id, test_id, name, status}")
    while read -r test; do
      if [ -z "$test" ]; then
        continue
      fi
      run_id=$(echo "$test" | jq -r ".run_id")
      test_id=$(echo "$test" | jq -r ".test_id")
      test_name=$(echo "$test" | jq -r ".name")
      test_status=$(echo "$test" | jq -r ".status")

      if [ "$test_status" == "pending" ]; then
        pending_tests=$(expr $pending_tests + 1)
        status_name="${GRAY}pending${NC}"
      elif [ "$test_status" == "running" ]; then
        pending_tests=$(expr $pending_tests + 1)
        running_test="$run_id"
        status_name="${YELLOW}running${NC}"

      elif [ "$test_status" == "success" ]; then
        status_name="${GREEN}success${NC}"
      elif [ "$test_status" == "failure" ]; then
        failed_tests=$(expr $failed_tests + 1)
        failed_test_id="$run_id"
        status_name="${RED}failure${NC}"
      else
        status_name="$test_status"
      fi
      status_lines+=(" $(printf '%-3s' "$test_id") $status_name \t$test_name")
      total_tests=$(expr $total_tests + 1)
    done <<< $(echo "$tests")

    for status_line in "${status_lines[@]}"
    do
      echo -e "$status_line"
    done

    if [ -n "$running_test" ]; then
      task_lines=$(get_tasks_status "$running_test")
      echo "Active Test Task Status:"
      echo "$task_lines"
    fi

    if [ "$failed_tests" -gt 0 ]; then
      final_test_result="failure"
      break
    fi
    if [ "$total_tests" -gt 0 ] && [ "$pending_tests" -le 0 ]; then
      final_test_result="success"
      break
    fi

    sleep 60
  done

  # save test results & status to github output
  echo "test_result=$(echo "$final_test_result")"
  echo "test_status"
  for status_line in "${status_lines[@]}"
  do
    echo -e "$status_line"
  done
  echo

  if [ -n "$failed_test_id" ]; then
    echo "failed_test_status"
    get_tasks_status "$failed_test_id"
    echo ""
  else
    echo "failed_test_status="
  fi

  # ------------------------------------------------
  # Cleanup
  # ------------------------------------------------
  sudo kurtosis enclave rm -f nimbus-localtestnet
fi
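A short usage sketch for the new script: it prompts twice up front (output mode, then whether to rebuild the Docker image), requires docker, kurtosis and jq (installing the latter two via sudo apt-get if missing), and spins up an enclave named `nimbus-localtestnet`. The file name used below is a guess, since the diff viewer does not show the script's path.

```bash
# Hypothetical invocation; the script path/name is an assumption.
chmod +x ./launch_kurtosis_tests.sh
./launch_kurtosis_tests.sh
# First prompt:  "terminal" polls assertoor test status in the console every 60 s,
#                "url" just prints the assertoor web UI address and exits the checks.
# Second prompt: "yes" rebuilds the "localtestnet" image with docker build,
#                "no" reuses a previously built image (its name is asked interactively).

# In "url" mode the enclave is left running; remove it manually when done:
sudo kurtosis enclave rm -f nimbus-localtestnet
```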
@@ -21,11 +21,11 @@ import # Unit test
  ./test_block_dag,
  ./test_block_processor,
  ./test_block_quarantine,
-  ./test_bloom_filter,
  ./test_conf,
  ./test_datatypes,
  ./test_deposit_snapshots,
  ./test_discovery,
+  ./test_engine_api_conversions,
  ./test_engine_authentication,
  ./test_el_manager,
  ./test_el_conf,
@@ -51,6 +51,7 @@ import # Unit test
  ./test_sync_committee_pool,
  ./test_sync_manager,
  ./test_toblindedblock,
+  ./test_validator_bucket_sort,
  ./test_validator_change_pool,
  ./test_validator_pool,
  ./test_zero_signature,
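With the aggregator updated as above (`test_bloom_filter` removed, `test_engine_api_conversions` and `test_validator_bucket_sort` added), the unit-test suite picks the new modules up automatically. Assuming the repository's usual Makefile target is available, a full run would be along these lines:

```bash
# Assumption: "make test" is the repository's aggregate unit-test target;
# parallelism via -j is optional.
make -j"$(nproc)" test
```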
@@ -23,7 +23,7 @@ import
  # Test utilities
  ../../testutil, ../../testblockutil

-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44
proc compute_aggregate_sync_committee_signature(
    cfg: RuntimeConfig,
    forked: ForkedHashedBeaconState,
@@ -133,7 +133,7 @@ proc block_for_next_slot(
  addTestBlock(
    forked, cache, attestations = attestations, cfg = cfg)

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
func initialize_light_client_store(
    state: auto, storeDataFork: static LightClientDataFork): auto =
  storeDataFork.LightClientStore(
@@ -286,8 +286,8 @@ proc runTest(storeDataFork: static LightClientDataFork) =
    template next_sync_committee(): auto = state.next_sync_committee
    let
      next_sync_committee_branch = normalize_merkle_branch(
-        state.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get,
-        storeDataFork.NEXT_SYNC_COMMITTEE_GINDEX)
+        state.build_proof(NEXT_SYNC_COMMITTEE_GINDEX).get,
+        storeDataFork.next_sync_committee_gindex)

      # Finality is unchanged
      finality_header = default(storeDataFork.LightClientHeader)
@@ -359,8 +359,8 @@ proc runTest(storeDataFork: static LightClientDataFork) =
        state.finalized_checkpoint.root
    let
      finality_branch = normalize_merkle_branch(
-        state.build_proof(altair.FINALIZED_ROOT_GINDEX).get,
-        storeDataFork.FINALIZED_ROOT_GINDEX)
+        state.build_proof(FINALIZED_ROOT_GINDEX).get,
+        storeDataFork.finalized_root_gindex)

      update = storeDataFork.LightClientUpdate(
        attested_header: attested_header,
@@ -114,7 +114,8 @@ suite baseDescription & "Block Header " & preset():
    runTest[altair.BeaconBlock, typeof applyBlockHeader](
      OpBlockHeaderDir, suiteName, "Block Header", "block", applyBlockHeader, path)

-from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter
+from ".."/".."/".."/beacon_chain/validator_bucket_sort import
+  sortValidatorBuckets

suite baseDescription & "Deposit " & preset():
  proc applyDeposit(
@@ -122,7 +123,7 @@ suite baseDescription & "Deposit " & preset():
      Result[void, cstring] =
    process_deposit(
      defaultRuntimeConfig, preState,
-      constructBloomFilter(preState.validators.asSeq)[], deposit, {})
+      sortValidatorBuckets(preState.validators.asSeq)[], deposit, {})

  for path in walkTests(OpDepositsDir):
    runTest[Deposit, typeof applyDeposit](
Some files were not shown because too many files have changed in this diff.