Merge commit 'f48ce6c0c73537009853e0c7e0782820ee69713e' into dev/etan/lc-wasm4

Etan Kissling 2024-01-25 22:05:04 +01:00
commit eb05bbce20
142 changed files with 3407 additions and 1749 deletions

View File

@ -215,7 +215,7 @@ jobs:
if: ${{ !cancelled() }} && github.event_name == 'pull_request'
run: |
excluded_files="config.yaml"
excluded_extensions="ans|json|md|png|service|ssz|txt"
excluded_extensions="ans|json|json\\.template|md|png|service|ssz|txt"
current_year=$(date +"%Y")
outdated_files=()

View File

@ -358,7 +358,6 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
```diff
+ Aggregate and proof signatures OK
+ Attestation signatures OK
+ Blob sidecar signatures OK
+ Deposit signatures OK
+ Slot signatures OK
+ Sync committee message signatures OK
@ -366,7 +365,7 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ Sync committee signed contribution and proof signatures OK
+ Voluntary exit signatures OK
```
OK: 9/9 Fail: 0/9 Skip: 0/9
OK: 8/8 Fail: 0/8 Skip: 0/8
## Network metadata
```diff
+ goerli OK
@ -596,13 +595,15 @@ OK: 24/24 Fail: 0/24 Skip: 0/24
OK: 1/1 Fail: 0/1 Skip: 0/1
## Validator Client test suite
```diff
+ /eth/v1/validator/beacon_committee_selections serialization/deserialization test OK
+ /eth/v1/validator/sync_committee_selections serialization/deserialization test OK
+ bestSuccess() API timeout test OK
+ firstSuccessParallel() API timeout test OK
+ getAttestationDataScore() test vectors OK
+ getLiveness() response deserialization test OK
+ normalizeUri() test vectors OK
```
OK: 5/5 Fail: 0/5 Skip: 0/5
OK: 7/7 Fail: 0/7 Skip: 0/7
## Validator change pool testing suite
```diff
+ addValidatorChangeMessage/getAttesterSlashingMessage OK
@ -715,4 +716,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9
---TOTAL---
OK: 404/409 Fail: 0/409 Skip: 5/409
OK: 405/410 Fail: 0/410 Skip: 5/410

View File

@ -2258,7 +2258,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing SignedBLSToExecutionChange OK
+ Testing SignedBeaconBlock OK
+ Testing SignedBeaconBlockHeader OK
+ Testing SignedBlobSidecar OK
+ Testing SignedContributionAndProof OK
+ Testing SignedVoluntaryExit OK
+ Testing SigningData OK
@ -2271,7 +2270,7 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing VoluntaryExit OK
+ Testing Withdrawal OK
```
OK: 49/49 Fail: 0/49 Skip: 0/49
OK: 48/48 Fail: 0/48 Skip: 0/48
## EF - Deneb - Sanity - Blocks [Preset: mainnet]
```diff
+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_all_zeroed_sig [Preset: mainnet] OK
@ -2411,6 +2410,11 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
+ Light client - Single merkle proof - mainnet/deneb/light_client/single_merkle_proof/Beacon OK
```
OK: 14/14 Fail: 0/14 Skip: 0/14
## EF - Merkle proof [Preset: mainnet]
```diff
+ Merkle proof - Single merkle proof - mainnet/deneb/merkle_proof/single_merkle_proof/Beacon OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## EF - Phase 0 - Epoch Processing - Effective balance updates [Preset: mainnet]
```diff
+ Effective balance updates - effective_balance_hysteresis [Preset: mainnet] OK
@ -2825,6 +2829,8 @@ OK: 40/40 Fail: 0/40 Skip: 0/40
+ ForkChoice - mainnet/altair/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK
+ ForkChoice - mainnet/altair/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_we OK
+ ForkChoice - mainnet/altair/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attesta OK
ForkChoice - mainnet/altair/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip
ForkChoice - mainnet/altair/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_roo Skip
+ ForkChoice - mainnet/altair/fork_choice/on_block/pyspec_tests/basic OK
+ ForkChoice - mainnet/altair/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK
ForkChoice - mainnet/altair/fork_choice/on_block/pyspec_tests/on_block_future_block Skip
@ -2842,6 +2848,8 @@ OK: 40/40 Fail: 0/40 Skip: 0/40
+ ForkChoice - mainnet/bellatrix/fork_choice/get_head/pyspec_tests/proposer_boost_correct_he OK
+ ForkChoice - mainnet/bellatrix/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier OK
+ ForkChoice - mainnet/bellatrix/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_atte OK
ForkChoice - mainnet/bellatrix/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_ro Skip
ForkChoice - mainnet/bellatrix/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_ Skip
+ ForkChoice - mainnet/bellatrix/fork_choice/on_block/pyspec_tests/basic OK
+ ForkChoice - mainnet/bellatrix/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK
ForkChoice - mainnet/bellatrix/fork_choice/on_block/pyspec_tests/on_block_future_block Skip
@ -2852,6 +2860,7 @@ OK: 40/40 Fail: 0/40 Skip: 0/40
ForkChoice - mainnet/bellatrix/fork_choice/on_merge_block/pyspec_tests/block_lookup_failed Skip
ForkChoice - mainnet/bellatrix/fork_choice/on_merge_block/pyspec_tests/too_early_for_merge Skip
ForkChoice - mainnet/bellatrix/fork_choice/on_merge_block/pyspec_tests/too_late_for_merge Skip
ForkChoice - mainnet/bellatrix/fork_choice/should_override_forkchoice_update/pyspec_tests/ Skip
+ ForkChoice - mainnet/capella/fork_choice/ex_ante/pyspec_tests/ex_ante_attestations_is_grea OK
+ ForkChoice - mainnet/capella/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_boost_ OK
+ ForkChoice - mainnet/capella/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest OK
@ -2863,12 +2872,15 @@ OK: 40/40 Fail: 0/40 Skip: 0/40
+ ForkChoice - mainnet/capella/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK
+ ForkChoice - mainnet/capella/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_w OK
+ ForkChoice - mainnet/capella/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attest OK
ForkChoice - mainnet/capella/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip
ForkChoice - mainnet/capella/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_ro Skip
+ ForkChoice - mainnet/capella/fork_choice/on_block/pyspec_tests/basic OK
+ ForkChoice - mainnet/capella/fork_choice/on_block/pyspec_tests/on_block_bad_parent_root OK
ForkChoice - mainnet/capella/fork_choice/on_block/pyspec_tests/on_block_future_block Skip
+ ForkChoice - mainnet/capella/fork_choice/on_block/pyspec_tests/proposer_boost OK
+ ForkChoice - mainnet/capella/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_blo OK
+ ForkChoice - mainnet/capella/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_sl OK
ForkChoice - mainnet/capella/fork_choice/should_override_forkchoice_update/pyspec_tests/sh Skip
+ ForkChoice - mainnet/deneb/fork_choice/ex_ante/pyspec_tests/ex_ante_attestations_is_greate OK
+ ForkChoice - mainnet/deneb/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_boost_no OK
+ ForkChoice - mainnet/deneb/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest_a OK
@ -2880,6 +2892,8 @@ OK: 40/40 Fail: 0/40 Skip: 0/40
+ ForkChoice - mainnet/deneb/fork_choice/get_head/pyspec_tests/proposer_boost_correct_head OK
+ ForkChoice - mainnet/deneb/fork_choice/get_head/pyspec_tests/shorter_chain_but_heavier_wei OK
+ ForkChoice - mainnet/deneb/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attestat OK
ForkChoice - mainnet/deneb/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip
ForkChoice - mainnet/deneb/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_root Skip
+ ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/basic OK
+ ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/invalid_data_unavailable OK
+ ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/invalid_incorrect_proof OK
@ -2891,8 +2905,9 @@ OK: 40/40 Fail: 0/40 Skip: 0/40
+ ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/proposer_boost_is_first_block OK
+ ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/proposer_boost_root_same_slot OK
+ ForkChoice - mainnet/deneb/fork_choice/on_block/pyspec_tests/simple_blob_data OK
ForkChoice - mainnet/deneb/fork_choice/should_override_forkchoice_update/pyspec_tests/shou Skip
```
OK: 69/77 Fail: 0/77 Skip: 8/77
OK: 69/88 Fail: 0/88 Skip: 19/88
## Sync
```diff
+ Sync - mainnet/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
@ -2902,4 +2917,4 @@ OK: 69/77 Fail: 0/77 Skip: 8/77
OK: 3/3 Fail: 0/3 Skip: 0/3
---TOTAL---
OK: 2336/2344 Fail: 0/2344 Skip: 8/2344
OK: 2336/2355 Fail: 0/2355 Skip: 19/2355

View File

@ -2355,7 +2355,6 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing SignedBLSToExecutionChange OK
+ Testing SignedBeaconBlock OK
+ Testing SignedBeaconBlockHeader OK
+ Testing SignedBlobSidecar OK
+ Testing SignedContributionAndProof OK
+ Testing SignedVoluntaryExit OK
+ Testing SigningData OK
@ -2368,7 +2367,7 @@ OK: 34/34 Fail: 0/34 Skip: 0/34
+ Testing VoluntaryExit OK
+ Testing Withdrawal OK
```
OK: 49/49 Fail: 0/49 Skip: 0/49
OK: 48/48 Fail: 0/48 Skip: 0/48
## EF - Deneb - Sanity - Blocks [Preset: minimal]
```diff
+ [Invalid] EF - Deneb - Sanity - Blocks - invalid_all_zeroed_sig [Preset: minimal] OK
@ -2551,6 +2550,11 @@ OK: 20/20 Fail: 0/20 Skip: 0/20
+ Light client - Update ranking - minimal/deneb/light_client/update_ranking/pyspec_tests/upd OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
## EF - Merkle proof [Preset: minimal]
```diff
+ Merkle proof - Single merkle proof - minimal/deneb/merkle_proof/single_merkle_proof/Beacon OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## EF - Phase 0 - Epoch Processing - Effective balance updates [Preset: minimal]
```diff
+ Effective balance updates - effective_balance_hysteresis [Preset: minimal] OK
@ -2977,6 +2981,8 @@ OK: 45/45 Fail: 0/45 Skip: 0/45
+ ForkChoice - minimal/altair/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attesta OK
+ ForkChoice - minimal/altair/fork_choice/get_head/pyspec_tests/voting_source_beyond_two_epo OK
+ ForkChoice - minimal/altair/fork_choice/get_head/pyspec_tests/voting_source_within_two_epo OK
ForkChoice - minimal/altair/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip
ForkChoice - minimal/altair/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_roo Skip
+ ForkChoice - minimal/altair/fork_choice/on_block/pyspec_tests/basic OK
+ ForkChoice - minimal/altair/fork_choice/on_block/pyspec_tests/incompatible_justification_u OK
+ ForkChoice - minimal/altair/fork_choice/on_block/pyspec_tests/incompatible_justification_u OK
@ -3023,6 +3029,8 @@ OK: 45/45 Fail: 0/45 Skip: 0/45
+ ForkChoice - minimal/bellatrix/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_atte OK
+ ForkChoice - minimal/bellatrix/fork_choice/get_head/pyspec_tests/voting_source_beyond_two_ OK
+ ForkChoice - minimal/bellatrix/fork_choice/get_head/pyspec_tests/voting_source_within_two_ OK
ForkChoice - minimal/bellatrix/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_ro Skip
ForkChoice - minimal/bellatrix/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_ Skip
+ ForkChoice - minimal/bellatrix/fork_choice/on_block/pyspec_tests/basic OK
+ ForkChoice - minimal/bellatrix/fork_choice/on_block/pyspec_tests/incompatible_justificatio OK
+ ForkChoice - minimal/bellatrix/fork_choice/on_block/pyspec_tests/incompatible_justificatio OK
@ -3058,6 +3066,8 @@ OK: 45/45 Fail: 0/45 Skip: 0/45
+ ForkChoice - minimal/bellatrix/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delay OK
+ ForkChoice - minimal/bellatrix/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delay OK
+ ForkChoice - minimal/bellatrix/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_witho OK
ForkChoice - minimal/bellatrix/fork_choice/should_override_forkchoice_update/pyspec_tests/ Skip
ForkChoice - minimal/bellatrix/fork_choice/should_override_forkchoice_update/pyspec_tests/ Skip
+ ForkChoice - minimal/bellatrix/fork_choice/withholding/pyspec_tests/withholding_attack OK
+ ForkChoice - minimal/bellatrix/fork_choice/withholding/pyspec_tests/withholding_attack_unv OK
+ ForkChoice - minimal/capella/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest OK
@ -3073,6 +3083,8 @@ OK: 45/45 Fail: 0/45 Skip: 0/45
+ ForkChoice - minimal/capella/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attest OK
+ ForkChoice - minimal/capella/fork_choice/get_head/pyspec_tests/voting_source_beyond_two_ep OK
+ ForkChoice - minimal/capella/fork_choice/get_head/pyspec_tests/voting_source_within_two_ep OK
ForkChoice - minimal/capella/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip
ForkChoice - minimal/capella/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_ro Skip
+ ForkChoice - minimal/capella/fork_choice/on_block/pyspec_tests/basic OK
+ ForkChoice - minimal/capella/fork_choice/on_block/pyspec_tests/incompatible_justification_ OK
+ ForkChoice - minimal/capella/fork_choice/on_block/pyspec_tests/incompatible_justification_ OK
@ -3104,6 +3116,8 @@ OK: 45/45 Fail: 0/45 Skip: 0/45
+ ForkChoice - minimal/capella/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed OK
+ ForkChoice - minimal/capella/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed OK
+ ForkChoice - minimal/capella/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_without OK
ForkChoice - minimal/capella/fork_choice/should_override_forkchoice_update/pyspec_tests/sh Skip
ForkChoice - minimal/capella/fork_choice/should_override_forkchoice_update/pyspec_tests/sh Skip
+ ForkChoice - minimal/capella/fork_choice/withholding/pyspec_tests/withholding_attack OK
+ ForkChoice - minimal/capella/fork_choice/withholding/pyspec_tests/withholding_attack_unvia OK
+ ForkChoice - minimal/deneb/fork_choice/ex_ante/pyspec_tests/ex_ante_sandwich_with_honest_a OK
@ -3119,6 +3133,8 @@ OK: 45/45 Fail: 0/45 Skip: 0/45
+ ForkChoice - minimal/deneb/fork_choice/get_head/pyspec_tests/split_tie_breaker_no_attestat OK
+ ForkChoice - minimal/deneb/fork_choice/get_head/pyspec_tests/voting_source_beyond_two_epoc OK
+ ForkChoice - minimal/deneb/fork_choice/get_head/pyspec_tests/voting_source_within_two_epoc OK
ForkChoice - minimal/deneb/fork_choice/get_proposer_head/pyspec_tests/basic_is_head_root Skip
ForkChoice - minimal/deneb/fork_choice/get_proposer_head/pyspec_tests/basic_is_parent_root Skip
+ ForkChoice - minimal/deneb/fork_choice/on_block/pyspec_tests/basic OK
+ ForkChoice - minimal/deneb/fork_choice/on_block/pyspec_tests/incompatible_justification_up OK
+ ForkChoice - minimal/deneb/fork_choice/on_block/pyspec_tests/incompatible_justification_up OK
@ -3155,10 +3171,12 @@ OK: 45/45 Fail: 0/45 Skip: 0/45
+ ForkChoice - minimal/deneb/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed_j OK
+ ForkChoice - minimal/deneb/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_delayed_j OK
+ ForkChoice - minimal/deneb/fork_choice/reorg/pyspec_tests/simple_attempted_reorg_without_e OK
ForkChoice - minimal/deneb/fork_choice/should_override_forkchoice_update/pyspec_tests/shou Skip
ForkChoice - minimal/deneb/fork_choice/should_override_forkchoice_update/pyspec_tests/shou Skip
+ ForkChoice - minimal/deneb/fork_choice/withholding/pyspec_tests/withholding_attack OK
+ ForkChoice - minimal/deneb/fork_choice/withholding/pyspec_tests/withholding_attack_unviabl OK
```
OK: 185/193 Fail: 0/193 Skip: 8/193
OK: 185/207 Fail: 0/207 Skip: 22/207
## Sync
```diff
+ Sync - minimal/bellatrix/sync/optimistic/pyspec_tests/from_syncing_to_invalid OK
@ -3168,4 +3186,4 @@ OK: 185/193 Fail: 0/193 Skip: 8/193
OK: 3/3 Fail: 0/3 Skip: 0/3
---TOTAL---
OK: 2578/2586 Fail: 0/2586 Skip: 8/2586
OK: 2578/2600 Fail: 0/2600 Skip: 22/2600

View File

@ -118,9 +118,9 @@ ifneq ($(OS), Windows_NT)
PLATFORM_SPECIFIC_TARGETS += gnosis-build
endif
# We don't need the `vendor/holesky/public-keys/all.txt` file but fetching it
# We don't need these `vendor/holesky` files but fetching them
# may trigger 'This repository is over its data quota' from GitHub
GIT_SUBMODULE_CONFIG := -c lfs.fetchexclude=/public-keys/all.txt
GIT_SUBMODULE_CONFIG := -c lfs.fetchexclude=/public-keys/all.txt,/custom_config_data/genesis.ssz
ifeq ($(NIM_PARAMS),)
# "variables.mk" was not included, so we update the submodules.
@ -841,10 +841,10 @@ book:
"$(MAKE)" -C docs book
auditors-book:
[[ "$$(mdbook --version)" = "mdbook v0.4.28" ]] || { echo "'mdbook v0.4.28' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-toc --version)" == "mdbook-toc 0.8.0" ]] || { echo "'mdbook-toc 0.8.0' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-open-on-gh --version)" == "mdbook-open-on-gh 2.3.3" ]] || { echo "'mdbook-open-on-gh 2.3.3' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-admonish --version)" == "mdbook-admonish 1.7.0" ]] || { echo "'mdbook-open-on-gh 1.7.0' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook --version)" = "mdbook v0.4.35" ]] || { echo "'mdbook v0.4.28' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-toc --version)" == "mdbook-toc 0.14.1" ]] || { echo "'mdbook-toc 0.14.1' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-open-on-gh --version)" == "mdbook-open-on-gh 2.4.1" ]] || { echo "'mdbook-open-on-gh 2.4.1' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
[[ "$$(mdbook-admonish --version)" == "mdbook-admonish 1.13.1" ]] || { echo "'mdbook-open-on-gh 1.13.1' not found in PATH. See 'docs/README.md'. Aborting."; exit 1; }
cd docs/the_auditors_handbook && \
mdbook build

View File

@ -793,7 +793,8 @@ proc putBlock*(
proc putBlobSidecar*(
db: BeaconChainDB,
value: BlobSidecar) =
db.blobs.putSZSSZ(blobkey(value.block_root, value.index), value)
let block_root = hash_tree_root(value.signed_block_header.message)
db.blobs.putSZSSZ(blobkey(block_root, value.index), value)
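
Since the Deneb `BlobSidecar` no longer carries an explicit `block_root` field, the key is now derived from the embedded signed block header. Below is a minimal sketch of the matching read path, assuming a `getSZSSZ` counterpart to the `putSZSSZ` call above; the helper name, its `GetResult` return and the wrapper proc are assumptions for illustration, not necessarily the client's exact accessor.

```nim
# Sketch only: read back a sidecar using the same derived key as the write
# path above. `getSZSSZ`/`GetResult` are assumed counterparts of `putSZSSZ`.
proc getBlobSidecarSketch(
    db: BeaconChainDB, block_root: Eth2Digest, index: BlobIndex,
    value: var BlobSidecar): bool =
  # Callers look sidecars up by the root of the block that references them,
  # i.e. the same hash_tree_root(signed_block_header.message) used on write.
  db.blobs.getSZSSZ(blobkey(block_root, index), value) == GetResult.found
```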
proc delBlobSidecar*(
db: BeaconChainDB,

View File

@ -16,7 +16,7 @@ from ./spec/datatypes/capella import
from ./spec/datatypes/deneb import ExecutionPayloadHeader
type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconstate
# Memory-representation-equivalent to a phase0 BeaconState for in-place SSZ
# reading and writing
Phase0BeaconStateNoImmutableValidators* = object
@ -69,7 +69,7 @@ type
current_justified_checkpoint*: Checkpoint
finalized_checkpoint*: Checkpoint
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#beaconstate
# Memory-representation-equivalent to an Altair BeaconState for in-place SSZ
# reading and writing
AltairBeaconStateNoImmutableValidators* = object
@ -127,7 +127,7 @@ type
current_sync_committee*: SyncCommittee # [New in Altair]
next_sync_committee*: SyncCommittee # [New in Altair]
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#beaconstate
# Memory-representation-equivalent to a Bellatrix BeaconState for in-place SSZ
# reading and writing
BellatrixBeaconStateNoImmutableValidators* = object
@ -186,7 +186,7 @@ type
# Execution
latest_execution_payload_header*: bellatrix.ExecutionPayloadHeader # [New in Bellatrix]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#beaconstate
# with indirect changes via ExecutionPayload
# Memory-representation-equivalent to a Capella BeaconState for in-place SSZ
# reading and writing
@ -258,7 +258,7 @@ type
HashList[HistoricalSummary,
Limit HISTORICAL_ROOTS_LIMIT] # [New in Capella]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#beaconstate
# with indirect changes via ExecutionPayloadHeader
# Memory-representation-equivalent to a Deneb BeaconState for in-place SSZ
# reading and writing

View File

@ -36,7 +36,7 @@ from consensus_object_pools/block_pools_types_light_client
export
uri, nat, enr,
defaultEth2TcpPort, enabledLogLevel, ValidIpAddress,
defaultEth2TcpPort, enabledLogLevel,
defs, parseCmdArg, completeCmdArg, network_metadata,
el_conf, network, BlockHashOrNumber,
confTomlDefs, confTomlNet, confTomlUri,
@ -47,8 +47,8 @@ declareGauge network_name, "network name", ["name"]
const
# TODO: How should we select between IPv4 and IPv6
# Maybe there should be a config option for this.
defaultListenAddress* = (static ValidIpAddress.init("0.0.0.0"))
defaultAdminListenAddress* = (static ValidIpAddress.init("127.0.0.1"))
defaultListenAddress* = (static parseIpAddress("0.0.0.0"))
defaultAdminListenAddress* = (static parseIpAddress("127.0.0.1"))
defaultSigningNodeRequestTimeout* = 60
defaultBeaconNode* = "http://127.0.0.1:" & $defaultEth2RestPort
defaultBeaconNodeUri* = parseUri(defaultBeaconNode)
@ -292,7 +292,7 @@ type
desc: "Listening address for the Ethereum LibP2P and Discovery v5 traffic"
defaultValue: defaultListenAddress
defaultValueDesc: $defaultListenAddressDesc
name: "listen-address" .}: ValidIpAddress
name: "listen-address" .}: IpAddress
tcpPort* {.
desc: "Listening TCP port for Ethereum LibP2P traffic"
@ -339,16 +339,27 @@ type
desc: "Weak subjectivity checkpoint in the format block_root:epoch_number"
name: "weak-subjectivity-checkpoint" .}: Option[Checkpoint]
externalBeaconApiUrl* {.
desc: "External beacon API to use for syncing (on empty database)"
name: "external-beacon-api-url" .}: Option[string]
syncLightClient* {.
desc: "Accelerate execution layer sync using light client"
desc: "Accelerate sync using light client"
defaultValue: true
name: "sync-light-client" .}: bool
trustedBlockRoot* {.
hidden
desc: "Recent trusted finalized block root to initialize light client from"
desc: "Recent trusted finalized block root to sync from external " &
"beacon API (with `--external-beacon-api-url`). " &
"Uses the light client sync protocol to obtain the latest " &
"finalized checkpoint (LC is initialized from trusted block root)"
name: "trusted-block-root" .}: Option[Eth2Digest]
trustedStateRoot* {.
desc: "Recent trusted finalized state root to sync from external " &
"beacon API (with `--external-beacon-api-url`)"
name: "trusted-state-root" .}: Option[Eth2Digest]
finalizedCheckpointState* {.
desc: "SSZ file specifying a recent finalized state"
name: "finalized-checkpoint-state" .}: Option[InputFile]
@ -408,7 +419,7 @@ type
desc: "Listening address of the metrics server"
defaultValue: defaultAdminListenAddress
defaultValueDesc: $defaultAdminListenAddressDesc
name: "metrics-address" .}: ValidIpAddress
name: "metrics-address" .}: IpAddress
metricsPort* {.
desc: "Listening HTTP port of the metrics server"
@ -449,7 +460,7 @@ type
# Deprecated > 1.7.0
hidden
desc: "Deprecated for removal"
name: "rpc-address" .}: Option[ValidIpAddress]
name: "rpc-address" .}: Option[IpAddress]
restEnabled* {.
desc: "Enable the REST server"
@ -466,7 +477,7 @@ type
desc: "Listening address of the REST server"
defaultValue: defaultAdminListenAddress
defaultValueDesc: $defaultAdminListenAddressDesc
name: "rest-address" .}: ValidIpAddress
name: "rest-address" .}: IpAddress
restAllowedOrigin* {.
desc: "Limit the access to the REST API to a particular hostname " &
@ -520,7 +531,7 @@ type
desc: "Listening port for the REST keymanager API"
defaultValue: defaultAdminListenAddress
defaultValueDesc: $defaultAdminListenAddressDesc
name: "keymanager-address" .}: ValidIpAddress
name: "keymanager-address" .}: IpAddress
keymanagerAllowedOrigin* {.
desc: "Limit the access to the Keymanager API to a particular hostname " &
@ -776,7 +787,7 @@ type
of RecordCmd.create:
ipExt* {.
desc: "External IP address"
name: "ip" .}: ValidIpAddress
name: "ip" .}: IpAddress
tcpPortExt* {.
desc: "External TCP port"
@ -973,7 +984,7 @@ type
desc: "Listening port for the REST keymanager API"
defaultValue: defaultAdminListenAddress
defaultValueDesc: $defaultAdminListenAddressDesc
name: "keymanager-address" .}: ValidIpAddress
name: "keymanager-address" .}: IpAddress
keymanagerAllowedOrigin* {.
desc: "Limit the access to the Keymanager API to a particular hostname " &
@ -993,7 +1004,7 @@ type
desc: "Listening address of the metrics server (BETA)"
defaultValue: defaultAdminListenAddress
defaultValueDesc: $defaultAdminListenAddressDesc
name: "metrics-address" .}: ValidIpAddress
name: "metrics-address" .}: IpAddress
metricsPort* {.
desc: "Listening HTTP port of the metrics server (BETA)"
@ -1016,6 +1027,11 @@ type
defaultValue: false
name: "payload-builder" .}: bool
distributedEnabled* {.
desc: "Enable usage of Obol middleware (BETA)"
defaultValue: false
name: "distributed".}: bool
beaconNodes* {.
desc: "URL addresses to one or more beacon node HTTP REST APIs",
defaultValue: @[defaultBeaconNodeUri]
@ -1091,7 +1107,7 @@ type
desc: "Listening address of the REST HTTP server"
defaultValue: defaultAdminListenAddress
defaultValueDesc: $defaultAdminListenAddressDesc
name: "bind-address" .}: ValidIpAddress
name: "bind-address" .}: IpAddress
tlsEnabled* {.
desc: "Use secure TLS communication for REST server"

View File

@ -65,7 +65,7 @@ type LightClientConf* = object
desc: "Listening address for the Ethereum LibP2P and Discovery v5 traffic"
defaultValue: defaultListenAddress
defaultValueDesc: $defaultListenAddressDesc
name: "listen-address" .}: ValidIpAddress
name: "listen-address" .}: IpAddress
tcpPort* {.
desc: "Listening TCP port for Ethereum LibP2P traffic"

View File

@ -4,12 +4,12 @@ This folder holds the various consensus object pools needed for a blockchain cli
Objects in those pools have passed the "gossip validation" filter according
to specs:
- blocks: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#beacon_block
- aggregate attestations: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- blocks: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_block
- aggregate attestations: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- unaggregated attestation: https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
- voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#voluntary_exit
- Attester slashings: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#attester_slashing
- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#proposer_slashing
- voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#voluntary_exit
- Attester slashings: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attester_slashing
- Proposer slashings: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#proposer_slashing
After "gossip validation" the consensus objects can be rebroadcasted as they are optimistically good, however for internal processing further verification is needed.
For blocks, this means verifying state transition and all contained cryptographic signatures (instead of just the proposer signature).

View File

@ -739,7 +739,7 @@ func getAggregatedAttestation*(pool: var AttestationPool,
index: CommitteeIndex): Opt[Attestation] =
## Select the attestation that has the most votes going for it in the given
## slot/index
## https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#construct-aggregate
## https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/validator.md#construct-aggregate
let candidateIdx = pool.candidateIdx(slot)
if candidateIdx.isNone:
return Opt.none(Attestation)
@ -769,7 +769,7 @@ proc getBeaconHead*(
finalizedExecutionPayloadHash =
pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/fork_choice/safe-block.md#get_safe_execution_payload_hash
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/fork_choice/safe-block.md#get_safe_execution_payload_hash
safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root()
safeBlock = pool.dag.getBlockRef(safeBlockRoot)
safeExecutionPayloadHash =

View File

@ -7,7 +7,7 @@
{.push raises: [].}
import ../spec/datatypes/deneb
import ../spec/helpers
from std/sequtils import mapIt
from std/strutils import join
@ -37,8 +37,9 @@ func put*(quarantine: var BlobQuarantine, blobSidecar: ref BlobSidecar) =
oldest_blob_key = k
break
quarantine.blobs.del oldest_blob_key
discard quarantine.blobs.hasKeyOrPut((blobSidecar.block_root,
blobSidecar.index), blobSidecar)
let block_root = hash_tree_root(blobSidecar.signed_block_header.message)
discard quarantine.blobs.hasKeyOrPut(
(block_root, blobSidecar.index), blobSidecar)
func blobIndices*(quarantine: BlobQuarantine, digest: Eth2Digest):
seq[BlobIndex] =
@ -48,8 +49,18 @@ func blobIndices*(quarantine: BlobQuarantine, digest: Eth2Digest):
r.add(i)
r
func hasBlob*(quarantine: BlobQuarantine, blobSidecar: BlobSidecar): bool =
quarantine.blobs.hasKey((blobSidecar.block_root, blobSidecar.index))
func hasBlob*(
quarantine: BlobQuarantine,
slot: Slot,
proposer_index: uint64,
index: BlobIndex): bool =
for blob_sidecar in quarantine.blobs.values:
template block_header: untyped = blob_sidecar.signed_block_header.message
if block_header.slot == slot and
block_header.proposer_index == proposer_index and
blob_sidecar.index == index:
return true
false
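
With `block_root` gone from the sidecar type, de-duplication now happens on the `(slot, proposer_index, index)` tuple while storage stays keyed by the root derived from the embedded header. Below is a minimal usage sketch of how a caller is expected to combine the two entry points; the handler itself is hypothetical, and the real call sites appear in the gossip validation and Eth2 processor diffs further down.

```nim
# Sketch only: combining the new `hasBlob` overload with `put`.
proc acceptBlobSidecarSketch(
    quarantine: var BlobQuarantine, sidecar: ref BlobSidecar): bool =
  template header: untyped = sidecar[].signed_block_header.message
  # Only the first sidecar per (slot, proposer_index, index) tuple is kept.
  if quarantine.hasBlob(header.slot, header.proposer_index, sidecar[].index):
    return false
  quarantine.put(sidecar)  # keyed internally by hash_tree_root(header)
  true
```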
func popBlobs*(quarantine: var BlobQuarantine, digest: Eth2Digest):
seq[ref BlobSidecar] =

View File

@ -134,7 +134,7 @@ func link*(parent, child: BlockRef) =
func get_ancestor*(blck: BlockRef, slot: Slot,
maxDepth = 100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int):
BlockRef =
## https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/fork-choice.md#get_ancestor
## https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/fork-choice.md#get_ancestor
## Return the most recent block as of the time at `slot` that is not more recent
## than `blck` itself
if isNil(blck): return nil

View File

@ -553,8 +553,6 @@ func init*(
dag.putShufflingRef(tmp)
tmp
attester_dependent_root = withState(state):
forkyState.attester_dependent_root
total_active_balance = withState(state):
get_total_active_balance(forkyState.data, cache)
epochRef = EpochRef(
@ -1119,7 +1117,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# should have `previous_version` set to `current_version` while
# this doesn't happen to be the case in network that go through
# regular hard-fork upgrades. See for example:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#testing
if stateFork.current_version != configFork.current_version:
error "State from database does not match network, check --network parameter",
tail = dag.tail, headRef, stateFork, configFork
@ -1922,7 +1920,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
prunedHeads = hlen - dag.heads.len,
dagPruneDur = Moment.now() - startTick
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/sync/optimistic.md#helpers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/sync/optimistic.md#helpers
template is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool =
let blck =
if bid.slot <= dag.finalizedHead.slot:
@ -2421,7 +2419,6 @@ proc updateHead*(
if not(isNil(dag.onHeadChanged)):
let
currentEpoch = epoch(newHead.slot)
depRoot = withState(dag.headState): forkyState.proposer_dependent_root
prevDepRoot = withState(dag.headState):
forkyState.attester_dependent_root
@ -2613,7 +2610,7 @@ func aggregateAll*(
# Aggregation spec requires non-empty collection
# - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
# Consensus specs require at least one attesting index in attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
return err("aggregate: no attesting keys")
let
@ -2733,7 +2730,6 @@ proc rebuildIndex*(dag: ChainDAGRef) =
if state_root.isZero:
# If we can find an era file with this state, use it as an alternative
# starting point - ignore failures for now
var bytes: seq[byte]
if dag.era.getState(
historicalRoots, historicalSummaries, slot, state[]).isOk():
state_root = getStateRoot(state[])

View File

@ -222,7 +222,7 @@ proc initLightClientBootstrapForPeriod(
forkyBlck.toLightClientHeader(lcDataFork))
dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
bid.slot, forkyState.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_INDEX).get)
altair.CURRENT_SYNC_COMMITTEE_GINDEX).get)
else: raiseAssert "Unreachable"
res
@ -371,10 +371,10 @@ proc initLightClientUpdateForPeriod(
attested_header: forkyBlck.toLightClientHeader(lcDataFork),
next_sync_committee: forkyState.data.next_sync_committee,
next_sync_committee_branch:
forkyState.data.build_proof(altair.NEXT_SYNC_COMMITTEE_INDEX).get,
forkyState.data.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get,
finality_branch:
if finalizedBid.slot != FAR_FUTURE_SLOT:
forkyState.data.build_proof(altair.FINALIZED_ROOT_INDEX).get
forkyState.data.build_proof(altair.FINALIZED_ROOT_GINDEX).get
else:
default(FinalityBranch)))
else: raiseAssert "Unreachable"
@ -442,13 +442,13 @@ proc cacheLightClientData(
## block and state.
let cachedData = CachedLightClientData(
current_sync_committee_branch:
state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_INDEX).get,
state.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_GINDEX).get,
next_sync_committee_branch:
state.data.build_proof(altair.NEXT_SYNC_COMMITTEE_INDEX).get,
state.data.build_proof(altair.NEXT_SYNC_COMMITTEE_GINDEX).get,
finalized_slot:
state.data.finalized_checkpoint.epoch.start_slot,
finality_branch:
state.data.build_proof(altair.FINALIZED_ROOT_INDEX).get)
state.data.build_proof(altair.FINALIZED_ROOT_GINDEX).get)
if dag.lcDataStore.cache.data.hasKeyOrPut(bid, cachedData):
doAssert false, "Redundant `cacheLightClientData` call"
@ -723,11 +723,7 @@ proc initLightClientDataCache*(dag: ChainDAGRef) =
blocks.add bid
# Process blocks (reuses `dag.headState`, but restores it to the current head)
var
tmpState = assignClone(dag.headState)
tmpCache, cache: StateCache
oldCheckpoint: Checkpoint
cpIndex = 0
var cache: StateCache
for i in countdown(blocks.high, blocks.low):
bid = blocks[i]
if not dag.updateExistingState(
@ -960,7 +956,7 @@ proc getLightClientBootstrap(
dag.lcDataStore.db.putHeader(header)
dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
slot, forkyState.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_INDEX).get)
altair.CURRENT_SYNC_COMMITTEE_GINDEX).get)
else: raiseAssert "Unreachable"
do: return default(ForkedLightClientBootstrap)

View File

@ -25,7 +25,7 @@ logScope: topics = "spec_cache"
func count_active_validators*(shufflingRef: ShufflingRef): uint64 =
shufflingRef.shuffled_active_validator_indices.lenu64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_committee_count_per_slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_committee_count_per_slot
func get_committee_count_per_slot*(shufflingRef: ShufflingRef): uint64 =
get_committee_count_per_slot(count_active_validators(shufflingRef))
@ -38,7 +38,7 @@ func get_committee_index*(shufflingRef: ShufflingRef, index: uint64):
Result[CommitteeIndex, cstring] =
check_attestation_index(index, get_committee_count_per_slot(shufflingRef))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_beacon_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_beacon_committee
iterator get_beacon_committee*(
shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex):
(int, ValidatorIndex) =
@ -51,7 +51,7 @@ iterator get_beacon_committee*(
committees_per_slot * SLOTS_PER_EPOCH
): yield (index_in_committee, idx)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_beacon_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee*(
shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex):
seq[ValidatorIndex] =
@ -64,7 +64,7 @@ func get_beacon_committee*(
committees_per_slot * SLOTS_PER_EPOCH
)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_beacon_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee_len*(
shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex): uint64 =
## Return the number of members in the beacon committee at ``slot`` for ``index``.
@ -76,7 +76,7 @@ func get_beacon_committee_len*(
committees_per_slot * SLOTS_PER_EPOCH
)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_attesting_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_attesting_indices
iterator get_attesting_indices*(shufflingRef: ShufflingRef,
slot: Slot,
committee_index: CommitteeIndex,
@ -155,7 +155,7 @@ func get_attesting_indices_one*(shufflingRef: ShufflingRef,
res = some(validator_index)
res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_attesting_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_attesting_indices
func get_attesting_indices*(shufflingRef: ShufflingRef,
slot: Slot,
committee_index: CommitteeIndex,
@ -191,7 +191,7 @@ func makeAttestationData*(
epoch: current_epoch,
root: epoch_boundary_block.blck.root))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#validator-assignments
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/validator.md#validator-assignments
iterator get_committee_assignments*(
shufflingRef: ShufflingRef, validator_indices: HashSet[ValidatorIndex]):
tuple[committee_index: CommitteeIndex,

View File

@ -228,7 +228,7 @@ proc restValidatorExit(config: BeaconNodeConf) {.async.} =
block:
let s = spec.getOrDefault("DENEB_FORK_EPOCH", $FAR_FUTURE_EPOCH)
Epoch(Base10.decode(uint64, s).get(uint64(FAR_FUTURE_EPOCH)))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/beacon-chain.md#modified-process_voluntary_exit
if currentEpoch >= denebForkEpoch:
let capellaForkVersion =

View File

@ -71,9 +71,6 @@ func unknownRoleMsg(role: string): string =
template raiseError(reader: var TomlReader, msg: string) =
raiseTomlErr(reader.lex, msg)
template raiseError(reader: var JsonReader, msg: string) =
raiseTomlErr(reader.lex, msg)
proc readValue*(reader: var TomlReader, value: var EngineApiRoles)
{.raises: [SerializationError, IOError].} =
let roles = reader.readValue seq[string]

View File

@ -96,7 +96,6 @@ const
type
Eth1BlockNumber* = uint64
Eth1BlockTimestamp* = uint64
Eth1BlockHeader = engine_api.BlockHeader
Eth1Block* = ref object
hash*: Eth2Digest
@ -393,9 +392,6 @@ template trackedRequestWithTimeout[T](connection: ELConnection,
template cfg(m: ELManager): auto =
m.eth1Chain.cfg
template db(m: ELManager): BeaconChainDB =
m.eth1Chain.db
func hasJwtSecret*(m: ELManager): bool =
for c in m.elConnections:
if c.engineUrl.jwtSecret.isSome:
@ -409,12 +405,6 @@ func isSynced*(m: ELManager): bool =
template eth1ChainBlocks*(m: ELManager): Deque[Eth1Block] =
m.eth1Chain.blocks
template finalizedDepositsMerkleizer(m: ELManager): auto =
m.eth1Chain.finalizedDepositsMerkleizer
template headMerkleizer(m: ELManager): auto =
m.eth1Chain.headMerkleizer
template toGaugeValue(x: Quantity): int64 =
toGaugeValue(distinctBase x)
@ -423,7 +413,7 @@ template toGaugeValue(x: Quantity): int64 =
# doAssert SECONDS_PER_ETH1_BLOCK * cfg.ETH1_FOLLOW_DISTANCE < GENESIS_DELAY,
# "Invalid configuration: GENESIS_DELAY is set too low"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#get_eth1_data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/validator.md#get_eth1_data
func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 =
genesis_time + slot * SECONDS_PER_SLOT
@ -561,10 +551,13 @@ func asConsensusType*(payload: engine_api.GetPayloadV3Response):
# The `mapIt` calls below are necessary only because we use different distinct
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
# Both are defined as `array[N, byte]` under the hood.
kzgs: KzgCommitments payload.blobsBundle.commitments.mapIt(it.bytes),
proofs: payload.blobsBundle.proofs.mapIt(it.bytes),
blobs: Blobs payload.blobsBundle.blobs.mapIt(it.bytes)
)
blobsBundle: BlobsBundle(
commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(it.bytes)),
proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(it.bytes)),
blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes))))
func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
ExecutionPayloadV1 =
@ -885,15 +878,6 @@ template EngineApiResponseType*(T: type capella.ExecutionPayloadForSigning): typ
template EngineApiResponseType*(T: type deneb.ExecutionPayloadForSigning): type =
engine_api.GetPayloadV3Response
template payload(response: engine_api.ExecutionPayloadV1): engine_api.ExecutionPayloadV1 =
response
template payload(response: engine_api.GetPayloadV2Response): engine_api.ExecutionPayloadV1OrV2 =
response.executionPayload
template payload(response: engine_api.GetPayloadV3Response): engine_api.ExecutionPayloadV3 =
response.executionPayload
template toEngineWithdrawals*(withdrawals: seq[capella.Withdrawal]): seq[WithdrawalV1] =
mapIt(withdrawals, toEngineWithdrawal(it))
@ -1396,8 +1380,6 @@ proc exchangeConfigWithSingleEL(m: ELManager, connection: ELConnection) {.async.
# https://eips.ethereum.org/EIPS/eip-155#list-of-chain-ids
expectedChain = case m.eth1Network.get
of mainnet: 1.Quantity
of ropsten: 3.Quantity
of rinkeby: 4.Quantity
of goerli: 5.Quantity
of sepolia: 11155111.Quantity # https://chainid.network/
of holesky: 17000.Quantity
@ -1810,10 +1792,6 @@ func new*(T: type ELConnection,
engineUrl: engineUrl,
depositContractSyncStatus: DepositContractSyncStatus.unknown)
template getOrDefault[T, E](r: Result[T, E]): T =
type TT = T
get(r, default(TT))
proc init*(T: type Eth1Chain,
cfg: RuntimeConfig,
db: BeaconChainDB,
@ -2017,12 +1995,6 @@ proc syncBlockRange(m: ELManager,
blockNumber = lastBlock.number,
depositsProcessed = lastBlock.depositCount
func init(T: type FullBlockId, blk: Eth1BlockHeader|BlockObject): T =
FullBlockId(number: Eth1BlockNumber blk.number, hash: blk.hash)
func isNewLastBlock(m: ELManager, blk: Eth1BlockHeader|BlockObject): bool =
m.latestEth1Block.isNone or blk.number.uint64 > m.latestEth1BlockNumber
func hasConnection*(m: ELManager): bool =
m.elConnections.len > 0
@ -2121,7 +2093,6 @@ proc syncEth1Chain(m: ELManager, connection: ELConnection) {.async.} =
debug "Starting Eth1 syncing", `from` = shortLog(m.eth1Chain.blocks[^1])
var didPollOnce = false
while true:
debug "syncEth1Chain tick"
@ -2182,7 +2153,7 @@ proc startChainSyncingLoop(m: ELManager) {.async.} =
continue
await syncEth1Chain(m, syncedConnectionFut.read)
except CatchableError as err:
except CatchableError:
await sleepAsync(10.seconds)
# A more detailed error is already logged by trackEngineApiRequest
@ -2238,17 +2209,17 @@ proc testWeb3Provider*(web3Url: Uri,
stdout.write "\n"
res
let
chainId = request "Chain ID":
web3.provider.eth_chainId()
discard request "Chain ID":
web3.provider.eth_chainId()
discard request "Sync status":
web3.provider.eth_syncing()
let
latestBlock = request "Latest block":
web3.provider.eth_getBlockByNumber(blockId("latest"), false)
syncStatus = request "Sync status":
web3.provider.eth_syncing()
ns = web3.contractSender(DepositContract, depositContractAddress)
depositRoot = request "Deposit root":
ns.get_deposit_root.call(blockNumber = latestBlock.number.uint64)
discard request "Deposit root":
ns.get_deposit_root.call(blockNumber = latestBlock.number.uint64)

View File

@ -7,7 +7,7 @@
{.push raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# Merkle tree helpers
# ---------------------------------------------------------------

View File

@ -316,7 +316,7 @@ proc getBlock*(
readSszBytes(tmp, result.get(), updateRoot = root.isNone)
if root.isSome():
result.get().root = root.get()
except CatchableError as exc:
except CatchableError:
result.err()
proc getStateSZ*(

View File

@ -377,7 +377,7 @@ proc get_head*(self: var ForkChoice,
self.checkpoints.justified.balances,
self.checkpoints.proposer_boost_root)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/fork_choice/safe-block.md#get_safe_beacon_block_root
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/fork_choice/safe-block.md#get_safe_beacon_block_root
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
# Use most recent justified block as a stopgap
self.checkpoints.justified.checkpoint.root

View File

@ -9,7 +9,7 @@ This folder holds a collection of modules to:
Gossip validation is different from consensus verification in particular for blocks.
- Blocks: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_block
- Blocks: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_block
- Attestations (aggregated): https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- Attestations (unaggregated): https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#attestation-subnets
- Voluntary exits: https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#voluntary_exit

View File

@ -804,7 +804,7 @@ proc processBlock(
# - MUST NOT optimistically import the block.
# - MUST NOT apply the block to the fork choice store.
# - MAY queue the block for later processing.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/sync/optimistic.md#execution-engine-errors
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/sync/optimistic.md#execution-engine-errors
await sleepAsync(chronos.seconds(1))
self[].enqueueBlock(
entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized,

View File

@ -245,7 +245,7 @@ proc processSignedBeaconBlock*(
Opt.some(self.blobQuarantine[].popBlobs(signedBlock.root))
else:
if not self.quarantine[].addBlobless(self.dag.finalizedHead.slot,
signedBlock):
signedBlock):
notice "Block quarantine full (blobless)",
blockRoot = shortLog(signedBlock.root),
blck = shortLog(signedBlock.message),
@ -273,30 +273,26 @@ proc processSignedBeaconBlock*(
v
proc processSignedBlobSidecar*(
proc processBlobSidecar*(
self: var Eth2Processor, src: MsgSource,
signedBlobSidecar: deneb.SignedBlobSidecar, idx: BlobIndex): ValidationRes =
blobSidecar: deneb.BlobSidecar, subnet_id: BlobId): ValidationRes =
template block_header: untyped = blobSidecar.signed_block_header.message
let
wallTime = self.getCurrentBeaconTime()
(afterGenesis, wallSlot) = wallTime.toSlot()
logScope:
blob = shortLog(signedBlobSidecar.message)
signature = shortLog(signedBlobSidecar.signature)
blob = shortLog(blobSidecar)
wallSlot
# Potential under/overflows are fine; would just create odd metrics and logs
let delay = wallTime - signedBlobSidecar.message.slot.start_beacon_time
if self.blobQuarantine[].hasBlob(signedBlobSidecar.message):
debug "Blob received, already in quarantine", delay
return ValidationRes.ok
else:
debug "Blob received", delay
let delay = wallTime - block_header.slot.start_beacon_time
debug "Blob received", delay
let v =
self.dag.validateBlobSidecar(self.quarantine, self.blobQuarantine,
signedBlobSidecar, wallTime, idx)
blobSidecar, wallTime, subnet_id)
if v.isErr():
debug "Dropping blob", error = v.error()
@ -304,21 +300,19 @@ proc processSignedBlobSidecar*(
return v
debug "Blob validated, putting in blob quarantine"
self.blobQuarantine[].put(newClone(signedBlobSidecar.message))
self.blobQuarantine[].put(newClone(blobSidecar))
var skippedBlocks = false
if (let o = self.quarantine[].popBlobless(
signedBlobSidecar.message.block_root); o.isSome):
let block_root = hash_tree_root(block_header)
if (let o = self.quarantine[].popBlobless(block_root); o.isSome):
let blobless = o.unsafeGet()
if self.blobQuarantine[].hasBlobs(blobless):
self.blockProcessor[].enqueueBlock(
MsgSource.gossip,
ForkedSignedBeaconBlock.init(blobless),
Opt.some(self.blobQuarantine[].popBlobs(
signedBlobSidecar.message.block_root))
)
Opt.some(self.blobQuarantine[].popBlobs(block_root)))
else:
discard self.quarantine[].addBlobless(self.dag.finalizedHead.slot,
blobless)
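
Because removed and added lines are interleaved above, here is a condensed restatement of the new `processBlobSidecar` flow only. It is a simplified sketch of the new side of this diff: logging, metrics and the exact debug messages are omitted, and the wrapper name is invented.

```nim
# Condensed sketch of the new flow shown above (new side of the diff only).
proc processBlobSidecarSketch(
    self: var Eth2Processor, src: MsgSource,
    blobSidecar: deneb.BlobSidecar, subnet_id: BlobId): ValidationRes =
  # 1. Gossip-validate against the blob subnet id (no per-topic index anymore).
  let v = self.dag.validateBlobSidecar(
    self.quarantine, self.blobQuarantine, blobSidecar,
    self.getCurrentBeaconTime(), subnet_id)
  if v.isErr():
    return v
  # 2. Park the validated sidecar; the quarantine derives its own block root.
  self.blobQuarantine[].put(newClone(blobSidecar))
  # 3. If a blobless block was waiting on this root and is now complete,
  #    hand it to the block processor, otherwise keep it queued.
  let block_root = hash_tree_root(blobSidecar.signed_block_header.message)
  if (let o = self.quarantine[].popBlobless(block_root); o.isSome):
    let blobless = o.unsafeGet()
    if self.blobQuarantine[].hasBlobs(blobless):
      self.blockProcessor[].enqueueBlock(
        MsgSource.gossip, ForkedSignedBeaconBlock.init(blobless),
        Opt.some(self.blobQuarantine[].popBlobs(block_root)))
    else:
      discard self.quarantine[].addBlobless(
        self.dag.finalizedHead.slot, blobless)
  v
```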
@ -658,7 +652,7 @@ proc processSignedContributionAndProof*(
err(v.error())
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
proc processLightClientFinalityUpdate*(
self: var Eth2Processor, src: MsgSource,
finality_update: ForkedLightClientFinalityUpdate
@ -674,7 +668,7 @@ proc processLightClientFinalityUpdate*(
beacon_light_client_finality_update_dropped.inc(1, [$v.error[0]])
v
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#process_light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#process_light_client_optimistic_update
proc processLightClientOptimisticUpdate*(
self: var Eth2Processor, src: MsgSource,
optimistic_update: ForkedLightClientOptimisticUpdate

View File

@ -182,6 +182,20 @@ func check_attestation_subnet(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#verify_blob_sidecar_inclusion_proof
func verify_blob_sidecar_inclusion_proof(
blob_sidecar: deneb.BlobSidecar): Result[void, ValidationError] =
let gindex = kzg_commitment_inclusion_proof_gindex(blob_sidecar.index)
if not is_valid_merkle_branch(
hash_tree_root(blob_sidecar.kzg_commitment),
blob_sidecar.kzg_commitment_inclusion_proof,
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH,
get_subtree_index(gindex),
blob_sidecar.signed_block_header.message.body_root):
return errReject("BlobSidecar: inclusion proof not valid")
ok()
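
For context on the numbers involved: with the Deneb `BeaconBlockBody` layout (12 fields, `blob_kzg_commitments` last at field offset 11, list limit 4096), the generalized index used here works out as sketched below. This is a back-of-the-envelope restatement under those assumptions, not the client's `kzg_commitment_inclusion_proof_gindex` implementation.

```nim
# Sketch of the gindex arithmetic: 12 body fields padded to 16 leaves
# (depth 4), `blob_kzg_commitments` at field offset 11, list limit 4096
# (data depth 12 plus 1 for the length mix-in), giving
# KZG_COMMITMENT_INCLUSION_PROOF_DEPTH = 4 + 1 + 12 = 17.
func kzgCommitmentGindexSketch(index: uint64): uint64 =
  let fieldGindex = 16'u64 + 11  # gindex of `blob_kzg_commitments` in the body
  (fieldGindex shl 13) + index   # 27 * 2^13 + index = 221184 + index
```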
# Gossip Validation
# ----------------------------------------------------------------
@ -276,7 +290,7 @@ template validateBeaconBlockBellatrix(
#
# `is_merge_transition_complete(state)` tests for
# `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#block-processing
# shows that `state.latest_execution_payload_header` being default or not is
# exactly equivalent to whether that block's execution payload is default or
# not, so test cached block information rather than reconstructing a state.
@ -302,85 +316,132 @@ template validateBeaconBlockBellatrix(
# cannot occur here, because Nimbus's optimistic sync waits for either
# `ACCEPTED` or `SYNCING` from the EL to get this far.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
proc validateBlobSidecar*(
dag: ChainDAGRef, quarantine: ref Quarantine,
blobQuarantine: ref BlobQuarantine, sbs: SignedBlobSidecar,
wallTime: BeaconTime, idx: BlobIndex): Result[void, ValidationError] =
blobQuarantine: ref BlobQuarantine, blob_sidecar: BlobSidecar,
wallTime: BeaconTime, subnet_id: BlobId): Result[void, ValidationError] =
# Some of the checks below have been reordered compared to the spec, to
# perform the cheap checks first - in particular, we want to avoid loading
# an `EpochRef` and checking signatures. This reordering might lead to
# different IGNORE/REJECT results in turn affecting gossip scores.
template block_header: untyped = blob_sidecar.signed_block_header.message
# [REJECT] The sidecar is for the correct topic --
# i.e. sidecar.index matches the topic {index}.
if sbs.message.index != idx:
return dag.checkedReject("SignedBlobSidecar: mismatched gossip topic index")
# [REJECT] The sidecar's index is consistent with `MAX_BLOBS_PER_BLOCK`
# -- i.e. `blob_sidecar.index < MAX_BLOBS_PER_BLOCK`
if not (blob_sidecar.index < MAX_BLOBS_PER_BLOCK):
return dag.checkedReject("BlobSidecar: index inconsistent")
if dag.getBlockRef(sbs.message.block_root).isSome():
return errIgnore("SignedBlobSidecar: already have block")
# [REJECT] The sidecar is for the correct subnet -- i.e.
# `compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id`.
if not (compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id):
return dag.checkedReject("BlobSidecar: subnet incorrect")
# [IGNORE] The sidecar is not from a future slot (with a
# MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. validate that
# sidecar.slot <= current_slot (a client MAY queue future sidecars
# `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that
# `block_header.slot <= current_slot` (a client MAY queue future sidecars
# for processing at the appropriate slot).
if not (sbs.message.slot <=
if not (block_header.slot <=
(wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero):
return errIgnore("SignedBlobSidecar: slot too high")
return errIgnore("BlobSidecar: slot too high")
# [IGNORE] The block is from a slot greater than the latest
# finalized slot -- i.e. validate that
# signed_beacon_block.message.slot >
# compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
if not (sbs.message.slot > dag.finalizedHead.slot):
return errIgnore("SignedBlobSidecar: slot already finalized")
# [IGNORE] The sidecar is from a slot greater than the latest
# finalized slot -- i.e. validate that `block_header.slot >
# compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
if not (block_header.slot > dag.finalizedHead.slot):
return errIgnore("BlobSidecar: slot already finalized")
# [IGNORE] The block's parent (defined by block.parent_root) has
# been seen (via both gossip and non-gossip sources) (a client MAY
# queue blocks for processing once the parent block is retrieved).
# [REJECT] The sidecar's block's parent (defined by sidecar.block_parent_root)
# passes validation.
let parentRes = dag.getBlockRef(sbs.message.block_parent_root)
if parentRes.isErr:
if sbs.message.block_parent_root in quarantine[].unviable:
return dag.checkedReject("SignedBlobSidecar: parent not validated")
# [IGNORE] The sidecar is the first sidecar for the tuple
# (block_header.slot, block_header.proposer_index, blob_sidecar.index)
# with valid header signature, sidecar inclusion proof, and kzg proof.
let block_root = hash_tree_root(block_header)
if dag.getBlockRef(block_root).isSome():
return errIgnore("BlobSidecar: already have block")
if blobQuarantine[].hasBlob(
block_header.slot, block_header.proposer_index, blob_sidecar.index):
return errIgnore("BlobSidecar: already have valid blob from same proposer")
# [REJECT] The sidecar's inclusion proof is valid as verified by
# `verify_blob_sidecar_inclusion_proof(blob_sidecar)`.
block:
let v = verify_blob_sidecar_inclusion_proof(blob_sidecar)
if v.isErr:
return dag.checkedReject(v.error)
# [IGNORE] The sidecar's block's parent (defined by
# `block_header.parent_root`) has been seen (via both gossip and
# non-gossip sources) (a client MAY queue sidecars for processing
# once the parent block is retrieved).
#
# [REJECT] The sidecar's block's parent (defined by
# `block_header.parent_root`) passes validation.
let parent = dag.getBlockRef(block_header.parent_root).valueOr:
if block_header.parent_root in quarantine[].unviable:
quarantine[].addUnviable(block_root)
return dag.checkedReject("BlobSidecar: parent not validated")
else:
return errIgnore("SignedBlobSidecar: parent not found")
template parent: untyped = parentRes.get
quarantine[].addMissing(block_header.parent_root)
return errIgnore("BlobSidecar: parent not found")
# [REJECT] The sidecar is from a higher slot than the sidecar's
# block's parent (defined by sidecar.block_parent_root).
if sbs.message.slot <= parent.bid.slot:
return dag.checkedReject("SignedBlobSidecar: slot lower than parents'")
# block's parent (defined by `block_header.parent_root`).
if not (block_header.slot > parent.bid.slot):
return dag.checkedReject("BlobSidecar: slot lower than parents'")
# [REJECT] The sidecar is proposed by the expected proposer_index
# [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's
# block -- i.e. `get_checkpoint_block(store, block_header.parent_root,
# store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`.
let
finalized_checkpoint = getStateField(dag.headState, finalized_checkpoint)
ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot)
if ancestor.isNil:
# This shouldn't happen: we should always be able to trace the parent back
# to the finalized checkpoint (else it wouldn't be in the DAG)
return errIgnore("BlobSidecar: Can't find ancestor")
if not (
finalized_checkpoint.root == ancestor.root or
finalized_checkpoint.root.isZero):
quarantine[].addUnviable(block_root)
return dag.checkedReject(
"BlobSidecar: Finalized checkpoint not an ancestor")
# [REJECT] The sidecar is proposed by the expected `proposer_index`
# for the block's slot in the context of the current shuffling
# (defined by block_parent_root/slot). If the proposer_index
# cannot immediately be verified against the expected shuffling,
# the sidecar MAY be queued for later processing while proposers
# (defined by `block_header.parent_root`/`block_header.slot`).
# If the proposer_index cannot immediately be verified against the expected
# shuffling, the sidecar MAY be queued for later processing while proposers
# for the block's branch are calculated -- in such a case do not
# REJECT, instead IGNORE this message.
let
proposer = getProposer(
dag, parent, sbs.message.slot).valueOr:
warn "cannot compute proposer for blob"
return errIgnore("SignedBlobSidecar: Cannot compute proposer")
let proposer = getProposer(dag, parent, block_header.slot).valueOr:
warn "cannot compute proposer for blob"
return errIgnore("BlobSidecar: Cannot compute proposer") # internal issue
if uint64(proposer) != sbs.message.proposer_index:
return dag.checkedReject("SignedBlobSidecar: Unexpected proposer")
if uint64(proposer) != block_header.proposer_index:
return dag.checkedReject("BlobSidecar: Unexpected proposer")
# [REJECT] The proposer signature, signed_blob_sidecar.signature,
# is valid as verified by verify_sidecar_signature.
if not verify_blob_signature(
dag.forkAtEpoch(sbs.message.slot.epoch),
getStateField(dag.headState, genesis_validators_root),
sbs.message.slot,
sbs.message,
dag.validatorKey(proposer).get(),
sbs.signature):
return dag.checkedReject("SignedBlobSidecar: invalid blob signature")
# [REJECT] The proposer signature of `blob_sidecar.signed_block_header`,
# is valid with respect to the `block_header.proposer_index` pubkey.
if not verify_block_signature(
dag.forkAtEpoch(block_header.slot.epoch),
getStateField(dag.headState, genesis_validators_root),
block_header.slot,
block_root,
dag.validatorKey(proposer).get(),
blob_sidecar.signed_block_header.signature):
return dag.checkedReject("BlobSidecar: Invalid proposer signature")
# [IGNORE] The sidecar is the only sidecar with valid signature
# received for the tuple (sidecar.block_root, sidecar.index).
if blobQuarantine[].hasBlob(sbs.message):
return errIgnore(
"SignedBlobSidecar: already have blob with valid signature")
# [REJECT] The sidecar's blob is valid as verified by `verify_blob_kzg_proof(
# blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`.
block:
let ok = verifyProof(
blob_sidecar.blob,
blob_sidecar.kzg_commitment,
blob_sidecar.kzg_proof).valueOr:
return dag.checkedReject("BlobSidecar: blob verify failed")
if not ok:
return dag.checkedReject("BlobSidecar: blob invalid")
ok()
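
The [REJECT] subnet check above relies on `compute_subnet_for_blob_sidecar`; per the Deneb p2p spec this is simply the blob index reduced modulo `BLOB_SIDECAR_SUBNET_COUNT`. A minimal standalone sketch, assuming the mainnet value of 6 subnets (names here are illustrative, not the client's actual helpers):

```nim
# Illustrative sketch of the subnet mapping used by the [REJECT] check above.
# Assumes BLOB_SIDECAR_SUBNET_COUNT = 6 (mainnet Deneb value).
const BLOB_SIDECAR_SUBNET_COUNT = 6'u64

proc computeSubnetForBlobSidecar(blobIndex: uint64): uint64 =
  ## Maps a blob index to its gossip subnet: blob_index mod BLOB_SIDECAR_SUBNET_COUNT.
  blobIndex mod BLOB_SIDECAR_SUBNET_COUNT

when isMainModule:
  doAssert computeSubnetForBlobSidecar(0) == 0
  doAssert computeSubnetForBlobSidecar(5) == 5
  doAssert computeSubnetForBlobSidecar(6) == 0  # wraps around
```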
@ -498,7 +559,7 @@ proc validateBeaconBlock*(
blockRoot = shortLog(signed_beacon_block.root),
blck = shortLog(signed_beacon_block.message),
signature = shortLog(signed_beacon_block.signature)
return errIgnore("BeaconBlock: Parent not found")
return errIgnore("BeaconBlock: parent not found")
# Continues block parent validity checking in optimistic case, where it does
# appear as a `BlockRef` (and not handled above) but isn't usable for gossip
@ -539,12 +600,12 @@ proc validateBeaconBlock*(
let
proposer = getProposer(
dag, parent, signed_beacon_block.message.slot).valueOr:
warn "cannot compute proposer for message"
warn "cannot compute proposer for block"
return errIgnore("BeaconBlock: Cannot compute proposer") # internal issue
if uint64(proposer) != signed_beacon_block.message.proposer_index:
quarantine[].addUnviable(signed_beacon_block.root)
return dag.checkedReject("BeaconBlock: Unexpected proposer proposer")
return dag.checkedReject("BeaconBlock: Unexpected proposer")
# [REJECT] The proposer signature, signed_beacon_block.signature, is valid
# with respect to the proposer_index pubkey.
@ -1012,7 +1073,7 @@ proc validateAttesterSlashing*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#proposer_slashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#proposer_slashing
proc validateProposerSlashing*(
pool: ValidatorChangePool, proposer_slashing: ProposerSlashing):
Result[void, ValidationError] =
@ -1305,7 +1366,7 @@ proc validateContribution*(
return ok((blck.bid, sig, participants))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#light_client_finality_update
proc validateLightClientFinalityUpdate*(
pool: var LightClientPool, dag: ChainDAGRef,
finality_update: ForkedLightClientFinalityUpdate,
@ -1341,7 +1402,7 @@ proc validateLightClientFinalityUpdate*(
pool.latestForwardedFinalitySlot = finalized_slot
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
proc validateLightClientOptimisticUpdate*(
pool: var LightClientPool, dag: ChainDAGRef,
optimistic_update: ForkedLightClientOptimisticUpdate,

View File

@ -522,7 +522,7 @@ func toValidationError(
# previously forwarded `optimistic_update`s
errIgnore($r.error)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
proc processLightClientFinalityUpdate*(
self: var LightClientProcessor, src: MsgSource,
finality_update: ForkedLightClientFinalityUpdate
@ -537,7 +537,7 @@ proc processLightClientFinalityUpdate*(
self.latestFinalityUpdate = finality_update.toOptimistic
v
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
proc processLightClientOptimisticUpdate*(
self: var LightClientProcessor, src: MsgSource,
optimistic_update: ForkedLightClientOptimisticUpdate

View File

@ -149,11 +149,11 @@ typedef struct ETHBeaconState ETHBeaconState;
* representation - If successful.
* @return `NULL` - If the given `sszBytes` is malformed.
*
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/altair/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/bellatrix/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/capella/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/configs/README.md
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/configs/README.md
*/
ETH_RESULT_USE_CHECK
ETHBeaconState *ETHBeaconStateCreateFromSsz(
@ -595,7 +595,7 @@ const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
* @return Whether or not the next sync committee is currently known.
*
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/light-client/light-client.md
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/light-client.md
*/
ETH_RESULT_USE_CHECK
bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store);
@ -669,7 +669,7 @@ void ETHLightClientHeaderDestroy(ETHLightClientHeader *header);
*
* @return Pointer to a copy of the given header's beacon block root.
*
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/beacon-chain.md#hash_tree_root
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#hash_tree_root
*/
ETH_RESULT_USE_CHECK
ETHRoot *ETHLightClientHeaderCopyBeaconRoot(

View File

@ -79,7 +79,7 @@ proc ETHConsensusConfigCreateFromYaml(
## * `NULL` - If the given `config.yaml` is malformed or incompatible.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/configs/README.md
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/configs/README.md
let cfg = RuntimeConfig.new()
try:
cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0]
@ -143,11 +143,11 @@ proc ETHBeaconStateCreateFromSsz(
## * `NULL` - If the given `sszBytes` is malformed.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/configs/README.md
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/configs/README.md
let
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
return nil
@ -196,7 +196,7 @@ proc ETHRootDestroy(root: ptr Eth2Digest) {.exported.} =
## * `root` - Merkle root.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#custom-types
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#custom-types
root.destroy()
proc ETHForkDigestsCreateFromState(
@ -215,7 +215,7 @@ proc ETHForkDigestsCreateFromState(
## * Pointer to an initialized fork digests cache based on the beacon state.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_fork_digest
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_fork_digest
let forkDigests = ForkDigests.new()
forkDigests[] = ForkDigests.init(
cfg[], getStateField(state[], genesis_validators_root))
@ -266,7 +266,7 @@ proc ETHBeaconClockGetSlot(beaconClock: ptr BeaconClock): cint {.exported.} =
## * `0` - If genesis is still pending.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#custom-types
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#custom-types
beaconClock[].now().slotOrZero().cint
const lcDataFork = LightClientDataFork.high
@ -325,8 +325,8 @@ proc ETHLightClientStoreCreateFromBootstrap(
## See:
## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/light-client.md
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/light-client.md
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
let
mediaType = MediaType.init($mediaType)
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
@ -732,7 +732,7 @@ func ETHLightClientStoreGetFinalizedHeader(
## * Latest finalized header.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
addr store[].finalized_header
func ETHLightClientStoreIsNextSyncCommitteeKnown(
@ -751,8 +751,8 @@ func ETHLightClientStoreIsNextSyncCommitteeKnown(
## * Whether or not the next sync committee is currently known.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/light-client.md
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/light-client.md
store[].is_next_sync_committee_known
func ETHLightClientStoreGetOptimisticHeader(
@ -771,7 +771,7 @@ func ETHLightClientStoreGetOptimisticHeader(
## * Latest optimistic header.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
addr store[].optimistic_header
func ETHLightClientStoreGetSafetyThreshold(
@ -792,7 +792,7 @@ func ETHLightClientStoreGetSafetyThreshold(
## * Light client store safety threshold.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#get_safety_threshold
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#get_safety_threshold
store[].get_safety_threshold.cint
proc ETHLightClientHeaderCreateCopy(
@ -838,7 +838,7 @@ proc ETHLightClientHeaderCopyBeaconRoot(
## * Pointer to a copy of the given header's beacon block root.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#hash_tree_root
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#hash_tree_root
discard cfg # Future-proof against new fields, see `get_lc_execution_root`.
let root = Eth2Digest.new()
root[] = header[].beacon.hash_tree_root()
@ -860,7 +860,7 @@ func ETHLightClientHeaderGetBeacon(
## * Beacon block header.
##
## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconblockheader
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconblockheader
addr header[].beacon
func ETHBeaconBlockHeaderGetSlot(

View File

@ -347,7 +347,7 @@ proc installMessageValidators*(
digest = forkDigests[].atConsensusFork(contextFork)
# light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#light_client_finality_update
lightClient.network.addValidator(
getLightClientFinalityUpdateTopic(digest), proc (
msg: lcDataFork.LightClientFinalityUpdate
@ -355,7 +355,7 @@ proc installMessageValidators*(
validate(msg, contextFork, processLightClientFinalityUpdate))
# light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
lightClient.network.addValidator(
getLightClientOptimisticUpdateTopic(digest), proc (
msg: lcDataFork.LightClientOptimisticUpdate

View File

@ -174,7 +174,7 @@ type
MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [CatchableError].}
MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#goodbye
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#goodbye
DisconnectionReason* = enum
# might see other values on the wire!
ClientShutDown = 1
@ -517,6 +517,17 @@ proc disconnect*(peer: Peer, reason: DisconnectionReason,
trace "Exception while disconnecting peer", peer = peer.peerId,
reason = reason
proc releasePeer*(peer: Peer) =
## Checks for peer's score and disconnects peer if score is less than
## `PeerScoreLowLimit`.
if peer.connectionState notin {ConnectionState.Disconnecting,
ConnectionState.Disconnected}:
if peer.score < PeerScoreLowLimit:
debug "Peer was disconnected due to low score", peer = peer,
peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
score_high_limit = PeerScoreHighLimit
asyncSpawn(peer.disconnect(PeerScoreLow))
include eth/p2p/p2p_backends_helpers
include eth/p2p/p2p_tracing
@ -1233,7 +1244,7 @@ proc handleIncomingStream(network: Eth2Node,
finally:
await conn.closeWithEOF()
discard network.peerPool.checkPeerScore(peer)
releasePeer(peer)
proc toPeerAddr*(r: enr.TypedRecord,
proto: IpTransportProtocol): Result[PeerAddr, cstring] =
@ -1869,27 +1880,13 @@ proc new(T: type Eth2Node,
peer.score >= PeerScoreLowLimit
proc onDeletePeer(peer: Peer) =
if peer.connectionState notin {ConnectionState.Disconnecting,
ConnectionState.Disconnected}:
if peer.score < PeerScoreLowLimit:
debug "Peer was removed from PeerPool due to low score", peer = peer,
peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
score_high_limit = PeerScoreHighLimit
asyncSpawn(peer.disconnect(PeerScoreLow))
else:
debug "Peer was removed from PeerPool", peer = peer,
peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
score_high_limit = PeerScoreHighLimit
asyncSpawn(peer.disconnect(FaultOrError)) # Shouldn't actually happen!
peer.releasePeer()
node.peerPool.setScoreCheck(scoreCheck)
node.peerPool.setOnDeletePeer(onDeletePeer)
node
template publicKey(node: Eth2Node): keys.PublicKey =
node.discovery.privKey.toPublicKey
proc startListening*(node: Eth2Node) {.async.} =
if node.discoveryEnabled:
try:
@ -2173,14 +2170,6 @@ proc peerTrimmerHeartbeat(node: Eth2Node) {.async.} =
func asEthKey*(key: PrivateKey): keys.PrivateKey =
keys.PrivateKey(key.skkey)
proc initAddress(T: type MultiAddress, str: string): T =
let address = MultiAddress.init(str)
if IPFS.match(address) and matchPartial(multiaddress.TCP, address):
result = address
else:
raise newException(MultiAddressError,
"Invalid bootstrap node multi-address")
template tcpEndPoint(address, port): auto =
MultiAddress.init(address, tcpProtocol, port)
@ -2263,7 +2252,7 @@ proc getPersistentNetKeys*(
func gossipId(
data: openArray[byte], phase0Prefix, topic: string): seq[byte] =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/p2p-interface.md#topics-and-messages
const MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00]
let messageDigest = withEth2Hash:
h.update(MESSAGE_DOMAIN_VALID_SNAPPY)
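
The `gossipId` helper above follows the altair gossip message-id rule: for a valid snappy payload, the id is the first 20 bytes of SHA-256 over a domain-prefixed preimage made of `MESSAGE_DOMAIN_VALID_SNAPPY`, the topic length as a little-endian uint64, the topic string, and the decompressed message. A rough sketch of how that preimage is assembled (helper names are made up for illustration; only the layout is taken from the spec):

```nim
proc messageIdPreimage(topic: string, decompressed: seq[byte]): seq[byte] =
  ## Builds the altair message-id preimage:
  ## MESSAGE_DOMAIN_VALID_SNAPPY ++ uint64-LE(len(topic)) ++ topic ++ decompressed data.
  const MESSAGE_DOMAIN_VALID_SNAPPY = [0x01'u8, 0x00, 0x00, 0x00]
  result.add MESSAGE_DOMAIN_VALID_SNAPPY
  var topicLen = uint64(topic.len)
  for _ in 0 ..< 8:                      # little-endian uint64 encoding
    result.add byte(topicLen and 0xff)
    topicLen = topicLen shr 8
  for c in topic:
    result.add byte(c)
  result.add decompressed

when isMainModule:
  let pre = messageIdPreimage("/eth2/aabbccdd/beacon_block/ssz_snappy", @[byte 1, 2, 3])
  # The final gossip message id would be SHA-256(pre)[0 ..< 20].
  doAssert pre.len == 4 + 8 + "/eth2/aabbccdd/beacon_block/ssz_snappy".len + 3
```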
@ -2315,7 +2304,8 @@ proc createEth2Node*(rng: ref HmacDrbgContext,
cfg, getBeaconTime().slotOrZero.epoch, genesis_validators_root)
(extIp, extTcpPort, extUdpPort) = try: setupAddress(
config.nat, config.listenAddress, config.tcpPort, config.udpPort, clientId)
config.nat, ValidIpAddress.init config.listenAddress, config.tcpPort,
config.udpPort, clientId)
except CatchableError as exc: raise exc
except Exception as exc: raiseAssert exc.msg
@ -2337,7 +2327,8 @@ proc createEth2Node*(rng: ref HmacDrbgContext,
info "Adding privileged direct peer", peerId, address
res
hostAddress = tcpEndPoint(config.listenAddress, config.tcpPort)
hostAddress = tcpEndPoint(
ValidIpAddress.init config.listenAddress, config.tcpPort)
announcedAddresses = if extIp.isNone() or extTcpPort.isNone(): @[]
else: @[tcpEndPoint(extIp.get(), extTcpPort.get())]
@ -2568,7 +2559,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
node.metadata.attnets = attnets
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#attestation-subnet-subscription
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
let res = node.discovery.updateRecord({
enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
})
@ -2580,7 +2571,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
debug "Stability subnets changed; updated ENR attnets", attnets
proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/validator.md#sync-committee-subnet-stability
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/validator.md#sync-committee-subnet-stability
if node.metadata.syncnets == syncnets:
return
@ -2687,7 +2678,7 @@ proc broadcastBeaconBlock*(
node.broadcast(topic, blck)
proc broadcastBlobSidecar*(
node: Eth2Node, subnet_id: SubnetId, blob: deneb.SignedBlobSidecar):
node: Eth2Node, subnet_id: BlobId, blob: deneb.BlobSidecar):
Future[SendResult] =
let
forkPrefix = node.forkDigestAtEpoch(node.getWallEpoch)

View File

@ -41,8 +41,6 @@ type
Eth1Network* = enum
mainnet
ropsten
rinkeby
goerli
sepolia
holesky
@ -474,7 +472,7 @@ when const_preset in ["mainnet", "gnosis"]:
toOpenArray(metadata.genesis.bakedBytes, 0, sizeof(BeaconStateHeader) - 1),
BeaconStateHeader)
Opt.some header.genesis_validators_root
except SerializationError as err:
except SerializationError:
raiseAssert "Invalid baken-in genesis state"
else:
Opt.none Eth2Digest

View File

@ -114,6 +114,57 @@ declareGauge next_action_wait,
logScope: topics = "beacnde"
proc doRunTrustedNodeSync(
db: BeaconChainDB,
metadata: Eth2NetworkMetadata,
databaseDir: string,
eraDir: string,
restUrl: string,
stateId: Option[string],
trustedBlockRoot: Option[Eth2Digest],
backfill: bool,
reindex: bool,
downloadDepositSnapshot: bool) {.async.} =
let
cfg = metadata.cfg
syncTarget =
if stateId.isSome:
if trustedBlockRoot.isSome:
warn "Ignoring `trustedBlockRoot`, `stateId` is set",
stateId, trustedBlockRoot
TrustedNodeSyncTarget(
kind: TrustedNodeSyncKind.StateId,
stateId: stateId.get)
elif trustedBlockRoot.isSome:
TrustedNodeSyncTarget(
kind: TrustedNodeSyncKind.TrustedBlockRoot,
trustedBlockRoot: trustedBlockRoot.get)
else:
TrustedNodeSyncTarget(
kind: TrustedNodeSyncKind.StateId,
stateId: "finalized")
genesis =
if metadata.hasGenesis:
let genesisBytes = try: await metadata.fetchGenesisBytes()
except CatchableError as err:
error "Failed to obtain genesis state",
source = metadata.genesis.sourceDesc,
err = err.msg
quit 1
newClone(readSszForkedHashedBeaconState(cfg, genesisBytes))
else: nil
await db.doTrustedNodeSync(
cfg,
databaseDir,
eraDir,
restUrl,
syncTarget,
backfill,
reindex,
downloadDepositSnapshot,
genesis)
func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs =
case stdoutKind
of StdoutLogKind.Auto: raiseAssert "inadmissible here"
@ -372,11 +423,15 @@ proc initFullNode(
validatorChangePool, node.attachedValidators, syncCommitteeMsgPool,
lightClientPool, quarantine, blobQuarantine, rng, getBeaconTime, taskpool)
syncManager = newSyncManager[Peer, PeerId](
node.network.peerPool, dag.cfg.DENEB_FORK_EPOCH, SyncQueueKind.Forward, getLocalHeadSlot,
node.network.peerPool,
dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
SyncQueueKind.Forward, getLocalHeadSlot,
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
getFrontfillSlot, dag.tail.slot, blockVerifier)
backfiller = newSyncManager[Peer, PeerId](
node.network.peerPool, dag.cfg.DENEB_FORK_EPOCH, SyncQueueKind.Backward, getLocalHeadSlot,
node.network.peerPool,
dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
SyncQueueKind.Backward, getLocalHeadSlot,
getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
getFrontfillSlot, dag.backfill.slot, blockVerifier,
maxHeadAge = 0)
@ -504,6 +559,26 @@ proc init*(T: type BeaconNode,
)
db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false)
if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr:
if config.trustedStateRoot.isNone and config.trustedBlockRoot.isNone:
warn "Ignoring `--external-beacon-api-url`, neither " &
"`--trusted-block-root` nor `--trusted-state-root` are provided",
externalBeaconApiUrl = config.externalBeaconApiUrl.get,
trustedBlockRoot = config.trustedBlockRoot,
trustedStateRoot = config.trustedStateRoot
else:
await db.doRunTrustedNodeSync(
metadata,
config.databaseDir,
config.eraDir,
config.externalBeaconApiUrl.get,
config.trustedStateRoot.map do (x: Eth2Digest) -> string:
"0x" & x.data.toHex,
config.trustedBlockRoot,
backfill = false,
reindex = false,
downloadDepositSnapshot = false)
if config.finalizedCheckpointBlock.isSome:
warn "--finalized-checkpoint-block has been deprecated, ignoring"
@ -800,7 +875,7 @@ func forkDigests(node: BeaconNode): auto =
node.dag.forkDigests.deneb]
forkDigestsArray
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#attestation-subnet-subscription
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-subscription
proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) =
if node.gossipState.card == 0:
# When disconnected, updateGossipState is responsible for all things
@ -1255,7 +1330,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
proc pruneBlobs(node: BeaconNode, slot: Slot) =
let blobPruneEpoch = (slot.epoch -
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1)
node.dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1)
if slot.is_epoch() and blobPruneEpoch >= node.dag.cfg.DENEB_FORK_EPOCH:
var blocks: array[SLOTS_PER_EPOCH.int, BlockId]
var count = 0
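
The prune target above keeps roughly `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` worth of blob data; with the mainnet value of 4096 epochs (about 18 days), a node at epoch E prunes sidecars from epoch E - 4097 and earlier, provided that epoch is at or past `DENEB_FORK_EPOCH`. A small sketch of the arithmetic, with the constant assumed rather than read from the runtime config:

```nim
# Sketch of the blob prune target; assumes the mainnet value
# MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096.
const MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS = 4096'u64

func blobPruneEpoch(currentEpoch: uint64): uint64 =
  ## Epoch whose blobs fall out of the retention window.
  currentEpoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1

when isMainModule:
  doAssert blobPruneEpoch(10_000) == 5903  # 10000 - 4096 - 1
```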
@ -1598,7 +1673,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, signedAggregateAndProof)))
# attester_slashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#attester_slashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attester_slashing
node.network.addValidator(
getAttesterSlashingsTopic(digest), proc (
attesterSlashing: AttesterSlashing
@ -1642,7 +1717,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg, idx)))
# sync_committee_contribution_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
node.network.addAsyncValidator(
getSyncCommitteeContributionAndProofTopic(digest), proc (
msg: SignedContributionAndProof
@ -1652,7 +1727,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg)))
when consensusFork >= ConsensusFork.Capella:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/p2p-interface.md#bls_to_execution_change
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/p2p-interface.md#bls_to_execution_change
node.network.addAsyncValidator(
getBlsToExecutionChangeTopic(digest), proc (
msg: SignedBLSToExecutionChange
@ -1662,18 +1737,18 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg)))
when consensusFork >= ConsensusFork.Deneb:
# blob_sidecar_{index}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
for i in 0 ..< BLOB_SIDECAR_SUBNET_COUNT:
# blob_sidecar_{subnet_id}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
for it in BlobId:
closureScope: # Needed for inner `proc`; don't lift it out of loop.
let idx = i
let subnet_id = it
node.network.addValidator(
getBlobSidecarTopic(digest, SubnetId(idx)), proc (
signedBlobSidecar: SignedBlobSidecar
getBlobSidecarTopic(digest, subnet_id), proc (
blobSidecar: deneb.BlobSidecar
): ValidationResult =
toValidationResult(
node.processor[].processSignedBlobSidecar(
MsgSource.gossip, signedBlobSidecar, idx)))
node.processor[].processBlobSidecar(
MsgSource.gossip, blobSidecar, subnet_id)))
node.installLightClientMessageValidators()
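
The subscription loop above installs one validator per `BlobId` subnet; the resulting gossip topic names follow the usual `/eth2/<fork_digest>/blob_sidecar_<subnet_id>/ssz_snappy` pattern from the Deneb p2p spec. A standalone approximation of that formatting (the client uses its own `getBlobSidecarTopic` helper; this sketch only mirrors the topic layout):

```nim
import std/strformat

# Illustrative only: topic layout per the Deneb p2p spec,
# /eth2/<fork_digest_hex>/blob_sidecar_<subnet_id>/ssz_snappy.
proc blobSidecarTopic(forkDigestHex: string, subnetId: uint64): string =
  &"/eth2/{forkDigestHex}/blob_sidecar_{subnetId}/ssz_snappy"

when isMainModule:
  doAssert blobSidecarTopic("6a95a1a9", 3) ==
    "/eth2/6a95a1a9/blob_sidecar_3/ssz_snappy"
```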
@ -2015,9 +2090,7 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai
bnStatus = BeaconNodeStatus.Stopping
c_signal(ansi_c.SIGTERM, SIGTERMHandler)
let node = waitFor BeaconNode.init(rng, config, metadata)
if node.dag.cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
if metadata.cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
let res =
if config.trustedSetupFile.isNone:
conf.loadKzgTrustedSetup()
@ -2026,6 +2099,8 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai
if res.isErr():
raiseAssert res.error()
let node = waitFor BeaconNode.init(rng, config, metadata)
if bnStatus == BeaconNodeStatus.Stopping:
return
@ -2057,7 +2132,7 @@ proc doRecord(config: BeaconNodeConf, rng: var HmacDrbgContext) {.
let record = enr.Record.init(
config.seqNumber,
netKeys.seckey.asEthKey,
some(config.ipExt),
some(ValidIpAddress.init config.ipExt),
some(config.tcpPortExt),
some(config.udpPortExt),
fieldPairs).expect("Record within size limits")
@ -2143,51 +2218,24 @@ proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [CatchableError].} =
of BNStartUpCmd.web3: doWeb3Cmd(config, rng[])
of BNStartUpCmd.slashingdb: doSlashingInterchange(config)
of BNStartUpCmd.trustedNodeSync:
let
network = loadEth2Network(config)
cfg = network.cfg
syncTarget =
if config.stateId.isSome:
if config.lcTrustedBlockRoot.isSome:
warn "Ignoring `trustedBlockRoot`, `stateId` is set",
stateId = config.stateId,
trustedBlockRoot = config.lcTrustedBlockRoot
TrustedNodeSyncTarget(
kind: TrustedNodeSyncKind.StateId,
stateId: config.stateId.get)
elif config.lcTrustedBlockRoot.isSome:
TrustedNodeSyncTarget(
kind: TrustedNodeSyncKind.TrustedBlockRoot,
trustedBlockRoot: config.lcTrustedBlockRoot.get)
else:
TrustedNodeSyncTarget(
kind: TrustedNodeSyncKind.StateId,
stateId: "finalized")
genesis =
if network.hasGenesis:
let genesisBytes = try: waitFor network.fetchGenesisBytes()
except CatchableError as err:
error "Failed to obtain genesis state",
source = network.genesis.sourceDesc,
err = err.msg
quit 1
newClone(readSszForkedHashedBeaconState(cfg, genesisBytes))
else: nil
if config.blockId.isSome():
error "--blockId option has been removed - use --state-id instead!"
quit 1
waitFor doTrustedNodeSync(
cfg,
let
metadata = loadEth2Network(config)
db = BeaconChainDB.new(config.databaseDir, metadata.cfg, inMemory = false)
waitFor db.doRunTrustedNodeSync(
metadata,
config.databaseDir,
config.eraDir,
config.trustedNodeUrl,
syncTarget,
config.stateId,
config.lcTrustedBlockRoot,
config.backfillBlocks,
config.reindex,
config.downloadDepositSnapshot,
genesis)
config.downloadDepositSnapshot)
db.close()
{.pop.} # TODO moduletests exceptions

View File

@ -356,9 +356,10 @@ proc init*(T: type RestServerRef,
allowedOrigin: Option[string],
validateFn: PatternCallback,
config: AnyConf): T =
let address = initTAddress(ip, port)
let serverFlags = {HttpServerFlags.QueryCommaSeparatedArray,
HttpServerFlags.NotifyDisconnect}
let
address = initTAddress(ip, port)
serverFlags = {HttpServerFlags.QueryCommaSeparatedArray,
HttpServerFlags.NotifyDisconnect}
# We increase default timeout to help validator clients who poll our server
# at least once per slot (12.seconds).
let
@ -370,26 +371,20 @@ proc init*(T: type RestServerRef,
maxHeadersSize = config.restMaxRequestHeadersSize * 1024
maxRequestBodySize = config.restMaxRequestBodySize * 1024
let res = try:
RestServerRef.new(RestRouter.init(validateFn, allowedOrigin),
address, serverFlags = serverFlags,
httpHeadersTimeout = headersTimeout,
maxHeadersSize = maxHeadersSize,
maxRequestBodySize = maxRequestBodySize)
except CatchableError as err:
notice "Rest server could not be started", address = $address,
reason = err.msg
return nil
let res = RestServerRef.new(RestRouter.init(validateFn, allowedOrigin),
address, serverFlags = serverFlags,
httpHeadersTimeout = headersTimeout,
maxHeadersSize = maxHeadersSize,
maxRequestBodySize = maxRequestBodySize,
errorType = string)
if res.isErr():
notice "Rest server could not be started", address = $address,
notice "REST HTTP server could not be started", address = $address,
reason = res.error()
nil
else:
notice "Starting REST HTTP server",
url = "http://" & $ip & ":" & $port & "/"
res.get()
let server = res.get()
notice "Starting REST HTTP server", url = "http://" & $server.localAddress()
server
type
KeymanagerInitResult* = object

View File

@ -873,28 +873,29 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
of ConsensusFork.Phase0:
var blck = restBlock.phase0Data
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
blck, Opt.none(seq[BlobSidecar]))
of ConsensusFork.Altair:
var blck = restBlock.altairData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
blck, Opt.none(seq[BlobSidecar]))
of ConsensusFork.Bellatrix:
var blck = restBlock.bellatrixData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
blck, Opt.none(seq[BlobSidecar]))
of ConsensusFork.Capella:
var blck = restBlock.capellaData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
blck, Opt.none(seq[BlobSidecar]))
of ConsensusFork.Deneb:
var blck = restBlock.denebData.signed_block
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(
blck, Opt.some(asSeq restBlock.denebData.signed_blob_sidecars))
blck, Opt.some(blck.create_blob_sidecars(
restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)))
if res.isErr():
return RestApiResponse.jsonError(
@ -906,19 +907,32 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
# https://ethereum.github.io/beacon-APIs/#/Beacon/publishBlockV2
router.api(MethodPost, "/eth/v2/beacon/blocks") do (
broadcast_validation: Option[BroadcastValidationType],
contentBody: Option[ContentBody]) -> RestApiResponse:
let res =
block:
if contentBody.isNone():
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
if request.headers.getString("broadcast_validation") != "gossip":
# TODO (henridf): support 'consensus' and 'consensus_and_equivocation'
# broadcast_validation
return RestApiResponse.jsonError(
Http500, "gossip broadcast_validation only supported")
let
body = contentBody.get()
version = request.headers.getString("eth-consensus-version")
validation =
block:
let res =
if broadcast_validation.isNone():
BroadcastValidationType.Gossip
else:
broadcast_validation.get().valueOr:
return RestApiResponse.jsonError(Http400,
InvalidBroadcastValidationType)
# TODO (henridf): support 'consensus' and
# 'consensus_and_equivocation' broadcast_validation types.
if res != BroadcastValidationType.Gossip:
return RestApiResponse.jsonError(Http500,
"Only `gossip` broadcast_validation option supported")
res
body =
block:
if contentBody.isNone():
return RestApiResponse.jsonError(Http400, EmptyRequestBodyError)
contentBody.get()
var
restBlock = decodeBodyJsonOrSsz(RestPublishedSignedBlockContents,
body, version).valueOr:
@ -935,28 +949,29 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
of ConsensusFork.Phase0:
var blck = restBlock.phase0Data
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
blck, Opt.none(seq[BlobSidecar]))
of ConsensusFork.Altair:
var blck = restBlock.altairData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
blck, Opt.none(seq[BlobSidecar]))
of ConsensusFork.Bellatrix:
var blck = restBlock.bellatrixData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
blck, Opt.none(seq[BlobSidecar]))
of ConsensusFork.Capella:
var blck = restBlock.capellaData
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(blck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
blck, Opt.none(seq[BlobSidecar]))
of ConsensusFork.Deneb:
var blck = restBlock.denebData.signed_block
blck.root = hash_tree_root(blck.message)
await node.router.routeSignedBeaconBlock(
blck, Opt.some(asSeq restBlock.denebData.signed_blob_sidecars))
blck, Opt.some(blck.create_blob_sidecars(
restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)))
if res.isErr():
return RestApiResponse.jsonError(
@ -1053,8 +1068,8 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
let res = withBlck(forked):
forkyBlck.root = hash_tree_root(forkyBlck.message)
await node.router.routeSignedBeaconBlock(forkyBlck,
Opt.none(SignedBlobSidecars))
await node.router.routeSignedBeaconBlock(
forkyBlck, Opt.none(seq[BlobSidecar]))
if res.isErr():
return RestApiResponse.jsonError(
@ -1374,3 +1389,50 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
VoluntaryExitValidationError,
$res.error())
return RestApiResponse.jsonMsgResponse(VoluntaryExitValidationSuccess)
# https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.2#/Beacon/getBlobSidecars
# https://github.com/ethereum/beacon-APIs/blob/v2.4.2/apis/beacon/blob_sidecars/blob_sidecars.yaml
router.api(MethodGet, "/eth/v1/beacon/blob_sidecars/{block_id}") do (
block_id: BlockIdent, indices: seq[uint64]) -> RestApiResponse:
let
bid = block_id.valueOr:
return RestApiResponse.jsonError(Http400, InvalidBlockIdValueError,
$error)
bdata = node.getForkedBlock(bid).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
contentType = block:
let res = preferredContentType(jsonMediaType,
sszMediaType)
if res.isErr():
return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
res.get()
# https://github.com/ethereum/beacon-APIs/blob/v2.4.2/types/deneb/blob_sidecar.yaml#L2-L28
let data = newClone(default(List[BlobSidecar, Limit MAX_BLOBS_PER_BLOCK]))
if indices.isErr:
return RestApiResponse.jsonError(Http400,
InvalidSidecarIndexValueError)
let indexFilter = indices.get.toHashSet
for blobIndex in 0'u64 ..< MAX_BLOBS_PER_BLOCK:
if indexFilter.len > 0 and blobIndex notin indexFilter:
continue
var blobSidecar = new BlobSidecar
if node.dag.db.getBlobSidecar(bdata.root, blobIndex, blobSidecar[]):
discard data[].add blobSidecar[]
return
if contentType == sszMediaType:
RestApiResponse.sszResponse(
data[], headers = [("eth-consensus-version",
node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch).toString())])
elif contentType == jsonMediaType:
RestApiResponse.jsonResponse(data)
else:
RestApiResponse.jsonError(Http500, InvalidAcceptError)
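
The new `blob_sidecars` handler above treats an empty `indices` query as "return every stored sidecar for the block" and otherwise returns only the requested indices, bounded by `MAX_BLOBS_PER_BLOCK`. A distilled, standalone sketch of that filter (the real handler additionally performs content negotiation and the database lookup; the constant is the assumed mainnet preset value):

```nim
import std/sets

const MAX_BLOBS_PER_BLOCK = 6'u64  # assumed mainnet Deneb preset value

proc selectBlobIndices(requested: seq[uint64]): seq[uint64] =
  ## Empty `requested` means "all indices"; otherwise keep only those listed.
  let filter = requested.toHashSet
  for blobIndex in 0'u64 ..< MAX_BLOBS_PER_BLOCK:
    if filter.len == 0 or blobIndex in filter:
      result.add blobIndex

when isMainModule:
  doAssert selectBlobIndices(@[]) == @[0'u64, 1, 2, 3, 4, 5]
  doAssert selectBlobIndices(@[4'u64, 1]) == @[1'u64, 4]
```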

View File

@ -88,7 +88,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VOLUNTARY_EXITS:
Base10.toString(MAX_VOLUNTARY_EXITS),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/mainnet/altair.yaml
INACTIVITY_PENALTY_QUOTIENT_ALTAIR:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR),
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR:
@ -104,7 +104,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
UPDATE_TIMEOUT:
Base10.toString(UPDATE_TIMEOUT),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/mainnet/bellatrix.yaml
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX),
MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX:
@ -120,7 +120,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_EXTRA_DATA_BYTES:
Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/mainnet/capella.yaml
MAX_BLS_TO_EXECUTION_CHANGES:
Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)),
MAX_WITHDRAWALS_PER_PAYLOAD:
@ -128,7 +128,17 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP:
Base10.toString(uint64(MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)),
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/configs/mainnet.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/mainnet/deneb.yaml
FIELD_ELEMENTS_PER_BLOB:
Base10.toString(deneb_preset.FIELD_ELEMENTS_PER_BLOB),
MAX_BLOB_COMMITMENTS_PER_BLOCK:
Base10.toString(MAX_BLOB_COMMITMENTS_PER_BLOCK),
MAX_BLOBS_PER_BLOCK:
Base10.toString(MAX_BLOBS_PER_BLOCK),
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH:
Base10.toString(uint64(KZG_COMMITMENT_INCLUSION_PROOF_DEPTH)),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/configs/mainnet.yaml
PRESET_BASE:
cfg.PRESET_BASE,
CONFIG_NAME:
@ -187,12 +197,56 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
Base10.toString(cfg.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT),
PROPOSER_SCORE_BOOST:
Base10.toString(PROPOSER_SCORE_BOOST),
REORG_HEAD_WEIGHT_THRESHOLD:
Base10.toString(REORG_HEAD_WEIGHT_THRESHOLD),
REORG_PARENT_WEIGHT_THRESHOLD:
Base10.toString(REORG_PARENT_WEIGHT_THRESHOLD),
REORG_MAX_EPOCHS_SINCE_FINALIZATION:
Base10.toString(uint64(REORG_MAX_EPOCHS_SINCE_FINALIZATION)),
DEPOSIT_CHAIN_ID:
Base10.toString(cfg.DEPOSIT_CHAIN_ID),
DEPOSIT_NETWORK_ID:
Base10.toString(cfg.DEPOSIT_NETWORK_ID),
DEPOSIT_CONTRACT_ADDRESS:
$cfg.DEPOSIT_CONTRACT_ADDRESS,
GOSSIP_MAX_SIZE:
Base10.toString(GOSSIP_MAX_SIZE),
MAX_REQUEST_BLOCKS:
Base10.toString(MAX_REQUEST_BLOCKS),
EPOCHS_PER_SUBNET_SUBSCRIPTION:
Base10.toString(EPOCHS_PER_SUBNET_SUBSCRIPTION),
MIN_EPOCHS_FOR_BLOCK_REQUESTS:
Base10.toString(cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS),
MAX_CHUNK_SIZE:
Base10.toString(MAX_CHUNK_SIZE),
TTFB_TIMEOUT:
Base10.toString(TTFB_TIMEOUT),
RESP_TIMEOUT:
Base10.toString(RESP_TIMEOUT),
ATTESTATION_PROPAGATION_SLOT_RANGE:
Base10.toString(ATTESTATION_PROPAGATION_SLOT_RANGE),
MAXIMUM_GOSSIP_CLOCK_DISPARITY:
Base10.toString(MAXIMUM_GOSSIP_CLOCK_DISPARITY.milliseconds.uint64),
MESSAGE_DOMAIN_INVALID_SNAPPY:
to0xHex(MESSAGE_DOMAIN_INVALID_SNAPPY),
MESSAGE_DOMAIN_VALID_SNAPPY:
to0xHex(MESSAGE_DOMAIN_VALID_SNAPPY),
SUBNETS_PER_NODE:
Base10.toString(SUBNETS_PER_NODE),
ATTESTATION_SUBNET_COUNT:
Base10.toString(ATTESTATION_SUBNET_COUNT),
ATTESTATION_SUBNET_EXTRA_BITS:
Base10.toString(ATTESTATION_SUBNET_EXTRA_BITS),
ATTESTATION_SUBNET_PREFIX_BITS:
Base10.toString(ATTESTATION_SUBNET_PREFIX_BITS),
MAX_REQUEST_BLOCKS_DENEB:
Base10.toString(MAX_REQUEST_BLOCKS_DENEB),
MAX_REQUEST_BLOB_SIDECARS:
Base10.toString(MAX_REQUEST_BLOB_SIDECARS),
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS:
Base10.toString(cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS),
BLOB_SIDECAR_SUBNET_COUNT:
Base10.toString(BLOB_SIDECAR_SUBNET_COUNT),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#constants
# GENESIS_SLOT
@ -255,8 +309,6 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#constants
TARGET_AGGREGATORS_PER_COMMITTEE:
Base10.toString(TARGET_AGGREGATORS_PER_COMMITTEE),
ATTESTATION_SUBNET_COUNT:
Base10.toString(ATTESTATION_SUBNET_COUNT),
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/validator.md#constants
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE:

View File

@ -241,4 +241,7 @@ const
"Failed to obtain fork information"
InvalidTimestampValue* =
"Invalid or missing timestamp value"
InvalidSidecarIndexValueError* =
"Invalid blob index"
InvalidBroadcastValidationType* =
"Invalid broadcast_validation type value"

View File

@ -176,24 +176,19 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
if state.isErr():
return RestApiResponse.jsonError(Http400, InvalidPeerStateValueError,
$state.error())
let sres = validateState(state.get())
if sres.isErr():
validateState(state.get()).valueOr:
return RestApiResponse.jsonError(Http400, InvalidPeerStateValueError,
$sres.error())
sres.get()
$error)
let directionMask =
block:
if direction.isErr():
return RestApiResponse.jsonError(Http400,
InvalidPeerDirectionValueError,
$direction.error())
let dres = validateDirection(direction.get())
if dres.isErr():
validateDirection(direction.get()).valueOr:
return RestApiResponse.jsonError(Http400,
InvalidPeerDirectionValueError,
$dres.error())
dres.get()
$error)
var res: seq[RestNodePeer]
for peer in node.network.peers.values():
if (peer.connectionState in connectionMask) and
@ -209,7 +204,8 @@ proc installNodeApiHandlers*(router: var RestRouter, node: BeaconNode) =
proto: node.network.switch.peerStore[ProtoVersionBook][peer.peerId]
)
res.add(peer)
return RestApiResponse.jsonResponseWMeta(res, (count: uint64(len(res))))
return RestApiResponse.jsonResponseWMeta(res,
(count: RestNumeric(len(res))))
# https://ethereum.github.io/beacon-APIs/#/Node/getPeerCount
router.api(MethodGet, "/eth/v1/node/peer_count") do () -> RestApiResponse:

View File

@ -20,10 +20,6 @@ export
results, eth2_rest_serialization, blockchain_dag, presto, rest_types,
rest_constants, rest_common
type
ValidatorIndexError* {.pure.} = enum
UnsupportedValue, TooHighValue
func match(data: openArray[char], charset: set[char]): int =
for ch in data:
if ch notin charset:
@ -216,26 +212,6 @@ template strData*(body: ContentBody): string =
bind fromBytes
string.fromBytes(body.data)
func toValidatorIndex*(value: RestValidatorIndex): Result[ValidatorIndex,
ValidatorIndexError] =
when sizeof(ValidatorIndex) == 4:
if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
# On x86 platform Nim allows only `int32` indexes, so all the indexes in
# range `2^31 <= x < 2^32` are not supported.
if uint64(value) <= uint64(high(int32)):
ok(ValidatorIndex(value))
else:
err(ValidatorIndexError.UnsupportedValue)
else:
err(ValidatorIndexError.TooHighValue)
elif sizeof(ValidatorIndex) == 8:
if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
ok(ValidatorIndex(value))
else:
err(ValidatorIndexError.TooHighValue)
else:
doAssert(false, "ValidatorIndex type size is incorrect")
func syncCommitteeParticipants*(forkedState: ForkedHashedBeaconState,
epoch: Epoch
): Result[seq[ValidatorPubKey], cstring] =

View File

@ -392,21 +392,13 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
qslot, proposer, qrandao, qskip_randao_verification):
return RestApiResponse.jsonError(Http400, InvalidRandaoRevealValue)
let res =
case node.dag.cfg.consensusForkAtEpoch(qslot.epoch)
of ConsensusFork.Deneb:
let res = withConsensusFork(
node.dag.cfg.consensusForkAtEpoch(qslot.epoch)):
when consensusFork >= ConsensusFork.Bellatrix:
await makeBeaconBlockForHeadAndSlot(
deneb.ExecutionPayloadForSigning,
consensusFork.ExecutionPayloadForSigning,
node, qrandao, proposer, qgraffiti, qhead, qslot)
of ConsensusFork.Capella:
await makeBeaconBlockForHeadAndSlot(
capella.ExecutionPayloadForSigning,
node, qrandao, proposer, qgraffiti, qhead, qslot)
of ConsensusFork.Bellatrix:
await makeBeaconBlockForHeadAndSlot(
bellatrix.ExecutionPayloadForSigning,
node, qrandao, proposer, qgraffiti, qhead, qslot)
of ConsensusFork.Altair, ConsensusFork.Phase0:
else:
return RestApiResponse.jsonError(Http400, InvalidSlotValueError)
if res.isErr():
return RestApiResponse.jsonError(Http400, res.error())
@ -414,39 +406,19 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return
withBlck(message.blck):
let data =
when forkyBlck is deneb.BeaconBlock:
let bundle = message.blobsBundleOpt.get()
let blockRoot = hash_tree_root(forkyBlck)
var sidecars = newSeqOfCap[BlobSidecar](bundle.blobs.len)
for i in 0..<bundle.blobs.len:
let sidecar = deneb.BlobSidecar(
block_root: blockRoot,
index: BlobIndex(i),
slot: forkyBlck.slot,
block_parent_root: forkyBlck.parent_root,
proposer_index: forkyBlck.proposer_index,
blob: bundle.blobs[i],
kzg_commitment: bundle.kzgs[i],
kzg_proof: bundle.proofs[i]
)
sidecars.add(sidecar)
when consensusFork >= ConsensusFork.Deneb:
let blobsBundle = message.blobsBundleOpt.get()
DenebBlockContents(
`block`: forkyBlck,
blob_sidecars: List[BlobSidecar,
Limit MAX_BLOBS_PER_BLOCK].init(sidecars))
elif forkyBlck is phase0.BeaconBlock or
forkyBlck is altair.BeaconBlock or
forkyBlck is bellatrix.BeaconBlock or
forkyBlck is capella.BeaconBlock:
forkyBlck
kzg_proofs: blobsBundle.proofs,
blobs: blobsBundle.blobs)
else:
static: raiseAssert "produceBlockV2 received unexpected version"
forkyBlck
if contentType == sszMediaType:
let headers = [("eth-consensus-version", message.blck.kind.toString())]
let headers = [("eth-consensus-version", consensusFork.toString())]
RestApiResponse.sszResponse(data, headers)
elif contentType == jsonMediaType:
RestApiResponse.jsonResponseWVersion(data, message.blck.kind)
RestApiResponse.jsonResponseWVersion(data, consensusFork)
else:
raiseAssert "preferredContentType() returns invalid content type"

View File

@ -142,13 +142,13 @@ const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/validator.md#prepare-sync-committee-message
syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/validator.md#broadcast-sync-committee-contribution
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/validator.md#broadcast-sync-committee-contribution
syncContributionSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#sync-committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#sync-committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
@ -188,7 +188,7 @@ func epoch*(slot: Slot): Epoch = # aka compute_epoch_at_slot
if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH
else: Epoch(slot div SLOTS_PER_EPOCH)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start
## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`)
(slot mod SLOTS_PER_EPOCH)
@ -196,14 +196,14 @@ func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_st
template is_epoch*(slot: Slot): bool =
slot.since_epoch_start == 0
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch
## Return the start slot of ``epoch``.
const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH)
if epoch >= maxEpoch: FAR_FUTURE_SLOT
else: Slot(epoch * SLOTS_PER_EPOCH)
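
The helpers above are plain integer arithmetic; with the mainnet `SLOTS_PER_EPOCH` of 32, a few worked values as a throwaway sketch (not the client's `Slot`/`Epoch` types):

```nim
const SLOTS_PER_EPOCH = 32'u64  # mainnet preset

when isMainModule:
  let slot = 100_000'u64
  doAssert slot div SLOTS_PER_EPOCH == 3125       # epoch(slot)
  doAssert slot mod SLOTS_PER_EPOCH == 0          # since_epoch_start == 0, so is_epoch
  doAssert 3125'u64 * SLOTS_PER_EPOCH == 100_000  # start_slot(epoch)
```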
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_previous_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_previous_epoch
func get_previous_epoch*(current_epoch: Epoch): Epoch =
## Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
if current_epoch == GENESIS_EPOCH:

View File

@ -22,7 +22,7 @@ from ./datatypes/capella import BeaconState, ExecutionPayloadHeader, Withdrawal
export extras, forks, validator, chronicles
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#increase_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#increase_balance
func increase_balance*(balance: var Gwei, delta: Gwei) =
balance += delta
@ -32,7 +32,7 @@ func increase_balance*(
if delta != 0: # avoid dirtying the balance cache if not needed
increase_balance(state.balances.mitem(index), delta)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#decrease_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#decrease_balance
func decrease_balance*(balance: var Gwei, delta: Gwei) =
balance =
if delta > balance:
@ -66,13 +66,13 @@ func get_validator_from_deposit*(deposit: DepositData):
effective_balance: effective_balance
)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_activation_exit_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_activation_exit_epoch
func compute_activation_exit_epoch*(epoch: Epoch): Epoch =
## Return the epoch during which validator activations and exits initiated in
## ``epoch`` take effect.
epoch + 1 + MAX_SEED_LOOKAHEAD
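# E.g. with the mainnet MAX_SEED_LOOKAHEAD of 4, an activation or exit initiated
# in epoch N takes effect in epoch N + 5.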
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_validator_churn_limit
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_validator_churn_limit
func get_validator_churn_limit*(
cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache):
uint64 =
@ -91,7 +91,7 @@ func get_validator_activation_churn_limit*(
cfg.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT,
get_validator_churn_limit(cfg, state, cache))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#initiate_validator_exit
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#initiate_validator_exit
func initiate_validator_exit*(
cfg: RuntimeConfig, state: var ForkyBeaconState,
index: ValidatorIndex, cache: var StateCache): Result[void, cstring] =
@ -142,9 +142,9 @@ func initiate_validator_exit*(
from ./datatypes/deneb import BeaconState
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_slashing_penalty*(state: ForkyBeaconState,
validator_effective_balance: Gwei): Gwei =
# TODO Consider whether this is better than splitting the functions apart; in
@ -159,15 +159,15 @@ func get_slashing_penalty*(state: ForkyBeaconState,
else:
{.fatal: "invalid BeaconState type".}
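# Illustrative numbers for a 32 ETH effective balance, given the mainnet
# MIN_SLASHING_PENALTY_QUOTIENT values of 128 / 64 / 32: the immediate slashing
# penalty is 0.25 ETH in phase0, 0.5 ETH in Altair and 1 ETH from Bellatrix on.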
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_whistleblower_reward*(validator_effective_balance: Gwei): Gwei =
validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT
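# E.g. for a 32 ETH effective balance and the mainnet WHISTLEBLOWER_REWARD_QUOTIENT
# of 512, the reward is 32_000_000_000 div 512 = 62_500_000 Gwei (0.0625 ETH).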
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei =
when state is phase0.BeaconState:
whistleblower_reward div PROPOSER_REWARD_QUOTIENT
@ -177,9 +177,9 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G
else:
{.fatal: "invalid BeaconState type".}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#modified-slash_validator
proc slash_validator*(
cfg: RuntimeConfig, state: var ForkyBeaconState,
slashed_index: ValidatorIndex, cache: var StateCache):
@ -232,7 +232,7 @@ func genesis_time_from_eth1_timestamp(
cfg: RuntimeConfig, eth1_timestamp: uint64): uint64 =
eth1_timestamp + cfg.GENESIS_DELAY
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#genesis-block
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#genesis-block
func get_initial_beacon_block*(state: phase0.HashedBeaconState):
phase0.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
@ -256,7 +256,7 @@ func get_initial_beacon_block*(state: altair.HashedBeaconState):
altair.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#testing
func get_initial_beacon_block*(state: bellatrix.HashedBeaconState):
bellatrix.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
@ -268,7 +268,7 @@ func get_initial_beacon_block*(state: bellatrix.HashedBeaconState):
bellatrix.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#testing
func get_initial_beacon_block*(state: capella.HashedBeaconState):
capella.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted
@ -297,7 +297,7 @@ func get_initial_beacon_block*(state: ForkedHashedBeaconState):
withState(state):
ForkedTrustedSignedBeaconBlock.init(get_initial_beacon_block(forkyState))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_block_root_at_slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_block_root_at_slot
func get_block_root_at_slot*(state: ForkyBeaconState, slot: Slot): Eth2Digest =
## Return the block root at a recent ``slot``.
@ -315,7 +315,7 @@ func get_block_root_at_slot*(
withState(state):
get_block_root_at_slot(forkyState.data, slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_block_root
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_block_root
func get_block_root*(state: ForkyBeaconState, epoch: Epoch): Eth2Digest =
## Return the block root at the start of a recent ``epoch``.
get_block_root_at_slot(state, epoch.start_slot())
@ -325,7 +325,7 @@ func get_block_root(state: ForkedHashedBeaconState, epoch: Epoch): Eth2Digest =
withState(state):
get_block_root(forkyState.data, epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_total_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_total_balance
template get_total_balance(
state: ForkyBeaconState, validator_indices: untyped): Gwei =
## Return the combined effective balance of the ``indices``.
@ -342,7 +342,7 @@ func is_eligible_for_activation_queue*(validator: Validator): bool =
validator.activation_eligibility_epoch == FAR_FUTURE_EPOCH and
validator.effective_balance == MAX_EFFECTIVE_BALANCE
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#is_eligible_for_activation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#is_eligible_for_activation
func is_eligible_for_activation*(
state: ForkyBeaconState, validator: Validator): bool =
## Check if ``validator`` is eligible for activation.
@ -390,7 +390,7 @@ proc is_valid_indexed_attestation*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_attesting_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_attesting_indices
iterator get_attesting_indices_iter*(state: ForkyBeaconState,
data: AttestationData,
bits: CommitteeValidatorsBits,
@ -411,7 +411,7 @@ iterator get_attesting_indices_iter*(state: ForkyBeaconState,
if bits[index_in_committee]:
yield validator_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_attesting_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_attesting_indices
func get_attesting_indices*(state: ForkyBeaconState,
data: AttestationData,
bits: CommitteeValidatorsBits,
@ -464,7 +464,7 @@ proc is_valid_indexed_attestation(
# Attestation validation
# ------------------------------------------------------------------------------------------
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
func check_attestation_slot_target*(data: AttestationData): Result[Slot, cstring] =
@ -481,8 +481,8 @@ func check_attestation_target_epoch(
ok(data.target.epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#modified-process_attestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#modified-process_attestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.0/specs/deneb/beacon-chain.md#modified-process_attestation
func check_attestation_inclusion(
consensusFork: static ConsensusFork, attestation_slot: Slot,
@ -512,7 +512,7 @@ func check_attestation_index(
Result[CommitteeIndex, cstring] =
check_attestation_index(data.index, committees_per_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
func get_attestation_participation_flag_indices(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState,
data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] =
@ -585,7 +585,7 @@ func get_attestation_participation_flag_indices(
# TODO these duplicate some stuff in state_transition_epoch which uses TotalBalances
# better to centralize around that if feasible
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_total_active_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_total_active_balance
func get_total_active_balance*(state: ForkyBeaconState, cache: var StateCache): Gwei =
## Return the combined effective balance of the active validators.
## Note: ``get_total_balance`` returns ``EFFECTIVE_BALANCE_INCREMENT`` Gwei
@ -601,7 +601,7 @@ func get_total_active_balance*(state: ForkyBeaconState, cache: var StateCache):
cache.total_active_balance[epoch] = tab
return tab
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_base_reward_per_increment
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_base_reward_per_increment
func get_base_reward_per_increment_sqrt(
total_active_balance_sqrt: uint64): Gwei =
EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR div total_active_balance_sqrt
@ -610,7 +610,7 @@ func get_base_reward_per_increment*(
total_active_balance: Gwei): Gwei =
get_base_reward_per_increment_sqrt(integer_squareroot(total_active_balance))
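# Rough example: with 10 million ETH (10^16 Gwei) of total active balance,
# integer_squareroot(10^16) = 10^8, so the per-increment base reward is
# 1_000_000_000 * 64 div 100_000_000 = 640 Gwei (using the mainnet
# EFFECTIVE_BALANCE_INCREMENT and BASE_REWARD_FACTOR).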
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_base_reward
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_base_reward
func get_base_reward(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState,
@ -621,7 +621,7 @@ func get_base_reward(
state.validators[index].effective_balance div EFFECTIVE_BALANCE_INCREMENT
increments * base_reward_per_increment
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#attestations
proc check_attestation*(
state: ForkyBeaconState, attestation: SomeAttestation, flags: UpdateFlags,
cache: var StateCache): Result[void, cstring] =
@ -656,7 +656,7 @@ proc check_attestation*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
proc check_bls_to_execution_change*(
genesisFork: Fork, state: capella.BeaconState | deneb.BeaconState,
signed_address_change: SignedBLSToExecutionChange, flags: UpdateFlags):
@ -762,7 +762,7 @@ proc process_attestation*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_next_sync_committee_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_next_sync_committee_indices
func get_next_sync_committee_keys(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState):
@ -800,19 +800,19 @@ func get_next_sync_committee_keys(
i += 1'u64
res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
func has_eth1_withdrawal_credential*(validator: Validator): bool =
## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential.
validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#is_fully_withdrawable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#is_fully_withdrawable_validator
func is_fully_withdrawable_validator(
validator: Validator, balance: Gwei, epoch: Epoch): bool =
## Check if ``validator`` is fully withdrawable.
has_eth1_withdrawal_credential(validator) and
validator.withdrawable_epoch <= epoch and balance > 0
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#is_partially_withdrawable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#is_partially_withdrawable_validator
func is_partially_withdrawable_validator(
validator: Validator, balance: Gwei): bool =
## Check if ``validator`` is partially withdrawable.
@ -823,7 +823,7 @@ func is_partially_withdrawable_validator(
has_eth1_withdrawal_credential(validator) and
has_max_effective_balance and has_excess_balance
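# Example: a validator with 0x01 credentials, effective balance of exactly
# MAX_EFFECTIVE_BALANCE (32 ETH) and an actual balance of 32.5 ETH is partially
# withdrawable for the 0.5 ETH excess; at 32 ETH or below it is not.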
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#new-get_expected_withdrawals
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#new-get_expected_withdrawals
func get_expected_withdrawals*(
state: capella.BeaconState | deneb.BeaconState): seq[Withdrawal] =
let
@ -859,7 +859,7 @@ func get_expected_withdrawals*(
validator_index = (validator_index + 1) mod num_validators
withdrawals
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_next_sync_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_next_sync_committee
func get_next_sync_committee*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState):
@ -980,7 +980,7 @@ proc initialize_hashed_beacon_state_from_eth1*(
cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing
proc initialize_beacon_state_from_eth1*(
@ -1102,20 +1102,6 @@ proc initialize_beacon_state_from_eth1*(
# TODO https://github.com/nim-lang/Nim/issues/19094
# state
proc initialize_hashed_beacon_state_from_eth1(
cfg: RuntimeConfig,
eth1_block_hash: Eth2Digest,
eth1_timestamp: uint64,
deposits: openArray[DepositData],
execution_payload_header: ForkyExecutionPayloadHeader,
flags: UpdateFlags = {}): auto =
# TODO https://github.com/nim-lang/Nim/issues/19094
result = initHashedBeaconState(
initialize_beacon_state_from_eth1(
cfg, eth1_block_hash, eth1_timestamp, deposits,
execution_payload_header, flags))
result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/fork.md#upgrading-the-state
func translate_participation(
state: var altair.BeaconState,
@ -1342,7 +1328,7 @@ func upgrade_to_capella*(cfg: RuntimeConfig, pre: bellatrix.BeaconState):
# historical_summaries initialized to correct default automatically
)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/specs/deneb/fork.md#upgrading-the-state
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/fork.md#upgrading-the-state
func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
ref deneb.BeaconState =
let

View File

@ -98,7 +98,7 @@ export
# API
# ----------------------------------------------------------------------
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#bls-signatures
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#bls-signatures
func toPubKey*(privkey: ValidatorPrivKey): CookedPubKey =
## Derive a public key from a private key
@ -206,7 +206,7 @@ func finish*(agg: AggregateSignature): CookedSig {.inline.} =
sig.finish(agg)
CookedSig(sig)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#bls-signatures
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#bls-signatures
func blsVerify*(
pubkey: CookedPubKey, message: openArray[byte],
signature: CookedSig): bool =
@ -219,7 +219,7 @@ func blsVerify*(
## to enforce correct usage.
PublicKey(pubkey).verify(message, blscurve.Signature(signature))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#bls-signatures
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#bls-signatures
proc blsVerify*(
pubkey: ValidatorPubKey, message: openArray[byte],
signature: CookedSig): bool =

View File

@ -40,7 +40,7 @@ static:
doAssert ord(TIMELY_HEAD_FLAG_INDEX) == 2
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#incentivization-weights
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#incentivization-weights
TIMELY_SOURCE_WEIGHT* = 14
TIMELY_TARGET_WEIGHT* = 26
TIMELY_HEAD_WEIGHT* = 14
@ -55,17 +55,17 @@ const
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16
SYNC_COMMITTEE_SUBNET_COUNT* = 4
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#constants
# All of these indices are rooted in `BeaconState`.
# The first member (`genesis_time`) is 32, subsequent members +1 each.
# If there are ever more than 32 members in `BeaconState`, indices change!
# `FINALIZED_ROOT_INDEX` is one layer deeper, i.e., `52 * 2 + 1`.
# `FINALIZED_ROOT_GINDEX` is one layer deeper, i.e., `52 * 2 + 1`.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/ssz/merkle-proofs.md
FINALIZED_ROOT_INDEX* = 105.GeneralizedIndex # `finalized_checkpoint` > `root`
CURRENT_SYNC_COMMITTEE_INDEX* = 54.GeneralizedIndex # `current_sync_committee`
NEXT_SYNC_COMMITTEE_INDEX* = 55.GeneralizedIndex # `next_sync_committee`
FINALIZED_ROOT_GINDEX* = 105.GeneralizedIndex # finalized_checkpoint > root
CURRENT_SYNC_COMMITTEE_GINDEX* = 54.GeneralizedIndex # current_sync_committee
NEXT_SYNC_COMMITTEE_GINDEX* = 55.GeneralizedIndex # next_sync_committee
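# Sketch of where these values come from, assuming the 24-field altair
# `BeaconState` below: its leaves occupy gindices 32..55, `finalized_checkpoint`
# is field 20 (hence 32 + 20 = 52, and its `root` child 52 * 2 + 1 = 105), while
# `current_sync_committee` and `next_sync_committee` are fields 22 and 23.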
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#inactivity-penalties
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#inactivity-penalties
INACTIVITY_SCORE_BIAS* = 4
INACTIVITY_SCORE_RECOVERY_RATE* = 16
@ -79,7 +79,7 @@ static: doAssert TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT +
type
### New types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#custom-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#custom-types
ParticipationFlags* = uint8
EpochParticipationFlags* =
@ -88,7 +88,7 @@ type
## effectively making the cost of clearing the cache higher than the typical
## gains
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#syncaggregate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#syncaggregate
SyncAggregate* = object
sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE]
sync_committee_signature*: ValidatorSig
@ -97,12 +97,12 @@ type
sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE]
sync_committee_signature*: TrustedSig
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#synccommittee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#synccommittee
SyncCommittee* = object
pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey]
aggregate_pubkey*: ValidatorPubKey
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/validator.md#synccommitteemessage
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/validator.md#synccommitteemessage
SyncCommitteeMessage* = object
slot*: Slot
## Slot to which this message pertains
@ -116,7 +116,7 @@ type
signature*: ValidatorSig
## Signature by the validator over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/validator.md#synccommitteecontribution
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/validator.md#synccommitteecontribution
SyncCommitteeAggregationBits* =
BitArray[SYNC_SUBCOMMITTEE_SIZE]
@ -157,20 +157,20 @@ type
### Modified/overloaded
FinalityBranch* =
array[log2trunc(FINALIZED_ROOT_INDEX), Eth2Digest]
array[log2trunc(FINALIZED_ROOT_GINDEX), Eth2Digest]
CurrentSyncCommitteeBranch* =
array[log2trunc(CURRENT_SYNC_COMMITTEE_INDEX), Eth2Digest]
array[log2trunc(CURRENT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
NextSyncCommitteeBranch* =
array[log2trunc(NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
array[log2trunc(NEXT_SYNC_COMMITTEE_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientheader
LightClientHeader* = object
beacon*: BeaconBlockHeader
## Beacon block header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object
header*: LightClientHeader
## Header matching the requested beacon block root
@ -179,7 +179,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object
attested_header*: LightClientHeader
## Header attested to by the sync committee
@ -198,7 +198,7 @@ type
signature_slot*: Slot
## Slot at which the aggregate signature was created (untrusted)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
LightClientFinalityUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@ -212,7 +212,7 @@ type
# Slot at which the aggregate signature was created (untrusted)
signature_slot*: Slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
LightClientOptimisticUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@ -238,7 +238,7 @@ type
LightClientBootstrap |
SomeLightClientUpdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientstore
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientstore
LightClientStore* = object
finalized_header*: LightClientHeader
## Header that is finalized
@ -261,7 +261,7 @@ type
InactivityScores* = HashList[uint64, Limit VALIDATOR_REGISTRY_LIMIT]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#beaconstate
BeaconState* = object
# Versioning
genesis_time*: uint64
@ -347,7 +347,7 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@ -404,7 +404,7 @@ type
state_root*: Eth2Digest
body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@ -456,7 +456,7 @@ type
SyncnetBits* = BitArray[SYNC_COMMITTEE_SUBNET_COUNT]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/p2p-interface.md#metadata
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/p2p-interface.md#metadata
MetaData* = object
seq_number*: uint64
attnets*: AttnetBits
@ -481,7 +481,7 @@ type
# [New in Altair]
sync_aggregate*: TrustedSyncAggregate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#signedbeaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object
message*: BeaconBlock
signature*: ValidatorSig
@ -659,7 +659,7 @@ chronicles.formatIt SyncCommitteeContribution: shortLog(it)
chronicles.formatIt ContributionAndProof: shortLog(it)
chronicles.formatIt SignedContributionAndProof: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#is_valid_light_client_header
func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool =
true

View File

@ -65,16 +65,15 @@ import
stew/[assign2, byteutils, results],
chronicles,
json_serialization,
chronos/timer,
ssz_serialization/types as sszTypes,
../../version,
".."/[beacon_time, crypto, digest, presets]
export
tables, results, json_serialization, timer, sszTypes, beacon_time, crypto,
tables, results, json_serialization, sszTypes, beacon_time, crypto,
digest, presets
const SPEC_VERSION* = "1.4.0-beta.3"
const SPEC_VERSION* = "1.4.0-beta.4"
## Spec version we're aiming to be compatible with, right now
const
@ -82,9 +81,6 @@ const
ZERO_HASH* = Eth2Digest()
MAX_GRAFFITI_SIZE* = 32
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#configuration
MAXIMUM_GOSSIP_CLOCK_DISPARITY* = 500.millis
SLOTS_PER_ETH1_VOTING_PERIOD* =
EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH
@ -146,7 +142,7 @@ template ethAmountUnit*(typ: type) {.dirty.} =
ethAmountUnit Ether
type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#custom-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#custom-types
Eth2Domain* = array[32, byte]
ValidatorIndex* = distinct uint32
@ -187,11 +183,18 @@ type
## The `SubnetId` type is constrained to values in the range
## `[0, ATTESTATION_SUBNET_COUNT)` during initialization.
BlobId* = distinct uint8
## The blob id selects which gossip subnet is used to publish a blob
## sidecar - in particular, it is distinct from the CommitteeIndex
##
## The `BlobId` type is constrained to values in the range
## `[0, BLOB_SIDECAR_SUBNET_COUNT)` during initialization.
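##
## A sketch of the mapping (following the deneb p2p spec; the helper name is
## hypothetical):
##   func subnetForBlobSidecar(blobIndex: BlobIndex): BlobId =
##     BlobId(blobIndex mod BLOB_SIDECAR_SUBNET_COUNT)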
# BitVector[4] in the spec, i.e. 4 bits which end up encoded as a byte for
# SSZ / hashing purposes
JustificationBits* = distinct uint8
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#proposerslashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#proposerslashing
ProposerSlashing* = object
signed_header_1*: SignedBeaconBlockHeader
signed_header_2*: SignedBeaconBlockHeader
@ -203,7 +206,7 @@ type
signed_header_1*: TrustedSignedBeaconBlockHeader
signed_header_2*: TrustedSignedBeaconBlockHeader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#attesterslashing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#attesterslashing
AttesterSlashing* = object
attestation_1*: IndexedAttestation
attestation_2*: IndexedAttestation
@ -215,7 +218,7 @@ type
attestation_1*: TrustedIndexedAttestation
attestation_2*: TrustedIndexedAttestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#indexedattestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#indexedattestation
IndexedAttestation* = object
attesting_indices*: List[uint64, Limit MAX_VALIDATORS_PER_COMMITTEE]
data*: AttestationData
@ -231,7 +234,7 @@ type
CommitteeValidatorsBits* = BitList[Limit MAX_VALIDATORS_PER_COMMITTEE]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#attestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#attestation
Attestation* = object
aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData
@ -247,17 +250,17 @@ type
ForkDigest* = distinct array[4, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#forkdata
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#forkdata
ForkData* = object
current_version*: Version
genesis_validators_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#checkpoint
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#checkpoint
Checkpoint* = object
epoch*: Epoch
root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#AttestationData
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#AttestationData
AttestationData* = object
slot*: Slot
@ -270,20 +273,20 @@ type
source*: Checkpoint
target*: Checkpoint
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#deposit
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#deposit
Deposit* = object
proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest]
## Merkle path to deposit root
data*: DepositData
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#depositmessage
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#depositmessage
DepositMessage* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
amount*: Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#depositdata
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#depositdata
DepositData* = object
pubkey*: ValidatorPubKey
withdrawal_credentials*: Eth2Digest
@ -293,7 +296,7 @@ type
signature*: ValidatorSig
## Signing over DepositMessage
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#voluntaryexit
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#voluntaryexit
VoluntaryExit* = object
epoch*: Epoch
## Earliest epoch when voluntary exit can be processed
@ -321,7 +324,7 @@ type
pubkey*: CookedPubKey
withdrawal_credentials*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#validator
Validator* = object
pubkey*: ValidatorPubKey
@ -343,7 +346,7 @@ type
withdrawable_epoch*: Epoch
## When validator can withdraw funds
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#pendingattestation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#pendingattestation
PendingAttestation* = object
aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData
@ -352,12 +355,12 @@ type
proposer_index*: uint64 # `ValidatorIndex` after validation
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#historicalbatch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#historicalbatch
HistoricalBatch* = object
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#fork
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#fork
Fork* = object
previous_version*: Version
current_version*: Version
@ -365,13 +368,13 @@ type
epoch*: Epoch
## Epoch of latest fork
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#eth1data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#eth1data
Eth1Data* = object
deposit_root*: Eth2Digest
deposit_count*: uint64
block_hash*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#signedvoluntaryexit
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#signedvoluntaryexit
SignedVoluntaryExit* = object
message*: VoluntaryExit
signature*: ValidatorSig
@ -380,7 +383,7 @@ type
message*: VoluntaryExit
signature*: TrustedSig
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconblockheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconblockheader
BeaconBlockHeader* = object
slot*: Slot
proposer_index*: uint64 # `ValidatorIndex` after validation
@ -388,14 +391,14 @@ type
state_root*: Eth2Digest
body_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#signingdata
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#signingdata
SigningData* = object
object_root*: Eth2Digest
domain*: Eth2Domain
GraffitiBytes* = distinct array[MAX_GRAFFITI_SIZE, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#signedbeaconblockheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#signedbeaconblockheader
SignedBeaconBlockHeader* = object
message*: BeaconBlockHeader
signature*: ValidatorSig
@ -428,12 +431,12 @@ type
sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]
# This matches the mutable state of the Solidity deposit contract
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/solidity_deposit_contract/deposit_contract.sol
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/solidity_deposit_contract/deposit_contract.sol
DepositContractState* = object
branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
deposit_count*: array[32, byte] # Uint256
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#validator
ValidatorStatus* = object
# This is a validator without the expensive, immutable, append-only parts
# serialized. They're represented in memory to allow in-place SSZ reading
@ -459,7 +462,7 @@ type
withdrawable_epoch*: Epoch
## When validator can withdraw funds
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#validator
ValidatorStatusCapella* = object
# This is a validator without the expensive, immutable, append-only parts
# serialized. They're represented in memory to allow in-place SSZ reading
@ -603,6 +606,7 @@ template makeLimitedU64*(T: untyped, limit: uint64) =
makeLimitedU64(CommitteeIndex, MAX_COMMITTEES_PER_SLOT)
makeLimitedU64(SubnetId, ATTESTATION_SUBNET_COUNT)
makeLimitedU64(BlobId, BLOB_SIDECAR_SUBNET_COUNT)
const
validatorIndexLimit = min(uint64(int32.high), VALIDATOR_REGISTRY_LIMIT)

View File

@ -34,7 +34,7 @@ const
NEWPAYLOAD_TIMEOUT* = 8.seconds
type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#custom-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#custom-types
Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION]
ExecutionAddress* = object
@ -45,7 +45,7 @@ type
PayloadID* = array[8, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#executionpayload
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#executionpayload
ExecutionPayload* = object
# Execution block header fields
parent_hash*: Eth2Digest
@ -73,7 +73,7 @@ type
executionPayload*: ExecutionPayload
blockValue*: Wei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#executionpayloadheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object
# Execution block header fields
parent_hash*: Eth2Digest
@ -103,7 +103,7 @@ type
parent_hash*: Eth2Digest
total_difficulty*: Eth2Digest # uint256
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#beaconstate
BeaconState* = object
# Versioning
genesis_time*: uint64
@ -171,7 +171,7 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@ -228,7 +228,7 @@ type
state_root*: Eth2Digest
body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@ -306,7 +306,7 @@ type
# Execution
execution_payload*: ExecutionPayload # [New in Bellatrix]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#signedbeaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object
message*: BeaconBlock
signature*: ValidatorSig

View File

@ -27,43 +27,43 @@ import
export json_serialization, base
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/sync-protocol.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/sync-protocol.md#constants
# This index is rooted in `BeaconBlockBody`.
# The first member (`randao_reveal`) is 16, subsequent members +1 each.
# If there are ever more than 16 members in `BeaconBlockBody`, indices change!
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/ssz/merkle-proofs.md
EXECUTION_PAYLOAD_INDEX* = 25.GeneralizedIndex # `execution_payload`
EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex # execution_payload
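# Sketch of where 25 comes from, assuming the 11-field capella `BeaconBlockBody`:
# its leaves occupy gindices 16..26, and `execution_payload` is field 9
# (with `randao_reveal` as field 0), hence 16 + 9 = 25.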
type
SignedBLSToExecutionChangeList* =
List[SignedBLSToExecutionChange, Limit MAX_BLS_TO_EXECUTION_CHANGES]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#withdrawal
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#withdrawal
Withdrawal* = object
index*: WithdrawalIndex
validator_index*: uint64
address*: ExecutionAddress
amount*: Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#blstoexecutionchange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#blstoexecutionchange
BLSToExecutionChange* = object
validator_index*: uint64
from_bls_pubkey*: ValidatorPubKey
to_execution_address*: ExecutionAddress
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#signedblstoexecutionchange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#signedblstoexecutionchange
SignedBLSToExecutionChange* = object
message*: BLSToExecutionChange
signature*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#historicalsummary
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#historicalsummary
HistoricalSummary* = object
# `HistoricalSummary` matches the components of the phase0
# `HistoricalBatch`, making the two hash_tree_root-compatible.
block_summary_root*: Eth2Digest
state_summary_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#executionpayload
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#executionpayload
ExecutionPayload* = object
# Execution block header fields
parent_hash*: Eth2Digest
@ -93,7 +93,7 @@ type
executionPayload*: ExecutionPayload
blockValue*: Wei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#executionpayloadheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object
# Execution block header fields
parent_hash*: Eth2Digest
@ -120,9 +120,9 @@ type
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].}
ExecutionBranch* =
array[log2trunc(EXECUTION_PAYLOAD_INDEX), Eth2Digest]
array[log2trunc(EXECUTION_PAYLOAD_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
LightClientHeader* = object
beacon*: BeaconBlockHeader
## Beacon block header
@ -131,7 +131,7 @@ type
## Execution payload header corresponding to `beacon.body_root` (from Capella onward)
execution_branch*: ExecutionBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object
header*: LightClientHeader
## Header matching the requested beacon block root
@ -140,7 +140,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object
attested_header*: LightClientHeader
## Header attested to by the sync committee
@ -159,7 +159,7 @@ type
signature_slot*: Slot
## Slot at which the aggregate signature was created (untrusted)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
LightClientFinalityUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@ -173,7 +173,7 @@ type
# Slot at which the aggregate signature was created (untrusted)
signature_slot*: Slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
LightClientOptimisticUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@ -199,7 +199,7 @@ type
LightClientBootstrap |
SomeLightClientUpdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientstore
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientstore
LightClientStore* = object
finalized_header*: LightClientHeader
## Header that is finalized
@ -220,7 +220,7 @@ type
## (used to compute safety threshold)
current_max_active_participants*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#beaconstate
BeaconState* = object
# Versioning
genesis_time*: uint64
@ -299,7 +299,7 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@ -356,7 +356,7 @@ type
state_root*: Eth2Digest
body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@ -650,7 +650,7 @@ func shortLog*(v: SignedBLSToExecutionChange): auto =
signature: shortLog(v.signature)
)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/sync-protocol.md#get_lc_execution_root
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/sync-protocol.md#get_lc_execution_root
func get_lc_execution_root*(
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
let epoch = header.beacon.slot.epoch
@ -660,7 +660,7 @@ func get_lc_execution_root*(
ZERO_HASH
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/sync-protocol.md#modified-is_valid_light_client_header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/sync-protocol.md#modified-is_valid_light_client_header
func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool =
let epoch = header.beacon.slot.epoch
@ -673,17 +673,17 @@ func is_valid_light_client_header*(
is_valid_merkle_branch(
get_lc_execution_root(header, cfg),
header.execution_branch,
log2trunc(EXECUTION_PAYLOAD_INDEX),
get_subtree_index(EXECUTION_PAYLOAD_INDEX),
log2trunc(EXECUTION_PAYLOAD_GINDEX),
get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_capella*(
pre: altair.LightClientHeader): LightClientHeader =
LightClientHeader(
beacon: pre.beacon)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_capella*(
pre: altair.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap(
@ -691,7 +691,7 @@ func upgrade_lc_bootstrap_to_capella*(
current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_capella*(
pre: altair.LightClientUpdate): LightClientUpdate =
LightClientUpdate(
@ -703,7 +703,7 @@ func upgrade_lc_update_to_capella*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_capella*(
pre: altair.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate(
@ -713,7 +713,7 @@ func upgrade_lc_finality_update_to_capella*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_capella*(
pre: altair.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate(
@ -764,7 +764,7 @@ chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/fork.md#upgrading-the-store
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_capella*(
pre: altair.LightClientStore): LightClientStore =
let best_valid_update =

View File

@ -5,18 +5,20 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import chronos/timer
type
Slot* = distinct uint64
Epoch* = distinct uint64
SyncCommitteePeriod* = distinct uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#custom-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#custom-types
WithdrawalIndex* = uint64
DomainType* = distinct array[4, byte]
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#constants
NODE_ID_BITS* = 256
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#configuration
@ -36,7 +38,7 @@ const
FAR_FUTURE_EPOCH* = Epoch(not 0'u64)
FAR_FUTURE_PERIOD* = SyncCommitteePeriod(not 0'u64)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#domain-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#domain-types
DOMAIN_BEACON_PROPOSER* = DomainType([byte 0x00, 0x00, 0x00, 0x00])
DOMAIN_BEACON_ATTESTER* = DomainType([byte 0x01, 0x00, 0x00, 0x00])
DOMAIN_RANDAO* = DomainType([byte 0x02, 0x00, 0x00, 0x00])
@ -46,19 +48,19 @@ const
DOMAIN_AGGREGATE_AND_PROOF* = DomainType([byte 0x06, 0x00, 0x00, 0x00])
DOMAIN_APPLICATION_MASK* = DomainType([byte 0x00, 0x00, 0x00, 0x01])
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#domain-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#domain-types
DOMAIN_SYNC_COMMITTEE* = DomainType([byte 0x07, 0x00, 0x00, 0x00])
DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF* = DomainType([byte 0x08, 0x00, 0x00, 0x00])
DOMAIN_CONTRIBUTION_AND_PROOF* = DomainType([byte 0x09, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#domain-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#domain-types
DOMAIN_BLS_TO_EXECUTION_CHANGE* = DomainType([byte 0x0a, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/deneb/beacon-chain.md#domain-types
DOMAIN_BLOB_SIDECAR* = DomainType([byte 0x0b, 0x00, 0x00, 0x00])
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/fork-choice.md#configuration
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/fork-choice.md#configuration
PROPOSER_SCORE_BOOST*: uint64 = 40
REORG_HEAD_WEIGHT_THRESHOLD*: uint64 = 20
REORG_PARENT_WEIGHT_THRESHOLD*: uint64 = 160
REORG_MAX_EPOCHS_SINCE_FINALIZATION* = Epoch(2)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#configuration
BLOB_SIDECAR_SUBNET_COUNT*: uint64 = 6
@ -67,11 +69,11 @@ const
MAX_REQUEST_BLOCKS* = 1024'u64
RESP_TIMEOUT* = 10'u64
ATTESTATION_PROPAGATION_SLOT_RANGE*: uint64 = 32
MAXIMUM_GOSSIP_CLOCK_DISPARITY* = 500.millis
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#configuration
GOSSIP_MAX_SIZE* = 10'u64 * 1024 * 1024 # bytes
MAX_CHUNK_SIZE* = 10'u64 * 1024 * 1024 # bytes
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#configuration
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS*: uint64 = 4096
MAX_REQUEST_BLOCKS_DENEB*: uint64 = 128 # TODO Make use of in request code

View File

@ -33,44 +33,34 @@ const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/polynomial-commitments.md#constants
BYTES_PER_FIELD_ELEMENT = 32
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/beacon-chain.md#blob
BLOB_TX_TYPE* = 0x03'u8
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/polynomial-commitments.md#constants
BLS_MODULUS* = "52435875175126190479447740508185965837690552500527637822603658699938581184513".u256
type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/beacon-chain.md#beaconblockbody
KzgCommitments* = List[KzgCommitment, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK]
Blobs* = List[Blob, Limit MAX_BLOBS_PER_BLOCK]
# TODO this apparently is supposed to be SSZ-equivalent to Bytes32, but
# current spec doesn't ever SSZ-serialize it or hash_tree_root it
VersionedHash* = array[32, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/deneb/beacon-chain.md#custom-types
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/beacon-chain.md#custom-types
BlobIndex* = uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/polynomial-commitments.md#custom-types
Blob* = array[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB, byte]
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blobsidecar
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#blobsidecar
BlobSidecar* = object
block_root*: Eth2Digest
index*: BlobIndex
## Index of blob in block
slot*: Slot
block_parent_root*: Eth2Digest
## Proposer shuffling determinant
proposer_index*: uint64
blob*: Blob
kzg_commitment*: KzgCommitment
kzg_proof*: KzgProof
## Allows for quick verification of kzg_commitment
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#signedblobsidecar
SignedBlobSidecar* = object
message*: BlobSidecar
signature*: ValidatorSig
signed_block_header*: SignedBeaconBlockHeader
kzg_commitment_inclusion_proof*:
array[KZG_COMMITMENT_INCLUSION_PROOF_DEPTH, Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#blobidentifier
BlobIdentifier* = object
@ -103,14 +93,22 @@ type
blob_gas_used*: uint64 # [New in Deneb]
excess_blob_gas*: uint64 # [New in Deneb]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/validator.md#blobsbundle
KzgProofs* = List[KzgProof, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK]
Blobs* = List[Blob, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK]
BlobRoots* = List[Eth2Digest, Limit MAX_BLOB_COMMITMENTS_PER_BLOCK]
BlobsBundle* = object
commitments*: KzgCommitments
proofs*: KzgProofs
blobs*: Blobs
ExecutionPayloadForSigning* = object
executionPayload*: ExecutionPayload
blockValue*: Wei
kzgs*: KzgCommitments
proofs*: seq[KZGProof]
blobs*: Blobs
blobsBundle*: BlobsBundle
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/deneb/beacon-chain.md#executionpayloadheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object
# Execution block header fields
parent_hash*: Eth2Digest
@ -134,16 +132,10 @@ type
blob_gas_used*: uint64 # [New in Deneb:EIP4844]
excess_blob_gas*: uint64 # [New in Deneb:EIP4844]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/validator.md#blobsbundle
BlobsBundle* = object
commitments*: seq[KZGCommitment]
proofs*: seq[KZGProof]
blobs*: seq[Blob]
ExecutePayload* = proc(
execution_payload: ExecutionPayload): bool {.gcsafe, raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
LightClientHeader* = object
beacon*: BeaconBlockHeader
## Beacon block header
@ -152,7 +144,7 @@ type
## Execution payload header corresponding to `beacon.body_root` (from Capella onward)
execution_branch*: capella.ExecutionBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientbootstrap
LightClientBootstrap* = object
header*: LightClientHeader
## Header matching the requested beacon block root
@ -161,7 +153,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object
attested_header*: LightClientHeader
## Header attested to by the sync committee
@ -180,7 +172,7 @@ type
signature_slot*: Slot
## Slot at which the aggregate signature was created (untrusted)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
LightClientFinalityUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@ -194,7 +186,7 @@ type
# Slot at which the aggregate signature was created (untrusted)
signature_slot*: Slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientoptimisticupdate
LightClientOptimisticUpdate* = object
# Header attested to by the sync committee
attested_header*: LightClientHeader
@ -220,7 +212,7 @@ type
LightClientBootstrap |
SomeLightClientUpdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#lightclientstore
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#lightclientstore
LightClientStore* = object
finalized_header*: LightClientHeader
## Header that is finalized
@ -241,7 +233,7 @@ type
## (used to compute safety threshold)
current_max_active_participants*: uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#beaconstate
# changes indirectly via ExecutionPayloadHeader
BeaconState* = object
# Versioning
@ -319,7 +311,7 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@ -457,7 +449,7 @@ type
bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments # [New in Deneb]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#signedbeaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object
message*: BeaconBlock
signature*: ValidatorSig
@ -536,23 +528,17 @@ func shortLog*(v: SomeBeaconBlock): auto =
func shortLog*(v: BlobSidecar): auto =
(
block_root: shortLog(v.block_root),
index: v.index,
slot: shortLog(v.slot),
block_parent_root: shortLog(v.block_parent_root),
proposer_index: v.proposer_index,
bloblen: v.blob.len(),
block_header: shortLog(v.signed_block_header.message),
)
func shortLog*(v: seq[BlobSidecar]): auto =
"[" & v.mapIt(shortLog(it)).join(", ") & "]"
func shortLog*(v: seq[ref BlobSidecar]): auto =
"[" & v.mapIt(shortLog(it[])).join(", ") & "]"
func shortLog*(v: SignedBlobSidecar): auto =
(
blob: shortLog(v.message),
signature: shortLog(v.signature)
)
func shortLog*(v: SomeSignedBeaconBlock): auto =
(
blck: shortLog(v.message),
@ -581,7 +567,32 @@ func shortLog*(v: ExecutionPayload): auto =
func shortLog*(x: seq[BlobIdentifier]): string =
"[" & x.mapIt(shortLog(it.block_root) & "/" & $it.index).join(", ") & "]"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root
func kzg_commitment_inclusion_proof_gindex*(
index: BlobIndex): GeneralizedIndex =
# This index is rooted in `BeaconBlockBody`.
# The first member (`randao_reveal`) is 16, subsequent members +1 each.
# If there are ever more than 16 members in `BeaconBlockBody`, indices change!
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/ssz/merkle-proofs.md
const
# blob_kzg_commitments
BLOB_KZG_COMMITMENTS_GINDEX =
27.GeneralizedIndex
# List + 0 = items, + 1 = len
BLOB_KZG_COMMITMENTS_BASE_GINDEX =
(BLOB_KZG_COMMITMENTS_GINDEX shl 1) + 0
# List depth
BLOB_KZG_COMMITMENTS_PROOF_DEPTH =
log2trunc(nextPow2(deneb.KzgCommitments.maxLen.uint64))
# First item
BLOB_KZG_COMMITMENTS_FIRST_GINDEX =
(BLOB_KZG_COMMITMENTS_BASE_GINDEX shl BLOB_KZG_COMMITMENTS_PROOF_DEPTH)
static: doAssert(
log2trunc(BLOB_KZG_COMMITMENTS_FIRST_GINDEX) ==
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH)
BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index
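For orientation, a minimal sanity check of the arithmetic above, assuming the mainnet preset value MAX_BLOB_COMMITMENTS_PER_BLOCK = 4096 (the constant names below are local to this sketch, not part of the module):
```nim
# Hedged sketch: verifies the gindex arithmetic with assumed mainnet values.
const
  commitmentsGindex = 27'u64                   # blob_kzg_commitments in BeaconBlockBody
  itemsGindex = commitmentsGindex shl 1        # 54: the list's items subtree (+1 is the length mix-in)
  listDepth = 12                               # log2trunc(nextPow2(4096))
  firstItemGindex = itemsGindex shl listDepth  # 54 shl 12 = 221_184
static:
  doAssert firstItemGindex == 221_184
  # floor(log2(221_184)) == 17 == KZG_COMMITMENT_INCLUSION_PROOF_DEPTH (mainnet)
# Blob index 3 would therefore be proven against gindex 221_184 + 3 = 221_187.
```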
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root
func get_lc_execution_root*(
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
let epoch = header.beacon.slot.epoch
@ -610,7 +621,7 @@ func get_lc_execution_root*(
ZERO_HASH
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header
func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool =
let epoch = header.beacon.slot.epoch
@ -628,11 +639,11 @@ func is_valid_light_client_header*(
is_valid_merkle_branch(
get_lc_execution_root(header, cfg),
header.execution_branch,
log2trunc(EXECUTION_PAYLOAD_INDEX),
get_subtree_index(EXECUTION_PAYLOAD_INDEX),
log2trunc(EXECUTION_PAYLOAD_GINDEX),
get_subtree_index(EXECUTION_PAYLOAD_GINDEX),
header.beacon.body_root)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_header_to_deneb*(
pre: capella.LightClientHeader): LightClientHeader =
LightClientHeader(
@ -657,7 +668,7 @@ func upgrade_lc_header_to_deneb*(
excess_blob_gas: 0), # [New in Deneb:EIP4844]
execution_branch: pre.execution_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_bootstrap_to_deneb*(
pre: capella.LightClientBootstrap): LightClientBootstrap =
LightClientBootstrap(
@ -665,7 +676,7 @@ func upgrade_lc_bootstrap_to_deneb*(
current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_deneb*(
pre: capella.LightClientUpdate): LightClientUpdate =
LightClientUpdate(
@ -677,7 +688,7 @@ func upgrade_lc_update_to_deneb*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_finality_update_to_deneb*(
pre: capella.LightClientFinalityUpdate): LightClientFinalityUpdate =
LightClientFinalityUpdate(
@ -687,7 +698,7 @@ func upgrade_lc_finality_update_to_deneb*(
sync_aggregate: pre.sync_aggregate,
signature_slot: pre.signature_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/light-client/fork.md#upgrading-light-client-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_optimistic_update_to_deneb*(
pre: capella.LightClientOptimisticUpdate): LightClientOptimisticUpdate =
LightClientOptimisticUpdate(
@ -738,7 +749,7 @@ chronicles.formatIt LightClientUpdate: shortLog(it)
chronicles.formatIt LightClientFinalityUpdate: shortLog(it)
chronicles.formatIt LightClientOptimisticUpdate: shortLog(it)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/light-client/fork.md#upgrading-the-store
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/light-client/fork.md#upgrading-the-store
func upgrade_lc_store_to_deneb*(
pre: capella.LightClientStore): LightClientStore =
let best_valid_update =

View File

@ -22,7 +22,7 @@ import
export base
type
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconstate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconstate
BeaconState* = object
# Versioning
genesis_time*: uint64
@ -111,7 +111,7 @@ type
data*: BeaconState
root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to
@ -167,7 +167,7 @@ type
state_root*: Eth2Digest
body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object
randao_reveal*: ValidatorSig
eth1_data*: Eth1Data
@ -219,7 +219,7 @@ type
deposits*: List[Deposit, Limit MAX_DEPOSITS]
voluntary_exits*: List[TrustedSignedVoluntaryExit, Limit MAX_VOLUNTARY_EXITS]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#signedbeaconblock
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object
message*: BeaconBlock
signature*: ValidatorSig

View File

@ -7,7 +7,7 @@
# Consensus hash function / digest
#
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#hash
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#hash
#
# In Phase 0 the beacon chain is deployed with SHA256 (SHA2-256).
# Note that it is different from Keccak256 (often mistakenly called SHA3-256)

View File

@ -122,7 +122,9 @@ type
seq[RestSyncCommitteeSubscription] |
seq[SignedAggregateAndProof] |
seq[SignedValidatorRegistrationV1] |
seq[ValidatorIndex]
seq[ValidatorIndex] |
seq[RestBeaconCommitteeSelection] |
seq[RestSyncCommitteeSelection]
DecodeTypes* =
DataEnclosedObject |
@ -130,6 +132,7 @@ type
DataRootEnclosedObject |
DataOptimisticObject |
DataVersionEnclosedObject |
DataOptimisticAndFinalizedObject |
GetBlockV2Response |
GetDistributedKeystoresResponse |
GetKeystoresResponse |
@ -642,6 +645,16 @@ proc readValue*(reader: var JsonReader[RestJson], value: var uint8) {.
else:
reader.raiseUnexpectedValue($res.error() & ": " & svalue)
## RestNumeric
proc writeValue*(w: var JsonWriter[RestJson],
value: RestNumeric) {.raises: [IOError].} =
writeValue(w, int(value))
proc readValue*(reader: var JsonReader[RestJson],
value: var RestNumeric) {.
raises: [IOError, SerializationError].} =
value = RestNumeric(reader.readValue(int))
## JustificationBits
proc writeValue*(
w: var JsonWriter[RestJson], value: JustificationBits
@ -1744,12 +1757,12 @@ proc readValue*(reader: var JsonReader[RestJson],
var message: Opt[RestPublishedBeaconBlock]
var signed_message: Opt[RestPublishedSignedBeaconBlock]
var signed_block_data: Opt[JsonString]
var signed_blob_sidecars: Opt[List[SignedBlobSidecar,
Limit MAX_BLOBS_PER_BLOCK]]
var kzg_proofs: Opt[deneb.KzgProofs]
var blobs: Opt[deneb.Blobs]
# Pre-Deneb, there were always the same two top-level fields
# ('signature' and 'message'). For Deneb, there's a different set of
# a top-level fields: 'signed_block' 'signed_blob_sidecars'. The
# top-level fields: 'signed_block', 'kzg_proofs', 'blobs'. The
# former is the same as the pre-Deneb object.
for fieldName in readObjectFields(reader):
case fieldName
@ -1793,18 +1806,26 @@ proc readValue*(reader: var JsonReader[RestJson],
of ConsensusFork.Deneb:
ForkedBeaconBlock.init(blck.denebData.message)
))
of "signed_blob_sidecars":
if signed_blob_sidecars.isSome():
of "kzg_proofs":
if kzg_proofs.isSome():
reader.raiseUnexpectedField(
"Multiple `signed_blob_sidecars` fields found",
"Multiple `kzg_proofs` fields found",
"RestPublishedSignedBlockContents")
if signature.isSome():
reader.raiseUnexpectedField(
"Found `signed_blob_sidecars` field alongside signature field",
"Found `kzg_proofs` field alongside signature field",
"RestPublishedSignedBlockContents")
signed_blob_sidecars = Opt.some(reader.readValue(
List[SignedBlobSidecar, Limit MAX_BLOBS_PER_BLOCK]))
kzg_proofs = Opt.some(reader.readValue(deneb.KzgProofs))
of "blobs":
if blobs.isSome():
reader.raiseUnexpectedField(
"Multiple `blobs` fields found",
"RestPublishedSignedBlockContents")
if signature.isSome():
reader.raiseUnexpectedField(
"Found `blobs` field alongside signature field",
"RestPublishedSignedBlockContents")
blobs = Opt.some(reader.readValue(deneb.Blobs))
else:
unrecognizedFieldWarning()
@ -1816,6 +1837,18 @@ proc readValue*(reader: var JsonReader[RestJson],
reader.raiseUnexpectedValue("Field `message` is missing")
let blck = ForkedBeaconBlock(message.get())
if blck.kind >= ConsensusFork.Deneb:
if kzg_proofs.isNone():
reader.raiseUnexpectedValue("Field `kzg_proofs` is missing")
if blobs.isNone():
reader.raiseUnexpectedValue("Field `blobs` is missing")
else:
if kzg_proofs.isSome():
reader.raiseUnexpectedValue("Field `kzg_proofs` found but unsupported")
if blobs.isSome():
reader.raiseUnexpectedValue("Field `blobs` found but unsupported")
case blck.kind
of ConsensusFork.Phase0:
value = RestPublishedSignedBlockContents(
@ -1855,7 +1888,8 @@ proc readValue*(reader: var JsonReader[RestJson],
denebData: DenebSignedBlockContents(
# Constructed to be internally consistent
signed_block: signed_message.get().distinctBase.denebData,
signed_blob_sidecars: signed_blob_sidecars.get()
kzg_proofs: kzg_proofs.get(),
blobs: blobs.get()
)
)
@ -3606,6 +3640,15 @@ proc encodeString*(value: StateIdent): RestResult[string] =
of StateIdentType.Justified:
ok("justified")
proc encodeString*(value: BroadcastValidationType): RestResult[string] =
case value
of BroadcastValidationType.Gossip:
ok("gossip")
of BroadcastValidationType.Consensus:
ok("consensus")
of BroadcastValidationType.ConsensusAndEquivocation:
ok("consensus_and_equivocation")
proc encodeString*(value: BlockIdent): RestResult[string] =
case value.kind
of BlockQueryKind.Slot:
@ -3821,6 +3864,18 @@ proc decodeString*(t: typedesc[BlockIdent],
let res = ? Base10.decode(uint64, value)
ok(BlockIdent(kind: BlockQueryKind.Slot, slot: Slot(res)))
proc decodeString*(t: typedesc[BroadcastValidationType],
value: string): Result[BroadcastValidationType, cstring] =
case value
of "gossip":
ok(BroadcastValidationType.Gossip)
of "consensus":
ok(BroadcastValidationType.Consensus)
of "consensus_and_equivocation":
ok(BroadcastValidationType.ConsensusAndEquivocation)
else:
err("Incorrect broadcast validation type value")
proc decodeString*(t: typedesc[ValidatorIdent],
value: string): Result[ValidatorIdent, cstring] =
if len(value) > 2:

View File

@ -63,6 +63,9 @@ type
ValidatorQueryKind* {.pure.} = enum
Index, Key
ValidatorIndexError* {.pure.} = enum
UnsupportedValue, TooHighValue
ValidatorIdent* = object
case kind*: ValidatorQueryKind
of ValidatorQueryKind.Index:
@ -84,6 +87,9 @@ type
StateIdentType* {.pure.} = enum
Head, Genesis, Finalized, Justified
BroadcastValidationType* {.pure.} = enum
Gossip, Consensus, ConsensusAndEquivocation
StateIdent* = object
case kind*: StateQueryKind
of StateQueryKind.Slot:
@ -113,6 +119,8 @@ type
PeerDirectKind* {.pure.} = enum
Inbound, Outbound
RestNumeric* = distinct int
RestAttesterDuty* = object
pubkey*: ValidatorPubKey
validator_index*: ValidatorIndex
@ -312,7 +320,8 @@ type
DenebSignedBlockContents* = object
signed_block*: deneb.SignedBeaconBlock
signed_blob_sidecars*: List[SignedBlobSidecar, Limit MAX_BLOBS_PER_BLOCK]
kzg_proofs*: deneb.KzgProofs
blobs*: deneb.Blobs
RestPublishedSignedBlockContents* = object
case kind*: ConsensusFork
@ -334,7 +343,8 @@ type
DenebBlockContents* = object
`block`*: deneb.BeaconBlock
blob_sidecars*: List[BlobSidecar, Limit MAX_BLOBS_PER_BLOCK]
kzg_proofs*: deneb.KzgProofs
blobs*: deneb.Blobs
ProduceBlockResponseV2* = object
case kind*: ConsensusFork
@ -388,6 +398,11 @@ type
data*: T
execution_optimistic*: Option[bool]
DataOptimisticAndFinalizedObject*[T] = object
data*: T
execution_optimistic*: Option[bool]
finalized*: Option[bool]
ForkedSignedBlockHeader* = object
message*: uint32 # message offset
signature*: ValidatorSig
@ -503,12 +518,23 @@ type
timestamp3*: uint64
delay*: uint64
RestBeaconCommitteeSelection* = object
validator_index*: RestValidatorIndex
slot*: Slot
selection_proof*: ValidatorSig
RestSyncCommitteeSelection* = object
validator_index*: RestValidatorIndex
slot*: Slot
subcommittee_index*: uint64
selection_proof*: ValidatorSig
# Types based on the OAPI yaml file - used in responses to requests
GetBeaconHeadResponse* = DataEnclosedObject[Slot]
GetAggregatedAttestationResponse* = DataEnclosedObject[Attestation]
GetAttesterDutiesResponse* = DataRootEnclosedObject[seq[RestAttesterDuty]]
GetBlockAttestationsResponse* = DataEnclosedObject[seq[Attestation]]
GetBlockHeaderResponse* = DataOptimisticObject[RestBlockHeaderInfo]
GetBlockHeaderResponse* = DataOptimisticAndFinalizedObject[RestBlockHeaderInfo]
GetBlockHeadersResponse* = DataEnclosedObject[seq[RestBlockHeaderInfo]]
GetBlockRootResponse* = DataOptimisticObject[RestRoot]
GetDebugChainHeadsResponse* = DataEnclosedObject[seq[RestChainHead]]
@ -548,6 +574,8 @@ type
SubmitBlindedBlockResponseDeneb* = DataEnclosedObject[deneb_mev.ExecutionPayloadAndBlobsBundle]
GetValidatorsActivityResponse* = DataEnclosedObject[seq[RestActivityItem]]
GetValidatorsLivenessResponse* = DataEnclosedObject[seq[RestLivenessItem]]
SubmitBeaconCommitteeSelectionsResponse* = DataEnclosedObject[seq[RestBeaconCommitteeSelection]]
SubmitSyncCommitteeSelectionsResponse* = DataEnclosedObject[seq[RestSyncCommitteeSelection]]
RestNodeValidity* {.pure.} = enum
valid = "VALID",
@ -892,3 +920,23 @@ func init*(t: typedesc[RestErrorMessage], code: HttpCode,
message: string, stacktrace: openArray[string]): RestErrorMessage =
RestErrorMessage(code: code.toInt(), message: message,
stacktraces: Opt.some(@stacktrace))
func toValidatorIndex*(value: RestValidatorIndex): Result[ValidatorIndex,
ValidatorIndexError] =
when sizeof(ValidatorIndex) == 4:
if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
# On x86 platforms Nim allows only `int32` indexes, so indexes in the
# range `2^31 <= x < 2^32` are not supported.
if uint64(value) <= uint64(high(int32)):
ok(ValidatorIndex(value))
else:
err(ValidatorIndexError.UnsupportedValue)
else:
err(ValidatorIndexError.TooHighValue)
elif sizeof(ValidatorIndex) == 8:
if uint64(value) < VALIDATOR_REGISTRY_LIMIT:
ok(ValidatorIndex(value))
else:
err(ValidatorIndexError.TooHighValue)
else:
doAssert(false, "ValidatorIndex type size is incorrect")

View File

@ -167,4 +167,18 @@ proc getValidatorsLiveness*(epoch: Epoch,
): RestPlainResponse {.
rest, endpoint: "/eth/v1/validator/liveness/{epoch}",
meth: MethodPost.}
## https://ethereum.github.io/beacon-APIs/#/Validator/getLiveness
proc submitBeaconCommitteeSelectionsPlain*(
body: seq[RestBeaconCommitteeSelection]
): RestPlainResponse {.
rest, endpoint: "/eth/v1/validator/beacon_committee_selections",
meth: MethodPost.}
## https://ethereum.github.io/beacon-APIs/#/Validator/submitBeaconCommitteeSelections
proc submitSyncCommitteeSelectionsPlain*(
body: seq[RestSyncCommitteeSelection]
): RestPlainResponse {.
rest, endpoint: "/eth/v1/validator/sync_committee_selections",
meth: MethodPost.}
## https://ethereum.github.io/beacon-APIs/#/Validator/submitSyncCommitteeSelections

View File

@ -646,11 +646,11 @@ template forky(
template withEpochInfo*(x: ForkedEpochInfo, body: untyped): untyped =
case x.kind
of EpochInfoFork.Phase0:
const infoFork {.inject.} = EpochInfoFork.Phase0
const infoFork {.inject, used.} = EpochInfoFork.Phase0
template info: untyped {.inject.} = x.phase0Data
body
of EpochInfoFork.Altair:
const infoFork {.inject.} = EpochInfoFork.Altair
const infoFork {.inject, used.} = EpochInfoFork.Altair
template info: untyped {.inject.} = x.altairData
body
@ -797,11 +797,11 @@ template withBlck*(
case x.kind
of ConsensusFork.Phase0:
const consensusFork {.inject, used.} = ConsensusFork.Phase0
template forkyBlck: untyped {.inject.} = x.phase0Data
template forkyBlck: untyped {.inject, used.} = x.phase0Data
body
of ConsensusFork.Altair:
const consensusFork {.inject, used.} = ConsensusFork.Altair
template forkyBlck: untyped {.inject.} = x.altairData
template forkyBlck: untyped {.inject, used.} = x.altairData
body
of ConsensusFork.Bellatrix:
const consensusFork {.inject, used.} = ConsensusFork.Bellatrix
@ -809,11 +809,11 @@ template withBlck*(
body
of ConsensusFork.Capella:
const consensusFork {.inject, used.} = ConsensusFork.Capella
template forkyBlck: untyped {.inject.} = x.capellaData
template forkyBlck: untyped {.inject, used.} = x.capellaData
body
of ConsensusFork.Deneb:
const consensusFork {.inject, used.} = ConsensusFork.Deneb
template forkyBlck: untyped {.inject.} = x.denebData
template forkyBlck: untyped {.inject, used.} = x.denebData
body
func proposer_index*(x: ForkedBeaconBlock): uint64 =
@ -899,14 +899,15 @@ template withStateAndBlck*(
body
of ConsensusFork.Phase0:
const consensusFork {.inject.} = ConsensusFork.Phase0
template forkyState: untyped {.inject.} = s.phase0Data
template forkyBlck: untyped {.inject.} = b.phase0Data
template forkyState: untyped {.inject, used.} = s.phase0Data
template forkyBlck: untyped {.inject, used.} = b.phase0Data
body
func toBeaconBlockHeader*(
blck: SomeForkyBeaconBlock |
capella_mev.BlindedBeaconBlock | deneb_mev.BlindedBeaconBlock):
BeaconBlockHeader =
capella_mev.BlindedBeaconBlock |
deneb_mev.BlindedBeaconBlock
): BeaconBlockHeader =
## Reduce a given `BeaconBlock` to its `BeaconBlockHeader`.
BeaconBlockHeader(
slot: blck.slot,
@ -918,7 +919,7 @@ func toBeaconBlockHeader*(
template toBeaconBlockHeader*(
blck: SomeForkySignedBeaconBlock): BeaconBlockHeader =
## Reduce a given `SignedBeaconBlock` to its `BeaconBlockHeader`.
blck.message.toBeaconBlockHeader
blck.message.toBeaconBlockHeader()
template toBeaconBlockHeader*(
blckParam: ForkedMsgTrustedSignedBeaconBlock |
@ -926,6 +927,16 @@ template toBeaconBlockHeader*(
## Reduce a given signed beacon block to its `BeaconBlockHeader`.
withBlck(blckParam): forkyBlck.toBeaconBlockHeader()
func toSignedBeaconBlockHeader*(
signedBlock: SomeForkySignedBeaconBlock |
capella_mev.SignedBlindedBeaconBlock |
deneb_mev.SignedBlindedBeaconBlock
): SignedBeaconBlockHeader =
## Reduce a given `SignedBeaconBlock` to its `SignedBeaconBlockHeader`.
SignedBeaconBlockHeader(
message: signedBlock.message.toBeaconBlockHeader(),
signature: signedBlock.signature)
func genesisFork*(cfg: RuntimeConfig): Fork =
Fork(
previous_version: cfg.GENESIS_FORK_VERSION,
@ -1069,7 +1080,7 @@ func readSszForkedSignedBeaconBlock*(
withBlck(result):
readSszBytes(data, forkyBlck)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_fork_data_root
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_fork_data_root
func compute_fork_data_root*(current_version: Version,
genesis_validators_root: Eth2Digest): Eth2Digest =
## Return the 32-byte fork data root for the ``current_version`` and
@ -1081,7 +1092,7 @@ func compute_fork_data_root*(current_version: Version,
genesis_validators_root: genesis_validators_root
))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_fork_digest
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_fork_digest
func compute_fork_digest*(current_version: Version,
genesis_validators_root: Eth2Digest): ForkDigest =
## Return the 4-byte fork digest for the ``current_version`` and

View File

@ -878,7 +878,7 @@ func toCapellaLightClientHeader(
transactions_root: hash_tree_root(payload.transactions),
withdrawals_root: hash_tree_root(payload.withdrawals)),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_INDEX).get)
capella.EXECUTION_PAYLOAD_GINDEX).get)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.0/specs/deneb/light-client/full-node.md#modified-block_to_light_client_header
func toDenebLightClientHeader(
@ -920,7 +920,7 @@ func toDenebLightClientHeader(
transactions_root: hash_tree_root(payload.transactions),
withdrawals_root: hash_tree_root(payload.withdrawals)),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_INDEX).get)
capella.EXECUTION_PAYLOAD_GINDEX).get)
func toDenebLightClientHeader(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)
@ -947,7 +947,7 @@ func toDenebLightClientHeader(
withdrawals_root: hash_tree_root(payload.withdrawals),
excess_blob_gas: payload.excess_blob_gas),
execution_branch: blck.message.body.build_proof(
capella.EXECUTION_PAYLOAD_INDEX).get)
capella.EXECUTION_PAYLOAD_GINDEX).get)
func toLightClientHeader*(
blck: # `SomeSignedBeaconBlock` doesn't work here (Nim 1.6)

View File

@ -26,7 +26,7 @@ export
eth2_merkleization, forks, rlp, ssz_codec
func toEther*(gwei: Gwei): Ether =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/weak-subjectivity.md#constants
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/weak-subjectivity.md#constants
const ETH_TO_GWEI = 1_000_000_000
(gwei div ETH_TO_GWEI).Ether
@ -49,7 +49,7 @@ func shortLog*(v: FinalityCheckpoints): auto =
chronicles.formatIt FinalityCheckpoints: it.shortLog
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#integer_squareroot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#integer_squareroot
func integer_squareroot*(n: SomeInteger): SomeInteger =
## Return the largest integer ``x`` such that ``x**2 <= n``.
doAssert n >= 0'u64
@ -62,7 +62,7 @@ func integer_squareroot*(n: SomeInteger): SomeInteger =
y = (x + n div x) div 2
x
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#is_active_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#is_active_validator
func is_active_validator*(validator: Validator, epoch: Epoch): bool =
## Check if ``validator`` is active.
validator.activation_epoch <= epoch and epoch < validator.exit_epoch
@ -100,23 +100,23 @@ func get_active_validator_indices_len*(
withState(state):
get_active_validator_indices_len(forkyState.data, epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_current_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(state: ForkyBeaconState): Epoch =
## Return the current epoch.
state.slot.epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_current_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_current_epoch
func get_current_epoch*(state: ForkedHashedBeaconState): Epoch =
## Return the current epoch.
withState(state): get_current_epoch(forkyState.data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_previous_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_previous_epoch
func get_previous_epoch*(
state: ForkyBeaconState | ForkedHashedBeaconState): Epoch =
## Return the previous epoch (unless the current epoch is ``GENESIS_EPOCH``).
get_previous_epoch(get_current_epoch(state))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_randao_mix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_randao_mix
func get_randao_mix*(state: ForkyBeaconState, epoch: Epoch): Eth2Digest =
## Return the randao mix at a recent ``epoch``.
state.randao_mixes[epoch mod EPOCHS_PER_HISTORICAL_VECTOR]
@ -138,7 +138,7 @@ func uint_to_bytes*(x: uint32): array[4, byte] = toBytesLE(x)
func uint_to_bytes*(x: uint16): array[2, byte] = toBytesLE(x)
func uint_to_bytes*(x: uint8): array[1, byte] = toBytesLE(x)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_domain
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_domain
func compute_domain*(
domain_type: DomainType,
fork_version: Version,
@ -152,7 +152,7 @@ func compute_domain*(
result[0..3] = domain_type.data
result[4..31] = fork_data_root.data.toOpenArray(0, 27)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_domain
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_domain
func get_domain*(
fork: Fork,
domain_type: DomainType,
@ -173,7 +173,7 @@ func get_domain*(
## of a message.
get_domain(state.fork, domain_type, epoch, state.genesis_validators_root)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_signing_root
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_signing_root
func compute_signing_root*(ssz_object: auto, domain: Eth2Domain): Eth2Digest =
## Return the signing root for the corresponding signing data.
let domain_wrapped_object = SigningData(
@ -182,7 +182,7 @@ func compute_signing_root*(ssz_object: auto, domain: Eth2Domain): Eth2Digest =
)
hash_tree_root(domain_wrapped_object)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_seed
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_seed
func get_seed*(
state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType,
mix: Eth2Digest): Eth2Digest =
@ -201,17 +201,65 @@ func get_seed*(state: ForkyBeaconState, epoch: Epoch, domain_type: DomainType):
epoch + EPOCHS_PER_HISTORICAL_VECTOR - MIN_SEED_LOOKAHEAD - 1)
state.get_seed(epoch, domain_type, mix)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#add_flag
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#add_flag
func add_flag*(flags: ParticipationFlags, flag_index: TimelyFlag): ParticipationFlags =
let flag = ParticipationFlags(1'u8 shl ord(flag_index))
flags or flag
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#has_flag
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#has_flag
func has_flag*(flags: ParticipationFlags, flag_index: TimelyFlag): bool =
let flag = ParticipationFlags(1'u8 shl ord(flag_index))
(flags and flag) == flag
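A small, hedged worked example of the bit arithmetic, assuming `ParticipationFlags` is a `uint8` alias and the `TimelyFlag` ordinals are 0 (source), 1 (target), 2 (head):
```nim
# Hedged example: TIMELY_HEAD has ordinal 2, so its flag bit is 1 shl 2 = 0b100.
let flags = add_flag(ParticipationFlags(0b001), TimelyFlag.TIMELY_HEAD_FLAG_INDEX)
doAssert flags == ParticipationFlags(0b101)
doAssert flags.has_flag(TimelyFlag.TIMELY_SOURCE_FLAG_INDEX)
doAssert not flags.has_flag(TimelyFlag.TIMELY_TARGET_FLAG_INDEX)
```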
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
func create_blob_sidecars*(
forkyBlck: deneb.SignedBeaconBlock,
kzg_proofs: KzgProofs,
blobs: Blobs): seq[BlobSidecar] =
template kzg_commitments: untyped =
forkyBlck.message.body.blob_kzg_commitments
doAssert kzg_proofs.len == blobs.len
doAssert kzg_proofs.len == kzg_commitments.len
var res = newSeqOfCap[BlobSidecar](blobs.len)
let signedBlockHeader = forkyBlck.toSignedBeaconBlockHeader()
for i in 0 ..< blobs.lenu64:
var sidecar = BlobSidecar(
index: i,
blob: blobs[i],
kzg_commitment: kzg_commitments[i],
kzg_proof: kzg_proofs[i],
signed_block_header: signedBlockHeader)
forkyBlck.message.body.build_proof(
kzg_commitment_inclusion_proof_gindex(i),
sidecar.kzg_commitment_inclusion_proof).expect("Valid gindex")
res.add(sidecar)
res
func create_blob_sidecars*(
forkyBlck: deneb_mev.SignedBlindedBeaconBlock,
kzg_proofs: KzgProofs,
blob_roots: BlobRoots): seq[BlindedBlobSidecar] =
template kzg_commitments: untyped =
forkyBlck.message.body.blob_kzg_commitments
doAssert kzg_proofs.len == blob_roots.len
doAssert kzg_proofs.len == kzg_commitments.len
var res = newSeqOfCap[BlindedBlobSidecar](blob_roots.len)
let signedBlockHeader = forkyBlck.toSignedBeaconBlockHeader()
for i in 0 ..< blob_roots.lenu64:
var sidecar = BlindedBlobSidecar(
index: i,
blob_root: blob_roots[i],
kzg_commitment: kzg_commitments[i],
kzg_proof: kzg_proofs[i],
signed_block_header: signedBlockHeader)
forkyBlck.message.body.build_proof(
kzg_commitment_inclusion_proof_gindex(i),
sidecar.kzg_commitment_inclusion_proof).expect("Valid gindex")
res.add(sidecar)
res
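A hedged usage sketch for the unblinded variant above; `signedBlock`, `kzgProofs` and `blobs` are assumed to come from block production (e.g. the engine API blobs bundle):
```nim
# Hedged sketch: builds the sidecars for a freshly signed Deneb block.
let sidecars = create_blob_sidecars(signedBlock, kzgProofs, blobs)
doAssert sidecars.len == blobs.len
for sidecar in sidecars:
  # Every sidecar embeds the same signed block header plus a Merkle proof of
  # its commitment's position within blob_kzg_commitments.
  doAssert sidecar.signed_block_header.message.slot == signedBlock.message.slot
```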
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool =
when update is SomeForkyLightClientUpdateWithSyncCommittee:
update.next_sync_committee_branch !=
@ -219,7 +267,7 @@ template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool =
else:
false
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#is_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#is_finality_update
template is_finality_update*(update: SomeForkyLightClientUpdate): bool =
when update is SomeForkyLightClientUpdateWithFinality:
update.finality_branch !=
@ -227,19 +275,19 @@ template is_finality_update*(update: SomeForkyLightClientUpdate): bool =
else:
false
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
template is_next_sync_committee_known*(store: ForkyLightClientStore): bool =
store.next_sync_committee !=
static(default(typeof(store.next_sync_committee)))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#get_safety_threshold
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#get_safety_threshold
func get_safety_threshold*(store: ForkyLightClientStore): uint64 =
max(
store.previous_max_active_participants,
store.current_max_active_participants
) div 2
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#is_better_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#is_better_update
type LightClientUpdateMetadata* = object
attested_slot*, finalized_slot*, signature_slot*: Slot
has_sync_committee*, has_finality*: bool
@ -330,24 +378,24 @@ template is_better_update*[
new_update: A, old_update: B): bool =
is_better_data(toMeta(new_update), toMeta(old_update))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch =
bootstrap.header.beacon.slot.epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch =
update.attested_header.beacon.slot.epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
func is_merge_transition_complete*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState): bool =
const defaultExecutionPayloadHeader =
default(typeof(state.latest_execution_payload_header))
state.latest_execution_payload_header != defaultExecutionPayloadHeader
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/sync/optimistic.md#helpers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/sync/optimistic.md#helpers
func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
when typeof(blck).kind >= ConsensusFork.Bellatrix:
const defaultExecutionPayload =
@ -356,7 +404,7 @@ func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
else:
false
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#is_merge_transition_block
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#is_merge_transition_block
func is_merge_transition_block(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState,
body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody |
@ -369,7 +417,7 @@ func is_merge_transition_block(
not is_merge_transition_complete(state) and
body.execution_payload != defaultExecutionPayload
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#is_execution_enabled
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#is_execution_enabled
func is_execution_enabled*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState,
body: bellatrix.BeaconBlockBody | bellatrix.TrustedBeaconBlockBody |
@ -380,7 +428,7 @@ func is_execution_enabled*(
deneb.SigVerifiedBeaconBlockBody): bool =
is_merge_transition_block(state, body) or is_merge_transition_complete(state)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 =
# Note: This function is unsafe with respect to overflows and underflows.
let slots_since_genesis = slot - GENESIS_SLOT

View File

@ -1385,13 +1385,13 @@ proc createWallet*(kdfKind: KdfKind,
crypto: crypto,
nextAccount: nextAccount.get(0))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#bls_withdrawal_prefix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/validator.md#bls_withdrawal_prefix
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2digest(k.toRaw())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
bytes
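A hedged illustration of the resulting credential layout, assuming a `ValidatorPubKey` named `pubkey` is in scope: byte 0 is the BLS withdrawal prefix, bytes 1..31 keep the tail of SHA-256(pubkey).
```nim
# Hedged sketch of the credential layout produced above.
let credentials = makeWithdrawalCredentials(pubkey)
doAssert credentials.data[0] == BLS_WITHDRAWAL_PREFIX
```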
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/deposit-contract.md#withdrawal-credentials
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/deposit-contract.md#withdrawal-credentials
proc makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
makeWithdrawalCredentials(k.toPubKey())

View File

@ -15,7 +15,7 @@ import
from ../consensus_object_pools/block_pools_types import VerifierError
export block_pools_types.VerifierError
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
func initialize_light_client_store*(
trusted_block_root: Eth2Digest,
bootstrap: ForkyLightClientBootstrap,
@ -32,8 +32,8 @@ func initialize_light_client_store*(
if not is_valid_merkle_branch(
hash_tree_root(bootstrap.current_sync_committee),
bootstrap.current_sync_committee_branch,
log2trunc(altair.CURRENT_SYNC_COMMITTEE_INDEX),
get_subtree_index(altair.CURRENT_SYNC_COMMITTEE_INDEX),
log2trunc(altair.CURRENT_SYNC_COMMITTEE_GINDEX),
get_subtree_index(altair.CURRENT_SYNC_COMMITTEE_GINDEX),
bootstrap.header.beacon.state_root):
return ResultType.err(VerifierError.Invalid)
@ -42,7 +42,7 @@ func initialize_light_client_store*(
current_sync_committee: bootstrap.current_sync_committee,
optimistic_header: bootstrap.header))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#validate_light_client_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#validate_light_client_update
proc validate_light_client_update*(
store: ForkyLightClientStore,
update: SomeForkyLightClientUpdate,
@ -111,8 +111,8 @@ proc validate_light_client_update*(
if not is_valid_merkle_branch(
finalized_root,
update.finality_branch,
log2trunc(altair.FINALIZED_ROOT_INDEX),
get_subtree_index(altair.FINALIZED_ROOT_INDEX),
log2trunc(altair.FINALIZED_ROOT_GINDEX),
get_subtree_index(altair.FINALIZED_ROOT_GINDEX),
update.attested_header.beacon.state_root):
return err(VerifierError.Invalid)
@ -130,8 +130,8 @@ proc validate_light_client_update*(
if not is_valid_merkle_branch(
hash_tree_root(update.next_sync_committee),
update.next_sync_committee_branch,
log2trunc(altair.NEXT_SYNC_COMMITTEE_INDEX),
get_subtree_index(altair.NEXT_SYNC_COMMITTEE_INDEX),
log2trunc(altair.NEXT_SYNC_COMMITTEE_GINDEX),
get_subtree_index(altair.NEXT_SYNC_COMMITTEE_GINDEX),
update.attested_header.beacon.state_root):
return err(VerifierError.Invalid)
@ -158,7 +158,7 @@ proc validate_light_client_update*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#apply_light_client_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#apply_light_client_update
func apply_light_client_update(
store: var ForkyLightClientStore,
update: SomeForkyLightClientUpdate): bool =
@ -189,7 +189,7 @@ func apply_light_client_update(
didProgress = true
didProgress
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#process_light_client_store_force_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#process_light_client_store_force_update
type
ForceUpdateResult* = enum
NoUpdate,
@ -222,7 +222,7 @@ func process_light_client_store_force_update*(
store.best_valid_update.reset()
res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/sync-protocol.md#process_light_client_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/sync-protocol.md#process_light_client_update
proc process_light_client_update*(
store: var ForkyLightClientStore,
update: SomeForkyLightClientUpdate,

View File

@ -69,25 +69,18 @@ type
# https://github.com/ethereum/builder-specs/blob/534e4f81276b8346d785ed9aba12c4c74b927ec6/specs/deneb/builder.md#blindedblobsidecar
BlindedBlobSidecar* = object
block_root*: Eth2Digest
index*: uint64
slot*: uint64
block_parent_root*: Eth2Digest
proposer_index*: uint64
blob_root*: Eth2Digest
kzg_commitment*: KZGCommitment
kzg_proof*: KZGProof
# https://github.com/ethereum/builder-specs/blob/534e4f81276b8346d785ed9aba12c4c74b927ec6/specs/deneb/builder.md#signedblindedblobsidecar
SignedBlindedBlobSidecar* = object
message*: BlindedBlobSidecar
signature*: ValidatorSig
signed_block_header*: SignedBeaconBlockHeader
kzg_commitment_inclusion_proof*:
array[KZG_COMMITMENT_INCLUSION_PROOF_DEPTH, Eth2Digest]
# https://github.com/ethereum/builder-specs/blob/534e4f81276b8346d785ed9aba12c4c74b927ec6/specs/deneb/builder.md#signedblindedblockcontents
SignedBlindedBeaconBlockContents* = object
signed_blinded_block*: deneb_mev.SignedBlindedBeaconBlock
signed_blinded_blob_sidecars*:
List[SignedBlindedBlobSidecar, Limit MAX_BLOBS_PER_BLOCK]
blinded_blob_sidecars*: List[BlindedBlobSidecar, Limit MAX_BLOBS_PER_BLOCK]
# https://github.com/ethereum/builder-specs/blob/534e4f81276b8346d785ed9aba12c4c74b927ec6/specs/deneb/builder.md#executionpayloadandblobsbundle
ExecutionPayloadAndBlobsBundle* = object

View File

@ -15,7 +15,7 @@ export base
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/p2p-interface.md#topics-and-messages
topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy"
topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy"
topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy"
@ -27,7 +27,7 @@ const
# The spec now includes this as a bare uint64 as `RESP_TIMEOUT`
RESP_TIMEOUT_DUR* = RESP_TIMEOUT.int64.seconds
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#configuration
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#configuration
MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#configuration
@ -63,7 +63,7 @@ func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/p2p-interface.md#topics-and-messages
func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix
@ -89,32 +89,32 @@ func getAttestationTopic*(forkDigest: ForkDigest,
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "beacon_attestation_" & $(subnetId) & "/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/p2p-interface.md#topics-and-messages
func getSyncCommitteeTopic*(forkDigest: ForkDigest,
subcommitteeIdx: SyncSubcommitteeIndex): string =
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "sync_committee_" & $subcommitteeIdx & "/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/p2p-interface.md#topics-and-messages
func getSyncCommitteeContributionAndProofTopic*(forkDigest: ForkDigest): string =
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "sync_committee_contribution_and_proof/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blob_sidecar_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
func getBlobSidecarTopic*(forkDigest: ForkDigest,
subnet_id: SubnetId): string =
subnet_id: BlobId): string =
eth2Prefix(forkDigest) & "blob_sidecar_" & $subnet_id & "/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/validator.md#sidecar
func compute_subnet_for_blob_sidecar*(blob_index: BlobIndex): SubnetId =
SubnetId(blob_index mod BLOB_SIDECAR_SUBNET_COUNT)
func compute_subnet_for_blob_sidecar*(blob_index: BlobIndex): BlobId =
BlobId(blob_index mod BLOB_SIDECAR_SUBNET_COUNT)
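A tiny, hedged worked example of the mapping above: with BLOB_SIDECAR_SUBNET_COUNT = 6, blob indices 0..5 map to subnets 0..5 and index 7 wraps to subnet 1.
```nim
# Hedged arithmetic check of the subnet mapping: 7 mod 6 == 1.
doAssert 7'u64 mod BLOB_SIDECAR_SUBNET_COUNT == 1
```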
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#light_client_finality_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#light_client_finality_update
func getLightClientFinalityUpdateTopic*(forkDigest: ForkDigest): string =
## For broadcasting or obtaining the latest `LightClientFinalityUpdate`.
eth2Prefix(forkDigest) & "light_client_finality_update/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
func getLightClientOptimisticUpdateTopic*(forkDigest: ForkDigest): string =
## For broadcasting or obtaining the latest `LightClientOptimisticUpdate`.
eth2Prefix(forkDigest) & "light_client_optimistic_update/ssz_snappy"
@ -220,5 +220,5 @@ func getSyncSubnets*(
res
iterator blobSidecarTopics*(forkDigest: ForkDigest): string =
for i in 0.SubnetId ..< static(BLOB_SIDECAR_SUBNET_COUNT.SubnetId):
yield getBlobSidecarTopic(forkDigest, i)
for subnet_id in BlobId:
yield getBlobSidecarTopic(forkDigest, subnet_id)

View File

@ -9,6 +9,7 @@
import
std/[strutils, parseutils, tables, typetraits],
chronos/timer,
stew/[byteutils], stint, web3/[ethtypes],
./datatypes/constants
@ -17,7 +18,7 @@ export constants
export stint, ethtypes.toHex, ethtypes.`==`
const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#withdrawal-prefixes
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#withdrawal-prefixes
BLS_WITHDRAWAL_PREFIX*: byte = 0
ETH1_ADDRESS_WITHDRAWAL_PREFIX*: byte = 1
@ -26,21 +27,23 @@ const
# Not used anywhere; only for network preset checking
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION: uint64 = 256
MESSAGE_DOMAIN_INVALID_SNAPPY = 0'u64
TTFB_TIMEOUT = 5'u64
TTFB_TIMEOUT* = 5'u64
MESSAGE_DOMAIN_INVALID_SNAPPY*: array[4, byte] = [0x00, 0x00, 0x00, 0x00]
MESSAGE_DOMAIN_VALID_SNAPPY*: array[4, byte] = [0x01, 0x00, 0x00, 0x00]
type
Version* = distinct array[4, byte]
Eth1Address* = ethtypes.Address
RuntimeConfig* = object
## https://github.com/ethereum/consensus-specs/tree/v1.3.0/configs
## https://github.com/ethereum/consensus-specs/tree/v1.4.0-beta.4/configs
PRESET_BASE*: string
CONFIG_NAME*: string
# Transition
TERMINAL_TOTAL_DIFFICULTY*: UInt256
TERMINAL_BLOCK_HASH*: BlockHash
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH*: Epoch # Not actively used, but part of the spec
# Genesis
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT*: uint64
@ -75,14 +78,37 @@ type
# Fork choice
# TODO PROPOSER_SCORE_BOOST*: uint64
# TODO REORG_HEAD_WEIGHT_THRESHOLD*: uint64
# TODO REORG_PARENT_WEIGHT_THRESHOLD*: uint64
# TODO REORG_MAX_EPOCHS_SINCE_FINALIZATION*: uint64
# Deposit contract
DEPOSIT_CHAIN_ID*: uint64
DEPOSIT_NETWORK_ID*: uint64
DEPOSIT_CONTRACT_ADDRESS*: Eth1Address
# Not actively used, but part of the spec
TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH*: Epoch
# Networking
# TODO GOSSIP_MAX_SIZE*: uint64
# TODO MAX_REQUEST_BLOCKS*: uint64
# TODO EPOCHS_PER_SUBNET_SUBSCRIPTION*: uint64
# TODO MIN_EPOCHS_FOR_BLOCK_REQUESTS*: uint64
# TODO MAX_CHUNK_SIZE*: uint64
# TODO TTFB_TIMEOUT*: uint64
# TODO RESP_TIMEOUT*: uint64
# TODO ATTESTATION_PROPAGATION_SLOT_RANGE*: uint64
# TODO MAXIMUM_GOSSIP_CLOCK_DISPARITY*: uint64
# TODO MESSAGE_DOMAIN_INVALID_SNAPPY*: array[4, byte]
# TODO MESSAGE_DOMAIN_VALID_SNAPPY*: array[4, byte]
# TODO SUBNETS_PER_NODE*: uint64
# TODO ATTESTATION_SUBNET_COUNT*: uint64
# TODO ATTESTATION_SUBNET_EXTRA_BITS*: uint64
# TODO ATTESTATION_SUBNET_PREFIX_BITS*: uint64
# Deneb
# TODO MAX_REQUEST_BLOCKS_DENEB*: uint64
# TODO MAX_REQUEST_BLOB_SIDECARS*: uint64
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS*: uint64
# TODO BLOB_SIDECAR_SUBNET_COUNT*: uint64
PresetFile* = object
values*: Table[string, string]
@ -203,7 +229,46 @@ when const_preset == "mainnet":
# Ethereum PoW Mainnet
DEPOSIT_CHAIN_ID: 1,
DEPOSIT_NETWORK_ID: 1,
DEPOSIT_CONTRACT_ADDRESS: default(Eth1Address)
DEPOSIT_CONTRACT_ADDRESS: default(Eth1Address),
# Networking
# ---------------------------------------------------------------
# `10 * 2**20` (= 10485760, 10 MiB)
# TODO GOSSIP_MAX_SIZE: 10485760,
# `2**10` (= 1024)
# TODO MAX_REQUEST_BLOCKS: 1024,
# `2**8` (= 256)
# TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256,
# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months)
# TODO MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024,
# `10 * 2**20` (=10485760, 10 MiB)
# TODO MAX_CHUNK_SIZE: 10485760,
# 5s
# TODO TTFB_TIMEOUT: 5,
# 10s
# TODO RESP_TIMEOUT: 10,
# TODO ATTESTATION_PROPAGATION_SLOT_RANGE: 32,
# 500ms
# TODO MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500,
# TODO MESSAGE_DOMAIN_INVALID_SNAPPY: [byte 0x00, 0x00, 0x00, 0x00],
# TODO MESSAGE_DOMAIN_VALID_SNAPPY: [byte 0x01, 0x00, 0x00, 0x00],
# 2 subnets per node
# TODO SUBNETS_PER_NODE: 2,
# `2**6` (= 64)
# TODO ATTESTATION_SUBNET_COUNT: 64,
# TODO ATTESTATION_SUBNET_EXTRA_BITS: 0,
# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
# TODO ATTESTATION_SUBNET_PREFIX_BITS: 6,
# Deneb
# `2**7` (=128)
# TODO MAX_REQUEST_BLOCKS_DENEB: 128,
# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
# TODO MAX_REQUEST_BLOB_SIDECARS: 768,
# `2**12` (= 4096 epochs, ~18 days)
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096,
# `6`
# TODO BLOB_SIDECAR_SUBNET_COUNT: 6,
)
elif const_preset == "gnosis":
@ -309,7 +374,46 @@ elif const_preset == "gnosis":
# Gnosis PoW Mainnet
DEPOSIT_CHAIN_ID: 100,
DEPOSIT_NETWORK_ID: 100,
DEPOSIT_CONTRACT_ADDRESS: default(Eth1Address)
DEPOSIT_CONTRACT_ADDRESS: default(Eth1Address),
# Networking
# ---------------------------------------------------------------
# `10 * 2**20` (= 10485760, 10 MiB)
# TODO GOSSIP_MAX_SIZE: 10485760,
# `2**10` (= 1024)
# TODO MAX_REQUEST_BLOCKS: 1024,
# `2**8` (= 256)
# TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256,
# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months)
# TODO MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024,
# `10 * 2**20` (=10485760, 10 MiB)
# TODO MAX_CHUNK_SIZE: 10485760,
# 5s
# TODO TTFB_TIMEOUT: 5,
# 10s
# TODO RESP_TIMEOUT: 10,
# TODO ATTESTATION_PROPAGATION_SLOT_RANGE: 32,
# 500ms
# TODO MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500,
# TODO MESSAGE_DOMAIN_INVALID_SNAPPY: [byte 0x00, 0x00, 0x00, 0x00],
# TODO MESSAGE_DOMAIN_VALID_SNAPPY: [byte 0x01, 0x00, 0x00, 0x00],
# 2 subnets per node
# TODO SUBNETS_PER_NODE: 2,
# `2**6` (= 64)
# TODO ATTESTATION_SUBNET_COUNT: 64,
# TODO ATTESTATION_SUBNET_EXTRA_BITS: 0,
# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
# TODO ATTESTATION_SUBNET_PREFIX_BITS: 6,
# Deneb
# `2**7` (=128)
# TODO MAX_REQUEST_BLOCKS_DENEB: 128,
# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
# TODO MAX_REQUEST_BLOB_SIDECARS: 768,
# `2**14` (= 16384 epochs, ~15 days)
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384,
# `6`
# TODO BLOB_SIDECAR_SUBNET_COUNT: 6,
)
elif const_preset == "minimal":
@ -412,7 +516,46 @@ elif const_preset == "minimal":
DEPOSIT_CHAIN_ID: 5,
DEPOSIT_NETWORK_ID: 5,
# Configured on a per testnet basis
DEPOSIT_CONTRACT_ADDRESS: default(Eth1Address)
DEPOSIT_CONTRACT_ADDRESS: default(Eth1Address),
# Networking
# ---------------------------------------------------------------
# `10 * 2**20` (= 10485760, 10 MiB)
# TODO GOSSIP_MAX_SIZE: 10485760,
# `2**10` (= 1024)
# TODO MAX_REQUEST_BLOCKS: 1024,
# `2**8` (= 256)
# TODO EPOCHS_PER_SUBNET_SUBSCRIPTION: 256,
# [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272)
# TODO MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272,
# `10 * 2**20` (=10485760, 10 MiB)
# TODO MAX_CHUNK_SIZE: 10485760,
# 5s
# TODO TTFB_TIMEOUT: 5,
# 10s
# TODO RESP_TIMEOUT: 10,
# TODO ATTESTATION_PROPAGATION_SLOT_RANGE: 32,
# 500ms
# TODO MAXIMUM_GOSSIP_CLOCK_DISPARITY: 500,
# TODO MESSAGE_DOMAIN_INVALID_SNAPPY: [byte 0x00, 0x00, 0x00, 0x00],
# TODO MESSAGE_DOMAIN_VALID_SNAPPY: [byte 0x01, 0x00, 0x00, 0x00],
# 2 subnets per node
# TODO SUBNETS_PER_NODE: 2,
# `2**6` (= 64)
# TODO ATTESTATION_SUBNET_COUNT: 64,
# TODO ATTESTATION_SUBNET_EXTRA_BITS: 0,
# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
# TODO ATTESTATION_SUBNET_PREFIX_BITS: 6,
# Deneb
# `2**7` (=128)
# TODO MAX_REQUEST_BLOCKS_DENEB: 128,
# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
# TODO MAX_REQUEST_BLOB_SIDECARS: 768,
# `2**12` (= 4096 epochs, ~18 days)
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096,
# `6`
# TODO BLOB_SIDECAR_SUBNET_COUNT: 6,
)
else:
@ -443,6 +586,10 @@ else:
const SLOTS_PER_SYNC_COMMITTEE_PERIOD* =
SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/p2p-interface.md#configuration
func MIN_EPOCHS_FOR_BLOCK_REQUESTS*(cfg: RuntimeConfig): uint64 =
cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY + cfg.CHURN_LIMIT_QUOTIENT div 2
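A hedged worked example of the derivation above, plugging in the mainnet values assumed in the networking comments (MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256, CHURN_LIMIT_QUOTIENT = 65536):
```nim
let
  minValidatorWithdrawabilityDelay = 256'u64   # assumed mainnet value
  churnLimitQuotient = 65536'u64               # assumed mainnet value
# 256 + 65536 div 2 = 33024 epochs, the "~5 months" quoted above
echo minValidatorWithdrawabilityDelay + churnLimitQuotient div 2  # 33024
```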
func parse(T: type uint64, input: string): T {.raises: [ValueError].} =
var res: BiggestUInt
if input.len > 2 and input[0] == '0' and input[1] == 'x':
@ -457,6 +604,10 @@ func parse(T: type uint64, input: string): T {.raises: [ValueError].} =
template parse(T: type byte, input: string): T =
byte parse(uint64, input)
func parse(T: type array[4, byte], input: string): T
{.raises: [ValueError].} =
hexToByteArray(input, 4)
func parse(T: type Version, input: string): T
{.raises: [ValueError].} =
Version hexToByteArray(input, 4)
@ -517,27 +668,30 @@ proc readRuntimeConfig*(
# Certain config keys are baked into the binary at compile-time
# and cannot be overridden via config.
template checkCompatibility(constValue: untyped, name: string): untyped =
if values.hasKey(name):
try:
let value = parse(typeof(constValue), values[name])
when constValue is distinct:
if distinctBase(value) != distinctBase(constValue):
raise (ref PresetFileError)(msg:
"Cannot override config" &
" (compiled: " & name & "=" & $distinctBase(constValue) &
" - config: " & name & "=" & values[name] & ")")
else:
if value != constValue:
raise (ref PresetFileError)(msg:
"Cannot override config" &
" (compiled: " & name & "=" & $constValue &
" - config: " & name & "=" & values[name] & ")")
values.del name
except ValueError:
raise (ref PresetFileError)(msg: "Unable to parse " & name)
template checkCompatibility(constValue: untyped): untyped =
block:
const name = astToStr(constValue)
if values.hasKey(name):
try:
let value = parse(typeof(constValue), values[name])
when constValue is distinct:
if distinctBase(value) != distinctBase(constValue):
raise (ref PresetFileError)(msg:
"Cannot override config" &
" (compiled: " & name & "=" & $distinctBase(constValue) &
" - config: " & name & "=" & values[name] & ")")
else:
if value != constValue:
raise (ref PresetFileError)(msg:
"Cannot override config" &
" (compiled: " & name & "=" & $constValue &
" - config: " & name & "=" & values[name] & ")")
values.del name
except ValueError:
raise (ref PresetFileError)(msg: "Unable to parse " & name)
checkCompatibility(constValue, name)
checkCompatibility SECONDS_PER_SLOT
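A minimal standalone sketch of the same idea as the `checkCompatibility` template above, assuming nothing beyond the standard library: a value parsed from the runtime config table is compared against the compiled-in constant, the config is rejected on mismatch, and a matching key is consumed.
```nim
import std/[tables, strutils]

proc checkCompiledConstant(values: var Table[string, string],
                           name: string, compiled: uint64) =
  ## Reject a config that tries to override a baked-in constant.
  if name in values:
    let parsed = parseBiggestUInt(values[name]).uint64
    if parsed != compiled:
      raise newException(ValueError,
        "Cannot override config (compiled: " & name & "=" & $compiled &
        " - config: " & name & "=" & values[name] & ")")
    values.del name                     # consumed, like the original template

var cfg = {"SECONDS_PER_SLOT": "12"}.toTable
checkCompiledConstant(cfg, "SECONDS_PER_SLOT", 12'u64)  # matches, key removed
```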
@ -578,7 +732,6 @@ proc readRuntimeConfig*(
checkCompatibility TARGET_AGGREGATORS_PER_COMMITTEE
checkCompatibility EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION
checkCompatibility ATTESTATION_SUBNET_COUNT
checkCompatibility DOMAIN_BEACON_PROPOSER
checkCompatibility DOMAIN_BEACON_ATTESTER
@ -595,19 +748,29 @@ proc readRuntimeConfig*(
checkCompatibility MAX_REQUEST_BLOCKS
checkCompatibility EPOCHS_PER_SUBNET_SUBSCRIPTION
checkCompatibility MAX_CHUNK_SIZE
checkCompatibility TTFB_TIMEOUT
checkCompatibility RESP_TIMEOUT
checkCompatibility ATTESTATION_PROPAGATION_SLOT_RANGE
checkCompatibility MAXIMUM_GOSSIP_CLOCK_DISPARITY.milliseconds.uint64,
"MAXIMUM_GOSSIP_CLOCK_DISPARITY"
checkCompatibility MESSAGE_DOMAIN_INVALID_SNAPPY
checkCompatibility MESSAGE_DOMAIN_VALID_SNAPPY
checkCompatibility SUBNETS_PER_NODE
checkCompatibility ATTESTATION_SUBNET_COUNT
checkCompatibility ATTESTATION_SUBNET_EXTRA_BITS
checkCompatibility ATTESTATION_SUBNET_PREFIX_BITS
checkCompatibility BLOB_SIDECAR_SUBNET_COUNT
checkCompatibility MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
checkCompatibility RESP_TIMEOUT
checkCompatibility TTFB_TIMEOUT
checkCompatibility MESSAGE_DOMAIN_INVALID_SNAPPY
checkCompatibility MAX_REQUEST_BLOCKS_DENEB
checkCompatibility ATTESTATION_PROPAGATION_SLOT_RANGE
checkCompatibility MAX_REQUEST_BLOCKS_DENEB
checkCompatibility MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK,
"MAX_REQUEST_BLOB_SIDECARS"
checkCompatibility BLOB_SIDECAR_SUBNET_COUNT
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/fork-choice.md#configuration
# Isn't being used as a preset in the usual way: at any time, there's one correct value
checkCompatibility PROPOSER_SCORE_BOOST
checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD
checkCompatibility REORG_PARENT_WEIGHT_THRESHOLD
checkCompatibility REORG_MAX_EPOCHS_SINCE_FINALIZATION
for name, field in cfg.fieldPairs():
if name in values:
@ -621,6 +784,10 @@ proc readRuntimeConfig*(
raise (ref PresetIncompatibleError)(
msg: "Config not compatible with binary, compile with -d:const_preset=" & cfg.PRESET_BASE)
# Requires initialized `cfg`
checkCompatibility cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS,
"MIN_EPOCHS_FOR_BLOCK_REQUESTS"
var unknowns: seq[string]
for name in values.keys:
unknowns.add name
@ -638,10 +805,6 @@ template name*(cfg: RuntimeConfig): string =
else:
const_preset
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/p2p-interface.md#configuration
func MIN_EPOCHS_FOR_BLOCK_REQUESTS*(cfg: RuntimeConfig): uint64 =
cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY + cfg.CHURN_LIMIT_QUOTIENT div 2
func defaultLightClientDataMaxPeriods*(cfg: RuntimeConfig): uint64 =
const epochsPerPeriod = EPOCHS_PER_SYNC_COMMITTEE_PERIOD
let maxEpochs = cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS

View File

@ -5,8 +5,8 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/altair.yaml
# Gnosis preset - Altair
# https://github.com/gnosischain/configs/blob/b8ae3091439131949a994d638d730b5a5cb60f7a/presets/gnosis/altair.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Gnosis preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/bellatrix.yaml
# https://github.com/gnosischain/configs/blob/b8ae3091439131949a994d638d730b5a5cb60f7a/presets/gnosis/bellatrix.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Gnosis preset - Capella
# https://github.com/gnosischain/configs/blob/main/presets/gnosis/capella.yaml
# https://github.com/gnosischain/configs/blob/b8ae3091439131949a994d638d730b5a5cb60f7a/presets/gnosis/capella.yaml
const
# Max operations per block
# ---------------------------------------------------------------

View File

@ -5,12 +5,14 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/presets/mainnet/deneb.yaml
# Gnosis preset - Deneb
# https://github.com/gnosischain/configs/blob/b8ae3091439131949a994d638d730b5a5cb60f7a/presets/gnosis/deneb.yaml
const
# `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096
# `uint64(2**12)` (= 4096)
MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 4096
# `uint64(2**2)` (= 4)
MAX_BLOBS_PER_BLOCK*: uint64 = 4
# `uint64(6)`
MAX_BLOBS_PER_BLOCK*: uint64 = 6
# `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 17

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Gnosis preset - Phase0
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.1/presets/mainnet/phase0.yaml
# https://github.com/gnosischain/configs/blob/b8ae3091439131949a994d638d730b5a5cb60f7a/presets/gnosis/phase0.yaml
const
#

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/mainnet/altair.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/mainnet/bellatrix.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/mainnet/capella.yaml
const
# Max operations per block
# ---------------------------------------------------------------

View File

@ -6,11 +6,13 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Mainnet preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/mainnet/deneb.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/mainnet/deneb.yaml
const
# `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096
# `uint64(2**12)` (= 4096)
MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 4096
# `uint64(2**2)` (= 4)
# `uint64(6)`
MAX_BLOBS_PER_BLOCK*: uint64 = 6
# `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 12 = 17
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 17
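A hedged check of the depth arithmetic quoted in the comment above; the generalized-index depth of 4 for `blob_kzg_commitments` is taken from that comment, the rest is plain math:
```nim
import std/math

let maxBlobCommitmentsPerBlock = 4096.0       # mainnet limit from above
# floorlog2(gindex) + 1 (length mix-in) + ceillog2(limit) = 4 + 1 + 12
echo 4 + 1 + int(ceil(log2(maxBlobCommitmentsPerBlock)))  # 17
# The minimal preset's customized limit of 16 gives 4 + 1 + 4 = 9 instead.
```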

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Minimal preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/minimal/altair.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/minimal/altair.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Minimal preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/minimal/bellatrix.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/minimal/bellatrix.yaml
const
# Updated penalty values
# ---------------------------------------------------------------

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Minimal preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/minimal/capella.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/minimal/capella.yaml
const
# Max operations per block
# ---------------------------------------------------------------

View File

@ -6,11 +6,13 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# Minimal preset - Deneb
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/presets/minimal/deneb.yaml
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/presets/minimal/deneb.yaml
const
# [customized]
# `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB*: uint64 = 4096
# [customized]
MAX_BLOB_COMMITMENTS_PER_BLOCK*: uint64 = 16
# `uint64(6)`
MAX_BLOBS_PER_BLOCK*: uint64 = 6
# [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9
KZG_COMMITMENT_INCLUSION_PROOF_DEPTH* = 9

View File

@ -44,7 +44,7 @@ func compute_slot_signing_root*(
fork, DOMAIN_SELECTION_PROOF, epoch, genesis_validators_root)
compute_signing_root(slot, domain)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/validator.md#aggregation-selection
func get_slot_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
privkey: ValidatorPrivKey): CookedSig =
@ -88,16 +88,7 @@ func compute_block_signing_root*(
fork, DOMAIN_BEACON_PROPOSER, epoch, genesis_validators_root)
compute_signing_root(blck, domain)
func compute_blob_signing_root(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
blob: BlobSidecar): Eth2Digest =
let
epoch = epoch(slot)
domain = get_domain(fork, DOMAIN_BLOB_SIDECAR, epoch,
genesis_validators_root)
compute_signing_root(blob, domain)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#signature
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/validator.md#signature
func get_block_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
root: Eth2Digest, privkey: ValidatorPrivKey): CookedSig =
@ -106,15 +97,6 @@ func get_block_signature*(
blsSign(privkey, signing_root.data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/deneb/validator.md#constructing-the-signedblobsidecars
proc get_blob_sidecar_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
blob: BlobSidecar, privkey: ValidatorPrivKey): CookedSig =
let signing_root = compute_blob_signing_root(
fork, genesis_validators_root, slot, blob)
blsSign(privkey, signing_root.data)
proc verify_block_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
blck: Eth2Digest | SomeForkyBeaconBlock | BeaconBlockHeader,
@ -126,17 +108,6 @@ proc verify_block_signature*(
blsVerify(pubkey, signing_root.data, signature)
proc verify_blob_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, slot: Slot,
blobSidecar: BlobSidecar,
pubkey: ValidatorPubKey | CookedPubKey, signature: SomeSig): bool =
withTrust(signature):
let
signing_root = compute_blob_signing_root(
fork, genesis_validators_root, slot, blobSidecar)
blsVerify(pubkey, signing_root.data, signature)
func compute_aggregate_and_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest,
aggregate_and_proof: AggregateAndProof): Eth2Digest =
@ -146,7 +117,7 @@ func compute_aggregate_and_proof_signing_root*(
fork, DOMAIN_AGGREGATE_AND_PROOF, epoch, genesis_validators_root)
compute_signing_root(aggregate_and_proof, domain)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/validator.md#broadcast-aggregate
func get_aggregate_and_proof_signature*(fork: Fork, genesis_validators_root: Eth2Digest,
aggregate_and_proof: AggregateAndProof,
privkey: ValidatorPrivKey): CookedSig =
@ -174,7 +145,7 @@ func compute_attestation_signing_root*(
fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
compute_signing_root(attestation_data, domain)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#aggregate-signature
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/validator.md#aggregate-signature
func get_attestation_signature*(
fork: Fork, genesis_validators_root: Eth2Digest,
attestation_data: AttestationData,
@ -264,7 +235,7 @@ proc verify_voluntary_exit_signature*(
blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/validator.md#prepare-sync-committee-message
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/validator.md#prepare-sync-committee-message
func compute_sync_committee_message_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest =
@ -299,7 +270,7 @@ proc verify_sync_committee_signature*(
blsFastAggregateVerify(pubkeys, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/validator.md#aggregation-selection
func compute_sync_committee_selection_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest =
@ -348,7 +319,7 @@ proc get_contribution_and_proof_signature*(
blsSign(privkey, signing_root.data)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/validator.md#aggregation-selection
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/validator.md#aggregation-selection
func is_sync_committee_aggregator*(signature: ValidatorSig): bool =
let
signatureDigest = eth2digest(signature.blob)
@ -388,7 +359,7 @@ proc verify_builder_signature*(
let signing_root = compute_builder_signing_root(fork, msg)
blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#new-process_bls_to_execution_change
func compute_bls_to_execution_change_signing_root*(
genesisFork: Fork, genesis_validators_root: Eth2Digest,
msg: BLSToExecutionChange): Eth2Digest =

View File

@ -82,7 +82,7 @@ func aggregateAttesters(
# Aggregation spec requires non-empty collection
# - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
# Consensus specs require at least one attesting index in attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
return err("aggregateAttesters: no attesting indices")
let
@ -108,7 +108,7 @@ func aggregateAttesters(
# Aggregation spec requires non-empty collection
# - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
# Consensus specs require at least one attesting index in attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
return err("aggregateAttesters: no attesting indices")
var attestersAgg{.noinit.}: AggregatePublicKey

View File

@ -6,7 +6,7 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# State transition, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
#
# The entry point is `state_transition` which is at the bottom of the file!
#
@ -70,7 +70,7 @@ proc verify_block_signature(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
func verifyStateRoot(
state: ForkyBeaconState,
blck: ForkyBeaconBlock | ForkySigVerifiedBeaconBlock):
@ -126,7 +126,7 @@ func clear_epoch_from_cache(cache: var StateCache, epoch: Epoch) =
for slot in epoch.slots():
cache.beacon_proposer_indices.del slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#beacon-chain-state-transition-function
proc advance_slot(
cfg: RuntimeConfig,
state: var ForkyBeaconState, previous_slot_state_root: Eth2Digest,
@ -373,14 +373,14 @@ func partialBeaconBlock*(
when consensusFork >= ConsensusFork.Bellatrix:
res.body.execution_payload = execution_payload.executionPayload
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/validator.md#block-proposal
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/validator.md#block-proposal
when consensusFork >= ConsensusFork.Capella:
res.body.bls_to_execution_changes =
validator_changes.bls_to_execution_changes
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/validator.md#constructing-the-beaconblockbody
when consensusFork >= ConsensusFork.Deneb:
res.body.blob_kzg_commitments = execution_payload.kzgs
res.body.blob_kzg_commitments = execution_payload.blobsBundle.commitments
res
@ -403,7 +403,8 @@ proc makeBeaconBlock*(
# removed if we don't use invalid signatures there
verificationFlags: UpdateFlags,
transactions_root: Opt[Eth2Digest],
execution_payload_root: Opt[Eth2Digest]):
execution_payload_root: Opt[Eth2Digest],
kzg_commitments: Opt[KzgCommitments]):
Result[ForkedBeaconBlock, cstring] =
## Create a block for the given state. The latest block applied to it will
## be used for the parent_root value, and the slot will be taken from
@ -428,7 +429,7 @@ proc makeBeaconBlock*(
rollback(state)
return err(res.error())
# Override for MEV
# Override for Builder API
if transactions_root.isSome and execution_payload_root.isSome:
withState(state):
when consensusFork < ConsensusFork.Capella:
@ -438,7 +439,7 @@ proc makeBeaconBlock*(
forkyState.data.latest_execution_payload_header.transactions_root =
transactions_root.get
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#beaconblockbody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#beaconblockbody
# Effectively hash_tree_root(ExecutionPayload) with the beacon block
# body, with the execution payload replaced by the execution payload
# header. htr(payload) == htr(payload header), so substitute.
@ -455,6 +456,9 @@ proc makeBeaconBlock*(
execution_payload_root.get,
hash_tree_root(validator_changes.bls_to_execution_changes)])
elif consensusFork == ConsensusFork.Deneb:
forkyState.data.latest_execution_payload_header.transactions_root =
transactions_root.get
when executionPayload is deneb.ExecutionPayloadForSigning:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/deneb/beacon-chain.md#beaconblockbody
forkyState.data.latest_block_header.body_root = hash_tree_root(
@ -469,7 +473,7 @@ proc makeBeaconBlock*(
hash_tree_root(sync_aggregate),
execution_payload_root.get,
hash_tree_root(validator_changes.bls_to_execution_changes),
hash_tree_root(executionPayload.kzgs)
hash_tree_root(kzg_commitments.get)
])
else:
raiseAssert "Attempt to use non-Deneb payload with post-Deneb state"
@ -517,7 +521,8 @@ proc makeBeaconBlock*(
attestations, deposits, validator_changes, sync_aggregate,
executionPayload, rollback, cache,
verificationFlags = {}, transactions_root = Opt.none Eth2Digest,
execution_payload_root = Opt.none Eth2Digest)
execution_payload_root = Opt.none Eth2Digest,
kzg_commitments = Opt.none KzgCommitments)
proc makeBeaconBlock*(
cfg: RuntimeConfig, state: var ForkedHashedBeaconState,
@ -536,4 +541,5 @@ proc makeBeaconBlock*(
executionPayload, rollback, cache,
verificationFlags = verificationFlags,
transactions_root = Opt.none Eth2Digest,
execution_payload_root = Opt.none Eth2Digest)
execution_payload_root = Opt.none Eth2Digest,
kzg_commitments = Opt.none KzgCommitments)

View File

@ -6,10 +6,10 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
# State transition - block processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/deneb/beacon-chain.md#block-processing
#
# The entry point is `process_block` which is at the bottom of this file.
@ -38,7 +38,7 @@ from ./datatypes/capella import
export extras, phase0, altair
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#block-header
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#block-header
func process_block_header*(
state: var ForkyBeaconState, blck: SomeForkyBeaconBlock,
flags: UpdateFlags, cache: var StateCache): Result[void, cstring] =
@ -80,7 +80,7 @@ func `xor`[T: array](a, b: T): T =
for i in 0..<result.len:
result[i] = a[i] xor b[i]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#randao
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#randao
proc process_randao(
state: var ForkyBeaconState, body: SomeForkyBeaconBlockBody,
flags: UpdateFlags, cache: var StateCache): Result[void, cstring] =
@ -113,7 +113,7 @@ proc process_randao(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#eth1-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#eth1-data
func process_eth1_data(
state: var ForkyBeaconState,
body: SomeForkyBeaconBlockBody): Result[void, cstring] =
@ -126,14 +126,14 @@ func process_eth1_data(
state.eth1_data = body.eth1_data
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#is_slashable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#is_slashable_validator
func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
# Check if ``validator`` is slashable.
(not validator.slashed) and
(validator.activation_epoch <= epoch) and
(epoch < validator.withdrawable_epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#proposer-slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#proposer-slashings
proc check_proposer_slashing*(
state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing,
flags: UpdateFlags):
@ -182,7 +182,7 @@ proc check_proposer_slashing*(
withState(state):
check_proposer_slashing(forkyState.data, proposer_slashing, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#proposer-slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#proposer-slashings
proc process_proposer_slashing*(
cfg: RuntimeConfig, state: var ForkyBeaconState,
proposer_slashing: SomeProposerSlashing, flags: UpdateFlags,
@ -192,7 +192,7 @@ proc process_proposer_slashing*(
? slash_validator(cfg, state, proposer_index, cache)
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#is_slashable_attestation_data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#is_slashable_attestation_data
func is_slashable_attestation_data(
data_1: AttestationData, data_2: AttestationData): bool =
## Check if ``data_1`` and ``data_2`` are slashable according to Casper FFG
@ -204,7 +204,7 @@ func is_slashable_attestation_data(
(data_1.source.epoch < data_2.source.epoch and
data_2.target.epoch < data_1.target.epoch)
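A hedged, plain-integer example of the surround-vote branch shown above: an attestation with source 2 and target 5 is surrounded by one with source 1 and target 6, so the pair is slashable.
```nim
let
  source1 = 1'u64; target1 = 6'u64     # illustrative epochs only
  source2 = 2'u64; target2 = 5'u64
echo source1 < source2 and target2 < target1  # true -> surround vote
```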
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#attester-slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#attester-slashings
proc check_attester_slashing*(
state: ForkyBeaconState,
attester_slashing: SomeAttesterSlashing,
@ -246,7 +246,7 @@ proc check_attester_slashing*(
withState(state):
check_attester_slashing(forkyState.data, attester_slashing, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#attester-slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#attester-slashings
proc process_attester_slashing*(
cfg: RuntimeConfig,
state: var ForkyBeaconState,
@ -330,7 +330,7 @@ proc process_deposit*(cfg: RuntimeConfig,
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/deneb/beacon-chain.md#modified-process_voluntary_exit
proc check_voluntary_exit*(
cfg: RuntimeConfig,
@ -388,7 +388,7 @@ proc check_voluntary_exit*(
withState(state):
check_voluntary_exit(cfg, forkyState.data, signed_voluntary_exit, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#voluntary-exits
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#voluntary-exits
proc process_voluntary_exit*(
cfg: RuntimeConfig,
state: var ForkyBeaconState,
@ -418,8 +418,8 @@ proc process_bls_to_execution_change*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#operations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#modified-process_operations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#operations
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#modified-process_operations
proc process_operations(cfg: RuntimeConfig,
state: var ForkyBeaconState,
body: SomeForkyBeaconBlockBody,
@ -452,7 +452,7 @@ proc process_operations(cfg: RuntimeConfig,
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#sync-aggregate-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#sync-aggregate-processing
func get_participant_reward*(total_active_balance: Gwei): Gwei =
let
total_active_increments =
@ -465,11 +465,11 @@ func get_participant_reward*(total_active_balance: Gwei): Gwei =
WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH
max_participant_rewards div SYNC_COMMITTEE_SIZE
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#sync-aggregate-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#sync-aggregate-processing
func get_proposer_reward*(participant_reward: Gwei): Gwei =
participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
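A hedged arithmetic sketch of the proposer share above, assuming the mainnet weights PROPOSER_WEIGHT = 8 and WEIGHT_DENOMINATOR = 64; the proposer keeps 8/56, i.e. one seventh of each included participant reward.
```nim
let participantReward = 700'u64                      # illustrative Gwei amount
echo participantReward * 8'u64 div (64'u64 - 8'u64)  # 100
```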
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#sync-aggregate-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#sync-aggregate-processing
proc process_sync_aggregate*(
state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState),
@ -673,7 +673,7 @@ proc process_execution_payload*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#new-process_withdrawals
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#new-process_withdrawals
func process_withdrawals*(
state: var (capella.BeaconState | deneb.BeaconState),
payload: capella.ExecutionPayload | deneb.ExecutionPayload):
@ -715,39 +715,10 @@ func process_withdrawals*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#tx_peek_blob_versioned_hashes
func tx_peek_blob_versioned_hashes(opaque_tx: Transaction):
Result[seq[VersionedHash], cstring] =
## This function retrieves the hashes from the `SignedBlobTransaction` as
## defined in Deneb, using SSZ offsets. Offsets are little-endian `uint32`
## values, as defined in the SSZ specification. See the full details of
## `blob_versioned_hashes` offset calculation.
if not (opaque_tx[0] == BLOB_TX_TYPE):
return err("tx_peek_blob_versioned_hashes: invalid opaque transaction type")
let message_offset = 1 + bytes_to_uint32(opaque_tx.asSeq.toOpenArray(1, 4))
if opaque_tx.lenu64 < (message_offset + 192).uint64:
return err("tx_peek_blob_versioned_hashes: opaque transaction too short")
# field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 + 32 = 188
let blob_versioned_hashes_offset = (
message_offset + bytes_to_uint32(
opaque_tx[(message_offset + 188) ..< (message_offset + 192)]))
if blob_versioned_hashes_offset.uint64 > high(int).uint64:
return err("tx_peek_blob_versioned_hashes: blob_versioned_hashes_offset too high")
var res: seq[VersionedHash]
for x in countup(blob_versioned_hashes_offset.int, len(opaque_tx) - 1, 32):
var versionedHash: VersionedHash
versionedHash[0 .. 31] = opaque_tx.asSeq.toOpenArray(x, x + 31)
res.add versionedHash
ok res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/deneb/beacon-chain.md#kzg_commitment_to_versioned_hash
func kzg_commitment_to_versioned_hash*(
kzg_commitment: KzgCommitment): VersionedHash =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/deneb/beacon-chain.md#blob
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/deneb/beacon-chain.md#blob
const VERSIONED_HASH_VERSION_KZG = 0x01'u8
var res: VersionedHash
@ -755,31 +726,6 @@ func kzg_commitment_to_versioned_hash*(
res[1 .. 31] = eth2digest(kzg_commitment).data.toOpenArray(1, 31)
res
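A hedged illustration of the versioned-hash byte layout only (no real hashing here); `digest` is a stand-in for `eth2digest(kzg_commitment).data`:
```nim
var digest: array[32, byte]            # pretend SHA-256 of the commitment
var versionedHash: array[32, byte]
versionedHash[0] = 0x01'u8             # VERSIONED_HASH_VERSION_KZG
versionedHash[1 .. 31] = digest[1 .. 31]
doAssert versionedHash[0] == 0x01'u8
```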
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#verify_kzg_commitments_against_transactions
func verify_kzg_commitments_against_transactions*(
transactions: seq[Transaction],
kzg_commitments: seq[KzgCommitment]): bool =
var all_versioned_hashes: seq[VersionedHash]
for tx in transactions:
if tx[0] == BLOB_TX_TYPE:
let maybe_versioned_hashed = tx_peek_blob_versioned_hashes(tx)
if maybe_versioned_hashed.isErr:
return false
all_versioned_hashes.add maybe_versioned_hashed.get
# TODO valueOr version fails to compile
#all_versioned_hashes.add tx_peek_blob_versioned_hashes(tx).valueOr:
# return false
all_versioned_hashes == mapIt(
kzg_commitments, it.kzg_commitment_to_versioned_hash)
func process_blob_kzg_commitments(
body: deneb.BeaconBlockBody | deneb.TrustedBeaconBlockBody |
deneb.SigVerifiedBeaconBlockBody): bool =
verify_kzg_commitments_against_transactions(
body.execution_payload.transactions.asSeq,
body.blob_kzg_commitments.asSeq)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/fork-choice.md#validate_blobs
proc validate_blobs*(expected_kzg_commitments: seq[KzgCommitment],
blobs: seq[KzgBlob],
@ -818,7 +764,7 @@ proc process_block*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# copy of datatypes/altair.nim
type SomeAltairBlock =
@ -848,7 +794,7 @@ proc process_block*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
type SomeBellatrixBlock =
bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock

View File

@ -7,8 +7,8 @@
# State transition - epoch processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing
#
# The entry point is `process_epoch`, which is at the bottom of this file.
@ -39,7 +39,7 @@ export extras, phase0, altair
logScope: topics = "consens"
# Accessors that implement the max condition in `get_total_balance`:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_total_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_total_balance
template current_epoch*(v: TotalBalances): Gwei =
max(EFFECTIVE_BALANCE_INCREMENT, v.current_epoch_raw)
template previous_epoch*(v: TotalBalances): Gwei =
@ -154,7 +154,7 @@ func process_attestations*(
if v.flags.contains RewardFlags.isPreviousEpochHeadAttester:
info.balances.previous_epoch_head_attesters_raw += validator_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#helpers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#helpers
# get_eligible_validator_indices
func is_eligible_validator*(validator: RewardStatus): bool =
validator.flags.contains(RewardFlags.isActiveInPreviousEpoch) or
@ -241,7 +241,7 @@ func is_unslashed_participating_index(
has_flag(epoch_participation[].item(validator_index), flag_index) and
not state.validators[validator_index].slashed
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#justification-and-finalization
type FinalityState = object
slot: Slot
current_epoch_ancestor_root: Eth2Digest
@ -309,7 +309,7 @@ proc weigh_justification_and_finalization(
## state.justification_bits[1:] = state.justification_bits[:-1]
## state.justification_bits[0] = 0b0
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#misc
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#misc
const JUSTIFICATION_BITS_LENGTH = 4
state.justification_bits = JustificationBits(
@ -386,7 +386,7 @@ proc weigh_justification_and_finalization(
current_epoch = current_epoch,
checkpoint = shortLog(state.finalized_checkpoint)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#justification-and-finalization
proc process_justification_and_finalization*(
state: var phase0.BeaconState,
balances: TotalBalances, flags: UpdateFlags = {}) =
@ -422,7 +422,7 @@ proc compute_unrealized_finality*(
justified: finalityState.current_justified_checkpoint,
finalized: finalityState.finalized_checkpoint)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#justification-and-finalization
proc process_justification_and_finalization*(
state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState),
@ -458,7 +458,7 @@ proc compute_unrealized_finality*(
justified: finalityState.current_justified_checkpoint,
finalized: finalityState.finalized_checkpoint)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#helpers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#helpers
func get_base_reward_sqrt*(state: phase0.BeaconState, index: ValidatorIndex,
total_balance_sqrt: auto): Gwei =
# Spec function recalculates total_balance every time, which creates an
@ -506,7 +506,7 @@ func get_attestation_component_delta(is_unslashed_attester: bool,
else:
RewardDelta(penalties: base_reward)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#components-of-attestation-deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#components-of-attestation-deltas
func get_source_delta*(validator: RewardStatus,
base_reward: uint64,
balances: TotalBalances,
@ -584,7 +584,7 @@ func get_inactivity_penalty_delta*(validator: RewardStatus,
delta
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_attestation_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_attestation_deltas
func get_attestation_deltas(
state: phase0.BeaconState, info: var phase0.EpochInfo) =
## Update rewards with attestation reward/penalty deltas for each validator.
@ -629,7 +629,7 @@ func get_attestation_deltas(
info.validators[proposer_index].delta.add(
proposer_delta.get()[1])
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_base_reward
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_base_reward
func get_base_reward_increment*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState,
@ -640,7 +640,7 @@ func get_base_reward_increment*(
state.validators[index].effective_balance div EFFECTIVE_BALANCE_INCREMENT
increments * base_reward_per_increment
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_flag_index_deltas
func get_flag_index_reward*(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState,
@ -654,18 +654,18 @@ func get_flag_index_reward*(
else:
0.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_flag_index_deltas
func get_unslashed_participating_increment*(
info: altair.EpochInfo | bellatrix.BeaconState, flag_index: TimelyFlag): Gwei =
info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_flag_index_deltas
func get_active_increments*(
info: altair.EpochInfo | bellatrix.BeaconState): Gwei =
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas
# Combines get_flag_index_deltas() and get_inactivity_penalty_deltas()
iterator get_flag_and_inactivity_deltas*(
@ -812,7 +812,7 @@ func process_rewards_and_penalties*(
from std/heapqueue import HeapQueue, `[]`, len, push, replace
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#registry-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#registry-updates
func process_registry_updates*(
cfg: RuntimeConfig, state: var ForkyBeaconState, cache: var StateCache):
Result[void, cstring] =
@ -874,9 +874,9 @@ func process_registry_updates*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#slashings
func get_adjusted_total_slashing_balance*(
state: ForkyBeaconState, total_balance: Gwei): Gwei =
const multiplier =
@ -894,15 +894,15 @@ func get_adjusted_total_slashing_balance*(
min(sum(state.slashings.data) * multiplier, total_balance)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#slashings
func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool =
validator.slashed and
epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#slashings
func get_slashing_penalty*(validator: Validator,
adjusted_total_slashing_balance,
total_balance: Gwei): Gwei =
@ -912,9 +912,9 @@ func get_slashing_penalty*(validator: Validator,
adjusted_total_slashing_balance
penalty_numerator div total_balance * increment
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/bellatrix/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/bellatrix/beacon-chain.md#slashings
func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) =
let
epoch = get_current_epoch(state)
@ -928,7 +928,7 @@ func process_slashings*(state: var ForkyBeaconState, total_balance: Gwei) =
validator[], adjusted_total_slashing_balance, total_balance)
decrease_balance(state, vidx, penalty)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#eth1-data-votes-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#eth1-data-votes-updates
func process_eth1_data_reset*(state: var ForkyBeaconState) =
let next_epoch = get_current_epoch(state) + 1
@ -936,7 +936,7 @@ func process_eth1_data_reset*(state: var ForkyBeaconState) =
if next_epoch mod EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
state.eth1_data_votes = default(type state.eth1_data_votes)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#effective-balances-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#effective-balances-updates
template effective_balance_might_update*(
balance: Gwei, effective_balance: Gwei): bool =
const
@ -946,7 +946,7 @@ template effective_balance_might_update*(
balance + DOWNWARD_THRESHOLD < effective_balance or
effective_balance + UPWARD_THRESHOLD < balance
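A hedged worked example of the hysteresis thresholds above, assuming the mainnet constants EFFECTIVE_BALANCE_INCREMENT = 10^9 Gwei, HYSTERESIS_QUOTIENT = 4 and downward/upward multipliers of 1 and 5: a 32 ETH effective balance only moves once the actual balance drifts below 31.75 ETH or above 33.25 ETH.
```nim
let
  increment = 1_000_000_000'u64             # 1 ETH in Gwei (assumed)
  downwardThreshold = increment div 4 * 1   # 0.25 ETH
  upwardThreshold = increment div 4 * 5     # 1.25 ETH
  effectiveBalance = 32_000_000_000'u64     # 32 ETH
echo effectiveBalance - downwardThreshold   # 31750000000
echo effectiveBalance + upwardThreshold     # 33250000000
```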
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#effective-balances-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#effective-balances-updates
func process_effective_balance_updates*(state: var ForkyBeaconState) =
# Update effective balances with hysteresis
for vidx in state.validators.vindices:
@ -962,14 +962,14 @@ func process_effective_balance_updates*(state: var ForkyBeaconState) =
if new_effective_balance != effective_balance:
state.validators.mitem(vidx).effective_balance = new_effective_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#slashings-balances-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#slashings-balances-updates
func process_slashings_reset*(state: var ForkyBeaconState) =
let next_epoch = get_current_epoch(state) + 1
# Reset slashings
state.slashings[int(next_epoch mod EPOCHS_PER_SLASHINGS_VECTOR)] = 0.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#randao-mixes-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#randao-mixes-updates
func process_randao_mixes_reset*(state: var ForkyBeaconState) =
let
current_epoch = get_current_epoch(state)
@ -982,12 +982,12 @@ func process_randao_mixes_reset*(state: var ForkyBeaconState) =
func compute_historical_root*(state: var ForkyBeaconState): Eth2Digest =
# Equivalent to hash_tree_root(foo: HistoricalBatch), but without using
# significant additional stack or heap.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#historicalbatch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#historicalbatch
# In response to https://github.com/status-im/nimbus-eth2/issues/921
hash_tree_root([
hash_tree_root(state.block_roots), hash_tree_root(state.state_roots)])
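The equivalence this shortcut relies on can be written as a single merkleization identity: a two-field SSZ container hashes to the root of the vector of its two fields' roots, so the `HistoricalBatch` never has to be materialized (a sketch of the SSZ rule, not project code):

```
hash_tree_root(HistoricalBatch(block_roots: b, state_roots: s))
  == hash_tree_root([hash_tree_root(b), hash_tree_root(s)])
```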
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#historical-roots-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#historical-roots-updates
func process_historical_roots_update*(state: var ForkyBeaconState) =
## Set historical root accumulator
let next_epoch = get_current_epoch(state) + 1
@ -995,7 +995,7 @@ func process_historical_roots_update*(state: var ForkyBeaconState) =
if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0:
# Equivalent to hash_tree_root(foo: HistoricalBatch), but without using
# significant additional stack or heap.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#historicalbatch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#historicalbatch
# In response to https://github.com/status-im/nimbus-eth2/issues/921
if not state.historical_roots.add state.compute_historical_root():
raiseAssert "no more room for historical roots, so long and thanks for the fish!"
@ -1007,7 +1007,7 @@ func process_participation_record_updates*(state: var phase0.BeaconState) =
state.previous_epoch_attestations.clear()
swap(state.previous_epoch_attestations, state.current_epoch_attestations)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#participation-flags-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#participation-flags-updates
func process_participation_flag_updates*(
state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState)) =
@ -1021,7 +1021,7 @@ func process_participation_flag_updates*(
# grows. New elements are automatically initialized to 0, as required.
doAssert state.current_epoch_participation.asList.setLen(state.validators.len)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#sync-committee-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#sync-committee-updates
func process_sync_committee_updates*(
state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState)) =
@ -1030,7 +1030,7 @@ func process_sync_committee_updates*(
state.current_sync_committee = state.next_sync_committee
state.next_sync_committee = get_next_sync_committee(state)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#inactivity-scores
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#inactivity-scores
func process_inactivity_updates*(
cfg: RuntimeConfig,
state: var (altair.BeaconState | bellatrix.BeaconState |
@ -1067,7 +1067,7 @@ func process_inactivity_updates*(
if pre_inactivity_score != inactivity_score:
state.inactivity_scores[index] = inactivity_score
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#historical-summaries-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#historical-summaries-updates
func process_historical_summaries_update*(
state: var (capella.BeaconState | deneb.BeaconState)):
Result[void, cstring] =
@ -1083,7 +1083,7 @@ func process_historical_summaries_update*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#epoch-processing
proc process_epoch*(
cfg: RuntimeConfig, state: var phase0.BeaconState, flags: UpdateFlags,
cache: var StateCache, info: var phase0.EpochInfo): Result[void, cstring] =
@ -1152,7 +1152,7 @@ proc process_epoch*(
info.init(state)
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#justification-and-finalization
# [Modified in Altair]
process_justification_and_finalization(state, info.balances, flags)
@ -1171,10 +1171,10 @@ proc process_epoch*(
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#rewards-and-penalties
process_rewards_and_penalties(cfg, state, info) # [Modified in Altair]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#registry-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#registry-updates
? process_registry_updates(cfg, state, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#slashings
process_slashings(state, info.balances.current_epoch) # [Modified in Altair]
process_eth1_data_reset(state)
@ -1187,7 +1187,7 @@ proc process_epoch*(
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/capella/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/capella/beacon-chain.md#epoch-processing
proc process_epoch*(
cfg: RuntimeConfig,
state: var (capella.BeaconState | deneb.BeaconState),
@ -1198,7 +1198,7 @@ proc process_epoch*(
info.init(state)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#justification-and-finalization
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#justification-and-finalization
process_justification_and_finalization(state, info.balances, flags)
# state.slot hasn't been incremented yet.
@ -1217,10 +1217,10 @@ proc process_epoch*(
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#rewards-and-penalties
process_rewards_and_penalties(cfg, state, info)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#registry-updates
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#registry-updates
? process_registry_updates(cfg, state, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/beacon-chain.md#slashings
process_slashings(state, info.balances.current_epoch)
process_eth1_data_reset(state)

View File

@ -18,8 +18,8 @@ const
PIVOT_VIEW_SIZE = SEED_SIZE + ROUND_SIZE
TOTAL_SIZE = PIVOT_VIEW_SIZE + POSITION_WINDOW_SIZE
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_committee
# Port of https://github.com/protolambda/zrnt/blob/v0.14.0/eth2/beacon/shuffle.go
func shuffle_list*(input: var seq[ValidatorIndex], seed: Eth2Digest) =
let list_size = input.lenu64
@ -156,13 +156,13 @@ func get_shuffled_active_validator_indices*(
withState(state):
cache.get_shuffled_active_validator_indices(forkyState.data, epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_active_validator_indices
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_active_validator_indices
func count_active_validators*(state: ForkyBeaconState,
epoch: Epoch,
cache: var StateCache): uint64 =
cache.get_shuffled_active_validator_indices(state, epoch).lenu64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_committee_count_per_slot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_committee_count_per_slot
func get_committee_count_per_slot*(num_active_validators: uint64): uint64 =
clamp(
num_active_validators div SLOTS_PER_EPOCH div TARGET_COMMITTEE_SIZE,
@ -187,7 +187,7 @@ iterator get_committee_indices*(committee_count_per_slot: uint64): CommitteeInde
let committee_index = CommitteeIndex.init(idx).expect("value clamped")
yield committee_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_committee
func compute_committee_slice*(
active_validators, index, count: uint64): Slice[int] =
doAssert active_validators <= ValidatorIndex.high.uint64
@ -233,7 +233,7 @@ func compute_committee_len*(
(slice.b - slice.a + 1).uint64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_beacon_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_beacon_committee
iterator get_beacon_committee*(
state: ForkyBeaconState, slot: Slot, index: CommitteeIndex,
cache: var StateCache): (int, ValidatorIndex) =
@ -273,7 +273,7 @@ func get_beacon_committee*(
withState(state):
get_beacon_committee(forkyState.data, slot, index, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_beacon_committee
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee_len*(
state: ForkyBeaconState, slot: Slot, index: CommitteeIndex,
cache: var StateCache): uint64 =
@ -297,7 +297,7 @@ func get_beacon_committee_len*(
withState(state):
get_beacon_committee_len(forkyState.data, slot, index, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_shuffled_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_shuffled_index
template compute_shuffled_index_aux(
index: uint64, index_count: uint64, seed: Eth2Digest, iter: untyped):
uint64 =
@ -346,7 +346,7 @@ func compute_inverted_shuffled_index*(
compute_shuffled_index_aux(index, index_count, seed) do:
countdown(SHUFFLE_ROUND_COUNT.uint8 - 1, 0'u8, 1)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#compute_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#compute_proposer_index
template compute_proposer_index(state: ForkyBeaconState,
indices: openArray[ValidatorIndex], seed: Eth2Digest,
unshuffleTransform: untyped): Opt[ValidatorIndex] =
@ -386,7 +386,7 @@ func compute_proposer_index(state: ForkyBeaconState,
## Return from ``indices`` a random index sampled by effective balance.
compute_proposer_index(state, indices, seed, shuffled_index)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*(
state: ForkyBeaconState, cache: var StateCache, slot: Slot):
Opt[ValidatorIndex] =
@ -421,7 +421,7 @@ func get_beacon_proposer_index*(
return res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_indices*(
state: ForkyBeaconState, shuffled_indices: openArray[ValidatorIndex], epoch: Epoch):
seq[Opt[ValidatorIndex]] =
@ -443,7 +443,7 @@ func get_beacon_proposer_indices*(
res
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*(state: ForkyBeaconState, cache: var StateCache):
Opt[ValidatorIndex] =
## Return the beacon proposer index at the current slot.
@ -513,7 +513,7 @@ func livenessFailsafeInEffect*(
false
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#attestation-subnet-subscription
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-subscription
func compute_subscribed_subnet(node_id: UInt256, epoch: Epoch, index: uint64):
SubnetId =
# Ensure neither `truncate` loses information
@ -537,7 +537,7 @@ func compute_subscribed_subnet(node_id: UInt256, epoch: Epoch, index: uint64):
)
SubnetId((permutated_prefix + index) mod ATTESTATION_SUBNET_COUNT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/p2p-interface.md#attestation-subnet-subscription
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-subscription
iterator compute_subscribed_subnets*(node_id: UInt256, epoch: Epoch): SubnetId =
for index in 0'u64 ..< SUBNETS_PER_NODE:
yield compute_subscribed_subnet(node_id, epoch, index)
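A minimal usage sketch of the iterator above; it assumes the module shown here is imported and that `nodeId` (the local node's `UInt256` id) and `wallEpoch` are already available — both names are hypothetical:

```nim
# Hypothetical caller: enumerate the node's stable attestation subnets for
# the current epoch (the iterator yields SUBNETS_PER_NODE ids).
for subnetId in compute_subscribed_subnets(nodeId, wallEpoch):
  echo "subscribing to attestation subnet ", subnetId
```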

View File

@ -10,10 +10,10 @@
import
./datatypes/base, ./beaconstate, ./forks, ./helpers
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/weak-subjectivity.md#configuration
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/weak-subjectivity.md#configuration
const SAFETY_DECAY* = 10'u64
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period
func compute_weak_subjectivity_period(
cfg: RuntimeConfig, state: ForkyBeaconState): uint64 =
## Returns the weak subjectivity period for the current ``state``.
@ -49,7 +49,7 @@ func compute_weak_subjectivity_period(
ws_period
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period
func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot,
ws_state: ForkedHashedBeaconState,
ws_checkpoint: Checkpoint): bool =

View File

@ -187,7 +187,7 @@ func getBeaconStateDiffSummary*(state0: capella.BeaconState):
if state0.eth1_data_votes.len > 0:
# replaceOrAddEncodeEth1Votes will check whether it needs to replace or add
# the votes. Which happens is a function of effectively external data, i.e.
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/phase0/beacon-chain.md#eth1-data
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/beacon-chain.md#eth1-data
# notes it depends on things not deterministic, from a pure consensus-layer
# perspective. It thus must distinguish between adding and replacing votes,
# which it accomplishes by checking lengths and the most recent votes. This

View File

@ -21,7 +21,7 @@ Blocks are received by batch:
- in case of failure:
- `push(SyncQueue, SyncRequest)` is called to reschedule the sync request.
Every second when sync is not in progress, the beacon node will ask the RequestManager to download all missing blocks currently in quarantaine.
Every second when sync is not in progress, the beacon node will ask the RequestManager to download all missing blocks currently in quarantine.
- via `handleMissingBlocks`
- which calls `fetchAncestorBlocks`
- which asynchronously enqueue the request in the SharedBlockQueue `AsyncQueue[BlockEntry]`.

View File

@ -110,7 +110,7 @@ proc isGossipSupported*(
finalizedPeriod = self.getFinalizedPeriod(),
isNextSyncCommitteeKnown = self.isNextSyncCommitteeKnown())
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
proc doRequest(
e: typedesc[Bootstrap],
peer: Peer,
@ -119,7 +119,7 @@ proc doRequest(
raises: [IOError].} =
peer.lightClientBootstrap(blockRoot)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
type LightClientUpdatesByRangeResponse =
NetRes[List[ForkedLightClientUpdate, MAX_REQUEST_LIGHT_CLIENT_UPDATES]]
proc doRequest(
@ -138,7 +138,7 @@ proc doRequest(
raise newException(ResponseError, e.error)
return response
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
proc doRequest(
e: typedesc[FinalityUpdate],
peer: Peer
@ -146,7 +146,7 @@ proc doRequest(
raises: [IOError].} =
peer.lightClientFinalityUpdate()
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
proc doRequest(
e: typedesc[OptimisticUpdate],
peer: Peer
@ -335,7 +335,7 @@ template query[E](
): Future[bool] =
self.query(e, Nothing())
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/light-client.md#light-client-sync-process
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/light-client.md#light-client-sync-process
proc loop(self: LightClientManager) {.async.} =
var nextSyncTaskTime = self.getBeaconTime()
while true:

View File

@ -93,9 +93,10 @@ proc checkResponse(idList: seq[BlobIdentifier],
if len(blobs) > len(idList):
return false
for blob in blobs:
let block_root = hash_tree_root(blob.signed_block_header.message)
var found = false
for id in idList:
if id.block_root == blob.block_root and
if id.block_root == block_root and
id.index == blob.index:
found = true
break
@ -204,8 +205,9 @@ proc fetchBlobsFromNetwork(self: RequestManager,
self.blobQuarantine[].put(b)
var curRoot: Eth2Digest
for b in ublobs:
if b.block_root != curRoot:
curRoot = b.block_root
let block_root = hash_tree_root(b.signed_block_header.message)
if block_root != curRoot:
curRoot = block_root
if (let o = self.quarantine[].popBlobless(curRoot); o.isSome):
let b = o.unsafeGet()
discard await self.blockVerifier(ForkedSignedBeaconBlock.init(b), false)
@ -292,7 +294,7 @@ proc getMissingBlobs(rman: RequestManager): seq[BlobIdentifier] =
warn "quarantine missing blobs, but missing indices is empty",
blk=blobless.root,
indices=rman.blobQuarantine[].blobIndices(blobless.root),
kzgs=len(blobless.message.body.blob_kzg_commitments)
commitments=len(blobless.message.body.blob_kzg_commitments)
for idx in missing.indices:
let id = BlobIdentifier(block_root: blobless.root, index: idx)
if id notin fetches:
@ -302,7 +304,7 @@ proc getMissingBlobs(rman: RequestManager): seq[BlobIdentifier] =
warn "missing blob handler found blobless block with all blobs",
blk=blobless.root,
indices=rman.blobQuarantine[].blobIndices(blobless.root),
kzgs=len(blobless.message.body.blob_kzg_commitments)
commitments=len(blobless.message.body.blob_kzg_commitments)
discard rman.blockVerifier(ForkedSignedBeaconBlock.init(blobless),
false)
rman.quarantine[].removeBlobless(blobless)
@ -356,4 +358,3 @@ proc stop*(rman: RequestManager) =
rman.blockLoopFuture.cancelSoon()
if not(isNil(rman.blobLoopFuture)):
rman.blobLoopFuture.cancelSoon()

View File

@ -49,6 +49,7 @@ type
SyncManager*[A, B] = ref object
pool: PeerPool[A, B]
DENEB_FORK_EPOCH: Epoch
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: uint64
responseTimeout: chronos.Duration
maxHeadAge: uint64
getLocalHeadSlot: GetSlotCallback
@ -116,6 +117,7 @@ proc initQueue[A, B](man: SyncManager[A, B]) =
proc newSyncManager*[A, B](pool: PeerPool[A, B],
denebEpoch: Epoch,
minEpochsForBlobSidecarsRequests: uint64,
direction: SyncQueueKind,
getLocalHeadSlotCb: GetSlotCallback,
getLocalWallSlotCb: GetSlotCallback,
@ -138,6 +140,7 @@ proc newSyncManager*[A, B](pool: PeerPool[A, B],
var res = SyncManager[A, B](
pool: pool,
DENEB_FORK_EPOCH: denebEpoch,
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: minEpochsForBlobSidecarsRequests,
getLocalHeadSlot: getLocalHeadSlotCb,
getLocalWallSlot: getLocalWallSlotCb,
getSafeSlot: getSafeSlot,
@ -187,8 +190,8 @@ proc getBlocks*[A, B](man: SyncManager[A, B], peer: A,
proc shouldGetBlobs[A, B](man: SyncManager[A, B], e: Epoch): bool =
let wallEpoch = man.getLocalWallSlot().epoch
e >= man.DENEB_FORK_EPOCH and
(wallEpoch < MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS or
e >= wallEpoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS)
(wallEpoch < man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS or
e >= wallEpoch - man.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS)
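`MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` is now read from the manager instead of a compile-time constant; the retention check itself is unchanged and small enough to restate standalone (plain `uint64` epochs stand in for `Epoch`, and the 4096-epoch window is the mainnet value, assumed here):

```nim
func wantBlobs(e, wallEpoch, denebEpoch, minEpochs: uint64): bool =
  ## True when epoch `e` is post-Deneb and still inside the blob
  ## sidecar retention window ending `minEpochs` behind the wall clock.
  e >= denebEpoch and
    (wallEpoch < minEpochs or e >= wallEpoch - minEpochs)

when isMainModule:
  doAssert not wantBlobs(5000, 10000, 0, 4096)  # older than the window
  doAssert wantBlobs(6000, 10000, 0, 4096)      # still retained
```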
proc getBlobSidecars*[A, B](man: SyncManager[A, B], peer: A,
req: SyncRequest): Future[BlobSidecarsRes] {.async.} =
@ -247,9 +250,9 @@ func groupBlobs*[T](req: SyncRequest[T],
# reached end of blobs, have more blobless blocks
break
for blob in blobs[blobCursor..len(blobs)-1]:
if blob.slot < slot:
if blob.signed_block_header.message.slot < slot:
return Result[seq[BlobSidecars], string].err "invalid blob sequence"
if blob.slot==slot:
if blob.signed_block_header.message.slot == slot:
grouped[i].add(blob)
blobCursor = blobCursor + 1
i = i + 1
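With Deneb blob sidecars no longer carrying a bare `slot`, grouping now compares the slot taken from the embedded `signed_block_header`. A simplified standalone sketch of that grouping, in which plain integers stand in for blocks and sidecars and the error handling of the real `groupBlobs` is omitted:

```nim
proc groupBySlot(blockSlots, blobSlots: seq[uint64]): seq[seq[uint64]] =
  ## Blobs arrive sorted by slot; each block collects the blobs whose
  ## header-derived slot equals its own.
  var cursor = 0
  result = newSeq[seq[uint64]](blockSlots.len)
  for i, slot in blockSlots:
    while cursor < blobSlots.len and blobSlots[cursor] == slot:
      result[i].add blobSlots[cursor]
      inc cursor

when isMainModule:
  doAssert groupBySlot(@[1'u64, 2, 4], @[1'u64, 1, 4]) ==
    @[@[1'u64, 1], newSeq[uint64](), @[4'u64]]
```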
@ -439,7 +442,7 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A) {.async.} =
blobs_map = blobSmap, request = req
if len(blobData) > 0:
let slots = mapIt(blobData, it[].slot)
let slots = mapIt(blobData, it[].signed_block_header.message.slot)
let uniqueSlots = foldl(slots, combine(a, b), @[slots[0]])
if not(checkResponse(req, uniqueSlots)):
peer.updateScore(PeerScoreBadResponse)
@ -464,7 +467,8 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A) {.async.} =
man.queue.push(req)
return
for i, blk in blockData:
if len(blobs[i]) > 0 and blk[].slot != blobs[i][0].slot:
if len(blobs[i]) > 0 and blk[].slot !=
blobs[i][0].signed_block_header.message.slot:
peer.updateScore(PeerScoreNoValues)
man.queue.push(req)
debug "block and blobs data have inconsistent slots"

View File

@ -498,10 +498,10 @@ p2pProtocol BeaconSync(version = 1,
let
dag = peer.networkState.dag
epochBoundary =
if MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= dag.head.slot.epoch:
if dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS >= dag.head.slot.epoch:
GENESIS_EPOCH
else:
dag.head.slot.epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS
if startSlot.epoch < epochBoundary:
raise newException(ResourceUnavailableError, BlobsOutOfRange)
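The boundary check now uses the runtime `dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`; the arithmetic is small enough to restate standalone (plain `uint64` epochs, with 0 standing in for `GENESIS_EPOCH` and 4096 as the assumed mainnet window):

```nim
func blobEpochBoundary(headEpoch, minEpochs: uint64): uint64 =
  ## Earliest epoch for which blob sidecar requests are still served.
  if minEpochs >= headEpoch: 0'u64
  else: headEpoch - minEpochs

when isMainModule:
  doAssert blobEpochBoundary(10, 4096) == 0        # young chain: serve all
  doAssert blobEpochBoundary(10000, 4096) == 5904  # pruned window applies
```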
@ -546,7 +546,7 @@ p2pProtocol BeaconSync(version = 1,
debug "BlobSidecar range request done",
peer, startSlot, count = reqCount, found
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientbootstrap
proc lightClientBootstrap(
peer: Peer,
blockRoot: Eth2Digest,
@ -574,7 +574,7 @@ p2pProtocol BeaconSync(version = 1,
debug "LC bootstrap request done", peer, blockRoot
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
proc lightClientUpdatesByRange(
peer: Peer,
startPeriod: SyncCommitteePeriod,
@ -619,7 +619,7 @@ p2pProtocol BeaconSync(version = 1,
debug "LC updates by range request done", peer, startPeriod, count, found
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
proc lightClientFinalityUpdate(
peer: Peer,
response: SingleChunkResponse[ForkedLightClientFinalityUpdate])
@ -646,7 +646,7 @@ p2pProtocol BeaconSync(version = 1,
debug "LC finality update request done", peer
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
proc lightClientOptimisticUpdate(
peer: Peer,
response: SingleChunkResponse[ForkedLightClientOptimisticUpdate])

View File

@ -119,9 +119,9 @@ proc getShortMap*[T](req: SyncRequest[T],
if cur >= lenu64(data):
res.add('|')
continue
if slot == data[cur].slot:
if slot == data[cur].signed_block_header.message.slot:
for k in cur..<cur+MAX_BLOBS_PER_BLOCK:
if k >= lenu64(data) or slot != data[k].slot:
if k >= lenu64(data) or slot != data[k].signed_block_header.message.slot:
res.add('|')
break
else:

View File

@ -68,6 +68,7 @@ func shortLog*(v: TrustedNodeSyncTarget): auto =
chronicles.formatIt(TrustedNodeSyncTarget): shortLog(it)
proc doTrustedNodeSync*(
db: BeaconChainDB,
cfg: RuntimeConfig,
databaseDir: string,
eraDir: string,
@ -89,11 +90,6 @@ proc doTrustedNodeSync*(
error "Cannot connect to server", error = error
quit 1
let
db = BeaconChainDB.new(databaseDir, cfg, inMemory = false)
defer:
db.close()
# If possible, we'll store the genesis state in the database - this is not
# strictly necessary but renders the resulting database compatible with
# versions prior to 22.11 and makes reindexing possible
@ -181,7 +177,7 @@ proc doTrustedNodeSync*(
let stateId =
case syncTarget.kind
of TrustedNodeSyncKind.TrustedBlockRoot:
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.3/specs/altair/light-client/light-client.md#light-client-sync-process
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/altair/light-client/light-client.md#light-client-sync-process
const lcDataFork = LightClientDataFork.high
var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]]
func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) =
@ -551,7 +547,8 @@ when isMainModule:
kind: TrustedNodeSyncKind.StateId,
stateId: os.paramStr(5))
backfill = os.paramCount() > 5 and os.paramStr(6) == "true"
waitFor doTrustedNodeSync(
db = BeaconChainDB.new(databaseDir, cfg, inMemory = false)
waitFor db.doTrustedNodeSync(
getRuntimeConfig(some os.paramStr(1)), os.paramStr(2), os.paramStr(3),
os.paramStr(4), syncTarget, backfill, false, true)
db.close()

View File

@ -22,6 +22,8 @@ const
ResponseNoSyncError = "Received nosync error response"
ResponseDecodeError = "Received response could not be decoded"
ResponseECNotInSyncError* = "Execution client not in sync"
ResponseNotImplementedError =
"Received endpoint not implemented error response"
type
ApiResponse*[T] = Result[T, string]
@ -772,6 +774,12 @@ template handle500(): untyped {.dirty.} =
node.updateStatus(RestBeaconNodeStatus.InternalError, failure)
failures.add(failure)
template handle501(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.NotImplemented, RequestName,
strategy, node, response.status, response.getErrorMessage())
node.updateStatus(RestBeaconNodeStatus.Incompatible, failure)
failures.add(failure)
template handle503(): untyped {.dirty.} =
let failure = ApiNodeFailure.init(ApiFailure.NotSynced, RequestName,
strategy, node, response.status, response.getErrorMessage())
@ -2570,3 +2578,284 @@ proc getValidatorsLiveness*(
res
return GetValidatorsLivenessResponse(data: response)
proc getFinalizedBlockHeader*(
vc: ValidatorClientRef,
): Future[Opt[GetBlockHeaderResponse]] {.async.} =
const RequestName = "getFinalizedBlockHeader"
let
blockIdent = BlockIdent.init(BlockIdentType.Finalized)
resp = vc.onceToAll(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
getBlockHeaderPlain(it, blockIdent))
case resp.status
of ApiOperation.Timeout:
debug "Unable to obtain finalized block header in time",
timeout = SlotDuration
return Opt.none(GetBlockHeaderResponse)
of ApiOperation.Interrupt:
debug "Finalized block header request was interrupted"
return Opt.none(GetBlockHeaderResponse)
of ApiOperation.Failure:
debug "Unexpected error happened while trying to get finalized block header"
return Opt.none(GetBlockHeaderResponse)
of ApiOperation.Success:
var oldestBlockHeader: GetBlockHeaderResponse
var oldestEpoch: Opt[Epoch]
for apiResponse in resp.data:
if apiResponse.data.isErr():
debug "Unable to get finalized block header",
endpoint = apiResponse.node, error = apiResponse.data.error
else:
let response = apiResponse.data.get()
case response.status
of 200:
let res = decodeBytes(GetBlockHeaderResponse,
response.data, response.contentType)
if res.isOk():
let
rdata = res.get()
epoch = rdata.data.header.message.slot.epoch()
if oldestEpoch.get(FAR_FUTURE_EPOCH) > epoch:
oldestEpoch = Opt.some(epoch)
oldestBlockHeader = rdata
else:
let failure = ApiNodeFailure.init(
ApiFailure.UnexpectedResponse, RequestName,
apiResponse.node, response.status, $res.error)
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseDecodeError, reason = getFailureReason(failure)
continue
of 400:
let failure = ApiNodeFailure.init(
ApiFailure.Invalid, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseInvalidError, reason = getFailureReason(failure)
continue
of 404:
let failure = ApiNodeFailure.init(
ApiFailure.NotFound, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseNotFoundError, reason = getFailureReason(failure)
continue
of 500:
let failure = ApiNodeFailure.init(
ApiFailure.Internal, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseInternalError, reason = getFailureReason(failure)
continue
else:
let failure = ApiNodeFailure.init(
ApiFailure.UnexpectedCode, RequestName,
apiResponse.node, response.status, response.getErrorMessage())
# We do not update beacon node's status anymore because of
# issue #5377.
debug ResponseUnexpectedError, reason = getFailureReason(failure)
continue
if oldestEpoch.isSome():
return Opt.some(oldestBlockHeader)
else:
return Opt.none(GetBlockHeaderResponse)
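When several beacon nodes answer, the proc above keeps the header whose finalized epoch is the oldest, i.e. the most conservative view across the fleet. A standalone sketch of that selection, with plain `uint64` epochs standing in for the decoded `GetBlockHeaderResponse` values:

```nim
import std/options

func oldestFinalizedEpoch(epochs: openArray[uint64]): Option[uint64] =
  ## Pick the smallest reported finalized epoch, if any node answered.
  for e in epochs:
    if result.isNone or e < result.get:
      result = some(e)

when isMainModule:
  doAssert oldestFinalizedEpoch([231'u64, 229, 230]) == some(229'u64)
  doAssert oldestFinalizedEpoch(newSeq[uint64]()) == none(uint64)
```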
proc submitBeaconCommitteeSelections*(
vc: ValidatorClientRef,
data: seq[RestBeaconCommitteeSelection],
strategy: ApiStrategyKind
): Future[SubmitBeaconCommitteeSelectionsResponse] {.async.} =
const
RequestName = "submitBeaconCommitteeSelections"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(
RestPlainResponse,
SubmitBeaconCommitteeSelectionsResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
submitBeaconCommitteeSelectionsPlain(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(SubmitBeaconCommitteeSelectionsResponse,
response.data, response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err($res.error)
else:
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].ok(res.get())
of 400:
handle400()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseInvalidError)
of 500:
handle500()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseInternalError)
of 501:
handle501()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseNotImplementedError)
of 503:
handle503()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[SubmitBeaconCommitteeSelectionsResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
submitBeaconCommitteeSelectionsPlain(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(SubmitBeaconCommitteeSelectionsResponse,
response.data, response.contentType)
if res.isOk(): return res.get()
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 501:
handle501()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to submit beacon committee selections", data: failures)
proc submitSyncCommitteeSelections*(
vc: ValidatorClientRef,
data: seq[RestSyncCommitteeSelection],
strategy: ApiStrategyKind
): Future[SubmitSyncCommitteeSelectionsResponse] {.async.} =
const
RequestName = "submitBeaconCommitteeSelections"
var failures: seq[ApiNodeFailure]
case strategy
of ApiStrategyKind.First, ApiStrategyKind.Best:
let res = vc.firstSuccessParallel(
RestPlainResponse,
SubmitSyncCommitteeSelectionsResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
submitSyncCommitteeSelectionsPlain(it, data)):
if apiResponse.isErr():
handleCommunicationError()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
apiResponse.error)
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(SubmitSyncCommitteeSelectionsResponse,
response.data, response.contentType)
if res.isErr():
handleUnexpectedData()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err($res.error)
else:
ApiResponse[SubmitSyncCommitteeSelectionsResponse].ok(res.get())
of 400:
handle400()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseInvalidError)
of 500:
handle500()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseInternalError)
of 501:
handle501()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseNotImplementedError)
of 503:
handle503()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseNoSyncError)
else:
handleUnexpectedCode()
ApiResponse[SubmitSyncCommitteeSelectionsResponse].err(
ResponseUnexpectedError)
if res.isErr():
raise (ref ValidatorApiError)(msg: res.error, data: failures)
return res.get()
of ApiStrategyKind.Priority:
vc.firstSuccessSequential(RestPlainResponse,
SlotDuration,
ViableNodeStatus,
{BeaconNodeRole.Duties},
submitSyncCommitteeSelectionsPlain(it, data)):
if apiResponse.isErr():
handleCommunicationError()
false
else:
let response = apiResponse.get()
case response.status
of 200:
let res = decodeBytes(SubmitSyncCommitteeSelectionsResponse,
response.data, response.contentType)
if res.isOk(): return res.get()
handleUnexpectedData()
false
of 400:
handle400()
false
of 500:
handle500()
false
of 501:
handle501()
false
of 503:
handle503()
false
else:
handleUnexpectedCode()
false
raise (ref ValidatorApiError)(
msg: "Failed to submit sync committee selections", data: failures)

View File

@ -21,17 +21,19 @@ const
logScope: service = ServiceName
type
BlobList = List[BlobSidecar, Limit MAX_BLOBS_PER_BLOCK]
PreparedBeaconBlock = object
blockRoot*: Eth2Digest
data*: ForkedBeaconBlock
blobsOpt*: Opt[BlobList]
kzgProofsOpt*: Opt[deneb.KzgProofs]
blobsOpt*: Opt[deneb.Blobs]
PreparedBlindedBeaconBlock = object
blockRoot*: Eth2Digest
data*: ForkedBlindedBeaconBlock
proc proposeBlock(vc: ValidatorClientRef, slot: Slot,
proposerKey: ValidatorPubKey) {.async.}
proc produceBlock(
vc: ValidatorClientRef,
currentSlot, slot: Slot,
@ -63,30 +65,36 @@ proc produceBlock(
let blck = produceBlockResponse.phase0Data
return Opt.some(PreparedBeaconBlock(blockRoot: hash_tree_root(blck),
data: ForkedBeaconBlock.init(blck),
blobsOpt: Opt.none(BlobList)))
kzgProofsOpt: Opt.none(deneb.KzgProofs),
blobsOpt: Opt.none(deneb.Blobs)))
of ConsensusFork.Altair:
let blck = produceBlockResponse.altairData
return Opt.some(PreparedBeaconBlock(blockRoot: hash_tree_root(blck),
data: ForkedBeaconBlock.init(blck),
blobsOpt: Opt.none(BlobList)))
kzgProofsOpt: Opt.none(deneb.KzgProofs),
blobsOpt: Opt.none(deneb.Blobs)))
of ConsensusFork.Bellatrix:
let blck = produceBlockResponse.bellatrixData
return Opt.some(PreparedBeaconBlock(blockRoot: hash_tree_root(blck),
data: ForkedBeaconBlock.init(blck),
blobsOpt: Opt.none(BlobList)))
kzgProofsOpt: Opt.none(deneb.KzgProofs),
blobsOpt: Opt.none(deneb.Blobs)))
of ConsensusFork.Capella:
let blck = produceBlockResponse.capellaData
return Opt.some(PreparedBeaconBlock(blockRoot: hash_tree_root(blck),
data: ForkedBeaconBlock.init(blck),
blobsOpt: Opt.none(BlobList)))
kzgProofsOpt: Opt.none(deneb.KzgProofs),
blobsOpt: Opt.none(deneb.Blobs)))
of ConsensusFork.Deneb:
let blck = produceBlockResponse.denebData.`block`
let blobs = produceBlockResponse.denebData.blob_sidecars
let
blck = produceBlockResponse.denebData.`block`
kzgProofs = produceBlockResponse.denebData.kzg_proofs
blobs = produceBlockResponse.denebData.blobs
return Opt.some(PreparedBeaconBlock(blockRoot: hash_tree_root(blck),
data: ForkedBeaconBlock.init(blck),
kzgProofsOpt: Opt.some(kzgProofs),
blobsOpt: Opt.some(blobs)))
proc produceBlindedBlock(
vc: ValidatorClientRef,
currentSlot, slot: Slot,
@ -125,6 +133,58 @@ proc lazyWait[T](fut: Future[T]) {.async.} =
except CatchableError:
discard
proc prepareRandao(vc: ValidatorClientRef, slot: Slot,
proposerKey: ValidatorPubKey) {.async.} =
if slot == GENESIS_SLOT:
return
let
destSlot = slot - 1'u64
destOffset = TimeDiff(nanoseconds: NANOSECONDS_PER_SLOT.int64 div 2)
deadline = destSlot.start_beacon_time() + destOffset
epoch = slot.epoch()
# We are going to wait until T - (T / 4 * 2), where T is the proposer's
# duty slot.
currentSlot = (await vc.checkedWaitForSlot(destSlot, destOffset,
false)).valueOr:
debug "Unable to perform RANDAO signature preparation because of " &
"system time failure"
return
validator =
vc.getValidatorForDuties(proposerKey, slot, true).valueOr: return
if currentSlot <= destSlot:
# We do not need result, because we want it to be cached.
let
start = Moment.now()
genesisRoot = vc.beaconGenesis.genesis_validators_root
fork = vc.forkAtEpoch(epoch)
rsig = await validator.getEpochSignature(fork, genesisRoot, epoch)
timeElapsed = Moment.now() - start
if rsig.isErr():
debug "Unable to prepare RANDAO signature", epoch = epoch,
validator = shortLog(validator), elapsed_time = timeElapsed,
current_slot = currentSlot, destination_slot = destSlot,
delay = vc.getDelay(deadline)
else:
debug "RANDAO signature has been prepared", epoch = epoch,
validator = shortLog(validator), elapsed_time = timeElapsed,
current_slot = currentSlot, destination_slot = destSlot,
delay = vc.getDelay(deadline)
else:
debug "RANDAO signature preparation timed out", epoch = epoch,
validator = shortLog(validator),
current_slot = currentSlot, destination_slot = destSlot,
delay = vc.getDelay(deadline)
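A worked timing sketch of the deadline chosen above, assuming mainnet's 12-second slots: the RANDAO signature is requested around the midpoint of slot N - 1, roughly half a slot before proposal slot N begins, so a remote signer has time to answer and the result is cached for `publishBlock`:

```nim
const secondsPerSlot = 12'u64        # assumed mainnet SECONDS_PER_SLOT
let
  proposalSlot = 100'u64
  destSlot = proposalSlot - 1
  offset = secondsPerSlot div 2      # half a slot into slot N - 1
  deadline = destSlot * secondsPerSlot + offset  # seconds since genesis
doAssert deadline == 1194'u64        # 99 * 12 + 6, i.e. 6 s before slot 100
```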
proc spawnProposalTask(vc: ValidatorClientRef,
duty: RestProposerDuty): ProposerTask =
ProposerTask(
randaoFut: prepareRandao(vc, duty.slot, duty.pubkey),
proposeFut: proposeBlock(vc, duty.slot, duty.pubkey),
duty: duty
)
proc publishBlock(vc: ValidatorClientRef, currentSlot, slot: Slot,
validator: AttachedValidator) {.async.} =
let
@ -146,21 +206,22 @@ proc publishBlock(vc: ValidatorClientRef, currentSlot, slot: Slot,
debug "Publishing block", delay = vc.getDelay(slot.block_deadline()),
genesis_root = genesisRoot,
graffiti = graffiti, fork = fork
let randaoReveal =
try:
let res = await validator.getEpochSignature(fork, genesisRoot, slot.epoch)
if res.isErr():
warn "Unable to generate randao reveal using remote signer",
reason = res.error()
let
randaoReveal =
try:
(await validator.getEpochSignature(fork, genesisRoot,
slot.epoch())).valueOr:
warn "Unable to generate RANDAO reveal using remote signer",
reason = error
return
except CancelledError as exc:
debug "RANDAO reveal production has been interrupted"
raise exc
except CatchableError as exc:
error "An unexpected error occurred while receiving RANDAO data",
error_name = exc.name, error_msg = exc.msg
return
res.get()
except CancelledError as exc:
debug "Randao reveal production has been interrupted"
raise exc
except CatchableError as exc:
error "An unexpected error occurred while receiving randao data",
error_name = exc.name, error_msg = exc.msg
return
var beaconBlocks =
block:
@ -333,30 +394,14 @@ proc publishBlock(vc: ValidatorClientRef, currentSlot, slot: Slot,
root: preparedBlock.blockRoot,
signature: signature))
of ConsensusFork.Deneb:
let blobs = preparedBlock.blobsOpt.get()
var signed: seq[SignedBlobSidecar] = @[]
for i in 0..<blobs.len:
let res = validator.getBlobSignature(fork, genesisRoot,
slot, blobs[i])
if res.isErr():
warn "Unable to sign blob",
reason = res.error()
return
let signature = res.get()
signed.add(deneb.SignedBlobSidecar(
message: blobs[i],
signature: signature))
let signedList =
List[SignedBlobSidecar, Limit MAX_BLOBS_PER_BLOCK].init(signed)
RestPublishedSignedBlockContents(kind: ConsensusFork.Deneb,
denebData: DenebSignedBlockContents(
signed_block: deneb.SignedBeaconBlock(
message: preparedBlock.data.denebData,
root: preparedBlock.blockRoot,
signature: signature),
signed_blob_sidecars: signedList
))
kzg_proofs: preparedBlock.kzgProofsOpt.get,
blobs: preparedBlock.blobsOpt.get))
res =
try:
@ -408,11 +453,6 @@ proc proposeBlock(vc: ValidatorClientRef, slot: Slot,
error "Unexpected error encountered while proposing block",
slot = slot, validator = shortLog(validator)
proc spawnProposalTask(vc: ValidatorClientRef,
duty: RestProposerDuty): ProposerTask =
let future = proposeBlock(vc, duty.slot, duty.pubkey)
ProposerTask(future: future, duty: duty)
proc contains(data: openArray[RestProposerDuty], task: ProposerTask): bool =
for item in data:
if (item.pubkey == task.duty.pubkey) and (item.slot == task.duty.slot):
@ -462,13 +502,14 @@ proc addOrReplaceProposers*(vc: ValidatorClientRef, epoch: Epoch,
for task in epochDuties.duties:
if task notin duties:
# Task is no more relevant, so cancel it.
debug "Cancelling running proposal duty task",
debug "Cancelling running proposal duty tasks",
slot = task.duty.slot,
validator = shortLog(task.duty.pubkey)
task.future.cancelSoon()
task.proposeFut.cancelSoon()
task.randaoFut.cancelSoon()
else:
# If task is already running for proper slot, we keep it alive.
debug "Keep running previous proposal duty task",
debug "Keep running previous proposal duty tasks",
slot = task.duty.slot,
validator = shortLog(task.duty.pubkey)
res.add(task)
@ -783,8 +824,10 @@ proc mainLoop(service: BlockServiceRef) {.async.} =
var res: seq[FutureBase]
for epoch, data in vc.proposers.pairs():
for duty in data.duties.items():
if not(duty.future.finished()):
res.add(duty.future.cancelAndWait())
if not(duty.proposeFut.finished()):
res.add(duty.proposeFut.cancelAndWait())
if not(duty.randaoFut.finished()):
res.add(duty.randaoFut.cancelAndWait())
await noCancel allFutures(res)
proc init*(t: typedesc[BlockServiceRef],

View File

@ -65,7 +65,9 @@ type
DutiesServiceRef* = ref object of ClientServiceRef
pollingAttesterDutiesTask*: Future[void]
pollingSyncDutiesTask*: Future[void]
pruneSlashingDatabaseTask*: Future[void]
syncSubscriptionEpoch*: Opt[Epoch]
lastSlashingEpoch*: Opt[Epoch]
FallbackServiceRef* = ref object of ClientServiceRef
changesEvent*: AsyncEvent
@ -95,7 +97,8 @@ type
ProposerTask* = object
duty*: RestProposerDuty
future*: Future[void]
proposeFut*: Future[void]
randaoFut*: Future[void]
ProposedData* = object
epoch*: Epoch
@ -228,6 +231,7 @@ type
blocksSeen*: Table[Slot, BlockDataItem]
rootsSeen*: Table[Eth2Digest, Slot]
processingDelay*: Opt[Duration]
finalizedEpoch*: Opt[Epoch]
rng*: ref HmacDrbgContext
ApiStrategyKind* {.pure.} = enum
@ -235,7 +239,7 @@ type
ApiFailure* {.pure.} = enum
Communication, Invalid, NotFound, OptSynced, NotSynced, Internal,
UnexpectedCode, UnexpectedResponse, NoError
NotImplemented, UnexpectedCode, UnexpectedResponse, NoError
ApiNodeFailure* = object
node*: BeaconNodeServerRef
@ -251,22 +255,6 @@ type
ValidatorApiError* = object of ValidatorClientError
data*: seq[ApiNodeFailure]
FillSignaturesResult* = object
signaturesRequested*: int
signaturesReceived*: int
AttestationSlotRequest* = object
validator*: AttachedValidator
fork*: Fork
slot*: Slot
SyncCommitteeSlotRequest* = object
validator*: AttachedValidator
fork*: Fork
slot*: Slot
sync_committee_index*: IndexInSyncCommittee
duty*: SyncCommitteeDuty
const
DefaultDutyAndProof* = DutyAndProof(epoch: FAR_FUTURE_EPOCH)
DefaultSyncCommitteeDuty* = SyncCommitteeDuty()
@ -382,6 +370,7 @@ proc `$`*(failure: ApiFailure): string =
of ApiFailure.NotSynced: "not-synced"
of ApiFailure.OptSynced: "opt-synced"
of ApiFailure.Internal: "internal-issue"
of ApiFailure.NotImplemented: "not-implemented"
of ApiFailure.UnexpectedCode: "unexpected-code"
of ApiFailure.UnexpectedResponse: "unexpected-data"
of ApiFailure.NoError: "status-update"
@ -1461,240 +1450,6 @@ func `==`*(a, b: SyncCommitteeDuty): bool =
compareUnsorted(a.validator_sync_committee_indices,
b.validator_sync_committee_indices)
proc cmp(x, y: AttestationSlotRequest|SyncCommitteeSlotRequest): int =
cmp(x.slot, y.slot)
func getIndex*(proof: SyncCommitteeSelectionProof,
inindex: IndexInSyncCommittee): Opt[int] =
if len(proof) == 0:
return Opt.none(int)
for index, value in proof.pairs():
if value.sync_committee_index == inindex:
return Opt.some(index)
Opt.none(int)
func hasSignature*(proof: SyncCommitteeSelectionProof,
inindex: IndexInSyncCommittee,
slot: Slot): bool =
let index = proof.getIndex(inindex).valueOr: return false
proof[index].signatures[int(slot.since_epoch_start())].isSome()
proc setSignature*(proof: var SyncCommitteeSelectionProof,
inindex: IndexInSyncCommittee, slot: Slot,
signature: Opt[ValidatorSig]) =
let index = proof.getIndex(inindex).expect(
"EpochSelectionProof should be present at this moment")
proof[index].signatures[int(slot.since_epoch_start())] = signature
proc setSyncSelectionProof*(vc: ValidatorClientRef, pubkey: ValidatorPubKey,
inindex: IndexInSyncCommittee, slot: Slot,
duty: SyncCommitteeDuty,
signature: Opt[ValidatorSig]) =
let
proof =
block:
let length = len(duty.validator_sync_committee_indices)
var res = newSeq[EpochSelectionProof](length)
for i in 0 ..< length:
res[i].sync_committee_index = duty.validator_sync_committee_indices[i]
res
vc.syncCommitteeProofs.
mgetOrPut(slot.epoch(), default(SyncCommitteeProofs)).proofs.
mgetOrPut(pubkey, proof).setSignature(inindex, slot, signature)
proc getSyncCommitteeSelectionProof*(
vc: ValidatorClientRef,
pubkey: ValidatorPubKey,
epoch: Epoch
): Opt[SyncCommitteeSelectionProof] =
vc.syncCommitteeProofs.withValue(epoch, epochProofs):
epochProofs[].proofs.withValue(pubkey, validatorProofs):
return Opt.some(validatorProofs[])
do:
return Opt.none(SyncCommitteeSelectionProof)
do:
return Opt.none(SyncCommitteeSelectionProof)
proc getSyncCommitteeSelectionProof*(
vc: ValidatorClientRef,
pubkey: ValidatorPubKey,
slot: Slot,
inindex: IndexInSyncCommittee
): Opt[ValidatorSig] =
vc.syncCommitteeProofs.withValue(slot.epoch(), epochProofs):
epochProofs[].proofs.withValue(pubkey, validatorProofs):
let index = getIndex(validatorProofs[], inindex).valueOr:
return Opt.none(ValidatorSig)
return validatorProofs[][index].signatures[int(slot.since_epoch_start())]
do:
return Opt.none(ValidatorSig)
do:
return Opt.none(ValidatorSig)
proc fillSyncCommitteeSelectionProofs*(
service: DutiesServiceRef,
start, finish: Slot
): Future[FillSignaturesResult] {.async.} =
let
vc = service.client
genesisRoot = vc.beaconGenesis.genesis_validators_root
var
requests =
block:
var res: seq[SyncCommitteeSlotRequest]
for epoch in start.epoch() .. finish.epoch():
let
fork = vc.forkAtEpoch(epoch)
period = epoch.sync_committee_period()
for duty in vc.syncDutiesForPeriod(period):
let validator = vc.attachedValidators[].
getValidator(duty.pubkey).valueOr:
# Ignore all the validators which are not here anymore
continue
if validator.index.isNone():
# Ignore all the valididators which do not have index yet.
continue
let proof = vc.getSyncCommitteeSelectionProof(duty.pubkey, epoch).
get(default(SyncCommitteeSelectionProof))
for inindex in duty.validator_sync_committee_indices:
for slot in epoch.slots():
if slot < start: continue
if slot > finish: break
if not(proof.hasSignature(inindex, slot)):
res.add(
SyncCommitteeSlotRequest(
validator: validator,
fork: fork,
slot: slot,
duty: duty,
sync_committee_index: inindex))
# We make requests sorted by slot number.
sorted(res, cmp, order = SortOrder.Ascending)
sigres = FillSignaturesResult(signaturesRequested: len(requests))
pendingRequests = requests.mapIt(
FutureBase(getSyncCommitteeSelectionProof(
it.validator, it.fork, genesisRoot, it.slot,
getSubcommitteeIndex(it.sync_committee_index))))
while len(pendingRequests) > 0:
try:
discard await race(pendingRequests)
except CancelledError as exc:
let pending = pendingRequests
.filterIt(not(it.finished())).mapIt(it.cancelAndWait())
await noCancel allFutures(pending)
raise exc
(requests, pendingRequests) =
block:
var
res1: seq[SyncCommitteeSlotRequest]
res2: seq[FutureBase]
for index, fut in pendingRequests.pairs():
if not(fut.finished()):
res1.add(requests[index])
res2.add(fut)
else:
let
request = requests[index]
signature =
if fut.completed():
let sres = Future[SignatureResult](fut).read()
if sres.isErr():
warn "Unable to create slot signature using remote signer",
reason = sres.error(), epoch = request.slot.epoch(),
slot = request.slot
Opt.none(ValidatorSig)
else:
inc(sigres.signaturesReceived)
Opt.some(sres.get())
else:
Opt.none(ValidatorSig)
vc.setSyncSelectionProof(request.validator.pubkey,
request.sync_committee_index,
request.slot, request.duty,
signature)
(res1, res2)
sigres
proc fillAttestationSelectionProofs*(
service: DutiesServiceRef,
start, finish: Slot
): Future[FillSignaturesResult] {.async.} =
let
vc = service.client
genesisRoot = vc.beaconGenesis.genesis_validators_root
var
requests =
block:
var res: seq[AttestationSlotRequest]
for epoch in start.epoch() .. finish.epoch():
for duty in vc.attesterDutiesForEpoch(epoch):
if (duty.data.slot < start) or (duty.data.slot > finish):
# Ignore all the slots which are not in range.
continue
if duty.slotSig.isSome():
# Ignore all the duties which already has selection proof.
continue
let validator = vc.attachedValidators[].
getValidator(duty.data.pubkey).valueOr:
# Ignore all the validators which are not here anymore
continue
if validator.index.isNone():
# Ignore all the valididators which do not have index yet.
continue
res.add(AttestationSlotRequest(
validator: validator,
slot: duty.data.slot,
fork: vc.forkAtEpoch(duty.data.slot.epoch())
))
# We make requests sorted by slot number.
sorted(res, cmp, order = SortOrder.Ascending)
sigres = FillSignaturesResult(signaturesRequested: len(requests))
pendingRequests = requests.mapIt(
FutureBase(getSlotSignature(it.validator, it.fork, genesisRoot, it.slot)))
while len(pendingRequests) > 0:
try:
discard await race(pendingRequests)
except CancelledError as exc:
let pending = pendingRequests
.filterIt(not(it.finished())).mapIt(it.cancelAndWait())
await noCancel allFutures(pending)
raise exc
(requests, pendingRequests) =
block:
var
res1: seq[AttestationSlotRequest]
res2: seq[FutureBase]
for index, fut in pendingRequests.pairs():
if not(fut.finished()):
res1.add(requests[index])
res2.add(fut)
else:
let
request = requests[index]
signature =
if fut.completed():
let sres = Future[SignatureResult](fut).read()
if sres.isErr():
warn "Unable to create slot signature using remote signer",
reason = sres.error(), epoch = request.slot.epoch(),
slot = request.slot
Opt.none(ValidatorSig)
else:
inc(sigres.signaturesReceived)
Opt.some(sres.get())
else:
Opt.none(ValidatorSig)
vc.attesters.withValue(request.validator.pubkey, map):
map[].duties.withValue(request.slot.epoch(), dap):
dap[].slotSig = signature
(res1, res2)
sigres
proc updateRuntimeConfig*(vc: ValidatorClientRef,
node: BeaconNodeServerRef,
info: VCRuntimeConfig): Result[void, string] =

View File

@ -6,8 +6,8 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import std/[sets, sequtils]
import chronicles
import common, api, block_service
import chronicles, metrics
import common, api, block_service, selection_proofs
const
ServiceName = "duties_service"
@ -19,7 +19,8 @@ logScope: service = ServiceName
type
DutiesServiceLoop* = enum
AttesterLoop, ProposerLoop, IndicesLoop, SyncCommitteeLoop,
ProposerPreparationLoop, ValidatorRegisterLoop, DynamicValidatorsLoop
ProposerPreparationLoop, ValidatorRegisterLoop, DynamicValidatorsLoop,
SlashPruningLoop
chronicles.formatIt(DutiesServiceLoop):
case it
@ -30,6 +31,7 @@ chronicles.formatIt(DutiesServiceLoop):
of ProposerPreparationLoop: "proposer_prepare_loop"
of ValidatorRegisterLoop: "validator_register_loop"
of DynamicValidatorsLoop: "dynamic_validators_loop"
of SlashPruningLoop: "slashing_pruning_loop"
proc checkDuty(duty: RestAttesterDuty): bool =
(duty.committee_length <= MAX_VALIDATORS_PER_COMMITTEE) and
@ -312,12 +314,23 @@ proc pollForAttesterDuties*(service: DutiesServiceRef) {.async.} =
block:
let
moment = Moment.now()
sigres = await service.fillAttestationSelectionProofs(
currentSlot, currentSlot + Epoch(1))
debug "Attestation selection proofs have been received",
signatures_requested = sigres.signaturesRequested,
signatures_received = sigres.signaturesReceived,
time = (Moment.now() - moment)
sigres =
await vc.fillAttestationSelectionProofs(currentSlot,
currentSlot + Epoch(AGGREGATION_PRE_COMPUTE_EPOCHS))
if vc.config.distributedEnabled:
debug "Attestation selection proofs have been received",
signatures_requested = sigres.signaturesRequested,
signatures_received = sigres.signaturesReceived,
selections_requested = sigres.selections_requested,
selections_received = sigres.selections_received,
selections_processed = sigres.selections_processed,
total_elapsed_time = (Moment.now() - moment)
else:
debug "Attestation selection proofs have been received",
signatures_requested = sigres.signaturesRequested,
signatures_received = sigres.signaturesReceived,
total_elapsed_time = (Moment.now() - moment)
let subscriptions =
block:
@ -391,12 +404,23 @@ proc pollForSyncCommitteeDuties*(service: DutiesServiceRef) {.async.} =
block:
let
moment = Moment.now()
sigres = await service.fillSyncCommitteeSelectionProofs(
currentSlot, currentSlot + Epoch(AGGREGATION_PRE_COMPUTE_EPOCHS))
debug "Sync committee selection proofs have been received",
signatures_requested = sigres.signaturesRequested,
signatures_received = sigres.signaturesReceived,
time = (Moment.now() - moment)
sigres =
await vc.fillSyncCommitteeSelectionProofs(currentSlot,
currentSlot + Epoch(AGGREGATION_PRE_COMPUTE_EPOCHS))
if vc.config.distributedEnabled:
debug "Sync committee selection proofs have been received",
signatures_requested = sigres.signaturesRequested,
signatures_received = sigres.signaturesReceived,
selections_requested = sigres.selections_requested,
selections_received = sigres.selections_received,
selections_processed = sigres.selections_processed,
total_elapsed_time = (Moment.now() - moment)
else:
debug "Sync committee selection proofs have been received",
signatures_requested = sigres.signaturesRequested,
signatures_received = sigres.signaturesReceived,
total_elapsed_time = (Moment.now() - moment)
let
periods =
@@ -677,6 +701,70 @@ proc syncCommitteeDutiesLoop(service: DutiesServiceRef) {.async.} =
# Spawning new attestation duties task.
service.pollingSyncDutiesTask = service.pollForSyncCommitteeDuties()
proc getNextEpochMiddleSlot(vc: ValidatorClientRef): Slot =
let
middleSlot = Slot(SLOTS_PER_EPOCH div 2)
currentSlot = vc.beaconClock.now().slotOrZero()
slotInEpoch = currentSlot.since_epoch_start()
if slotInEpoch >= middleSlot:
(currentSlot.epoch + 1'u64).start_slot() + uint64(middleSlot)
else:
currentSlot + (uint64(middleSlot) - uint64(slotInEpoch))
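
The `getNextEpochMiddleSlot` helper above targets the middle slot of an epoch so that the pruning work runs well away from epoch boundaries. Below is a minimal standalone sketch of the same arithmetic, assuming the mainnet preset of 32 slots per epoch and plain `uint64` values in place of the codebase's `Slot`/`Epoch` types; `nextEpochMiddleSlot` is a name used only in this sketch.

```nim
# Standalone sketch of the epoch-middle-slot arithmetic (mainnet preset).
const SlotsPerEpoch = 32'u64

func nextEpochMiddleSlot(currentSlot: uint64): uint64 =
  let
    middle = SlotsPerEpoch div 2          # slot 16 within the epoch
    slotInEpoch = currentSlot mod SlotsPerEpoch
  if slotInEpoch >= middle:
    # Already at or past the middle: aim for the middle of the next epoch.
    (currentSlot div SlotsPerEpoch + 1'u64) * SlotsPerEpoch + middle
  else:
    # Still before the middle: aim for the middle of the current epoch.
    currentSlot + (middle - slotInEpoch)

when isMainModule:
  doAssert nextEpochMiddleSlot(0) == 16
  doAssert nextEpochMiddleSlot(16) == 48
  doAssert nextEpochMiddleSlot(33) == 48
```
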
proc pruneSlashingDatabase(service: DutiesServiceRef) {.async.} =
let
vc = service.client
currentSlot = vc.beaconClock.now().slotOrZero()
startTime = Moment.now()
blockHeader =
try:
await vc.getFinalizedBlockHeader()
except CancelledError as exc:
debug "Finalized block header request was interrupted",
slot = currentSlot
raise exc
except CatchableError as exc:
error "Unexpected error occured while requesting " &
"finalized block header", slot = currentSlot,
err_name = exc.name, err_msg = exc.msg
Opt.none(GetBlockHeaderResponse)
checkpointTime = Moment.now()
if blockHeader.isSome():
let epoch = blockHeader.get().data.header.message.slot.epoch
vc.finalizedEpoch = Opt.some(epoch)
if service.lastSlashingEpoch.get(FAR_FUTURE_EPOCH) != epoch:
vc.attachedValidators[]
.slashingProtection
.pruneAfterFinalization(epoch)
service.lastSlashingEpoch = Opt.some(epoch)
let finishTime = Moment.now()
debug "Slashing database has been pruned", slot = currentSlot,
epoch = currentSlot.epoch(),
finalized_epoch = epoch,
elapsed_time = (finishTime - startTime),
pruning_time = (finishTime - checkpointTime)
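
`pruneSlashingDatabase` fetches the latest finalized block header from a beacon node and prunes the slashing protection database only when the finalized epoch has actually advanced, so the potentially expensive database pass runs at most once per new finalization. A hedged sketch of that policy follows, with a stand-in `SlashingDb` type rather than the real slashing protection database.

```nim
# Sketch of the pruning policy: prune at most once per new finalized epoch.
import std/options

type SlashingDb = object
  prunedUpTo: Option[uint64]   # stand-in for the real pruning state

proc pruneAfterFinalization(db: var SlashingDb, epoch: uint64) =
  # The real database drops records that can no longer matter;
  # here we only record the watermark.
  db.prunedUpTo = some(epoch)

proc maybePrune(db: var SlashingDb, lastPruned: var Option[uint64],
                finalizedEpoch: uint64) =
  # Skip the pass entirely if finality did not advance since the last run.
  if lastPruned != some(finalizedEpoch):
    db.pruneAfterFinalization(finalizedEpoch)
    lastPruned = some(finalizedEpoch)

when isMainModule:
  var
    db = SlashingDb()
    last = none(uint64)
  maybePrune(db, last, 100)   # prunes
  maybePrune(db, last, 100)   # no-op: same finalized epoch
  doAssert db.prunedUpTo == some(100'u64)
```
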
proc slashingDatabasePruningLoop(service: DutiesServiceRef) {.async.} =
let vc = service.client
debug "Slashing database pruning loop is waiting for initialization"
await allFutures(
vc.preGenesisEvent.wait(),
vc.indicesAvailable.wait(),
vc.forksAvailable.wait()
)
doAssert(len(vc.forks) > 0, "Fork schedule must not be empty at this point")
while true:
let slot = await vc.checkedWaitForSlot(vc.getNextEpochMiddleSlot(),
aggregateSlotOffset, false)
if slot.isNone():
continue
if not(isNil(service.pruneSlashingDatabaseTask)) and
not(service.pruneSlashingDatabaseTask.finished()):
await cancelAndWait(service.pruneSlashingDatabaseTask)
service.pruneSlashingDatabaseTask = service.pruneSlashingDatabase()
template checkAndRestart(serviceLoop: DutiesServiceLoop,
future: Future[void], body: untyped): untyped =
if future.finished():
@@ -715,6 +803,7 @@ proc mainLoop(service: DutiesServiceRef) {.async.} =
else:
debug "Dynamic validators update loop disabled"
@[]
slashPruningFut = service.slashingDatabasePruningLoop()
web3SignerUrls = vc.config.web3SignerUrls
while true:
@@ -729,6 +818,7 @@ proc mainLoop(service: DutiesServiceRef) {.async.} =
FutureBase(indicesFut),
FutureBase(syncFut),
FutureBase(prepareFut),
FutureBase(slashPruningFut)
]
for fut in dynamicFuts:
futures.add fut
@@ -749,6 +839,8 @@ proc mainLoop(service: DutiesServiceRef) {.async.} =
service.dynamicValidatorsLoop(
web3SignerUrls[i],
vc.config.web3signerUpdateInterval))
checkAndRestart(SlashPruningLoop, slashPruningFut,
service.slashingDatabasePruningLoop())
false
except CancelledError:
debug "Service interrupted"
@@ -774,6 +866,9 @@ proc mainLoop(service: DutiesServiceRef) {.async.} =
if not(isNil(service.pollingSyncDutiesTask)) and
not(service.pollingSyncDutiesTask.finished()):
pending.add(service.pollingSyncDutiesTask.cancelAndWait())
if not(isNil(service.pruneSlashingDatabaseTask)) and
not(service.pruneSlashingDatabaseTask.finished()):
pending.add(service.pruneSlashingDatabaseTask.cancelAndWait())
await allFutures(pending)
true
except CatchableError as exc:

View File

@@ -0,0 +1,514 @@
# beacon_chain
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import std/[algorithm, sequtils]
import chronicles, chronos, metrics
import common, api
{.push raises: [].}
declareGauge client_slot_signatures_time,
"Time used to obtain slot signatures"
declareGauge client_sync_committee_selection_proof_time,
"Time used to obtain sync committee selection proofs"
declareGauge client_obol_aggregated_slot_signatures_time,
"Time used to obtain slot signatures"
declareGauge client_obol_aggregated_sync_committee_selection_proof_time,
"Time used to obtain sync committee selection proofs"
type
FillSignaturesResult* = object
signaturesRequested*: int
signaturesReceived*: int
selectionsRequested*: int
selectionsReceived*: int
selectionsProcessed*: int
AttestationSlotRequest = object
validator: AttachedValidator
fork: Fork
slot: Slot
proof: Opt[ValidatorSig]
future: FutureBase
SyncCommitteeSlotRequest* = object
validator: AttachedValidator
fork: Fork
slot: Slot
sync_committee_index: IndexInSyncCommittee
sub_committee_index: SyncSubcommitteeIndex
duty: SyncCommitteeDuty
proof: Opt[ValidatorSig]
future: FutureBase
template withTimeMetric(metricName, body: untyped): untyped =
let momentTime = Moment.now()
try:
body
finally:
let elapsedTime = Moment.now() - momentTime
metrics.set(metricName, elapsedTime.milliseconds())
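
The `withTimeMetric` template above records how long a block took in a metrics gauge, even when the block raises. Here is a dependency-free sketch of the same try/finally pattern, using `std/monotimes` in place of chronos' `Moment` and a plain variable in place of a gauge; `withTimeMeasured` and `lastElapsedMs` exist only in this sketch.

```nim
import std/[monotimes, times]

var lastElapsedMs: int64   # stand-in for the metrics gauge

template withTimeMeasured(target: var int64, body: untyped): untyped =
  let start = getMonoTime()
  try:
    body
  finally:
    # Record elapsed wall-clock time even if `body` raised.
    target = (getMonoTime() - start).inMilliseconds()

when isMainModule:
  withTimeMeasured(lastElapsedMs):
    var total = 0
    for i in 0 ..< 1_000_000:
      total += i
    doAssert total > 0
  doAssert lastElapsedMs >= 0
```
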
proc cmp(x, y: AttestationSlotRequest|SyncCommitteeSlotRequest): int =
cmp(x.slot, y.slot)
proc getAttesterDutiesRequests(
vc: ValidatorClientRef,
start, finish: Slot,
genesisRoot: Eth2Digest
): seq[AttestationSlotRequest] =
var res: seq[AttestationSlotRequest]
for epoch in start.epoch() .. finish.epoch():
for duty in vc.attesterDutiesForEpoch(epoch):
if (duty.data.slot < start) or (duty.data.slot > finish):
# Ignore all the slots which are not in range.
continue
if duty.slotSig.isSome():
# Ignore all the duties which already have a selection proof.
continue
let validator = vc.attachedValidators[].
getValidator(duty.data.pubkey).valueOr:
# Ignore all the validators which are not here anymore
continue
if validator.index.isNone():
# Ignore all the validators which do not have an index yet.
continue
let
fork = vc.forkAtEpoch(duty.data.slot.epoch())
future = getSlotSignature(validator, fork, genesisRoot, duty.data.slot)
res.add(
AttestationSlotRequest(validator: validator, slot: duty.data.slot,
fork: fork, future: FutureBase(future)))
# We make requests sorted by slot number.
sorted(res, cmp, order = SortOrder.Ascending)
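
`getAttesterDutiesRequests` walks every epoch in the requested range, skips duties that fall outside the slot range or already carry a selection proof, and returns the remaining work ordered by slot. A reduced sketch of that filter-and-sort pass with illustration-only types (`DutySketch`, `buildRequests`):

```nim
import std/[algorithm, options]

type DutySketch = object
  slot: uint64
  slotSig: Option[string]   # an already known selection proof, if any

func buildRequests(duties: seq[DutySketch], start, finish: uint64): seq[uint64] =
  for duty in duties:
    if duty.slot < start or duty.slot > finish:
      continue               # outside the requested slot range
    if duty.slotSig.isSome():
      continue               # selection proof already known
    result.add duty.slot
  result.sort()              # issue the signing requests in slot order

when isMainModule:
  let duties = @[
    DutySketch(slot: 9), DutySketch(slot: 3),
    DutySketch(slot: 5, slotSig: some("proof"))]
  doAssert buildRequests(duties, 0, 10) == @[3'u64, 9'u64]
```
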
proc fillAttestationSelectionProofs*(
vc: ValidatorClientRef,
start, finish: Slot
): Future[FillSignaturesResult] {.async.} =
let genesisRoot = vc.beaconGenesis.genesis_validators_root
var
requests: seq[AttestationSlotRequest]
sigres: FillSignaturesResult
withTimeMetric(client_slot_signatures_time):
requests = vc.getAttesterDutiesRequests(start, finish, genesisRoot)
sigres.signaturesRequested = len(requests)
var pendingRequests = requests.mapIt(it.future)
while len(pendingRequests) > 0:
try:
discard await race(pendingRequests)
except CancelledError as exc:
var pending: seq[Future[void]]
for future in pendingRequests:
if not(future.finished()): pending.add(future.cancelAndWait())
await noCancel allFutures(pending)
raise exc
pendingRequests =
block:
var res: seq[FutureBase]
for mreq in requests.mitems():
if isNil(mreq.future): continue
if not(mreq.future.finished()):
res.add(mreq.future)
else:
let signature =
if mreq.future.completed():
let sres = Future[SignatureResult](mreq.future).read()
if sres.isErr():
warn "Unable to create slot signature using remote signer",
reason = sres.error(), epoch = mreq.slot.epoch(),
slot = mreq.slot
Opt.none(ValidatorSig)
else:
inc(sigres.signaturesReceived)
Opt.some(sres.get())
else:
Opt.none(ValidatorSig)
mreq.future = nil
mreq.proof = signature
if signature.isSome():
vc.attesters.withValue(mreq.validator.pubkey, map):
map[].duties.withValue(mreq.slot.epoch(), dap):
dap[].slotSig = signature
res
if vc.config.distributedEnabled:
withTimeMetric(client_obol_aggregated_slot_signatures_time):
let (indexToKey, selections) =
block:
var
res1: Table[ValidatorIndex, Opt[ValidatorPubKey]]
res2: seq[RestBeaconCommitteeSelection]
for mreq in requests.mitems():
if mreq.proof.isSome():
res1[mreq.validator.index.get()] = Opt.some(mreq.validator.pubkey)
res2.add(RestBeaconCommitteeSelection(
validator_index: RestValidatorIndex(mreq.validator.index.get()),
slot: mreq.slot, selection_proof: mreq.proof.get()))
(res1, res2)
sigres.selectionsRequested = len(selections)
if len(selections) == 0:
return sigres
let sresponse =
try:
# Query middleware for aggregated signatures.
await vc.submitBeaconCommitteeSelections(selections,
ApiStrategyKind.Best)
except ValidatorApiError as exc:
warn "Unable to submit beacon committee selections",
reason = exc.getFailureReason()
return sigres
except CancelledError as exc:
debug "Beacon committee selections processing was interrupted"
raise exc
except CatchableError as exc:
error "Unexpected error occured while trying to submit beacon " &
"committee selections", reason = exc.msg, error = exc.name
return sigres
sigres.selectionsReceived = len(sresponse.data)
for selection in sresponse.data:
let
vindex = selection.validator_index.toValidatorIndex().valueOr:
warn "Invalid validator_index value encountered while processing " &
"beacon committee selections",
validator_index = uint64(selection.validator_index),
reason = $error
continue
selectionProof = selection.selection_proof.load().valueOr:
warn "Invalid signature encountered while processing " &
"beacon committee selections",
validator_index = vindex, slot = selection.slot,
selection_proof = shortLog(selection.selection_proof)
continue
validator =
block:
# Selections operate on validator indices, so we should check that
# this validator index is present in our validator pool and that it
# is still in place (not removed via the keystore manager).
let key = indexToKey.getOrDefault(vindex)
if key.isNone():
warn "Non-existing validator encountered while processing " &
"beacon committee selections",
validator_index = vindex,
slot = selection.slot,
selection_proof = shortLog(selection.selection_proof)
continue
vc.attachedValidators[].getValidator(key.get()).valueOr:
notice "Found missing validator while processing " &
"beacon committee selections", validator_index = vindex,
slot = selection.slot,
validator = shortLog(key.get()),
selection_proof = shortLog(selection.selection_proof)
continue
vc.attesters.withValue(validator.pubkey, map):
map[].duties.withValue(selection.slot.epoch(), dap):
dap[].slotSig = Opt.some(selectionProof.toValidatorSig())
inc(sigres.selectionsProcessed)
sigres
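
Both fill procedures share the same harvesting loop: start every signing request up front, `race` on whatever is still pending, read out the futures that settled, and keep waiting on the rest. A reduced sketch of that loop follows, assuming the chronos package the validator client already builds on; `fakeSign` and `collectSignatures` are stand-ins for the remote-signer requests and are not part of the codebase.

```nim
import std/sequtils
import chronos

proc fakeSign(delay: int, ok: bool): Future[string] {.async.} =
  # Stand-in for a remote signing request.
  await sleepAsync(delay.milliseconds)
  if not ok:
    raise newException(ValueError, "signer unavailable")
  return "sig-" & $delay

proc collectSignatures(): Future[seq[string]] {.async.} =
  var
    futs = @[fakeSign(10, true), fakeSign(30, false), fakeSign(20, true)]
    pending = futs.mapIt(FutureBase(it))
    sigs: seq[string]
  while pending.len > 0:
    discard await race(pending)        # wake up when any request settles
    pending.setLen(0)
    for fut in futs.mitems():
      if fut.isNil:
        continue                       # already harvested earlier
      if not fut.finished():
        pending.add(FutureBase(fut))   # still in flight, keep waiting
      else:
        if fut.completed():
          sigs.add(fut.read())         # success: keep the signature
        fut = nil                      # failures are simply dropped
  return sigs

when isMainModule:
  doAssert (waitFor collectSignatures()).len == 2
```
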
func getIndex*(proof: SyncCommitteeSelectionProof,
inindex: IndexInSyncCommittee): Opt[int] =
if len(proof) == 0:
return Opt.none(int)
for index, value in proof.pairs():
if value.sync_committee_index == inindex:
return Opt.some(index)
Opt.none(int)
func hasSignature*(proof: SyncCommitteeSelectionProof,
inindex: IndexInSyncCommittee,
slot: Slot): bool =
let index = proof.getIndex(inindex).valueOr: return false
proof[index].signatures[int(slot.since_epoch_start())].isSome()
func getSignature*(proof: SyncCommitteeSelectionProof,
inindex: IndexInSyncCommittee,
slot: Slot): Opt[ValidatorSig] =
let index = proof.getIndex(inindex).valueOr:
return Opt.none(ValidatorSig)
proof[index].signatures[int(slot.since_epoch_start())]
proc setSignature*(proof: var SyncCommitteeSelectionProof,
inindex: IndexInSyncCommittee, slot: Slot,
signature: Opt[ValidatorSig]) =
let index = proof.getIndex(inindex).expect(
"EpochSelectionProof should be present at this moment")
proof[index].signatures[int(slot.since_epoch_start())] = signature
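
The accessors above operate on a per-epoch structure: one entry per sync committee index the validator holds, each entry carrying one optional signature per slot of the epoch. A hedged sketch of that layout with simplified, illustration-only types:

```nim
import std/options

const SlotsPerEpoch = 32   # mainnet preset

type
  EpochSelectionProofSketch = object
    syncCommitteeIndex: int
    signatures: array[SlotsPerEpoch, Option[string]]
  SelectionProofSketch = seq[EpochSelectionProofSketch]

func indexOf(proof: SelectionProofSketch, committeeIndex: int): Option[int] =
  for i, entry in proof:
    if entry.syncCommitteeIndex == committeeIndex:
      return some(i)
  none(int)

when isMainModule:
  var proof: SelectionProofSketch =
    @[EpochSelectionProofSketch(syncCommitteeIndex: 7)]
  let idx = proof.indexOf(7).get()
  proof[idx].signatures[3] = some("proof-for-slot-3")   # slot 3 of the epoch
  doAssert proof.indexOf(9).isNone()
  doAssert proof[idx].signatures[3].isSome()
```
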
proc setSyncSelectionProof*(vc: ValidatorClientRef, pubkey: ValidatorPubKey,
inindex: IndexInSyncCommittee, slot: Slot,
duty: SyncCommitteeDuty,
signature: Opt[ValidatorSig]) =
let
proof =
block:
let length = len(duty.validator_sync_committee_indices)
var res = newSeq[EpochSelectionProof](length)
for i in 0 ..< length:
res[i].sync_committee_index = duty.validator_sync_committee_indices[i]
res
vc.syncCommitteeProofs.
mgetOrPut(slot.epoch(), default(SyncCommitteeProofs)).proofs.
mgetOrPut(pubkey, proof).setSignature(inindex, slot, signature)
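
`setSyncSelectionProof` chains `mgetOrPut` calls so the per-epoch and per-validator entries are created lazily the first time they are needed. A small standalone sketch of the same caching shape, with plain strings standing in for public keys and proofs:

```nim
import std/tables

var cache: Table[uint64, Table[string, seq[string]]]

proc addProof(epoch: uint64, pubkey, proof: string) =
  # Both levels are created on demand; repeated calls reuse the same entries.
  cache.
    mgetOrPut(epoch, initTable[string, seq[string]]()).
    mgetOrPut(pubkey, newSeq[string]()).
    add(proof)

when isMainModule:
  addProof(10, "0xabc", "proof-1")
  addProof(10, "0xabc", "proof-2")
  doAssert cache[10]["0xabc"].len == 2
```
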
proc getSyncCommitteeSelectionProof*(
vc: ValidatorClientRef,
pubkey: ValidatorPubKey,
epoch: Epoch
): Opt[SyncCommitteeSelectionProof] =
vc.syncCommitteeProofs.withValue(epoch, epochProofs):
epochProofs[].proofs.withValue(pubkey, validatorProofs):
return Opt.some(validatorProofs[])
do:
return Opt.none(SyncCommitteeSelectionProof)
do:
return Opt.none(SyncCommitteeSelectionProof)
proc getSyncCommitteeSelectionProof*(
vc: ValidatorClientRef,
pubkey: ValidatorPubKey,
slot: Slot,
inindex: IndexInSyncCommittee
): Opt[ValidatorSig] =
vc.syncCommitteeProofs.withValue(slot.epoch(), epochProofs):
epochProofs[].proofs.withValue(pubkey, validatorProofs):
let index = getIndex(validatorProofs[], inindex).valueOr:
return Opt.none(ValidatorSig)
return validatorProofs[][index].signatures[int(slot.since_epoch_start())]
do:
return Opt.none(ValidatorSig)
do:
return Opt.none(ValidatorSig)
proc getSyncCommitteeDutiesRequests*(
vc: ValidatorClientRef,
start, finish: Slot,
genesisRoot: Eth2Digest
): seq[SyncCommitteeSlotRequest] =
var res: seq[SyncCommitteeSlotRequest]
for epoch in start.epoch() .. finish.epoch():
let
fork = vc.forkAtEpoch(epoch)
period = epoch.sync_committee_period()
for duty in vc.syncDutiesForPeriod(period):
let validator = vc.attachedValidators[].getValidator(duty.pubkey).valueOr:
# Ignore all the validators which are not here anymore
continue
if validator.index.isNone():
# Ignore all the validators which do not have an index yet.
continue
let proof = vc.getSyncCommitteeSelectionProof(duty.pubkey, epoch).
get(default(SyncCommitteeSelectionProof))
for inindex in duty.validator_sync_committee_indices:
for slot in epoch.slots():
if slot < start: continue
if slot > finish: break
if proof.hasSignature(inindex, slot): continue
let
future =
getSyncCommitteeSelectionProof(validator, fork, genesisRoot, slot,
getSubcommitteeIndex(inindex))
req =
SyncCommitteeSlotRequest(
validator: validator,
fork: fork,
slot: slot,
duty: duty,
sync_committee_index: inindex,
sub_committee_index: getSubcommitteeIndex(inindex),
future: FutureBase(future))
res.add(req)
# We make requests sorted by slot number.
sorted(res, cmp, order = SortOrder.Ascending)
proc getSyncRequest*(
requests: var openArray[SyncCommitteeSlotRequest],
validator: AttachedValidator,
slot: Slot,
subcommittee_index: uint64
): Opt[SyncCommitteeSlotRequest] =
for mreq in requests.mitems():
if mreq.validator.pubkey == validator.pubkey and
mreq.slot == slot and
mreq.sub_committee_index == subcommittee_index:
return Opt.some(mreq)
Opt.none(SyncCommitteeSlotRequest)
proc fillSyncCommitteeSelectionProofs*(
vc: ValidatorClientRef,
start, finish: Slot
): Future[FillSignaturesResult] {.async.} =
let genesisRoot = vc.beaconGenesis.genesis_validators_root
var
requests: seq[SyncCommitteeSlotRequest]
sigres: FillSignaturesResult
withTimeMetric(client_sync_committee_selection_proof_time):
requests = vc.getSyncCommitteeDutiesRequests(start, finish, genesisRoot)
sigres.signaturesRequested = len(requests)
var pendingRequests = requests.mapIt(it.future)
while len(pendingRequests) > 0:
try:
discard await race(pendingRequests)
except CancelledError as exc:
var pending: seq[Future[void]]
for future in pendingRequests:
if not(future.finished()): pending.add(future.cancelAndWait())
await noCancel allFutures(pending)
raise exc
pendingRequests =
block:
var res: seq[FutureBase]
for mreq in requests.mitems():
if isNil(mreq.future): continue
if not(mreq.future.finished()):
res.add(mreq.future)
else:
let signature =
if mreq.future.completed():
let sres = Future[SignatureResult](mreq.future).read()
if sres.isErr():
warn "Unable to create slot signature using remote signer",
reason = sres.error(), epoch = mreq.slot.epoch(),
slot = mreq.slot
Opt.none(ValidatorSig)
else:
inc(sigres.signaturesReceived)
Opt.some(sres.get())
else:
Opt.none(ValidatorSig)
mreq.future = nil
mreq.proof = signature
if signature.isSome():
vc.setSyncSelectionProof(mreq.validator.pubkey,
mreq.sync_committee_index,
mreq.slot, mreq.duty,
signature)
res
if vc.config.distributedEnabled:
withTimeMetric(client_obol_aggregated_sync_committee_selection_proof_time):
let (indexToKey, selections) =
block:
var
res1: Table[ValidatorIndex, Opt[ValidatorPubKey]]
res2: seq[RestSyncCommitteeSelection]
for mreq in requests.mitems():
if mreq.proof.isSome():
res1[mreq.validator.index.get()] = Opt.some(mreq.validator.pubkey)
res2.add(RestSyncCommitteeSelection(
validator_index: RestValidatorIndex(mreq.validator.index.get()),
subcommittee_index: uint64(mreq.sub_committee_index),
slot: mreq.slot, selection_proof: mreq.proof.get()))
(res1, res2)
sigres.selectionsRequested = len(selections)
if len(selections) == 0:
return sigres
let sresponse =
try:
# Query middleware for aggregated signatures.
await vc.submitSyncCommitteeSelections(selections,
ApiStrategyKind.Best)
except ValidatorApiError as exc:
warn "Unable to submit sync committee selections",
reason = exc.getFailureReason()
return sigres
except CancelledError as exc:
debug "Sync committee selections processing was interrupted"
raise exc
except CatchableError as exc:
error "Unexpected error occured while trying to submit sync " &
"committee selections", reason = exc.msg, error = exc.name
return sigres
sigres.selectionsReceived = len(sresponse.data)
for selection in sresponse.data:
let
slot = selection.slot
subcommittee_index = selection.subcommittee_index
vindex = selection.validator_index.toValidatorIndex().valueOr:
warn "Invalid validator_index value encountered while processing " &
"sync committee selections",
validator_index = uint64(selection.validator_index),
reason = $error
continue
selectionProof = selection.selection_proof.load().valueOr:
warn "Invalid signature encountered while processing " &
"sync committee selections",
validator_index = vindex, slot = slot,
selection_proof = shortLog(selection.selection_proof)
continue
validator =
block:
# Selections operate on validator indices, so we should check that
# this validator index is present in our validator pool and that it
# is still in place (not removed via the keystore manager).
let key = indexToKey.getOrDefault(vindex)
if key.isNone():
warn "Non-existing validator encountered while processing " &
"sync committee selections",
validator_index = vindex,
slot = slot,
selection_proof = shortLog(selection.selection_proof)
continue
vc.attachedValidators[].getValidator(key.get()).valueOr:
notice "Found missing validator while processing " &
"sync committee selections", validator_index = vindex,
slot = slot,
validator = shortLog(key.get()),
selection_proof = shortLog(selection.selection_proof)
continue
request =
block:
let res = getSyncRequest(requests, validator, slot,
subcommittee_index)
if res.isNone():
warn "Found sync committee selection proof which was not " &
"requested",
slot = slot, subcommittee_index = subcommittee_index,
validator = shortLog(validator),
selection_proof = shortLog(selection.selection_proof)
continue
res.get()
vc.syncCommitteeProofs.withValue(slot.epoch(), epochProofs):
epochProofs[].proofs.withValue(validator.pubkey, signatures):
signatures[].setSignature(request.sync_committee_index,
selection.slot,
Opt.some(selection.selectionProof))
inc(sigres.selectionsProcessed)
sigres
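
In the distributed-validator path, the locally produced partial selection proofs are submitted to the middleware, and the aggregated proofs that come back are matched to local validators by validator index; entries that do not map to a known validator are skipped with a warning, mirroring the checks above. A simplified sketch of that mapping step (all names and types here are illustration-only stand-ins for the REST selection types):

```nim
import std/tables

type SelectionSketch = object   # stand-in for RestSyncCommitteeSelection
  validatorIndex: uint64
  slot: uint64
  selectionProof: string

proc applyAggregated(indexToKey: Table[uint64, string],
                     aggregated: seq[SelectionSketch],
                     store: var Table[string, string]): int =
  # Returns how many aggregated proofs were matched to local validators.
  for sel in aggregated:
    let pubkey = indexToKey.getOrDefault(sel.validatorIndex, "")
    if pubkey.len == 0:
      continue   # unknown index: not one of our validators, skip it
    store[pubkey & "@" & $sel.slot] = sel.selectionProof
    inc result

when isMainModule:
  let indexToKey = {1000'u64: "0xaaa"}.toTable
  var store: Table[string, string]
  let matched = applyAggregated(indexToKey, @[
    SelectionSketch(validatorIndex: 1000, slot: 5, selectionProof: "agg"),
    SelectionSketch(validatorIndex: 42, slot: 5, selectionProof: "agg")], store)
  doAssert matched == 1
  doAssert store["0xaaa@5"] == "agg"
```
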

View File

@@ -11,7 +11,7 @@ import
../spec/datatypes/[phase0, altair, bellatrix],
../spec/eth2_apis/rest_types,
../validators/activity_metrics,
"."/[common, api]
"."/[common, api, selection_proofs]
const
ServiceName = "sync_committee_service"

View File

@@ -88,9 +88,6 @@ declarePublicGauge(attached_validator_balance_total,
logScope: topics = "beacval"
type
BlobsBundle = tuple[blobs: deneb.Blobs,
kzgs: KzgCommitments,
proofs: seq[kzg_abi.KZGProof]]
ForkedBlockResult =
Result[tuple[blck: ForkedBeaconBlock,
blockValue: Wei,
@@ -440,7 +437,8 @@ proc makeBeaconBlockForHeadAndSlot*(
execution_payload: Opt[PayloadType],
transactions_root: Opt[Eth2Digest],
execution_payload_root: Opt[Eth2Digest],
withdrawals_root: Opt[Eth2Digest]):
withdrawals_root: Opt[Eth2Digest],
kzg_commitments: Opt[KzgCommitments]):
Future[ForkedBlockResult] {.async.} =
# Advance state to the slot that we're proposing for
var cache = StateCache()
@@ -528,7 +526,8 @@ proc makeBeaconBlockForHeadAndSlot*(
cache,
verificationFlags = {},
transactions_root = transactions_root,
execution_payload_root = execution_payload_root).mapErr do (error: cstring) -> string:
execution_payload_root = execution_payload_root,
kzg_commitments = kzg_commitments).mapErr do (error: cstring) -> string:
# This is almost certainly a bug, but it's complex enough that there's a
# small risk it might happen even when most proposals succeed - thus we
# log instead of asserting
@@ -539,10 +538,7 @@ proc makeBeaconBlockForHeadAndSlot*(
var blobsBundleOpt = Opt.none(BlobsBundle)
when payload is deneb.ExecutionPayloadForSigning:
let bb: BlobsBundle = (blobs: payload.blobs,
kzgs: payload.kzgs,
proofs: payload.proofs)
blobsBundleOpt = Opt.some(bb)
blobsBundleOpt = Opt.some(payload.blobsBundle)
return if blck.isOk:
ok((blck.get, payload.blockValue, blobsBundleOpt))
else:
@@ -560,7 +556,8 @@ proc makeBeaconBlockForHeadAndSlot*(
execution_payload = Opt.none(PayloadType),
transactions_root = Opt.none(Eth2Digest),
execution_payload_root = Opt.none(Eth2Digest),
withdrawals_root = Opt.none(Eth2Digest))
withdrawals_root = Opt.none(Eth2Digest),
kzg_commitments = Opt.none(KzgCommitments))
proc getBlindedExecutionPayload[
EPH: capella.ExecutionPayloadHeader |
@@ -619,6 +616,7 @@ from ./message_router_mev import
func constructSignableBlindedBlock[T: capella_mev.SignedBlindedBeaconBlock](
blck: capella.BeaconBlock,
executionPayloadHeader: capella.ExecutionPayloadHeader): T =
# Leaves signature field default, to be filled in by caller
const
blckFields = getFieldNames(typeof(blck))
blckBodyFields = getFieldNames(typeof(blck.body))
@@ -659,19 +657,12 @@ proc constructSignableBlindedBlock[T: deneb_mev.SignedBlindedBeaconBlockContents
doAssert bbb.proofs.len == bbb.blob_roots.len
doAssert bbb.proofs.len == bbb.commitments.len
if blindedBlockContents.signed_blinded_blob_sidecars.setLen(bbb.proofs.len):
for i in 0 ..< blindedBlockContents.signed_blinded_blob_sidecars.lenu64:
assign(
blindedBlockContents.signed_blinded_blob_sidecars[i],
deneb_mev.SignedBlindedBlobSidecar(message: deneb_mev.BlindedBlobSidecar(
block_root: hash_tree_root(blck),
index: i,
slot: distinctBase(blck.slot),
block_parent_root: blck.parent_root,
proposer_index: blck.proposer_index,
blob_root: bbb.blob_roots[i],
kzg_commitment: bbb.commitments[i],
kzg_proof: bbb.proofs[i])))
assign(blindedBlock.message.body.blob_kzg_commitments, bbb.commitments)
let sidecars = blindedBlock.create_blob_sidecars(bbb.proofs, bbb.blob_roots)
if blindedBlockContents.blinded_blob_sidecars.setLen(bbb.proofs.len):
for i in 0 ..< sidecars.len:
assign(blindedBlockContents.blinded_blob_sidecars[i], sidecars[i])
else:
debug "constructSignableBlindedBlock: unable to set blinded blob sidecar length",
blobs_len = bbb.proofs.len
@@ -761,7 +752,7 @@ proc blindedBlockCheckSlashingAndSign[
fork, genesis_validators_root, slot, blockRoot,
blindedBlockContents.signed_blinded_block.message)
if res.isErr():
return err("Unable to sign block: " & res.error())
return err("Unable to sign blinded block: " & res.error())
res.get()
return ok blindedBlockContents
@@ -832,6 +823,7 @@ proc getBlindedBlockParts[
template actualEPH: untyped = executionPayloadHeader.get.blindedBlckPart
let withdrawals_root =
Opt.some executionPayloadHeader.get.blindedBlckPart.withdrawals_root
const kzg_commitments = Opt.none KzgCommitments
var shimExecutionPayload: PayloadType
copyFields(
@@ -841,7 +833,10 @@ proc getBlindedBlockParts[
type PayloadType = deneb.ExecutionPayloadForSigning
template actualEPH: untyped =
executionPayloadHeader.get.blindedBlckPart.execution_payload_header
let withdrawals_root = Opt.some actualEPH.withdrawals_root
let
withdrawals_root = Opt.some actualEPH.withdrawals_root
kzg_commitments = Opt.some(
executionPayloadHeader.get.blindedBlckPart.blinded_blobs_bundle.commitments)
var shimExecutionPayload: PayloadType
type DenebEPH =
@@ -856,7 +851,8 @@ proc getBlindedBlockParts[
execution_payload = Opt.some shimExecutionPayload,
transactions_root = Opt.some actualEPH.transactions_root,
execution_payload_root = Opt.some hash_tree_root(actualEPH),
withdrawals_root = withdrawals_root)
withdrawals_root = withdrawals_root,
kzg_commitments = kzg_commitments)
if newBlock.isErr():
# Haven't committed to the MEV block, so allow EL fallback.
@@ -1153,32 +1149,6 @@ proc proposeBlockAux(
.slashingProtection
.registerBlock(validator_index, validator.pubkey, slot, signingRoot)
let blobSidecarsOpt =
when forkyBlck is deneb.BeaconBlock:
var sidecars: seq[BlobSidecar]
let bundle = collectedBids.engineBlockFut.read.get().blobsBundleOpt.get
let (blobs, kzgs, proofs) = (bundle.blobs, bundle.kzgs, bundle.proofs)
for i in 0..<blobs.len:
var sidecar = BlobSidecar(
block_root: blockRoot,
index: BlobIndex(i),
slot: slot,
block_parent_root: forkyBlck.parent_root,
proposer_index: forkyBlck.proposer_index,
blob: blobs[i],
kzg_commitment: kzgs[i],
kzg_proof: proofs[i]
)
sidecars.add(sidecar)
Opt.some(sidecars)
elif forkyBlck is phase0.BeaconBlock or
forkyBlck is altair.BeaconBlock or
forkyBlck is bellatrix.BeaconBlock or
forkyBlck is capella.BeaconBlock:
Opt.none(seq[BlobSidecar])
else:
static: doAssert "Unknown BeaconBlock type"
if notSlashable.isErr:
warn "Slashing protection activated for block proposal",
blockRoot = shortLog(blockRoot), blck = shortLog(forkyBlck),
@@ -1198,51 +1168,20 @@ proc proposeBlockAux(
validator = shortLog(validator), error_msg = res.error()
return head
res.get()
signedBlock =
when forkyBlck is phase0.BeaconBlock:
phase0.SignedBeaconBlock(
message: forkyBlck, signature: signature, root: blockRoot)
elif forkyBlck is altair.BeaconBlock:
altair.SignedBeaconBlock(
message: forkyBlck, signature: signature, root: blockRoot)
elif forkyBlck is bellatrix.BeaconBlock:
bellatrix.SignedBeaconBlock(
message: forkyBlck, signature: signature, root: blockRoot)
elif forkyBlck is capella.BeaconBlock:
capella.SignedBeaconBlock(
message: forkyBlck, signature: signature, root: blockRoot)
elif forkyBlck is deneb.BeaconBlock:
deneb.SignedBeaconBlock(
message: forkyBlck, signature: signature, root: blockRoot)
signedBlock = consensusFork.SignedBeaconBlock(
message: forkyBlck, signature: signature, root: blockRoot)
blobsOpt =
when consensusFork >= ConsensusFork.Deneb:
template blobsBundle: untyped =
collectedBids.engineBlockFut.read.get.blobsBundleOpt.get
Opt.some(signedBlock.create_blob_sidecars(
blobsBundle.proofs, blobsBundle.blobs))
else:
static: doAssert "Unknown SignedBeaconBlock type"
signedBlobs =
when forkyBlck is phase0.BeaconBlock or
forkyBlck is altair.BeaconBlock or
forkyBlck is bellatrix.BeaconBlock or
forkyBlck is capella.BeaconBlock:
Opt.none(SignedBlobSidecars)
elif forkyBlck is deneb.BeaconBlock:
var signed: seq[SignedBlobSidecar]
let blobSidecars = blobSidecarsOpt.get()
for i in 0..<blobs.len:
let res = validator.getBlobSignature(fork, genesis_validators_root,
slot, blobSidecars[i])
if res.isErr():
warn "Unable to sign blob",
reason = res.error()
return
let signature = res.get()
signed.add(deneb.SignedBlobSidecar(
message: blobSidecars[i],
signature: signature))
Opt.some(signed)
else:
static: doAssert "Unknown SignedBeaconBlock type"
newBlockRef =
(await node.router.routeSignedBeaconBlock(signedBlock, signedBlobs)).valueOr:
return head # Errors logged in router
Opt.none(seq[BlobSidecar])
newBlockRef = (
await node.router.routeSignedBeaconBlock(signedBlock, blobsOpt)
).valueOr:
return head # Errors logged in router
if newBlockRef.isNone():
return head # Validation errors logged in router
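
With blob sidecar signatures removed, the proposer now derives plain `BlobSidecar` objects from the engine's blobs bundle via `create_blob_sidecars` and hands them to the router alongside the signed block. A rough sketch of the per-blob assembly with simplified stand-in types; the real `BlobSidecar` additionally carries the signed block header and a KZG commitment inclusion proof.

```nim
type
  BlobsBundleSketch = object
    blobs, commitments, proofs: seq[string]
  BlobSidecarSketch = object
    index: uint64
    blob, kzgCommitment, kzgProof: string

func createBlobSidecarsSketch(bundle: BlobsBundleSketch): seq[BlobSidecarSketch] =
  # One sidecar per blob, pairing each blob with its commitment and proof.
  doAssert bundle.blobs.len == bundle.commitments.len
  doAssert bundle.blobs.len == bundle.proofs.len
  for i in 0 ..< bundle.blobs.len:
    result.add BlobSidecarSketch(
      index: uint64(i),
      blob: bundle.blobs[i],
      kzgCommitment: bundle.commitments[i],
      kzgProof: bundle.proofs[i])

when isMainModule:
  let bundle = BlobsBundleSketch(
    blobs: @["blob-0", "blob-1"],
    commitments: @["c0", "c1"],
    proofs: @["p0", "p1"])
  doAssert createBlobSidecarsSketch(bundle).len == 2
```
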

View File

@@ -1021,10 +1021,7 @@ proc createLocalValidatorFiles*(
encodedStorage: string
): Result[void, KeystoreGenerationError] {.raises: [].} =
var
success = false # becomes true when everything is created successfully
cleanupSecretsDir = true # becomes false if secretsDir already existed
cleanupValidatorsDir = true # becomes false if validatorsDir already existed
var success = false # becomes true when everything is created successfully
# secretsDir:
let secretsDirExisted: bool = dirExists(secretsDir)
@@ -1068,10 +1065,7 @@ proc createLockedLocalValidatorFiles(
encodedStorage: string
): Result[FileLockHandle, KeystoreGenerationError] {.raises: [].} =
var
success = false # becomes true when everything is created successfully
cleanupSecretsDir = true # becomes false if secretsDir already existed
cleanupValidatorsDir = true # becomes false if validatorsDir already existed
var success = false # becomes true when everything is created successfully
# secretsDir:
let secretsDirExisted: bool = dirExists(secretsDir)

View File

@@ -82,14 +82,10 @@ template blockProcessor(router: MessageRouter): ref BlockProcessor =
template getCurrentBeaconTime(router: MessageRouter): BeaconTime =
router.processor[].getCurrentBeaconTime()
type SignedBlobSidecars* = seq[SignedBlobSidecar]
func shortLog*(v: SignedBlobSidecars): auto =
"[" & v.mapIt(shortLog(it)).join(", ") & "]"
type RouteBlockResult = Result[Opt[BlockRef], cstring]
proc routeSignedBeaconBlock*(
router: ref MessageRouter, blck: ForkySignedBeaconBlock,
blobsOpt: Opt[SignedBlobSidecars]): Future[RouteBlockResult] {.async.} =
blobsOpt: Opt[seq[BlobSidecar]]): Future[RouteBlockResult] {.async.} =
## Validate and broadcast beacon block, then add it to the block database
## Returns the new Head when block is added successfully to dag, none when
## block passes validation but is not added, and error otherwise
@@ -112,8 +108,8 @@ proc routeSignedBeaconBlock*(
let blobs = blobsOpt.get()
let kzgCommits = blck.message.body.blob_kzg_commitments.asSeq
if blobs.len > 0 or kzgCommits.len > 0:
let res = validate_blobs(kzgCommits, blobs.mapIt(it.message.blob),
blobs.mapIt(it.message.kzg_proof))
let res = validate_blobs(kzgCommits, blobs.mapIt(it.blob),
blobs.mapIt(it.kzg_proof))
if res.isErr():
warn "blobs failed validation",
blockRoot = shortLog(blck.root),
@@ -145,26 +141,26 @@ proc routeSignedBeaconBlock*(
blockRoot = shortLog(blck.root), blck = shortLog(blck.message),
signature = shortLog(blck.signature), error = res.error()
var blobs = Opt.none(seq[ref BlobSidecar])
var blobRefs = Opt.none(BlobSidecars)
if blobsOpt.isSome():
let signedBlobs = blobsOpt.get()
var workers = newSeq[Future[SendResult]](signedBlobs.len)
for i in 0..<signedBlobs.len:
let subnet_id = compute_subnet_for_blob_sidecar(BlobIndex(i))
workers[i] = router[].network.broadcastBlobSidecar(subnet_id, signedBlobs[i])
let blobs = blobsOpt.get()
var workers = newSeq[Future[SendResult]](blobs.len)
for i in 0..<blobs.lenu64:
let subnet_id = compute_subnet_for_blob_sidecar(i)
workers[i] = router[].network.broadcastBlobSidecar(subnet_id, blobs[i])
let allres = await allFinished(workers)
for i in 0..<allres.len:
let res = allres[i]
doAssert res.finished()
if res.failed():
notice "Blob not sent",
blob = shortLog(signedBlobs[i]), error = res.error[]
blob = shortLog(blobs[i]), error = res.error[]
else:
notice "Blob sent", blob = shortLog(signedBlobs[i])
blobs = Opt.some(blobsOpt.get().mapIt(newClone(it.message)))
notice "Blob sent", blob = shortLog(blobs[i])
blobRefs = Opt.some(blobs.mapIt(newClone(it)))
let added = await router[].blockProcessor[].addBlock(
MsgSource.api, ForkedSignedBeaconBlock.init(blck), blobs)
MsgSource.api, ForkedSignedBeaconBlock.init(blck), blobRefs)
# The boolean we return tells the caller whether the block was integrated
# into the chain
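
Each sidecar is broadcast on its own gossip subnet derived purely from the blob index. A one-line sketch of that mapping, assuming the Deneb mainnet configuration value `BLOB_SIDECAR_SUBNET_COUNT = 6`:

```nim
const BlobSidecarSubnetCount = 6'u64   # assumed Deneb mainnet configuration

func computeSubnetForBlobSidecarSketch(blobIndex: uint64): uint64 =
  blobIndex mod BlobSidecarSubnetCount

when isMainModule:
  for i in 0'u64 ..< 12'u64:
    doAssert computeSubnetForBlobSidecarSketch(i) == i mod 6
```
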

View File

@@ -72,7 +72,7 @@ proc unblindAndRouteBlockMEV*(
if hash_tree_root(
blindedBlock.message.body.execution_payload_header) !=
hash_tree_root(unblindedPayload.data.data):
return err("unblinded payload doesn't match blinded payload header: " &
err("unblinded payload doesn't match blinded payload header: " &
$blindedBlock.message.body.execution_payload_header)
else:
# Signature provided is consistent with unblinded execution payload,
@@ -97,7 +97,7 @@ proc unblindAndRouteBlockMEV*(
let newBlockRef =
(await node.router.routeSignedBeaconBlock(
signedBlock, Opt.none(SignedBlobSidecars))).valueOr:
signedBlock, Opt.none(seq[BlobSidecar]))).valueOr:
# submitBlindedBlock has run, so don't allow fallback to run
return err("routeSignedBeaconBlock error") # Errors logged in router
@@ -107,19 +107,17 @@ proc unblindAndRouteBlockMEV*(
blockRoot = shortLog(signedBlock.root), blck = shortLog(signedBlock),
signature = shortLog(signedBlock.signature)
return ok newBlockRef
ok newBlockRef
else:
return err("submitBlindedBlock failed with HTTP error code" &
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#proposer-slashing
# This means if a validator publishes a signature for a
# `BlindedBeaconBlock` (via a dissemination of a
# `SignedBlindedBeaconBlock`) then the validator **MUST** not use the
# local build process as a fallback, even in the event of some failure
# with the external builder network.
err("submitBlindedBlock failed with HTTP error code" &
$unblindedPayload.status & ": " & $shortLog(blindedBlock))
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#proposer-slashing
# This means if a validator publishes a signature for a
# `BlindedBeaconBlock` (via a dissemination of a
# `SignedBlindedBeaconBlock`) then the validator **MUST** not use the
# local build process as a fallback, even in the event of some failure
# with the external builder network.
return err("unblindAndRouteBlockMEV error")
# TODO currently cannot be combined into one generic function
proc unblindAndRouteBlockMEV*(
node: BeaconNode, payloadBuilderRestClient: RestClientRef,
@@ -150,7 +148,7 @@ proc unblindAndRouteBlockMEV*(
if hash_tree_root(
blindedBlock.message.body.execution_payload_header) !=
hash_tree_root(unblindedPayload.data.data.execution_payload):
return err("unblinded payload doesn't match blinded payload header: " &
err("unblinded payload doesn't match blinded payload header: " &
$blindedBlock.message.body.execution_payload_header)
else:
# Signature provided is consistent with unblinded execution payload,
@@ -177,7 +175,7 @@ proc unblindAndRouteBlockMEV*(
let newBlockRef =
(await node.router.routeSignedBeaconBlock(
signedBlock, Opt.none(SignedBlobSidecars))).valueOr:
signedBlock, Opt.none(seq[BlobSidecar]))).valueOr:
# submitBlindedBlock has run, so don't allow fallback to run
return err("routeSignedBeaconBlock error") # Errors logged in router
@@ -189,14 +187,13 @@ proc unblindAndRouteBlockMEV*(
discard $denebImplementationMissing & ": route unblinded blobs"
return ok newBlockRef
ok newBlockRef
else:
return err("submitBlindedBlock failed with HTTP error code" &
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#proposer-slashing
# This means if a validator publishes a signature for a
# `BlindedBeaconBlock` (via a dissemination of a
# `SignedBlindedBeaconBlock`) then the validator **MUST** not use the
# local build process as a fallback, even in the event of some failure
# with the external builder network.
err("submitBlindedBlock failed with HTTP error code" &
$unblindedPayload.status & ": " & $shortLog(blindedBlock))
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#proposer-slashing
# This means if a validator publishes a signature for a
# `BlindedBeaconBlock` (via a dissemination of a
# `SignedBlindedBeaconBlock`) then the validator **MUST** not use the
# local build process as a fallback, even in the event of some failure
# with the external builder network.

Some files were not shown because too many files have changed in this diff