Merge branch 'dev/etan/df-forkedblobs' into feat_eip-7688

Etan Kissling 2024-09-01 13:18:18 +02:00
commit 7ac4756e73
No known key found for this signature in database
GPG Key ID: B21DA824C5A3D03D
121 changed files with 4574 additions and 3909 deletions

.dockerignore (new file, 9 lines)

@@ -0,0 +1,9 @@
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
vendor/*


@@ -35,18 +35,8 @@ jobs:
             cpu: amd64
           - os: windows
             cpu: amd64
-        branch: [~, upstream/version-2-0]
-        exclude:
-          - target:
-              os: macos
-            branch: upstream/version-2-0
-          - target:
-              os: windows
-            branch: upstream/version-2-0
+        branch: [~]
         include:
-          - branch: upstream/version-2-0
-            branch-short: version-2-0
-            nimflags-extra: --mm:refc
           - target:
               os: linux
             builder: ['self-hosted','ubuntu-22.04']


@@ -89,10 +89,10 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 1/1 Fail: 0/1 Skip: 0/1
 ## Blinded block conversions
 ```diff
-+ Bellatrix toSignedBlindedBlock OK
-+ Capella toSignedBlindedBlock OK
-+ Deneb toSignedBlindedBlock OK
-+ Electra toSignedBlindedBlock OK
++ Bellatrix toSignedBlindedBeaconBlock OK
++ Capella toSignedBlindedBeaconBlock OK
++ Deneb toSignedBlindedBeaconBlock OK
++ Electra toSignedBlindedBeaconBlock OK
 ```
 OK: 4/4 Fail: 0/4 Skip: 0/4
 ## Block pool altair processing [Preset: mainnet]
@@ -467,16 +467,20 @@ OK: 5/5 Fail: 0/5 Skip: 0/5
 + URL parsing OK
 ```
 OK: 5/5 Fail: 0/5 Skip: 0/5
-## Eth1 monitor
+## Engine API conversions
 ```diff
-+ Deposits chain OK
-+ Rewrite URLs OK
 + Roundtrip engine RPC V1 and bellatrix ExecutionPayload representations OK
 + Roundtrip engine RPC V2 and capella ExecutionPayload representations OK
 + Roundtrip engine RPC V3 and deneb ExecutionPayload representations OK
 + Roundtrip engine RPC V4 and electra ExecutionPayload representations OK
 ```
-OK: 6/6 Fail: 0/6 Skip: 0/6
+OK: 4/4 Fail: 0/4 Skip: 0/4
+## Eth1 monitor
+```diff
++ Deposits chain OK
++ Rewrite URLs OK
+```
+OK: 2/2 Fail: 0/2 Skip: 0/2
 ## Eth2 specific discovery tests
 ```diff
 + Invalid attnets field OK
@@ -936,10 +940,10 @@ OK: 6/6 Fail: 0/6 Skip: 0/6
 + Dynamic validator set: updateDynamicValidators() test OK
 ```
 OK: 4/4 Fail: 0/4 Skip: 0/4
-## ValidatorPubKey Bloom filter
+## ValidatorPubKey bucket sort
 ```diff
-+ incremental construction with no false positives/negatives OK
-+ one-shot construction with no false positives/negatives OK
++ incremental construction OK
++ one-shot construction OK
 ```
 OK: 2/2 Fail: 0/2 Skip: 0/2
 ## Zero signature sanity checks


@@ -1,3 +1,36 @@
2024-08-29 v24.8.0
==================
Nimbus `v24.8.0` is a `low-urgency` release with beacon API improvements and fixes.
### Improvements
* Increase speed of processing blocks with deposits by 25%:
https://github.com/status-im/nimbus-eth2/pull/6469
* Avoid running light client sync in background when node is synced:
https://github.com/status-im/nimbus-eth2/pull/6505
* Add additional Sepolia bootnode:
https://github.com/status-im/nimbus-eth2/pull/6490
### Fixes
* Add timeouts to failed execution layer requests:
https://github.com/status-im/nimbus-eth2/pull/6441
* Use correct fork digest when broadcasting blob sidecars, sync committee, and sync contribution messages:
https://github.com/status-im/nimbus-eth2/pull/6440
* Fix Holesky genesis state being downloaded multiple times:
https://github.com/status-im/nimbus-eth2/pull/6452
* Check blob versioned hashes when optimistic syncing:
https://github.com/status-im/nimbus-eth2/pull/6501
* Increase trusted node sync state downloading timeout to 120 seconds:
https://github.com/status-im/nimbus-eth2/pull/6487
2024-07-29 v24.7.0
==================


@@ -2487,9 +2487,12 @@ OK: 12/12 Fail: 0/12 Skip: 0/12
 + Pending consolidations - all_consolidation_cases_together [Preset: mainnet] OK
 + Pending consolidations - basic_pending_consolidation [Preset: mainnet] OK
 + Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: mainnet] OK
++ Pending consolidations - pending_consolidation_compounding_creds [Preset: mainnet] OK
++ Pending consolidations - pending_consolidation_future_epoch [Preset: mainnet] OK
++ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: mainnet] OK
 + Pending consolidations - skip_consolidation_when_source_slashed [Preset: mainnet] OK
 ```
-OK: 4/4 Fail: 0/4 Skip: 0/4
+OK: 7/7 Fail: 0/7 Skip: 0/7
 ## EF - Electra - Epoch Processing - RANDAO mixes reset [Preset: mainnet]
 ```diff
 + RANDAO mixes reset - updated_randao_mixes [Preset: mainnet] OK
@@ -3710,4 +3713,4 @@ OK: 69/88 Fail: 0/88 Skip: 19/88
 OK: 3/3 Fail: 0/3 Skip: 0/3
 ---TOTAL---
-OK: 2988/3008 Fail: 0/3008 Skip: 20/3008
+OK: 2991/3011 Fail: 0/3011 Skip: 20/3011


@@ -2598,9 +2598,12 @@ OK: 12/12 Fail: 0/12 Skip: 0/12
 + Pending consolidations - all_consolidation_cases_together [Preset: minimal] OK
 + Pending consolidations - basic_pending_consolidation [Preset: minimal] OK
 + Pending consolidations - consolidation_not_yet_withdrawable_validator [Preset: minimal] OK
++ Pending consolidations - pending_consolidation_compounding_creds [Preset: minimal] OK
++ Pending consolidations - pending_consolidation_future_epoch [Preset: minimal] OK
++ Pending consolidations - pending_consolidation_with_pending_deposit [Preset: minimal] OK
 + Pending consolidations - skip_consolidation_when_source_slashed [Preset: minimal] OK
 ```
-OK: 4/4 Fail: 0/4 Skip: 0/4
+OK: 7/7 Fail: 0/7 Skip: 0/7
 ## EF - Electra - Epoch Processing - RANDAO mixes reset [Preset: minimal]
 ```diff
 + RANDAO mixes reset - updated_randao_mixes [Preset: minimal] OK
@@ -4047,4 +4050,4 @@ OK: 185/207 Fail: 0/207 Skip: 22/207
 OK: 3/3 Fail: 0/3 Skip: 0/3
 ---TOTAL---
-OK: 3294/3317 Fail: 0/3317 Skip: 23/3317
+OK: 3297/3320 Fail: 0/3320 Skip: 23/3320

Dockerfile (new file, 47 lines)

@@ -0,0 +1,47 @@
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
FROM debian:testing-slim AS build
SHELL ["/bin/bash", "-c"]
RUN apt-get clean && apt update \
&& apt -y install build-essential git-lfs
RUN ldd --version ldd
ADD . /root/nimbus-eth2
RUN cd /root/nimbus-eth2 \
&& make -j$(nproc) update \
&& make -j$(nproc) V=1 NIMFLAGS="-d:const_preset=mainnet -d:disableMarchNative" LOG_LEVEL=TRACE nimbus_beacon_node
# --------------------------------- #
# Starting new image to reduce size #
# --------------------------------- #
FROM debian:testing-slim as deploy
SHELL ["/bin/bash", "-c"]
RUN apt-get clean && apt update \
&& apt -y install build-essential
RUN apt update && apt -y upgrade
RUN ldd --version ldd
RUN rm -rf /home/user/nimbus-eth2/build/nimbus_beacon_node
# "COPY" creates new image layers, so we cram all we can into one command
COPY --from=build /root/nimbus-eth2/build/nimbus_beacon_node /home/user/nimbus-eth2/build/nimbus_beacon_node
ENV PATH="/home/user/nimbus-eth2/build:${PATH}"
ENTRYPOINT ["nimbus_beacon_node"]
WORKDIR /home/user/nimbus-eth2/build
STOPSIGNAL SIGINT


@@ -130,7 +130,7 @@ type
     current_sync_committee*: SyncCommittee  # [New in Altair]
     next_sync_committee*: SyncCommittee  # [New in Altair]
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconstate
   # Memory-representation-equivalent to a Bellatrix BeaconState for in-place SSZ
   # reading and writing
   BellatrixBeaconStateNoImmutableValidators* = object


@@ -27,7 +27,7 @@ type
     ## which blocks are valid - in particular, blocks are not valid if they
     ## come from the future as seen from the local clock.
     ##
-    ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#fork-choice
+    ## https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#fork-choice
     ##
     # TODO consider NTP and network-adjusted timestamps as outlined here:
     # https://ethresear.ch/t/network-adjusted-timestamps/4187


@@ -52,9 +52,15 @@ proc initLightClient*(
     optimisticProcessor = initOptimisticProcessor(
       getBeaconTime, optimisticHandler)
+    shouldInhibitSync = func(): bool =
+      if node.syncManager != nil:
+        not node.syncManager.inProgress  # No LC sync needed if DAG is in sync
+      else:
+        false
     lightClient = createLightClient(
       node.network, rng, config, cfg, forkDigests, getBeaconTime,
-      genesis_validators_root, LightClientFinalizationMode.Strict)
+      genesis_validators_root, LightClientFinalizationMode.Strict,
+      shouldInhibitSync = shouldInhibitSync)
   if config.syncLightClient:
     proc onOptimisticHeader(


@@ -1,49 +0,0 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import "."/spec/crypto
from stew/bitops2 import getBit, setBit
from "."/spec/datatypes/base import Validator, pubkey
from "."/spec/helpers import bytes_to_uint32
const
# https://hur.st/bloomfilter/?n=4M&p=&m=8MiB&k=
pubkeyBloomFilterScale = 23 # 21 too small, 22 borderline, 24 also ok
type
PubkeyBloomFilter* = object
data: array[1 shl pubkeyBloomFilterScale, byte]
iterator bloomFilterHashes(pubkey: ValidatorPubKey): auto =
const pubkeyBloomFilterMask = (1 shl pubkeyBloomFilterScale) - 1
for r in countup(0'u32, 20'u32, 4'u32):
# ValidatorPubKeys have fairly uniform entropy; using enough hash
# functions also reduces risk of low-entropy portions
yield pubkey.blob.toOpenArray(r, r+3).bytes_to_uint32 and
pubkeyBloomFilterMask
template incl*(bloomFilter: var PubkeyBloomFilter, pubkey: ValidatorPubKey) =
for bloomFilterHash in bloomFilterHashes(pubkey):
setBit(bloomFilter.data, bloomFilterHash)
func constructBloomFilter*(x: openArray[Validator]): auto =
let res = new PubkeyBloomFilter
for m in x:
incl(res[], m.pubkey)
res
func mightContain*(
bloomFilter: PubkeyBloomFilter, pubkey: ValidatorPubKey): bool =
# Might return false positive, but never false negative
for bloomFilterHash in bloomFilterHashes(pubkey):
if not getBit(bloomFilter.data, bloomFilterHash):
return false
true
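
For orientation, a minimal, hypothetical sketch (not part of the diff) of how the deleted bloom-filter helper above was typically consumed; `validators` and `pubkey` are placeholder names. Per the renamed tests earlier in this commit ("ValidatorPubKey bucket sort"), the lookup this filter backed is now served by a bucket-sort based structure instead.

```nim
# Sketch only: exercises the API removed above, assuming `validators` is an
# openArray[Validator] and `pubkey` a ValidatorPubKey already in scope.
let filter = constructBloomFilter(validators)  # returns a ref PubkeyBloomFilter
if filter[].mightContain(pubkey):
  # A positive answer may be a false positive, so callers still confirmed with
  # an exact lookup; a negative answer was always authoritative.
  discard
```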


@@ -32,8 +32,6 @@ import
 from std/os import getHomeDir, parentDir, `/`
 from std/strutils import parseBiggestUInt, replace
-from fork_choice/fork_choice_types
-  import ForkChoiceVersion
 from consensus_object_pools/block_pools_types_light_client
   import LightClientDataImportMode
@@ -676,12 +674,6 @@ type
       desc: "Bandwidth estimate for the node (bits per second)"
       name: "debug-bandwidth-estimate" .}: Option[Natural]
-    forkChoiceVersion* {.
-      hidden
-      desc: "Forkchoice version to use. " &
-            "Must be one of: stable"
-      name: "debug-forkchoice-version" .}: Option[ForkChoiceVersion]
     of BNStartUpCmd.wallets:
       case walletsCmd* {.command.}: WalletsCmd
       of WalletsCmd.create:


@@ -104,7 +104,6 @@ declareGauge attestation_pool_block_attestation_packing_time,
 proc init*(T: type AttestationPool, dag: ChainDAGRef,
            quarantine: ref Quarantine,
-           forkChoiceVersion = ForkChoiceVersion.Stable,
            onAttestation: OnPhase0AttestationCallback = nil,
            onElectraAttestation: OnElectraAttestationCallback = nil): T =
   ## Initialize an AttestationPool from the dag `headState`
@@ -113,7 +112,7 @@ proc init*(T: type AttestationPool, dag: ChainDAGRef,
   let finalizedEpochRef = dag.getFinalizedEpochRef()
   var forkChoice = ForkChoice.init(
-    finalizedEpochRef, dag.finalizedHead.blck, forkChoiceVersion)
+    finalizedEpochRef, dag.finalizedHead.blck)
   # Feed fork choice with unfinalized history - during startup, block pool only
   # keeps track of a single history so we just need to follow it
@@ -200,6 +199,7 @@ proc addForkChoiceVotes(
     error "Couldn't add attestation to fork choice, bug?", err = v.error()
 func candidateIdx(pool: AttestationPool, slot: Slot): Opt[int] =
+  static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len
   if slot >= pool.startingSlot and
       slot < (pool.startingSlot + pool.phase0Candidates.lenu64):
     Opt.some(int(slot mod pool.phase0Candidates.lenu64))
@@ -210,8 +210,8 @@ proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) =
   if wallSlot + 1 < pool.phase0Candidates.lenu64:
     return # Genesis
-  let
-    newStartingSlot = wallSlot + 1 - pool.phase0Candidates.lenu64
+  static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len
+  let newStartingSlot = wallSlot + 1 - pool.phase0Candidates.lenu64
   if newStartingSlot < pool.startingSlot:
     error "Current slot older than attestation pool view, clock reset?",
@@ -224,10 +224,12 @@ proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) =
   if newStartingSlot - pool.startingSlot >= pool.phase0Candidates.lenu64():
     # In case many slots passed since the last update, avoid iterating over
     # the same indices over and over
-    pool.phase0Candidates = default(type(pool.phase0Candidates))
+    pool.phase0Candidates.reset()
+    pool.electraCandidates.reset()
   else:
     for i in pool.startingSlot..newStartingSlot:
       pool.phase0Candidates[i.uint64 mod pool.phase0Candidates.lenu64].reset()
+      pool.electraCandidates[i.uint64 mod pool.electraCandidates.lenu64].reset()
   pool.startingSlot = newStartingSlot
@@ -507,6 +509,7 @@ func covers*(
   if candidateIdx.isNone:
     return false
+  debugComment "foo"
   # needs to know more than attestationdata now
   #let attestation_data_root = hash_tree_root(data)
   #pool.electraCandidates[candidateIdx.get()].withValue(attestation_data_root, entry):
@@ -651,7 +654,8 @@ func score(
 proc check_attestation_compatible*(
     dag: ChainDAGRef,
     state: ForkyHashedBeaconState,
-    attestation: SomeAttestation | electra.Attestation | electra.TrustedAttestation): Result[void, cstring] =
+    attestation: SomeAttestation | electra.Attestation |
+                 electra.TrustedAttestation): Result[void, cstring] =
   let
     targetEpoch = attestation.data.target.epoch
     compatibleRoot = state.dependent_root(targetEpoch.get_previous_epoch)
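
The `static: doAssert` lines added in the hunks above check an invariant at compile time rather than at runtime. A minimal, self-contained sketch of the same pattern (illustrative only; the type and field names here are made up, not from the diff):

```nim
type CandidateTables = object
  phase0: array[4, int]   # stand-in for pool.phase0Candidates
  electra: array[4, int]  # stand-in for pool.electraCandidates

# `static:` forces evaluation during compilation, so if the two array lengths
# ever diverge the build fails instead of misbehaving at runtime.
static: doAssert default(CandidateTables).phase0.len ==
                 default(CandidateTables).electra.len
```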


@@ -1178,7 +1178,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
   # should have `previous_version` set to `current_version` while
   # this doesn't happen to be the case in network that go through
   # regular hard-fork upgrades. See for example:
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#testing
   if stateFork.current_version != configFork.current_version:
     error "State from database does not match network, check --network parameter",
       tail = dag.tail, headRef, stateFork, configFork
@@ -1972,7 +1972,7 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
     prunedHeads = hlen - dag.heads.len,
     dagPruneDur = Moment.now() - startTick
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#helpers
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/sync/optimistic.md#helpers
 func is_optimistic*(dag: ChainDAGRef, bid: BlockId): bool =
   let blck =
     if bid.slot <= dag.finalizedHead.slot:


@@ -15,6 +15,7 @@ import
   ../beacon_clock,
   ./common_tools
+from ../el/engine_api_conversions import asBlockHash
 from ../spec/beaconstate import
   get_expected_withdrawals, has_eth1_withdrawal_credential
 from ../spec/datatypes/capella import Withdrawal


@@ -53,7 +53,7 @@ iterator get_beacon_committee*(
       committees_per_slot * SLOTS_PER_EPOCH
   ): yield (index_in_committee, idx)
-# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_committee
+# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_beacon_committee
 func get_beacon_committee*(
     shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex):
     seq[ValidatorIndex] =


@@ -364,7 +364,7 @@ proc produceSyncAggregate*(
 proc isEpochLeadTime*(
     pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool =
-  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability
+  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#sync-committee-subnet-stability
   # This ensures a uniform distribution without requiring additional state:
   # (1/4)         = 1/4, 4 slots out
   # (3/4) * (1/3) = 1/4, 3 slots out


@@ -20,7 +20,7 @@ import
   ../spec/[eth2_merkleization, forks],
   ../networking/network_metadata,
   ".."/beacon_node_status,
-  "."/[eth1_chain, el_conf]
+  "."/[el_conf, engine_api_conversions, eth1_chain]
 from std/times import getTime, inSeconds, initTime, `-`
 from ../spec/engine_authentication import getSignedIatToken
@@ -40,6 +40,12 @@ type
   Int64LeBytes = DynamicBytes[8, 8]
   WithoutTimeout* = distinct int
+  SomeEnginePayloadWithValue =
+    BellatrixExecutionPayloadWithValue |
+    GetPayloadV2Response |
+    GetPayloadV3Response |
+    GetPayloadV4Response
 contract(DepositContract):
   proc deposit(pubkey: PubKeyBytes,
                withdrawalCredentials: WithdrawalCredentialsBytes,
@@ -198,16 +204,6 @@ type
       merkleTreeIndex: Int64LeBytes,
       j: JsonNode) {.gcsafe, raises: [].}
-  BellatrixExecutionPayloadWithValue* = object
-    executionPayload*: ExecutionPayloadV1
-    blockValue*: UInt256
-  SomeEnginePayloadWithValue =
-    BellatrixExecutionPayloadWithValue |
-    GetPayloadV2Response |
-    GetPayloadV3Response |
-    GetPayloadV4Response
 declareCounter failed_web3_requests,
   "Failed web3 requests"
@@ -376,340 +372,6 @@ template eth1ChainBlocks*(m: ELManager): Deque[Eth1Block] =
 # doAssert SECONDS_PER_ETH1_BLOCK * cfg.ETH1_FOLLOW_DISTANCE < GENESIS_DELAY,
 #   "Invalid configuration: GENESIS_DELAY is set too low"
func asConsensusWithdrawal(w: WithdrawalV1): capella.Withdrawal =
capella.Withdrawal(
index: w.index.uint64,
validator_index: w.validatorIndex.uint64,
address: ExecutionAddress(data: w.address.distinctBase),
amount: Gwei w.amount)
func asEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 =
WithdrawalV1(
index: Quantity(w.index),
validatorIndex: Quantity(w.validator_index),
address: Address(w.address.data),
amount: Quantity(w.amount))
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1):
bellatrix.ExecutionPayload =
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase)
bellatrix.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
feeRecipient:
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
block_number: rpcExecutionPayload.blockNumber.uint64,
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.transactions, it.getTransaction)))
func asConsensusType*(payloadWithValue: BellatrixExecutionPayloadWithValue):
bellatrix.ExecutionPayloadForSigning =
bellatrix.ExecutionPayloadForSigning(
executionPayload: payloadWithValue.executionPayload.asConsensusType,
blockValue: payloadWithValue.blockValue)
template maybeDeref[T](o: Opt[T]): T = o.get
template maybeDeref[V](v: V): V = v
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1OrV2|ExecutionPayloadV2):
capella.ExecutionPayload =
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase)
capella.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
feeRecipient:
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
block_number: rpcExecutionPayload.blockNumber.uint64,
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
mapIt(maybeDeref rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)))
func asConsensusType*(payloadWithValue: engine_api.GetPayloadV2Response):
capella.ExecutionPayloadForSigning =
capella.ExecutionPayloadForSigning(
executionPayload: payloadWithValue.executionPayload.asConsensusType,
blockValue: payloadWithValue.blockValue)
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV3):
deneb.ExecutionPayload =
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase)
deneb.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
feeRecipient:
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
block_number: rpcExecutionPayload.blockNumber.uint64,
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64)
func asConsensusType*(payload: engine_api.GetPayloadV3Response):
deneb.ExecutionPayloadForSigning =
deneb.ExecutionPayloadForSigning(
executionPayload: payload.executionPayload.asConsensusType,
blockValue: payload.blockValue,
# TODO
# The `mapIt` calls below are necessary only because we use different distinct
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
# Both are defined as `array[N, byte]` under the hood.
blobsBundle: deneb.BlobsBundle(
commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(
kzg_abi.KzgCommitment(bytes: it.bytes))),
proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(
kzg_abi.KzgProof(bytes: it.bytes))),
blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes))))
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
electra.ExecutionPayload =
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase)
template getDepositRequest(
dr: DepositRequestV1): electra.DepositRequest =
electra.DepositRequest(
pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase),
withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest,
amount: dr.amount.Gwei,
signature: ValidatorSig(blob: dr.signature.distinctBase),
index: dr.index.uint64)
template getWithdrawalRequest(
wr: WithdrawalRequestV1): electra.WithdrawalRequest =
electra.WithdrawalRequest(
source_address: ExecutionAddress(data: wr.sourceAddress.distinctBase),
validator_pubkey: ValidatorPubKey(blob: wr.validatorPubkey.distinctBase),
amount: wr.amount.Gwei)
template getConsolidationRequest(
cr: ConsolidationRequestV1): electra.ConsolidationRequest =
electra.ConsolidationRequest(
source_address: ExecutionAddress(data: cr.sourceAddress.distinctBase),
source_pubkey: ValidatorPubKey(blob: cr.sourcePubkey.distinctBase),
target_pubkey: ValidatorPubKey(blob: cr.targetPubkey.distinctBase))
electra.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
feeRecipient:
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
block_number: rpcExecutionPayload.blockNumber.uint64,
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(
rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64,
deposit_requests:
List[electra.DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.depositRequests, it.getDepositRequest)),
withdrawal_requests: List[electra.WithdrawalRequest,
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.withdrawalRequests,
it.getWithdrawalRequest)),
consolidation_requests: List[electra.ConsolidationRequest,
Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.consolidationRequests,
it.getConsolidationRequest)))
func asConsensusType*(payload: engine_api.GetPayloadV4Response):
electra.ExecutionPayloadForSigning =
electra.ExecutionPayloadForSigning(
executionPayload: payload.executionPayload.asConsensusType,
blockValue: payload.blockValue,
# TODO
# The `mapIt` calls below are necessary only because we use different distinct
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
# Both are defined as `array[N, byte]` under the hood.
blobsBundle: deneb.BlobsBundle(
commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(
kzg_abi.KzgCommitment(bytes: it.bytes))),
proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(
kzg_abi.KzgProof(bytes: it.bytes))),
blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes))))
func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
ExecutionPayloadV1 =
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase)
engine_api.ExecutionPayloadV1(
parentHash: executionPayload.parent_hash.asBlockHash,
feeRecipient: Address(executionPayload.fee_recipient.data),
stateRoot: executionPayload.state_root.asBlockHash,
receiptsRoot: executionPayload.receipts_root.asBlockHash,
logsBloom:
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
prevRandao: executionPayload.prev_randao.asBlockHash,
blockNumber: Quantity(executionPayload.block_number),
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction))
template toEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 =
WithdrawalV1(
index: Quantity(w.index),
validatorIndex: Quantity(w.validator_index),
address: Address(w.address.data),
amount: Quantity(w.amount))
func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload):
ExecutionPayloadV2 =
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase)
engine_api.ExecutionPayloadV2(
parentHash: executionPayload.parent_hash.asBlockHash,
feeRecipient: Address(executionPayload.fee_recipient.data),
stateRoot: executionPayload.state_root.asBlockHash,
receiptsRoot: executionPayload.receipts_root.asBlockHash,
logsBloom:
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
prevRandao: executionPayload.prev_randao.asBlockHash,
blockNumber: Quantity(executionPayload.block_number),
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
withdrawals: mapIt(executionPayload.withdrawals, it.toEngineWithdrawal))
func asEngineExecutionPayload*(executionPayload: deneb.ExecutionPayload):
ExecutionPayloadV3 =
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase)
engine_api.ExecutionPayloadV3(
parentHash: executionPayload.parent_hash.asBlockHash,
feeRecipient: Address(executionPayload.fee_recipient.data),
stateRoot: executionPayload.state_root.asBlockHash,
receiptsRoot: executionPayload.receipts_root.asBlockHash,
logsBloom:
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
prevRandao: executionPayload.prev_randao.asBlockHash,
blockNumber: Quantity(executionPayload.block_number),
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
blobGasUsed: Quantity(executionPayload.blob_gas_used),
excessBlobGas: Quantity(executionPayload.excess_blob_gas))
func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
ExecutionPayloadV4 =
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase)
template getDepositRequest(
dr: electra.DepositRequest): DepositRequestV1 =
DepositRequestV1(
pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob),
withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data),
amount: dr.amount.Quantity,
signature: FixedBytes[RawSigSize](dr.signature.blob),
index: dr.index.Quantity)
template getWithdrawalRequest(
wr: electra.WithdrawalRequest): WithdrawalRequestV1 =
WithdrawalRequestV1(
sourceAddress: Address(wr.source_address.data),
validatorPubkey: FixedBytes[RawPubKeySize](wr.validator_pubkey.blob),
amount: wr.amount.Quantity)
template getConsolidationRequest(
cr: electra.ConsolidationRequest): ConsolidationRequestV1 =
ConsolidationRequestV1(
sourceAddress: Address(cr.source_address.data),
sourcePubkey: FixedBytes[RawPubKeySize](cr.source_pubkey.blob),
targetPubkey: FixedBytes[RawPubKeySize](cr.target_pubkey.blob))
engine_api.ExecutionPayloadV4(
parentHash: executionPayload.parent_hash.asBlockHash,
feeRecipient: Address(executionPayload.fee_recipient.data),
stateRoot: executionPayload.state_root.asBlockHash,
receiptsRoot: executionPayload.receipts_root.asBlockHash,
logsBloom:
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
prevRandao: executionPayload.prev_randao.asBlockHash,
blockNumber: Quantity(executionPayload.block_number),
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
blobGasUsed: Quantity(executionPayload.blob_gas_used),
excessBlobGas: Quantity(executionPayload.excess_blob_gas),
depositRequests: mapIt(
executionPayload.deposit_requests, it.getDepositRequest),
withdrawalRequests: mapIt(
executionPayload.withdrawal_requests, it.getWithdrawalRequest),
consolidationRequests: mapIt(
executionPayload.consolidation_requests, it.getConsolidationRequest))
func isConnected(connection: ELConnection): bool =
  connection.web3.isSome


@@ -0,0 +1,359 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
../spec/datatypes/[bellatrix, capella, deneb, electra],
web3/[engine_api, engine_api_types]
from std/sequtils import mapIt
type
BellatrixExecutionPayloadWithValue* = object
executionPayload*: ExecutionPayloadV1
blockValue*: UInt256
func asEth2Digest*(x: BlockHash): Eth2Digest =
Eth2Digest(data: array[32, byte](x))
template asBlockHash*(x: Eth2Digest): BlockHash =
BlockHash(x.data)
func asConsensusWithdrawal*(w: WithdrawalV1): capella.Withdrawal =
capella.Withdrawal(
index: w.index.uint64,
validator_index: w.validatorIndex.uint64,
address: ExecutionAddress(data: w.address.distinctBase),
amount: Gwei w.amount)
func asEngineWithdrawal(w: capella.Withdrawal): WithdrawalV1 =
WithdrawalV1(
index: Quantity(w.index),
validatorIndex: Quantity(w.validator_index),
address: Address(w.address.data),
amount: Quantity(w.amount))
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1):
bellatrix.ExecutionPayload =
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase)
bellatrix.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
feeRecipient:
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
block_number: rpcExecutionPayload.blockNumber.uint64,
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.transactions, it.getTransaction)))
func asConsensusType*(payloadWithValue: BellatrixExecutionPayloadWithValue):
bellatrix.ExecutionPayloadForSigning =
bellatrix.ExecutionPayloadForSigning(
executionPayload: payloadWithValue.executionPayload.asConsensusType,
blockValue: payloadWithValue.blockValue)
template maybeDeref*[T](o: Opt[T]): T = o.get
template maybeDeref*[V](v: V): V = v
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV1OrV2|ExecutionPayloadV2):
capella.ExecutionPayload =
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase)
capella.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
feeRecipient:
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
block_number: rpcExecutionPayload.blockNumber.uint64,
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
mapIt(maybeDeref rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)))
func asConsensusType*(payloadWithValue: engine_api.GetPayloadV2Response):
capella.ExecutionPayloadForSigning =
capella.ExecutionPayloadForSigning(
executionPayload: payloadWithValue.executionPayload.asConsensusType,
blockValue: payloadWithValue.blockValue)
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV3):
deneb.ExecutionPayload =
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase)
deneb.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
feeRecipient:
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
block_number: rpcExecutionPayload.blockNumber.uint64,
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64)
func asConsensusType*(payload: engine_api.GetPayloadV3Response):
deneb.ExecutionPayloadForSigning =
deneb.ExecutionPayloadForSigning(
executionPayload: payload.executionPayload.asConsensusType,
blockValue: payload.blockValue,
# TODO
# The `mapIt` calls below are necessary only because we use different distinct
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
# Both are defined as `array[N, byte]` under the hood.
blobsBundle: deneb.BlobsBundle(
commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(
kzg_abi.KzgCommitment(bytes: it.bytes))),
proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(
kzg_abi.KzgProof(bytes: it.bytes))),
blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes))))
func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
electra.ExecutionPayload =
template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
bellatrix.Transaction.init(tt.distinctBase)
template getDepositRequest(
dr: DepositRequestV1): electra.DepositRequest =
electra.DepositRequest(
pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase),
withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest,
amount: dr.amount.Gwei,
signature: ValidatorSig(blob: dr.signature.distinctBase),
index: dr.index.uint64)
template getWithdrawalRequest(
wr: WithdrawalRequestV1): electra.WithdrawalRequest =
electra.WithdrawalRequest(
source_address: ExecutionAddress(data: wr.sourceAddress.distinctBase),
validator_pubkey: ValidatorPubKey(blob: wr.validatorPubkey.distinctBase),
amount: wr.amount.Gwei)
template getConsolidationRequest(
cr: ConsolidationRequestV1): electra.ConsolidationRequest =
electra.ConsolidationRequest(
source_address: ExecutionAddress(data: cr.sourceAddress.distinctBase),
source_pubkey: ValidatorPubKey(blob: cr.sourcePubkey.distinctBase),
target_pubkey: ValidatorPubKey(blob: cr.targetPubkey.distinctBase))
electra.ExecutionPayload(
parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
feeRecipient:
ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
block_number: rpcExecutionPayload.blockNumber.uint64,
gas_limit: rpcExecutionPayload.gasLimit.uint64,
gas_used: rpcExecutionPayload.gasUsed.uint64,
timestamp: rpcExecutionPayload.timestamp.uint64,
extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(
rpcExecutionPayload.extraData.bytes),
base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64,
deposit_requests:
List[electra.DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.depositRequests, it.getDepositRequest)),
withdrawal_requests: List[electra.WithdrawalRequest,
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.withdrawalRequests,
it.getWithdrawalRequest)),
consolidation_requests: List[electra.ConsolidationRequest,
Limit MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD].init(
mapIt(rpcExecutionPayload.consolidationRequests,
it.getConsolidationRequest)))
func asConsensusType*(payload: engine_api.GetPayloadV4Response):
electra.ExecutionPayloadForSigning =
electra.ExecutionPayloadForSigning(
executionPayload: payload.executionPayload.asConsensusType,
blockValue: payload.blockValue,
# TODO
# The `mapIt` calls below are necessary only because we use different distinct
# types for KZG commitments and Blobs in the `web3` and the `deneb` spec types.
# Both are defined as `array[N, byte]` under the hood.
blobsBundle: deneb.BlobsBundle(
commitments: KzgCommitments.init(
payload.blobsBundle.commitments.mapIt(
kzg_abi.KzgCommitment(bytes: it.bytes))),
proofs: KzgProofs.init(
payload.blobsBundle.proofs.mapIt(
kzg_abi.KzgProof(bytes: it.bytes))),
blobs: Blobs.init(
payload.blobsBundle.blobs.mapIt(it.bytes))))
func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
ExecutionPayloadV1 =
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase)
engine_api.ExecutionPayloadV1(
parentHash: executionPayload.parent_hash.asBlockHash,
feeRecipient: Address(executionPayload.fee_recipient.data),
stateRoot: executionPayload.state_root.asBlockHash,
receiptsRoot: executionPayload.receipts_root.asBlockHash,
logsBloom:
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
prevRandao: executionPayload.prev_randao.asBlockHash,
blockNumber: Quantity(executionPayload.block_number),
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction))
template toEngineWithdrawal*(w: capella.Withdrawal): WithdrawalV1 =
WithdrawalV1(
index: Quantity(w.index),
validatorIndex: Quantity(w.validator_index),
address: Address(w.address.data),
amount: Quantity(w.amount))
func asEngineExecutionPayload*(executionPayload: capella.ExecutionPayload):
ExecutionPayloadV2 =
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase)
engine_api.ExecutionPayloadV2(
parentHash: executionPayload.parent_hash.asBlockHash,
feeRecipient: Address(executionPayload.fee_recipient.data),
stateRoot: executionPayload.state_root.asBlockHash,
receiptsRoot: executionPayload.receipts_root.asBlockHash,
logsBloom:
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
prevRandao: executionPayload.prev_randao.asBlockHash,
blockNumber: Quantity(executionPayload.block_number),
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
withdrawals: mapIt(executionPayload.withdrawals, it.toEngineWithdrawal))
func asEngineExecutionPayload*(executionPayload: deneb.ExecutionPayload):
ExecutionPayloadV3 =
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase)
engine_api.ExecutionPayloadV3(
parentHash: executionPayload.parent_hash.asBlockHash,
feeRecipient: Address(executionPayload.fee_recipient.data),
stateRoot: executionPayload.state_root.asBlockHash,
receiptsRoot: executionPayload.receipts_root.asBlockHash,
logsBloom:
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
prevRandao: executionPayload.prev_randao.asBlockHash,
blockNumber: Quantity(executionPayload.block_number),
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
blobGasUsed: Quantity(executionPayload.blob_gas_used),
excessBlobGas: Quantity(executionPayload.excess_blob_gas))
func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
ExecutionPayloadV4 =
template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
TypedTransaction(tt.distinctBase)
template getDepositRequest(
dr: electra.DepositRequest): DepositRequestV1 =
DepositRequestV1(
pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob),
withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data),
amount: dr.amount.Quantity,
signature: FixedBytes[RawSigSize](dr.signature.blob),
index: dr.index.Quantity)
template getWithdrawalRequest(
wr: electra.WithdrawalRequest): WithdrawalRequestV1 =
WithdrawalRequestV1(
sourceAddress: Address(wr.source_address.data),
validatorPubkey: FixedBytes[RawPubKeySize](wr.validator_pubkey.blob),
amount: wr.amount.Quantity)
template getConsolidationRequest(
cr: electra.ConsolidationRequest): ConsolidationRequestV1 =
ConsolidationRequestV1(
sourceAddress: Address(cr.source_address.data),
sourcePubkey: FixedBytes[RawPubKeySize](cr.source_pubkey.blob),
targetPubkey: FixedBytes[RawPubKeySize](cr.target_pubkey.blob))
engine_api.ExecutionPayloadV4(
parentHash: executionPayload.parent_hash.asBlockHash,
feeRecipient: Address(executionPayload.fee_recipient.data),
stateRoot: executionPayload.state_root.asBlockHash,
receiptsRoot: executionPayload.receipts_root.asBlockHash,
logsBloom:
FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
prevRandao: executionPayload.prev_randao.asBlockHash,
blockNumber: Quantity(executionPayload.block_number),
gasLimit: Quantity(executionPayload.gas_limit),
gasUsed: Quantity(executionPayload.gas_used),
timestamp: Quantity(executionPayload.timestamp),
extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
baseFeePerGas: executionPayload.base_fee_per_gas,
blockHash: executionPayload.block_hash.asBlockHash,
transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
blobGasUsed: Quantity(executionPayload.blob_gas_used),
excessBlobGas: Quantity(executionPayload.excess_blob_gas),
depositRequests: mapIt(
executionPayload.deposit_requests, it.getDepositRequest),
withdrawalRequests: mapIt(
executionPayload.withdrawal_requests, it.getWithdrawalRequest),
consolidationRequests: mapIt(
executionPayload.consolidation_requests, it.getConsolidationRequest))
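
Since `asEth2Digest` and `asBlockHash` above are plain 32-byte copies between the consensus and engine-API hash types, converting back and forth should be lossless. A minimal, hypothetical check (not part of the diff), assuming the new module is imported as in the following hunks:

```nim
# Sketch only: round-tripping a consensus digest through the engine-API type.
let digest = Eth2Digest()  # all-zero digest as a stand-in for a real block hash
doAssert digest.asBlockHash.asEth2Digest == digest
```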


@@ -16,6 +16,8 @@ import
   web3/[conversions, eth_api_types],
   ./merkle_minimal
+from ./engine_api_conversions import asBlockHash, asEth2Digest
 export beacon_chain_db, deques, digest, base, forks
 logScope:
@@ -80,12 +82,6 @@ type
     deposits*: seq[Deposit]
     hasMissingDeposits*: bool
-func asEth2Digest*(x: BlockHash): Eth2Digest =
-  Eth2Digest(data: array[32, byte](x))
-template asBlockHash*(x: Eth2Digest): BlockHash =
-  BlockHash(x.data)
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#get_eth1_data
 func compute_time_at_slot(genesis_time: uint64, slot: Slot): uint64 =
   genesis_time + slot * SECONDS_PER_SLOT
@@ -115,7 +111,7 @@ template findBlock(chain: Eth1Chain, eth1Data: Eth1Data): Eth1Block =
 func makeSuccessorWithoutDeposits*(existingBlock: Eth1Block,
                                    successor: BlockObject): Eth1Block =
-  result = Eth1Block(
+  Eth1Block(
     hash: successor.hash.asEth2Digest,
     number: Eth1BlockNumber successor.number,
     timestamp: Eth1BlockTimestamp successor.timestamp)

View File

@ -7,7 +7,7 @@
{.push raises: [].} {.push raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/tests/core/pyspec/eth2spec/utils/merkle_minimal.py # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/tests/core/pyspec/eth2spec/utils/merkle_minimal.py
# Merkle tree helpers # Merkle tree helpers
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -49,13 +49,11 @@ func compute_deltas(
logScope: topics = "fork_choice" logScope: topics = "fork_choice"
func init*( func init*(
T: type ForkChoiceBackend, checkpoints: FinalityCheckpoints, T: type ForkChoiceBackend, checkpoints: FinalityCheckpoints): T =
version: ForkChoiceVersion): T = T(proto_array: ProtoArray.init(checkpoints))
T(proto_array: ProtoArray.init(checkpoints, version))
proc init*( proc init*(
T: type ForkChoice, epochRef: EpochRef, blck: BlockRef, T: type ForkChoice, epochRef: EpochRef, blck: BlockRef): T =
version: ForkChoiceVersion): T =
## Initialize a fork choice context for a finalized state - in the finalized ## Initialize a fork choice context for a finalized state - in the finalized
## state, the justified and finalized checkpoints are the same, so only one ## state, the justified and finalized checkpoints are the same, so only one
## is used here ## is used here
@ -67,10 +65,8 @@ proc init*(
backend: ForkChoiceBackend.init( backend: ForkChoiceBackend.init(
FinalityCheckpoints( FinalityCheckpoints(
justified: checkpoint, justified: checkpoint,
finalized: checkpoint), finalized: checkpoint)),
version),
checkpoints: Checkpoints( checkpoints: Checkpoints(
version: version,
justified: BalanceCheckpoint( justified: BalanceCheckpoint(
checkpoint: checkpoint, checkpoint: checkpoint,
total_active_balance: epochRef.total_active_balance, total_active_balance: epochRef.total_active_balance,
@ -113,7 +109,7 @@ proc update_justified(
self.update_justified(dag, blck, justified.epoch) self.update_justified(dag, blck, justified.epoch)
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#update_checkpoints # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#update_checkpoints
proc update_checkpoints( proc update_checkpoints(
self: var Checkpoints, dag: ChainDAGRef, self: var Checkpoints, dag: ChainDAGRef,
checkpoints: FinalityCheckpoints): FcResult[void] = checkpoints: FinalityCheckpoints): FcResult[void] =
@ -377,7 +373,7 @@ proc get_head*(self: var ForkChoice,
self.checkpoints.justified.balances, self.checkpoints.justified.balances,
self.checkpoints.proposer_boost_root) self.checkpoints.proposer_boost_root)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/fork_choice/safe-block.md#get_safe_beacon_block_root # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/fork_choice/safe-block.md#get_safe_beacon_block_root
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest = func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
# Use most recent justified block as a stopgap # Use most recent justified block as a stopgap
self.checkpoints.justified.checkpoint.root self.checkpoints.justified.checkpoint.root

View File

@ -29,14 +29,6 @@ import
# ---------------------------------------------------------------------- # ----------------------------------------------------------------------
type type
ForkChoiceVersion* {.pure.} = enum
## Controls which version of fork choice to run.
Stable = "stable"
## Use current version from stable Ethereum consensus specifications
Pr3431 = "pr3431"
## https://github.com/ethereum/consensus-specs/pull/3431
## https://github.com/ethereum/consensus-specs/issues/3466
fcKind* = enum fcKind* = enum
## Fork Choice Error Kinds ## Fork Choice Error Kinds
fcFinalizedNodeUnknown fcFinalizedNodeUnknown
@ -96,7 +88,6 @@ type
## Subtracted from logical index to get the physical index ## Subtracted from logical index to get the physical index
ProtoArray* = object ProtoArray* = object
version*: ForkChoiceVersion
currentEpoch*: Epoch currentEpoch*: Epoch
checkpoints*: FinalityCheckpoints checkpoints*: FinalityCheckpoints
nodes*: ProtoNodes nodes*: ProtoNodes
@ -121,7 +112,6 @@ type
balances*: seq[Gwei] balances*: seq[Gwei]
Checkpoints* = object Checkpoints* = object
version*: ForkChoiceVersion
time*: BeaconTime time*: BeaconTime
justified*: BalanceCheckpoint justified*: BalanceCheckpoint
finalized*: Checkpoint finalized*: Checkpoint

View File

@ -90,8 +90,7 @@ func nodeLeadsToViableHead(
# ---------------------------------------------------------------------- # ----------------------------------------------------------------------
func init*( func init*(
T: type ProtoArray, checkpoints: FinalityCheckpoints, T: type ProtoArray, checkpoints: FinalityCheckpoints): T =
version: ForkChoiceVersion): T =
let node = ProtoNode( let node = ProtoNode(
bid: BlockId( bid: BlockId(
slot: checkpoints.finalized.epoch.start_slot, slot: checkpoints.finalized.epoch.start_slot,
@ -103,8 +102,7 @@ func init*(
bestChild: none(int), bestChild: none(int),
bestDescendant: none(int)) bestDescendant: none(int))
T(version: version, T(checkpoints: checkpoints,
checkpoints: checkpoints,
nodes: ProtoNodes(buf: @[node], offset: 0), nodes: ProtoNodes(buf: @[node], offset: 0),
indices: {node.bid.root: 0}.toTable()) indices: {node.bid.root: 0}.toTable())
@ -536,19 +534,6 @@ func nodeIsViableForHead(
node.checkpoints.justified.epoch == self.checkpoints.justified.epoch node.checkpoints.justified.epoch == self.checkpoints.justified.epoch
if not correctJustified: if not correctJustified:
case self.version
of ForkChoiceVersion.Stable:
# If the previous epoch is justified, the block should be pulled-up.
# In this case, check that unrealized justification is higher than the
# store and that the voting source is not more than two epochs ago
if self.isPreviousEpochJustified and
node.bid.slot.epoch == self.currentEpoch:
let unrealized =
self.currentEpochTips.getOrDefault(nodeIdx, node.checkpoints)
correctJustified =
unrealized.justified.epoch >= self.checkpoints.justified.epoch and
node.checkpoints.justified.epoch + 2 >= self.currentEpoch
of ForkChoiceVersion.Pr3431:
# The voting source should be either at the same height as the store's # The voting source should be either at the same height as the store's
# justified checkpoint or not more than two epochs ago # justified checkpoint or not more than two epochs ago
correctJustified = correctJustified =
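
With the `Stable` branch removed, the surviving viability rule reduces to a single predicate on epochs: the node's voting source must either sit at the store's justified height or be no more than two epochs behind the current epoch. A hedged sketch of that predicate, using bare epoch numbers rather than the real `ProtoArray` state:

```nim
type Epoch = uint64

func votingSourceOk(nodeJustified, storeJustified, currentEpoch: Epoch): bool =
  # Accept when the voting source matches the store's justified checkpoint,
  # or when it is at most two epochs behind the current epoch.
  nodeJustified == storeJustified or nodeJustified + 2 >= currentEpoch

when isMainModule:
  doAssert votingSourceOk(10, 10, 12)      # same height as the store
  doAssert votingSourceOk(11, 10, 12)      # within two epochs
  doAssert not votingSourceOk(8, 10, 12)   # stale and mismatched
```
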

View File

@ -99,7 +99,7 @@ type
Batch* = object Batch* = object
## A batch represents up to BatchedCryptoSize non-aggregated signatures ## A batch represents up to BatchedCryptoSize non-aggregated signatures
created: Moment created: Moment
sigsets: seq[SignatureSet] multiSets: Table[array[32, byte], MultiSignatureSet]
items: seq[BatchItem] items: seq[BatchItem]
VerifierItem = object VerifierItem = object
@ -198,7 +198,7 @@ proc complete(batchCrypto: var BatchCrypto, batch: var Batch, ok: bool) =
batchCrypto.counts.batches += 1 batchCrypto.counts.batches += 1
batchCrypto.counts.signatures += batch.items.len() batchCrypto.counts.signatures += batch.items.len()
batchCrypto.counts.aggregates += batch.sigsets.len() batchCrypto.counts.aggregates += batch.multiSets.len()
if batchCrypto.counts.batches >= 256: if batchCrypto.counts.batches >= 256:
# Not too often, so as not to overwhelm our metrics # Not too often, so as not to overwhelm our metrics
@ -208,16 +208,6 @@ proc complete(batchCrypto: var BatchCrypto, batch: var Batch, ok: bool) =
reset(batchCrypto.counts) reset(batchCrypto.counts)
func combine(a: var Signature, b: Signature) =
var tmp = AggregateSignature.init(CookedSig(a))
tmp.aggregate(b)
a = Signature(tmp.finish())
func combine(a: var PublicKey, b: PublicKey) =
var tmp = AggregatePublicKey.init(CookedPubKey(a))
tmp.aggregate(b)
a = PublicKey(tmp.finish())
proc batchVerifyTask(task: ptr BatchTask) {.nimcall.} = proc batchVerifyTask(task: ptr BatchTask) {.nimcall.} =
# Task suitable for running in taskpools - look, no GC! # Task suitable for running in taskpools - look, no GC!
let let
@ -237,12 +227,29 @@ proc spawnBatchVerifyTask(tp: Taskpool, task: ptr BatchTask) =
# Possibly related to: https://github.com/nim-lang/Nim/issues/22305 # Possibly related to: https://github.com/nim-lang/Nim/issues/22305
tp.spawn batchVerifyTask(task) tp.spawn batchVerifyTask(task)
proc batchVerifyAsync*( func combine(
verifier: ref BatchVerifier, signal: ThreadSignalPtr, multiSet: MultiSignatureSet,
verifier: ref BatchVerifier): SignatureSet =
var secureRandomBytes: array[32, byte]
verifier[].rng[].generate(secureRandomBytes)
multiSet.combine(secureRandomBytes)
func combineAll(
multiSets: Table[array[32, byte], MultiSignatureSet],
verifier: ref BatchVerifier): seq[SignatureSet] =
var sigsets = newSeqOfCap[SignatureSet](multiSets.len)
for multiSet in multiSets.values():
sigsets.add multiSet.combine(verifier)
sigsets
proc batchVerifyAsync(
verifier: ref BatchVerifier,
signal: ThreadSignalPtr,
batch: ref Batch): Future[bool] {.async: (raises: [CancelledError]).} = batch: ref Batch): Future[bool] {.async: (raises: [CancelledError]).} =
let sigsets = batch[].multiSets.combineAll(verifier)
var task = BatchTask( var task = BatchTask(
setsPtr: makeUncheckedArray(baseAddr batch[].sigsets), setsPtr: makeUncheckedArray(baseAddr sigsets),
numSets: batch[].sigsets.len, numSets: sigsets.len,
taskpool: verifier[].taskpool, taskpool: verifier[].taskpool,
cache: addr verifier[].sigVerifCache, cache: addr verifier[].sigVerifCache,
signal: signal, signal: signal,
@ -264,18 +271,18 @@ proc batchVerifyAsync*(
task.ok.load() task.ok.load()
proc processBatch( proc processBatch(
batchCrypto: ref BatchCrypto, batch: ref Batch, batchCrypto: ref BatchCrypto,
verifier: ref BatchVerifier, signal: ThreadSignalPtr) {.async: (raises: [CancelledError]).} = batch: ref Batch,
let verifier: ref BatchVerifier,
numSets = batch[].sigsets.len() signal: ThreadSignalPtr) {.async: (raises: [CancelledError]).} =
let numSets = batch[].multiSets.len
if numSets == 0: if numSets == 0:
# Nothing to do in this batch, can happen when a batch is created without # Nothing to do in this batch, can happen when a batch is created without
# there being any signatures successfully added to it # there being any signatures successfully added to it
return return
let let startTick = Moment.now()
startTick = Moment.now()
# If the hardware is too slow to keep up or an event caused a temporary # If the hardware is too slow to keep up or an event caused a temporary
# buildup of signature verification tasks, the batch will be dropped so as to # buildup of signature verification tasks, the batch will be dropped so as to
@ -300,13 +307,19 @@ proc processBatch(
# may not be beneficial to use batch verification: # may not be beneficial to use batch verification:
# https://github.com/status-im/nim-blscurve/blob/3956f63dd0ed5d7939f6195ee09e4c5c1ace9001/blscurve/bls_batch_verifier.nim#L390 # https://github.com/status-im/nim-blscurve/blob/3956f63dd0ed5d7939f6195ee09e4c5c1ace9001/blscurve/bls_batch_verifier.nim#L390
if numSets == 1: if numSets == 1:
blsVerify(batch[].sigsets[0]) var r: bool
for multiSet in batch[].multiSets.values():
r = blsVerify(multiSet.combine(verifier))
break
r
elif batchCrypto[].taskpool.numThreads > 1 and numSets > 3: elif batchCrypto[].taskpool.numThreads > 1 and numSets > 3:
await batchVerifyAsync(verifier, signal, batch) await batchVerifyAsync(verifier, signal, batch)
else: else:
let secureRandomBytes = verifier[].rng[].generate(array[32, byte]) let secureRandomBytes = verifier[].rng[].generate(array[32, byte])
batchVerifySerial( batchVerifySerial(
verifier[].sigVerifCache, batch.sigsets, secureRandomBytes) verifier[].sigVerifCache,
batch.multiSets.combineAll(verifier),
secureRandomBytes)
trace "batch crypto - finished", trace "batch crypto - finished",
numSets, items = batch[].items.len(), ok, numSets, items = batch[].items.len(), ok,
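
The dispatch in `processBatch` now has three paths: a single combined set is verified directly, larger batches go to the task pool when spare threads exist, and everything else falls back to serial batch verification. A small hedged sketch of that decision, with a hypothetical helper standing in for the real proc:

```nim
func verifyStrategy(numSets, numThreads: int): string =
  # Mirrors the dispatch above: empty batches are skipped, a single combined
  # set is verified directly, larger batches use the task pool only when more
  # than one thread is available, and everything else is verified serially.
  if numSets == 0:
    "nothing to do"
  elif numSets == 1:
    "single blsVerify"
  elif numThreads > 1 and numSets > 3:
    "parallel batch verification"
  else:
    "serial batch verification"

when isMainModule:
  doAssert verifyStrategy(1, 8) == "single blsVerify"
  doAssert verifyStrategy(4, 8) == "parallel batch verification"
  doAssert verifyStrategy(3, 8) == "serial batch verification"
  doAssert verifyStrategy(10, 1) == "serial batch verification"
```
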
@ -366,18 +379,10 @@ proc verifySoon(
batch = batchCrypto[].getBatch() batch = batchCrypto[].getBatch()
fut = newFuture[BatchResult](name) fut = newFuture[BatchResult](name)
var found = false batch[].multiSets.withValue(sigset.message, multiSet):
# Find existing signature sets with the same message - if we can verify an multiSet[].add sigset
# aggregate instead of several signatures, that is _much_ faster do:
for item in batch[].sigsets.mitems(): batch[].multiSets[sigset.message] = MultiSignatureSet.init sigset
if item.message == sigset.message:
item.signature.combine(sigset.signature)
item.pubkey.combine(sigset.pubkey)
found = true
break
if not found:
batch[].sigsets.add sigset
# We need to keep the "original" sigset to allow verifying each signature # We need to keep the "original" sigset to allow verifying each signature
# one by one in the case the combined operation fails # one by one in the case the combined operation fails
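
The `withValue`/`do` pair above is the standard `std/tables` upsert: extend the multi-set if one already exists for this message, otherwise create it. A self-contained sketch of the same grouping pattern with hypothetical stand-in types:

```nim
import std/[hashes, tables]

type
  SigSet = object            # stand-in for SignatureSet
    message: array[32, byte]
    weight: int
  MultiSet = object          # stand-in for MultiSignatureSet
    members: seq[SigSet]

proc addOrInit(groups: var Table[array[32, byte], MultiSet], s: SigSet) =
  # Upsert keyed by message: extend an existing group, otherwise start one.
  groups.withValue(s.message, existing):
    existing[].members.add s
  do:
    groups[s.message] = MultiSet(members: @[s])

when isMainModule:
  var groups = initTable[array[32, byte], MultiSet]()
  var msg: array[32, byte]
  msg[0] = 1
  groups.addOrInit(SigSet(message: msg, weight: 1))
  groups.addOrInit(SigSet(message: msg, weight: 2))
  # Both sets share a message, so they end up in one combined group.
  doAssert groups.len == 1 and groups[msg].members.len == 2
```

Grouping by message rather than combining pairwise on insert keeps the original sets available, which matters when a failed combined verification has to fall back to checking signatures one by one.
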

View File

@ -9,11 +9,11 @@
import import
chronicles, chronos, metrics, chronicles, chronos, metrics,
../spec/[forks, signatures, signatures_batch], ../spec/[forks, helpers_el, signatures, signatures_batch],
../sszdump ../sszdump
from std/deques import Deque, addLast, contains, initDeque, items, len, shrink from std/deques import Deque, addLast, contains, initDeque, items, len, shrink
from std/sequtils import mapIt from std/sequtils import anyIt, mapIt
from ../consensus_object_pools/consensus_manager import from ../consensus_object_pools/consensus_manager import
ConsensusManager, checkNextProposer, optimisticExecutionBlockHash, ConsensusManager, checkNextProposer, optimisticExecutionBlockHash,
runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead, runProposalForkchoiceUpdated, shouldSyncOptimistically, updateHead,
@ -586,32 +586,42 @@ proc storeBlock(
if NewPayloadStatus.noResponse == payloadStatus: if NewPayloadStatus.noResponse == payloadStatus:
# When the execution layer is not available to verify the payload, we do the # When the execution layer is not available to verify the payload, we do the
# required check on the CL side instead and proceed as if the EL was syncing # required checks on the CL instead and proceed as if the EL was syncing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#verify_and_notify_new_payload
# TODO run https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#blob-kzg-commitments # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/beacon-chain.md#modified-verify_and_notify_new_payload
# https://github.com/ethereum/execution-apis/blob/main/src/engine/experimental/blob-extension.md#specification
# "This validation MUST be instantly run in all cases even during active
# sync process."
#
# Client software MUST validate `blockHash` value as being equivalent to
# `Keccak256(RLP(ExecutionBlockHeader))`
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/paris.md#specification
#
# This should simulate an unsynced EL, which still must perform these
# checks. This means it must be able to do so without context, beyond
# whatever data the block itself contains.
when typeof(signedBlock).kind >= ConsensusFork.Bellatrix: when typeof(signedBlock).kind >= ConsensusFork.Bellatrix:
if signedBlock.message.is_execution_block:
template payload(): auto = signedBlock.message.body.execution_payload template payload(): auto = signedBlock.message.body.execution_payload
if signedBlock.message.is_execution_block and
payload.block_hash != template returnWithError(msg: string, extraMsg = ""): untyped =
signedBlock.message.compute_execution_block_hash(): if extraMsg != "":
debug "Execution block hash validation failed", debug msg, reason = extraMsg, executionPayload = shortLog(payload)
execution_payload = shortLog(payload) else:
debug msg, executionPayload = shortLog(payload)
self[].dumpInvalidBlock(signedBlock) self[].dumpInvalidBlock(signedBlock)
doAssert strictVerification notin dag.updateFlags doAssert strictVerification notin dag.updateFlags
self.consensusManager.quarantine[].addUnviable(signedBlock.root) self.consensusManager.quarantine[].addUnviable(signedBlock.root)
return err((VerifierError.Invalid, ProcessingStatus.completed)) return err((VerifierError.Invalid, ProcessingStatus.completed))
if payload.transactions.anyIt(it.len == 0):
returnWithError "Execution block contains zero length transactions"
if payload.block_hash !=
signedBlock.message.compute_execution_block_hash():
returnWithError "Execution block hash validation failed"
# [New in Deneb:EIP4844]
when typeof(signedBlock).kind >= ConsensusFork.Deneb:
let blobsRes = signedBlock.message.is_valid_versioned_hashes
if blobsRes.isErr:
returnWithError "Blob versioned hashes invalid", blobsRes.error
else:
# If there are EIP-4844 (type 3) transactions in the payload with
# versioned hashes, the transactions would be rejected by the EL
# based on payload timestamp (only allowed post Deneb);
# There are no `blob_kzg_commitments` before Deneb to compare against
discard
let newPayloadTick = Moment.now() let newPayloadTick = Moment.now()
# TODO with v1.4.0, not sure this is still relevant # TODO with v1.4.0, not sure this is still relevant
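
Taken together, the no-response fallback performs three structural checks on the CL side: reject zero-length transactions, recompute and compare the execution block hash, and (post-Deneb) verify the blob versioned hashes against the block's KZG commitments. A hedged, self-contained sketch of that shape, with hypothetical helpers standing in for the real spec functions:

```nim
type
  Payload = object                   # hypothetical trimmed-down payload
    transactions: seq[seq[byte]]
    blockHash: array[32, byte]

func computedBlockHash(p: Payload): array[32, byte] =
  # Stand-in for compute_execution_block_hash (Keccak256 of the RLP header).
  result[0] = byte(p.transactions.len)

func clSideChecksPass(p: Payload): bool =
  # 1. No zero-length transactions: they can never be valid RLP.
  for tx in p.transactions:
    if tx.len == 0:
      return false
  # 2. The advertised block hash must match the recomputed one.
  if p.blockHash != p.computedBlockHash():
    return false
  # 3. Post-Deneb, blob versioned hashes must match blob_kzg_commitments
  #    (omitted here; see is_valid_versioned_hashes in the real code).
  true

when isMainModule:
  var p = Payload(transactions: @[@[byte 1, 2, 3]])
  p.blockHash = p.computedBlockHash()
  doAssert p.clSideChecksPass()
  p.transactions.add newSeq[byte]()  # a zero-length transaction is rejected
  doAssert not p.clSideChecksPass()
```
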
@ -888,7 +898,7 @@ proc processBlock(
# - MUST NOT optimistically import the block. # - MUST NOT optimistically import the block.
# - MUST NOT apply the block to the fork choice store. # - MUST NOT apply the block to the fork choice store.
# - MAY queue the block for later processing. # - MAY queue the block for later processing.
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#execution-engine-errors # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/sync/optimistic.md#execution-engine-errors
await sleepAsync(chronos.seconds(1)) await sleepAsync(chronos.seconds(1))
self[].enqueueBlock( self[].enqueueBlock(
entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized, entry.src, entry.blck, entry.blobs, entry.resfut, entry.maybeFinalized,

View File

@ -303,7 +303,7 @@ template validateBeaconBlockBellatrix(
# #
# `is_merge_transition_complete(state)` tests for # `is_merge_transition_complete(state)` tests for
# `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while # `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#block-processing
# shows that `state.latest_execution_payload_header` being default or not is # shows that `state.latest_execution_payload_header` being default or not is
# exactly equivalent to whether that block's execution payload is default or # exactly equivalent to whether that block's execution payload is default or
# not, so test cached block information rather than reconstructing a state. # not, so test cached block information rather than reconstructing a state.
@ -1187,7 +1187,7 @@ proc validateAggregate*(
ok((attesting_indices, sig)) ok((attesting_indices, sig))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#bls_to_execution_change # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/p2p-interface.md#bls_to_execution_change
proc validateBlsToExecutionChange*( proc validateBlsToExecutionChange*(
pool: ValidatorChangePool, batchCrypto: ref BatchCrypto, pool: ValidatorChangePool, batchCrypto: ref BatchCrypto,
signed_address_change: SignedBLSToExecutionChange, signed_address_change: SignedBLSToExecutionChange,

View File

@ -94,7 +94,7 @@ typedef struct ETHConsensusConfig ETHConsensusConfig;
* based on the given `config.yaml` file content - If successful. * based on the given `config.yaml` file content - If successful.
* @return `NULL` - If the given `config.yaml` is malformed or incompatible. * @return `NULL` - If the given `config.yaml` is malformed or incompatible.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/configs/README.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHConsensusConfig *_Nullable ETHConsensusConfigCreateFromYaml(const char *configFileContent); ETHConsensusConfig *_Nullable ETHConsensusConfigCreateFromYaml(const char *configFileContent);
@ -151,9 +151,9 @@ typedef struct ETHBeaconState ETHBeaconState;
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/configs/README.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHBeaconState *_Nullable ETHBeaconStateCreateFromSsz( ETHBeaconState *_Nullable ETHBeaconStateCreateFromSsz(
@ -325,8 +325,8 @@ typedef struct ETHLightClientStore ETHLightClientStore;
* *
* @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
* @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream * @see https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
ETHLightClientStore *_Nullable ETHLightClientStoreCreateFromBootstrap( ETHLightClientStore *_Nullable ETHLightClientStoreCreateFromBootstrap(
@ -579,7 +579,7 @@ typedef struct ETHLightClientHeader ETHLightClientHeader;
* *
* @return Latest finalized header. * @return Latest finalized header.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/sync-protocol.md#modified-lightclientheader * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader( const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
@ -598,7 +598,7 @@ const ETHLightClientHeader *ETHLightClientStoreGetFinalizedHeader(
* @return Whether or not the next sync committee is currently known. * @return Whether or not the next sync committee is currently known.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
* @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store); bool ETHLightClientStoreIsNextSyncCommitteeKnown(const ETHLightClientStore *store);
@ -695,7 +695,7 @@ typedef struct ETHBeaconBlockHeader ETHBeaconBlockHeader;
* *
* @return Beacon block header. * @return Beacon block header.
* *
* @see https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#beaconblockheader * @see https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#beaconblockheader
*/ */
ETH_RESULT_USE_CHECK ETH_RESULT_USE_CHECK
const ETHBeaconBlockHeader *ETHLightClientHeaderGetBeacon( const ETHBeaconBlockHeader *ETHLightClientHeaderGetBeacon(

View File

@ -17,7 +17,7 @@ import
json_rpc/jsonmarshal, json_rpc/jsonmarshal,
secp256k1, secp256k1,
web3/[engine_api_types, eth_api_types, conversions], web3/[engine_api_types, eth_api_types, conversions],
../el/eth1_chain, ../el/[engine_api_conversions, eth1_chain],
../spec/eth2_apis/[eth2_rest_serialization, rest_light_client_calls], ../spec/eth2_apis/[eth2_rest_serialization, rest_light_client_calls],
../spec/[helpers, light_client_sync], ../spec/[helpers, light_client_sync],
../sync/light_client_sync_helpers, ../sync/light_client_sync_helpers,
@ -77,7 +77,7 @@ proc ETHConsensusConfigCreateFromYaml(
## * `NULL` - If the given `config.yaml` is malformed or incompatible. ## * `NULL` - If the given `config.yaml` is malformed or incompatible.
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/configs/README.md
let cfg = RuntimeConfig.new() let cfg = RuntimeConfig.new()
try: try:
cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0] cfg[] = readRuntimeConfig($configFileContent, "config.yaml")[0]
@ -143,9 +143,9 @@ proc ETHBeaconStateCreateFromSsz(
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#beaconstate
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/configs/README.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/configs/README.md
let let
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
return nil return nil
@ -328,8 +328,8 @@ proc ETHLightClientStoreCreateFromBootstrap(
## See: ## See:
## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Beacon/getLightClientBootstrap
## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream ## * https://ethereum.github.io/beacon-APIs/?urls.primaryName=v2.4.1#/Events/eventstream
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#weak-subjectivity-period ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#weak-subjectivity-period
let let
mediaType = MediaType.init($mediaType) mediaType = MediaType.init($mediaType)
consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr: consensusFork = ConsensusFork.decodeString($consensusVersion).valueOr:
@ -755,7 +755,7 @@ func ETHLightClientStoreIsNextSyncCommitteeKnown(
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_next_sync_committee_known
## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md
store[].is_next_sync_committee_known store[].is_next_sync_committee_known
func ETHLightClientStoreGetOptimisticHeader( func ETHLightClientStoreGetOptimisticHeader(
@ -841,7 +841,7 @@ proc ETHLightClientHeaderCopyBeaconRoot(
## * Pointer to a copy of the given header's beacon block root. ## * Pointer to a copy of the given header's beacon block root.
## ##
## See: ## See:
## * https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#hash_tree_root ## * https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#hash_tree_root
discard cfg # Future-proof against new fields, see `get_lc_execution_root`. discard cfg # Future-proof against new fields, see `get_lc_execution_root`.
let root = Eth2Digest.new() let root = Eth2Digest.new()
root[] = header[].beacon.hash_tree_root() root[] = header[].beacon.hash_tree_root()

View File

@ -86,7 +86,8 @@ proc createLightClient(
getBeaconTime: GetBeaconTimeFn, getBeaconTime: GetBeaconTimeFn,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
finalizationMode: LightClientFinalizationMode, finalizationMode: LightClientFinalizationMode,
strictVerification = false strictVerification = false,
shouldInhibitSync: light_client_manager.GetBoolCallback = nil
): LightClient = ): LightClient =
let lightClient = LightClient( let lightClient = LightClient(
network: network, network: network,
@ -177,7 +178,8 @@ proc createLightClient(
lightClient.network, rng, getTrustedBlockRoot, lightClient.network, rng, getTrustedBlockRoot,
bootstrapVerifier, updateVerifier, finalityVerifier, optimisticVerifier, bootstrapVerifier, updateVerifier, finalityVerifier, optimisticVerifier,
isLightClientStoreInitialized, isNextSyncCommitteeKnown, isLightClientStoreInitialized, isNextSyncCommitteeKnown,
getFinalizedPeriod, getOptimisticPeriod, getBeaconTime) getFinalizedPeriod, getOptimisticPeriod, getBeaconTime,
shouldInhibitSync = shouldInhibitSync)
lightClient.gossipState = {} lightClient.gossipState = {}
@ -191,13 +193,15 @@ proc createLightClient*(
forkDigests: ref ForkDigests, forkDigests: ref ForkDigests,
getBeaconTime: GetBeaconTimeFn, getBeaconTime: GetBeaconTimeFn,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
finalizationMode: LightClientFinalizationMode finalizationMode: LightClientFinalizationMode,
shouldInhibitSync: light_client_manager.GetBoolCallback = nil
): LightClient = ): LightClient =
createLightClient( createLightClient(
network, rng, network, rng,
config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming, config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,
cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode, cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode,
strictVerification = config.strictVerification) strictVerification = config.strictVerification,
shouldInhibitSync = shouldInhibitSync)
proc createLightClient*( proc createLightClient*(
network: Eth2Node, network: Eth2Node,
@ -207,12 +211,14 @@ proc createLightClient*(
forkDigests: ref ForkDigests, forkDigests: ref ForkDigests,
getBeaconTime: GetBeaconTimeFn, getBeaconTime: GetBeaconTimeFn,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
finalizationMode: LightClientFinalizationMode finalizationMode: LightClientFinalizationMode,
shouldInhibitSync: light_client_manager.GetBoolCallback = nil
): LightClient = ): LightClient =
createLightClient( createLightClient(
network, rng, network, rng,
dumpEnabled = false, dumpDirInvalid = ".", dumpDirIncoming = ".", dumpEnabled = false, dumpDirInvalid = ".", dumpDirIncoming = ".",
cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode) cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode,
shouldInhibitSync = shouldInhibitSync)
proc start*(lightClient: LightClient) = proc start*(lightClient: LightClient) =
notice "Starting light client", notice "Starting light client",

View File

@ -176,7 +176,7 @@ type
MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [].} MounterProc* = proc(network: Eth2Node) {.gcsafe, raises: [].}
MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [].} MessageContentPrinter* = proc(msg: pointer): string {.gcsafe, raises: [].}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#goodbye # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#goodbye
DisconnectionReason* = enum DisconnectionReason* = enum
# might see other values on the wire! # might see other values on the wire!
ClientShutDown = 1 ClientShutDown = 1
@ -2555,8 +2555,8 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
node.metadata.seq_number += 1 node.metadata.seq_number += 1
node.metadata.attnets = attnets node.metadata.attnets = attnets
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#attestation-subnet-subscription # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#attestation-subnet-bitfield # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
let res = node.discovery.updateRecord({ let res = node.discovery.updateRecord({
enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets) enrAttestationSubnetsField: SSZ.encode(node.metadata.attnets)
}) })
@ -2568,7 +2568,7 @@ proc updateStabilitySubnetMetadata*(node: Eth2Node, attnets: AttnetBits) =
debug "Stability subnets changed; updated ENR attnets", attnets debug "Stability subnets changed; updated ENR attnets", attnets
proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) = proc updateSyncnetsMetadata*(node: Eth2Node, syncnets: SyncnetBits) =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#sync-committee-subnet-stability
if node.metadata.syncnets == syncnets: if node.metadata.syncnets == syncnets:
return return

View File

@ -385,7 +385,7 @@ proc initFullNode(
quarantine = newClone( quarantine = newClone(
Quarantine.init()) Quarantine.init())
attestationPool = newClone(AttestationPool.init( attestationPool = newClone(AttestationPool.init(
dag, quarantine, config.forkChoiceVersion.get, onAttestationReceived)) dag, quarantine, onAttestationReceived))
syncCommitteeMsgPool = newClone( syncCommitteeMsgPool = newClone(
SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution)) SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution))
lightClientPool = newClone( lightClientPool = newClone(
@ -574,7 +574,9 @@ proc init*(T: type BeaconNode,
config: BeaconNodeConf, config: BeaconNodeConf,
metadata: Eth2NetworkMetadata): Future[BeaconNode] metadata: Eth2NetworkMetadata): Future[BeaconNode]
{.async.} = {.async.} =
var taskpool: TaskPoolPtr var
taskpool: TaskPoolPtr
genesisState: ref ForkedHashedBeaconState = nil
template cfg: auto = metadata.cfg template cfg: auto = metadata.cfg
template eth1Network: auto = metadata.eth1Network template eth1Network: auto = metadata.eth1Network
@ -582,10 +584,10 @@ proc init*(T: type BeaconNode,
if not(isDir(config.databaseDir)): if not(isDir(config.databaseDir)):
# If the database directory is missing, we are going to use the genesis state to check # If the database directory is missing, we are going to use the genesis state to check
# for weak_subjectivity_period. # for weak_subjectivity_period.
let
genesisState = genesisState =
await fetchGenesisState( await fetchGenesisState(
metadata, config.genesisState, config.genesisStateUrl) metadata, config.genesisState, config.genesisStateUrl)
let
genesisTime = getStateField(genesisState[], genesis_time) genesisTime = getStateField(genesisState[], genesis_time)
beaconClock = BeaconClock.init(genesisTime).valueOr: beaconClock = BeaconClock.init(genesisTime).valueOr:
fatal "Invalid genesis time in genesis state", genesisTime fatal "Invalid genesis time in genesis state", genesisTime
@ -640,15 +642,15 @@ proc init*(T: type BeaconNode,
db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false) db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false)
if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr: if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr:
var genesisState: ref ForkedHashedBeaconState
let trustedBlockRoot = let trustedBlockRoot =
if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome: if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome:
config.trustedBlockRoot config.trustedBlockRoot
elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH: elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH:
# Sync can be bootstrapped from the genesis block root # Sync can be bootstrapped from the genesis block root
if genesisState.isNil:
genesisState = await fetchGenesisState( genesisState = await fetchGenesisState(
metadata, config.genesisState, config.genesisStateUrl) metadata, config.genesisState, config.genesisStateUrl)
if genesisState != nil: if not genesisState.isNil:
let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root
notice "Neither `--trusted-block-root` nor `--trusted-state-root` " & notice "Neither `--trusted-block-root` nor `--trusted-state-root` " &
"provided with `--external-beacon-api-url`, " & "provided with `--external-beacon-api-url`, " &
@ -669,7 +671,7 @@ proc init*(T: type BeaconNode,
trustedBlockRoot = config.trustedBlockRoot, trustedBlockRoot = config.trustedBlockRoot,
trustedStateRoot = config.trustedStateRoot trustedStateRoot = config.trustedStateRoot
else: else:
if genesisState == nil: if genesisState.isNil:
genesisState = await fetchGenesisState( genesisState = await fetchGenesisState(
metadata, config.genesisState, config.genesisStateUrl) metadata, config.genesisState, config.genesisStateUrl)
await db.doRunTrustedNodeSync( await db.doRunTrustedNodeSync(
@ -735,15 +737,18 @@ proc init*(T: type BeaconNode,
var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot
if not ChainDAGRef.isInitialized(db).isOk(): if not ChainDAGRef.isInitialized(db).isOk():
let genesisState = genesisState =
if checkpointState != nil and if not checkpointState.isNil and
getStateField(checkpointState[], slot) == 0: getStateField(checkpointState[], slot) == 0:
checkpointState checkpointState
else: else:
if genesisState.isNil:
await fetchGenesisState( await fetchGenesisState(
metadata, config.genesisState, config.genesisStateUrl) metadata, config.genesisState, config.genesisStateUrl)
else:
genesisState
if genesisState == nil and checkpointState == nil: if genesisState.isNil and checkpointState.isNil:
fatal "No database and no genesis snapshot found. Please supply a genesis.ssz " & fatal "No database and no genesis snapshot found. Please supply a genesis.ssz " &
"with the network configuration" "with the network configuration"
quit 1 quit 1
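
The refactor above hoists `genesisState` to a single nillable ref that is fetched at most once and reused by every later branch. A minimal hedged sketch of that lazy-fetch pattern, using a generic ref and a synchronous stand-in for the (actually async) `fetchGenesisState`:

```nim
type State = ref object
  genesisTime: uint64

var fetchCount = 0

proc expensiveFetch(): State =
  # Stand-in for fetchGenesisState: pretend this downloads a large SSZ state.
  inc fetchCount
  State(genesisTime: 1_606_824_023'u64)

proc getOrFetch(cached: var State): State =
  # Fetch only if no earlier branch has populated the cache already.
  if cached.isNil:
    cached = expensiveFetch()
  cached

when isMainModule:
  var genesisState: State = nil
  discard genesisState.getOrFetch()  # first use triggers the fetch
  discard genesisState.getOrFetch()  # later uses reuse the cached ref
  doAssert fetchCount == 1
```
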
@ -1246,8 +1251,6 @@ proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) =
for validator in node.attachedValidators[]: for validator in node.attachedValidators[]:
validator.doppelgangerChecked(epoch - 1) validator.doppelgangerChecked(epoch - 1)
from ./spec/state_transition_epoch import effective_balance_might_update
proc maybeUpdateActionTrackerNextEpoch( proc maybeUpdateActionTrackerNextEpoch(
node: BeaconNode, forkyState: ForkyHashedBeaconState, nextEpoch: Epoch) = node: BeaconNode, forkyState: ForkyHashedBeaconState, nextEpoch: Epoch) =
if node.consensusManager[].actionTracker.needsUpdate( if node.consensusManager[].actionTracker.needsUpdate(
@ -1793,7 +1796,7 @@ proc installMessageValidators(node: BeaconNode) =
let digest = forkDigests[].atConsensusFork(consensusFork) let digest = forkDigests[].atConsensusFork(consensusFork)
# beacon_block # beacon_block
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_block # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#beacon_block
node.network.addValidator( node.network.addValidator(
getBeaconBlocksTopic(digest), proc ( getBeaconBlocksTopic(digest), proc (
signedBlock: consensusFork.SignedBeaconBlock signedBlock: consensusFork.SignedBeaconBlock
@ -1910,7 +1913,7 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, msg))) MsgSource.gossip, msg)))
when consensusFork >= ConsensusFork.Capella: when consensusFork >= ConsensusFork.Capella:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#bls_to_execution_change # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/p2p-interface.md#bls_to_execution_change
node.network.addAsyncValidator( node.network.addAsyncValidator(
getBlsToExecutionChangeTopic(digest), proc ( getBlsToExecutionChangeTopic(digest), proc (
msg: SignedBLSToExecutionChange msg: SignedBLSToExecutionChange
@ -2262,8 +2265,6 @@ proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.rai
# works # works
for node in metadata.bootstrapNodes: for node in metadata.bootstrapNodes:
config.bootstrapNodes.add node config.bootstrapNodes.add node
if config.forkChoiceVersion.isNone:
config.forkChoiceVersion = some(ForkChoiceVersion.Pr3431)
## Ctrl+C handling ## Ctrl+C handling
proc controlCHandler() {.noconv.} = proc controlCHandler() {.noconv.} =

View File

@ -43,6 +43,8 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
Base10.toString(MIN_DEPOSIT_AMOUNT), Base10.toString(MIN_DEPOSIT_AMOUNT),
MAX_EFFECTIVE_BALANCE: MAX_EFFECTIVE_BALANCE:
Base10.toString(MAX_EFFECTIVE_BALANCE), Base10.toString(MAX_EFFECTIVE_BALANCE),
MAX_EFFECTIVE_BALANCE_ELECTRA:
Base10.toString(static(MAX_EFFECTIVE_BALANCE_ELECTRA.uint64)),
EFFECTIVE_BALANCE_INCREMENT: EFFECTIVE_BALANCE_INCREMENT:
Base10.toString(EFFECTIVE_BALANCE_INCREMENT), Base10.toString(EFFECTIVE_BALANCE_INCREMENT),
MIN_ATTESTATION_INCLUSION_DELAY: MIN_ATTESTATION_INCLUSION_DELAY:
@ -90,7 +92,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_VOLUNTARY_EXITS: MAX_VOLUNTARY_EXITS:
Base10.toString(MAX_VOLUNTARY_EXITS), Base10.toString(MAX_VOLUNTARY_EXITS),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/altair.yaml
INACTIVITY_PENALTY_QUOTIENT_ALTAIR: INACTIVITY_PENALTY_QUOTIENT_ALTAIR:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR), Base10.toString(INACTIVITY_PENALTY_QUOTIENT_ALTAIR),
MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR:
@ -106,7 +108,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
UPDATE_TIMEOUT: UPDATE_TIMEOUT:
Base10.toString(UPDATE_TIMEOUT), Base10.toString(UPDATE_TIMEOUT),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/bellatrix.yaml
INACTIVITY_PENALTY_QUOTIENT_BELLATRIX: INACTIVITY_PENALTY_QUOTIENT_BELLATRIX:
Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX), Base10.toString(INACTIVITY_PENALTY_QUOTIENT_BELLATRIX),
MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX: MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX:
@ -122,7 +124,7 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
MAX_EXTRA_DATA_BYTES: MAX_EXTRA_DATA_BYTES:
Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)), Base10.toString(uint64(MAX_EXTRA_DATA_BYTES)),
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/capella.yaml
MAX_BLS_TO_EXECUTION_CHANGES: MAX_BLS_TO_EXECUTION_CHANGES:
Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)), Base10.toString(uint64(MAX_BLS_TO_EXECUTION_CHANGES)),
MAX_WITHDRAWALS_PER_PAYLOAD: MAX_WITHDRAWALS_PER_PAYLOAD:
@ -180,6 +182,10 @@ proc installConfigApiHandlers*(router: var RestRouter, node: BeaconNode) =
"0x" & $cfg.DENEB_FORK_VERSION, "0x" & $cfg.DENEB_FORK_VERSION,
DENEB_FORK_EPOCH: DENEB_FORK_EPOCH:
Base10.toString(uint64(cfg.DENEB_FORK_EPOCH)), Base10.toString(uint64(cfg.DENEB_FORK_EPOCH)),
ELECTRA_FORK_VERSION:
"0x" & $cfg.ELECTRA_FORK_VERSION,
ELECTRA_FORK_EPOCH:
Base10.toString(uint64(cfg.ELECTRA_FORK_EPOCH)),
SECONDS_PER_SLOT: SECONDS_PER_SLOT:
Base10.toString(SECONDS_PER_SLOT), Base10.toString(SECONDS_PER_SLOT),
SECONDS_PER_ETH1_BLOCK: SECONDS_PER_ETH1_BLOCK:

View File

@ -90,8 +90,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
var response = GetForkChoiceResponse( var response = GetForkChoiceResponse(
justified_checkpoint: forkChoice.checkpoints.justified.checkpoint, justified_checkpoint: forkChoice.checkpoints.justified.checkpoint,
finalized_checkpoint: forkChoice.checkpoints.finalized, finalized_checkpoint: forkChoice.checkpoints.finalized,
extra_data: RestExtraData( extra_data: RestExtraData())
version: some($forkChoice.backend.proto_array.version)))
for item in forkChoice.backend.proto_array: for item in forkChoice.backend.proto_array:
let let

View File

@ -43,7 +43,7 @@ const
GENESIS_SLOT* = Slot(0) GENESIS_SLOT* = Slot(0)
GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT) GENESIS_EPOCH* = Epoch(0) # compute_epoch_at_slot(GENESIS_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#constant # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#constant
INTERVALS_PER_SLOT* = 3 INTERVALS_PER_SLOT* = 3
FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high()) FAR_FUTURE_BEACON_TIME* = BeaconTime(ns_since_genesis: int64.high())
@ -139,16 +139,16 @@ const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate
aggregateSlotOffset* = TimeDiff(nanoseconds: aggregateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#prepare-sync-committee-message
syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds: syncCommitteeMessageSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#broadcast-sync-committee-contribution
syncContributionSlotOffset* = TimeDiff(nanoseconds: syncContributionSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 * 2 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds: lightClientFinalityUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#sync-committee
lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds: lightClientOptimisticUpdateSlotOffset* = TimeDiff(nanoseconds:
NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT) NANOSECONDS_PER_SLOT.int64 div INTERVALS_PER_SLOT)
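
For intuition, with mainnet's 12-second slots and INTERVALS_PER_SLOT = 3, the one-interval offsets land 4 s into the slot and the two-interval offsets 8 s in. A quick check of that arithmetic, assuming the mainnet preset value for SECONDS_PER_SLOT:

```nim
const
  SecondsPerSlot = 12'i64      # mainnet preset value for SECONDS_PER_SLOT
  IntervalsPerSlot = 3'i64     # INTERVALS_PER_SLOT from the fork-choice spec
  NanosecondsPerSlot = SecondsPerSlot * 1_000_000_000'i64

# One third of a slot (4 s): sync committee messages, light client updates.
static: doAssert NanosecondsPerSlot div IntervalsPerSlot == 4_000_000_000'i64
# Two thirds of a slot (8 s): aggregates and sync committee contributions.
static: doAssert NanosecondsPerSlot * 2 div IntervalsPerSlot == 8_000_000_000'i64
```
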
@ -188,7 +188,7 @@ func epoch*(slot: Slot): Epoch = # aka compute_epoch_at_slot
if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH if slot == FAR_FUTURE_SLOT: FAR_FUTURE_EPOCH
else: Epoch(slot div SLOTS_PER_EPOCH) else: Epoch(slot div SLOTS_PER_EPOCH)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#compute_slots_since_epoch_start # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#compute_slots_since_epoch_start
func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_start
## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`) ## How many slots since the beginning of the epoch (`[0..SLOTS_PER_EPOCH-1]`)
(slot mod SLOTS_PER_EPOCH) (slot mod SLOTS_PER_EPOCH)
@ -196,7 +196,7 @@ func since_epoch_start*(slot: Slot): uint64 = # aka compute_slots_since_epoch_st
template is_epoch*(slot: Slot): bool = template is_epoch*(slot: Slot): bool =
slot.since_epoch_start == 0 slot.since_epoch_start == 0
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#compute_start_slot_at_epoch
func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch func start_slot*(epoch: Epoch): Slot = # aka compute_start_slot_at_epoch
## Return the start slot of ``epoch``. ## Return the start slot of ``epoch``.
const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH) const maxEpoch = Epoch(FAR_FUTURE_SLOT div SLOTS_PER_EPOCH)
@ -216,7 +216,7 @@ iterator slots*(epoch: Epoch): Slot =
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH: for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
yield slot yield slot
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#sync-committee
template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod = template sync_committee_period*(epoch: Epoch): SyncCommitteePeriod =
if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD if epoch == FAR_FUTURE_EPOCH: FAR_FUTURE_PERIOD
else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD) else: SyncCommitteePeriod(epoch div EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

View File

@ -86,7 +86,7 @@ func compute_activation_exit_epoch*(epoch: Epoch): Epoch =
## ``epoch`` take effect. ## ``epoch`` take effect.
epoch + 1 + MAX_SEED_LOOKAHEAD epoch + 1 + MAX_SEED_LOOKAHEAD
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_validator_churn_limit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_validator_churn_limit
func get_validator_churn_limit*( func get_validator_churn_limit*(
cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache): cfg: RuntimeConfig, state: ForkyBeaconState, cache: var StateCache):
uint64 = uint64 =
@ -96,7 +96,7 @@ func get_validator_churn_limit*(
count_active_validators( count_active_validators(
state, state.get_current_epoch(), cache) div cfg.CHURN_LIMIT_QUOTIENT) state, state.get_current_epoch(), cache) div cfg.CHURN_LIMIT_QUOTIENT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#new-get_validator_activation_churn_limit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/beacon-chain.md#new-get_validator_activation_churn_limit
func get_validator_activation_churn_limit*( func get_validator_activation_churn_limit*(
cfg: RuntimeConfig, state: deneb.BeaconState | electra.BeaconState, cfg: RuntimeConfig, state: deneb.BeaconState | electra.BeaconState,
cache: var StateCache): uint64 = cache: var StateCache): uint64 =
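
As a hedged numerical example of the churn-limit formulas, assuming the mainnet config values MIN_PER_EPOCH_CHURN_LIMIT = 4, CHURN_LIMIT_QUOTIENT = 65536 and MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT = 8 (the Deneb activation cap):

```nim
const
  MinPerEpochChurnLimit = 4'u64                 # MIN_PER_EPOCH_CHURN_LIMIT
  ChurnLimitQuotient = 65_536'u64               # CHURN_LIMIT_QUOTIENT
  MaxPerEpochActivationChurnLimit = 8'u64       # Deneb activation cap

func churnLimit(activeValidators: uint64): uint64 =
  max(MinPerEpochChurnLimit, activeValidators div ChurnLimitQuotient)

func activationChurnLimit(activeValidators: uint64): uint64 =
  min(MaxPerEpochActivationChurnLimit, churnLimit(activeValidators))

when isMainModule:
  # With roughly one million active validators, exits churn at 15 per epoch
  # while activations stay capped at 8 per epoch after Deneb.
  doAssert churnLimit(1_000_000) == 15
  doAssert activationChurnLimit(1_000_000) == 8
  # Small validator sets fall back to the minimum churn of 4 per epoch.
  doAssert churnLimit(100_000) == 4
```
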
@ -270,7 +270,7 @@ func compute_consolidation_epoch_and_update_churn*(
state.earliest_consolidation_epoch state.earliest_consolidation_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated--initiate_validator_exit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-initiate_validator_exit
func initiate_validator_exit*( func initiate_validator_exit*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
index: ValidatorIndex, exit_queue_info: ExitQueueInfo, index: ValidatorIndex, exit_queue_info: ExitQueueInfo,
@ -301,7 +301,7 @@ from ./datatypes/deneb import BeaconState
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator
func get_slashing_penalty*( func get_slashing_penalty*(
state: ForkyBeaconState, validator_effective_balance: Gwei): Gwei = state: ForkyBeaconState, validator_effective_balance: Gwei): Gwei =
@ -319,21 +319,21 @@ func get_slashing_penalty*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_whistleblower_reward*( func get_whistleblower_reward*(
state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState | state: phase0.BeaconState | altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState, capella.BeaconState | deneb.BeaconState,
validator_effective_balance: Gwei): Gwei = validator_effective_balance: Gwei): Gwei =
validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-slash_validator
func get_whistleblower_reward*( func get_whistleblower_reward*(
state: electra.BeaconState, validator_effective_balance: Gwei): Gwei = state: electra.BeaconState, validator_effective_balance: Gwei): Gwei =
validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA validator_effective_balance div WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-slash_validator
func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei = func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei =
when state is phase0.BeaconState: when state is phase0.BeaconState:
whistleblower_reward div PROPOSER_REWARD_QUOTIENT whistleblower_reward div PROPOSER_REWARD_QUOTIENT
@ -346,7 +346,7 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): G
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-slash_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-slash_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-slash_validator
proc slash_validator*( proc slash_validator*(
cfg: RuntimeConfig, state: var ForkyBeaconState, cfg: RuntimeConfig, state: var ForkyBeaconState,
slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo, slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo,
@ -407,7 +407,7 @@ func get_initial_beacon_block*(state: phase0.HashedBeaconState):
phase0.TrustedSignedBeaconBlock( phase0.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message)) message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#initialize-state-for-pure-altair-testnets-and-test-vectors
func get_initial_beacon_block*(state: altair.HashedBeaconState): func get_initial_beacon_block*(state: altair.HashedBeaconState):
altair.TrustedSignedBeaconBlock = altair.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted # The genesis block is implicitly trusted
@ -419,7 +419,7 @@ func get_initial_beacon_block*(state: altair.HashedBeaconState):
altair.TrustedSignedBeaconBlock( altair.TrustedSignedBeaconBlock(
message: message, root: hash_tree_root(message)) message: message, root: hash_tree_root(message))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#testing
func get_initial_beacon_block*(state: bellatrix.HashedBeaconState): func get_initial_beacon_block*(state: bellatrix.HashedBeaconState):
bellatrix.TrustedSignedBeaconBlock = bellatrix.TrustedSignedBeaconBlock =
# The genesis block is implicitly trusted # The genesis block is implicitly trusted
@ -624,7 +624,7 @@ func get_attesting_indices*(
toSeq(get_attesting_indices_iter(state, data, aggregation_bits, cache)) toSeq(get_attesting_indices_iter(state, data, aggregation_bits, cache))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_attesting_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_attesting_indices
func get_attesting_indices*( func get_attesting_indices*(
state: ForkyBeaconState, data: AttestationData, state: ForkyBeaconState, data: AttestationData,
aggregation_bits: ElectraCommitteeValidatorsBits, committee_bits: auto, aggregation_bits: ElectraCommitteeValidatorsBits, committee_bits: auto,
@ -770,7 +770,7 @@ func check_attestation_index(
Result[CommitteeIndex, cstring] = Result[CommitteeIndex, cstring] =
check_attestation_index(data.index, committees_per_slot) check_attestation_index(data.index, committees_per_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
func get_attestation_participation_flag_indices( func get_attestation_participation_flag_indices(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState, state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState,
data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] = data: AttestationData, inclusion_delay: uint64): set[TimelyFlag] =
@ -1128,7 +1128,7 @@ proc process_attestation*(
ok(proposer_reward) ok(proposer_reward)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_next_sync_committee_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#get_next_sync_committee_indices
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#modified-get_next_sync_committee_indices
func get_next_sync_committee_keys( func get_next_sync_committee_keys(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
@ -1173,7 +1173,7 @@ func get_next_sync_committee_keys(
i += 1'u64 i += 1'u64
res res
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/beacon-chain.md#has_eth1_withdrawal_credential
func has_eth1_withdrawal_credential*(validator: Validator): bool = func has_eth1_withdrawal_credential*(validator: Validator): bool =
## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential. ## Check if ``validator`` has an 0x01 prefixed "eth1" withdrawal credential.
validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX validator.withdrawal_credentials.data[0] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
@ -1195,7 +1195,7 @@ func has_execution_withdrawal_credential*(validator: Validator): bool =
has_compounding_withdrawal_credential(validator) or has_compounding_withdrawal_credential(validator) or
has_eth1_withdrawal_credential(validator) has_eth1_withdrawal_credential(validator)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#is_fully_withdrawable_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/beacon-chain.md#is_fully_withdrawable_validator
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_fully_withdrawable_validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-is_fully_withdrawable_validator
func is_fully_withdrawable_validator( func is_fully_withdrawable_validator(
fork: static ConsensusFork, validator: Validator, balance: Gwei, fork: static ConsensusFork, validator: Validator, balance: Gwei,
@ -1277,21 +1277,60 @@ func get_pending_balance_to_withdraw*(
pending_balance pending_balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#effective-balances-updates
template effective_balance_might_update*(
balance: Gwei, effective_balance: Gwei): bool =
const
HYSTERESIS_INCREMENT =
EFFECTIVE_BALANCE_INCREMENT.Gwei div HYSTERESIS_QUOTIENT
DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
balance + DOWNWARD_THRESHOLD < effective_balance or
effective_balance + UPWARD_THRESHOLD < balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#effective-balances-updates
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#updated-process_effective_balance_updates
template get_effective_balance_update*(
consensusFork: static ConsensusFork, balance: Gwei,
effective_balance: Gwei, vidx: uint64): Gwei =
when consensusFork <= ConsensusFork.Deneb:
min(
balance - balance mod EFFECTIVE_BALANCE_INCREMENT.Gwei,
MAX_EFFECTIVE_BALANCE.Gwei)
else:
debugComment "amortize validator read access"
let effective_balance_limit =
if has_compounding_withdrawal_credential(state.validators.item(vidx)):
MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei
else:
MIN_ACTIVATION_BALANCE.Gwei
min(
balance - balance mod EFFECTIVE_BALANCE_INCREMENT.Gwei,
effective_balance_limit)
template get_updated_effective_balance*(
consensusFork: static ConsensusFork, balance: Gwei,
effective_balance: Gwei, vidx: uint64): Gwei =
if effective_balance_might_update(balance, effective_balance):
get_effective_balance_update(consensusFork, balance, effective_balance, vidx)
else:
balance
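For reference (not part of this diff): with the mainnet preset values EFFECTIVE_BALANCE_INCREMENT = 1 ETH, HYSTERESIS_QUOTIENT = 4, HYSTERESIS_DOWNWARD_MULTIPLIER = 1 and HYSTERESIS_UPWARD_MULTIPLIER = 5, the thresholds above work out to 0.25 ETH downward and 1.25 ETH upward, so the effective balance only moves once the actual balance strays outside that band. A self-contained sketch with those constants inlined (names illustrative):

```nim
# Standalone sketch; the constants are the mainnet preset values, inlined so
# the example compiles without the beacon_chain imports. Gwei is plain uint64
# here rather than the repo's distinct type.
const
  EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000'u64   # 1 ETH, in Gwei
  HYSTERESIS_QUOTIENT = 4'u64
  HYSTERESIS_DOWNWARD_MULTIPLIER = 1'u64
  HYSTERESIS_UPWARD_MULTIPLIER = 5'u64
  HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT div HYSTERESIS_QUOTIENT
  DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER  # 0.25 ETH
  UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER      # 1.25 ETH

func mightUpdate(balance, effectiveBalance: uint64): bool =
  # Mirrors effective_balance_might_update above.
  balance + DOWNWARD_THRESHOLD < effectiveBalance or
    effectiveBalance + UPWARD_THRESHOLD < balance

when isMainModule:
  # 31.9 ETH balance vs 32 ETH effective balance: inside the band, no update.
  doAssert not mightUpdate(31_900_000_000'u64, 32_000_000_000'u64)
  # 31.7 ETH balance: more than 0.25 ETH below, the effective balance drops.
  doAssert mightUpdate(31_700_000_000'u64, 32_000_000_000'u64)
```

The hysteresis keeps effective balances from being recomputed every epoch for validators whose balances fluctuate by small amounts.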
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#new-get_expected_withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#new-get_expected_withdrawals
func get_expected_withdrawals*( template get_expected_withdrawals_aux*(
state: capella.BeaconState | deneb.BeaconState): seq[Withdrawal] = state: capella.BeaconState | deneb.BeaconState, epoch: Epoch,
fetch_balance: untyped): seq[Withdrawal] =
let let
epoch = get_current_epoch(state)
num_validators = lenu64(state.validators) num_validators = lenu64(state.validators)
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
var var
withdrawal_index = state.next_withdrawal_index withdrawal_index = state.next_withdrawal_index
validator_index = state.next_withdrawal_validator_index validator_index {.inject.} = state.next_withdrawal_validator_index
withdrawals: seq[Withdrawal] = @[] withdrawals: seq[Withdrawal] = @[]
for _ in 0 ..< bound: for _ in 0 ..< bound:
let let
validator = state.validators[validator_index] validator = state.validators[validator_index]
balance = state.balances[validator_index] balance = fetch_balance
if is_fully_withdrawable_validator( if is_fully_withdrawable_validator(
typeof(state).kind, validator, balance, epoch): typeof(state).kind, validator, balance, epoch):
var w = Withdrawal( var w = Withdrawal(
@ -1315,13 +1354,20 @@ func get_expected_withdrawals*(
validator_index = (validator_index + 1) mod num_validators validator_index = (validator_index + 1) mod num_validators
withdrawals withdrawals
func get_expected_withdrawals*(
state: capella.BeaconState | deneb.BeaconState): seq[Withdrawal] =
get_expected_withdrawals_aux(state, get_current_epoch(state)) do:
state.balances[validator_index]
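The `_aux` template takes the balance lookup as an `untyped` parameter and marks its sweep variable `{.inject.}`, which is what lets the `do:` block at the call site refer to `validator_index`. A minimal, self-contained sketch of that Nim pattern (the names here are illustrative, not from this diff):

```nim
# Minimal sketch of the inject/do pattern used by get_expected_withdrawals_aux:
# the template owns the sweep variable but marks it {.inject.}, so the untyped
# argument supplied via `do:` at the call site can refer to it by name.
template sumOver(n: int, fetchValue: untyped): int =
  var
    acc = 0
    idx {.inject.} = 0   # visible inside the `fetchValue` body
  while idx < n:
    acc += fetchValue
    inc idx
  acc

func sumBalances(balances: seq[int]): int =
  # The `do:` block plays the role of `state.balances[validator_index]` above.
  sumOver(balances.len) do:
    balances[idx]

when isMainModule:
  doAssert sumBalances(@[3, 5, 7]) == 15
```

The same shape is reused below for the Electra variant, where the injected name also feeds the effective-balance recomputation.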
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-get_expected_withdrawals # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#updated-get_expected_withdrawals
# This partials count is used in exactly one place, while in general being able # This partials count is used in exactly one place, while in general being able
# to cleanly treat the results of get_expected_withdrawals as a seq[Withdrawal] # to cleanly treat the results of get_expected_withdrawals as a seq[Withdrawal]
# is valuable enough to make that the default version of this spec function. # is valuable enough to make that the default version of this spec function.
func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState): template get_expected_withdrawals_with_partial_count_aux*(
state: electra.BeaconState, epoch: Epoch, fetch_balance: untyped):
(seq[Withdrawal], uint64) = (seq[Withdrawal], uint64) =
let epoch = get_current_epoch(state) doAssert epoch - get_current_epoch(state) in [0'u64, 1'u64]
var var
withdrawal_index = state.next_withdrawal_index withdrawal_index = state.next_withdrawal_index
withdrawals: seq[Withdrawal] = @[] withdrawals: seq[Withdrawal] = @[]
@ -1333,15 +1379,30 @@ func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):
break break
let let
validator = state.validators[withdrawal.index] validator = state.validators.item(withdrawal.index)
# Keep a uniform variable name available for injected code
validator_index {.inject.} = withdrawal.index
# Here, can't use the pre-stored effective balance because this template
# might be called on the next slot and therefore next epoch, after which
# the effective balance might have updated.
effective_balance_at_slot =
if epoch == get_current_epoch(state):
validator.effective_balance
else:
get_updated_effective_balance(
typeof(state).kind, fetch_balance, validator.effective_balance,
validator_index)
has_sufficient_effective_balance = has_sufficient_effective_balance =
validator.effective_balance >= static(MIN_ACTIVATION_BALANCE.Gwei) effective_balance_at_slot >= static(MIN_ACTIVATION_BALANCE.Gwei)
has_excess_balance = has_excess_balance = fetch_balance > static(MIN_ACTIVATION_BALANCE.Gwei)
state.balances[withdrawal.index] > static(MIN_ACTIVATION_BALANCE.Gwei)
if validator.exit_epoch == FAR_FUTURE_EPOCH and if validator.exit_epoch == FAR_FUTURE_EPOCH and
has_sufficient_effective_balance and has_excess_balance: has_sufficient_effective_balance and has_excess_balance:
let withdrawable_balance = min( let
state.balances[withdrawal.index] - static(MIN_ACTIVATION_BALANCE.Gwei), withdrawable_balance = min(
fetch_balance - static(MIN_ACTIVATION_BALANCE.Gwei),
withdrawal.amount) withdrawal.amount)
var w = Withdrawal( var w = Withdrawal(
index: withdrawal_index, index: withdrawal_index,
@ -1356,13 +1417,13 @@ func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):
let let
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP) bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
num_validators = lenu64(state.validators) num_validators = lenu64(state.validators)
var validator_index = state.next_withdrawal_validator_index var validator_index {.inject.} = state.next_withdrawal_validator_index
# Sweep for remaining. # Sweep for remaining.
for _ in 0 ..< bound: for _ in 0 ..< bound:
let let
validator = state.validators[validator_index] validator = state.validators.item(validator_index)
balance = state.balances[validator_index] balance = fetch_balance
if is_fully_withdrawable_validator( if is_fully_withdrawable_validator(
typeof(state).kind, validator, balance, epoch): typeof(state).kind, validator, balance, epoch):
var w = Withdrawal( var w = Withdrawal(
@ -1388,6 +1449,12 @@ func get_expected_withdrawals_with_partial_count*(state: electra.BeaconState):
(withdrawals, partial_withdrawals_count) (withdrawals, partial_withdrawals_count)
template get_expected_withdrawals_with_partial_count*(
state: electra.BeaconState): (seq[Withdrawal], uint64) =
get_expected_withdrawals_with_partial_count_aux(
state, get_current_epoch(state)) do:
state.balances.item(validator_index)
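As an aside on the `doAssert epoch - get_current_epoch(state) in [0'u64, 1'u64]` guard near the top of the `_aux` template: because the subtraction is on `uint64`, an epoch earlier than the current one wraps around to a huge value and fails the membership test, so the assert accepts exactly the current and the next epoch. A standalone illustration:

```nim
# Standalone illustration of the unsigned-wraparound guard used above.
func allowed(epoch, currentEpoch: uint64): bool =
  epoch - currentEpoch in [0'u64, 1'u64]

when isMainModule:
  doAssert allowed(10, 10)      # current epoch
  doAssert allowed(11, 10)      # next epoch
  doAssert not allowed(9, 10)   # earlier epoch: 9 - 10 wraps to 2^64 - 1
  doAssert not allowed(12, 10)  # too far ahead
```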
func get_expected_withdrawals*(state: electra.BeaconState): seq[Withdrawal] = func get_expected_withdrawals*(state: electra.BeaconState): seq[Withdrawal] =
get_expected_withdrawals_with_partial_count(state)[0] get_expected_withdrawals_with_partial_count(state)[0]
@ -1513,7 +1580,7 @@ proc initialize_hashed_beacon_state_from_eth1*(
cfg, eth1_block_hash, eth1_timestamp, deposits, flags)) cfg, eth1_block_hash, eth1_timestamp, deposits, flags))
result.root = hash_tree_root(result.data) result.root = hash_tree_root(result.data)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#testing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/beacon-chain.md#testing
proc initialize_beacon_state_from_eth1*( proc initialize_beacon_state_from_eth1*(
@ -1866,7 +1933,7 @@ func upgrade_to_capella*(cfg: RuntimeConfig, pre: bellatrix.BeaconState):
# historical_summaries initialized to correct default automatically # historical_summaries initialized to correct default automatically
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/fork.md#upgrading-the-state # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/fork.md#upgrading-the-state
func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState): func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
ref deneb.BeaconState = ref deneb.BeaconState =
let let
@ -1951,7 +2018,7 @@ func upgrade_to_deneb*(cfg: RuntimeConfig, pre: capella.BeaconState):
historical_summaries: pre.historical_summaries historical_summaries: pre.historical_summaries
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/fork.md#upgrading-the-state # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/fork.md#upgrading-the-state
func upgrade_to_electra*( func upgrade_to_electra*(
cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache): cfg: RuntimeConfig, pre: deneb.BeaconState, cache: var StateCache):
ref electra.BeaconState = ref electra.BeaconState =

View File

@ -245,14 +245,14 @@ proc blsVerify*(
# Guard against invalid signature blobs that fail to parse # Guard against invalid signature blobs that fail to parse
parsedSig.isSome() and blsVerify(pubkey, message, parsedSig.get()) parsedSig.isSome() and blsVerify(pubkey, message, parsedSig.get())
func blsVerify*(sigSet: SignatureSet): bool = func blsVerify*(sigset: SignatureSet): bool =
## Unbatched verification ## Unbatched verification
## of 1 SignatureSet ## of 1 SignatureSet
## tuple[pubkey: blscurve.PublicKey, message: array[32, byte], signature: blscurve.Signature] ## tuple[pubkey: blscurve.PublicKey, message: array[32, byte], signature: blscurve.Signature]
verify( verify(
sigSet.pubkey, sigset.pubkey,
sigSet.message, sigset.message,
sigSet.signature sigset.signature
) )
func blsSign*(privkey: ValidatorPrivKey, message: openArray[byte]): CookedSig = func blsSign*(privkey: ValidatorPrivKey, message: openArray[byte]): CookedSig =

View File

@ -51,7 +51,7 @@ const
PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] = PARTICIPATION_FLAG_WEIGHTS*: array[TimelyFlag, uint64] =
[uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT] [uint64 TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#misc # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#misc
TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16 TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE* = 16
SYNC_COMMITTEE_SUBNET_COUNT* = 4 SYNC_COMMITTEE_SUBNET_COUNT* = 4
@ -78,7 +78,7 @@ static: doAssert TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT +
type type
### New types ### New types
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#synccommitteemessage # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#synccommitteemessage
SyncCommitteeMessage* = object SyncCommitteeMessage* = object
slot*: Slot slot*: Slot
## Slot to which this contribution pertains ## Slot to which this contribution pertains
@ -92,7 +92,7 @@ type
signature*: ValidatorSig signature*: ValidatorSig
## Signature by the validator over the block root of `slot` ## Signature by the validator over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#synccommitteecontribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#synccommitteecontribution
SyncCommitteeAggregationBits* = SyncCommitteeAggregationBits* =
BitArray[SYNC_SUBCOMMITTEE_SIZE] BitArray[SYNC_SUBCOMMITTEE_SIZE]
@ -114,18 +114,18 @@ type
signature*: ValidatorSig signature*: ValidatorSig
## Signature by the validator(s) over the block root of `slot` ## Signature by the validator(s) over the block root of `slot`
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#contributionandproof # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#contributionandproof
ContributionAndProof* = object ContributionAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation aggregator_index*: uint64 # `ValidatorIndex` after validation
contribution*: SyncCommitteeContribution contribution*: SyncCommitteeContribution
selection_proof*: ValidatorSig selection_proof*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#signedcontributionandproof # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#signedcontributionandproof
SignedContributionAndProof* = object SignedContributionAndProof* = object
message*: ContributionAndProof message*: ContributionAndProof
signature*: ValidatorSig signature*: ValidatorSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#syncaggregatorselectiondata # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#syncaggregatorselectiondata
SyncAggregatorSelectionData* = object SyncAggregatorSelectionData* = object
slot*: Slot slot*: Slot
subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation subcommittee_index*: uint64 # `SyncSubcommitteeIndex` after validation

View File

@ -76,7 +76,7 @@ export
tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto, tables, results, endians2, json_serialization, sszTypes, beacon_time, crypto,
digest, presets, kzg4844 digest, presets, kzg4844
const SPEC_VERSION* = "1.5.0-alpha.3" const SPEC_VERSION* = "1.5.0-alpha.5"
## Spec version we're aiming to be compatible with, right now ## Spec version we're aiming to be compatible with, right now
const const
@ -312,7 +312,7 @@ type
HashedValidatorPubKey* = object HashedValidatorPubKey* = object
value*: ptr HashedValidatorPubKeyItem value*: ptr HashedValidatorPubKeyItem
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#validator # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#validator
Validator* = object Validator* = object
pubkeyData*{.serializedFieldName: "pubkey".}: HashedValidatorPubKey pubkeyData*{.serializedFieldName: "pubkey".}: HashedValidatorPubKey
@ -334,7 +334,7 @@ type
withdrawable_epoch*: Epoch withdrawable_epoch*: Epoch
## When validator can withdraw funds ## When validator can withdraw funds
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#pendingattestation # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#pendingattestation
PendingAttestation* = object PendingAttestation* = object
aggregation_bits*: CommitteeValidatorsBits aggregation_bits*: CommitteeValidatorsBits
data*: AttestationData data*: AttestationData
@ -343,7 +343,7 @@ type
proposer_index*: uint64 # `ValidatorIndex` after validation proposer_index*: uint64 # `ValidatorIndex` after validation
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#historicalbatch
HistoricalBatch* = object HistoricalBatch* = object
block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] block_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] state_roots* : array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
@ -380,7 +380,7 @@ type
sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE] sync_committee_bits*: BitArray[SYNC_COMMITTEE_SIZE]
sync_committee_signature*: TrustedSig sync_committee_signature*: TrustedSig
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#custom-types # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#custom-types
Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION] Transaction* = List[byte, Limit MAX_BYTES_PER_TRANSACTION]
ExecutionAddress* = object ExecutionAddress* = object
@ -440,7 +440,7 @@ type
block_summary_root*: Eth2Digest block_summary_root*: Eth2Digest
state_summary_root*: Eth2Digest state_summary_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/82133085a1295e93394ebdf71df8f2f6e0962588/specs/electra/beacon-chain.md#depositreceipt # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#pendingbalancedeposit
PendingBalanceDeposit* = object PendingBalanceDeposit* = object
index*: uint64 index*: uint64
amount*: Gwei amount*: Gwei
@ -472,7 +472,7 @@ type
pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey] pubkeys*: HashArray[Limit SYNC_COMMITTEE_SIZE, ValidatorPubKey]
aggregate_pubkey*: ValidatorPubKey aggregate_pubkey*: ValidatorPubKey
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#beaconblockheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#beaconblockheader
BeaconBlockHeader* = object BeaconBlockHeader* = object
slot*: Slot slot*: Slot
proposer_index*: uint64 # `ValidatorIndex` after validation proposer_index*: uint64 # `ValidatorIndex` after validation
@ -480,7 +480,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body_root*: Eth2Digest body_root*: Eth2Digest
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#signingdata # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#signingdata
SigningData* = object SigningData* = object
object_root*: Eth2Digest object_root*: Eth2Digest
domain*: Eth2Domain domain*: Eth2Domain
@ -509,7 +509,7 @@ type
sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache] sync_committees*: Table[SyncCommitteePeriod, SyncCommitteeCache]
# This matches the mutable state of the Solidity deposit contract # This matches the mutable state of the Solidity deposit contract
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/solidity_deposit_contract/deposit_contract.sol # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/solidity_deposit_contract/deposit_contract.sol
DepositContractState* = object DepositContractState* = object
branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] branch*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest]
deposit_count*: array[32, byte] # Uint256 deposit_count*: array[32, byte] # Uint256

View File

@ -35,7 +35,7 @@ const
NEWPAYLOAD_TIMEOUT* = 8.seconds NEWPAYLOAD_TIMEOUT* = 8.seconds
type type
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayload # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#executionpayload
ExecutionPayload* = object ExecutionPayload* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
@ -63,7 +63,7 @@ type
executionPayload*: ExecutionPayload executionPayload*: ExecutionPayload
blockValue*: Wei blockValue*: Wei
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#executionpayloadheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#executionpayloadheader
ExecutionPayloadHeader* = object ExecutionPayloadHeader* = object
# Execution block header fields # Execution block header fields
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
@ -93,7 +93,7 @@ type
parent_hash*: Eth2Digest parent_hash*: Eth2Digest
total_difficulty*: Eth2Digest # uint256 total_difficulty*: Eth2Digest # uint256
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconstate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconstate
BeaconState* = object BeaconState* = object
# Versioning # Versioning
genesis_time*: uint64 genesis_time*: uint64
@ -218,7 +218,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body*: TrustedBeaconBlockBody body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object BeaconBlockBody* = object
randao_reveal*: ValidatorSig randao_reveal*: ValidatorSig
eth1_data*: Eth1Data eth1_data*: Eth1Data

View File

@ -32,7 +32,7 @@ const
# This index is rooted in `BeaconBlockBody`. # This index is rooted in `BeaconBlockBody`.
# The first member (`randao_reveal`) is 16, subsequent members +1 each. # The first member (`randao_reveal`) is 16, subsequent members +1 each.
# If there are ever more than 16 members in `BeaconBlockBody`, indices change! # If there are ever more than 16 members in `BeaconBlockBody`, indices change!
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/ssz/merkle-proofs.md # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/ssz/merkle-proofs.md
# execution_payload # execution_payload
EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex EXECUTION_PAYLOAD_GINDEX* = 25.GeneralizedIndex
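For reference, the value 25 follows from SSZ generalized indices: a container whose field count rounds up to 16 leaf chunks places field i (0-based) at gindex 16 + i, and `execution_payload` is the 10th field (index 9) of the Bellatrix `BeaconBlockBody`. A small sketch of that arithmetic (field list per the Bellatrix spec; helper name is illustrative):

```nim
# Sketch: in SSZ, a container whose field count rounds up to 16 leaf chunks
# places field i (0-based) at generalized index 16 + i. execution_payload is
# field 9 of the Bellatrix BeaconBlockBody, hence gindex 25.
const bellatrixBodyFields = [
  "randao_reveal", "eth1_data", "graffiti", "proposer_slashings",
  "attester_slashings", "attestations", "deposits", "voluntary_exits",
  "sync_aggregate", "execution_payload"]

func gindexOf(fields: openArray[string], name: string): int =
  for i, f in fields:
    if f == name:
      return 16 + i
  -1

when isMainModule:
  doAssert gindexOf(bellatrixBodyFields, "execution_payload") == 25
```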
@ -96,7 +96,7 @@ type
ExecutionBranch* = ExecutionBranch* =
array[log2trunc(EXECUTION_PAYLOAD_GINDEX), Eth2Digest] array[log2trunc(EXECUTION_PAYLOAD_GINDEX), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/light-client/sync-protocol.md#modified-lightclientheader
LightClientHeader* = object LightClientHeader* = object
beacon*: BeaconBlockHeader beacon*: BeaconBlockHeader
## Beacon block header ## Beacon block header
@ -330,7 +330,7 @@ type
state_root*: Eth2Digest state_root*: Eth2Digest
body*: TrustedBeaconBlockBody body*: TrustedBeaconBlockBody
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/capella/beacon-chain.md#beaconblockbody # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/beacon-chain.md#beaconblockbody
BeaconBlockBody* = object BeaconBlockBody* = object
randao_reveal*: ValidatorSig randao_reveal*: ValidatorSig
eth1_data*: Eth1Data eth1_data*: Eth1Data
@ -658,7 +658,7 @@ func upgrade_lc_bootstrap_to_capella*(
current_sync_committee: pre.current_sync_committee, current_sync_committee: pre.current_sync_committee,
current_sync_committee_branch: pre.current_sync_committee_branch) current_sync_committee_branch: pre.current_sync_committee_branch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/light-client/fork.md#upgrading-light-client-data # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/light-client/fork.md#upgrading-light-client-data
func upgrade_lc_update_to_capella*( func upgrade_lc_update_to_capella*(
pre: altair.LightClientUpdate): LightClientUpdate = pre: altair.LightClientUpdate): LightClientUpdate =
LightClientUpdate( LightClientUpdate(

View File

@ -87,5 +87,5 @@ const
UNSET_DEPOSIT_REQUESTS_START_INDEX*: uint64 = not 0'u64 UNSET_DEPOSIT_REQUESTS_START_INDEX*: uint64 = not 0'u64
FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0 FULL_EXIT_REQUEST_AMOUNT*: uint64 = 0
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#withdrawal-prefixes # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#withdrawal-prefixes
COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02 COMPOUNDING_WITHDRAWAL_PREFIX* = 0x02

View File

@ -77,7 +77,7 @@ type
kzg_commitment*: KzgCommitment kzg_commitment*: KzgCommitment
versioned_hash*: string # TODO should be string; VersionedHash not distinct versioned_hash*: string # TODO should be string; VersionedHash not distinct
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/p2p-interface.md#blobidentifier # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/p2p-interface.md#blobidentifier
BlobIdentifier* = object BlobIdentifier* = object
block_root*: Eth2Digest block_root*: Eth2Digest
index*: BlobIndex index*: BlobIndex
@ -168,7 +168,7 @@ type
## Current sync committee corresponding to `header.beacon.state_root` ## Current sync committee corresponding to `header.beacon.state_root`
current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch current_sync_committee_branch*: altair.CurrentSyncCommitteeBranch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientupdate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientupdate
LightClientUpdate* = object LightClientUpdate* = object
attested_header*: LightClientHeader attested_header*: LightClientHeader
## Header attested to by the sync committee ## Header attested to by the sync committee
@ -467,7 +467,7 @@ type
bls_to_execution_changes*: SignedBLSToExecutionChangeList bls_to_execution_changes*: SignedBLSToExecutionChangeList
blob_kzg_commitments*: KzgCommitments # [New in Deneb] blob_kzg_commitments*: KzgCommitments # [New in Deneb]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#signedbeaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#signedbeaconblock
SignedBeaconBlock* = object SignedBeaconBlock* = object
message*: BeaconBlock message*: BeaconBlock
signature*: ValidatorSig signature*: ValidatorSig
@ -627,7 +627,7 @@ func kzg_commitment_inclusion_proof_gindex*(
BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index BLOB_KZG_COMMITMENTS_FIRST_GINDEX + index
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/light-client/sync-protocol.md#modified-get_lc_execution_root
func get_lc_execution_root*( func get_lc_execution_root*(
header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest = header: LightClientHeader, cfg: RuntimeConfig): Eth2Digest =
let epoch = header.beacon.slot.epoch let epoch = header.beacon.slot.epoch
@ -658,7 +658,7 @@ func get_lc_execution_root*(
ZERO_HASH ZERO_HASH
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/deneb/light-client/sync-protocol.md#modified-is_valid_light_client_header
func is_valid_light_client_header*( func is_valid_light_client_header*(
header: LightClientHeader, cfg: RuntimeConfig): bool = header: LightClientHeader, cfg: RuntimeConfig): bool =
let epoch = header.beacon.slot.epoch let epoch = header.beacon.slot.epoch

View File

@ -180,7 +180,7 @@ type
ExecutionBranch* = ExecutionBranch* =
array[log2trunc(EXECUTION_PAYLOAD_GINDEX_ELECTRA), Eth2Digest] array[log2trunc(EXECUTION_PAYLOAD_GINDEX_ELECTRA), Eth2Digest]
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#aggregateandproof # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#aggregateandproof
AggregateAndProof* = object AggregateAndProof* = object
aggregator_index*: uint64 # `ValidatorIndex` after validation aggregator_index*: uint64 # `ValidatorIndex` after validation
aggregate*: Attestation aggregate*: Attestation
@ -237,7 +237,7 @@ type
signature_slot*: Slot signature_slot*: Slot
## Slot at which the aggregate signature was created (untrusted) ## Slot at which the aggregate signature was created (untrusted)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#lightclientfinalityupdate
LightClientFinalityUpdate* = object LightClientFinalityUpdate* = object
# Header attested to by the sync committee # Header attested to by the sync committee
attested_header*: LightClientHeader attested_header*: LightClientHeader
@ -394,7 +394,7 @@ type
data*: BeaconState data*: BeaconState
root*: Eth2Digest # hash_tree_root(data) root*: Eth2Digest # hash_tree_root(data)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#beaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#beaconblock
BeaconBlock* = object BeaconBlock* = object
## For each slot, a proposer is chosen from the validator pool to propose ## For each slot, a proposer is chosen from the validator pool to propose
## a new block. Once the block has been proposed, it is transmitted to ## a new block. Once the block has been proposed, it is transmitted to
@ -857,7 +857,7 @@ func upgrade_lc_header_to_electra*(
transactions_root: pre.execution.transactions_root, transactions_root: pre.execution.transactions_root,
withdrawals_root: pre.execution.withdrawals_root, withdrawals_root: pre.execution.withdrawals_root,
blob_gas_used: pre.execution.blob_gas_used, blob_gas_used: pre.execution.blob_gas_used,
excess_blob_gas: pre.execution.blob_gas_used, excess_blob_gas: pre.execution.excess_blob_gas,
deposit_requests_root: ZERO_HASH, # [New in Electra:EIP6110] deposit_requests_root: ZERO_HASH, # [New in Electra:EIP6110]
withdrawal_requests_root: ZERO_HASH, # [New in Electra:EIP7002:EIP7251] withdrawal_requests_root: ZERO_HASH, # [New in Electra:EIP7002:EIP7251]
consolidation_requests_root: ZERO_HASH), # [New in Electra:EIP7251] consolidation_requests_root: ZERO_HASH), # [New in Electra:EIP7251]

View File

@ -601,7 +601,7 @@ type
extra_data*: Option[RestNodeExtraData] extra_data*: Option[RestNodeExtraData]
RestExtraData* = object RestExtraData* = object
version*: Option[string] discard
GetForkChoiceResponse* = object GetForkChoiceResponse* = object
justified_checkpoint*: Checkpoint justified_checkpoint*: Checkpoint

View File

@ -169,20 +169,6 @@ type
of LightClientDataFork.Electra: of LightClientDataFork.Electra:
electraData*: electra.LightClientStore electraData*: electra.LightClientStore
func lcDataForkAtEpoch*(
cfg: RuntimeConfig, epoch: Epoch): LightClientDataFork =
static: doAssert LightClientDataFork.high == LightClientDataFork.Electra
if epoch >= cfg.ELECTRA_FORK_EPOCH:
LightClientDataFork.Electra
elif epoch >= cfg.DENEB_FORK_EPOCH:
LightClientDataFork.Deneb
elif epoch >= cfg.CAPELLA_FORK_EPOCH:
LightClientDataFork.Capella
elif epoch >= cfg.ALTAIR_FORK_EPOCH:
LightClientDataFork.Altair
else:
LightClientDataFork.None
template kind*( template kind*(
# `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095 # `SomeLightClientObject`: https://github.com/nim-lang/Nim/issues/18095
x: typedesc[ x: typedesc[

View File

@ -25,7 +25,7 @@ import
export export
eth2_merkleization, forks, rlp, ssz_codec eth2_merkleization, forks, rlp, ssz_codec
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#constants # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#constants
const ETH_TO_GWEI = 1_000_000_000.Gwei const ETH_TO_GWEI = 1_000_000_000.Gwei
func toEther*(gwei: Gwei): Ether = func toEther*(gwei: Gwei): Ether =
@ -162,7 +162,7 @@ func compute_domain*(
result[0..3] = domain_type.data result[0..3] = domain_type.data
result[4..31] = fork_data_root.data.toOpenArray(0, 27) result[4..31] = fork_data_root.data.toOpenArray(0, 27)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_domain # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_domain
func get_domain*( func get_domain*(
fork: Fork, fork: Fork,
domain_type: DomainType, domain_type: DomainType,
@ -264,7 +264,7 @@ func create_blob_sidecars*(
res.add(sidecar) res.add(sidecar)
res res
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/sync-protocol.md#is_sync_committee_update # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#is_sync_committee_update
template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool = template is_sync_committee_update*(update: SomeForkyLightClientUpdate): bool =
when update is SomeForkyLightClientUpdateWithSyncCommittee: when update is SomeForkyLightClientUpdateWithSyncCommittee:
update.next_sync_committee_branch != update.next_sync_committee_branch !=
@ -393,7 +393,7 @@ func contextEpoch*(bootstrap: ForkyLightClientBootstrap): Epoch =
func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch = func contextEpoch*(update: SomeForkyLightClientUpdate): Epoch =
update.attested_header.beacon.slot.epoch update.attested_header.beacon.slot.epoch
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_complete # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#is_merge_transition_complete
func is_merge_transition_complete*( func is_merge_transition_complete*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState): bool = electra.BeaconState): bool =
@ -401,7 +401,7 @@ func is_merge_transition_complete*(
default(typeof(state.latest_execution_payload_header)) default(typeof(state.latest_execution_payload_header))
state.latest_execution_payload_header != defaultExecutionPayloadHeader state.latest_execution_payload_header != defaultExecutionPayloadHeader
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/sync/optimistic.md#helpers # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/sync/optimistic.md#helpers
func is_execution_block*(blck: SomeForkyBeaconBlock): bool = func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
when typeof(blck).kind >= ConsensusFork.Bellatrix: when typeof(blck).kind >= ConsensusFork.Bellatrix:
const defaultExecutionPayload = const defaultExecutionPayload =
@ -410,7 +410,7 @@ func is_execution_block*(blck: SomeForkyBeaconBlock): bool =
else: else:
false false
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_merge_transition_block # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#is_merge_transition_block
func is_merge_transition_block( func is_merge_transition_block(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState, electra.BeaconState,
@ -426,7 +426,7 @@ func is_merge_transition_block(
not is_merge_transition_complete(state) and not is_merge_transition_complete(state) and
body.execution_payload != defaultExecutionPayload body.execution_payload != defaultExecutionPayload
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#is_execution_enabled # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#is_execution_enabled
func is_execution_enabled*( func is_execution_enabled*(
state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState | state: bellatrix.BeaconState | capella.BeaconState | deneb.BeaconState |
electra.BeaconState, electra.BeaconState,
@ -440,7 +440,7 @@ func is_execution_enabled*(
electra.SigVerifiedBeaconBlockBody): bool = electra.SigVerifiedBeaconBlockBody): bool =
is_merge_transition_block(state, body) or is_merge_transition_complete(state) is_merge_transition_block(state, body) or is_merge_transition_complete(state)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#compute_timestamp_at_slot
func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 = func compute_timestamp_at_slot*(state: ForkyBeaconState, slot: Slot): uint64 =
# Note: This function is unsafe with respect to overflows and underflows. # Note: This function is unsafe with respect to overflows and underflows.
let slots_since_genesis = slot - GENESIS_SLOT let slots_since_genesis = slot - GENESIS_SLOT

View File

@ -0,0 +1,46 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import
std/typetraits,
eth/common/eth_types_rlp,
"."/[helpers, state_transition_block]
func readExecutionTransaction(
txBytes: bellatrix.Transaction): Result[ExecutionTransaction, string] =
# Nim 2.0.8: `rlp.decode(distinctBase(txBytes), ExecutionTransaction)`
# uses the generic `read` from `rlp.nim` instead of the specific `read`
# from `eth_types_rlp.nim`, leading to compilation error.
# Doing this in two steps works around this resolution order issue.
var rlp = rlpFromBytes(distinctBase(txBytes))
try:
ok rlp.read(ExecutionTransaction)
except RlpError as exc:
err("Invalid transaction: " & exc.msg)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.4/specs/deneb/beacon-chain.md#is_valid_versioned_hashes
func is_valid_versioned_hashes*(blck: ForkyBeaconBlock): Result[void, string] =
static: doAssert typeof(blck).kind >= ConsensusFork.Deneb
template transactions: untyped = blck.body.execution_payload.transactions
template commitments: untyped = blck.body.blob_kzg_commitments
var i = 0
for txBytes in transactions:
if txBytes.len == 0 or txBytes[0] != TxEip4844.byte:
continue # Only blob transactions may have blobs
let tx = ? txBytes.readExecutionTransaction()
for vHash in tx.versionedHashes:
if commitments.len <= i:
return err("Extra blobs without matching `blob_kzg_commitments`")
if vHash.data != kzg_commitment_to_versioned_hash(commitments[i]):
return err("Invalid `blob_versioned_hash` at index " & $i)
inc i
if i != commitments.len:
return err("Extra `blob_kzg_commitments` without matching blobs")
ok()
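For context on what the commitments are compared against: per EIP-4844, a versioned hash is the SHA-256 of the 48-byte KZG commitment with its first byte replaced by the version tag 0x01, which is what `kzg_commitment_to_versioned_hash` computes. A rough standalone sketch, assuming nimcrypto for SHA-256 (the real code path uses the repo's own digest helpers; names here are illustrative):

```nim
# Rough sketch of kzg_commitment_to_versioned_hash (EIP-4844): SHA-256 of the
# 48-byte commitment with the first byte replaced by the 0x01 version tag.
# Assumes nimcrypto for SHA-256; the real code uses the repo's digest helpers.
import nimcrypto

const VERSIONED_HASH_VERSION_KZG = 0x01'u8

proc kzgCommitmentToVersionedHash(commitment: array[48, byte]): array[32, byte] =
  result = sha256.digest(commitment).data
  result[0] = VERSIONED_HASH_VERSION_KZG

when isMainModule:
  let commitment = default(array[48, byte])   # placeholder, all zeroes
  let vhash = kzgCommitmentToVersionedHash(commitment)
  doAssert vhash[0] == VERSIONED_HASH_VERSION_KZG
```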

View File

@ -1380,13 +1380,13 @@ proc createWallet*(kdfKind: KdfKind,
crypto: crypto, crypto: crypto,
nextAccount: nextAccount.get(0)) nextAccount: nextAccount.get(0))
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#bls_withdrawal_prefix # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#bls_withdrawal_prefix
func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest = func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
var bytes = eth2digest(k.toRaw()) var bytes = eth2digest(k.toRaw())
bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8 bytes.data[0] = BLS_WITHDRAWAL_PREFIX.uint8
bytes bytes
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/deposit-contract.md#withdrawal-credentials # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/deposit-contract.md#withdrawal-credentials
func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest = func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
makeWithdrawalCredentials(k.toPubKey()) makeWithdrawalCredentials(k.toPubKey())

View File

@ -14,8 +14,8 @@ import
export base export base
const const
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#topics-and-messages
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/p2p-interface.md#topics-and-messages
topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy" topicBeaconBlocksSuffix* = "beacon_block/ssz_snappy"
topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy" topicVoluntaryExitsSuffix* = "voluntary_exit/ssz_snappy"
topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy" topicProposerSlashingsSuffix* = "proposer_slashing/ssz_snappy"
@ -27,7 +27,7 @@ const
# The spec now includes this as a bare uint64 as `RESP_TIMEOUT` # The spec now includes this as a bare uint64 as `RESP_TIMEOUT`
RESP_TIMEOUT_DUR* = RESP_TIMEOUT.int64.seconds RESP_TIMEOUT_DUR* = RESP_TIMEOUT.int64.seconds
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#configuration
MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128 MAX_REQUEST_LIGHT_CLIENT_UPDATES* = 128
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#configuration
@ -63,7 +63,7 @@ func getAttesterSlashingsTopic*(forkDigest: ForkDigest): string =
func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string = func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix eth2Prefix(forkDigest) & topicAggregateAndProofsSuffix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/p2p-interface.md#topics-and-messages # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/capella/p2p-interface.md#topics-and-messages
func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string = func getBlsToExecutionChangeTopic*(forkDigest: ForkDigest): string =
eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix eth2Prefix(forkDigest) & topicBlsToExecutionChangeSuffix
@ -197,7 +197,7 @@ func getTargetGossipState*(
targetForks targetForks
func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] = func nearSyncCommitteePeriod*(epoch: Epoch): Opt[uint64] =
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#sync-committee-subnet-stability # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#sync-committee-subnet-stability
if epoch.is_sync_committee_period(): if epoch.is_sync_committee_period():
return Opt.some 0'u64 return Opt.some 0'u64
let epochsBefore = let epochsBefore =
@ -216,7 +216,7 @@ func getSyncSubnets*(
if not nodeHasPubkey(pubkey): if not nodeHasPubkey(pubkey):
continue continue
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#broadcast-sync-committee-message
# The first quarter of the pubkeys map to subnet 0, the second quarter to # The first quarter of the pubkeys map to subnet 0, the second quarter to
# subnet 1, the third quarter to subnet 2 and the final quarter to subnet # subnet 1, the third quarter to subnet 2 and the final quarter to subnet
# 3. # 3.
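Concretely, with the mainnet values SYNC_COMMITTEE_SIZE = 512 and SYNC_COMMITTEE_SUBNET_COUNT = 4, the quarter mapping described above is position div 128. A minimal sketch (constants inlined; function name is illustrative):

```nim
# Sketch of the quarter mapping described above, with mainnet constants inlined.
const
  SYNC_COMMITTEE_SIZE = 512
  SYNC_COMMITTEE_SUBNET_COUNT = 4

func subnetForPosition(pos: int): int =
  # First quarter -> subnet 0, second -> 1, third -> 2, last -> 3.
  pos div (SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT)

when isMainModule:
  doAssert subnetForPosition(0) == 0
  doAssert subnetForPosition(127) == 0
  doAssert subnetForPosition(128) == 1
  doAssert subnetForPosition(511) == 3
```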

View File

@ -787,7 +787,7 @@ proc readRuntimeConfig*(
"MAX_REQUEST_BLOB_SIDECARS" "MAX_REQUEST_BLOB_SIDECARS"
checkCompatibility BLOB_SIDECAR_SUBNET_COUNT checkCompatibility BLOB_SIDECAR_SUBNET_COUNT
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/fork-choice.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/fork-choice.md#configuration
# Isn't being used as a preset in the usual way: at any time, there's one correct value # Isn't being used as a preset in the usual way: at any time, there's one correct value
checkCompatibility PROPOSER_SCORE_BOOST checkCompatibility PROPOSER_SCORE_BOOST
checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD checkCompatibility REORG_HEAD_WEIGHT_THRESHOLD

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Gnosis preset - Electra (Gnosis version not available yet; EF mainnet for now) # Gnosis preset - Electra (Gnosis version not available yet; EF mainnet for now)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/electra.yaml
const const
# Gwei values # Gwei values
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Altair # Mainnet preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/altair.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Bellatrix # Mainnet preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/bellatrix.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------

View File

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Mainnet preset - Capella # Mainnet preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/capella.yaml
const const
# Max operations per block # Max operations per block
# --------------------------------------------------------------- # ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Electra preset - Electra # Electra preset - Electra
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/mainnet/electra.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/mainnet/electra.yaml
const const
# Gwei values # Gwei values
# --------------------------------------------------------------- # ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Altair # Minimal preset - Altair
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/altair.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/minimal/altair.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Bellatrix # Minimal preset - Bellatrix
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/bellatrix.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/minimal/bellatrix.yaml
const const
# Updated penalty values # Updated penalty values
# --------------------------------------------------------------- # ---------------------------------------------------------------

@ -8,7 +8,7 @@
{.push raises: [].} {.push raises: [].}
# Minimal preset - Capella # Minimal preset - Capella
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/presets/minimal/capella.yaml # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/presets/minimal/capella.yaml
const const
# Max operations per block # Max operations per block
# --------------------------------------------------------------- # ---------------------------------------------------------------

@ -143,7 +143,7 @@ func compute_attestation_signing_root*(
fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root) fork, DOMAIN_BEACON_ATTESTER, epoch, genesis_validators_root)
compute_signing_root(attestation_data, domain) compute_signing_root(attestation_data, domain)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregate-signature # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#aggregate-signature
func get_attestation_signature*( func get_attestation_signature*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
attestation_data: AttestationData, attestation_data: AttestationData,
@ -269,7 +269,7 @@ proc verify_voluntary_exit_signature*(
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#prepare-sync-committee-message
func compute_sync_committee_message_signing_root*( func compute_sync_committee_message_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest = slot: Slot, beacon_block_root: Eth2Digest): Eth2Digest =
@ -304,7 +304,7 @@ proc verify_sync_committee_signature*(
blsFastAggregateVerify(pubkeys, signing_root.data, signature) blsFastAggregateVerify(pubkeys, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#aggregation-selection
func compute_sync_committee_selection_proof_signing_root*( func compute_sync_committee_selection_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest = slot: Slot, subcommittee_index: SyncSubcommitteeIndex): Eth2Digest =
@ -335,7 +335,7 @@ proc verify_sync_committee_selection_proof*(
blsVerify(pubkey, signing_root.data, signature) blsVerify(pubkey, signing_root.data, signature)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#signature # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#signature
func compute_contribution_and_proof_signing_root*( func compute_contribution_and_proof_signing_root*(
fork: Fork, genesis_validators_root: Eth2Digest, fork: Fork, genesis_validators_root: Eth2Digest,
msg: ContributionAndProof): Eth2Digest = msg: ContributionAndProof): Eth2Digest =
@ -353,7 +353,7 @@ proc get_contribution_and_proof_signature*(
blsSign(privkey, signing_root.data) blsSign(privkey, signing_root.data)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#aggregation-selection
func is_sync_committee_aggregator*(signature: ValidatorSig): bool = func is_sync_committee_aggregator*(signature: ValidatorSig): bool =
let let
signatureDigest = eth2digest(signature.blob) signatureDigest = eth2digest(signature.blob)
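For context, is_sync_committee_aggregator selects aggregators by hashing the signature and testing divisibility of the digest's first eight bytes. A hedged sketch under assumed mainnet preset constants, with the eth2digest step stubbed out:

```nim
# Hedged sketch of the selection rule is_sync_committee_aggregator implements:
# interpret the first 8 bytes of eth2digest(signature.blob) as a little-endian
# uint64 and test divisibility by the modulo below. Constants are assumed
# mainnet preset values.
const
  SYNC_COMMITTEE_SIZE = 512'u64
  SYNC_COMMITTEE_SUBNET_COUNT = 4'u64
  TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE = 16'u64

func isAggregatorCandidate(firstEightDigestBytesLE: uint64): bool =
  let modulo = max(1'u64,
    SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT div
      TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
  firstEightDigestBytesLE mod modulo == 0

when isMainModule:
  doAssert isAggregatorCandidate(16)      # 512 div 4 div 16 == 8; 16 mod 8 == 0
  doAssert not isAggregatorCandidate(13)
```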

@ -83,7 +83,7 @@ func aggregateAttesters(
# Aggregation spec requires non-empty collection # Aggregation spec requires non-empty collection
# - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04 # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
# Consensus specs require at least one attesting index in attestation # Consensus specs require at least one attesting index in attestation
# - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#is_valid_indexed_attestation # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
return err("aggregateAttesters: no attesting indices") return err("aggregateAttesters: no attesting indices")
let let

@ -365,7 +365,7 @@ func partialBeaconBlock*(
): auto = ): auto =
const consensusFork = typeof(state).kind const consensusFork = typeof(state).kind
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#preparing-for-a-beaconblock # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#preparing-for-a-beaconblock
var res = consensusFork.BeaconBlock( var res = consensusFork.BeaconBlock(
slot: state.data.slot, slot: state.data.slot,
proposer_index: proposer_index.uint64, proposer_index: proposer_index.uint64,

@ -10,7 +10,7 @@
# State transition - block processing, as described in # State transition - block processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/capella/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#block-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#block-processing
@ -82,7 +82,7 @@ func `xor`[T: array](a, b: T): T =
for i in 0..<result.len: for i in 0..<result.len:
result[i] = a[i] xor b[i] result[i] = a[i] xor b[i]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#randao # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#randao
proc process_randao( proc process_randao(
state: var ForkyBeaconState, body: SomeForkyBeaconBlockBody, state: var ForkyBeaconState, body: SomeForkyBeaconBlockBody,
flags: UpdateFlags, cache: var StateCache): Result[void, cstring] = flags: UpdateFlags, cache: var StateCache): Result[void, cstring] =
@ -135,7 +135,7 @@ func is_slashable_validator(validator: Validator, epoch: Epoch): bool =
(validator.activation_epoch <= epoch) and (validator.activation_epoch <= epoch) and
(epoch < validator.withdrawable_epoch) (epoch < validator.withdrawable_epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#proposer-slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#proposer-slashings
proc check_proposer_slashing*( proc check_proposer_slashing*(
state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing, state: ForkyBeaconState, proposer_slashing: SomeProposerSlashing,
flags: UpdateFlags): flags: UpdateFlags):
@ -275,48 +275,20 @@ proc process_attester_slashing*(
ok((proposer_reward, cur_exit_queue_info)) ok((proposer_reward, cur_exit_queue_info))
func findValidatorIndex*(state: ForkyBeaconState, pubkey: ValidatorPubKey): from ".."/validator_bucket_sort import
Opt[ValidatorIndex] = BucketSortedValidators, add, findValidatorIndex, sortValidatorBuckets
# This linear scan is unfortunate, but should be fairly fast as we do a simple
# byte comparison of the key. The alternative would be to build a Table, but
# given that each block can hold no more than 16 deposits, it's slower to
# build the table and use it for lookups than to scan it like this.
# Once we have a reusable, long-lived cache, this should be revisited
#
# For deposit processing purposes, two broad cases exist, either
#
# (a) someone has deposited all 32 required ETH as a single transaction,
# in which case the index doesn't yet exist so the search order does
# not matter so long as it's generally in an order memory controller
# prefetching can predict; or
#
# (b) the deposit has been split into multiple parts, typically not far
# apart from each other, such that on average one would expect this
# validator index to be nearer the maximal than minimal index.
#
# countdown() infinite-loops if the lower bound with uint32 is 0, so
# shift indices by 1, which avoids triggering unsigned wraparound.
for vidx in countdown(state.validators.len.uint32, 1):
if state.validators.asSeq[vidx - 1].pubkey == pubkey:
return Opt[ValidatorIndex].ok((vidx - 1).ValidatorIndex)
from ".."/bloomfilter import
PubkeyBloomFilter, constructBloomFilter, incl, mightContain
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/phase0/beacon-chain.md#deposits # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/phase0/beacon-chain.md#deposits
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated--apply_deposit # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated--apply_deposit
proc apply_deposit( proc apply_deposit(
cfg: RuntimeConfig, state: var ForkyBeaconState, cfg: RuntimeConfig, state: var ForkyBeaconState,
bloom_filter: var PubkeyBloomFilter, deposit_data: DepositData, bucketSortedValidators: var BucketSortedValidators,
flags: UpdateFlags): Result[void, cstring] = deposit_data: DepositData, flags: UpdateFlags): Result[void, cstring] =
let let
pubkey = deposit_data.pubkey pubkey = deposit_data.pubkey
amount = deposit_data.amount amount = deposit_data.amount
index = index = findValidatorIndex(
if bloom_filter.mightContain(pubkey): state.validators.asSeq, bucketSortedValidators, pubkey)
findValidatorIndex(state, pubkey)
else:
Opt.none(ValidatorIndex)
if index.isSome(): if index.isSome():
# Increase balance by deposit amount # Increase balance by deposit amount
@ -358,14 +330,15 @@ proc apply_deposit(
return err("apply_deposit: too many validators (current_epoch_participation)") return err("apply_deposit: too many validators (current_epoch_participation)")
if not state.inactivity_scores.add(0'u64): if not state.inactivity_scores.add(0'u64):
return err("apply_deposit: too many validators (inactivity_scores)") return err("apply_deposit: too many validators (inactivity_scores)")
let new_vidx = state.validators.lenu64 - 1
when typeof(state).kind >= ConsensusFork.Electra: when typeof(state).kind >= ConsensusFork.Electra:
debugComment "check hashlist add return" debugComment "check hashlist add return"
# [New in Electra:EIP7251] # [New in Electra:EIP7251]
discard state.pending_balance_deposits.add PendingBalanceDeposit( discard state.pending_balance_deposits.add PendingBalanceDeposit(
index: state.validators.lenu64 - 1, amount: amount) index: new_vidx, amount: amount)
doAssert state.validators.len == state.balances.len doAssert state.validators.len == state.balances.len
bloom_filter.incl pubkey bucketSortedValidators.add new_vidx.ValidatorIndex
else: else:
# Deposits may come with invalid signatures - in that case, they are not # Deposits may come with invalid signatures - in that case, they are not
# turned into a validator but still get processed to keep the deposit # turned into a validator but still get processed to keep the deposit
@ -375,10 +348,11 @@ proc apply_deposit(
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/phase0/beacon-chain.md#deposits # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#deposits
proc process_deposit*( proc process_deposit*(
cfg: RuntimeConfig, state: var ForkyBeaconState, cfg: RuntimeConfig, state: var ForkyBeaconState,
bloom_filter: var PubkeyBloomFilter, deposit: Deposit, flags: UpdateFlags): bucketSortedValidators: var BucketSortedValidators,
deposit: Deposit, flags: UpdateFlags):
Result[void, cstring] = Result[void, cstring] =
## Process an Eth1 deposit, registering a validator or increasing its balance. ## Process an Eth1 deposit, registering a validator or increasing its balance.
@ -395,12 +369,13 @@ proc process_deposit*(
# Deposits must be processed in order # Deposits must be processed in order
state.eth1_deposit_index += 1 state.eth1_deposit_index += 1
apply_deposit(cfg, state, bloom_filter, deposit.data, flags) apply_deposit(cfg, state, bucketSortedValidators, deposit.data, flags)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_deposit_request # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#new-process_deposit_request
func process_deposit_request*( func process_deposit_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
bloom_filter: var PubkeyBloomFilter, deposit_request: DepositRequest, bucketSortedValidators: var BucketSortedValidators,
deposit_request: DepositRequest,
flags: UpdateFlags): Result[void, cstring] = flags: UpdateFlags): Result[void, cstring] =
# Set deposit request start index # Set deposit request start index
if state.deposit_requests_start_index == if state.deposit_requests_start_index ==
@ -408,7 +383,7 @@ func process_deposit_request*(
state.deposit_requests_start_index = deposit_request.index state.deposit_requests_start_index = deposit_request.index
apply_deposit( apply_deposit(
cfg, state, bloom_filter, DepositData( cfg, state, bucketSortedValidators, DepositData(
pubkey: deposit_request.pubkey, pubkey: deposit_request.pubkey,
withdrawal_credentials: deposit_request.withdrawal_credentials, withdrawal_credentials: deposit_request.withdrawal_credentials,
amount: deposit_request.amount, amount: deposit_request.amount,
@ -510,6 +485,7 @@ proc process_bls_to_execution_change*(
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_withdrawal_request # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_withdrawal_request
func process_withdrawal_request*( func process_withdrawal_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
bucketSortedValidators: BucketSortedValidators,
withdrawal_request: WithdrawalRequest, cache: var StateCache) = withdrawal_request: WithdrawalRequest, cache: var StateCache) =
let let
amount = withdrawal_request.amount amount = withdrawal_request.amount
@ -523,7 +499,9 @@ func process_withdrawal_request*(
let let
request_pubkey = withdrawal_request.validator_pubkey request_pubkey = withdrawal_request.validator_pubkey
# Verify pubkey exists # Verify pubkey exists
index = findValidatorIndex(state, request_pubkey).valueOr: index = findValidatorIndex(
state.validators.asSeq, bucketSortedValidators,
request_pubkey).valueOr:
return return
validator = state.validators.item(index) validator = state.validators.item(index)
@ -591,6 +569,7 @@ func process_withdrawal_request*(
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_consolidation_request # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/electra/beacon-chain.md#new-process_consolidation_request
proc process_consolidation_request*( proc process_consolidation_request*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
bucketSortedValidators: BucketSortedValidators,
consolidation_request: ConsolidationRequest, consolidation_request: ConsolidationRequest,
cache: var StateCache) = cache: var StateCache) =
# If the pending consolidations queue is full, consolidation requests are # If the pending consolidations queue is full, consolidation requests are
@ -606,11 +585,14 @@ proc process_consolidation_request*(
let let
# Verify pubkeys exist # Verify pubkeys exist
source_index = source_index = findValidatorIndex(
findValidatorIndex(state, consolidation_request.source_pubkey).valueOr: state.validators.asSeq, bucketSortedValidators,
consolidation_request.source_pubkey).valueOr:
return return
target_index = target_index =
findValidatorIndex(state, consolidation_request.target_pubkey).valueOr: findValidatorIndex(
state.validators.asSeq, bucketSortedValidators,
consolidation_request.target_pubkey).valueOr:
return return
# Verify that source != target, so a consolidation cannot be used as an exit. # Verify that source != target, so a consolidation cannot be used as an exit.
@ -698,12 +680,26 @@ proc process_operations(
# It costs a full validator set scan to construct these values; only do so if # It costs a full validator set scan to construct these values; only do so if
# there will be some kind of exit. # there will be some kind of exit.
var exit_queue_info = # TODO Electra doesn't use exit_queue_info, don't calculate
var
exit_queue_info =
if body.proposer_slashings.len + body.attester_slashings.len + if body.proposer_slashings.len + body.attester_slashings.len +
body.voluntary_exits.len > 0: body.voluntary_exits.len > 0:
get_state_exit_queue_info(state) get_state_exit_queue_info(state)
else: else:
default(ExitQueueInfo) # not used default(ExitQueueInfo) # not used
bsv_use =
when typeof(body).kind >= ConsensusFork.Electra:
body.deposits.len + body.execution_payload.deposit_requests.len +
body.execution_payload.withdrawal_requests.len +
body.execution_payload.consolidation_requests.len > 0
else:
body.deposits.len > 0
bsv =
if bsv_use:
sortValidatorBuckets(state.validators.asSeq)
else:
nil # this is a logic error, effectively assert
for op in body.proposer_slashings: for op in body.proposer_slashings:
let (proposer_slashing_reward, new_exit_queue_info) = let (proposer_slashing_reward, new_exit_queue_info) =
@ -718,10 +714,8 @@ proc process_operations(
for op in body.attestations: for op in body.attestations:
operations_rewards.attestations += operations_rewards.attestations +=
? process_attestation(state, op, flags, base_reward_per_increment, cache) ? process_attestation(state, op, flags, base_reward_per_increment, cache)
if body.deposits.len > 0:
let bloom_filter = constructBloomFilter(state.validators.asSeq)
for op in body.deposits: for op in body.deposits:
? process_deposit(cfg, state, bloom_filter[], op, flags) ? process_deposit(cfg, state, bsv[], op, flags)
for op in body.voluntary_exits: for op in body.voluntary_exits:
exit_queue_info = ? process_voluntary_exit( exit_queue_info = ? process_voluntary_exit(
cfg, state, op, flags, exit_queue_info, cache) cfg, state, op, flags, exit_queue_info, cache)
@ -731,15 +725,13 @@ proc process_operations(
when typeof(body).kind >= ConsensusFork.Electra: when typeof(body).kind >= ConsensusFork.Electra:
for op in body.execution_payload.deposit_requests: for op in body.execution_payload.deposit_requests:
debugComment "combine with previous Bloom filter construction" ? process_deposit_request(cfg, state, bsv[], op, {})
let bloom_filter = constructBloomFilter(state.validators.asSeq)
? process_deposit_request(cfg, state, bloom_filter[], op, {})
for op in body.execution_payload.withdrawal_requests: for op in body.execution_payload.withdrawal_requests:
# [New in Electra:EIP7002:7251] # [New in Electra:EIP7002:7251]
process_withdrawal_request(cfg, state, op, cache) process_withdrawal_request(cfg, state, bsv[], op, cache)
for op in body.execution_payload.consolidation_requests: for op in body.execution_payload.consolidation_requests:
# [New in Electra:EIP7251] # [New in Electra:EIP7251]
process_consolidation_request(cfg, state, op, cache) process_consolidation_request(cfg, state, bsv[], op, cache)
ok(operations_rewards) ok(operations_rewards)
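The reworked process_operations above only builds the bucket-sorted validator index (and the exit queue info) when the block actually contains operations that need it. An illustrative sketch of that build-only-if-needed pattern, with made-up names rather than the real API:

```nim
# Illustrative only: build an expensive lookup structure lazily, mirroring the
# bsv_use / bsv logic above. buildIndex stands in for sortValidatorBuckets.
type LookupIndex = ref object
  entries: int

proc buildIndex(validatorCount: int): LookupIndex =
  LookupIndex(entries: validatorCount)

proc processBody(validatorCount, depositCount, requestCount: int): int =
  let needsIndex = depositCount + requestCount > 0
  let idx: LookupIndex = if needsIndex: buildIndex(validatorCount) else: nil
  # Dereferencing idx when needsIndex is false would be a logic error,
  # matching the "effectively assert" comment in the diff above.
  if needsIndex: idx.entries else: 0

when isMainModule:
  doAssert processBody(100, 2, 0) == 100
  doAssert processBody(100, 0, 0) == 0
```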
@ -756,11 +748,11 @@ func get_participant_reward*(total_active_balance: Gwei): Gwei =
WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH WEIGHT_DENOMINATOR div SLOTS_PER_EPOCH
max_participant_rewards div SYNC_COMMITTEE_SIZE max_participant_rewards div SYNC_COMMITTEE_SIZE
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#sync-aggregate-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#sync-aggregate-processing
func get_proposer_reward*(participant_reward: Gwei): Gwei = func get_proposer_reward*(participant_reward: Gwei): Gwei =
participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) participant_reward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#sync-aggregate-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#sync-aggregate-processing
proc process_sync_aggregate*( proc process_sync_aggregate*(
state: var (altair.BeaconState | bellatrix.BeaconState | state: var (altair.BeaconState | bellatrix.BeaconState |
capella.BeaconState | deneb.BeaconState | electra.BeaconState), capella.BeaconState | deneb.BeaconState | electra.BeaconState),
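A small numeric illustration of get_proposer_reward above, assuming the Altair weights PROPOSER_WEIGHT = 8 and WEIGHT_DENOMINATOR = 64, so the proposer receives participant_reward * 8 div 56, i.e. one seventh of it:

```nim
# Numeric illustration of the proposer-reward split shown in the hunk above.
const
  PROPOSER_WEIGHT = 8'u64
  WEIGHT_DENOMINATOR = 64'u64

func proposerReward(participantReward: uint64): uint64 =
  participantReward * PROPOSER_WEIGHT div (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)

when isMainModule:
  doAssert proposerReward(7000) == 1000
```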
@ -1117,7 +1109,7 @@ proc process_block*(
ok(? process_operations(cfg, state, blck.body, 0.Gwei, flags, cache)) ok(? process_operations(cfg, state, blck.body, 0.Gwei, flags, cache))
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095 # TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# copy of datatypes/altair.nim # copy of datatypes/altair.nim
type SomeAltairBlock = type SomeAltairBlock =
@ -1146,7 +1138,7 @@ proc process_block*(
ok(operations_rewards) ok(operations_rewards)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#block-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#block-processing
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095 # TODO workaround for https://github.com/nim-lang/Nim/issues/18095
type SomeBellatrixBlock = type SomeBellatrixBlock =
bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock bellatrix.BeaconBlock | bellatrix.SigVerifiedBeaconBlock | bellatrix.TrustedBeaconBlock

@ -10,7 +10,7 @@
# State transition - epoch processing, as described in # State transition - epoch processing, as described in
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#epoch-processing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/beacon-chain.md#epoch-processing
# #
# The entry point is `process_epoch`, which is at the bottom of this file. # The entry point is `process_epoch`, which is at the bottom of this file.
@ -535,7 +535,7 @@ func get_attestation_component_delta(
else: else:
RewardDelta(penalties: base_reward) RewardDelta(penalties: base_reward)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#components-of-attestation-deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#components-of-attestation-deltas
func get_source_delta*( func get_source_delta*(
validator: RewardStatus, validator: RewardStatus,
base_reward: Gwei, base_reward: Gwei,
@ -694,14 +694,14 @@ func get_unslashed_participating_increment*(
flag_index: TimelyFlag): uint64 = flag_index: TimelyFlag): uint64 =
info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei info.balances.previous_epoch[flag_index] div EFFECTIVE_BALANCE_INCREMENT.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#get_flag_index_deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#get_flag_index_deltas
func get_active_increments*( func get_active_increments*(
info: altair.EpochInfo | bellatrix.BeaconState): uint64 = info: altair.EpochInfo | bellatrix.BeaconState): uint64 =
info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei info.balances.current_epoch div EFFECTIVE_BALANCE_INCREMENT.Gwei
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#get_flag_index_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#modified-get_inactivity_penalty_deltas
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#modified-get_inactivity_penalty_deltas
# Combines get_flag_index_deltas() and get_inactivity_penalty_deltas() # Combines get_flag_index_deltas() and get_inactivity_penalty_deltas()
template get_flag_and_inactivity_delta( template get_flag_and_inactivity_delta(
state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState | state: altair.BeaconState | bellatrix.BeaconState | capella.BeaconState |
@ -843,7 +843,7 @@ func get_flag_and_inactivity_delta_for_validator(
active_increments, penalty_denominator, epoch_participation, active_increments, penalty_denominator, epoch_participation,
participating_increments, info, vidx, inactivity_score.uint64) participating_increments, info, vidx, inactivity_score.uint64)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#rewards-and-penalties-1 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#rewards-and-penalties-1
func process_rewards_and_penalties*( func process_rewards_and_penalties*(
state: var phase0.BeaconState, info: var phase0.EpochInfo) = state: var phase0.BeaconState, info: var phase0.EpochInfo) =
# No rewards are applied at the end of `GENESIS_EPOCH` because rewards are # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
@ -866,7 +866,7 @@ func process_rewards_and_penalties*(
decrease_balance(balance, v.delta.penalties) decrease_balance(balance, v.delta.penalties)
state.balances.asSeq()[idx] = balance state.balances.asSeq()[idx] = balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/altair/beacon-chain.md#rewards-and-penalties # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#rewards-and-penalties
func process_rewards_and_penalties*( func process_rewards_and_penalties*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
state: var (altair.BeaconState | bellatrix.BeaconState | state: var (altair.BeaconState | bellatrix.BeaconState |
@ -902,7 +902,7 @@ func process_rewards_and_penalties*(
from std/heapqueue import HeapQueue, `[]`, len, push, replace from std/heapqueue import HeapQueue, `[]`, len, push, replace
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#registry-updates # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#registry-updates
func process_registry_updates*( func process_registry_updates*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
state: var (phase0.BeaconState | altair.BeaconState | state: var (phase0.BeaconState | altair.BeaconState |
@ -971,7 +971,7 @@ func process_registry_updates*(
ok() ok()
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.0/specs/electra/beacon-chain.md#updated--process_registry_updates # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/electra/beacon-chain.md#modified-process_registry_updates
func process_registry_updates*( func process_registry_updates*(
cfg: RuntimeConfig, state: var electra.BeaconState, cache: var StateCache): cfg: RuntimeConfig, state: var electra.BeaconState, cache: var StateCache):
Result[void, cstring] = Result[void, cstring] =
@ -999,7 +999,7 @@ func process_registry_updates*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#slashings
func get_adjusted_total_slashing_balance*( func get_adjusted_total_slashing_balance*(
state: ForkyBeaconState, total_balance: Gwei): Gwei = state: ForkyBeaconState, total_balance: Gwei): Gwei =
const multiplier = const multiplier =
@ -1018,14 +1018,14 @@ func get_adjusted_total_slashing_balance*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#slashings
func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool = func slashing_penalty_applies*(validator: Validator, epoch: Epoch): bool =
validator.slashed and validator.slashed and
epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch epoch + EPOCHS_PER_SLASHINGS_VECTOR div 2 == validator.withdrawable_epoch
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#slashings
func get_slashing_penalty*(validator: Validator, func get_slashing_penalty*(validator: Validator,
adjusted_total_slashing_balance, adjusted_total_slashing_balance,
total_balance: Gwei): Gwei = total_balance: Gwei): Gwei =
@ -1036,8 +1036,8 @@ func get_slashing_penalty*(validator: Validator,
penalty_numerator div total_balance * increment penalty_numerator div total_balance * increment
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#slashings
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/bellatrix/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/bellatrix/beacon-chain.md#slashings
func get_slashing( func get_slashing(
state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei = state: ForkyBeaconState, total_balance: Gwei, vidx: ValidatorIndex): Gwei =
# For efficiency reasons, it doesn't make sense to have process_slashings use # For efficiency reasons, it doesn't make sense to have process_slashings use
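A worked example of the penalty arithmetic in get_slashing_penalty above; EFFECTIVE_BALANCE_INCREMENT is assumed to be 1 ETH in Gwei and the figures are invented:

```nim
# Worked example of the penalty formula in get_slashing_penalty above.
const EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000'u64  # assumed: 1 ETH in Gwei

func slashingPenalty(effectiveBalance, adjustedTotalSlashingBalance,
                     totalBalance: uint64): uint64 =
  let penaltyNumerator =
    effectiveBalance div EFFECTIVE_BALANCE_INCREMENT * adjustedTotalSlashingBalance
  penaltyNumerator div totalBalance * EFFECTIVE_BALANCE_INCREMENT

when isMainModule:
  # 32 ETH validator, 100_000 ETH adjusted slashings, 1_000_000 ETH total stake
  # -> 32 * 100_000 div 1_000_000 = 3 increments, i.e. a 3 ETH penalty.
  doAssert slashingPenalty(32_000_000_000'u64,
                           100_000_000_000_000'u64,
                           1_000_000_000_000_000'u64) == 3_000_000_000'u64
```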
@ -1075,61 +1075,18 @@ func process_eth1_data_reset*(state: var ForkyBeaconState) =
if next_epoch mod EPOCHS_PER_ETH1_VOTING_PERIOD == 0: if next_epoch mod EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
state.eth1_data_votes = default(type state.eth1_data_votes) state.eth1_data_votes = default(type state.eth1_data_votes)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.7/specs/phase0/beacon-chain.md#effective-balances-updates
template effective_balance_might_update*(
balance: Gwei, effective_balance: Gwei): bool =
const
HYSTERESIS_INCREMENT =
EFFECTIVE_BALANCE_INCREMENT.Gwei div HYSTERESIS_QUOTIENT
DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
balance + DOWNWARD_THRESHOLD < effective_balance or
effective_balance + UPWARD_THRESHOLD < balance
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#effective-balances-updates # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#effective-balances-updates
func process_effective_balance_updates*(
state: var (phase0.BeaconState | altair.BeaconState |
bellatrix.BeaconState | capella.BeaconState |
deneb.BeaconState)) =
# Update effective balances with hysteresis
for vidx in state.validators.vindices:
let
balance = state.balances.item(vidx)
effective_balance = state.validators.item(vidx).effective_balance
if effective_balance_might_update(balance, effective_balance):
let new_effective_balance =
min(
balance - balance mod EFFECTIVE_BALANCE_INCREMENT.Gwei,
MAX_EFFECTIVE_BALANCE.Gwei)
# Protect against unnecessary cache invalidation
if new_effective_balance != effective_balance:
state.validators.mitem(vidx).effective_balance = new_effective_balance
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#updated-process_effective_balance_updates # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.1/specs/electra/beacon-chain.md#updated-process_effective_balance_updates
func process_effective_balance_updates*(state: var electra.BeaconState) = func process_effective_balance_updates*(state: var ForkyBeaconState) =
# Update effective balances with hysteresis # Update effective balances with hysteresis
for vidx in state.validators.vindices: for vidx in state.validators.vindices:
let let
balance = state.balances.item(vidx) balance = state.balances.item(vidx)
effective_balance = state.validators.item(vidx).effective_balance effective_balance = state.validators.item(vidx).effective_balance
if effective_balance_might_update(balance, effective_balance): if effective_balance_might_update(balance, effective_balance):
debugComment "amortize validator read access" let new_effective_balance = get_effective_balance_update(
# Wrapping MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei and typeof(state).kind, balance, effective_balance, vidx.distinctBase)
# MIN_ACTIVATION_BALANCE.Gwei in static() results
# in
# beacon_chain/spec/state_transition_epoch.nim(1067, 20) Error: expected: ':', but got: '('
# even though it'd be better to statically verify safety
let
effective_balance_limit =
if has_compounding_withdrawal_credential(
state.validators.item(vidx)):
MAX_EFFECTIVE_BALANCE_ELECTRA.Gwei
else:
MIN_ACTIVATION_BALANCE.Gwei
new_effective_balance =
min(
balance - balance mod EFFECTIVE_BALANCE_INCREMENT.Gwei,
effective_balance_limit)
# Protect against unnecessary cache invalidation # Protect against unnecessary cache invalidation
if new_effective_balance != effective_balance: if new_effective_balance != effective_balance:
state.validators.mitem(vidx).effective_balance = new_effective_balance state.validators.mitem(vidx).effective_balance = new_effective_balance
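The hysteresis thresholds behind effective_balance_might_update can be made concrete with assumed mainnet constants (EFFECTIVE_BALANCE_INCREMENT = 1 ETH, HYSTERESIS_QUOTIENT = 4, downward multiplier 1, upward multiplier 5):

```nim
# Worked example of the hysteresis thresholds referenced in the hunk above.
const
  EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000'u64
  DOWNWARD_THRESHOLD = EFFECTIVE_BALANCE_INCREMENT div 4 * 1  # 0.25 ETH
  UPWARD_THRESHOLD = EFFECTIVE_BALANCE_INCREMENT div 4 * 5    # 1.25 ETH

func mightUpdate(balance, effectiveBalance: uint64): bool =
  balance + DOWNWARD_THRESHOLD < effectiveBalance or
    effectiveBalance + UPWARD_THRESHOLD < balance

when isMainModule:
  # A 32 ETH effective balance only moves once the actual balance drops below
  # 31.75 ETH or rises above 33.25 ETH, avoiding needless cache invalidation.
  doAssert not mightUpdate(31_800_000_000'u64, 32_000_000_000'u64)
  doAssert mightUpdate(31_700_000_000'u64, 32_000_000_000'u64)
  doAssert mightUpdate(33_300_000_000'u64, 32_000_000_000'u64)
```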
@ -1167,7 +1124,7 @@ func process_historical_roots_update*(state: var ForkyBeaconState) =
if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0: if next_epoch mod (SLOTS_PER_HISTORICAL_ROOT div SLOTS_PER_EPOCH) == 0:
# Equivalent to hash_tree_root(foo: HistoricalBatch), but without using # Equivalent to hash_tree_root(foo: HistoricalBatch), but without using
# significant additional stack or heap. # significant additional stack or heap.
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#historicalbatch # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#historicalbatch
# In response to https://github.com/status-im/nimbus-eth2/issues/921 # In response to https://github.com/status-im/nimbus-eth2/issues/921
if not state.historical_roots.add state.compute_historical_root(): if not state.historical_roots.add state.compute_historical_root():
raiseAssert "no more room for historical roots, so long and thanks for the fish!" raiseAssert "no more room for historical roots, so long and thanks for the fish!"
@ -1277,7 +1234,9 @@ func process_historical_summaries_update*(
func process_pending_balance_deposits*( func process_pending_balance_deposits*(
cfg: RuntimeConfig, state: var electra.BeaconState, cfg: RuntimeConfig, state: var electra.BeaconState,
cache: var StateCache): Result[void, cstring] = cache: var StateCache): Result[void, cstring] =
let available_for_processing = state.deposit_balance_to_consume + let
next_epoch = get_current_epoch(state) + 1
available_for_processing = state.deposit_balance_to_consume +
get_activation_exit_churn_limit(cfg, state, cache) get_activation_exit_churn_limit(cfg, state, cache)
var var
processed_amount = 0.Gwei processed_amount = 0.Gwei
@ -1293,7 +1252,7 @@ func process_pending_balance_deposits*(
# Validator is exiting, postpone the deposit until after withdrawable epoch # Validator is exiting, postpone the deposit until after withdrawable epoch
if validator.exit_epoch < FAR_FUTURE_EPOCH: if validator.exit_epoch < FAR_FUTURE_EPOCH:
if get_current_epoch(state) <= validator.withdrawable_epoch: if next_epoch <= validator.withdrawable_epoch:
deposits_to_postpone.add(deposit) deposits_to_postpone.add(deposit)
# Deposited balance will never become active. Increase balance but do not # Deposited balance will never become active. Increase balance but do not
# consume churn # consume churn
@ -1333,6 +1292,7 @@ func process_pending_balance_deposits*(
func process_pending_consolidations*( func process_pending_consolidations*(
cfg: RuntimeConfig, state: var electra.BeaconState): cfg: RuntimeConfig, state: var electra.BeaconState):
Result[void, cstring] = Result[void, cstring] =
let next_epoch = get_current_epoch(state) + 1
var next_pending_consolidation = 0 var next_pending_consolidation = 0
for pending_consolidation in state.pending_consolidations: for pending_consolidation in state.pending_consolidations:
let source_validator = let source_validator =
@ -1340,7 +1300,7 @@ func process_pending_consolidations*(
if source_validator.slashed: if source_validator.slashed:
next_pending_consolidation += 1 next_pending_consolidation += 1
continue continue
if source_validator.withdrawable_epoch > get_current_epoch(state): if source_validator.withdrawable_epoch > next_epoch:
break break
let let
@ -1424,7 +1384,7 @@ func init*(
deneb.BeaconState | electra.BeaconState): T = deneb.BeaconState | electra.BeaconState): T =
init(result, state) init(result, state)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/beacon-chain.md#epoch-processing # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#epoch-processing
proc process_epoch*( proc process_epoch*(
cfg: RuntimeConfig, cfg: RuntimeConfig,
state: var (altair.BeaconState | bellatrix.BeaconState), state: var (altair.BeaconState | bellatrix.BeaconState),
@ -1451,7 +1411,7 @@ proc process_epoch*(
process_inactivity_updates(cfg, state, info) # [New in Altair] process_inactivity_updates(cfg, state, info) # [New in Altair]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#rewards-and-penalties # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#rewards-and-penalties
process_rewards_and_penalties(cfg, state, info) # [Modified in Altair] process_rewards_and_penalties(cfg, state, info) # [Modified in Altair]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#registry-updates # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#registry-updates
@ -1503,7 +1463,7 @@ proc process_epoch*(
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#registry-updates # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#registry-updates
? process_registry_updates(cfg, state, cache) ? process_registry_updates(cfg, state, cache)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#slashings # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#slashings
process_slashings(state, info.balances.current_epoch) process_slashings(state, info.balances.current_epoch)
process_eth1_data_reset(state) process_eth1_data_reset(state)
@ -1526,7 +1486,7 @@ proc process_epoch*(
info.init(state) info.init(state)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/beacon-chain.md#justification-and-finalization # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/beacon-chain.md#justification-and-finalization
process_justification_and_finalization(state, info.balances, flags) process_justification_and_finalization(state, info.balances, flags)
# state.slot hasn't been incremented yet. # state.slot hasn't been incremented yet.
@ -1564,9 +1524,8 @@ proc process_epoch*(
ok() ok()
proc get_validator_balance_after_epoch*( proc get_validator_balance_after_epoch*(
cfg: RuntimeConfig, cfg: RuntimeConfig, state: deneb.BeaconState | electra.BeaconState,
state: deneb.BeaconState | electra.BeaconState, cache: var StateCache, info: var altair.EpochInfo,
flags: UpdateFlags, cache: var StateCache, info: var altair.EpochInfo,
index: ValidatorIndex): Gwei = index: ValidatorIndex): Gwei =
# Run a subset of process_epoch() which affects an individual validator, # Run a subset of process_epoch() which affects an individual validator,
# without modifying state itself # without modifying state itself
@ -1586,7 +1545,7 @@ proc get_validator_balance_after_epoch*(
weigh_justification_and_finalization( weigh_justification_and_finalization(
state, info.balances.current_epoch, state, info.balances.current_epoch,
info.balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX], info.balances.previous_epoch[TIMELY_TARGET_FLAG_INDEX],
info.balances.current_epoch_TIMELY_TARGET, flags) info.balances.current_epoch_TIMELY_TARGET, {})
# Used as part of process_rewards_and_penalties # Used as part of process_rewards_and_penalties
let inactivity_score = let inactivity_score =
@ -1667,3 +1626,21 @@ proc get_validator_balance_after_epoch*(
processed_amount += deposit.amount processed_amount += deposit.amount
post_epoch_balance post_epoch_balance
proc get_next_slot_expected_withdrawals*(
cfg: RuntimeConfig, state: deneb.BeaconState, cache: var StateCache,
info: var altair.EpochInfo): seq[Withdrawal] =
get_expected_withdrawals_aux(state, (state.slot + 1).epoch) do:
# validator_index is defined by an injected symbol within the template
get_validator_balance_after_epoch(
cfg, state, cache, info, validator_index.ValidatorIndex)
proc get_next_slot_expected_withdrawals*(
cfg: RuntimeConfig, state: electra.BeaconState, cache: var StateCache,
info: var altair.EpochInfo): seq[Withdrawal] =
let (res, _) = get_expected_withdrawals_with_partial_count_aux(
state, (state.slot + 1).epoch) do:
# validator_index is defined by an injected symbol within the template
get_validator_balance_after_epoch(
cfg, state, cache, info, validator_index.ValidatorIndex)
res

@ -158,7 +158,7 @@ func get_shuffled_active_validator_indices*(
withState(state): withState(state):
cache.get_shuffled_active_validator_indices(forkyState.data, epoch) cache.get_shuffled_active_validator_indices(forkyState.data, epoch)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_active_validator_indices # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_active_validator_indices
func count_active_validators*(state: ForkyBeaconState, func count_active_validators*(state: ForkyBeaconState,
epoch: Epoch, epoch: Epoch,
cache: var StateCache): uint64 = cache: var StateCache): uint64 =
@ -394,7 +394,7 @@ func compute_proposer_index(state: ForkyBeaconState,
## Return from ``indices`` a random index sampled by effective balance. ## Return from ``indices`` a random index sampled by effective balance.
compute_proposer_index(state, indices, seed, shuffled_index) compute_proposer_index(state, indices, seed, shuffled_index)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#get_beacon_proposer_index # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#get_beacon_proposer_index
func get_beacon_proposer_index*( func get_beacon_proposer_index*(
state: ForkyBeaconState, cache: var StateCache, slot: Slot): state: ForkyBeaconState, cache: var StateCache, slot: Slot):
Opt[ValidatorIndex] = Opt[ValidatorIndex] =
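compute_proposer_index samples candidates in shuffled order and accepts one with probability proportional to its effective balance. A hedged sketch with phase0 constants assumed and the spec's hash-derived randomness replaced by std/random for brevity:

```nim
# Hedged sketch of the acceptance sampling behind compute_proposer_index;
# the real code derives both the candidate order and the random byte from
# hashing the epoch seed.
import std/random

const
  MAX_RANDOM_BYTE = 255'u64
  MAX_EFFECTIVE_BALANCE = 32_000_000_000'u64  # assumed phase0 cap, in Gwei

proc pickProposer(effectiveBalances: seq[uint64], rng: var Rand): int =
  doAssert effectiveBalances.len > 0
  while true:
    let candidate = rng.rand(effectiveBalances.high)  # stand-in for shuffling
    let randomByte = rng.rand(255).uint64
    if effectiveBalances[candidate] * MAX_RANDOM_BYTE >=
        MAX_EFFECTIVE_BALANCE * randomByte:
      return candidate

when isMainModule:
  var rng = initRand(123)
  let balances = @[32_000_000_000'u64, 16_000_000_000'u64]
  # A full-balance validator is always accepted; half balance roughly half the time.
  doAssert pickProposer(balances, rng) in [0, 1]
```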

@ -10,10 +10,10 @@
import import
./datatypes/base, ./beaconstate, ./forks, ./helpers ./datatypes/base, ./beaconstate, ./forks, ./helpers
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#configuration # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#configuration
const SAFETY_DECAY* = 10'u64 const SAFETY_DECAY* = 10'u64
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#compute_weak_subjectivity_period
func compute_weak_subjectivity_period( func compute_weak_subjectivity_period(
cfg: RuntimeConfig, state: ForkyBeaconState): uint64 = cfg: RuntimeConfig, state: ForkyBeaconState): uint64 =
## Returns the weak subjectivity period for the current ``state``. ## Returns the weak subjectivity period for the current ``state``.
@ -49,7 +49,7 @@ func compute_weak_subjectivity_period(
ws_period ws_period
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md#is_within_weak_subjectivity_period
func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot, func is_within_weak_subjectivity_period*(cfg: RuntimeConfig, current_slot: Slot,
ws_state: ForkedHashedBeaconState, ws_state: ForkedHashedBeaconState,
ws_checkpoint: Checkpoint): bool = ws_checkpoint: Checkpoint): bool =
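is_within_weak_subjectivity_period boils down to comparing the wall-clock epoch against the checkpoint state's epoch plus the computed period. A minimal sketch assuming SLOTS_PER_EPOCH = 32:

```nim
# Minimal sketch of the check is_within_weak_subjectivity_period performs.
const SLOTS_PER_EPOCH = 32'u64

func withinWsPeriod(currentSlot, wsStateSlot, wsPeriod: uint64): bool =
  currentSlot div SLOTS_PER_EPOCH <= wsStateSlot div SLOTS_PER_EPOCH + wsPeriod

when isMainModule:
  doAssert withinWsPeriod(currentSlot = 320, wsStateSlot = 0, wsPeriod = 256)
  doAssert not withinWsPeriod(currentSlot = 9_600, wsStateSlot = 0, wsPeriod = 256)
```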

@ -65,6 +65,7 @@ type
getFinalizedPeriod: GetSyncCommitteePeriodCallback getFinalizedPeriod: GetSyncCommitteePeriodCallback
getOptimisticPeriod: GetSyncCommitteePeriodCallback getOptimisticPeriod: GetSyncCommitteePeriodCallback
getBeaconTime: GetBeaconTimeFn getBeaconTime: GetBeaconTimeFn
shouldInhibitSync: GetBoolCallback
loopFuture: Future[void].Raising([CancelledError]) loopFuture: Future[void].Raising([CancelledError])
func init*( func init*(
@ -80,7 +81,8 @@ func init*(
isNextSyncCommitteeKnown: GetBoolCallback, isNextSyncCommitteeKnown: GetBoolCallback,
getFinalizedPeriod: GetSyncCommitteePeriodCallback, getFinalizedPeriod: GetSyncCommitteePeriodCallback,
getOptimisticPeriod: GetSyncCommitteePeriodCallback, getOptimisticPeriod: GetSyncCommitteePeriodCallback,
getBeaconTime: GetBeaconTimeFn getBeaconTime: GetBeaconTimeFn,
shouldInhibitSync: GetBoolCallback = nil
): LightClientManager = ): LightClientManager =
## Initialize light client manager. ## Initialize light client manager.
LightClientManager( LightClientManager(
@ -95,8 +97,8 @@ func init*(
isNextSyncCommitteeKnown: isNextSyncCommitteeKnown, isNextSyncCommitteeKnown: isNextSyncCommitteeKnown,
getFinalizedPeriod: getFinalizedPeriod, getFinalizedPeriod: getFinalizedPeriod,
getOptimisticPeriod: getOptimisticPeriod, getOptimisticPeriod: getOptimisticPeriod,
getBeaconTime: getBeaconTime getBeaconTime: getBeaconTime,
) shouldInhibitSync: shouldInhibitSync)
proc isGossipSupported*( proc isGossipSupported*(
self: LightClientManager, self: LightClientManager,
@ -328,13 +330,14 @@ template query[E](
): Future[bool].Raising([CancelledError]) = ): Future[bool].Raising([CancelledError]) =
self.query(e, Nothing()) self.query(e, Nothing())
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md#light-client-sync-process # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md#light-client-sync-process
proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} = proc loop(self: LightClientManager) {.async: (raises: [CancelledError]).} =
var nextSyncTaskTime = self.getBeaconTime() var nextSyncTaskTime = self.getBeaconTime()
while true: while true:
# Periodically wake and check for changes # Periodically wake and check for changes
let wallTime = self.getBeaconTime() let wallTime = self.getBeaconTime()
if wallTime < nextSyncTaskTime or if wallTime < nextSyncTaskTime or
(self.shouldInhibitSync != nil and self.shouldInhibitSync()) or
self.network.peerPool.lenAvailable < 1: self.network.peerPool.lenAvailable < 1:
await sleepAsync(chronos.seconds(2)) await sleepAsync(chronos.seconds(2))
continue continue

@ -90,7 +90,7 @@ p2pProtocol LightClientSync(version = 1,
debug "LC bootstrap request done", peer, blockRoot debug "LC bootstrap request done", peer, blockRoot
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#lightclientupdatesbyrange
proc lightClientUpdatesByRange( proc lightClientUpdatesByRange(
peer: Peer, peer: Peer,
startPeriod: SyncCommitteePeriod, startPeriod: SyncCommitteePeriod,
@ -134,7 +134,7 @@ p2pProtocol LightClientSync(version = 1,
debug "LC updates by range request done", peer, startPeriod, count, found debug "LC updates by range request done", peer, startPeriod, count, found
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#getlightclientfinalityupdate
proc lightClientFinalityUpdate( proc lightClientFinalityUpdate(
peer: Peer, peer: Peer,
response: SingleChunkResponse[ForkedLightClientFinalityUpdate]) response: SingleChunkResponse[ForkedLightClientFinalityUpdate])
@ -160,7 +160,7 @@ p2pProtocol LightClientSync(version = 1,
debug "LC finality update request done", peer debug "LC finality update request done", peer
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/p2p-interface.md#getlightclientoptimisticupdate
proc lightClientOptimisticUpdate( proc lightClientOptimisticUpdate(
peer: Peer, peer: Peer,
response: SingleChunkResponse[ForkedLightClientOptimisticUpdate]) response: SingleChunkResponse[ForkedLightClientOptimisticUpdate])

@ -21,7 +21,7 @@ import
from presto import RestDecodingError from presto import RestDecodingError
const const
largeRequestsTimeout = 90.seconds # Downloading large items such as states. largeRequestsTimeout = 120.seconds # Downloading large items such as states.
smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots. smallRequestsTimeout = 30.seconds # Downloading smaller items such as blocks and deposit snapshots.
proc fetchDepositSnapshot( proc fetchDepositSnapshot(
@ -171,7 +171,7 @@ proc doTrustedNodeSync*(
let stateId = let stateId =
case syncTarget.kind case syncTarget.kind
of TrustedNodeSyncKind.TrustedBlockRoot: of TrustedNodeSyncKind.TrustedBlockRoot:
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/light-client/light-client.md#light-client-sync-process # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/light-client.md#light-client-sync-process
const lcDataFork = LightClientDataFork.high const lcDataFork = LightClientDataFork.high
var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]] var bestViableCheckpoint: Opt[tuple[slot: Slot, state_root: Eth2Digest]]
func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) = func trackBestViableCheckpoint(store: lcDataFork.LightClientStore) =

@ -0,0 +1,90 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
import std/typetraits
import "."/spec/crypto
from "."/spec/datatypes/base import Validator, ValidatorIndex, pubkey, `==`
const
BUCKET_BITS = 9 # >= 13 gets slow to construct
NUM_BUCKETS = 1 shl BUCKET_BITS
type
# `newSeqUninitialized` requires its type to be SomeNumber
IntValidatorIndex = distinctBase ValidatorIndex
BucketSortedValidators* = object
bucketSorted*: seq[IntValidatorIndex]
bucketUpperBounds: array[NUM_BUCKETS, uint] # avoids over/underflow checks
extraItems*: seq[ValidatorIndex]
template getBucketNumber(h: ValidatorPubKey): uint =
# This assumes https://en.wikipedia.org/wiki/Avalanche_effect for uniform
# distribution across pubkeys. ValidatorPubKey specifically satisfies this
# criterion. If required, can look at more input bytes, but ultimately it
# doesn't affect correctness, only speed.
# Otherwise need more than 2 bytes of input
static: doAssert BUCKET_BITS <= 16
const BUCKET_MASK = (NUM_BUCKETS - 1)
((h.blob[0] * 256 + h.blob[1]) and BUCKET_MASK)
func sortValidatorBuckets*(validators: openArray[Validator]):
ref BucketSortedValidators {.noinline.} =
var bucketSizes: array[NUM_BUCKETS, uint]
for validator in validators:
inc bucketSizes[getBucketNumber(validator.pubkey)]
var
bucketInsertPositions: array[NUM_BUCKETS, uint]
accum: uint
for i, s in bucketSizes:
accum += s
bucketInsertPositions[i] = accum
doAssert accum == validators.len.uint
let res = (ref BucketSortedValidators)(
bucketSorted: newSeqUninitialized[IntValidatorIndex](validators.len),
bucketUpperBounds: bucketInsertPositions)
for i, validator in validators:
let insertPos =
addr bucketInsertPositions[getBucketNumber(validator.pubkey)]
dec insertPos[]
res.bucketSorted[insertPos[]] = i.IntValidatorIndex
doAssert bucketInsertPositions[0] == 0
for i in 1 ..< NUM_BUCKETS:
doAssert res.bucketUpperBounds[i - 1] == bucketInsertPositions[i]
res
func add*(
bucketSortedValidators: var BucketSortedValidators,
validatorIndex: ValidatorIndex) =
bucketSortedValidators.extraItems.add validatorIndex
func findValidatorIndex*(
validators: openArray[Validator], bsv: BucketSortedValidators,
pubkey: ValidatorPubKey): Opt[ValidatorIndex] =
for validatorIndex in bsv.extraItems:
if validators[validatorIndex.distinctBase].pubkey == pubkey:
return Opt.some validatorIndex
let
bucketNumber = getBucketNumber(pubkey)
lowerBounds =
if bucketNumber == 0:
0'u
else:
bsv.bucketUpperBounds[bucketNumber - 1]
for i in lowerBounds ..< bsv.bucketUpperBounds[bucketNumber]:
if validators[bsv.bucketSorted[i]].pubkey == pubkey:
return Opt.some bsv.bucketSorted[i].ValidatorIndex
Opt.none ValidatorIndex
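Editor's sketch, not part of the diff: the new validator_bucket_sort module replaces the pubkey Bloom filter with a counting-sort style index keyed on the first two bytes of each pubkey, so a lookup only scans the single bucket a pubkey can land in. Below is a minimal, self-contained illustration of the same idea on plain 48-byte keys; the names Key, bucketOf, buildIndex and find are invented for the illustration, and it uses a seq of seqs where the real BucketSortedValidators packs indices into one flat seq bounded by prefix sums.

```nim
# Illustrative sketch only; mirrors the two-byte bucketing idea, not the module's API.
const
  BucketBits = 9                      # same choice as BUCKET_BITS above
  NumBuckets = 1 shl BucketBits       # 512 buckets

type Key = array[48, byte]            # width of a compressed BLS pubkey

func bucketOf(k: Key): int =
  # low BucketBits bits of the first two bytes; assumes keys are hash-like/uniform
  (k[0].int * 256 + k[1].int) and (NumBuckets - 1)

func buildIndex(keys: seq[Key]): seq[seq[int]] =
  # one linear pass over the registry
  result = newSeq[seq[int]](NumBuckets)
  for i, k in keys:
    result[bucketOf(k)].add i

func find(keys: seq[Key], index: seq[seq[int]], target: Key): int =
  # only the entries sharing target's bucket are compared
  for i in index[bucketOf(target)]:
    if keys[i] == target:
      return i
  -1
```

With nine bucket bits, a registry of one million validators averages roughly 1,950 keys per bucket, so a miss costs about 2,000 comparisons instead of a full scan; unlike the Bloom filter it replaces, the bucket index also yields the matching ValidatorIndex directly rather than only answering membership.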
@ -1950,8 +1950,8 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (ra
updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#broadcast-aggregate # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#broadcast-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#broadcast-sync-committee-contribution
# Wait 2 / 3 of the slot time to allow messages to propagate, then collect # Wait 2 / 3 of the slot time to allow messages to propagate, then collect
# the result in aggregates # the result in aggregates
static: static:
@ -209,7 +209,7 @@ template withContext*(db: SlashingProtectionDB, body: untyped): untyped =
index: ValidatorIndex, index: ValidatorIndex,
validator: ValidatorPubKey, validator: ValidatorPubKey,
source, target: Epoch, source, target: Epoch,
attestation_signing_root: Eth2Digest): Result[void, BadVote] = attestation_signing_root: Eth2Digest): Result[void, BadVote] {.redefine.} =
registerAttestationInContextV2(Opt.some(index), validator, source, target, attestation_signing_root) registerAttestationInContextV2(Opt.some(index), validator, source, target, attestation_signing_root)
block: block:
body body
@ -36,7 +36,7 @@ export results
# - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities # - https://notes.ethereum.org/@djrtwo/Bkn3zpwxB#Validator-responsibilities
# #
# Phase 0 spec - Honest Validator - how to avoid slashing # Phase 0 spec - Honest Validator - how to avoid slashing
# - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/validator.md#how-to-avoid-slashing # - https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#how-to-avoid-slashing
# #
# In-depth reading on slashing conditions # In-depth reading on slashing conditions

@ -776,7 +776,7 @@ proc getAggregateAndProofSignature*(v: AttachedValidator,
fork, genesis_validators_root, aggregate_and_proof) fork, genesis_validators_root, aggregate_and_proof)
await v.signData(request) await v.signData(request)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#prepare-sync-committee-message # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#prepare-sync-committee-message
proc getSyncCommitteeMessage*(v: AttachedValidator, proc getSyncCommitteeMessage*(v: AttachedValidator,
fork: Fork, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
@ -807,7 +807,7 @@ proc getSyncCommitteeMessage*(v: AttachedValidator,
) )
) )
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#aggregation-selection # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#aggregation-selection
proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork, proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
slot: Slot, slot: Slot,
@ -827,7 +827,7 @@ proc getSyncCommitteeSelectionProof*(v: AttachedValidator, fork: Fork,
) )
await v.signData(request) await v.signData(request)
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/altair/validator.md#broadcast-sync-committee-contribution # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/validator.md#broadcast-sync-committee-contribution
proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork, proc getContributionAndProofSignature*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, genesis_validators_root: Eth2Digest,
contribution_and_proof: ContributionAndProof contribution_and_proof: ContributionAndProof
@ -18,7 +18,7 @@ const
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH" "Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"
versionMajor* = 24 versionMajor* = 24
versionMinor* = 7 versionMinor* = 8
versionBuild* = 0 versionBuild* = 0
versionBlob* = "stateofus" # Single word - ends up in the default graffiti versionBlob* = "stateofus" # Single word - ends up in the default graffiti
@ -6,7 +6,7 @@ This is a WIP document to explain the attestation flows.
It is important to distinguish attestation `validation` from attestation `verification`. It is important to distinguish attestation `validation` from attestation `verification`.
- Attestation `validation` is defined in the P2P specs. Validated attestations can be forwarded on GossipSub. - Attestation `validation` is defined in the P2P specs. Validated attestations can be forwarded on GossipSub.
- Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof - Aggregated: https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
- Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id - Unaggregated: https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
- Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block. - Attestation `verification` is defined in the consensus specs. Verified attestations can affect fork choice and may be included in a block.
- https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#attestations
@ -9,7 +9,7 @@ Important distinction:
https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block. https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/p2p-interface.md#beacon_block.
A validated block can be forwarded on gossipsub. A validated block can be forwarded on gossipsub.
- and we distinguish `verification` which is defined in consensus specs: - and we distinguish `verification` which is defined in consensus specs:
https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/beacon-chain.md#block-processing https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/beacon-chain.md#block-processing
A block needs to be verified to enter fork choice, the DAG and the BeaconChainDB A block needs to be verified to enter fork choice, the DAG and the BeaconChainDB
In particular in terms of costly checks validating a block only requires checking: In particular in terms of costly checks validating a block only requires checking:
@ -104,7 +104,7 @@ The following sections explain how to do this for certain EL clients.
## Running the light client ## Running the light client
The light client starts syncing from a trusted block. The light client starts syncing from a trusted block.
This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client. This trusted block should be somewhat recent ([~1-2 weeks](https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/weak-subjectivity.md)) and needs to be configured each time when starting the light client.
### 1. Obtaining a trusted block root ### 1. Obtaining a trusted block root
5
funding.json Normal file
@ -0,0 +1,5 @@
{
"opRetro": {
"projectId": "0xe346264e87202b47f1057eb0b0fcaa0ea7f83e14507ca4585a91a5d94e0e92c0"
}
}
@ -0,0 +1,31 @@
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
participants_matrix:
el:
- el_type: geth
- el_type: nethermind
- el_type: erigon
cl:
- cl_type: nimbus
cl_image: <image-placeholder>
- cl_type: lighthouse
- cl_type: prysm
additional_services:
- tx_spammer
- assertoor
- beacon_metrics_gazer
mev_type: null
assertoor_params:
image: "ethpandaops/assertoor:master"
run_stability_check: true
run_block_proposal_check: true
run_transaction_test: true
run_blob_transaction_test: true
run_opcodes_transaction_test: true
@ -406,6 +406,9 @@ func collectFromAttestations(
rewardsAndPenalties[index].inclusion_delay = rewardsAndPenalties[index].inclusion_delay =
some(inclusionDelay.uint64) some(inclusionDelay.uint64)
from ".."/beacon_chain/validator_bucket_sort import
findValidatorIndex, sortValidatorBuckets
proc collectFromDeposits( proc collectFromDeposits(
rewardsAndPenalties: var seq[RewardsAndPenalties], rewardsAndPenalties: var seq[RewardsAndPenalties],
forkedState: ForkedHashedBeaconState, forkedState: ForkedHashedBeaconState,
@ -414,9 +417,12 @@ proc collectFromDeposits(
cfg: RuntimeConfig) = cfg: RuntimeConfig) =
withStateAndBlck(forkedState, forkedBlock): withStateAndBlck(forkedState, forkedBlock):
for deposit in forkyBlck.message.body.deposits: for deposit in forkyBlck.message.body.deposits:
let pubkey = deposit.data.pubkey let
let amount = deposit.data.amount pubkey = deposit.data.pubkey
var index = findValidatorIndex(forkyState.data, pubkey) amount = deposit.data.amount
var index = findValidatorIndex(
forkyState.data.validators.asSeq, sortValidatorBuckets(
forkyState.data.validators.asSeq)[], pubkey)
if index.isNone: if index.isNone:
if pubkey in pubkeyToIndex: if pubkey in pubkeyToIndex:
try: try:
@ -24,6 +24,7 @@ import
from std/os import changeFileExt, fileExists from std/os import changeFileExt, fileExists
from std/sequtils import mapIt, toSeq from std/sequtils import mapIt, toSeq
from std/times import toUnix from std/times import toUnix
from ../beacon_chain/el/engine_api_conversions import asEth2Digest
from ../beacon_chain/spec/beaconstate import initialize_beacon_state_from_eth1 from ../beacon_chain/spec/beaconstate import initialize_beacon_state_from_eth1
from ../tests/mocking/mock_genesis import mockEth1BlockHash from ../tests/mocking/mock_genesis import mockEth1BlockHash
@ -4012,7 +4012,7 @@
"response": { "response": {
"status": {"operator": "equals", "value": "200"}, "status": {"operator": "equals", "value": "200"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}], "headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": ["data"], "value": {"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_ELECTRA":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","GOSSIP_MAX_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","MAX_CHUNK_SIZE":"","TTFB_TIMEOUT":"","RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO":"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION
_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":""}}] "body": [{"operator": "jstructcmps", "start": ["data"], "value": {"MAX_COMMITTEES_PER_SLOT":"","TARGET_COMMITTEE_SIZE":"","MAX_VALIDATORS_PER_COMMITTEE":"","SHUFFLE_ROUND_COUNT":"","HYSTERESIS_QUOTIENT":"","HYSTERESIS_DOWNWARD_MULTIPLIER":"","HYSTERESIS_UPWARD_MULTIPLIER":"","MIN_DEPOSIT_AMOUNT":"","MAX_EFFECTIVE_BALANCE":"","MAX_EFFECTIVE_BALANCE_ELECTRA":"","EFFECTIVE_BALANCE_INCREMENT":"","MIN_ATTESTATION_INCLUSION_DELAY":"","SLOTS_PER_EPOCH":"","MIN_SEED_LOOKAHEAD":"","MAX_SEED_LOOKAHEAD":"","EPOCHS_PER_ETH1_VOTING_PERIOD":"","SLOTS_PER_HISTORICAL_ROOT":"","MIN_EPOCHS_TO_INACTIVITY_PENALTY":"","EPOCHS_PER_HISTORICAL_VECTOR":"","EPOCHS_PER_SLASHINGS_VECTOR":"","HISTORICAL_ROOTS_LIMIT":"","VALIDATOR_REGISTRY_LIMIT":"","BASE_REWARD_FACTOR":"","WHISTLEBLOWER_REWARD_QUOTIENT":"","PROPOSER_REWARD_QUOTIENT":"","INACTIVITY_PENALTY_QUOTIENT":"","MIN_SLASHING_PENALTY_QUOTIENT":"","PROPORTIONAL_SLASHING_MULTIPLIER":"","MAX_PROPOSER_SLASHINGS":"","MAX_ATTESTER_SLASHINGS":"","MAX_ATTESTATIONS":"","MAX_DEPOSITS":"","MAX_VOLUNTARY_EXITS":"","INACTIVITY_PENALTY_QUOTIENT_ALTAIR":"","MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR":"","PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR":"","SYNC_COMMITTEE_SIZE":"","EPOCHS_PER_SYNC_COMMITTEE_PERIOD":"","MIN_SYNC_COMMITTEE_PARTICIPANTS":"","UPDATE_TIMEOUT":"","INACTIVITY_PENALTY_QUOTIENT_BELLATRIX":"","MIN_SLASHING_PENALTY_QUOTIENT_BELLATRIX":"","PROPORTIONAL_SLASHING_MULTIPLIER_BELLATRIX":"","MAX_BYTES_PER_TRANSACTION":"","MAX_TRANSACTIONS_PER_PAYLOAD":"","BYTES_PER_LOGS_BLOOM":"","MAX_EXTRA_DATA_BYTES":"","MAX_BLS_TO_EXECUTION_CHANGES":"","MAX_WITHDRAWALS_PER_PAYLOAD":"","MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP":"","PRESET_BASE":"","CONFIG_NAME":"","TERMINAL_TOTAL_DIFFICULTY":"","TERMINAL_BLOCK_HASH":"","TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH":"","MIN_GENESIS_ACTIVE_VALIDATOR_COUNT":"","MIN_GENESIS_TIME":"","GENESIS_FORK_VERSION":"","GENESIS_DELAY":"","ALTAIR_FORK_VERSION":"","ALTAIR_FORK_EPOCH":"","BELLATRIX_FORK_VERSION":"","BELLATRIX_FORK_EPOCH":"","CAPELLA_FORK_VERSION":"","CAPELLA_FORK_EPOCH":"","DENEB_FORK_VERSION":"","DENEB_FORK_EPOCH":"","ELECTRA_FORK_VERSION":"","ELECTRA_FORK_EPOCH":"","SECONDS_PER_SLOT":"","SECONDS_PER_ETH1_BLOCK":"","MIN_VALIDATOR_WITHDRAWABILITY_DELAY":"","FIELD_ELEMENTS_PER_BLOB":"","MAX_BLOB_COMMITMENTS_PER_BLOCK":"","MAX_BLOBS_PER_BLOCK":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH":"","KZG_COMMITMENT_INCLUSION_PROOF_DEPTH_ELECTRA":"","SHARD_COMMITTEE_PERIOD":"","ETH1_FOLLOW_DISTANCE":"","INACTIVITY_SCORE_BIAS":"","INACTIVITY_SCORE_RECOVERY_RATE":"","EJECTION_BALANCE":"","MIN_PER_EPOCH_CHURN_LIMIT":"","CHURN_LIMIT_QUOTIENT":"","MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT":"","PROPOSER_SCORE_BOOST":"","REORG_HEAD_WEIGHT_THRESHOLD":"","REORG_PARENT_WEIGHT_THRESHOLD":"","REORG_MAX_EPOCHS_SINCE_FINALIZATION":"","DEPOSIT_CHAIN_ID":"","DEPOSIT_NETWORK_ID":"","DEPOSIT_CONTRACT_ADDRESS":"","GOSSIP_MAX_SIZE":"","MAX_REQUEST_BLOCKS":"","EPOCHS_PER_SUBNET_SUBSCRIPTION":"","MIN_EPOCHS_FOR_BLOCK_REQUESTS":"","MAX_CHUNK_SIZE":"","TTFB_TIMEOUT":"",
"RESP_TIMEOUT":"","ATTESTATION_PROPAGATION_SLOT_RANGE":"","MAXIMUM_GOSSIP_CLOCK_DISPARITY":"","MESSAGE_DOMAIN_INVALID_SNAPPY":"","MESSAGE_DOMAIN_VALID_SNAPPY":"","SUBNETS_PER_NODE":"","ATTESTATION_SUBNET_COUNT":"","ATTESTATION_SUBNET_EXTRA_BITS":"","ATTESTATION_SUBNET_PREFIX_BITS":"","MAX_REQUEST_BLOCKS_DENEB":"","MAX_REQUEST_BLOB_SIDECARS":"","MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS":"","BLOB_SIDECAR_SUBNET_COUNT":"","BLS_WITHDRAWAL_PREFIX":"","ETH1_ADDRESS_WITHDRAWAL_PREFIX":"","DOMAIN_BEACON_PROPOSER":"","DOMAIN_BEACON_ATTESTER":"","DOMAIN_RANDAO":"","DOMAIN_DEPOSIT":"","DOMAIN_VOLUNTARY_EXIT":"","DOMAIN_SELECTION_PROOF":"","DOMAIN_AGGREGATE_AND_PROOF":"","TIMELY_SOURCE_FLAG_INDEX":"","TIMELY_TARGET_FLAG_INDEX":"","TIMELY_HEAD_FLAG_INDEX":"","TIMELY_SOURCE_WEIGHT":"","TIMELY_TARGET_WEIGHT":"","TIMELY_HEAD_WEIGHT":"","SYNC_REWARD_WEIGHT":"","PROPOSER_WEIGHT":"","WEIGHT_DENOMINATOR":"","DOMAIN_SYNC_COMMITTEE":"","DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF":"","DOMAIN_CONTRIBUTION_AND_PROOF":"","DOMAIN_BLS_TO_EXECUTION_CHANGE":"","TARGET_AGGREGATORS_PER_COMMITTEE":"","TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE":"","SYNC_COMMITTEE_SUBNET_COUNT":""}}]
} }
}, },
{ {
@ -141,14 +141,14 @@ func nfuzz_block_header(input: openArray[byte], xoutput: ptr byte,
decodeAndProcess(BlockHeaderInput): decodeAndProcess(BlockHeaderInput):
process_block_header(data.state, data.beaconBlock.message, flags, cache).isOk process_block_header(data.state, data.beaconBlock.message, flags, cache).isOk
from ".."/beacon_chain/bloomfilter import constructBloomFilter from ".."/beacon_chain/validator_bucket_sort import sortValidatorBuckets
proc nfuzz_deposit(input: openArray[byte], xoutput: ptr byte, proc nfuzz_deposit(input: openArray[byte], xoutput: ptr byte,
xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError].} = xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError].} =
decodeAndProcess(DepositInput): decodeAndProcess(DepositInput):
process_deposit( process_deposit(
getRuntimeConfig(some "mainnet"), data.state, getRuntimeConfig(some "mainnet"), data.state,
constructBloomFilter(data.state.validators.asSeq)[], data.deposit, sortValidatorBuckets(data.state.validators.asSeq)[], data.deposit,
flags).isOk flags).isOk
proc nfuzz_proposer_slashing(input: openArray[byte], xoutput: ptr byte, proc nfuzz_proposer_slashing(input: openArray[byte], xoutput: ptr byte,
252
run-kurtosis-check.sh Executable file
@ -0,0 +1,252 @@
#!/bin/bash
set -euo pipefail
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
# ------------------------------------------------
# Inputs on how to run checks
# ------------------------------------------------
echo
printf "Do you want to run the checks in terminal or visit the assertoor URL? (terminal/url) "
read reply
echo
printf "Build new changes (yes/no)? "
read use_previous_image
# ------------------------------------------------
# Installation Checks
# ------------------------------------------------
# Checking for docker installation
echo "Checking docker installation"
if command -v docker &> /dev/null; then
echo "Docker installation found"
else
echo "Docker installation not found. Please install docker."
exit 1
fi
echo "Checking kurtosis installation"
if command -v kurtosis &> /dev/null; then
echo "Kurtosis installation found"
else
echo "Kurtosis installation not found. Installing kurtosis"
echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list
sudo apt-get update
sudo apt-get install -y kurtosis
fi
# Install jq if not installed already
if [ "$(which jq)" != "" ];
then
echo "jq is already installed"
else
echo "jq is not installed. Installing jq"
sudo apt-get install -y jq
fi
new_cl_image="localtestnet"
# ------------------------------------------------
# Build the Docker Image
# ------------------------------------------------
if [[ "$use_previous_image" == "no" ]]; then
echo "Using the previously built Docker image"
echo
echo -n "Please enter the docker image name (default: localtestnet) "
read -r cl_image
if [[ "$cl_image" == "" ]]; then
new_cl_image="localtestnet"
else
new_cl_image=$cl_image
fi
else
echo "Starting the Docker Build!"
# Build the docker Image
sudo docker build . -t localtestnet
# The new cl_image value
new_cl_image="localtestnet"
fi
# ------------------------------------------------
# Run the Kurtosis Tests
# ------------------------------------------------
# Use sed to replace the cl_image value in the file
cat kurtosis-network-params.yml | envsubst > assertoor.yaml
sed -i "s/cl_image: .*/cl_image: $new_cl_image/" assertoor.yaml
sudo kurtosis run \
--enclave nimbus-localtestnet \
github.com/ethpandaops/ethereum-package \
--args-file assertoor.yaml
enclave_dump=$(kurtosis enclave inspect nimbus-localtestnet)
assertoor_url=$(echo "$enclave_dump" | grep assertoor | grep http | sed 's/.*\(http:\/\/[0-9.:]\+\).*/\1/')
# ------------------------------------------------
# Remove Generated File
# ------------------------------------------------
rm assertoor.yaml
# Check the user's input and respond accordingly
if [[ "$reply" == "url" ]]; then
echo "You chose to visit the assertoor URL."
echo "Assertoor Checks Please Visit -> ${assertoor_url}"
echo "Please visit the URL to check the status of the tests"
echo "The kurtosis enclave needs to be cleared, after the tests are done. Please run the following command ----- sudo kurtosis enclave rm -f nimbus-localtestnet"
else
echo "Running the checks over terminal"
# ------------------------------------------------
# Check for Test Status
# ------------------------------------------------
YELLOW='\033[1;33m'
GRAY='\033[0;37m'
GREEN='\033[0;32m'
RED='\033[0;31m'
NC='\033[0m'
# print assertoor logs
assertoor_container=$(docker container list | grep assertoor | sed 's/^\([^ ]\+\) .*$/\1/')
docker logs -f "$assertoor_container" &
# helper to fetch task status for specific test id
get_tasks_status() {
tasks=$(curl -s "${assertoor_url}"/api/v1/test_run/"$1" | jq -c ".data.tasks[] | {index, parent_index, name, title, status, result}")
declare -A task_graph_map
task_graph_map[0]=""
while read task; do
task_id=$(echo "$task" | jq -r ".index")
task_parent=$(echo "$task" | jq -r ".parent_index")
task_name=$(echo "$task" | jq -r ".name")
task_title=$(echo "$task" | jq -r ".title")
task_status=$(echo "$task" | jq -r ".status")
task_result=$(echo "$task" | jq -r ".result")
task_graph="${task_graph_map[$task_parent]}"
task_graph_map[$task_id]="$task_graph |"
if [ ! -z "$task_graph" ]; then
task_graph="${task_graph}- "
fi
if [ "$task_status" == "pending" ]; then
task_status="${GRAY}pending ${NC}"
elif [ "$task_status" == "running" ]; then
task_status="${YELLOW}running ${NC}"
elif [ "$task_status" == "complete" ]; then
task_status="${GREEN}complete${NC}"
fi
if [ "$task_result" == "none" ]; then
task_result="${GRAY}none ${NC}"
elif [ "$task_result" == "success" ]; then
task_result="${GREEN}success${NC}"
elif [ "$task_result" == "failure" ]; then
task_result="${RED}failure${NC}"
fi
echo -e " $(printf '%-4s' "$task_id")\t$task_status\t$task_result\t$(printf '%-50s' "$task_graph$task_name") \t$task_title"
done <<< $(echo "$tasks")
}
# poll & check test status
final_test_result=""
failed_test_id=""
while true
do
pending_tests=0
failed_tests=0
total_tests=0
running_test=""
status_lines=()
task_lines=""
status_lines+=("$(date +'%Y-%m-%d %H:%M:%S') Test Status:")
tests=$(curl -s "${assertoor_url}"/api/v1/test_runs | jq -c ".data[] | {run_id, test_id, name, status}")
while read -r test; do
if [ -z "$test" ]; then
continue
fi
run_id=$(echo "$test" | jq -r ".run_id")
test_id=$(echo "$test" | jq -r ".test_id")
test_name=$(echo "$test" | jq -r ".name")
test_status=$(echo "$test" | jq -r ".status")
if [ "$test_status" == "pending" ]; then
pending_tests=$(expr $pending_tests + 1)
status_name="${GRAY}pending${NC}"
elif [ "$test_status" == "running" ]; then
pending_tests=$(expr $pending_tests + 1)
running_test="$run_id"
status_name="${YELLOW}running${NC}"
elif [ "$test_status" == "success" ]; then
status_name="${GREEN}success${NC}"
elif [ "$test_status" == "failure" ]; then
failed_tests=$(expr $failed_tests + 1)
failed_test_id="$run_id"
status_name="${RED}failure${NC}"
else
status_name="$test_status"
fi
status_lines+=(" $(printf '%-3s' "$test_id") $status_name \t$test_name")
total_tests=$(expr $total_tests + 1)
done <<< $(echo "$tests")
for status_line in "${status_lines[@]}"
do
echo -e "$status_line"
done
if [ -n "$running_test" ]; then
task_lines=$(get_tasks_status "$running_test")
echo "Active Test Task Status:"
echo "$task_lines"
fi
if [ "$failed_tests" -gt 0 ]; then
final_test_result="failure"
break
fi
if [ "$total_tests" -gt 0 ] && [ "$pending_tests" -le 0 ]; then
final_test_result="success"
break
fi
sleep 60
done
# save test results & status to github output
echo "test_result=$(echo "$final_test_result")"
echo "test_status"
for status_line in "${status_lines[@]}"
do
echo -e "$status_line"
done
echo
if [ -n "$failed_test_id" ]; then
echo "failed_test_status"
get_tasks_status "$failed_test_id"
echo ""
else
echo "failed_test_status="
fi
# ------------------------------------------------
# Cleanup
# ------------------------------------------------
sudo kurtosis enclave rm -f nimbus-localtestnet
fi
@ -21,11 +21,11 @@ import # Unit test
./test_block_dag, ./test_block_dag,
./test_block_processor, ./test_block_processor,
./test_block_quarantine, ./test_block_quarantine,
./test_bloom_filter,
./test_conf, ./test_conf,
./test_datatypes, ./test_datatypes,
./test_deposit_snapshots, ./test_deposit_snapshots,
./test_discovery, ./test_discovery,
./test_engine_api_conversions,
./test_engine_authentication, ./test_engine_authentication,
./test_el_manager, ./test_el_manager,
./test_el_conf, ./test_el_conf,
@ -51,6 +51,7 @@ import # Unit test
./test_sync_committee_pool, ./test_sync_committee_pool,
./test_sync_manager, ./test_sync_manager,
./test_toblindedblock, ./test_toblindedblock,
./test_validator_bucket_sort,
./test_validator_change_pool, ./test_validator_change_pool,
./test_validator_pool, ./test_validator_pool,
./test_zero_signature, ./test_zero_signature,
@ -23,7 +23,7 @@ import
# Test utilities # Test utilities
../../testutil, ../../testblockutil ../../testutil, ../../testblockutil
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44 # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/tests/core/pyspec/eth2spec/test/helpers/sync_committee.py#L27-L44
proc compute_aggregate_sync_committee_signature( proc compute_aggregate_sync_committee_signature(
cfg: RuntimeConfig, cfg: RuntimeConfig,
forked: ForkedHashedBeaconState, forked: ForkedHashedBeaconState,
@ -133,7 +133,7 @@ proc block_for_next_slot(
addTestBlock( addTestBlock(
forked, cache, attestations = attestations, cfg = cfg) forked, cache, attestations = attestations, cfg = cfg)
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/altair/light-client/sync-protocol.md#initialize_light_client_store
func initialize_light_client_store( func initialize_light_client_store(
state: auto, storeDataFork: static LightClientDataFork): auto = state: auto, storeDataFork: static LightClientDataFork): auto =
storeDataFork.LightClientStore( storeDataFork.LightClientStore(
@ -114,7 +114,8 @@ suite baseDescription & "Block Header " & preset():
runTest[altair.BeaconBlock, typeof applyBlockHeader]( runTest[altair.BeaconBlock, typeof applyBlockHeader](
OpBlockHeaderDir, suiteName, "Block Header", "block", applyBlockHeader, path) OpBlockHeaderDir, suiteName, "Block Header", "block", applyBlockHeader, path)
from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter from ".."/".."/".."/beacon_chain/validator_bucket_sort import
sortValidatorBuckets
suite baseDescription & "Deposit " & preset(): suite baseDescription & "Deposit " & preset():
proc applyDeposit( proc applyDeposit(
@ -122,7 +123,7 @@ suite baseDescription & "Deposit " & preset():
Result[void, cstring] = Result[void, cstring] =
process_deposit( process_deposit(
defaultRuntimeConfig, preState, defaultRuntimeConfig, preState,
constructBloomFilter(preState.validators.asSeq)[], deposit, {}) sortValidatorBuckets(preState.validators.asSeq)[], deposit, {})
for path in walkTests(OpDepositsDir): for path in walkTests(OpDepositsDir):
runTest[Deposit, typeof applyDeposit]( runTest[Deposit, typeof applyDeposit](
@ -121,7 +121,8 @@ suite baseDescription & "Block Header " & preset():
OpBlockHeaderDir, suiteName, "Block Header", "block", OpBlockHeaderDir, suiteName, "Block Header", "block",
applyBlockHeader, path) applyBlockHeader, path)
from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter from ".."/".."/".."/beacon_chain/validator_bucket_sort import
sortValidatorBuckets
suite baseDescription & "Deposit " & preset(): suite baseDescription & "Deposit " & preset():
proc applyDeposit( proc applyDeposit(
@ -129,7 +130,7 @@ suite baseDescription & "Deposit " & preset():
Result[void, cstring] = Result[void, cstring] =
process_deposit( process_deposit(
defaultRuntimeConfig, preState, defaultRuntimeConfig, preState,
constructBloomFilter(preState.validators.asSeq)[], deposit, {}) sortValidatorBuckets(preState.validators.asSeq)[], deposit, {})
for path in walkTests(OpDepositsDir): for path in walkTests(OpDepositsDir):
runTest[Deposit, typeof applyDeposit]( runTest[Deposit, typeof applyDeposit](
@ -138,7 +138,8 @@ suite baseDescription & "BLS to execution change " & preset():
OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change", OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change",
applyBlsToExecutionChange, path) applyBlsToExecutionChange, path)
from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter from ".."/".."/".."/beacon_chain/validator_bucket_sort import
sortValidatorBuckets
suite baseDescription & "Deposit " & preset(): suite baseDescription & "Deposit " & preset():
func applyDeposit( func applyDeposit(
@ -146,7 +147,7 @@ suite baseDescription & "Deposit " & preset():
Result[void, cstring] = Result[void, cstring] =
process_deposit( process_deposit(
defaultRuntimeConfig, preState, defaultRuntimeConfig, preState,
constructBloomFilter(preState.validators.asSeq)[], deposit, {}) sortValidatorBuckets(preState.validators.asSeq)[], deposit, {})
for path in walkTests(OpDepositsDir): for path in walkTests(OpDepositsDir):
runTest[Deposit, typeof applyDeposit]( runTest[Deposit, typeof applyDeposit](
@ -141,7 +141,8 @@ suite baseDescription & "BLS to execution change " & preset():
OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change", OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change",
applyBlsToExecutionChange, path) applyBlsToExecutionChange, path)
from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter from ".."/".."/".."/beacon_chain/validator_bucket_sort import
sortValidatorBuckets
suite baseDescription & "Deposit " & preset(): suite baseDescription & "Deposit " & preset():
func applyDeposit( func applyDeposit(
@ -149,7 +150,7 @@ suite baseDescription & "Deposit " & preset():
Result[void, cstring] = Result[void, cstring] =
process_deposit( process_deposit(
defaultRuntimeConfig, preState, defaultRuntimeConfig, preState,
constructBloomFilter(preState.validators.asSeq)[], deposit, {}) sortValidatorBuckets(preState.validators.asSeq)[], deposit, {})
for path in walkTests(OpDepositsDir): for path in walkTests(OpDepositsDir):
runTest[Deposit, typeof applyDeposit]( runTest[Deposit, typeof applyDeposit](
@ -148,13 +148,18 @@ suite baseDescription & "BLS to execution change " & preset():
OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change", OpBlsToExecutionChangeDir, suiteName, "BLS to execution change", "address_change",
applyBlsToExecutionChange, path) applyBlsToExecutionChange, path)
from ".."/".."/".."/beacon_chain/validator_bucket_sort import
sortValidatorBuckets
suite baseDescription & "Consolidation Request " & preset(): suite baseDescription & "Consolidation Request " & preset():
proc applyConsolidationRequest( proc applyConsolidationRequest(
preState: var electra.BeaconState, preState: var electra.BeaconState,
consolidation_request: ConsolidationRequest): Result[void, cstring] = consolidation_request: ConsolidationRequest): Result[void, cstring] =
var cache: StateCache var cache: StateCache
process_consolidation_request( process_consolidation_request(
defaultRuntimeConfig, preState, consolidation_request, cache) defaultRuntimeConfig, preState,
sortValidatorBuckets(preState.validators.asSeq)[],
consolidation_request, cache)
ok() ok()
for path in walkTests(OpConsolidationRequestDir): for path in walkTests(OpConsolidationRequestDir):
@ -162,15 +167,13 @@ suite baseDescription & "Consolidation Request " & preset():
OpConsolidationRequestDir, suiteName, "Consolidation Request", OpConsolidationRequestDir, suiteName, "Consolidation Request",
"consolidation_request", applyConsolidationRequest, path) "consolidation_request", applyConsolidationRequest, path)
from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter
suite baseDescription & "Deposit " & preset(): suite baseDescription & "Deposit " & preset():
func applyDeposit( func applyDeposit(
preState: var electra.BeaconState, deposit: Deposit): preState: var electra.BeaconState, deposit: Deposit):
Result[void, cstring] = Result[void, cstring] =
process_deposit( process_deposit(
defaultRuntimeConfig, preState, defaultRuntimeConfig, preState,
constructBloomFilter(preState.validators.asSeq)[], deposit, {}) sortValidatorBuckets(preState.validators.asSeq)[], deposit, {})
for path in walkTests(OpDepositsDir): for path in walkTests(OpDepositsDir):
runTest[Deposit, typeof applyDeposit]( runTest[Deposit, typeof applyDeposit](
@ -182,7 +185,7 @@ suite baseDescription & "Deposit Request " & preset():
Result[void, cstring] = Result[void, cstring] =
process_deposit_request( process_deposit_request(
defaultRuntimeConfig, preState, defaultRuntimeConfig, preState,
constructBloomFilter(preState.validators.asSeq)[], depositRequest, {}) sortValidatorBuckets(preState.validators.asSeq)[], depositRequest, {})
for path in walkTests(OpDepositRequestDir): for path in walkTests(OpDepositRequestDir):
runTest[DepositRequest, typeof applyDepositRequest]( runTest[DepositRequest, typeof applyDepositRequest](
@ -212,7 +215,9 @@ suite baseDescription & "Withdrawal Request " & preset():
Result[void, cstring] = Result[void, cstring] =
var cache: StateCache var cache: StateCache
process_withdrawal_request( process_withdrawal_request(
defaultRuntimeConfig, preState, withdrawalRequest, cache) defaultRuntimeConfig, preState,
sortValidatorBuckets(preState.validators.asSeq)[], withdrawalRequest,
cache)
ok() ok()
for path in walkTests(OpWithdrawalRequestDir): for path in walkTests(OpWithdrawalRequestDir):
@ -83,7 +83,7 @@ type
rewards*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT] rewards*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]
penalties*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT] penalties*: List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/validator.md#eth1block # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.5/specs/phase0/validator.md#eth1block
Eth1Block* = object Eth1Block* = object
timestamp*: uint64 timestamp*: uint64
deposit_root*: Eth2Digest deposit_root*: Eth2Digest
@ -106,7 +106,8 @@ suite baseDescription & "Block Header " & preset():
OpBlockHeaderDir, suiteName, "Block Header", "block", OpBlockHeaderDir, suiteName, "Block Header", "block",
applyBlockHeader, path) applyBlockHeader, path)
from ".."/".."/".."/beacon_chain/bloomfilter import constructBloomFilter from ".."/".."/".."/beacon_chain/validator_bucket_sort import
sortValidatorBuckets
suite baseDescription & "Deposit " & preset(): suite baseDescription & "Deposit " & preset():
proc applyDeposit( proc applyDeposit(
@ -114,7 +115,7 @@ suite baseDescription & "Deposit " & preset():
Result[void, cstring] = Result[void, cstring] =
process_deposit( process_deposit(
defaultRuntimeConfig, preState, defaultRuntimeConfig, preState,
constructBloomFilter(preState.validators.asSeq)[], deposit, {}) sortValidatorBuckets(preState.validators.asSeq)[], deposit, {})
for path in walkTests(OpDepositsDir): for path in walkTests(OpDepositsDir):
runTest[Deposit, typeof applyDeposit]( runTest[Deposit, typeof applyDeposit](
@ -90,8 +90,7 @@ proc initialLoad(
dag = ChainDAGRef.init( dag = ChainDAGRef.init(
forkedState[].kind.genesisTestRuntimeConfig, db, validatorMonitor, {}) forkedState[].kind.genesisTestRuntimeConfig, db, validatorMonitor, {})
fkChoice = newClone(ForkChoice.init( fkChoice = newClone(ForkChoice.init(
dag.getFinalizedEpochRef(), dag.finalizedHead.blck, dag.getFinalizedEpochRef(), dag.finalizedHead.blck))
ForkChoiceVersion.Pr3431))
(dag, fkChoice) (dag, fkChoice)
@ -1,147 +0,0 @@
# beacon_chain
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [].}
{.used.}
import ".."/beacon_chain/spec/crypto, unittest2
from std/sequtils import mapIt
from ".."/beacon_chain/bloomfilter import
constructBloomFilter, incl, mightContain
from ".."/beacon_chain/spec/datatypes/base import
HashedValidatorPubKey, HashedValidatorPubKeyItem, Validator, fromHex, pubkey
from ".."/beacon_chain/spec/eth2_merkleization import hash_tree_root
let pubkeys = [
ValidatorPubKey.fromHex("0xd52edb450c9fdad41ce16d724be7b986a5422f8a791b68a370ef86045a85147cf8f7a6342034958d46a136965b622c48"),
ValidatorPubKey.fromHex("0x6f343f3c55183fc6c980e7597ac47c14b59322e22be9109e7ad8412f5b0e5c918b4e6dd60e5b98eb8d2501a94b2fb022"),
ValidatorPubKey.fromHex("0x5e40d512d91a27aa60e95fa10acb60a8a5dc6d85f2238e6418bfd4ebf44215270301f9e15564dde2c2b628fe80e7f970"),
ValidatorPubKey.fromHex("0x4ae23aea68bfd30022d4efdde1b4428f23317a70fb6df716dc16ccde96b74174c2f8cd18237bdb7ae900acbaba8cad70"),
ValidatorPubKey.fromHex("0x032fc41fa7fc1a44a1f38d73a3465974c2048bb347a9fcb261b93fc6581009d7c9870f0e1a21d619069d5250456cd5ca"),
ValidatorPubKey.fromHex("0x8cea40c0986bc0dc51b664e846a08948112987903b6ffe462b77f092dc43e752dfefaad738810c43364b2f2ed24a5988"),
ValidatorPubKey.fromHex("0xc663a799c732d544a835251935fc5be18eb365806279863877ff2f9308779106816a48be235b4b5d9dcaf42bdf1119f7"),
ValidatorPubKey.fromHex("0xc5682345f202d59614089a6fd5c2375adf8e40316bb69114474f1861c9a6791cc512c0133860353a4bb35d659f3fcd14"),
ValidatorPubKey.fromHex("0x593c3b4d962ff759945f70afa98d3d653fb4c73a2808a4f30472d972cdfd12df7535ba5ba88f3c5e8a59ff844129949f"),
ValidatorPubKey.fromHex("0xabc272512d7a861c0bc190c23cdef8d4d6b9b159d9f53aaf8834c8f521edf416b850d6c14b4c040bac7ceaa1be117e98"),
ValidatorPubKey.fromHex("0xd6dc377e866b762ab63dc2155be71bf24624855e255332dc48a175a9024e71057ad4ad351d7b5aeee944afaaff5d4e1b"),
ValidatorPubKey.fromHex("0x9af21f5d70846185023f70f7841f2f6323c27307c3e54025f103ba359c856b76d3c06f0a09b4669e4838187805253467"),
ValidatorPubKey.fromHex("0x92312221300b0707c401d3163f951babaeb4121fa7222dafebba8b8cf91928567477b4b2c249af446a759ef13d990a0c"),
ValidatorPubKey.fromHex("0x37c2731f409eafdb4bb5a1722e33cc39ab8dcf87eb7b4702aca0dcfdceea15002c1b697124eb6f1f83bd807cafb0ff43"),
ValidatorPubKey.fromHex("0xac72cfe3b2a0c549f608746fd0c3daa7195c42e05157f8d8b10bd84b1d04bff763eb6bf74620be8bcdba0ea4704630ee"),
ValidatorPubKey.fromHex("0x6cab2ab1fd15489aae21becc2cfb8923513bacce9d9773c3ad35ef7535a6e92d3a78de4d103e2ed88a818f872de331f2"),
ValidatorPubKey.fromHex("0x99138fe703da75af5571e3994e7c0b6bba06cb2a4a4978e4b41e52e06af7c1c928105bb5fae878d16934529c96883e97"),
ValidatorPubKey.fromHex("0x850c61b9bf24be2470fe0b1ead466d9b93ea4b4d41980f2f6c82eef9b526d68bf6be613b4e7653b79267829a4107dd30"),
ValidatorPubKey.fromHex("0x310ddff78f82b2ea039f6077b099f4e8e148da97d35a14140cdf5754db933034d15a58085ff91522e2722504a6ebdc87"),
ValidatorPubKey.fromHex("0x331103905b6cc0da6ef1fc2e10cb6c9feed110a5a09fed5f32f56416ea814e80961fdf81455a6483de18c40e1f3bb718"),
ValidatorPubKey.fromHex("0x8f4a32c968cb197581a3c4cec214d33736026997d1a4dc9538c932b3d859dd0547a7a06a08a9115c2c2a4fdfccaa07d2"),
ValidatorPubKey.fromHex("0xda87a0a9a300057c1f4a196f9e8947a1f461aca3be84799ac9a187c4ecb0f6450cc15e64d30b30da4f5cf2848808b9ab"),
ValidatorPubKey.fromHex("0x91e197089e1a351f0f6b1d4777c464edffac62067162133c01185074d520cbefd4e661d978cf04f9832804cb636e7a5f"),
ValidatorPubKey.fromHex("0xf0e76be22bf4afd4ea3730ef7dd0156b777e2835d828deee887881263affa33bf4685ad18fa05d09e87481a4c89c345c"),
ValidatorPubKey.fromHex("0x4a0276deca3b176cd6fe0b648f0fc418568c0c9d29d607e74e02c17852b72e636e681f4be63b0b1ad842db3efe0518c2"),
ValidatorPubKey.fromHex("0x7ad942fe106ee88c214bd5e34078b2c98849ba594a4e266a8548c1b5e44bd151135fa5a720323927c142af19fd1e74b1"),
ValidatorPubKey.fromHex("0x0648a3a4f9cf10e8f8881902549e0b7c6b207e72d5498e54503e1497ccfc03954a7440dfa0cd5ba62f80234bd99733ca"),
ValidatorPubKey.fromHex("0x5d569974f21599857609ec27e11cd2b9c007209790fe36e0cc5ff1bef0c83c07eddc84602ae04a3b803b158fa8d8a7df"),
ValidatorPubKey.fromHex("0x63290edbc38bfa204b7fd4b3fba3f677f00a54897b4c62c83ff5a1d0a905f64d2ea73ab9fa903d86c3ac8e5c91f66cc2"),
ValidatorPubKey.fromHex("0xc56363e2f8a19dcb1c9fa0b446b9c2e6a93218250df814da9566c4ceaeb116a4d60031ec60b89c23e0e911dccc301e34"),
ValidatorPubKey.fromHex("0x68c143f8c1cf0dc47345526bfd5123ed31edcbf393673352fe948107f5317ddcf8934814657879da7a1ec5782d13fdc4"),
ValidatorPubKey.fromHex("0x6e1c7d1ca0056d721a94cda0a776b68d447b1706882e04ed7ca7356d61d7d08c9c2aaf782e9c3f0c4c6e4758ca6c9228"),
ValidatorPubKey.fromHex("0x12d410ee83662b4506546e912ada2e0273f27279fdc46565d0c862e262bdbe98f91466a5bfa4e65660fd8e5a4da28543"),
ValidatorPubKey.fromHex("0x039b3ebfcc2d6f181b40da2b63d94406c440f2c32547e69560bb137a295886c3e82b7ac5aa18e14bfe080b805ae75197"),
ValidatorPubKey.fromHex("0x02875a3d83a806329b612096329959eec1a2300d9740a2c94d030dc5c99c6c0c62bd5f832b615d36cc165bc304e7a892"),
ValidatorPubKey.fromHex("0xfc0acd4ca1e1ea234b39219af5c899a743332e33a17be2dcb10bfed72e4d437fd2693ac1ae1dcec2d189a9689b0b53ff"),
ValidatorPubKey.fromHex("0x8104b3b199bf0261b1584fe269e5599266bd60cbd201205565e139fbe4a9577f48f71cebae7f7cf434cf07f66cc51ec9"),
ValidatorPubKey.fromHex("0xcfe998a8989f5318aee192e185e87a51b96aeec479d37f00cdcfafe534f514c316a10c4ba311c076cae6b192386dc25a"),
ValidatorPubKey.fromHex("0x44d7bcaebb2da8001982540c8917da5ff950750d90e5b788f2c35262b58efca64dfe3df46793380016a9d24b521c3920"),
ValidatorPubKey.fromHex("0x2b7fd53635b1effa086d6db933b65bfbca85160ed731fa8b5c77a8b726b4c5b61ff56d88d57f3e4fece8c593df18f2b3"),
ValidatorPubKey.fromHex("0x642e56b532e08e4cb75d619ed3b360ad1971584e338638b7d5716672922e513465c3fb13d26d381e7b21ffe9bc8e428f"),
ValidatorPubKey.fromHex("0x61820ec30590c9e75b06b0cc454686067fc6db1d329814aaf1a31e3e3defe50f41ee15c106e3602c4931e131032787db"),
ValidatorPubKey.fromHex("0xdc41f2c1504c90f44ba32b7e9d8e069d9c788a125f45df163c65c56cf22f5823e7614b2fcd5cec7c14a276b67e0fa7b8"),
ValidatorPubKey.fromHex("0x079d59adc0ac14e2c7397a23c3debcb080d1378ad4ac6a091daeb12f1d134c063ce4629bdf0880172017b81bed0064ec"),
ValidatorPubKey.fromHex("0x41e0b5b8befce0add67f48a9b485307105e3772aae012777c6afa62304f67a7407dd0c16b791754076549eba2b7a18a8"),
ValidatorPubKey.fromHex("0xd36e7623ae93544eaa5868e50936797bddffb2b3b66728b38f0c479f1640c60e82ad887b960e6c9340526da8a030f5b2"),
ValidatorPubKey.fromHex("0x8986816ba54e777b2c6045a805b11c08bb1f64898a6786428da9efc2ae466cb940fa3c11feacfdeeba87df9b3ce3e93f"),
ValidatorPubKey.fromHex("0x5ea844f61fe1710c19cb67e5daec1c3ba0fc203ab23598b1c9cfae6f4ab9d9f127d50d0b9cebf64d7650f66c06ca5596"),
ValidatorPubKey.fromHex("0x3e77eef77d7573362dffd75800d7554ad41f4349b3a2ab72d6fe031bf3c42bf283f985b933ac142de581079371018fdc"),
ValidatorPubKey.fromHex("0xa848afaf6d44d43e2f072bf3cf82e1ae6a8c63cda627c12d95a43e6ac4f20b8a9213a723d642c95ae2bd66bccadb8467"),
ValidatorPubKey.fromHex("0xb0b1b8582a84cbc5f43585c9d2e2b9d34f0788977f5004d6e21b12bfd5cd7165d72fba0da182f13aa44af63f9045da3e"),
ValidatorPubKey.fromHex("0x4f5517fe02d94b1eeee0a294b4f7d6064f8a3eb3fd6f31801ab7545be1dc290f26972515b23018b23efa9a812f648b6b"),
ValidatorPubKey.fromHex("0xa0f040547549deccd5cdc3a0a3a91974754fdc8177763adfc25ffb7704f8ca5e83985db3f276fadb1c113fb279720a05"),
ValidatorPubKey.fromHex("0x7dd6ae00b240244b0e49cf7538a5021e6725d3b87b909e797c7d9c6947c3b29353ff61c128ad36db66b77f197308ba04"),
ValidatorPubKey.fromHex("0xdc824ba613c5ddf2c112f0ca3bb91e6d7bfcbfd340b1e611183b8bf8c4cc37d1b843909f2c9db8353de6938834516fa2"),
ValidatorPubKey.fromHex("0xb085822d9549b0b674591015525f0846ec00ef3ff52b1107592285d0a75b757708a54fcfe5655f28473c33ae4d43ee5c"),
ValidatorPubKey.fromHex("0xab704b4be6cbbbe0f9176fd3dccbf2c0272e4f42538d5f4844a288820179f7c799d051c501e78ee3848484e1818d8456"),
ValidatorPubKey.fromHex("0x12c3c3fa284bd55ebbe82abce576c104929a909e9d78eba2f595ce42822ffe52c427ad61923f48107b1639e4bd99a45b"),
ValidatorPubKey.fromHex("0x64c86e12cdc8091c0b0e317abc073a71c96df04e1fb2235219a1289d3ce62b323fc1a226f0b298ee5596bbebabdacaf5"),
ValidatorPubKey.fromHex("0x1d5cc7e50da341a6f6931dc9fb4df6a37d21545281b9fdc2836182e2f45ff2a2a6e9181ab5d4893125fea6495fe68dd3"),
ValidatorPubKey.fromHex("0x923573206c1b1a75716339eb61f489b10d5811a280dd15333f980374ca63664741e16d911f8372ff74714ec79662683f"),
ValidatorPubKey.fromHex("0x7c1fe9a7ab8da368228a27f575cbb36aa9ce2e68d60c336184f02b985b5c13a7d09cbe315895a1da5f1f86d713f94417"),
ValidatorPubKey.fromHex("0xbb85e9cdac2db9a2dda61480082f3ed0f683db798219cdbfadac846c7b374f90a8c6784c95b53676b631152077619ee5"),
ValidatorPubKey.fromHex("0x58db99741e4c904ec1444a9c23c287eeea88de3c647c9dd9ed45e8230b7ed0bf080d546ae4597af148b69809df07e73c"),
ValidatorPubKey.fromHex("0x2208988a10feef0f7ec1550e8ef8c14c786de0bd647e5b3d10d3b884c8521af0ce59ba1a8583afe888b9348d2e1ed7d5"),
ValidatorPubKey.fromHex("0xd11cd69262896cf2a19a52928b7fcba8cd1c1661d0c938ffbfb4482283f53b44435af5695ce10fddc9315393aeda57ef"),
ValidatorPubKey.fromHex("0x4a568216203673c3f895529c194c2ca172d613e8f866dd9ee5e8db9b5b681942c7b5634c2349689a6753e1d1113d062e"),
ValidatorPubKey.fromHex("0x7ceb8add4aebaf802c3e8b37f85076a6de8c6d7007dcb92fa7b4da028a571f9dae41338b8d3f2446db4335ffbff7f083"),
ValidatorPubKey.fromHex("0xfda68482093ff5780855a139008ba695a1bd74864cb4ff72451caa5a46f8db497b44baecc93ead6aacd34c9ac92522d4"),
ValidatorPubKey.fromHex("0x8483c152bf17da7df9f3e7102d2fdd143b7649a95920263c8231ce6e80f01a849ae62064f2d03d6dcb89024d07ef9f33"),
ValidatorPubKey.fromHex("0x33ea02799800edf1c7660f1acf923f33913f2eaa89944c3b8ca4e44a2d061a1c6e4286ca92251bc0f3b11c535824aa0e"),
ValidatorPubKey.fromHex("0x46e3fdc0b5b6df3147a95ccfdfe66373bdbf96e6d5eed7306428f986778dd3b9eecb0bc5e568213b0b3faee7ce6caa79"),
ValidatorPubKey.fromHex("0xac9df2f76111be4c822a91d24a85291f55ed4ae4c574803781018360f83cc395fee9a3e56d92fc34d2f74f4dcd81c19d"),
ValidatorPubKey.fromHex("0xe6724c500b1573fee191980bdf4d8e30086bc2f2460ac775d9ceec553d4870f314fae83d04b9d9f17dc1bec64e1b5260"),
ValidatorPubKey.fromHex("0xb45d08842d2721b18d17209081b5b95ed2b9198c0dd47d16117834e1b96913071f5afe5abe53206a10103baeadbc4314"),
ValidatorPubKey.fromHex("0x8badb39dec9b9c348e4833797ac1f7fc84f7bac557d1bed58096144f48b8cda5fd8ddbe21e278f0b6d5c9aed6c90f783"),
ValidatorPubKey.fromHex("0x5fd79ebdc6f58defee05a823c9d793dfdc4b0c43ddbd1eb74c3432f59d069fe026ead5b1c925626ed9f915aee6f91247"),
ValidatorPubKey.fromHex("0x7763334ab10953dea5bffac69dea12eb53f0cd46947f04334d417223040453cfbe0f658d6f1e22a79c09807bdf3ee2c1"),
ValidatorPubKey.fromHex("0xf2df734e8b11d562900079828c2cde7dca86a2d63cf57813c67bab47fc627f1bb773d70015a486a1a2cd09b4a04c1b28"),
ValidatorPubKey.fromHex("0xd0c621f5bb524fb68aa3631b4a0629bf6bc210fe30e237d9caf8bfb476686b82eb8e8460062d187d6e2699ddc8988c0c"),
ValidatorPubKey.fromHex("0x10eb53f3ba6d355e301c785a2f204294c6a63233edee9cc135791815d086c9a8604c0d46baca6abe8c7a58e708e2106a"),
ValidatorPubKey.fromHex("0x4244a5380986232f8fb39f9396be04e6c504c3b1f87e9672d7154d09b97f0fa86cae849aac06b30ce993e00e126cf5b0"),
ValidatorPubKey.fromHex("0x2382850a411c389df2afdd2a03a6196b451893e2674d11e0b8ac6914ffa53c7a1ced201cc1390a3aa1a2879dcdfa143b"),
ValidatorPubKey.fromHex("0xa20189e31ecc6a8c2002a9dec9645aada8f01dbaa6f22f7efcc10e1de109f2528edcbe768f1baf78b8ecba189d70e28b"),
ValidatorPubKey.fromHex("0xd1f4e4ebedcc39544148157f4a5279def61a8dda08c087afbcc85e85f5fe8a244972e26077cfc1820c0c85814adfad6e"),
ValidatorPubKey.fromHex("0xf62d8f1b982babdffcc6616f8b2ac54fac5224c7a1fb66121079b9a521aff4f2ade3cd7aa40baa838e522a927179ac82"),
ValidatorPubKey.fromHex("0x7e0c87bbf88d5762dfca69732bb36525d11a755fde736f28088bc17958cb8d5745a923a56c6c0b4e98c0ffd9623f9816"),
ValidatorPubKey.fromHex("0xbf1d6ae7fd84bee92a4e22bd73b3869402504736ff5af0da6e02181ae2506a248ca4e969a82ea0304a93b6bb68d29435"),
ValidatorPubKey.fromHex("0x8ec4826fcde422ba62d222274fda595cd988d27fa0ffcbc91ab7ace22d2c9617a94ba008064a5f159801dc3b1956d96f"),
ValidatorPubKey.fromHex("0x068bee5a0d17f286962fdf71fe6e9d8b2d05f8203ecf2fbc0672003ec18a53636062dabd430715b8599f1111091417dd"),
ValidatorPubKey.fromHex("0xc0e15eadc90fbf93e2deccdd58cb13b30fea11913ca39c2ee42ddf74201dae1e84553ce8c6818d91658cb8ae97573c24"),
ValidatorPubKey.fromHex("0x5a0e0446883b0a0f09ea42faffc02ebf25407159503f5b430a216a54b1b9a4272765314c267ee2f3be8fe101208a28fd"),
ValidatorPubKey.fromHex("0xc22aa9c85a08126c371c19163c940c459a478a7391cabfb170a352faa30687ef571568d4ad327a6fe69652cd0daa33af"),
ValidatorPubKey.fromHex("0xc53c961a6977d4711914b2852ac231e6dae019ce13555e189bcae94b1786f0bb3b3e8ad173c3f029758ecbc0c0b1c6f0"),
ValidatorPubKey.fromHex("0x925aefdfeaeea3402ddd678a7069c20183fed9a11f7f866215788177ba9ae9d2914874866c2dd78f79f81495ce172352"),
ValidatorPubKey.fromHex("0x4aca00821c817196db75be87cb044f36466c65e5ea3ca90c60353b3927107bdbd8ec0775dfe8c08ea123801f4443d01b"),
ValidatorPubKey.fromHex("0xb84960b4042210498cd2ab478685a1b65e2a4e3bbf2e813440e38f38659def0e5ebe9514316f125634e23ae398fa2458"),
ValidatorPubKey.fromHex("0x3dbee79b334a30be85c82ae64331ab0bd7ce371c2b5cc734212f079209a845d0f45393bbca97ffad203e0af81af4325b"),
ValidatorPubKey.fromHex("0xfd9e33dec3e8ebeeb2ec64297ace2997dc6ecf148d98067cc3aabf2419a2788160c4d670836419672eebd663999ba53b"),
ValidatorPubKey.fromHex("0xdd9de04d992ecd5991ed84567803f2195b9c0cbbf74968e60c2272ba59f741fb07e84eefd970a0507b36ad7e4bd56e7e")]
suite "ValidatorPubKey Bloom filter":
test "one-shot construction with no false positives/negatives":
var hashedPubkeyItems = mapIt(pubkeys, HashedValidatorPubKeyItem(
key: it.get, root: hash_tree_root(it.get)))
let
hashedPubkeys = mapIt(hashedPubkeyItems, HashedValidatorPubKey(
value: unsafeAddr it))
validators = mapIt(hashedPubkeys, Validator(pubkeyData: it))
let bloomFilter = constructBloomFilter(
validators.toOpenArray(0, validators.len div 2))
for validator in validators.toOpenArray(0, validators.len div 2):
check: bloomFilter[].mightContain(validator.pubkey)
for validator in validators.toOpenArray(
validators.len div 2 + 1, validators.len - 1):
check: not bloomFilter[].mightContain(validator.pubkey)
test "incremental construction with no false positives/negatives":
let bloomFilter = constructBloomFilter([])
for pubkey in pubkeys.toOpenArray(0, pubkeys.len div 2):
incl(bloomFilter[], pubkey.get)
for pubkey in pubkeys.toOpenArray(0, pubkeys.len div 2):
check: bloomFilter[].mightContain(pubkey.get)
for pubkey in pubkeys.toOpenArray(pubkeys.len div 2 + 1, pubkeys.len - 1):
check: not bloomFilter[].mightContain(pubkey.get)
File diff suppressed because it is too large

File diff suppressed because it is too large
@ -181,6 +181,28 @@ suite "Gossip validation " & preset():
fut_1_0.waitFor().error()[0] == ValidationResult.Reject fut_1_0.waitFor().error()[0] == ValidationResult.Reject
fut_1_1.waitFor().isOk() fut_1_1.waitFor().isOk()
block:
pool[].nextAttestationEpoch.setLen(0) # reset for test
check:
att_1_0.data == att_1_1.data
beacon_committee[0] != beacon_committee[1] # Different validator
var
broken_1_0 = att_1_0
broken_1_1 = att_1_1
broken_1_0.signature = att_1_1.signature
broken_1_1.signature = att_1_0.signature
# The signatures were swapped and no longer match their pubkeys;
# the individual attestations are invalid but their aggregate validates!
let
fut_1_0 = validateAttestation(
pool, batchCrypto, broken_1_0, beaconTime, subnet, true)
fut_1_1 = validateAttestation(
pool, batchCrypto, broken_1_1, beaconTime, subnet, true)
check:
fut_1_0.waitFor().error()[0] == ValidationResult.Reject
fut_1_1.waitFor().error()[0] == ValidationResult.Reject
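Editor's note, not part of the diff: the comment above holds because BLS aggregation is addition in the signature group. Writing σ1 and σ2 for the two original signatures, both attestations carry identical data (checked above) and each original signature is valid for its own signer, so swapping them changes each individual attestation/signature pair but not their sum:

\[ \mathrm{agg}(\sigma_2, \sigma_1) = \sigma_2 + \sigma_1 = \sigma_1 + \sigma_2 = \mathrm{agg}(\sigma_1, \sigma_2) \]

The aggregate therefore still verifies against the combined public key, which is why the test insists that each swapped attestation is rejected on its own rather than relying on an aggregate-level signature check.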
suite "Gossip validation - Altair": suite "Gossip validation - Altair":
let cfg = block: let cfg = block:
var res = defaultRuntimeConfig var res = defaultRuntimeConfig
@ -298,8 +298,7 @@ proc startBeaconNode(basePort: int) {.raises: [CatchableError].} =
"--keymanager-port=" & $(basePort + PortKind.KeymanagerBN.ord), "--keymanager-port=" & $(basePort + PortKind.KeymanagerBN.ord),
"--keymanager-token-file=" & tokenFilePath, "--keymanager-token-file=" & tokenFilePath,
"--suggested-fee-recipient=" & $defaultFeeRecipient, "--suggested-fee-recipient=" & $defaultFeeRecipient,
"--doppelganger-detection=off", "--doppelganger-detection=off"], it))
"--debug-forkchoice-version=stable"], it))
except Exception as exc: # TODO fix confutils exceptions except Exception as exc: # TODO fix confutils exceptions
raiseAssert exc.msg raiseAssert exc.msg
Some files were not shown because too many files have changed in this diff