2022-02-17 11:53:55 +00:00
|
|
|
# beacon_chain
|
2024-01-06 14:26:56 +00:00
|
|
|
# Copyright (c) 2022-2024 Status Research & Development GmbH
|
2022-02-17 11:53:55 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2024-02-19 09:56:19 +00:00
|
|
|
{.push raises: [].}
|
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
import
|
2022-10-03 21:43:40 +00:00
|
|
|
std/[os, strutils],
|
2022-01-23 19:41:29 +00:00
|
|
|
stew/bitops2,
|
2022-01-17 12:58:33 +00:00
|
|
|
../beacon_chain/spec/[
|
|
|
|
datatypes/base,
|
|
|
|
datatypes/phase0,
|
|
|
|
datatypes/altair,
|
2022-01-18 13:36:52 +00:00
|
|
|
datatypes/bellatrix,
|
2022-01-17 12:58:33 +00:00
|
|
|
beaconstate,
|
|
|
|
state_transition_epoch,
|
|
|
|
state_transition_block,
|
|
|
|
signatures],
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
../beacon_chain/consensus_object_pools/blockchain_dag
|
2022-01-17 12:58:33 +00:00
|
|
|
|
2022-11-02 16:23:30 +00:00
|
|
|
from ../beacon_chain/spec/datatypes/capella import BeaconState
|
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
type
  RewardsAndPenalties* = object
    ## Per-validator tally of reward/penalty components, filled in by the
    ## `collect*` procs below. `*_outcome` fields hold the actually accrued
    ## (signed) amount; the matching `max_*` fields hold the best-case reward
    ## for the same component.
    source_outcome*: int64             # net source-vote reward minus penalty
    max_source_reward*: Gwei           # maximum attainable source reward
    target_outcome*: int64             # net target-vote reward minus penalty
    max_target_reward*: Gwei           # maximum attainable target reward
    head_outcome*: int64               # net head-vote reward minus penalty
    max_head_reward*: Gwei             # maximum attainable head reward
    inclusion_delay_outcome*: int64    # net inclusion-delay reward (set in the phase0 path only)
    max_inclusion_delay_reward*: Gwei  # maximum attainable inclusion-delay reward
    sync_committee_outcome*: int64     # net sync-committee participation outcome (Altair+)
    max_sync_committee_reward*: Gwei   # maximum attainable sync-committee reward
    proposer_outcome*: int64           # rewards accrued while acting as block proposer
    inactivity_penalty*: Gwei          # accumulated inactivity-leak penalty
    slashing_outcome*: int64           # slashing penalties and whistleblower rewards
    deposits*: Gwei                    # deposit amounts credited from processed blocks
    inclusion_delay*: Option[uint64]   # slots between attestation and its inclusion,
                                       # present only if an attestation was included

  ParticipationFlags* = object
    ## Snapshot of a state's epoch participation flags (Altair and later),
    ## captured by `copyParticipationFlags` so per-attestation proposer
    ## rewards can be recomputed against the pre-block participation.
    currentEpochParticipation: EpochParticipationFlags
    previousEpochParticipation: EpochParticipationFlags

  # Maps deposit pubkeys to provisional validator indices for deposits whose
  # validator is not yet in the state (see `collectFromDeposits`).
  PubkeyToIndexTable = Table[ValidatorPubKey, ValidatorIndex]

  AuxiliaryState* = object
    ## Extra bookkeeping carried across the per-block collection passes.
    epochParticipationFlags: ParticipationFlags
    pubkeyToIndex: PubkeyToIndexTable
|
|
|
|
|
|
|
|
const
  # Fixed width of the zero-padded epoch number in epoch-info file names,
  # e.g. "00000042.epoch".
  epochInfoFileNameDigitsCount = 8
  # Extension shared by unaggregated ("NNNNNNNN.epoch") and aggregated
  # ("NNNNNNNN_NNNNNNNN.epoch") epoch-info files.
  epochFileNameExtension = ".epoch"
|
2022-01-17 12:58:33 +00:00
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func copyParticipationFlags*(auxiliaryState: var AuxiliaryState,
                             forkedState: ForkedHashedBeaconState) =
  ## Snapshot the state's current/previous epoch participation flags into
  ## `auxiliaryState`, so proposer rewards can later be recomputed against
  ## the pre-block participation (see `collectFromAttestations`).
  ## Phase0 states have no participation flags, so this is a no-op there.
  withState(forkedState):
    when consensusFork > ConsensusFork.Phase0:
      template flags: untyped = auxiliaryState.epochParticipationFlags
      flags.currentEpochParticipation =
        forkyState.data.current_epoch_participation
      flags.previousEpochParticipation =
        forkyState.data.previous_epoch_participation
|
2022-01-17 12:58:33 +00:00
|
|
|
|
2022-10-03 21:43:40 +00:00
|
|
|
from std/sequtils import allIt
|
|
|
|
|
|
|
|
func matchFilenameUnaggregatedFiles(filename: string): bool =
  ## True iff `filename` has the unaggregated epoch-info shape:
  ## exactly `epochInfoFileNameDigitsCount` decimal digits followed by
  ## `epochFileNameExtension` (e.g. "00000042.epoch").
  const expectedLen =
    epochInfoFileNameDigitsCount + epochFileNameExtension.len
  if filename.len != expectedLen:
    return false
  if not filename.endsWith(epochFileNameExtension):
    return false
  # TODO should use toOpenArray, but
  # https://github.com/nim-lang/Nim/issues/15952
  # https://github.com/nim-lang/Nim/issues/19969
  for ch in filename[0 ..< epochInfoFileNameDigitsCount]:
    if not ch.isDigit:
      return false
  true
|
|
|
|
|
|
|
|
static:
  # Compile-time sanity checks: the predicate accepts well-formed
  # unaggregated names and rejects aggregated names and malformed inputs.
  for filename in [
      "00000000.epoch", "00243929.epoch", "04957024.epoch", "39820353.epoch",
      "82829191.epoch", "85740966.epoch", "93321944.epoch", "98928899.epoch"]:
    doAssert filename.matchFilenameUnaggregatedFiles

  for filename in [
      # Valid aggregated, not unaggregated
      "03820350_13372742.epoch", "04117778_69588614.epoch",
      "25249017_64218993.epoch", "34265267_41589365.epoch",
      "57926659_59282297.epoch", "67699314_92835461.epoch",

      "0000000.epoch",    # Too short
      "000000000.epoch",  # Too long
      "75787x73.epoch",   # Incorrect number format
      "00000000.ecpoh"]:  # Wrong extension
    doAssert not filename.matchFilenameUnaggregatedFiles
|
|
|
|
|
|
|
|
func matchFilenameAggregatedFiles(filename: string): bool =
  ## True iff `filename` has the aggregated epoch-info shape:
  ## two `epochInfoFileNameDigitsCount`-digit decimal fields joined by '_'
  ## and followed by `epochFileNameExtension`
  ## (e.g. "00000042_00000050.epoch").
  const
    digits = epochInfoFileNameDigitsCount
    expectedLen = 2 * digits + "_".len + epochFileNameExtension.len
  if filename.len != expectedLen:
    return false
  if not filename.endsWith(epochFileNameExtension):
    return false
  if filename[digits] != '_':
    return false
  # TODO should use toOpenArray, but
  # https://github.com/nim-lang/Nim/issues/15952
  # https://github.com/nim-lang/Nim/issues/19969
  for i in 0 ..< digits:
    if not filename[i].isDigit:
      return false
  for i in digits + 1 ..< 2 * digits + 1:
    if not filename[i].isDigit:
      return false
  true
|
|
|
|
|
|
|
|
static:
  # Compile-time sanity checks: the predicate accepts well-formed aggregated
  # names and rejects unaggregated names and malformed inputs.
  for filename in [
      "03820350_13372742.epoch", "04117778_69588614.epoch",
      "25249017_64218993.epoch", "34265267_41589365.epoch",
      "57926659_59282297.epoch", "67699314_92835461.epoch"]:
    doAssert filename.matchFilenameAggregatedFiles

  for filename in [
      # Valid unaggregated, not aggregated
      "00000000.epoch", "00243929.epoch", "04957024.epoch", "39820353.epoch",
      "82829191.epoch", "85740966.epoch", "93321944.epoch", "98928899.epoch",

      "00000000_0000000.epoch",   # Too short
      "31x85971_93149672.epoch",  # Incorrect number format, first field
      "18049105&72034596.epoch",  # No underscore separator
      "31485971_931496x2.epoch",  # Incorrect number format, second field
      "15227487_86601706.echop"]: # Wrong extension
    doAssert not filename.matchFilenameAggregatedFiles
|
|
|
|
|
2024-02-19 09:56:19 +00:00
|
|
|
proc getUnaggregatedFilesEpochRange*(
    dir: string
): tuple[firstEpoch, lastEpoch: Epoch] {.raises: [OSError, ValueError].} =
  ## Scan `dir` for unaggregated epoch-info files ("NNNNNNNN.epoch") and
  ## return the smallest and largest epoch numbers found.
  ##
  ## NOTE(review): when `dir` contains no matching file, the sentinels are
  ## returned unchanged, i.e. `firstEpoch` (99999999) > `lastEpoch` (0) —
  ## callers are expected to handle or avoid the empty case.
  # Sentinels: lexicographically maximal/minimal valid names, so any real
  # match replaces them. Fixed-width zero padding makes lexicographic and
  # numeric order agree.
  var smallestEpochFileName =
    '9'.repeat(epochInfoFileNameDigitsCount) & epochFileNameExtension
  var largestEpochFileName =
    '0'.repeat(epochInfoFileNameDigitsCount) & epochFileNameExtension
  # `dir` is already a string; the previous `dir.string` was a redundant
  # identity conversion.
  for (_, fn) in walkDir(dir, relative = true):
    if fn.matchFilenameUnaggregatedFiles:
      if fn < smallestEpochFileName:
        smallestEpochFileName = fn
      if fn > largestEpochFileName:
        largestEpochFileName = fn
  result.firstEpoch = parseUInt(
    smallestEpochFileName[0 ..< epochInfoFileNameDigitsCount]).Epoch
  result.lastEpoch = parseUInt(
    largestEpochFileName[0 ..< epochInfoFileNameDigitsCount]).Epoch
|
|
|
|
|
2024-02-19 09:56:19 +00:00
|
|
|
proc getUnaggregatedFilesLastEpoch*(
    dir: string): Epoch {.raises: [OSError, ValueError].} =
  ## Convenience wrapper: the `lastEpoch` half of
  ## `getUnaggregatedFilesEpochRange`.
  let (_, lastEpoch) = getUnaggregatedFilesEpochRange(dir)
  lastEpoch
|
|
|
|
|
2024-02-19 09:56:19 +00:00
|
|
|
proc getAggregatedFilesLastEpoch*(
    dir: string): Epoch {.raises: [OSError, ValueError].} =
  ## Return the largest end-epoch among aggregated epoch-info files
  ## ("NNNNNNNN_NNNNNNNN.epoch") in `dir`, or Epoch(0) when none match.
  var largestEpochInFileName = 0'u
  # `dir` is already a string; the previous `dir.string` was a redundant
  # identity conversion.
  for (_, fn) in walkDir(dir, relative = true):
    if fn.matchFilenameAggregatedFiles:
      # The second number field is the last epoch covered by the file.
      let fileLastEpoch = parseUInt(
        fn[epochInfoFileNameDigitsCount + 1 .. 2 * epochInfoFileNameDigitsCount])
      if fileLastEpoch > largestEpochInFileName:
        largestEpochInFileName = fileLastEpoch
  largestEpochInFileName.Epoch
|
2022-01-31 12:06:16 +00:00
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func epochAsString*(epoch: Epoch): string =
  ## Zero-pad the decimal epoch number to `epochInfoFileNameDigitsCount`
  ## characters, e.g. Epoch(42) -> "00000042".
  # `align` left-pads with '0'; unlike the previous
  # `'0'.repeat(count - len) & strEpoch` formulation it does not raise a
  # RangeDefect (negative `repeat` count) when the rendered epoch is wider
  # than the fixed field — it simply returns the string unpadded.
  # Assumes `$epoch` renders plain decimal digits (the names produced here
  # are parsed back with `parseUInt` elsewhere in this file).
  ($epoch).align(epochInfoFileNameDigitsCount, '0')
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func getFilePathForEpoch*(epoch: Epoch, dir: string): string =
  ## Path of the unaggregated epoch-info file for `epoch` inside `dir`.
  let paddedEpoch = epochAsString(epoch)
  (dir / paddedEpoch) & epochFileNameExtension
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func getFilePathForEpochs*(startEpoch, endEpoch: Epoch, dir: string): string =
  ## Path of the aggregated epoch-info file covering
  ## `startEpoch .. endEpoch` inside `dir`.
  dir / (epochAsString(startEpoch) & "_" &
         epochAsString(endEpoch) & epochFileNameExtension)
|
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
func getBlockRange*(dag: ChainDAGRef, start, ends: Slot): seq[BlockId] =
  ## Collect the block ids for slots in `start ..< ends`, in reverse slot
  ## order. Empty slots contribute nothing, and slots whose id cannot be
  ## resolved are skipped.
  # Range of block in reverse order
  doAssert start < ends
  result = newSeqOfCap[BlockId](ends - start)
  var current = ends
  while current > start:
    current -= 1
    let bsid = dag.getBlockIdAtSlot(current).valueOr:
      continue  # no id known at this slot — keep walking backwards

    if bsid.bid.slot < start: # current might be empty
      break

    result.add(bsid.bid)
    current = bsid.bid.slot # skip empty slots
|
2022-01-17 12:58:33 +00:00
|
|
|
|
|
|
|
func getOutcome(delta: RewardDelta): int64 =
  ## Net effect of a delta: rewards minus penalties, as a signed value.
  int64(delta.rewards) - int64(delta.penalties)
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func collectSlashings(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    state: ForkyBeaconState, total_balance: Gwei) =
  ## Accumulate the epoch's slashing penalty into `slashing_outcome` for every
  ## validator to whom the penalty applies at the current epoch.
  ## NOTE(review): the penalty is added with a positive sign here, whereas
  ## `collectFromSlashedValidator` subtracts — confirm the intended sign
  ## convention for `slashing_outcome`.
  let
    epoch = get_current_epoch(state)
    adjusted_total_slashing_balance = get_adjusted_total_slashing_balance(
      state, total_balance)

  for index in 0 ..< state.validators.len:
    # unsafeAddr avoids copying the validator record on each iteration.
    let validator = unsafeAddr state.validators[index]
    if slashing_penalty_applies(validator[], epoch):
      rewardsAndPenalties[index].slashing_outcome +=
        validator[].get_slashing_penalty(
          adjusted_total_slashing_balance, total_balance).int64
|
|
|
|
|
2022-02-22 12:14:17 +00:00
|
|
|
proc collectEpochRewardsAndPenalties*(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    state: var phase0.BeaconState, cache: var StateCache, cfg: RuntimeConfig,
    flags: UpdateFlags) =
  ## Phase0 variant: fill `rewardsAndPenalties` (resized to the validator
  ## count) with per-component epoch rewards/penalties for `state`'s current
  ## epoch, then add slashing penalties via `collectSlashings`.
  ## Skips the genesis epoch, where no epoch processing applies.
  if get_current_epoch(state) == GENESIS_EPOCH:
    return

  var info: phase0.EpochInfo

  info.init(state)
  info.process_attestations(state, cache)
  doAssert info.validators.len == state.validators.len
  rewardsAndPenalties.setLen(state.validators.len)

  # Advance justification/finalization so finality_delay below reflects the
  # epoch being processed.
  process_justification_and_finalization(state, info.balances, flags)

  let
    finality_delay = get_finality_delay(state)
    total_balance = info.balances.current_epoch
    total_balance_sqrt = integer_squareroot(total_balance)

  for index, validator in info.validators:
    if not is_eligible_validator(validator):
      continue

    let base_reward = get_base_reward_sqrt(
      state, index.ValidatorIndex, total_balance_sqrt)

    # Best-case reward for one attestation component, given the attesting
    # balance for that component.
    template get_attestation_component_reward_helper(attesting_balance: Gwei): Gwei =
      get_attestation_component_reward(attesting_balance,
        info.balances.current_epoch, base_reward.uint64, finality_delay)

    template rp: untyped = rewardsAndPenalties[index]

    # Source / target / head: actual (signed) outcome plus best-case reward.
    rp.source_outcome = get_source_delta(
      validator, base_reward, info.balances, finality_delay).getOutcome
    rp.max_source_reward = get_attestation_component_reward_helper(
      info.balances.previous_epoch_attesters)

    rp.target_outcome = get_target_delta(
      validator, base_reward, info.balances, finality_delay).getOutcome
    rp.max_target_reward = get_attestation_component_reward_helper(
      info.balances.previous_epoch_target_attesters)

    rp.head_outcome = get_head_delta(
      validator, base_reward, info.balances, finality_delay).getOutcome
    rp.max_head_reward = get_attestation_component_reward_helper(
      info.balances.previous_epoch_head_attesters)

    # Inclusion delay: attester part goes to this validator; the proposer
    # part is credited to the including proposer below.
    let (inclusion_delay_delta, proposer_delta) = get_inclusion_delay_delta(
      validator, base_reward)
    rp.inclusion_delay_outcome = inclusion_delay_delta.getOutcome
    rp.max_inclusion_delay_reward =
      base_reward - state_transition_epoch.get_proposer_reward(base_reward)

    rp.inactivity_penalty = get_inactivity_penalty_delta(
      validator, base_reward, finality_delay).penalties

    if proposer_delta.isSome:
      let proposer_index = proposer_delta.get[0]
      if proposer_index < info.validators.lenu64:
        rewardsAndPenalties[proposer_index].proposer_outcome +=
          proposer_delta.get[1].getOutcome

  rewardsAndPenalties.collectSlashings(state, info.balances.current_epoch)
|
|
|
|
|
2022-02-22 12:14:17 +00:00
|
|
|
proc collectEpochRewardsAndPenalties*(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    state: var (altair.BeaconState | bellatrix.BeaconState |
                capella.BeaconState | deneb.BeaconState),
    cache: var StateCache, cfg: RuntimeConfig, flags: UpdateFlags) =
  ## Altair-and-later variant: fill `rewardsAndPenalties` (resized to the
  ## validator count) from the participation-flag deltas for `state`'s
  ## current epoch, then add slashing penalties via `collectSlashings`.
  ## Skips the genesis epoch, where no epoch processing applies.
  if get_current_epoch(state) == GENESIS_EPOCH:
    return

  var info: altair.EpochInfo
  info.init(state)
  doAssert info.validators.len == state.validators.len
  rewardsAndPenalties.setLen(state.validators.len)

  # Advance justification/finalization and inactivity scores so the deltas
  # below reflect the epoch being processed.
  process_justification_and_finalization(state, info.balances, flags)
  process_inactivity_updates(cfg, state, info)

  let
    total_active_balance = info.balances.current_epoch
    base_reward_per_increment = get_base_reward_per_increment(
      total_active_balance)
    finality_delay = get_finality_delay(state)

  for validator_index, reward_source, reward_target, reward_head,
      penalty_source, penalty_target, penalty_inactivity in
      get_flag_and_inactivity_deltas(
        cfg, state, base_reward_per_increment, info, finality_delay):
    template rp: untyped = rewardsAndPenalties[validator_index]

    let
      base_reward = get_base_reward_increment(
        state, validator_index, base_reward_per_increment)
      # NOTE(review): loop-invariant; could be hoisted out of the loop.
      active_increments = get_active_increments(info)

    template unslashed_participating_increment(flag_index: untyped): untyped =
      get_unslashed_participating_increment(info, flag_index)
    # Best-case reward for one participation flag.
    template max_flag_index_reward(flag_index: untyped): untyped =
      get_flag_index_reward(
        state, base_reward, active_increments,
        unslashed_participating_increment(flag_index),
        PARTICIPATION_FLAG_WEIGHTS[flag_index], finality_delay)

    rp.source_outcome = reward_source.int64 - penalty_source.int64
    rp.max_source_reward =
      max_flag_index_reward(TimelyFlag.TIMELY_SOURCE_FLAG_INDEX)
    rp.target_outcome = reward_target.int64 - penalty_target.int64
    rp.max_target_reward =
      max_flag_index_reward(TimelyFlag.TIMELY_TARGET_FLAG_INDEX)
    # The deltas iterator supplies no head penalty term — only a reward.
    rp.head_outcome = reward_head.int64
    rp.max_head_reward =
      max_flag_index_reward(TimelyFlag.TIMELY_HEAD_FLAG_INDEX)

    rewardsAndPenalties[validator_index].inactivity_penalty +=
      penalty_inactivity

  rewardsAndPenalties.collectSlashings(state, info.balances.current_epoch)
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func collectFromSlashedValidator(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    state: ForkyBeaconState, slashedIndex, proposerIndex: ValidatorIndex) =
  ## Record the effects of one slashing: the slashed validator loses the
  ## slashing penalty, and the proposer who included the slashing is
  ## credited the whistleblower reward.
  template slashed_validator: untyped = state.validators[slashedIndex]
  let slashingPenalty = get_slashing_penalty(state, slashed_validator.effective_balance)
  let whistleblowerReward = get_whistleblower_reward(slashed_validator.effective_balance)
  rewardsAndPenalties[slashedIndex].slashing_outcome -= slashingPenalty.int64
  rewardsAndPenalties[proposerIndex].slashing_outcome += whistleblowerReward.int64
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func collectFromProposerSlashings(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    forkedState: ForkedHashedBeaconState,
    forkedBlock: ForkedTrustedSignedBeaconBlock) =
  ## For each proposer slashing in `forkedBlock`, debit the slashed proposer
  ## and credit the including proposer (via `collectFromSlashedValidator`).
  withStateAndBlck(forkedState, forkedBlock):
    for proposer_slashing in forkyBlck.message.body.proposer_slashings:
      # Blocks come from the canonical chain, so the slashing must be valid.
      doAssert check_proposer_slashing(
        forkyState.data, proposer_slashing, {}).isOk
      let slashedIndex =
        proposer_slashing.signed_header_1.message.proposer_index
      rewardsAndPenalties.collectFromSlashedValidator(
        forkyState.data, slashedIndex.ValidatorIndex,
        forkyBlck.message.proposer_index.ValidatorIndex)
|
2022-01-17 12:58:33 +00:00
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func collectFromAttesterSlashings(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    forkedState: ForkedHashedBeaconState,
    forkedBlock: ForkedTrustedSignedBeaconBlock) =
  ## For each attester slashing in `forkedBlock`, debit every slashed
  ## attester and credit the including proposer
  ## (via `collectFromSlashedValidator`).
  withStateAndBlck(forkedState, forkedBlock):
    for attester_slashing in forkyBlck.message.body.attester_slashings:
      let attester_slashing_validity = check_attester_slashing(
        forkyState.data, attester_slashing, {})
      # Blocks come from the canonical chain, so the slashing must be valid.
      doAssert attester_slashing_validity.isOk
      for slashedIndex in attester_slashing_validity.value:
        rewardsAndPenalties.collectFromSlashedValidator(
          forkyState.data, slashedIndex,
          forkyBlck.message.proposer_index.ValidatorIndex)
|
2022-01-17 12:58:33 +00:00
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func collectFromAttestations(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    forkedState: ForkedHashedBeaconState,
    forkedBlock: ForkedTrustedSignedBeaconBlock,
    epochParticipationFlags: var ParticipationFlags,
    cache: var StateCache) =
  ## Credit the block proposer for each included attestation and record
  ## every attester's inclusion delay. Uses the pre-block participation
  ## snapshot in `epochParticipationFlags` so the proposer reward reflects
  ## only newly-set flags. Altair+ only; phase0 blocks are a no-op here.
  withStateAndBlck(forkedState, forkedBlock):
    when consensusFork > ConsensusFork.Phase0:
      let base_reward_per_increment = get_base_reward_per_increment(
        get_total_active_balance(forkyState.data, cache))
      doAssert base_reward_per_increment > 0
      for attestation in forkyBlck.message.body.attestations:
        # Blocks come from the canonical chain, so each attestation must be
        # valid against the pre-state.
        doAssert check_attestation(
          forkyState.data, attestation, {}, cache).isOk
        # Pick the participation snapshot matching the attestation's target
        # epoch (current vs previous).
        let proposerReward =
          if attestation.data.target.epoch == get_current_epoch(forkyState.data):
            get_proposer_reward(
              forkyState.data, attestation, base_reward_per_increment, cache,
              epochParticipationFlags.currentEpochParticipation)
          else:
            get_proposer_reward(
              forkyState.data, attestation, base_reward_per_increment, cache,
              epochParticipationFlags.previousEpochParticipation)
        rewardsAndPenalties[forkyBlck.message.proposer_index]
          .proposer_outcome += proposerReward.int64
        let inclusionDelay = forkyState.data.slot - attestation.data.slot
        for index in get_attesting_indices(
            forkyState.data, attestation.data, attestation.aggregation_bits,
            cache):
          rewardsAndPenalties[index].inclusion_delay = some(inclusionDelay.uint64)
|
|
|
|
|
|
|
|
proc collectFromDeposits(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    forkedState: ForkedHashedBeaconState,
    forkedBlock: ForkedTrustedSignedBeaconBlock,
    pubkeyToIndex: var PubkeyToIndexTable,
    cfg: RuntimeConfig) =
  ## Credit deposit amounts to the matching validator. Deposits for pubkeys
  ## not yet in the state get a provisional entry appended to
  ## `rewardsAndPenalties` and are tracked in `pubkeyToIndex` so a second
  ## deposit for the same pubkey within this block is credited to the same
  ## entry (the caller clears the table after each block).
  withStateAndBlck(forkedState, forkedBlock):
    for deposit in forkyBlck.message.body.deposits:
      let pubkey = deposit.data.pubkey
      let amount = deposit.data.amount
      var index = findValidatorIndex(forkyState.data, pubkey)
      if index.isNone:
        # Not in the state — maybe an earlier deposit in this block created
        # a provisional entry for this pubkey.
        if pubkey in pubkeyToIndex:
          try:
            index = Opt[ValidatorIndex].ok(pubkeyToIndex[pubkey])
          except KeyError as e:
            raiseAssert "pubkey was checked to exist: " & e.msg
      if index.isSome:
        # NOTE(review): seq indexing raises IndexDefect, not KeyError —
        # this handler looks vestigial; confirm.
        try:
          rewardsAndPenalties[index.get()].deposits += amount
        except KeyError as e:
          raiseAssert "rewardsAndPenalties lacks expected index " & $index.get()
      elif verify_deposit_signature(cfg, deposit.data):
        # First valid deposit for a brand-new pubkey: append a provisional
        # entry; invalidly-signed deposits are ignored, as in the spec.
        pubkeyToIndex[pubkey] = ValidatorIndex(rewardsAndPenalties.len)
        rewardsAndPenalties.add(
          RewardsAndPenalties(deposits: amount))
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func collectFromSyncAggregate(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    forkedState: ForkedHashedBeaconState,
    forkedBlock: ForkedTrustedSignedBeaconBlock,
    cache: var StateCache) =
  ## Distribute sync-committee rewards/penalties for the block's sync
  ## aggregate: participating members and the proposer are credited,
  ## non-participating members are debited. Every member's best-case reward
  ## is accumulated regardless of participation. Altair+ only; phase0 blocks
  ## are a no-op here.
  withStateAndBlck(forkedState, forkedBlock):
    when consensusFork > ConsensusFork.Phase0:
      let
        total_active_balance = get_total_active_balance(forkyState.data, cache)
        participant_reward = get_participant_reward(total_active_balance)
        proposer_reward =
          state_transition_block.get_proposer_reward(participant_reward)
        indices = get_sync_committee_cache(
          forkyState.data, cache).current_sync_committee

      template aggregate: untyped = forkyBlck.message.body.sync_aggregate

      doAssert indices.len == SYNC_COMMITTEE_SIZE
      doAssert aggregate.sync_committee_bits.len == SYNC_COMMITTEE_SIZE
      doAssert forkyState.data.current_sync_committee.pubkeys.len ==
        SYNC_COMMITTEE_SIZE

      for i in 0 ..< SYNC_COMMITTEE_SIZE:
        # Best-case reward accrues for every committee slot.
        rewardsAndPenalties[indices[i]].max_sync_committee_reward +=
          participant_reward
        if aggregate.sync_committee_bits[i]:
          rewardsAndPenalties[indices[i]].sync_committee_outcome +=
            participant_reward.int64
          # Proposer is paid per participating bit.
          rewardsAndPenalties[forkyBlck.message.proposer_index]
            .proposer_outcome += proposer_reward.int64
        else:
          rewardsAndPenalties[indices[i]].sync_committee_outcome -=
            participant_reward.int64
|
|
|
|
|
|
|
|
proc collectBlockRewardsAndPenalties*(
    rewardsAndPenalties: var seq[RewardsAndPenalties],
    forkedState: ForkedHashedBeaconState,
    forkedBlock: ForkedTrustedSignedBeaconBlock,
    auxiliaryState: var AuxiliaryState,
    cache: var StateCache, cfg: RuntimeConfig) =
  ## Run all per-block collection passes for one block: proposer and
  ## attester slashings, attestations (using the participation snapshot in
  ## `auxiliaryState`), deposits, and the sync aggregate.
  rewardsAndPenalties.collectFromProposerSlashings(forkedState, forkedBlock)
  rewardsAndPenalties.collectFromAttesterSlashings(forkedState, forkedBlock)
  rewardsAndPenalties.collectFromAttestations(
    forkedState, forkedBlock, auxiliaryState.epochParticipationFlags, cache)
  rewardsAndPenalties.collectFromDeposits(
    forkedState, forkedBlock, auxiliaryState.pubkeyToIndex, cfg)
  # This table is needed only to resolve double deposits in the same block, so
  # it can be cleared after processing all deposits for the current block.
  auxiliaryState.pubkeyToIndex.clear
  rewardsAndPenalties.collectFromSyncAggregate(forkedState, forkedBlock, cache)
|
|
|
|
|
|
|
|
func serializeToCsv*(rp: RewardsAndPenalties,
                     avgInclusionDelay = none(float)): string =
  ## Render `rp` as one newline-terminated CSV row: every non-Option field
  ## in declaration order, followed by either `avgInclusionDelay` (when
  ## given) or the record's own inclusion delay (when present).
  for fieldName, fieldValue in fieldPairs(rp):
    if fieldValue isnot Option:
      result.add $fieldValue
      result.add ','
  if avgInclusionDelay.isSome:
    result.addFloat avgInclusionDelay.get
  elif rp.inclusion_delay.isSome:
    result.add $rp.inclusion_delay.get
  result.add '\n'
|