Merge branch 'unstable' into dev/etan/zf-branchpull
commit c5b4ae493a
@@ -459,8 +459,9 @@ OK: 5/5 Fail: 0/5 Skip: 0/5
 + Roundtrip engine RPC V1 and bellatrix ExecutionPayload representations OK
 + Roundtrip engine RPC V2 and capella ExecutionPayload representations OK
 + Roundtrip engine RPC V3 and deneb ExecutionPayload representations OK
++ Roundtrip engine RPC V4 and electra ExecutionPayload representations OK
 ```
-OK: 5/5 Fail: 0/5 Skip: 0/5
+OK: 6/6 Fail: 0/6 Skip: 0/6
 ## Eth2 specific discovery tests
 ```diff
 + Invalid attnets field OK
@@ -1018,4 +1019,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 9/9 Fail: 0/9 Skip: 0/9
 
 ---TOTAL---
-OK: 683/688 Fail: 0/688 Skip: 5/688
+OK: 684/689 Fail: 0/689 Skip: 5/689
CHANGELOG.md (37 changed lines)
@@ -1,3 +1,40 @@
+2024-03-29 v24.3.0
+==================
+
+Nimbus `v24.3.0` is a `low-urgency` upgrade bringing additional beacon API support and resilience to suboptimal network conditions.
+
+### Improvements
+
+* Add keymanager API graffiti endpoints:
+  https://github.com/status-im/nimbus-eth2/pull/6054
+
+* Remember gossip messages longer to avoid potentially slow handling of irrelevant messages:
+  https://github.com/status-im/nimbus-eth2/pull/6098
+
+* Nimbus processes blocks with deposits in a more optimized way:
+  https://github.com/status-im/nimbus-eth2/pull/5982
+
+* Fork choice performance during periods of nonfinality has been improved:
+  https://github.com/status-im/nimbus-eth2/pull/6076
+
+* Nimbus will continue validating even without external chain progression:
+  https://github.com/status-im/nimbus-eth2/pull/6101
+
+* Locally built blocks via the engine API are preferentially selected by default over similarly valuable builder API blocks:
+  https://github.com/status-im/nimbus-eth2/pull/6103
+
+### Fixes
+
+* Add the required `eth-consensus-block-value` header to produceBlockV3 REST beacon API responses in the beacon node:
+  https://github.com/status-im/nimbus-eth2/pull/5873
+
+* Restore usage of certain mainnet bootstrap nodes to enable faster and more reliable node connectivity at startup:
+  https://github.com/status-im/nimbus-eth2/pull/6052
+
+* The network configuration `INACTIVITY_SCORE_RECOVERY_RATE` can now be overridden:
+  https://github.com/status-im/nimbus-eth2/pull/6091
+
+
2024-02-27 v24.2.2
==================
@@ -482,6 +482,54 @@ func asConsensusType*(payload: engine_api.GetPayloadV3Response):
      blobs: Blobs.init(
        payload.blobsBundle.blobs.mapIt(it.bytes))))

+func asConsensusType*(rpcExecutionPayload: ExecutionPayloadV4):
+    electra.ExecutionPayload =
+  template getTransaction(tt: TypedTransaction): bellatrix.Transaction =
+    bellatrix.Transaction.init(tt.distinctBase)
+
+  template getDepositReceipt(dr: DepositReceiptV1): DepositReceipt =
+    DepositReceipt(
+      pubkey: ValidatorPubKey(blob: dr.pubkey.distinctBase),
+      withdrawal_credentials: dr.withdrawalCredentials.asEth2Digest,
+      amount: dr.amount.Gwei,
+      signature: ValidatorSig(blob: dr.signature.distinctBase),
+      index: dr.index.uint64)
+
+  template getExecutionLayerExit(ele: ExitV1): ExecutionLayerExit =
+    ExecutionLayerExit(
+      source_address: ExecutionAddress(data: ele.sourceAddress.distinctBase),
+      validator_pubkey: ValidatorPubKey(
+        blob: ele.validatorPublicKey.distinctBase))
+
+  electra.ExecutionPayload(
+    parent_hash: rpcExecutionPayload.parentHash.asEth2Digest,
+    fee_recipient:
+      ExecutionAddress(data: rpcExecutionPayload.feeRecipient.distinctBase),
+    state_root: rpcExecutionPayload.stateRoot.asEth2Digest,
+    receipts_root: rpcExecutionPayload.receiptsRoot.asEth2Digest,
+    logs_bloom: BloomLogs(data: rpcExecutionPayload.logsBloom.distinctBase),
+    prev_randao: rpcExecutionPayload.prevRandao.asEth2Digest,
+    block_number: rpcExecutionPayload.blockNumber.uint64,
+    gas_limit: rpcExecutionPayload.gasLimit.uint64,
+    gas_used: rpcExecutionPayload.gasUsed.uint64,
+    timestamp: rpcExecutionPayload.timestamp.uint64,
+    extra_data: List[byte, MAX_EXTRA_DATA_BYTES].init(
+      rpcExecutionPayload.extraData.bytes),
+    base_fee_per_gas: rpcExecutionPayload.baseFeePerGas,
+    block_hash: rpcExecutionPayload.blockHash.asEth2Digest,
+    transactions: List[bellatrix.Transaction, MAX_TRANSACTIONS_PER_PAYLOAD].init(
+      mapIt(rpcExecutionPayload.transactions, it.getTransaction)),
+    withdrawals: List[capella.Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD].init(
+      mapIt(rpcExecutionPayload.withdrawals, it.asConsensusWithdrawal)),
+    blob_gas_used: rpcExecutionPayload.blobGasUsed.uint64,
+    excess_blob_gas: rpcExecutionPayload.excessBlobGas.uint64,
+    deposit_receipts:
+      List[electra.DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD].init(
+        mapIt(rpcExecutionPayload.depositReceipts, it.getDepositReceipt)),
+    exits:
+      List[electra.ExecutionLayerExit, MAX_EXECUTION_LAYER_EXITS_PER_PAYLOAD].init(
+        mapIt(rpcExecutionPayload.exits, it.getExecutionLayerExit)))
+
func asEngineExecutionPayload*(executionPayload: bellatrix.ExecutionPayload):
    ExecutionPayloadV1 =
  template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
@@ -558,6 +606,47 @@ func asEngineExecutionPayload*(executionPayload: deneb.ExecutionPayload):
    blobGasUsed: Quantity(executionPayload.blob_gas_used),
    excessBlobGas: Quantity(executionPayload.excess_blob_gas))

+func asEngineExecutionPayload*(executionPayload: electra.ExecutionPayload):
+    ExecutionPayloadV4 =
+  template getTypedTransaction(tt: bellatrix.Transaction): TypedTransaction =
+    TypedTransaction(tt.distinctBase)
+
+  template getDepositReceipt(dr: DepositReceipt): DepositReceiptV1 =
+    DepositReceiptV1(
+      pubkey: FixedBytes[RawPubKeySize](dr.pubkey.blob),
+      withdrawalCredentials: FixedBytes[32](dr.withdrawal_credentials.data),
+      amount: dr.amount.Quantity,
+      signature: FixedBytes[RawSigSize](dr.signature.blob),
+      index: dr.index.Quantity)
+
+  template getExecutionLayerExit(ele: ExecutionLayerExit): ExitV1 =
+    ExitV1(
+      sourceAddress: Address(ele.source_address.data),
+      validatorPublicKey: FixedBytes[RawPubKeySize](ele.validator_pubkey.blob))
+
+  engine_api.ExecutionPayloadV4(
+    parentHash: executionPayload.parent_hash.asBlockHash,
+    feeRecipient: Address(executionPayload.fee_recipient.data),
+    stateRoot: executionPayload.state_root.asBlockHash,
+    receiptsRoot: executionPayload.receipts_root.asBlockHash,
+    logsBloom:
+      FixedBytes[BYTES_PER_LOGS_BLOOM](executionPayload.logs_bloom.data),
+    prevRandao: executionPayload.prev_randao.asBlockHash,
+    blockNumber: Quantity(executionPayload.block_number),
+    gasLimit: Quantity(executionPayload.gas_limit),
+    gasUsed: Quantity(executionPayload.gas_used),
+    timestamp: Quantity(executionPayload.timestamp),
+    extraData: DynamicBytes[0, MAX_EXTRA_DATA_BYTES](executionPayload.extra_data),
+    baseFeePerGas: executionPayload.base_fee_per_gas,
+    blockHash: executionPayload.block_hash.asBlockHash,
+    transactions: mapIt(executionPayload.transactions, it.getTypedTransaction),
+    withdrawals: mapIt(executionPayload.withdrawals, it.asEngineWithdrawal),
+    blobGasUsed: Quantity(executionPayload.blob_gas_used),
+    excessBlobGas: Quantity(executionPayload.excess_blob_gas),
+    depositReceipts: mapIt(
+      executionPayload.deposit_receipts, it.getDepositReceipt),
+    exits: mapIt(executionPayload.exits, it.getExecutionLayerExit))
+
func isConnected(connection: ELConnection): bool =
  connection.web3.isSome
@@ -370,7 +370,7 @@ proc checkBloblessSignature(
  let proposer = getProposer(
      dag, parent, signed_beacon_block.message.slot).valueOr:
    return err("checkBloblessSignature: Cannot compute proposer")
-  if uint64(proposer) != signed_beacon_block.message.proposer_index:
+  if distinctBase(proposer) != signed_beacon_block.message.proposer_index:
    return err("checkBloblessSignature: Incorrect proposer")
  if not verify_block_signature(
      dag.forkAtEpoch(signed_beacon_block.message.slot.epoch),
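Review note: `distinctBase` (from `std/typetraits`) unwraps a `distinct` type to its underlying base type, so the comparison no longer hard-codes a `uint64` conversion. A self-contained sketch of the idiom, with a hypothetical `Index` type standing in for `ValidatorIndex`:

```nim
import std/typetraits

type Index = distinct uint32  # hypothetical stand-in for ValidatorIndex

let proposer = Index(7)
let claimed = 7'u64  # e.g. a uint64 proposer_index field

# distinctBase yields the underlying uint32; Nim's lossless integer
# widening then handles the mixed-width comparison.
doAssert distinctBase(proposer) == claimed
```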
@@ -537,6 +537,7 @@ proc storeBlock(

  if NewPayloadStatus.invalid == payloadStatus:
    self.consensusManager.quarantine[].addUnviable(signedBlock.root)
+    self[].dumpInvalidBlock(signedBlock)
    return err((VerifierError.UnviableFork, ProcessingStatus.completed))

  if NewPayloadStatus.noResponse == payloadStatus:
@@ -2295,51 +2295,48 @@ proc createEth2Node*(rng: ref HmacDrbgContext,
  let phase0Prefix = "/eth2/" & $forkDigests.phase0

  func msgIdProvider(m: messages.Message): Result[seq[byte], ValidationResult] =
-    template topic: untyped =
-      if m.topicIds.len > 0: m.topicIds[0] else: ""
-
    try:
      # This doesn't have to be a tight bound, just enough to avoid denial of
      # service attacks.
      let decoded = snappy.decode(m.data, static(GOSSIP_MAX_SIZE.uint32))
-      ok(gossipId(decoded, phase0Prefix, topic))
+      ok(gossipId(decoded, phase0Prefix, m.topic))
    except CatchableError:
      err(ValidationResult.Reject)

  let
-    params = GossipSubParams(
-      explicit: true,
-      pruneBackoff: chronos.minutes(1),
-      unsubscribeBackoff: chronos.seconds(10),
-      floodPublish: true,
-      gossipFactor: 0.05,
-      d: 8,
-      dLow: 6,
-      dHigh: 12,
-      dScore: 6,
-      dOut: 6 div 2, # less than dlow and no more than dlow/2
-      dLazy: 6,
-      heartbeatInterval: chronos.milliseconds(700),
-      historyLength: 6,
-      historyGossip: 3,
-      fanoutTTL: chronos.seconds(60),
+    params = GossipSubParams.init(
+      explicit = true,
+      pruneBackoff = chronos.minutes(1),
+      unsubscribeBackoff = chronos.seconds(10),
+      floodPublish = true,
+      gossipFactor = 0.05,
+      d = 8,
+      dLow = 6,
+      dHigh = 12,
+      dScore = 6,
+      dOut = 6 div 2, # less than dlow and no more than dlow/2
+      dLazy = 6,
+      heartbeatInterval = chronos.milliseconds(700),
+      historyLength = 6,
+      historyGossip = 3,
+      fanoutTTL = chronos.seconds(60),
      # 2 epochs matching maximum valid attestation lifetime
-      seenTTL: chronos.seconds(int(SECONDS_PER_SLOT * SLOTS_PER_EPOCH * 2)),
-      gossipThreshold: -4000,
-      publishThreshold: -8000,
-      graylistThreshold: -16000, # also disconnect threshold
-      opportunisticGraftThreshold: 0,
-      decayInterval: chronos.seconds(12),
-      decayToZero: 0.01,
-      retainScore: chronos.seconds(385),
-      appSpecificWeight: 0.0,
-      ipColocationFactorWeight: -53.75,
-      ipColocationFactorThreshold: 3.0,
-      behaviourPenaltyWeight: -15.9,
-      behaviourPenaltyDecay: 0.986,
-      disconnectBadPeers: true,
-      directPeers: directPeers,
-      bandwidthEstimatebps: config.bandwidthEstimate.get(100_000_000)
+      seenTTL = chronos.seconds(int(SECONDS_PER_SLOT * SLOTS_PER_EPOCH * 2)),
+      gossipThreshold = -4000,
+      publishThreshold = -8000,
+      graylistThreshold = -16000, # also disconnect threshold
+      opportunisticGraftThreshold = 0,
+      decayInterval = chronos.seconds(12),
+      decayToZero = 0.01,
+      retainScore = chronos.seconds(385),
+      appSpecificWeight = 0.0,
+      ipColocationFactorWeight = -53.75,
+      ipColocationFactorThreshold = 3.0,
+      behaviourPenaltyWeight = -15.9,
+      behaviourPenaltyDecay = 0.986,
+      disconnectBadPeers = true,
+      directPeers = directPeers,
+      bandwidthEstimatebps = config.bandwidthEstimate.get(100_000_000)
    )
    pubsub = GossipSub.init(
      switch = switch,
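Review note on the hunk above: switching from a raw `GossipSubParams(...)` object construction to `GossipSubParams.init(...)` lets the library validate parameter invariants at construction time instead of accepting any field combination. A minimal sketch of why a checked `init` is preferable, using a hypothetical `Params` type (not the nim-libp2p API):

```nim
# A constructor proc can enforce invariants, e.g. dLow <= d <= dHigh,
# that a bare object literal would silently skip.
type Params = object
  dLow, d, dHigh: int

proc init(T: type Params, dLow, d, dHigh: int): T =
  doAssert dLow <= d and d <= dHigh, "mesh degree bounds must be ordered"
  T(dLow: dLow, d: d, dHigh: dHigh)

let p = Params.init(dLow = 6, d = 8, dHigh = 12)  # validated at creation
```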
@@ -8,7 +8,7 @@
{.push raises: [].}

import
-  std/[sequtils, strutils, os],
+  std/os,
  stew/[byteutils, objects], stew/shims/macros, nimcrypto/hash,
  web3/[conversions],
  web3/primitives as web3types,
@@ -16,6 +16,11 @@ import
  eth/common/eth_types_json_serialization,
  ../spec/[eth2_ssz_serialization, forks]

+from std/sequtils import deduplicate, filterIt, mapIt
+from std/strutils import
+  escape, parseBiggestUInt, replace, splitLines, startsWith, strip,
+  toLowerAscii
+
# TODO(zah):
#   We can compress the embedded states with snappy before embedding them here.
@@ -236,7 +241,7 @@ when const_preset == "gnosis":
    chiadoGenesisSize* {.importc: "gnosis_chiado_genesis_size".}: int

  # let `.incbin` in assembly file find the binary file through search path
-  {.passc: "-I" & vendorDir.}
+  {.passc: "-I" & escape(vendorDir).}
  {.compile: "network_metadata_gnosis.S".}

else:
@@ -263,9 +268,6 @@
    checkForkConsistency(network.cfg)

  for network in [gnosisMetadata, chiadoMetadata]:
    doAssert network.cfg.ALTAIR_FORK_EPOCH < FAR_FUTURE_EPOCH
    doAssert network.cfg.BELLATRIX_FORK_EPOCH < FAR_FUTURE_EPOCH
    doAssert network.cfg.CAPELLA_FORK_EPOCH < FAR_FUTURE_EPOCH
    doAssert network.cfg.DENEB_FORK_EPOCH < FAR_FUTURE_EPOCH
-    doAssert network.cfg.ELECTRA_FORK_EPOCH == FAR_FUTURE_EPOCH
-  static: doAssert ConsensusFork.high == ConsensusFork.Deneb
|
||||
|
@ -287,7 +289,7 @@ elif const_preset == "mainnet":
|
|||
  {.pop.}

  # let `.incbin` in assembly file find the binary file through search path
-  {.passc: "-I" & vendorDir.}
+  {.passc: "-I" & escape(vendorDir).}
  {.compile: "network_metadata_mainnet.S".}

else:
@@ -329,9 +331,6 @@
    checkForkConsistency(network.cfg)

  for network in [mainnetMetadata, praterMetadata, sepoliaMetadata, holeskyMetadata]:
    doAssert network.cfg.ALTAIR_FORK_EPOCH < FAR_FUTURE_EPOCH
    doAssert network.cfg.BELLATRIX_FORK_EPOCH < FAR_FUTURE_EPOCH
    doAssert network.cfg.CAPELLA_FORK_EPOCH < FAR_FUTURE_EPOCH
    doAssert network.cfg.DENEB_FORK_EPOCH < FAR_FUTURE_EPOCH
-    doAssert network.cfg.ELECTRA_FORK_EPOCH == FAR_FUTURE_EPOCH
-  static: doAssert ConsensusFork.high == ConsensusFork.Deneb
@@ -129,6 +129,21 @@ proc toString*(kind: ValidatorFilterKind): string =
  of ValidatorFilterKind.WithdrawalDone:
    "withdrawal_done"

+func checkRestBlockBlobsValid(
+    forkyBlck: deneb.SignedBeaconBlock | electra.SignedBeaconBlock,
+    kzg_proofs: KzgProofs,
+    blobs: Blobs): Result[void, string] =
+  if kzg_proofs.len != blobs.len:
+    return err("Invalid block publish: " & $kzg_proofs.len & " KZG proofs and " &
+               $blobs.len & " blobs")
+
+  if kzg_proofs.len != forkyBlck.message.body.blob_kzg_commitments.len:
+    return err("Invalid block publish: " & $kzg_proofs.len &
+               " KZG proofs and " & $forkyBlck.message.body.blob_kzg_commitments.len &
+               " KZG commitments")
+
+  ok()
+
proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
  # https://github.com/ethereum/EIPs/blob/master/EIPS/eip-4881.md
  router.api2(MethodGet, "/eth/v1/beacon/deposit_snapshot") do (
@@ -920,6 +935,12 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      of ConsensusFork.Deneb:
        var blck = restBlock.denebData.signed_block
        blck.root = hash_tree_root(blck.message)
+
+        let validity = checkRestBlockBlobsValid(
+          blck, restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)
+        if validity.isErr:
+          return RestApiResponse.jsonError(Http400, validity.error)
+
        await node.router.routeSignedBeaconBlock(
          blck, Opt.some(blck.create_blob_sidecars(
            restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)))
@@ -996,6 +1017,12 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      of ConsensusFork.Deneb:
        var blck = restBlock.denebData.signed_block
        blck.root = hash_tree_root(blck.message)
+
+        let validity = checkRestBlockBlobsValid(
+          blck, restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)
+        if validity.isErr:
+          return RestApiResponse.jsonError(Http400, validity.error)
+
        await node.router.routeSignedBeaconBlock(
          blck, Opt.some(blck.create_blob_sidecars(
            restBlock.denebData.kzg_proofs, restBlock.denebData.blobs)))
@@ -11,13 +11,11 @@ import
  stew/assign2,
  json_serialization/std/sets,
  chronicles,
  ../extras,
  ./datatypes/[phase0, altair, bellatrix],
  "."/[eth2_merkleization, forks, signatures, validator]

from std/algorithm import fill
from std/sequtils import anyIt, mapIt, toSeq

from ./datatypes/capella import BeaconState, ExecutionPayloadHeader, Withdrawal

export extras, forks, validator, chronicles
@@ -92,42 +90,59 @@ func get_validator_activation_churn_limit*(
    cfg.MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT,
    get_validator_churn_limit(cfg, state, cache))

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#initiate_validator_exit
-func initiate_validator_exit*(
-    cfg: RuntimeConfig, state: var ForkyBeaconState,
-    index: ValidatorIndex, cache: var StateCache): Result[void, cstring] =
-  ## Initiate the exit of the validator with index ``index``.
-
-  if state.validators.item(index).exit_epoch != FAR_FUTURE_EPOCH:
-    return ok() # Before touching cache
-
-  # Return if validator already initiated exit
-  let validator = addr state.validators.mitem(index)
-
-  trace "Validator exiting",
-    index = index,
-    num_validators = state.validators.len,
-    current_epoch = get_current_epoch(state),
-    validator_slashed = validator.slashed,
-    validator_withdrawable_epoch = validator.withdrawable_epoch,
-    validator_exit_epoch = validator.exit_epoch,
-    validator_effective_balance = validator.effective_balance
-
-  var exit_queue_epoch = compute_activation_exit_epoch(get_current_epoch(state))
+# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#initiate_validator_exit
+func get_state_exit_queue_info*(
+    cfg: RuntimeConfig, state: var ForkyBeaconState, cache: var StateCache): ExitQueueInfo =
+  var
+    exit_queue_epoch = compute_activation_exit_epoch(get_current_epoch(state))
+    exit_queue_churn: uint64
  # Compute max exit epoch
  for idx in 0..<state.validators.len:
    let exit_epoch = state.validators.item(idx).exit_epoch
    if exit_epoch != FAR_FUTURE_EPOCH and exit_epoch > exit_queue_epoch:
      exit_queue_epoch = exit_epoch
-
-  var
-    exit_queue_churn: int
-  for idx in 0..<state.validators.len:
-    if state.validators.item(idx).exit_epoch == exit_queue_epoch:
-      exit_queue_churn += 1
+      # Reset exit queue churn counter as the expected exit_queue_epoch updates
+      # via this essentially max()-but-not-FAR_FUTURE_EPOCH loop to restart the
+      # counting the second for loop in spec version does. Only the last count,
+      # the one corresponding to the ultimately correct exit_queue_epoch, won't
+      # be reset.
+      exit_queue_churn = 0

-  if exit_queue_churn.uint64 >= get_validator_churn_limit(cfg, state, cache):
-    exit_queue_epoch += 1
+    # Second spec loop body, which there is responsible for taking the already
+    # known exit_queue_epoch, scanning for all validators with that exit epoch
+    # and checking if they'll reach validator_churn_limit(state). Do that here
+    # incrementally to fuse the two loops and save an all-validator iteration.
+    if exit_epoch == exit_queue_epoch:
+      inc exit_queue_churn
+
+  ExitQueueInfo(
+    exit_queue_epoch: exit_queue_epoch, exit_queue_churn: exit_queue_churn)
+
+# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/beacon-chain.md#initiate_validator_exit
+func initiate_validator_exit*(
+    cfg: RuntimeConfig, state: var ForkyBeaconState,
+    index: ValidatorIndex, exit_queue_info: ExitQueueInfo, cache: var StateCache):
+    Result[ExitQueueInfo, cstring] =
+  ## Initiate the exit of the validator with index ``index``.
+
+  if state.validators.item(index).exit_epoch != FAR_FUTURE_EPOCH:
+    return ok(exit_queue_info) # Before touching cache
+
+  # Return if validator already initiated exit
+  let validator = addr state.validators.mitem(index)
+
+  var
+    exit_queue_epoch = exit_queue_info.exit_queue_epoch
+    exit_queue_churn = exit_queue_info.exit_queue_churn
+
+  if exit_queue_churn >= get_validator_churn_limit(cfg, state, cache):
+    inc exit_queue_epoch
+
+    # Bookkeeping for inter-operation caching; include this exit for next time
+    exit_queue_churn = 1
+  else:
+    inc exit_queue_churn
+
  # Set validator exit epoch and withdrawable epoch
  validator.exit_epoch = exit_queue_epoch
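Review note: the fused loop above replaces the spec's two passes (first find the maximum exit epoch, then count validators at that epoch) with a single pass that resets the counter whenever a new maximum is found. The same trick in isolation, as a self-contained sketch with plain integers:

```nim
# Single pass computing (maximum, count of elements equal to the maximum):
# the counter restarts whenever a new maximum appears, so only the count
# for the final maximum survives, mirroring the exit-queue loop fusion.
func maxAndCount(xs: openArray[int]): (int, int) =
  var mx = low(int)
  var cnt = 0
  for x in xs:
    if x > mx:
      mx = x
      cnt = 0  # a new maximum invalidates the running count
    if x == mx:
      inc cnt
  (mx, cnt)

doAssert maxAndCount([3, 1, 3, 2]) == (3, 2)
```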
@@ -139,7 +154,8 @@ func initiate_validator_exit*(
  validator.withdrawable_epoch =
    validator.exit_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY

-  ok()
+  ok(ExitQueueInfo(
+    exit_queue_epoch: exit_queue_epoch, exit_queue_churn: exit_queue_churn))

from ./datatypes/deneb import BeaconState
@@ -184,23 +200,16 @@ func get_proposer_reward(state: ForkyBeaconState, whistleblower_reward: Gwei): Gwei =
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/bellatrix/beacon-chain.md#modified-slash_validator
proc slash_validator*(
    cfg: RuntimeConfig, state: var ForkyBeaconState,
-    slashed_index: ValidatorIndex, cache: var StateCache):
-    Result[Gwei, cstring] =
+    slashed_index: ValidatorIndex, pre_exit_queue_info: ExitQueueInfo,
+    cache: var StateCache): Result[(Gwei, ExitQueueInfo), cstring] =
  ## Slash the validator with index ``index``.
-  let epoch = get_current_epoch(state)
-  ? initiate_validator_exit(cfg, state, slashed_index, cache)
+  let
+    epoch = get_current_epoch(state)
+    post_exit_queue_info = ? initiate_validator_exit(
+      cfg, state, slashed_index, pre_exit_queue_info, cache)

  let validator = addr state.validators.mitem(slashed_index)

-  trace "slash_validator: ejecting validator via slashing (validator_leaving)",
-    index = slashed_index,
-    num_validators = state.validators.len,
-    current_epoch = get_current_epoch(state),
-    validator_slashed = validator.slashed,
-    validator_withdrawable_epoch = validator.withdrawable_epoch,
-    validator_exit_epoch = validator.exit_epoch,
-    validator_effective_balance = validator.effective_balance
-
  validator.slashed = true
  validator.withdrawable_epoch =
    max(validator.withdrawable_epoch, epoch + EPOCHS_PER_SLASHINGS_VECTOR)
@@ -213,7 +222,7 @@ proc slash_validator*(
  # The rest doesn't make sense without there being any proposer index, so skip
  let proposer_index = get_beacon_proposer_index(state, cache).valueOr:
    debug "No beacon proposer index and probably no active validators"
-    return ok(0.Gwei)
+    return ok((0.Gwei, post_exit_queue_info))

  # Apply proposer and whistleblower rewards
  let
@@ -224,11 +233,13 @@ proc slash_validator*(

  increase_balance(state, proposer_index, proposer_reward)
  # TODO: evaluate if spec bug / underflow can be triggered
-  doAssert(whistleblower_reward >= proposer_reward, "Spec bug: underflow in slash_validator")
+  doAssert(
+    whistleblower_reward >= proposer_reward,
+    "Spec bug: underflow in slash_validator")
  increase_balance(
    state, whistleblower_index, whistleblower_reward - proposer_reward)

-  ok(proposer_reward)
+  ok((proposer_reward, post_exit_queue_info))

func genesis_time_from_eth1_timestamp(
    cfg: RuntimeConfig, eth1_timestamp: uint64): uint64 =
@@ -580,6 +580,10 @@ type

    flags*: set[RewardFlags]

+  ExitQueueInfo* = object
+    exit_queue_epoch*: Epoch
+    exit_queue_churn*: uint64
+
func pubkey*(v: HashedValidatorPubKey): ValidatorPubKey =
  if isNil(v.value):
    # This should never happen but we guard against it in case a
@@ -30,8 +30,27 @@ from ./deneb import Blobs, BlobsBundle, KzgCommitments, KzgProofs

export json_serialization, base, kzg4844

const
+  # Keep these here for now, since things still in flux
+  # https://github.com/ethereum/consensus-specs/pull/3615
+  MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD* = 8192
+  MAX_EXECUTION_LAYER_EXITS_PER_PAYLOAD* = 16 # there's a discrepancy here, _PER_PAYLOAD or not

type
+  # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/deneb/beacon-chain.md#executionpayload
+  # https://github.com/ethereum/consensus-specs/pull/3615
+  DepositReceipt* = object
+    pubkey*: ValidatorPubKey
+    withdrawal_credentials*: Eth2Digest
+    amount*: Gwei
+    signature*: ValidatorSig
+    index*: uint64
+
+  # https://github.com/ethereum/consensus-specs/pull/3615
+  ExecutionLayerExit* = object
+    source_address*: ExecutionAddress
+    validator_pubkey*: ValidatorPubKey
+
+  # https://github.com/ethereum/consensus-specs/pull/3615
  ExecutionPayload* = object
    # Execution block header fields
    parent_hash*: Eth2Digest
@@ -54,8 +73,10 @@ type
    block_hash*: Eth2Digest # Hash of execution block
    transactions*: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
    withdrawals*: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
-    blob_gas_used*: uint64 # [New in Deneb]
-    excess_blob_gas*: uint64 # [New in Deneb]
+    blob_gas_used*: uint64
+    excess_blob_gas*: uint64
+    deposit_receipts*: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD]
+    exits*: List[ExecutionLayerExit, MAX_EXECUTION_LAYER_EXITS_PER_PAYLOAD]

  ExecutionPayloadForSigning* = object
    executionPayload*: ExecutionPayload
@@ -186,9 +186,10 @@ proc check_proposer_slashing*(
proc process_proposer_slashing*(
    cfg: RuntimeConfig, state: var ForkyBeaconState,
    proposer_slashing: SomeProposerSlashing, flags: UpdateFlags,
-    cache: var StateCache): Result[Gwei, cstring] =
+    exit_queue_info: ExitQueueInfo, cache: var StateCache):
+    Result[(Gwei, ExitQueueInfo), cstring] =
  let proposer_index = ? check_proposer_slashing(state, proposer_slashing, flags)
-  slash_validator(cfg, state, proposer_index, cache)
+  slash_validator(cfg, state, proposer_index, exit_queue_info, cache)

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_slashable_attestation_data
func is_slashable_attestation_data(
@@ -250,17 +251,24 @@ proc process_attester_slashing*(
    state: var ForkyBeaconState,
    attester_slashing: SomeAttesterSlashing,
    flags: UpdateFlags,
-    cache: var StateCache
-    ): Result[Gwei, cstring] =
+    exit_queue_info: ExitQueueInfo, cache: var StateCache
+    ): Result[(Gwei, ExitQueueInfo), cstring] =
  let slashed_attesters =
    ? check_attester_slashing(state, attester_slashing, flags)

-  var proposer_reward: Gwei
+  var
+    proposer_reward: Gwei
+    cur_exit_queue_info = exit_queue_info

  for index in slashed_attesters:
-    proposer_reward += ? slash_validator(cfg, state, index, cache)
+    doAssert strictVerification notin flags or
+      cur_exit_queue_info == get_state_exit_queue_info(cfg, state, cache)
+    let (new_proposer_reward, new_exit_queue_info) = ? slash_validator(
+      cfg, state, index, cur_exit_queue_info, cache)
+    proposer_reward += new_proposer_reward
+    cur_exit_queue_info = new_exit_queue_info

-  ok(proposer_reward)
+  ok((proposer_reward, cur_exit_queue_info))

func findValidatorIndex*(state: ForkyBeaconState, pubkey: ValidatorPubKey):
    Opt[ValidatorIndex] =
@@ -410,12 +418,12 @@ proc process_voluntary_exit*(
    cfg: RuntimeConfig,
    state: var ForkyBeaconState,
    signed_voluntary_exit: SomeSignedVoluntaryExit,
-    flags: UpdateFlags,
-    cache: var StateCache): Result[void, cstring] =
+    flags: UpdateFlags, exit_queue_info: ExitQueueInfo,
+    cache: var StateCache): Result[ExitQueueInfo, cstring] =
  let exited_validator =
    ? check_voluntary_exit(cfg, state, signed_voluntary_exit, flags)
-  ? initiate_validator_exit(cfg, state, exited_validator, cache)
-  ok()
+  ok(? initiate_validator_exit(
+    cfg, state, exited_validator, exit_queue_info, cache))

proc process_bls_to_execution_change*(
    cfg: RuntimeConfig,
@@ -464,12 +472,25 @@ proc process_operations(cfg: RuntimeConfig,

  var operations_rewards: BlockRewards

+  # It costs a full validator set scan to construct these values; only do so if
+  # there will be some kind of exit.
+  var exit_queue_info =
+    if body.proposer_slashings.len + body.attester_slashings.len +
+        body.voluntary_exits.len > 0:
+      get_state_exit_queue_info(cfg, state, cache)
+    else:
+      default(ExitQueueInfo) # not used
+
  for op in body.proposer_slashings:
-    operations_rewards.proposer_slashings +=
-      ? process_proposer_slashing(cfg, state, op, flags, cache)
+    let (proposer_slashing_reward, new_exit_queue_info) =
+      ? process_proposer_slashing(cfg, state, op, flags, exit_queue_info, cache)
+    operations_rewards.proposer_slashings += proposer_slashing_reward
+    exit_queue_info = new_exit_queue_info
  for op in body.attester_slashings:
-    operations_rewards.attester_slashings +=
-      ? process_attester_slashing(cfg, state, op, flags, cache)
+    let (attester_slashing_reward, new_exit_queue_info) =
+      ? process_attester_slashing(cfg, state, op, flags, exit_queue_info, cache)
+    operations_rewards.attester_slashings += attester_slashing_reward
+    exit_queue_info = new_exit_queue_info
  for op in body.attestations:
    operations_rewards.attestations +=
      ? process_attestation(state, op, flags, base_reward_per_increment, cache)
@@ -478,7 +499,8 @@ proc process_operations(cfg: RuntimeConfig,
  for op in body.deposits:
    ? process_deposit(cfg, state, bloom_filter[], op, flags)
  for op in body.voluntary_exits:
-    ? process_voluntary_exit(cfg, state, op, flags, cache)
+    exit_queue_info = ? process_voluntary_exit(
+      cfg, state, op, flags, exit_queue_info, cache)
  when typeof(body).kind >= ConsensusFork.Capella:
    for op in body.bls_to_execution_changes:
      ? process_bls_to_execution_change(cfg, state, op)
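Review note: the hunks above thread the exit-queue snapshot through each operation, so `get_state_exit_queue_info`'s full-validator scan runs at most once per block and each slashing or exit updates the snapshot incrementally. A self-contained sketch of the same accumulator threading with plain integers and a hypothetical `applyOp`, assuming the `results` package used throughout this codebase:

```nim
import results

# Each operation consumes the current snapshot and returns (reward, next
# snapshot); `?` propagates failures exactly as in process_operations.
func applyOp(queue: int, op: int): Result[(int, int), cstring] =
  ok((op * 10, queue + 1))

func processAll(ops: seq[int]): Result[int, cstring] =
  var queue = 0    # stands in for the one-time get_state_exit_queue_info scan
  var rewards = 0
  for op in ops:
    let (reward, newQueue) = ? applyOp(queue, op)
    rewards += reward
    queue = newQueue
  ok(rewards)
```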
@@ -897,6 +897,9 @@ func process_registry_updates*(
      get_validator_activation_churn_limit(cfg, state, cache)
    else:
      get_validator_churn_limit(cfg, state, cache)

+  var maybe_exit_queue_info: Opt[ExitQueueInfo]
+
  for vidx in state.validators.vindices:
    if is_eligible_for_activation_queue(state.validators.item(vidx)):
      state.validators.mitem(vidx).activation_eligibility_epoch =
@@ -904,7 +907,17 @@

    if is_active_validator(state.validators.item(vidx), get_current_epoch(state)) and
        state.validators.item(vidx).effective_balance <= cfg.EJECTION_BALANCE.Gwei:
-      ? initiate_validator_exit(cfg, state, vidx, cache)
+      # Typically, there will be no ejected validators, and even more rarely,
+      # more than one. Therefore, only calculate the information required for
+      # initiate_validator_exit if there actually is at least one.
+      let exit_queue_info = maybe_exit_queue_info.valueOr:
+        let initial_exit_queue_info = get_state_exit_queue_info(
+          cfg, state, cache)
+        maybe_exit_queue_info = Opt.some initial_exit_queue_info
+        initial_exit_queue_info
+
+      maybe_exit_queue_info = Opt.some (? initiate_validator_exit(
+        cfg, state, vidx, exit_queue_info, cache))

    let validator = unsafeAddr state.validators.item(vidx)
    if is_eligible_for_activation(state, validator[]):
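Review note: the hunk above uses lazy memoization — the expensive scan runs only if an ejection actually occurs, and its (updated) result is reused for any further ejections in the same pass. A minimal sketch of the pattern with a hypothetical `expensiveScan`, assuming the `results` package that provides `Opt`:

```nim
import results

var cachedScan = Opt.none(int)

proc expensiveScan(): int =
  42  # stands in for get_state_exit_queue_info's full-validator pass

proc getOrInit(): int =
  # First caller pays for the scan; later callers reuse the cached value.
  cachedScan.valueOr:
    let fresh = expensiveScan()
    cachedScan = Opt.some fresh
    fresh
```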
@@ -1195,137 +1195,6 @@ proc proposeBlockAux(
          blobsBundle.proofs, blobsBundle.blobs))
      else:
        Opt.none(seq[BlobSidecar])

  # BIG BUG SOURCE: The `let` below cannot be combined with the others above!
  # If combined, there are sometimes `SIGSEGV` during `test_keymanager_api`.
  # This has only been observed on macOS (aarch64) in Jenkins, not on GitHub.
  #
  # - macOS 14.2.1 (23C71)
  # - Xcode 15.1 (15C65)
  # - Nim v1.6.18 (a749a8b742bd0a4272c26a65517275db4720e58a)
  #
  # Issue has started occuring around 12 Jan 2024, in a CI run for PR #5731.
  # The PR did not change anything related to this, suggesting an environment
  # or hardware change. The issue is flaky; could have been introduced earlier
  # before surfacing in the aforementioned PR. About 30% to hit bug.
  #
  # [2024-01-12T11:54:21.011Z] Wrote test_keymanager_api/bootstrap_node.enr
  # [2024-01-12T11:54:29.294Z] Serialization/deserialization [Beacon Node] [Preset: mainnet] . (0.00s)
  # [2024-01-12T11:54:29.294Z] ListKeys requests [Beacon Node] [Preset: mainnet] .... (0.01s)
  # [2024-01-12T11:54:34.870Z] ImportKeystores requests [Beacon Node] [Preset: mainnet] Traceback (most recent call last, using override)
  # [2024-01-12T11:54:34.870Z] vendor/nim-libp2p/libp2p/protocols/rendezvous.nim(1016) main
  # [2024-01-12T11:54:34.870Z] vendor/nim-libp2p/libp2p/protocols/rendezvous.nim(1006) NimMain
  # [2024-01-12T11:54:34.870Z] vendor/nim-libp2p/libp2p/protocols/rendezvous.nim(997) PreMain
  # [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(1502) atmtest_keymanager_apidotnim_Init000
  # [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(1475) main
  # [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(378) futureContinue
  # [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(1481) main
  # [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(307) startBeaconNode
  # [2024-01-12T11:54:34.870Z] beacon_chain/nimbus_beacon_node.nim(1900) start
  # [2024-01-12T11:54:34.870Z] beacon_chain/nimbus_beacon_node.nim(1847) run
  # [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
  # [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(378) futureContinue
  # [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(1465) delayedTests
  # [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(392) runTests
  # [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(378) futureContinue
  # [2024-01-12T11:54:34.870Z] vendor/nim-unittest2/unittest2.nim(1147) runTests
  # [2024-01-12T11:54:34.870Z] vendor/nim-unittest2/unittest2.nim(1086) runDirect
  # [2024-01-12T11:54:34.870Z] vendor/nim-testutils/testutils/unittests.nim(16) runTestX60gensym2933
  # [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(656) waitFor
  # [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(631) pollFor
  # [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
  # [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(378) futureContinue
  # [2024-01-12T11:54:34.870Z] beacon_chain/validators/beacon_validators.nim(82) proposeBlockAux
  # [2024-01-12T11:54:34.870Z] vendor/nimbus-build-system/vendor/Nim/lib/system/excpt.nim(631) signalHandler
  # [2024-01-12T11:54:34.870Z] SIGSEGV: Illegal storage access. (Attempt to read from nil?)
  #
  # This appeared again around 25 Feb 2024, in a CI run for PR #5959,
  # despite the extra `let` having been applied -- once more observed on
  # macOS (aarch64) in Jenkins, and much rarer than before.
  #
  # [2024-02-25T23:21:24.533Z] Wrote test_keymanager_api/bootstrap_node.enr
  # [2024-02-25T23:21:32.756Z] Serialization/deserialization [Beacon Node] [Preset: mainnet] . (0.00s)
  # [2024-02-25T23:21:32.756Z] ListKeys requests [Beacon Node] [Preset: mainnet] .... (0.01s)
  # [2024-02-25T23:21:37.219Z] ImportKeystores requests [Beacon Node] [Preset: mainnet] Traceback (most recent call last, using override)
  # [2024-02-25T23:21:37.219Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1068) main
  # [2024-02-25T23:21:37.219Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1058) NimMain
  # [2024-02-25T23:21:37.219Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1049) PreMain
  # [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(1501) atmtest_keymanager_apidotnim_Init000
  # [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(1474) main
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(382) futureContinue
  # [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(1480) main
  # [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(307) startBeaconNode
  # [2024-02-25T23:21:37.219Z] beacon_chain/nimbus_beacon_node.nim(1916) start
  # [2024-02-25T23:21:37.219Z] beacon_chain/nimbus_beacon_node.nim(1863) run
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(382) futureContinue
  # [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(1464) delayedTests
  # [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(391) runTests
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(382) futureContinue
  # [2024-02-25T23:21:37.219Z] vendor/nim-unittest2/unittest2.nim(1151) runTests
  # [2024-02-25T23:21:37.219Z] vendor/nim-unittest2/unittest2.nim(1086) runDirect
  # [2024-02-25T23:21:37.219Z] vendor/nim-testutils/testutils/unittests.nim(16) runTestX60gensym3188
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(660) waitFor
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(635) pollFor
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(382) futureContinue
  # [2024-02-25T23:21:37.219Z] vendor/nim-chronicles/chronicles.nim(251) proposeBlockAux
  # [2024-02-25T23:21:37.219Z] SIGSEGV: Illegal storage access. (Attempt to read from nil?)
  #
  # One theory is that PR #5946 may increase the frequency, as there were
  # times where the Jenkins CI failed almost every time using a shorter trace.
  # However, the problem was once more flaky, with some passes in-between.
  # For now, PR #5946 was reverted (low priority), and the problem is gone,
  # whether related or not.
  #
  # [2024-02-23T23:11:47.700Z] Wrote test_keymanager_api/bootstrap_node.enr
  # [2024-02-23T23:11:54.728Z] Serialization/deserialization [Beacon Node] [Preset: mainnet] . (0.00s)
  # [2024-02-23T23:11:54.728Z] ListKeys requests [Beacon Node] [Preset: mainnet] .... (0.01s)
  # [2024-02-23T23:11:59.523Z] ImportKeystores requests [Beacon Node] [Preset: mainnet] Traceback (most recent call last, using override)
  # [2024-02-23T23:11:59.523Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1067) main
  # [2024-02-23T23:11:59.523Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1057) NimMain
  # [2024-02-23T23:11:59.523Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
  # [2024-02-23T23:11:59.523Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
  # [2024-02-23T23:11:59.523Z] SIGSEGV: Illegal storage access. (Attempt to read from nil?)
  #
  # The generated `nimcache` differs slightly if the `let` are separated from
  # a single block; separation introduces an additional state in closure iter.
  # This change, maybe combined with some macOS specific compiler specifics,
  # could this trigger the `SIGSEGV`? Maybe the extra state adds just enough
  # complexity to the function to disable certain problematic optimizations?
  # The change in size of the environment changes a number of things such as
  # alignment and which parts of an environment contain pointers and so on,
  # which in turn may have surprising behavioural effects, ie most likely this
  # extra state masks some underlying issue. Furthermore, the combination of
  # `(await xyz).valueOr: return` is not very commonly used with other `await`
  # in the same `let` block, which could explain this not being more common.
  #
  # Note that when compiling for Wasm, there are similar bugs with `results`
  # when inlining unwraps, e.g., in `eth2_rest_serialization.nim`.
  # These have not been investigated thoroughly so far as that project uses
  # Nim 2.0 with --mm:orc and is just a prototype for Wasm, no production use.
  # But maybe there is something weird going on with `results` related to the
  # random `SIGSEGV` that we are now observing here, related to doing too much
  # inline logic without defining intermediate isolated `let` statements.
  #
  # if mediaType == ApplicationJsonMediaType:
  #   try:
  # -    ok RestJson.decode(value, T,
  # -                      requireAllFields = true,
  # -                      allowUnknownFields = true)
  # +    let r = RestJson.decode(value, T,
  # +                            requireAllFields = true,
  # +                            allowUnknownFields = true)
  # +    ok r
  #   except SerializationError as exc:
  #     debug "Failed to deserialize REST JSON data",
  #           err = exc.formatMsg("<data>"),
  #
  # At this time we can only speculate about the trigger of these issues.
  # Until a shared pattern can be identified, it is better to apply
  # workarounds that at least avoid the known to be reachable triggers.
  # The solution is hacky and far from desirable; it is what it is.
  let
    newBlockRef = (
      await node.router.routeSignedBeaconBlock(signedBlock, blobsOpt)
    ).valueOr:
@@ -18,8 +18,8 @@ const
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"
|
||||
|
||||
versionMajor* = 24
|
||||
versionMinor* = 2
|
||||
versionBuild* = 2
|
||||
versionMinor* = 3
|
||||
versionBuild* = 0
|
||||
|
||||
versionBlob* = "stateofus" # Single word - ends up in the default graffiti
@@ -1,5 +1,5 @@
/* beacon_chain
- * Copyright (c) 2019-2023 Status Research & Development GmbH
+ * Copyright (c) 2019-2024 Status Research & Development GmbH
 * Licensed and distributed under either of
 *   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 *   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -68,22 +68,15 @@ pipeline {
        stage('Build') {
          steps { timeout(50) {
            sh 'make LOG_LEVEL=TRACE'
-            /* Check documentation reflects `nimbus_beacon_node --help`. */
-            sh '''#!/usr/bin/env bash
-            if ! diff -u \\
-                <(sed -n '/Usage/,/^...$/ { /^...$/d; p; }' \\
-                  docs/the_nimbus_book/src/options.md) \\
-                <(COLUMNS=200 build/nimbus_beacon_node --help | \\
-                  sed -n '/Usage/,/Available sub-commands/ { /Available sub-commands/d; p; }' | \\
-                  sed 's/\\x1B\\[[0-9;]*[mG]//g' | \\
-                  sed 's/[[:space:]]*$//'); then \\
-              echo "Please update 'docs/the_nimbus_book/src/options.md' to match 'COLUMNS=200 nimbus_beacon_node --help'"; \\
-              false; \\
-            fi
-            '''
          } }
        }

+        stage('Check Docs') {
+          steps {
+            sh './scripts/check_docs_help_msg.sh'
+          }
+        }
+
        stage('Tests') {
          parallel {
            stage('General') {
env.sh (11 changed lines)
@@ -1,9 +1,15 @@
#!/usr/bin/env bash
+# beacon_chain
+# Copyright (c) 2020-2024 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.

# We use ${BASH_SOURCE[0]} instead of $0 to allow sourcing this file
# and we fall back to a Zsh-specific special var to also support Zsh.
REL_PATH="$(dirname ${BASH_SOURCE[0]:-${(%):-%x}})"
-ABS_PATH="$(cd ${REL_PATH}; pwd)"
+ABS_PATH="$(cd "${REL_PATH}"; pwd)"

# Activate nvm only when this file is sourced without arguments:
if [ -z "$*" ]; then
@@ -32,5 +38,4 @@ if [ -f "${USER_ENV_FILE}" ]; then
  set +o allexport
fi

-source ${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh
-
+source "${ABS_PATH}/vendor/nimbus-build-system/scripts/env.sh"
@@ -108,7 +108,9 @@ proc nfuzz_attestation(input: openArray[byte], xoutput: ptr byte,
proc nfuzz_attester_slashing(input: openArray[byte], xoutput: ptr byte,
    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError].} =
  decodeAndProcess(AttesterSlashingInput):
-    process_attester_slashing(getRuntimeConfig(some "mainnet"), data.state, data.attesterSlashing, flags, cache).isOk
+    process_attester_slashing(getRuntimeConfig(some "mainnet"), data.state,
+      data.attesterSlashing, flags, get_state_exit_queue_info(
+        getRuntimeConfig(some "mainnet"), data.state, cache), cache).isOk

proc nfuzz_block(input: openArray[byte], xoutput: ptr byte,
    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError].} =
@@ -152,12 +154,16 @@ proc nfuzz_deposit(input: openArray[byte], xoutput: ptr byte,
proc nfuzz_proposer_slashing(input: openArray[byte], xoutput: ptr byte,
    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError].} =
  decodeAndProcess(ProposerSlashingInput):
-    process_proposer_slashing(getRuntimeConfig(some "mainnet"), data.state, data.proposerSlashing, flags, cache).isOk
+    process_proposer_slashing(getRuntimeConfig(some "mainnet"), data.state,
+      data.proposerSlashing, flags, get_state_exit_queue_info(
+        getRuntimeConfig(some "mainnet"), data.state, cache), cache).isOk

proc nfuzz_voluntary_exit(input: openArray[byte], xoutput: ptr byte,
    xoutput_size: ptr uint, disable_bls: bool): bool {.exportc, raises: [FuzzCrashError].} =
  decodeAndProcess(VoluntaryExitInput):
-    process_voluntary_exit(getRuntimeConfig(some "mainnet"), data.state, data.exit, flags, cache).isOk
+    process_voluntary_exit(getRuntimeConfig(some "mainnet"), data.state,
+      data.exit, flags, get_state_exit_queue_info(
+        getRuntimeConfig(some "mainnet"), data.state, cache), cache).isOk

# Note: Could also accept raw input pointer and access list_size + seed here.
# However, list_size needs to be known also outside this proc to allocate xoutput.
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2023-2024 Status Research & Development GmbH.
+# Licensed under either of:
+# - Apache License, version 2.0
+# - MIT license
+# at your option. This file may not be copied, modified, or distributed
+# except according to those terms.
+
+set -euo pipefail
+DOC_FILE='docs/the_nimbus_book/src/options.md'
+DOC_USAGE=$(sed -n '/Usage/,/^...$/ { /^...$/d; p; }' "${DOC_FILE}")
+BIN_USAGE=$(
+  COLUMNS=200 build/nimbus_beacon_node --help | \
+  sed 's/\x1b\[[0-9;]*m//g' | \
+  sed -n '/Usage/,/Available sub-commands/ { /Available sub-commands/d; p; }' | \
+  sed 's/\\x1B\\[[0-9;]*[mG]//g' | \
+  sed 's/[[:space:]]*$//'
+)
+if ! diff -u <(echo "${DOC_USAGE}") <(echo "${BIN_USAGE}"); then
+  echo "Please update '${DOC_FILE}' to match 'COLUMNS=200 nimbus_beacon_node --help'"
+  exit 1
+fi
@@ -95,7 +95,9 @@ suite baseDescription & "Attester Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_attester_slashing(
-      defaultRuntimeConfig, preState, attesterSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+      cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpAttSlashingDir):
@@ -134,7 +136,9 @@ suite baseDescription & "Proposer Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_proposer_slashing(
-      defaultRuntimeConfig, preState, proposerSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, proposerSlashing, {},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+      cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpProposerSlashingDir):
@@ -162,8 +166,13 @@ suite baseDescription & "Voluntary Exit " & preset():
      preState: var altair.BeaconState, voluntaryExit: SignedVoluntaryExit):
      Result[void, cstring] =
    var cache: StateCache
-    process_voluntary_exit(
-      defaultRuntimeConfig, preState, voluntaryExit, {}, cache)
+    if process_voluntary_exit(
+        defaultRuntimeConfig, preState, voluntaryExit, {},
+        get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+        cache).isOk:
+      ok()
+    else:
+      err("")

  for path in walkTests(OpVoluntaryExitDir):
    runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
@@ -24,7 +24,8 @@ import
from std/sequtils import mapIt, toSeq
from std/strutils import contains
from ../../../beacon_chain/spec/beaconstate import
-  get_base_reward_per_increment, get_total_active_balance, process_attestation
+  get_base_reward_per_increment, get_state_exit_queue_info,
+  get_total_active_balance, process_attestation

const
  OpDir = SszTestsDir/const_preset/"bellatrix"/"operations"
@@ -100,7 +101,9 @@ suite baseDescription & "Attester Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_attester_slashing(
-      defaultRuntimeConfig, preState, attesterSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+      cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpAttSlashingDir):
@@ -158,7 +161,9 @@ suite baseDescription & "Proposer Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_proposer_slashing(
-      defaultRuntimeConfig, preState, proposerSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, proposerSlashing, {},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+      cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpProposerSlashingDir):
@@ -186,8 +191,13 @@ suite baseDescription & "Voluntary Exit " & preset():
      preState: var bellatrix.BeaconState, voluntaryExit: SignedVoluntaryExit):
      Result[void, cstring] =
    var cache: StateCache
-    process_voluntary_exit(
-      defaultRuntimeConfig, preState, voluntaryExit, {}, cache)
+    if process_voluntary_exit(
+        defaultRuntimeConfig, preState, voluntaryExit, {},
+        get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+        cache).isOk:
+      ok()
+    else:
+      err("")

  for path in walkTests(OpVoluntaryExitDir):
    runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
@@ -24,7 +24,8 @@ import
from std/sequtils import mapIt, toSeq
from std/strutils import contains
from ../../../beacon_chain/spec/beaconstate import
-  get_base_reward_per_increment, get_total_active_balance, process_attestation
+  get_base_reward_per_increment, get_state_exit_queue_info,
+  get_total_active_balance, process_attestation

const
  OpDir = SszTestsDir/const_preset/"capella"/"operations"
@@ -104,7 +105,9 @@ suite baseDescription & "Attester Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_attester_slashing(
-      defaultRuntimeConfig, preState, attesterSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState,
+        cache), cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpAttSlashingDir):
@@ -175,7 +178,9 @@ suite baseDescription & "Proposer Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_proposer_slashing(
-      defaultRuntimeConfig, preState, proposerSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, proposerSlashing, {},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+      cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpProposerSlashingDir):
@@ -203,8 +208,13 @@ suite baseDescription & "Voluntary Exit " & preset():
      preState: var capella.BeaconState, voluntaryExit: SignedVoluntaryExit):
      Result[void, cstring] =
    var cache: StateCache
-    process_voluntary_exit(
-      defaultRuntimeConfig, preState, voluntaryExit, {}, cache)
+    if process_voluntary_exit(
+        defaultRuntimeConfig, preState, voluntaryExit, {},
+        get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+        cache).isOk:
+      ok()
+    else:
+      err("")

  for path in walkTests(OpVoluntaryExitDir):
    runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
@@ -24,7 +24,8 @@ import
from std/sequtils import mapIt, toSeq
from std/strutils import contains
from ../../../beacon_chain/spec/beaconstate import
-  get_base_reward_per_increment, get_total_active_balance, process_attestation
+  get_base_reward_per_increment, get_state_exit_queue_info,
+  get_total_active_balance, process_attestation

const
  OpDir = SszTestsDir/const_preset/"deneb"/"operations"
@@ -90,7 +91,7 @@ suite baseDescription & "Attestation " & preset():
    # This returns the proposer reward for including the attestation, which
    # isn't tested here.
    discard ? process_attestation(
-      preState, attestation, {}, base_reward_per_increment, cache)
+      preState, attestation, {strictVerification}, base_reward_per_increment, cache)
    ok()

  for path in walkTests(OpAttestationsDir):
@@ -104,7 +105,9 @@ suite baseDescription & "Attester Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_attester_slashing(
-      defaultRuntimeConfig, preState, attesterSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+      cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpAttSlashingDir):
@@ -177,7 +180,9 @@ suite baseDescription & "Proposer Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_proposer_slashing(
-      defaultRuntimeConfig, preState, proposerSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, proposerSlashing, {},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+      cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpProposerSlashingDir):
@@ -205,8 +210,13 @@ suite baseDescription & "Voluntary Exit " & preset():
      preState: var deneb.BeaconState, voluntaryExit: SignedVoluntaryExit):
      Result[void, cstring] =
    var cache: StateCache
-    process_voluntary_exit(
-      defaultRuntimeConfig, preState, voluntaryExit, {}, cache)
+    if process_voluntary_exit(
+        defaultRuntimeConfig, preState, voluntaryExit, {},
+        get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+        cache).isOk:
+      ok()
+    else:
+      err("")

  for path in walkTests(OpVoluntaryExitDir):
    runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
@@ -86,7 +86,9 @@ suite baseDescription & "Attester Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_attester_slashing(
-      defaultRuntimeConfig, preState, attesterSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, attesterSlashing, {strictVerification},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+      cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpAttSlashingDir):
@@ -126,7 +128,9 @@ suite baseDescription & "Proposer Slashing " & preset():
      Result[void, cstring] =
    var cache: StateCache
    doAssert (? process_proposer_slashing(
-      defaultRuntimeConfig, preState, proposerSlashing, {}, cache)) > 0.Gwei
+      defaultRuntimeConfig, preState, proposerSlashing, {},
+      get_state_exit_queue_info(defaultRuntimeConfig, preState,
+        cache), cache))[0] > 0.Gwei
    ok()

  for path in walkTests(OpProposerSlashingDir):
@@ -139,8 +143,13 @@ suite baseDescription & "Voluntary Exit " & preset():
      preState: var phase0.BeaconState, voluntaryExit: SignedVoluntaryExit):
      Result[void, cstring] =
    var cache: StateCache
-    process_voluntary_exit(
-      defaultRuntimeConfig, preState, voluntaryExit, {}, cache)
+    if process_voluntary_exit(
+        defaultRuntimeConfig, preState, voluntaryExit, {},
+        get_state_exit_queue_info(defaultRuntimeConfig, preState, cache),
+        cache).isOk:
+      ok()
+    else:
+      err("")

  for path in walkTests(OpVoluntaryExitDir):
    runTest[SignedVoluntaryExit, typeof applyVoluntaryExit](
File diff suppressed because it is too large
@@ -1 +1 @@
-Subproject commit ab3ab545be0b550cca1c2529f7e97fbebf5eba81
+Subproject commit 5b79c5ed5e460b19d8d8afc241b5f5a02de628a6
@@ -1 +1 @@
-Subproject commit 47cc17719f4293bf80a22ebe28e3bfc54b2a59a1
+Subproject commit ef1b077adfdc803fcce880e81a5740b964bac0bc
@@ -1 +1 @@
-Subproject commit 057f7c653e1abe91cca9aac2f94832f39228ea98
+Subproject commit 4fbcfbe4c452313bd440936318a87ed708987d8b
@@ -1 +1 @@
-Subproject commit 28609597d104a9be880ed5e1648e1ce18ca9dc38
+Subproject commit bb97a9de7931c5e7f8706c5bade8e0cc7fa86848
@@ -1 +1 @@
-Subproject commit 0fc5e49093fa8d3c07476738e3257d0d8e7999a3
+Subproject commit 248f2bdca2d65ff920920c72b764d0622d522596
@@ -1 +1 @@
-Subproject commit 285d97c2b05bbe2a13dab4b52ea878157fb1a1a1
+Subproject commit 85e34e8ab2767f3da1d5c166d695666d42ff0c96
@@ -1 +1 @@
-Subproject commit 3866a8ab98fc6e0e6d406b88800aed72163d5fd4
+Subproject commit 14e0c55e89f0bd529f39b259b0f88196277ac08a