Merge branch 'stable' into feat/eip-7495

Etan Kissling 2024-06-26 15:23:33 +02:00
commit ba5efbfa5c
GPG Key ID: B21DA824C5A3D03D
21 changed files with 169 additions and 199 deletions

View File

@@ -1,3 +1,27 @@
2024-06-24 v24.6.0
==================
Nimbus `v24.6.0` is a `low-urgency` release with performance and safety improvements.
### Improvements
* Improve SHA256 protocol object hashing speed by 30%:
https://github.com/status-im/nimbus-eth2/pull/6292
* Ensure that when a paired Nimbus beacon node and validator client improperly share validator keys, they avoid slashing:
https://github.com/status-im/nimbus-eth2/pull/6329
* Add block scoring to validator client to pick the best block from multiple beacon nodes:
https://github.com/status-im/nimbus-eth2/pull/6303
* Enable block monitoring in validator client by default to attest earlier and more reliably:
https://github.com/status-im/nimbus-eth2/pull/6331
### Fixes
* Fix light client libp2p gossip topic subscriptions:
https://github.com/status-im/nimbus-eth2/pull/6351
2024-05-23 v24.5.1
==================

View File

@@ -162,7 +162,7 @@ DEPOSITS_DELAY := 0
#- "--define:release" cannot be added to "config.nims"
#- disable Nim's default parallelisation because it starts too many processes for too little gain
#- https://github.com/status-im/nim-libp2p#use-identify-metrics
- NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku $(NIM_PARAMS)
+ NIM_PARAMS := -d:release --parallelBuild:1 -d:libp2p_agents_metrics -d:KnownLibP2PAgents=nimbus,lighthouse,lodestar,prysm,teku,grandine $(NIM_PARAMS)
ifeq ($(USE_LIBBACKTRACE), 0)
NIM_PARAMS += -d:disable_libbacktrace
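The `-d:` flags above become compile-time constants inside Nim code. A minimal sketch of how a comma-separated define such as `KnownLibP2PAgents` can be consumed via `{.strdefine.}`; the `knownAgents` helper is hypothetical, not the libp2p API:

```nim
import std/strutils

# Hypothetical consumer of the Makefile define above; `{.strdefine.}` lets
# `-d:KnownLibP2PAgents=...` override this default at compile time.
const KnownLibP2PAgents {.strdefine.} = "nimbus"

func knownAgents(): seq[string] =
  # One label per agent, e.g. for per-client gossip metrics.
  KnownLibP2PAgents.toLowerAscii().split(',')
```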

View File

@@ -67,6 +67,8 @@ type
config*: BeaconNodeConf
attachedValidators*: ref ValidatorPool
optimisticProcessor*: OptimisticProcessor
optimisticFcuFut*: Future[(PayloadExecutionStatus, Opt[BlockHash])].Raising([CancelledError])
lightClient*: LightClient
dag*: ChainDAGRef
quarantine*: ref Quarantine
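For readers unfamiliar with the new field's type: `Future[T].Raising([...])` is chronos' checked-exception future, produced by procs annotated `{.async: (raises: [...]).}`. A small self-contained sketch, not nimbus code:

```nim
import chronos

# Procs declared with `{.async: (raises: [...]).}` return a
# `Future[T].Raising([...])`, the type the new `optimisticFcuFut`
# field spells out explicitly.
proc ping(): Future[string] {.async: (raises: [CancelledError]).} =
  await sleepAsync(10.milliseconds)
  return "pong"

let fut: Future[string].Raising([CancelledError]) = ping()
echo waitFor(fut)  # "pong"
```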

View File

@@ -38,61 +38,17 @@ proc initLightClient*(
# for broadcasting light client data as a server.
let
- optimisticHandler = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock):
-   Future[void] {.async: (raises: [CancelledError]).} =
-   debug "New LC optimistic block",
-     opt = signedBlock.toBlockId(),
-     dag = node.dag.head.bid,
-     wallSlot = node.currentSlot
+ optimisticHandler = proc(
+   signedBlock: ForkedSignedBeaconBlock
+ ): Future[void] {.async: (raises: [CancelledError]).} =
withBlck(signedBlock):
when consensusFork >= ConsensusFork.Bellatrix:
if forkyBlck.message.is_execution_block:
- template blckPayload(): auto =
-   forkyBlck.message.body.execution_payload
- if not blckPayload.block_hash.isZero:
-   # engine_newPayloadV1
+ template payload(): auto = forkyBlck.message.body.execution_payload
+ if not payload.block_hash.isZero:
discard await node.elManager.newExecutionPayload(
forkyBlck.message)
- # Retain optimistic head for other `forkchoiceUpdated` callers.
- # May temporarily block `forkchoiceUpdatedV1` calls, e.g., Geth:
- # - Refuses `newPayload`: "Ignoring payload while snap syncing"
- # - Refuses `fcU`: "Forkchoice requested unknown head"
- # Once DAG sync catches up or as new optimistic heads are fetched
- # the situation recovers
- node.consensusManager[].setOptimisticHead(
-   forkyBlck.toBlockId(), blckPayload.block_hash)
- # engine_forkchoiceUpdatedV1 or engine_forkchoiceUpdatedV2,
- # depending on pre or post-Shapella
- let beaconHead = node.attestationPool[].getBeaconHead(nil)
- template callForkchoiceUpdated(attributes: untyped) =
-   discard await node.elManager.forkchoiceUpdated(
-     headBlockHash = blckPayload.block_hash,
-     safeBlockHash = beaconHead.safeExecutionBlockHash,
-     finalizedBlockHash = beaconHead.finalizedExecutionBlockHash,
-     payloadAttributes = Opt.none attributes)
- case node.dag.cfg.consensusForkAtEpoch(
-     forkyBlck.message.slot.epoch)
- of ConsensusFork.Deneb, ConsensusFork.Electra:
-   callForkchoiceUpdated(PayloadAttributesV3)
- of ConsensusFork.Capella:
-   # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/shanghai.md#specification-1
-   # Consensus layer client MUST call this method instead of
-   # `engine_forkchoiceUpdatedV1` under any of the following
-   # conditions:
-   # `headBlockHash` references a block which `timestamp` is
-   # greater or equal to the Shanghai timestamp
-   callForkchoiceUpdated(PayloadAttributesV2)
- of ConsensusFork.Bellatrix:
-   callForkchoiceUpdated(PayloadAttributesV1)
- of ConsensusFork.Phase0, ConsensusFork.Altair:
-   discard
else: discard
optimisticProcessor = initOptimisticProcessor(
getBeaconTime, optimisticHandler)
@@ -104,9 +60,46 @@ proc initLightClient*(
proc onOptimisticHeader(
lightClient: LightClient,
optimisticHeader: ForkedLightClientHeader) =
if node.optimisticFcuFut != nil:
return
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
- optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)
let bid = forkyHeader.beacon.toBlockId()
logScope:
opt = bid
dag = node.dag.head.bid
wallSlot = node.currentSlot
when lcDataFork >= LightClientDataFork.Capella:
let
consensusFork = node.dag.cfg.consensusForkAtEpoch(bid.slot.epoch)
blockHash = forkyHeader.execution.block_hash
# Retain optimistic head for other `forkchoiceUpdated` callers.
# May temporarily block `forkchoiceUpdated` calls, e.g., Geth:
# - Refuses `newPayload`: "Ignoring payload while snap syncing"
# - Refuses `fcU`: "Forkchoice requested unknown head"
# Once DAG sync catches up or as new optimistic heads are fetched
# the situation recovers
debug "New LC optimistic header"
node.consensusManager[].setOptimisticHead(bid, blockHash)
if not node.consensusManager[].shouldSyncOptimistically(node.currentSlot):
return
# engine_forkchoiceUpdated
let beaconHead = node.attestationPool[].getBeaconHead(nil)
withConsensusFork(consensusFork):
when lcDataForkAtConsensusFork(consensusFork) == lcDataFork:
node.optimisticFcuFut = node.elManager.forkchoiceUpdated(
headBlockHash = blockHash,
safeBlockHash = beaconHead.safeExecutionBlockHash,
finalizedBlockHash = beaconHead.finalizedExecutionBlockHash,
payloadAttributes = Opt.none consensusFork.PayloadAttributes)
node.optimisticFcuFut.addCallback do (future: pointer):
node.optimisticFcuFut = nil
else:
# The execution block hash is only available from Capella onward
info "Ignoring new LC optimistic header until Capella"
lightClient.onOptimisticHeader = onOptimisticHeader
lightClient.trustedBlockRoot = config.trustedBlockRoot
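The new `onOptimisticHeader` serializes engine calls by keeping at most one `forkchoiceUpdated` future in flight and clearing the handle from a completion callback. A minimal chronos sketch of the same guard pattern; `doRequest` and `requestInFlight` are hypothetical stand-ins for `elManager.forkchoiceUpdated` and `node.optimisticFcuFut`:

```nim
import chronos

# Keep at most one request in flight; clear the handle on completion.
var requestInFlight: Future[void].Raising([CancelledError])

proc doRequest(): Future[void] {.async: (raises: [CancelledError]).} =
  await sleepAsync(100.milliseconds)  # stands in for the engine API call

proc onNewHeader() =
  if requestInFlight != nil:
    return  # previous call still pending: drop this trigger
  requestInFlight = doRequest()
  requestInFlight.addCallback do (future: pointer):
    requestInFlight = nil  # allow the next trigger through

onNewHeader()
onNewHeader()  # ignored while the first request is in flight
waitFor sleepAsync(200.milliseconds)
```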

View File

@@ -20,29 +20,19 @@ export gossip_validation
logScope:
topics = "gossip_opt"
- const
-   # Maximum `blocks` to cache (not validated; deleted on new optimistic header)
-   maxBlocks = 16 # <= `GOSSIP_MAX_SIZE_BELLATRIX` (10 MB) each
-   # Minimum interval at which spam is logged
-   minLogInterval = chronos.seconds(5)
type
- MsgTrustedBlockProcessor* =
-   proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock): Future[void] {.
-     async: (raises: [CancelledError]).}
+ OptimisticBlockVerifier* = proc(
+   signedBlock: ForkedSignedBeaconBlock
+ ): Future[void] {.async: (raises: [CancelledError]).}
OptimisticProcessor* = ref object
getBeaconTime: GetBeaconTimeFn
- optimisticVerifier: MsgTrustedBlockProcessor
- blocks: Table[Eth2Digest, ref ForkedSignedBeaconBlock]
- latestOptimisticSlot: Slot
+ optimisticVerifier: OptimisticBlockVerifier
processFut: Future[void].Raising([CancelledError])
- logMoment: Moment
proc initOptimisticProcessor*(
getBeaconTime: GetBeaconTimeFn,
- optimisticVerifier: MsgTrustedBlockProcessor): OptimisticProcessor =
+ optimisticVerifier: OptimisticBlockVerifier): OptimisticProcessor =
OptimisticProcessor(
getBeaconTime: getBeaconTime,
optimisticVerifier: optimisticVerifier)
@@ -56,9 +46,6 @@ proc validateBeaconBlock(
(wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero):
return errIgnore("BeaconBlock: slot too high")
- if signed_beacon_block.message.slot <= self.latestOptimisticSlot:
-   return errIgnore("BeaconBlock: no significant progress")
if not signed_beacon_block.message.is_execution_block():
return errIgnore("BeaconBlock: no execution block")
@@ -93,32 +80,16 @@ proc processSignedBeaconBlock*(
debug "Dropping optimistic block", error = v.error
return err(v.error)
- # Note that validation of blocks is delayed by ~4/3 slots because we have to
- # wait for the sync committee to sign the correct block and for that signature
- # to be included in the next block. Therefore, we skip block validation here
- # and cache the block in memory. Because there is no validation, we have to
- # mitigate against bogus blocks, mostly by bounding the caches. Assuming that
- # any denial-of-service attacks eventually subside, care is taken to recover.
- template logWithSpamProtection(body: untyped): untyped =
-   block:
-     let now = Moment.now()
-     if self.logMoment + minLogInterval <= now:
-       logScope: minLogInterval
-       body
-       self.logMoment = now
+ # Only process one block at a time (backpressure)
+ trace "Optimistic block validated"
+ if self.processFut == nil:
+   self.processFut = self.optimisticVerifier(
+     ForkedSignedBeaconBlock.init(signedBlock))
- # Store block for later verification
- if not self.blocks.hasKey(signedBlock.root):
-   # If `blocks` is full, we got spammed with multiple blocks for a slot,
-   # or the optimistic header advancements have all been withheld from us.
-   # Whenever the optimistic header advances, old blocks are cleared,
-   # so we can simply ignore additional spam blocks until that happens.
-   if self.blocks.len >= maxBlocks:
-     logWithSpamProtection:
-       error "`blocks` full - ignoring", maxBlocks
-   else:
-     self.blocks[signedBlock.root] =
-       newClone(ForkedSignedBeaconBlock.init(signedBlock))
+   proc handleFinishedProcess(future: pointer) =
+     self.processFut = nil
+   self.processFut.addCallback(handleFinishedProcess)
# Block validation is delegated to the sync committee and is done with delay.
# If we forward invalid spam blocks, we may be disconnected + IP banned,
@@ -127,40 +98,4 @@ proc processSignedBeaconBlock*(
# However, we are actively contributing to other topics, so some of the
# negative peer score may be offset through those different topics.
# The practical impact depends on the actually deployed scoring heuristics.
trace "Optimistic block cached"
return errIgnore("Validation delegated to sync committee")
proc setOptimisticHeader*(
self: OptimisticProcessor, optimisticHeader: BeaconBlockHeader) =
# If irrelevant, skip processing
if optimisticHeader.slot <= self.latestOptimisticSlot:
return
self.latestOptimisticSlot = optimisticHeader.slot
# Delete blocks that are no longer of interest
let blockRoot = optimisticHeader.hash_tree_root()
var
rootsToDelete: seq[Eth2Digest]
signedBlock: ref ForkedMsgTrustedSignedBeaconBlock
for root, blck in self.blocks:
if root == blockRoot:
signedBlock = blck.asMsgTrusted()
if blck[].slot <= optimisticHeader.slot:
rootsToDelete.add root
for root in rootsToDelete:
self.blocks.del root
# Block must be known
if signedBlock == nil:
return
# If a block is already being processed, skip (backpressure)
if self.processFut != nil:
return
self.processFut = self.optimisticVerifier(signedBlock[])
proc handleFinishedProcess(future: pointer) =
self.processFut = nil
self.processFut.addCallback(handleFinishedProcess)
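The replacement keeps answering `errIgnore` rather than accepting or rejecting, which matches gossipsub's three-way validation semantics that the comment above relies on. An illustrative sketch with simplified types, not libp2p's actual definitions:

```nim
# Illustrative three-way gossip validation outcome.
type ValidationOutcome = enum
  Accept  # forward the message to other peers
  Ignore  # drop silently: unverifiable, no penalty for the sender
  Reject  # provably invalid: penalizes the sending peer's score

func classify(validSignature, fullyValidated: bool): ValidationOutcome =
  if not validSignature:
    Reject  # forwarding this could get us disconnected or banned
  elif not fullyValidated:
    Ignore  # the light client cannot fully validate: delegate, don't forward
  else:
    Accept
```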

View File

@@ -9,6 +9,7 @@
import
std/[json, sequtils, times],
stew/saturation_arith,
eth/common/[eth_types_rlp, transaction],
eth/keys,
eth/p2p/discoveryv5/random2,
@@ -1257,8 +1258,8 @@ proc ETHExecutionBlockHeaderCreateFromJson(
logsBloom: distinctBase(data.logsBloom),
difficulty: data.difficulty,
number: distinctBase(data.number),
- gasLimit: distinctBase(data.gasLimit),
- gasUsed: distinctBase(data.gasUsed),
+ gasLimit: GasInt.saturate distinctBase(data.gasLimit),
+ gasUsed: GasInt.saturate distinctBase(data.gasUsed),
timestamp: EthTime(distinctBase(data.timestamp)),
extraData: distinctBase(data.extraData),
mixHash: data.mixHash.asEth2Digest,
@@ -1613,9 +1614,9 @@ proc ETHTransactionsCreateFromJson(
chainId: distinctBase(tx.chainId).u256,
`from`: ExecutionAddress(data: fromAddress),
nonce: tx.nonce,
- maxPriorityFeePerGas: tx.maxPriorityFeePerGas,
- maxFeePerGas: tx.maxFeePerGas,
- gas: tx.gasLimit,
+ maxPriorityFeePerGas: tx.maxPriorityFeePerGas.uint64,
+ maxFeePerGas: tx.maxFeePerGas.uint64,
+ gas: tx.gasLimit.uint64,
destinationType: destinationType,
to: ExecutionAddress(data: toAddress),
value: tx.value,
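`GasInt.saturate` clamps instead of wrapping when narrowing the JSON's unsigned 64-bit gas values into the signed `GasInt`. A standalone sketch of the idea; `saturate` here is a local helper, not stew's implementation:

```nim
# Local stand-in for the stew helper imported above: clamp a uint64 into
# int64 range instead of overflowing.
func saturate(T: typedesc[int64], x: uint64): int64 =
  if x > uint64(int64.high): int64.high
  else: int64(x)

doAssert int64.saturate(0xFFFF_FFFF_FFFF_FFFF'u64) == int64.high
doAssert int64.saturate(30_000_000'u64) == 30_000_000'i64
```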

View File

@@ -342,25 +342,26 @@ proc installMessageValidators*(
for consensusFork in ConsensusFork:
withLcDataFork(lcDataForkAtConsensusFork(consensusFork)):
when lcDataFork > LightClientDataFork.None:
- let
-   contextFork = consensusFork # Avoid capturing `Deneb` (Nim 1.6)
-   digest = forkDigests[].atConsensusFork(contextFork)
+ closureScope:
+   let
+     contextFork = consensusFork
+     digest = forkDigests[].atConsensusFork(contextFork)
- # light_client_finality_update
- # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_finality_update
- lightClient.network.addValidator(
-   getLightClientFinalityUpdateTopic(digest), proc (
-       msg: lcDataFork.LightClientFinalityUpdate
-   ): ValidationResult =
-     validate(msg, contextFork, processLightClientFinalityUpdate))
+   # light_client_finality_update
+   # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_finality_update
+   lightClient.network.addValidator(
+     getLightClientFinalityUpdateTopic(digest), proc (
+         msg: lcDataFork.LightClientFinalityUpdate
+     ): ValidationResult =
+       validate(msg, contextFork, processLightClientFinalityUpdate))
- # light_client_optimistic_update
- # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
- lightClient.network.addValidator(
-   getLightClientOptimisticUpdateTopic(digest), proc (
-       msg: lcDataFork.LightClientOptimisticUpdate
-   ): ValidationResult =
-     validate(msg, contextFork, processLightClientOptimisticUpdate))
+   # light_client_optimistic_update
+   # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
+   lightClient.network.addValidator(
+     getLightClientOptimisticUpdateTopic(digest), proc (
+         msg: lcDataFork.LightClientOptimisticUpdate
+     ): ValidationResult =
+       validate(msg, contextFork, processLightClientOptimisticUpdate))
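The `closureScope` wrapper matters because Nim closures created in a loop otherwise capture the loop variable itself, not its per-iteration value. A minimal illustration of the bug it prevents:

```nim
# Each closure must observe its own iteration's value; without
# `closureScope` (a template in system) they would all share one
# variable and see the final value.
proc makeHandlers(): seq[proc (): int {.closure.}] =
  for i in 0 ..< 3:
    closureScope:
      let captured = i  # fresh binding per iteration
      result.add(proc (): int = captured)

for h in makeHandlers():
  echo h()  # 0, 1, 2 -- without closureScope: 2, 2, 2
```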
proc updateGossipStatus*(
lightClient: LightClient, slot: Slot, dagIsBehind = default(Option[bool])) =

View File

@@ -1619,7 +1619,7 @@ func syncStatus(node: BeaconNode, wallSlot: Slot): string =
node.syncManager.syncStatus & optimisticSuffix & lightClientSuffix
elif node.backfiller.inProgress:
"backfill: " & node.backfiller.syncStatus
- elif optimistic_head:
+ elif optimisticHead:
"synced/opt"
else:
"synced"
@@ -1768,7 +1768,8 @@ proc installMessageValidators(node: BeaconNode) =
node.network.addAsyncValidator(
getAttestationTopic(digest, subnet_id), proc (
attestation: electra.Attestation
- ): Future[ValidationResult] {.async: (raises: [CancelledError]).} =
+ ): Future[ValidationResult] {.
+     async: (raises: [CancelledError]).} =
return toValidationResult(
await node.processor.processAttestation(
MsgSource.gossip, attestation, subnet_id,
@@ -1780,7 +1781,8 @@ proc installMessageValidators(node: BeaconNode) =
node.network.addAsyncValidator(
getAttestationTopic(digest, subnet_id), proc (
attestation: phase0.Attestation
- ): Future[ValidationResult] {.async: (raises: [CancelledError]).} =
+ ): Future[ValidationResult] {.
+     async: (raises: [CancelledError]).} =
return toValidationResult(
await node.processor.processAttestation(
MsgSource.gossip, attestation, subnet_id,

View File

@@ -107,23 +107,15 @@ programMain:
else:
nil
- optimisticHandler = proc(signedBlock: ForkedMsgTrustedSignedBeaconBlock):
-   Future[void] {.async: (raises: [CancelledError]).} =
-   notice "New LC optimistic block",
-     opt = signedBlock.toBlockId(),
-     wallSlot = getBeaconTime().slotOrZero
+ optimisticHandler = proc(
+   signedBlock: ForkedSignedBeaconBlock
+ ): Future[void] {.async: (raises: [CancelledError]).} =
withBlck(signedBlock):
when consensusFork >= ConsensusFork.Bellatrix:
if forkyBlck.message.is_execution_block:
template payload(): auto = forkyBlck.message.body.execution_payload
if elManager != nil and not payload.block_hash.isZero:
discard await elManager.newExecutionPayload(forkyBlck.message)
- discard await elManager.forkchoiceUpdated(
-   headBlockHash = payload.block_hash,
-   safeBlockHash = payload.block_hash, # stub value
-   finalizedBlockHash = ZERO_HASH,
-   payloadAttributes = Opt.none(consensusFork.PayloadAttributes))
else: discard
optimisticProcessor = initOptimisticProcessor(
getBeaconTime, optimisticHandler)
@@ -153,26 +145,54 @@ programMain:
waitFor network.startListening()
waitFor network.start()
func isSynced(optimisticSlot: Slot, wallSlot: Slot): bool =
# Check whether light client has synced sufficiently close to wall slot
const maxAge = 2 * SLOTS_PER_EPOCH
optimisticSlot >= max(wallSlot, maxAge.Slot) - maxAge
proc onFinalizedHeader(
lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
withForkyHeader(finalizedHeader):
when lcDataFork > LightClientDataFork.None:
info "New LC finalized header",
finalized_header = shortLog(forkyHeader)
let
period = forkyHeader.beacon.slot.sync_committee_period
syncCommittee = lightClient.finalizedSyncCommittee.expect("Init OK")
db.putSyncCommittee(period, syncCommittee)
db.putLatestFinalizedHeader(finalizedHeader)
var optimisticFcuFut: Future[(PayloadExecutionStatus, Opt[BlockHash])].Raising([CancelledError])
proc onOptimisticHeader(
lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
if optimisticFcuFut != nil:
return
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
info "New LC optimistic header",
optimistic_header = shortLog(forkyHeader)
optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)
logScope: optimistic_header = shortLog(forkyHeader)
when lcDataFork >= LightClientDataFork.Capella:
let
bid = forkyHeader.beacon.toBlockId()
consensusFork = cfg.consensusForkAtEpoch(bid.slot.epoch)
blockHash = forkyHeader.execution.block_hash
info "New LC optimistic header"
if elManager == nil or blockHash.isZero or
not isSynced(bid.slot, getBeaconTime().slotOrZero()):
return
withConsensusFork(consensusFork):
when lcDataForkAtConsensusFork(consensusFork) == lcDataFork:
optimisticFcuFut = elManager.forkchoiceUpdated(
headBlockHash = blockHash,
safeBlockHash = blockHash, # stub value
finalizedBlockHash = ZERO_HASH,
payloadAttributes = Opt.none(consensusFork.PayloadAttributes))
optimisticFcuFut.addCallback do (future: pointer):
optimisticFcuFut = nil
else:
info "Ignoring new LC optimistic header until Capella"
lightClient.onFinalizedHeader = onFinalizedHeader
lightClient.onOptimisticHeader = onOptimisticHeader
@@ -204,9 +224,7 @@ programMain:
let optimisticHeader = lightClient.optimisticHeader
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
- # Check whether light client has synced sufficiently close to wall slot
- const maxAge = 2 * SLOTS_PER_EPOCH
- forkyHeader.beacon.slot >= max(wallSlot, maxAge.Slot) - maxAge
+ isSynced(forkyHeader.beacon.slot, wallSlot)
else:
false
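The extracted `isSynced` helper tolerates an optimistic header up to two epochs behind the wall slot, with `max` guarding the subtraction near genesis. A standalone version with mainnet's `SLOTS_PER_EPOCH = 32` hard-coded so the arithmetic is visible (plain `uint64` instead of nimbus' distinct `Slot`):

```nim
const
  SLOTS_PER_EPOCH = 32'u64               # mainnet preset
  maxAge = 2'u64 * SLOTS_PER_EPOCH       # 64 slots

func isSynced(optimisticSlot, wallSlot: uint64): bool =
  # `max` keeps the subtraction from underflowing during the first
  # `maxAge` slots after genesis.
  optimisticSlot >= max(wallSlot, maxAge) - maxAge

doAssert isSynced(1000, 1064)     # exactly two epochs old: still synced
doAssert not isSynced(999, 1064)  # older than two epochs
doAssert isSynced(0, 10)          # near genesis: underflow guarded
```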

View File

@@ -77,7 +77,7 @@ type
BlsResult*[T] = Result[T, cstring]
TrustedSig* = object
- data* {.align: 16.}: array[RawSigSize, byte]
+ blob* {.align: 16.}: array[RawSigSize, byte]
SomeSig* = TrustedSig | ValidatorSig
@@ -390,12 +390,9 @@ func toRaw*(x: ValidatorPrivKey): array[32, byte] =
# TODO: distinct type - see https://github.com/status-im/nim-blscurve/pull/67
static: doAssert BLS_BACKEND == BLST
result = SecretKey(x).exportRaw()
- template toRaw*(x: ValidatorPubKey | ValidatorSig): auto =
-   x.blob
- template toRaw*(x: TrustedSig): auto =
-   x.data
+ template toRaw*(x: ValidatorPubKey | SomeSig): auto =
+   x.blob
func toHex*(x: BlsCurveType): string =
toHex(toRaw(x))
@@ -507,7 +504,7 @@ template fromSszBytes*(T: type[ValidatorPubKey | ValidatorSig], bytes: openArray
# Logging
# ----------------------------------------------------------------------
- func shortLog*(x: ValidatorPubKey | ValidatorSig): string =
+ func shortLog*(x: ValidatorPubKey | SomeSig): string =
## Logging for wrapped BLS types
## that may contain valid or non-validated data
byteutils.toHex(x.blob.toOpenArray(0, 3))
@@ -520,9 +517,6 @@ func shortLog*(x: ValidatorPrivKey): string =
## Logging for raw unwrapped BLS types
"<private key>"
- func shortLog*(x: TrustedSig): string =
-   byteutils.toHex(x.data.toOpenArray(0, 3))
# Initialization
# ----------------------------------------------------------------------
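Renaming `TrustedSig.data` to `blob` is what lets the duplicate `toRaw`/`shortLog` overloads collapse into single definitions over the existing `SomeSig` type class. A simplified sketch of the mechanism, with stand-in types:

```nim
# Simplified stand-ins: both types now expose the same `blob` field,
# so one template over the `SomeSig` type class serves both.
type
  ValidatorSig = object
    blob: array[96, byte]
  TrustedSig = object
    blob: array[96, byte]  # previously named `data`
  SomeSig = TrustedSig | ValidatorSig

template toRaw(x: SomeSig): auto =
  x.blob

var s: TrustedSig
doAssert toRaw(s).len == 96
```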

View File

@@ -513,9 +513,9 @@ proc blockToBlockHeader*(blck: ForkyBeaconBlock): ExecutionBlockHeader =
logsBloom : payload.logs_bloom.data,
difficulty : default(DifficultyInt),
number : payload.block_number,
- gasLimit : cast[GasInt](payload.gas_limit),
- gasUsed : cast[GasInt](payload.gas_used),
- timestamp : EthTime(int64.saturate payload.timestamp),
+ gasLimit : GasInt.saturate(payload.gas_limit),
+ gasUsed : GasInt.saturate(payload.gas_used),
+ timestamp : EthTime(payload.timestamp),
extraData : payload.extra_data.asSeq,
mixHash : payload.prev_randao, # EIP-4399 `mixHash` -> `prevRandao`
nonce : default(BlockNonce),

View File

@@ -18,8 +18,8 @@ const
"Copyright (c) 2019-" & compileYear & " Status Research & Development GmbH"
versionMajor* = 24
- versionMinor* = 5
- versionBuild* = 1
+ versionMinor* = 6
+ versionBuild* = 0
versionBlob* = "stateofus" # Single word - ends up in the default graffiti

View File

@@ -178,13 +178,11 @@ INF 2022-11-21 18:03:27.984+01:00 New LC optimistic header opt
WRN 2022-11-21 18:03:31.419+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=0 new_peers=@[] current_peers=7 wanted_peers=160
INF 2022-11-21 18:03:36.001+01:00 Slot start slot=1109718 epoch=34678 sync=synced peers=7 head=c5464508:1109716 finalized=c092a1d1:1109216 delay=1ms98us
INF 2022-11-21 18:03:40.012+01:00 New LC optimistic header optimistic_header="(beacon: (slot: 1109717, proposer_index: 835, parent_root: \"c5464508\", state_root: \"13f823f8\"))"
- NTC 2022-11-21 18:03:40.012+01:00 New LC optimistic block opt=99ab28aa:1109717 wallSlot=1109718
WRN 2022-11-21 18:03:40.422+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=1 new_peers=@[] current_peers=7 wanted_peers=160
INF 2022-11-21 18:03:48.001+01:00 Slot start slot=1109719 epoch=34678 sync=synced peers=7 head=99ab28aa:1109717 finalized=c092a1d1:1109216 delay=1ms53us
WRN 2022-11-21 18:03:50.205+01:00 Peer count low, no new peers discovered topics="networking" discovered_nodes=0 new_peers=@[] current_peers=7 wanted_peers=160
INF 2022-11-21 18:04:00.001+01:00 Slot start slot=1109720 epoch=34678 sync=synced peers=7 head=99ab28aa:1109717 finalized=c092a1d1:1109216 delay=1ms145us
INF 2022-11-21 18:04:03.982+01:00 New LC optimistic header optimistic_header="(beacon: (slot: 1109718, proposer_index: 1202, parent_root: \"99ab28aa\", state_root: \"7f7f88d2\"))"
- NTC 2022-11-21 18:04:03.982+01:00 New LC optimistic block opt=ab007266:1109718 wallSlot=1109720
```
!!! note

View File

@@ -884,7 +884,7 @@ suite "SyncManager test suite":
# Push a single request that will fail with all blocks being unviable
var f11 = queue.push(r11, chain.getSlice(startSlot, r11),
Opt.none(seq[BlobSidecars]))
- discard await f11.withTimeout(100.milliseconds)
+ discard await f11.withTimeout(1.seconds)
check:
f11.finished == true
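For context on the adjusted test: chronos' `withTimeout` completes with `true` only if the wrapped future finishes inside the deadline, so a too-tight 100 ms budget can make the check flaky on slow CI. A minimal usage sketch with a hypothetical `slowOp`:

```nim
import chronos

proc slowOp(): Future[int] {.async.} =
  await sleepAsync(50.milliseconds)
  return 42

proc main() {.async.} =
  let fut = slowOp()
  if await fut.withTimeout(1.seconds):
    echo "done: ", fut.read()  # future completed within the deadline
  else:
    echo "timed out"           # future still pending

waitFor main()
```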

View File

@@ -118,6 +118,8 @@ proc build_empty_merge_execution_payload(state: bellatrix.BeaconState):
bellatrix.ExecutionPayloadForSigning(executionPayload: payload,
blockValue: Wei.zero)
from stew/saturating_arith import saturate
proc build_empty_execution_payload(
state: bellatrix.BeaconState,
feeRecipient: Eth1Address): bellatrix.ExecutionPayloadForSigning =
@@ -127,8 +129,8 @@ proc build_empty_execution_payload(
latest = state.latest_execution_payload_header
timestamp = compute_timestamp_at_slot(state, state.slot)
randao_mix = get_randao_mix(state, get_current_epoch(state))
- base_fee = calcEip1599BaseFee(latest.gas_limit,
-   latest.gas_used,
+ base_fee = calcEip1599BaseFee(GasInt.saturate latest.gas_limit,
+   GasInt.saturate latest.gas_used,
latest.base_fee_per_gas)
var payload = bellatrix.ExecutionPayloadForSigning(
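`calcEip1599BaseFee` (nim-eth's spelling) applies the EIP-1559 base-fee update rule, which is why it needs the parent block's gas limit and gas used alongside the base fee. A hedged sketch of that rule in simplified `uint64` math (the real helper takes `GasInt` gas values and a `UInt256` base fee, as the call above shows):

```nim
const
  ELASTICITY_MULTIPLIER = 2'u64
  BASE_FEE_MAX_CHANGE_DENOMINATOR = 8'u64

func nextBaseFee(gasLimit, gasUsed, baseFee: uint64): uint64 =
  let gasTarget = gasLimit div ELASTICITY_MULTIPLIER
  if gasUsed == gasTarget:
    baseFee
  elif gasUsed > gasTarget:
    # Fuller than target: base fee rises by up to 1/8 (12.5%)
    baseFee + max(1'u64,
      baseFee * (gasUsed - gasTarget) div gasTarget div
      BASE_FEE_MAX_CHANGE_DENOMINATOR)
  else:
    # Emptier than target: base fee falls by up to 1/8
    baseFee - baseFee * (gasTarget - gasUsed) div gasTarget div
      BASE_FEE_MAX_CHANGE_DENOMINATOR

doAssert nextBaseFee(30_000_000, 15_000_000, 1_000_000_000) == 1_000_000_000
doAssert nextBaseFee(30_000_000, 30_000_000, 1_000_000_000) == 1_125_000_000
```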

vendor/nim-bearssl vendored

@@ -1 +1 @@
- Subproject commit d81b37dc2011bf3a2bd93500489877c2ce8e6ac3
+ Subproject commit a806cbfab5fe8de49c76139f8705fff79daf99ee

vendor/nim-chronos vendored

@@ -1 +1 @@
- Subproject commit 8a306763cec8105fa83574b56734b0f66823f844
+ Subproject commit 1b9d9253e89445d585d0fff39cc0d19254fdfd0d

vendor/nim-eth vendored

@@ -1 +1 @@
- Subproject commit 9b6497ed8a05ba25ee47142f3fc1f61742b51a6c
+ Subproject commit f169068df6c11a2aeba27584c60e354e19c42e94

@@ -1 +1 @@
- Subproject commit cefd3eec9c13330f6726383ed0c8a60ca8823603
+ Subproject commit aab85b6d242df38706664373f089675235953ab8

vendor/nim-metrics vendored

@@ -1 +1 @@
- Subproject commit 0e768ca7fb7df4798649145a403e7db65cae1f8b
+ Subproject commit 2e29df095059a7a787b234f040612b742567b2bc

@@ -1 +1 @@
- Subproject commit a15dc546a053dd94c610a4178887bbf7c908aadb
+ Subproject commit 8cdaec502b5a48f2514e11209f0d81a001d2a2b1