# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  std/[typetraits],
  eth/common,
  results,
  ../web3_eth_conv,
  ../beacon_engine,
  web3/execution_types,
  ./api_utils,
  chronicles

{.push gcsafe, raises:[CatchableError].}

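# `validateVersion` checks the payload attributes supplied with a
# forkchoiceUpdated call against the method version and the fork active at the
# attributes' timestamp. A rough summary of the rules enforced below (derived
# from the checks in this template, not quoted from the spec text):
#   timestamp < Shanghai            -> PayloadAttributesV1
#   Shanghai <= timestamp < Cancun  -> PayloadAttributesV2
#   Cancun <= timestamp             -> PayloadAttributesV3, and only via V3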
template validateVersion(attr, com, apiVersion) =
  let
    version = attr.version
    timestamp = ethTime attr.timestamp

  if apiVersion == Version.V3:
    if version != apiVersion:
      raise invalidAttr("forkChoiceUpdatedV3 expects PayloadAttributesV3" &
        " but got PayloadAttributes" & $version)
    if not com.isCancunOrLater(timestamp):
      raise unsupportedFork(
        "forkchoiceUpdatedV3 got invalid payloadAttributes timestamp")
  else:
    if com.isCancunOrLater(timestamp):
      if version < Version.V3:
        raise unsupportedFork("forkChoiceUpdated" & $apiVersion &
          " doesn't support payloadAttributes" & $version)
      if version > Version.V3:
        raise invalidAttr("forkChoiceUpdated" & $apiVersion &
          " doesn't support PayloadAttributes" & $version)
    elif com.isShanghaiOrLater(timestamp):
      if version < Version.V2:
        raise invalidParams("forkChoiceUpdated" & $apiVersion &
          " doesn't support payloadAttributesV1 when Shanghai is activated")
      if version > Version.V2:
        raise invalidAttr("if timestamp is Shanghai or later," &
          " payloadAttributes must be PayloadAttributesV2")
    else:
      if version != Version.V1:
        raise invalidParams("if timestamp is earlier than Shanghai," &
          " payloadAttributes must be PayloadAttributesV1")

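# `validateHeaderTimestamp` applies the mirror-image check to the head block
# referenced by the forkchoice state: the block's own timestamp must fall in
# the fork era matching the forkchoiceUpdated version used (V1 pre-Shanghai,
# V2 for Shanghai, V3 for Cancun), per the spec links cited below.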
template validateHeaderTimestamp(header, com, apiVersion) =
  # See fCUV3 specification No.2 bullet iii
  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/cancun.md#specification-1
  if com.isCancunOrLater(header.timestamp):
    if apiVersion != Version.V3:
      raise invalidAttr("forkChoiceUpdated" & $apiVersion &
        " doesn't support head block with timestamp >= Cancun")
  # See fCUV2 specification No.2 bullet 1
  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#specification-1
  elif com.isShanghaiOrLater(header.timestamp):
    if apiVersion != Version.V2:
      raise invalidAttr("forkChoiceUpdated" & $apiVersion &
        " doesn't support head block with Shanghai timestamp")
  else:
    if apiVersion != Version.V1:
      raise invalidAttr("forkChoiceUpdated" & $apiVersion &
        " doesn't support head block with timestamp earlier than Shanghai")

proc forkchoiceUpdated*(ben: BeaconEngineRef,
                        apiVersion: Version,
                        update: ForkchoiceStateV1,
                        attrsOpt: Opt[PayloadAttributes]):
                          ForkchoiceUpdatedResponse =
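  ## Handle an engine forkchoiceUpdated request: update the head and, when
  ## given, the safe and finalized block pointers, trigger a sync if the head
  ## is not known locally, and optionally start building a payload whose id is
  ## returned to the caller.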
  let
    com = ben.com
    db = com.db
    chain = ben.chain
    blockHash = ethHash update.headBlockHash

  if blockHash == default(common.Hash256):
    warn "Forkchoice requested update to zero hash"
    return simpleFCU(PayloadExecutionStatus.invalid)

  # Check whether we have the block yet in our database or not. If not, we'll
  # need to either trigger a sync, or to reject this forkchoice update for a
  # reason.
  let header = ben.chain.headerByHash(blockHash).valueOr:
    # If this block was previously invalidated, keep rejecting it here too
    let res = ben.checkInvalidAncestor(blockHash, blockHash)
    if res.isSome:
      return simpleFCU(res.get)

    # If the head hash is unknown (was not given to us in a newPayload request),
    # we cannot resolve the header, so not much to do. This could be extended in
    # the future to resolve from the `eth` network, but it's an unexpected case
    # that should be fixed, not papered over.
    var header: common.BlockHeader
    if not ben.get(blockHash, header):
      warn "Forkchoice requested unknown head",
        hash = blockHash.short
      return simpleFCU(PayloadExecutionStatus.syncing)

    # Header advertised via a past newPayload request. Start syncing to it.
    # Before we do however, make sure any legacy sync is switched off so we
    # don't accidentally have 2 cycles running.
    if not ben.ttdReached():
      ben.reachTTD()
      # TODO: cancel downloader

    info "Forkchoice requested sync to new head",
      number = header.number,
      hash = blockHash.short

    # Update sync header (if any)
    com.syncReqNewHead(header)

    # Pass on finalised header
    if com.haveSyncFinalisedBlockHash() or true:
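      # Note: `or true` makes this guard unconditional, so any non-zero
      # finalised hash is passed on below.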
      let finalizedBlockHash = ethHash update.finalizedBlockHash
      if finalizedBlockHash != zeroHash32:
        com.syncFinalisedBlockHash(finalizedBlockHash)

    return simpleFCU(PayloadExecutionStatus.syncing)

  validateHeaderTimestamp(header, com, apiVersion)

  # Block is known locally, just sanity check that the beacon client does not
  # attempt to push us back to before the merge.
  #
  # Disable terminal PoW block conditions validation for fCUV2 and later.
  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#specification-1
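  # td/ptd below are the total difficulties of the head block and its parent;
  # ttd is the configured terminal total difficulty (defaulting to an
  # unreachable value when unset).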
  if apiVersion == Version.V1:
    let blockNumber = header.number
    if header.difficulty > 0.u256 or blockNumber == 0'u64:
      var
        td, ptd: DifficultyInt
        ttd = com.ttd.get(high(UInt256))

      if not db.getTd(blockHash, td) or
         (blockNumber > 0'u64 and not db.getTd(header.parentHash, ptd)):
        error "TDs unavailable for TTD check",
          number = blockNumber,
          hash = blockHash.short,
          td = td,
          parent = header.parentHash.short,
          ptd = ptd
        return simpleFCU(PayloadExecutionStatus.invalid, "TDs unavailable for TTD check")

      if td < ttd or (blockNumber > 0'u64 and ptd > ttd):
        notice "Refusing beacon update to pre-merge",
          number = blockNumber,
          hash = blockHash.short,
          diff = header.difficulty,
          ptd = ptd,
          ttd = ttd

        return invalidFCU("Refusing beacon update to pre-merge")

  # If the head block is already in our canonical chain, the beacon client is
  # probably resyncing. Ignore the update.
  # See point 2 of fCUV1 specification
  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/paris.md#specification-1
  if ben.chain.isCanonicalAncestor(header.number, blockHash):
    notice "Ignoring beacon update to old head",
      blockHash=blockHash.short,
      blockNumber=header.number
    return validFCU(Opt.none(PayloadID), blockHash)

  # If the beacon client also advertised a finalized block, mark the local
  # chain final and completely in PoS mode.
  let finalizedBlockHash = ethHash update.finalizedBlockHash
  if finalizedBlockHash != default(common.Hash256):
    if not ben.posFinalized:
      ben.finalizePoS()

    if not ben.chain.isCanonical(finalizedBlockHash):
      warn "Final block not in canonical chain",
        hash=finalizedBlockHash.short
      raise invalidForkChoiceState("finalized block not canonical")
    db.finalizedHeaderHash(finalizedBlockHash)

  let safeBlockHash = ethHash update.safeBlockHash
  if safeBlockHash != default(common.Hash256):
    if not ben.chain.isCanonical(safeBlockHash):
      warn "Safe block not in canonical chain",
        hash=safeBlockHash.short
      raise invalidForkChoiceState("safe head not canonical")
    db.safeHeaderHash(safeBlockHash)

  chain.forkChoice(blockHash, finalizedBlockHash).isOkOr:
    return invalidFCU(error, com, header)

  # If payload generation was requested, create a new block to be potentially
  # sealed by the beacon client. The payload will be requested later, and we
  # might replace it arbitrarily many times in between.
  if attrsOpt.isSome:
    let attrs = attrsOpt.get()
    validateVersion(attrs, com, apiVersion)

    let bundle = ben.generatePayload(attrs).valueOr:
      error "Failed to create sealing payload", err = error
      raise invalidAttr(error)

    let id = computePayloadId(blockHash, attrs)
    ben.put(id, bundle.blockValue, bundle.executionPayload, bundle.blobsBundle)
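    # The stored bundle is keyed by `id`; presumably a later getPayload request
    # with this id retrieves the execution payload built here.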

    info "Created payload for sealing",
      id = id.toHex,
      hash = bundle.executionPayload.blockHash.short,
      number = bundle.executionPayload.blockNumber,
      attr = attrs

    return validFCU(Opt.some(id), blockHash)

  return validFCU(Opt.none(PayloadID), blockHash)