# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  eth/common,
  results,
  ../web3_eth_conv,
  ../beacon_engine,
  web3/execution_types,
  ../payload_conv,
  ./api_utils,
  chronicles

{.push gcsafe, raises:[CatchableError].}

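# Collect the blob versioned hashes of every transaction in the payload and
# compare them, in order, against the list supplied by the consensus client
# with newPayloadV3/V4.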
func validateVersionedHashed(payload: ExecutionPayload,
                             expected: openArray[Web3Hash]): bool =
  var versionedHashes: seq[common.Hash256]
  for x in payload.transactions:
    let tx = rlp.decode(distinctBase(x), Transaction)
    versionedHashes.add tx.versionedHashes

  if versionedHashes.len != expected.len:
    return false

  for i, x in expected:
    if distinctBase(x) != versionedHashes[i].data:
      return false
  true

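# Enforce the Engine API version/fork consistency rules: the payload version
# must match the fork implied by its timestamp (V2 for Shanghai, V3 for
# Cancun, V4 for Prague), and newPayloadV3 and later only accept a payload of
# the same version as the API call.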
template validateVersion(com, timestamp, version, apiVersion) =
  if apiVersion == Version.V4:
    if not com.isPragueOrLater(timestamp):
      raise unsupportedFork("newPayloadV4 expects payload timestamp to fall within Prague")

  if com.isPragueOrLater(timestamp):
    if version != Version.V4:
      raise invalidParams("if timestamp is Prague or later, " &
        "payload must be ExecutionPayloadV4, got ExecutionPayload" & $version)

  if apiVersion == Version.V3:
    if not com.isCancunOrLater(timestamp):
      raise unsupportedFork("newPayloadV3 expects payload timestamp to fall within Cancun")

  if com.isCancunOrLater(timestamp):
    if version != Version.V3:
      raise invalidParams("if timestamp is Cancun or later, " &
        "payload must be ExecutionPayloadV3, got ExecutionPayload" & $version)

  elif com.isShanghaiOrLater(timestamp):
    if version != Version.V2:
      raise invalidParams("if timestamp is Shanghai or later, " &
        "payload must be ExecutionPayloadV2, got ExecutionPayload" & $version)

  elif version != Version.V1:
    raise invalidParams("if timestamp is earlier than Shanghai, " &
      "payload must be ExecutionPayloadV1, got ExecutionPayload" & $version)

  if apiVersion >= Version.V3:
    if version != apiVersion:
      raise invalidParams("newPayload" & $apiVersion &
        " expects ExecutionPayload" & $apiVersion &
        " but got ExecutionPayload" & $version)

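# Ensure that every field introduced by the payload version is present:
# withdrawals from V2, the blob gas fields from V3, and the execution layer
# requests (deposits, withdrawal requests, consolidation requests) from V4.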
template validatePayload(apiVersion, version, payload) =
  if version >= Version.V2:
    if payload.withdrawals.isNone:
      raise invalidParams("newPayload" & $apiVersion &
        ": withdrawals is expected from execution payload")

  if apiVersion >= Version.V3 or version >= Version.V3:
    if payload.blobGasUsed.isNone:
      raise invalidParams("newPayload" & $apiVersion &
        ": blobGasUsed is expected from execution payload")
    if payload.excessBlobGas.isNone:
      raise invalidParams("newPayload" & $apiVersion &
        ": excessBlobGas is expected from execution payload")

  if apiVersion >= Version.V4 or version >= Version.V4:
    if payload.depositRequests.isNone:
      raise invalidParams("newPayload" & $apiVersion &
        ": depositRequests is expected from execution payload")
    if payload.withdrawalRequests.isNone:
      raise invalidParams("newPayload" & $apiVersion &
        ": withdrawalRequests is expected from execution payload")
    if payload.consolidationRequests.isNone:
      raise invalidParams("newPayload" & $apiVersion &
        ": consolidationRequests is expected from execution payload")

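# Handler for engine_newPayloadV1..V4: validate the payload against the API
# version and the active fork, then try to insert the block into the chain
# without touching the canonical head (the head only moves on a subsequent
# forkchoiceUpdated).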
proc newPayload*(ben: BeaconEngineRef,
                 apiVersion: Version,
                 payload: ExecutionPayload,
                 versionedHashes = Opt.none(seq[Web3Hash]),
                 beaconRoot = Opt.none(Web3Hash)): PayloadStatusV1 =

trace "Engine API request received",
|
|
|
|
meth = "newPayload",
|
|
|
|
number = payload.blockNumber,
|
|
|
|
hash = payload.blockHash
|
|
|
|
|
2024-04-19 19:43:13 +00:00
|
|
|
if apiVersion >= Version.V3:
|
2023-10-23 13:59:57 +00:00
|
|
|
if beaconRoot.isNone:
|
|
|
|
raise invalidParams("newPayloadV3 expect beaconRoot but got none")
|
|
|
|
|
2023-08-27 01:23:45 +00:00
|
|
|
  let
    com = ben.com
    db = com.db
    timestamp = ethTime payload.timestamp
    version = payload.version

  validatePayload(apiVersion, version, payload)
  validateVersion(com, timestamp, version, apiVersion)

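  # Convert the JSON-RPC payload into the native block type used by the rest
  # of the import pipeline.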
  var blk = ethBlock(payload, beaconRoot = ethHash beaconRoot)
  template header: BlockHeader = blk.header

  if apiVersion >= Version.V3:
    if versionedHashes.isNone:
      raise invalidParams("newPayload" & $apiVersion &
        " expects blobVersionedHashes but got none")
    if not validateVersionedHashed(payload, versionedHashes.get):
      return invalidStatus(header.parentHash, "invalid blob versionedHashes")

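  # The block hash reported by the consensus client must match the hash of the
  # header reconstructed from the payload fields.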
  let blockHash = ethHash payload.blockHash
  header.validateBlockHash(blockHash, version).isOkOr:
    return error

  # If we already have the block locally, ignore the entire execution and just
  # return a fake success.
  if db.getBlockHeader(blockHash, header):
    warn "Ignoring already known beacon payload",
      number = header.number, hash = blockHash.short
    return validStatus(blockHash)

  # If this block was rejected previously, keep rejecting it
  let res = ben.checkInvalidAncestor(blockHash, blockHash)
  if res.isSome:
    return res.get

  # If the parent is missing, we - in theory - could trigger a sync, but that
  # would also entail a reorg. That is problematic if multiple sibling blocks
  # are being fed to us, and even more so, if some semi-distant uncle shortens
  # our live chain. As such, payload execution will not permit reorgs and thus
  # will not trigger a sync cycle. That is fine though, if we get a fork choice
  # update after legit payload executions.
  var parent: common.BlockHeader
  if not db.getBlockHeader(header.parentHash, parent):
    return ben.delayPayloadImport(header)

  # We have an existing parent, do some sanity checks to avoid the beacon client
  # triggering too early
  let ttd = com.ttd.get(high(UInt256))

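  # For V1 payloads, reject anything still on the proof-of-work side of the
  # merge: the parent's total difficulty must have reached TTD, and a PoW
  # parent whose own parent had already crossed TTD is equally invalid.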
  if version == Version.V1:
    let ptd = db.getScore(header.parentHash).valueOr:
      0.u256
    let gptd = db.getScore(parent.parentHash)
    if ptd < ttd:
      warn "Ignoring pre-merge payload",
        number = header.number, hash = blockHash.short, ptd, ttd
      return invalidStatus()
    if parent.difficulty > 0.u256 and gptd.isSome and gptd.value >= ttd:
      warn "Ignoring pre-merge parent block",
        number = header.number, hash = blockHash.short, ptd, ttd
      return invalidStatus()

  if header.timestamp <= parent.timestamp:
    warn "Invalid timestamp",
      number = header.number, parentNumber = parent.number,
      parent = parent.timestamp, header = header.timestamp
    return invalidStatus(parent.blockHash, "Invalid timestamp")

  # Another corner case is when the node is in snap sync mode and the CL
  # client tries to make it import a block. That should be denied, as pushing
  # something into the database directly will conflict with the assumption of
  # snap sync that it has an empty db that it can fill itself.
  when false:
    if api.eth.SyncMode() != downloader.FullSync:
      return api.delayPayloadImport(header)

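  # Without the parent's state the payload cannot be executed yet; remember
  # the header and report ACCEPTED rather than VALID.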
  if not db.haveBlockAndState(header.parentHash):
    ben.put(blockHash, header)
    warn "State not available, ignoring new payload",
      hash = blockHash,
      number = header.number
    let blockHash = latestValidHash(db, parent, ttd)
    return acceptedStatus(blockHash)

trace "Inserting block without sethead",
|
2024-06-14 07:31:08 +00:00
|
|
|
hash = blockHash, number = header.number
|
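  # Execute and persist the block without updating the canonical head; the
  # head only advances on a later forkchoiceUpdated call.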
  let vres = ben.chain.insertBlockWithoutSetHead(blk)
  if vres.isErr:
    ben.setInvalidAncestor(header, blockHash)
    let blockHash = latestValidHash(db, parent, ttd)
    return invalidStatus(blockHash, vres.error())

  # We've accepted a valid payload from the beacon client. Mark the local
  # chain transitions to notify other subsystems (e.g. downloader) of the
  # behavioral change.
  if not ben.ttdReached():
    ben.reachTTD()
    # TODO: cancel downloader

  return validStatus(blockHash)