# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  std/[sequtils, tables],
  ./web3_eth_conv,
  ./payload_conv,
  chronicles,
  web3/execution_types,
  ./merge_tracker,
  ./payload_queue,
  ./api_handler/api_utils,
  ../db/core_db,
  ../core/[tx_pool, casper, chain],
  ../common/common

export
  common,
  chain

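# BeaconEngineRef drives the Engine API state: it couples the tx pool with
# the forked chain, queues freshly built payloads, and remembers invalid
# blocks reported during sync.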
type
  BeaconEngineRef* = ref object
    txPool: TxPoolRef
    merge : MergeTracker
    queue : PayloadQueue
    chain : ForkedChainRef

    # The forkchoice update and new payload methods require us to return the
    # latest valid hash in an invalid chain. To support that return, we need
    # to track historical bad blocks as well as bad tipsets in case a chain
    # is constantly built on it.
    #
    # There are a few important caveats in this mechanism:
    # - The bad block tracking is ephemeral, in-memory only. We must never
    #   persist any bad block information to disk, as a bug in Geth could end
    #   up blocking a valid chain, even if a later Geth update would accept
    #   it.
    # - Bad blocks will get forgotten after a certain threshold of import
    #   attempts and will be retried. The rationale is that if the network
    #   really-really-really tries to feed us a block, we should give it a
    #   new chance: perhaps it was us being racey instead of the block being
    #   legit bad (this happened in Geth at a point with import vs. pending
    #   race).
    # - Tracking all the blocks built on top of the bad one could be a bit
    #   problematic, so we will only track the head chain segment of a bad
    #   chain to allow discarding progressing bad chains and side chains,
    #   without tracking too much bad data.

    # Ephemeral cache to track invalid blocks and their hit count
    invalidBlocksHits: Table[common.Hash256, int]
    # Ephemeral cache to track invalid tipsets and their bad ancestor
    invalidTipsets   : Table[common.Hash256, common.BlockHeader]

{.push gcsafe, raises:[].}

const
  # invalidBlockHitEviction is the number of times an invalid block can be
  # referenced in forkchoice update or new payload before it is attempted
  # to be reprocessed again.
  invalidBlockHitEviction = 128

  # invalidTipsetsCap is the max number of recent block hashes tracked that
  # have led to some bad ancestor block. It's just an OOM protection.
  invalidTipsetsCap = 512

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

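# setWithdrawals copies the withdrawal list from the payload attributes into
# the PoS state; withdrawals only exist from payload version V2 (Shanghai)
# onwards.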
proc setWithdrawals(ctx: CasperRef, attrs: PayloadAttributes) =
  case attrs.version
  of Version.V2, Version.V3:
    ctx.withdrawals = ethWithdrawals attrs.withdrawals.get
  else:
    ctx.withdrawals = @[]

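# wrapException converts any CatchableError raised by `body` into a string
# error result, so callers receive a Result instead of an exception.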
template wrapException(body: untyped): auto =
  try:
    body
  except CatchableError as ex:
    err(ex.msg)

# setInvalidAncestor is a callback for the downloader to notify us if a bad
# block is encountered during the async sync.
proc setInvalidAncestor(ben: BeaconEngineRef,
                        invalid, origin: common.BlockHeader) =
  ben.invalidTipsets[origin.blockHash] = invalid
  inc ben.invalidBlocksHits.mgetOrPut(invalid.blockHash, 0)

# ------------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------------

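# new sets up a BeaconEngineRef and registers it with the tx pool's bad-block
# notification, so blocks rejected during sync feed the invalid block caches
# above.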
proc new*(_: type BeaconEngineRef,
          txPool: TxPoolRef,
          chain: ForkedChainRef): BeaconEngineRef =
  let ben = BeaconEngineRef(
    txPool: txPool,
    merge : MergeTracker.init(txPool.com.db),
    queue : PayloadQueue(),
    chain : chain,
  )

  txPool.com.notifyBadBlock = proc(invalid, origin: common.BlockHeader)
    {.gcsafe, raises: [].} =
    ben.setInvalidAncestor(invalid, origin)

  ben

# ------------------------------------------------------------------------------
# Public functions, setters
# ------------------------------------------------------------------------------

proc reachTTD*(ben: BeaconEngineRef) =
  ## ReachTTD is called when the first NewHead message is received
  ## from the consensus layer.
  ben.merge.reachTTD()

proc finalizePoS*(ben: BeaconEngineRef) =
  ## FinalizePoS is called when the first FinalisedBlock message is received
  ## from the consensus layer.
  ben.merge.finalizePoS()

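# The put overloads stash headers and freshly built payloads in the queue,
# keyed by block hash or payload id, for later retrieval via get.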
proc put*(ben: BeaconEngineRef,
          hash: common.Hash256, header: common.BlockHeader) =
  ben.queue.put(hash, header)

proc put*(ben: BeaconEngineRef, id: PayloadID,
          blockValue: UInt256, payload: ExecutionPayload,
          blobsBundle: Opt[BlobsBundleV1]) =
  ben.queue.put(id, blockValue, payload, blobsBundle)

proc put*(ben: BeaconEngineRef, id: PayloadID,
          blockValue: UInt256, payload: SomeExecutionPayload,
          blobsBundle: Opt[BlobsBundleV1]) =
  doAssert blobsBundle.isNone == (payload is
    ExecutionPayloadV1 | ExecutionPayloadV2)
  ben.queue.put(id, blockValue, payload, blobsBundle)

proc put*(ben: BeaconEngineRef, id: PayloadID,
          blockValue: UInt256,
          payload: ExecutionPayloadV1 | ExecutionPayloadV2) =
  ben.queue.put(
    id, blockValue, payload, blobsBundle = Opt.none(BlobsBundleV1))

# ------------------------------------------------------------------------------
# Public functions, getters
# ------------------------------------------------------------------------------

func com*(ben: BeaconEngineRef): CommonRef =
  ben.txPool.com

func chain*(ben: BeaconEngineRef): ForkedChainRef =
  ben.chain

func ttdReached*(ben: BeaconEngineRef): bool =
  ## TTDReached reports whether the chain has left the PoW stage.
  ben.merge.ttdReached

func posFinalized*(ben: BeaconEngineRef): bool =
  ## PoSFinalized reports whether the chain has entered the PoS stage.
  ben.merge.posFinalized

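# The get overloads look up previously stashed headers and payloads; they
# return false when the hash or payload id is unknown.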
proc get*(ben: BeaconEngineRef, hash: common.Hash256,
          header: var common.BlockHeader): bool =
  ben.queue.get(hash, header)

proc get*(ben: BeaconEngineRef, id: PayloadID,
          blockValue: var UInt256,
          payload: var ExecutionPayload,
          blobsBundle: var Opt[BlobsBundleV1]): bool =
  ben.queue.get(id, blockValue, payload, blobsBundle)

proc get*(ben: BeaconEngineRef, id: PayloadID,
          blockValue: var UInt256,
          payload: var ExecutionPayloadV1): bool =
  ben.queue.get(id, blockValue, payload)

proc get*(ben: BeaconEngineRef, id: PayloadID,
          blockValue: var UInt256,
          payload: var ExecutionPayloadV2): bool =
  ben.queue.get(id, blockValue, payload)

proc get*(ben: BeaconEngineRef, id: PayloadID,
          blockValue: var UInt256,
          payload: var ExecutionPayloadV3,
          blobsBundle: var BlobsBundleV1): bool =
  ben.queue.get(id, blockValue, payload, blobsBundle)

proc get*(ben: BeaconEngineRef, id: PayloadID,
          blockValue: var UInt256,
          payload: var ExecutionPayloadV1OrV2): bool =
  ben.queue.get(id, blockValue, payload)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

type ExecutionPayloadAndBlobsBundle* = object
  executionPayload*: ExecutionPayload
  blobsBundle*: Opt[BlobsBundleV1]
  blockValue*: UInt256

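# generatePayload asks the tx pool to assemble a new execution block from
# the given payload attributes and packages the result together with the
# optional blobs bundle and the block value.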
proc generatePayload*(ben: BeaconEngineRef,
                      attrs: PayloadAttributes):
                        Result[ExecutionPayloadAndBlobsBundle, string] =
  wrapException:
    let
      xp = ben.txPool
      pos = xp.com.pos
      headBlock = ben.chain.latestHeader

    pos.prevRandao   = attrs.prevRandao
    pos.timestamp    = ethTime attrs.timestamp
    pos.feeRecipient = ethAddr attrs.suggestedFeeRecipient

    if attrs.parentBeaconBlockRoot.isSome:
      pos.parentBeaconBlockRoot = ethHash attrs.parentBeaconBlockRoot.get

    pos.setWithdrawals(attrs)

    if headBlock.blockHash != xp.head.blockHash:
      # reorg
      discard xp.smartHead(headBlock, ben.chain)

    if pos.timestamp <= headBlock.timestamp:
      return err "timestamp must be strictly later than parent"

    # someBaseFee = true: make sure bundle.blk.header
    # has the same blockHash as the generated payload
    let bundle = xp.assembleBlock(someBaseFee = true).valueOr:
      return err(error)

    if bundle.blk.header.extraData.len > 32:
      return err "extraData length should not exceed 32 bytes"

    var blobsBundle: Opt[BlobsBundleV1]
    if bundle.blobsBundle.isSome:
      template blobData: untyped = bundle.blobsBundle.get
      blobsBundle = Opt.some BlobsBundleV1(
        commitments: blobData.commitments.mapIt it.Web3KZGCommitment,
        proofs: blobData.proofs.mapIt it.Web3KZGProof,
        blobs: blobData.blobs.mapIt it.Web3Blob)

    ok ExecutionPayloadAndBlobsBundle(
      executionPayload: executionPayload(bundle.blk),
      blobsBundle: blobsBundle,
      blockValue: bundle.blockValue)

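# A minimal usage sketch (assuming an initialised `txPool` and `chain`;
# `attrs` would arrive with the consensus layer's forkchoiceUpdated call):
#
#   let ben = BeaconEngineRef.new(txPool, chain)
#   let res = ben.generatePayload(attrs)
#   if res.isOk:
#     let bundle = res.get
#     debugEcho "block value: ", bundle.blockValue

# setInvalidAncestor marks a block as known-invalid, recording it as the
# bad ancestor of its own tipset.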
proc setInvalidAncestor*(ben: BeaconEngineRef, header: common.BlockHeader, blockHash: common.Hash256) =
  ben.invalidBlocksHits[blockHash] = 1
  ben.invalidTipsets[blockHash] = header

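# In practice this is called with (parentHash, blockHash) of a new payload:
# if the parent is the head of a known bad chain, the new block is reported
# invalid as well (see delayPayloadImport below).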
# checkInvalidAncestor checks whether the specified chain end links to a known
# bad ancestor. If yes, it constructs the payload failure response to return.
proc checkInvalidAncestor*(ben: BeaconEngineRef,
                           check, head: common.Hash256): Opt[PayloadStatusV1] =
  # If the hash to check is unknown, return valid
  ben.invalidTipsets.withValue(check, invalid) do:
    # If the bad hash was hit too many times, evict it and try to reprocess in
    # the hopes that we have a data race that we can exit out of.
    let badHash = invalid[].blockHash

    inc ben.invalidBlocksHits.mgetOrPut(badHash, 0)
    if ben.invalidBlocksHits.getOrDefault(badHash) >= invalidBlockHitEviction:
      warn "Too many bad block import attempts, retrying",
        number=invalid.number, hash=badHash.short

      ben.invalidBlocksHits.del(badHash)

      var deleted = newSeq[common.Hash256]()
      for descendant, badHeader in ben.invalidTipsets:
        if badHeader.blockHash == badHash:
          deleted.add descendant

      for x in deleted:
        ben.invalidTipsets.del(x)

      return Opt.none(PayloadStatusV1)

    # Not too many failures yet, mark the head of the invalid chain as invalid
    if check != head:
      warn "Marked new chain head as invalid",
        hash=head, badnumber=invalid.number, badhash=badHash

      if ben.invalidTipsets.len >= invalidTipsetsCap:
        # Evict enough old entries to make room for the new tipset head
        let size = ben.invalidTipsets.len - invalidTipsetsCap + 1
        var deleted = newSeqOfCap[common.Hash256](size)
        for key in ben.invalidTipsets.keys:
          deleted.add key
          if deleted.len >= size:
            break
        for x in deleted:
          ben.invalidTipsets.del(x)

      ben.invalidTipsets[head] = invalid[]

    var lastValid = invalid.parentHash

    # If the last valid hash is the terminal pow block, return 0x0 for latest valid hash
    var header: common.BlockHeader
    if ben.com.db.getBlockHeader(invalid.parentHash, header):
      if header.difficulty != 0.u256:
        lastValid = default(common.Hash256)

    return Opt.some invalidStatus(lastValid, "links to previously rejected block")

  do:
    return Opt.none(PayloadStatusV1)

# delayPayloadImport stashes the given block away for import at a later time,
# either via a forkchoice update or a sync extension. This method is meant to
# be called by the newpayload command when the block seems to be ok, but some
# prerequisite prevents it from being processed (e.g. no parent, or snap sync).
proc delayPayloadImport*(ben: BeaconEngineRef, header: common.BlockHeader): PayloadStatusV1 =
  # Sanity check that this block's parent is not on a previously invalidated
  # chain. If it is, mark the block as invalid too.
  let blockHash = header.blockHash
  let res = ben.checkInvalidAncestor(header.parentHash, blockHash)
  if res.isSome:
    return res.get

  # Stash the block away for a potential forced forkchoice update to it
  # at a later time.
  ben.put(blockHash, header)

  # Although we don't want to trigger a sync, if there is one already in
  # progress, try to extend it with the current payload request to relieve
  # some strain from the forkchoice update.
  ben.com.syncReqNewHead(header)

  PayloadStatusV1(status: PayloadExecutionStatus.syncing)