diff --git a/fluffy/tools/bridge/beacon_chain_bridge.nim b/fluffy/tools/bridge/beacon_chain_bridge.nim index d3c2cf51b..67594f968 100644 --- a/fluffy/tools/bridge/beacon_chain_bridge.nim +++ b/fluffy/tools/bridge/beacon_chain_bridge.nim @@ -88,6 +88,8 @@ import from beacon_chain/gossip_processing/block_processor import newExecutionPayload from beacon_chain/gossip_processing/eth2_processor import toValidationResult +type Hash256 = etypes.Hash256 + template asEthHash(hash: ethtypes.BlockHash): Hash256 = Hash256(data: distinctBase(hash)) diff --git a/fluffy/tools/bridge/beacon_chain_bridge_conf.nim b/fluffy/tools/bridge/beacon_chain_bridge_conf.nim index ccaa8aa89..81bee94fe 100644 --- a/fluffy/tools/bridge/beacon_chain_bridge_conf.nim +++ b/fluffy/tools/bridge/beacon_chain_bridge_conf.nim @@ -198,7 +198,7 @@ func asLightClientConf*(pc: BeaconBridgeConf): LightClientConf = directPeers: pc.directPeers, trustedBlockRoot: pc.trustedBlockRoot, web3Urls: @[], - jwtSecret: none(string), + jwtSecret: none(InputFile), stopAtEpoch: 0 ) diff --git a/hive_integration/nodocker/engine/clmock.nim b/hive_integration/nodocker/engine/clmock.nim index 887d50617..85083f36a 100644 --- a/hive_integration/nodocker/engine/clmock.nim +++ b/hive_integration/nodocker/engine/clmock.nim @@ -4,12 +4,13 @@ import nimcrypto/sysrand, stew/byteutils, eth/common, chronos, - web3/engine_api_types, json_rpc/rpcclient, ../../../nimbus/rpc/merge/mergeutils, ../../../nimbus/[constants], ./engine_client +import web3/engine_api_types except Hash256 # conflict with the one from eth/common + # Consensus Layer Client Mock used to sync the Execution Clients once the TTD has been reached type CLMocker* = ref object diff --git a/hive_integration/nodocker/engine/engine_client.nim b/hive_integration/nodocker/engine/engine_client.nim index 88d62f665..f6ce41c21 100644 --- a/hive_integration/nodocker/engine/engine_client.nim +++ b/hive_integration/nodocker/engine/engine_client.nim @@ -1,7 +1,7 @@ import std/[times, json, strutils], stew/byteutils, - eth/[common, rlp], chronos, + eth/[common, common/eth_types, rlp], chronos, web3/engine_api_types, json_rpc/[rpcclient, errors], ../../../tests/rpcclient/eth_api, @@ -11,6 +11,8 @@ import import web3/engine_api as web3_engine_api +type Hash256 = eth_types.Hash256 + template wrapTry(body: untyped) = try: body @@ -41,6 +43,12 @@ proc newPayloadV1*(client: RpcClient, wrapTrySimpleRes: client.engine_newPayloadV1(payload) +proc newPayloadV2*(client: RpcClient, + payload: ExecutionPayloadV2): + Result[PayloadStatusV1, string] = + wrapTrySimpleRes: + client.engine_newPayloadV2(payload) + proc toBlockNumber(n: Option[HexQuantityStr]): common.BlockNumber = if n.isNone: return 0.toBlockNumber diff --git a/hive_integration/nodocker/engine/engine_tests.nim b/hive_integration/nodocker/engine/engine_tests.nim index 8ff0cd50c..44e43676a 100644 --- a/hive_integration/nodocker/engine/engine_tests.nim +++ b/hive_integration/nodocker/engine/engine_tests.nim @@ -10,6 +10,9 @@ import ../../../nimbus/rpc/rpc_types, ../../../nimbus/rpc/merge/mergeutils +import eth/common/eth_types as common_eth_types +type Hash256 = common_eth_types.Hash256 + const prevRandaoContractAddr = hexToByteArray[20]("0000000000000000000000000000000000000316") @@ -2003,4 +2006,4 @@ const engineTestList* = [ run: postMergeSync, ttd: 10, ) -] \ No newline at end of file +] diff --git a/hive_integration/nodocker/engine/helper.nim b/hive_integration/nodocker/engine/helper.nim index 3273e9168..a0246cd41 100644 --- 
a/hive_integration/nodocker/engine/helper.nim +++ b/hive_integration/nodocker/engine/helper.nim @@ -7,6 +7,9 @@ import ../../../nimbus/rpc/hexstrings, ../../../nimbus/transaction +import eth/common/eth_types as common_eth_types +type Hash256 = common_eth_types.Hash256 + type ExecutableData* = object parentHash* : Hash256 diff --git a/hive_integration/nodocker/engine/test_env.nim b/hive_integration/nodocker/engine/test_env.nim index 4b6e5b0ff..b20e3f670 100644 --- a/hive_integration/nodocker/engine/test_env.nim +++ b/hive_integration/nodocker/engine/test_env.nim @@ -178,7 +178,7 @@ proc makeNextTransaction*(t: TestEnv, recipient: EthAddress, amount: UInt256, pa inc t.nonce signTransaction(tx, t.vaultKey, chainId, eip155 = true) -proc verifyPoWProgress*(t: TestEnv, lastBlockHash: Hash256): bool = +proc verifyPoWProgress*(t: TestEnv, lastBlockHash: ethtypes.Hash256): bool = let res = waitFor verifyPoWProgress(t.rpcClient, lastBlockHash) if res.isErr: error "verify PoW Progress error", msg=res.error diff --git a/hive_integration/nodocker/engine/types.nim b/hive_integration/nodocker/engine/types.nim index ae9aa2fb1..4f3458561 100644 --- a/hive_integration/nodocker/engine/types.nim +++ b/hive_integration/nodocker/engine/types.nim @@ -7,6 +7,8 @@ import export ethtypes +import eth/common/eth_types as common_eth_types + type TestSpec* = object name*: string @@ -32,7 +34,7 @@ template testCond*(expr, body: untyped) = else: return TestStatus.Failed -proc `$`*(x: Option[Hash256]): string = +proc `$`*(x: Option[common_eth_types.Hash256]): string = if x.isNone: "none" else: diff --git a/nimbus/common/common.nim b/nimbus/common/common.nim index 255051656..85194b59e 100644 --- a/nimbus/common/common.nim +++ b/nimbus/common/common.nim @@ -10,7 +10,7 @@ {.push raises: [].} import - std/[options], + std/[options, times], chronicles, eth/trie/trie_defs, ./chain_config, @@ -318,6 +318,9 @@ proc isBlockAfterTtd*(com: CommonRef, header: BlockHeader): bool td = ptd + header.difficulty ptd >= ttd and td >= ttd +func isShanghaiOrLater*(com: CommonRef, t: EthTime): bool = + com.config.shanghaiTime.isSome and t >= com.config.shanghaiTime.get + proc consensus*(com: CommonRef, header: BlockHeader): ConsensusType {.gcsafe, raises: [CatchableError].} = if com.isBlockAfterTtd(header): diff --git a/nimbus/core/casper.nim b/nimbus/core/casper.nim index e9e12128f..801758ddb 100644 --- a/nimbus/core/casper.nim +++ b/nimbus/core/casper.nim @@ -11,9 +11,9 @@ import type CasperRef* = ref object - feeRecipient : EthAddress - timestamp : EthTime - prevRandao : Hash256 + feeRecipient* : EthAddress + timestamp* : EthTime + prevRandao* : Hash256 proc prepare*(ctx: CasperRef, header: var BlockHeader) = header.coinbase = ctx.feeRecipient diff --git a/nimbus/core/executor/process_block.nim b/nimbus/core/executor/process_block.nim index ab726c25a..0391abe4c 100644 --- a/nimbus/core/executor/process_block.nim +++ b/nimbus/core/executor/process_block.nim @@ -9,6 +9,7 @@ # according to those terms. 
import + math, ../../common/common, ../../constants, ../../db/accounts_cache, @@ -30,6 +31,9 @@ import # Private functions # ------------------------------------------------------------------------------ +func gwei(n: uint64): UInt256 = + (n * (10'u64 ^ 9'u64)).u256 + proc procBlkPreamble(vmState: BaseVMState; header: BlockHeader; body: BlockBody): bool {.gcsafe, raises: [CatchableError].} = @@ -66,6 +70,15 @@ proc procBlkPreamble(vmState: BaseVMState; return false vmState.receipts[txIndex] = vmState.makeReceipt(tx.txType) + if header.withdrawalsRoot.isSome: + if body.withdrawals.get.calcWithdrawalsRoot != header.withdrawalsRoot.get: + debug "Mismatched withdrawalsRoot", + blockNumber = header.blockNumber + return false + + for withdrawal in body.withdrawals.get: + vmState.stateDB.addBalance(withdrawal.address, withdrawal.amount.gwei) + if vmState.cumulativeGasUsed != header.gasUsed: debug "gasUsed neq cumulativeGasUsed", gasUsed = header.gasUsed, diff --git a/nimbus/core/sealer.nim b/nimbus/core/sealer.nim index 06478979d..d5cd6bf53 100644 --- a/nimbus/core/sealer.nim +++ b/nimbus/core/sealer.nim @@ -9,7 +9,7 @@ # according to those terms. import - std/[times, typetraits], + std/[sequtils, times, typetraits], pkg/[chronos, stew/results, chronicles, @@ -30,8 +30,8 @@ import ../common/[common, context] -from web3/ethtypes as web3types import nil -from web3/engine_api_types import PayloadAttributesV1, ExecutionPayloadV1 +from web3/ethtypes as web3types import nil, TypedTransaction, WithdrawalV1, ExecutionPayloadV1OrV2, toExecutionPayloadV1OrV2, toExecutionPayloadV1 +from web3/engine_api_types import PayloadAttributesV1, ExecutionPayloadV1, PayloadAttributesV2, ExecutionPayloadV2 type EngineState* = enum @@ -139,9 +139,11 @@ proc sealingLoop(engine: SealingEngineRef): Future[void] {.async.} = template unsafeQuantityToInt64(q: web3types.Quantity): int64 = int64 q +proc toTypedTransaction(tx: Transaction): TypedTransaction = + web3types.TypedTransaction(rlp.encode(tx)) + proc generateExecutionPayload*(engine: SealingEngineRef, - payloadAttrs: PayloadAttributesV1, - payloadRes: var ExecutionPayloadV1): Result[void, string] = + payloadAttrs: PayloadAttributesV1 | PayloadAttributesV2): Result[ExecutionPayloadV1OrV2, string] = let headBlock = try: engine.chain.db.getCanonicalHead() except CatchableError: return err "No head block in database" @@ -159,9 +161,9 @@ proc generateExecutionPayload*(engine: SealingEngineRef, let res = engine.generateBlock(blk) if res.isErr: error "sealing engine generateBlock error", msg = res.error - return res + return err(res.error) - # make sure both generated block header and payloadRes(ExecutionPayloadV1) + # make sure both generated block header and payloadRes(ExecutionPayloadV2) # produce the same blockHash blk.header.fee = some(blk.header.fee.get(UInt256.zero)) # force it with some(UInt256) @@ -169,25 +171,35 @@ proc generateExecutionPayload*(engine: SealingEngineRef, if blk.header.extraData.len > 32: return err "extraData length should not exceed 32 bytes" - payloadRes.parentHash = Web3BlockHash blk.header.parentHash.data - payloadRes.feeRecipient = Web3Address blk.header.coinbase - payloadRes.stateRoot = Web3BlockHash blk.header.stateRoot.data - payloadRes.receiptsRoot = Web3BlockHash blk.header.receiptRoot.data - payloadRes.logsBloom = Web3Bloom blk.header.bloom - payloadRes.prevRandao = payloadAttrs.prevRandao - payloadRes.blockNumber = Web3Quantity blk.header.blockNumber.truncate(uint64) - payloadRes.gasLimit = Web3Quantity blk.header.gasLimit - 
payloadRes.gasUsed = Web3Quantity blk.header.gasUsed - payloadRes.timestamp = payloadAttrs.timestamp - payloadRes.extraData = web3types.DynamicBytes[0, 32] blk.header.extraData - payloadRes.baseFeePerGas = blk.header.fee.get(UInt256.zero) - payloadRes.blockHash = Web3BlockHash blockHash.data + let transactions = blk.txs.map(toTypedTransaction) - for tx in blk.txs: - let txData = rlp.encode(tx) - payloadRes.transactions.add web3types.TypedTransaction(txData) + let withdrawals = + when payloadAttrs is PayloadAttributesV2: + some(payloadAttrs.withdrawals) + else: + none[seq[WithdrawalV1]]() - return ok() + return ok(ExecutionPayloadV1OrV2( + parentHash: Web3BlockHash blk.header.parentHash.data, + feeRecipient: Web3Address blk.header.coinbase, + stateRoot: Web3BlockHash blk.header.stateRoot.data, + receiptsRoot: Web3BlockHash blk.header.receiptRoot.data, + logsBloom: Web3Bloom blk.header.bloom, + prevRandao: payloadAttrs.prevRandao, + blockNumber: Web3Quantity blk.header.blockNumber.truncate(uint64), + gasLimit: Web3Quantity blk.header.gasLimit, + gasUsed: Web3Quantity blk.header.gasUsed, + timestamp: payloadAttrs.timestamp, + extraData: web3types.DynamicBytes[0, 32] blk.header.extraData, + baseFeePerGas: blk.header.fee.get(UInt256.zero), + blockHash: Web3BlockHash blockHash.data, + transactions: transactions, + withdrawals: withdrawals + )) + +proc generateExecutionPayloadV1*(engine: SealingEngineRef, + payloadAttrs: PayloadAttributesV1): Result[ExecutionPayloadV1, string] = + return generateExecutionPayload(engine, payloadAttrs).map(toExecutionPayloadV1) proc new*(_: type SealingEngineRef, chain: ChainRef, diff --git a/nimbus/rpc/engine_api.nim b/nimbus/rpc/engine_api.nim index def69e37c..db9e09419 100644 --- a/nimbus/rpc/engine_api.nim +++ b/nimbus/rpc/engine_api.nim @@ -8,11 +8,13 @@ # those terms. 
import - std/[typetraits, times, strutils], + std/[typetraits, times, strutils, sequtils, sets], stew/[results, byteutils], json_rpc/rpcserver, web3/[conversions, engine_api_types], eth/rlp, + eth/common/eth_types, + eth/common/eth_types_rlp, ../common/common, ".."/core/chain/[chain_desc, persist_blocks], ../constants, @@ -26,6 +28,28 @@ import {.push raises: [].} +type Hash256 = eth_types.Hash256 + + +func toPayloadAttributesV1OrPayloadAttributesV2*(a: PayloadAttributesV1OrV2): Result[PayloadAttributesV1, PayloadAttributesV2] = + if a.withdrawals.isNone: + ok( + PayloadAttributesV1( + timestamp: a.timestamp, + prevRandao: a.prevRandao, + suggestedFeeRecipient: a.suggestedFeeRecipient + ) + ) + else: + err( + PayloadAttributesV2( + timestamp: a.timestamp, + prevRandao: a.prevRandao, + suggestedFeeRecipient: a.suggestedFeeRecipient, + withdrawals: a.withdrawals.get + ) + ) + proc latestValidHash(db: ChainDBRef, parent: EthBlockHeader, ttd: DifficultyInt): Hash256 {.gcsafe, raises: [RlpError].} = let ptd = db.getScore(parent.parentHash) @@ -45,6 +69,397 @@ proc invalidFCU(com: CommonRef, header: EthBlockHeader): ForkchoiceUpdatedRespon let blockHash = latestValidHash(com.db, parent, com.ttd.get(high(common.BlockNumber))) invalidFCU(blockHash) +proc txPriorityFee(ttx: TypedTransaction): UInt256 = + try: + let tx = rlp.decode(distinctBase(ttx), Transaction) + return u256(tx.gasPrice * tx.maxPriorityFee) + except RlpError: + doAssert(false, "found TypedTransaction that RLP failed to decode") + +# AARDVARK: make sure I have the right units (wei/gwei) +proc sumOfBlockPriorityFees(payload: ExecutionPayloadV1OrV2): UInt256 = + payload.transactions.foldl(a + txPriorityFee(b), UInt256.zero) + +template unsafeQuantityToInt64(q: Quantity): int64 = + int64 q + +# I created these handle_whatever procs to eliminate duplicated code +# between the V1 and V2 RPC endpoint implementations. (I believe +# they're meant to be implementable in that way. e.g. The V2 specs +# explicitly say "here's what to do if the `withdrawals` field is +# null.) --Adam + +# https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1 +proc handle_newPayload(sealingEngine: SealingEngineRef, api: EngineApiRef, com: CommonRef, payload: ExecutionPayloadV1 | ExecutionPayloadV2): PayloadStatusV1 {.raises: [CatchableError].} = + trace "Engine API request received", + meth = "newPayload", number = $(distinctBase payload.blockNumber), hash = payload.blockHash + + if com.isShanghaiOrLater(fromUnix(payload.timestamp.unsafeQuantityToInt64)): + when not(payload is ExecutionPayloadV2): + raise invalidParams("if timestamp is Shanghai or later, payload must be ExecutionPayloadV2") + else: + when not(payload is ExecutionPayloadV1): + raise invalidParams("if timestamp is earlier than Shanghai, payload must be ExecutionPayloadV1") + + var header = toBlockHeader(payload) + let blockHash = payload.blockHash.asEthHash + var res = header.validateBlockHash(blockHash) + if res.isErr: + return res.error + + let db = sealingEngine.chain.db + + # If we already have the block locally, ignore the entire execution and just + # return a fake success. + if db.getBlockHeader(blockHash, header): + warn "Ignoring already known beacon payload", + number = header.blockNumber, hash = blockHash + return validStatus(blockHash) + + # If the parent is missing, we - in theory - could trigger a sync, but that + # would also entail a reorg. 
That is problematic if multiple sibling blocks + # are being fed to us, and even moreso, if some semi-distant uncle shortens + # our live chain. As such, payload execution will not permit reorgs and thus + # will not trigger a sync cycle. That is fine though, if we get a fork choice + # update after legit payload executions. + var parent: EthBlockHeader + if not db.getBlockHeader(header.parentHash, parent): + # Stash the block away for a potential forced forckchoice update to it + # at a later time. + api.put(blockHash, header) + + # Although we don't want to trigger a sync, if there is one already in + # progress, try to extend if with the current payload request to relieve + # some strain from the forkchoice update. + #if err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header()); err == nil { + # log.Debug("Payload accepted for sync extension", "number", params.Number, "hash", params.BlockHash) + # return beacon.PayloadStatusV1{Status: beacon.SYNCING}, nil + + # Either no beacon sync was started yet, or it rejected the delivered + # payload as non-integratable on top of the existing sync. We'll just + # have to rely on the beacon client to forcefully update the head with + # a forkchoice update request. + warn "Ignoring payload with missing parent", + number = header.blockNumber, + hash = blockHash, + parent = header.parentHash + return acceptedStatus() + + # We have an existing parent, do some sanity checks to avoid the beacon client + # triggering too early + let + td = db.getScore(header.parentHash) + ttd = com.ttd.get(high(common.BlockNumber)) + + if td < ttd: + warn "Ignoring pre-merge payload", + number = header.blockNumber, hash = blockHash, td, ttd + return invalidStatus() + + if header.timestamp <= parent.timestamp: + warn "Invalid timestamp", + parent = header.timestamp, header = header.timestamp + return invalidStatus(db.getHeadBlockHash(), "Invalid timestamp") + + if not db.haveBlockAndState(header.parentHash): + api.put(blockHash, header) + warn "State not available, ignoring new payload", + hash = blockHash, + number = header.blockNumber + let blockHash = latestValidHash(db, parent, ttd) + return acceptedStatus(blockHash) + + trace "Inserting block without sethead", + hash = blockHash, number = header.blockNumber + let body = toBlockBody(payload) + let vres = sealingEngine.chain.insertBlockWithoutSetHead(header, body) + if vres != ValidationResult.OK: + let blockHash = latestValidHash(db, parent, ttd) + return invalidStatus(blockHash, "Failed to insert block") + + # We've accepted a valid payload from the beacon client. Mark the local + # chain transitions to notify other subsystems (e.g. downloader) of the + # behavioral change. 
+ if not api.merger.ttdReached(): + api.merger.reachTTD() + # TODO: cancel downloader + + return validStatus(blockHash) + +# https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_getpayloadv1 +proc handle_getPayload(api: EngineApiRef, payloadId: PayloadID): GetPayloadV2Response {.raises: [CatchableError].} = + trace "Engine API request received", + meth = "GetPayload", id = payloadId.toHex + + var payload: ExecutionPayloadV1OrV2 + if not api.get(payloadId, payload): + raise unknownPayload("Unknown payload") + + let blockValue = sumOfBlockPriorityFees(payload) + + return GetPayloadV2Response( + executionPayload: payload, + blockValue: blockValue + ) + +# https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_exchangetransitionconfigurationv1 +proc handle_exchangeTransitionConfiguration(sealingEngine: SealingEngineRef, com: CommonRef, conf: TransitionConfigurationV1): TransitionConfigurationV1 {.raises: [CatchableError].} = + trace "Engine API request received", + meth = "exchangeTransitionConfigurationV1", + ttd = conf.terminalTotalDifficulty, + number = uint64(conf.terminalBlockNumber), + blockHash = conf.terminalBlockHash + let db = sealingEngine.chain.db + let ttd = com.ttd + + if ttd.isNone: + raise newException(ValueError, "invalid ttd: EL (none) CL ($2)" % [$conf.terminalTotalDifficulty]) + + if conf.terminalTotalDifficulty != ttd.get: + raise newException(ValueError, "invalid ttd: EL ($1) CL ($2)" % [$ttd.get, $conf.terminalTotalDifficulty]) + + let terminalBlockNumber = uint64(conf.terminalBlockNumber).toBlockNumber + let terminalBlockHash = conf.terminalBlockHash.asEthHash + + if terminalBlockHash != Hash256(): + var headerHash: Hash256 + + if not db.getBlockHash(terminalBlockNumber, headerHash): + raise newException(ValueError, "cannot get terminal block hash, number $1" % + [$terminalBlockNumber]) + + if terminalBlockHash != headerHash: + raise newException(ValueError, "invalid terminal block hash, got $1 want $2" % + [$terminalBlockHash, $headerHash]) + + var header: EthBlockHeader + if not db.getBlockHeader(headerHash, header): + raise newException(ValueError, "cannot get terminal block header, hash $1" % + [$terminalBlockHash]) + + return TransitionConfigurationV1( + terminalTotalDifficulty: ttd.get, + terminalBlockHash : BlockHash headerHash.data, + terminalBlockNumber : Quantity header.blockNumber.truncate(uint64) + ) + + if terminalBlockNumber != 0: + raise newException(ValueError, "invalid terminal block number: $1" % [$terminalBlockNumber]) + + if terminalBlockHash != Hash256(): + raise newException(ValueError, "invalid terminal block hash, no terminal header set") + + return TransitionConfigurationV1(terminalTotalDifficulty: ttd.get) + +# ForkchoiceUpdated has several responsibilities: +# If the method is called with an empty head block: +# we return success, which can be used to check if the catalyst mode is enabled +# If the total difficulty was not reached: +# we return INVALID +# If the finalizedBlockHash is set: +# we check if we have the finalizedBlockHash in our db, if not we start a sync +# We try to set our blockchain to the headBlock +# If there are payloadAttributes: +# we try to assemble a block with the payloadAttributes and return its payloadID +# https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_forkchoiceupdatedv2 +proc handle_forkchoiceUpdated(sealingEngine: SealingEngineRef, com: CommonRef, api: EngineApiRef, update: ForkchoiceStateV1, payloadAttributes: 
Option[PayloadAttributesV1] | Option[PayloadAttributesV2]): ForkchoiceUpdatedResponse {.raises: [CatchableError].} = + + if payloadAttributes.isSome: + if com.isShanghaiOrLater(fromUnix(payloadAttributes.get.timestamp.unsafeQuantityToInt64)): + when not(payloadAttributes is Option[PayloadAttributesV2]): + raise invalidParams("if timestamp is Shanghai or later, payloadAttributes must be PayloadAttributesV2") + else: + when not(payloadAttributes is Option[PayloadAttributesV1]): + raise invalidParams("if timestamp is earlier than Shanghai, payloadAttributes must be PayloadAttributesV1") + + let + chain = sealingEngine.chain + db = chain.db + blockHash = update.headBlockHash.asEthHash + + if blockHash == Hash256(): + warn "Forkchoice requested update to zero hash" + return simpleFCU(PayloadExecutionStatus.invalid) + + # Check whether we have the block yet in our database or not. If not, we'll + # need to either trigger a sync, or to reject this forkchoice update for a + # reason. + var header: EthBlockHeader + if not db.getBlockHeader(blockHash, header): + # If the head hash is unknown (was not given to us in a newPayload request), + # we cannot resolve the header, so not much to do. This could be extended in + # the future to resolve from the `eth` network, but it's an unexpected case + # that should be fixed, not papered over. + if not api.get(blockHash, header): + warn "Forkchoice requested unknown head", + hash = blockHash + return simpleFCU(PayloadExecutionStatus.syncing) + + # Header advertised via a past newPayload request. Start syncing to it. + # Before we do however, make sure any legacy sync in switched off so we + # don't accidentally have 2 cycles running. + if not api.merger.ttdReached(): + api.merger.reachTTD() + # TODO: cancel downloader + + info "Forkchoice requested sync to new head", + number = header.blockNumber, + hash = blockHash + + # Update sync header (if any) + com.syncReqNewHead(header) + + return simpleFCU(PayloadExecutionStatus.syncing) + + # Block is known locally, just sanity check that the beacon client does not + # attempt to push us back to before the merge. + let blockNumber = header.blockNumber.truncate(uint64) + if header.difficulty > 0.u256 or blockNumber == 0'u64: + var + td, ptd: DifficultyInt + ttd = com.ttd.get(high(common.BlockNumber)) + + if not db.getTd(blockHash, td) or (blockNumber > 0'u64 and not db.getTd(header.parentHash, ptd)): + error "TDs unavailable for TTD check", + number = blockNumber, + hash = blockHash, + td = td, + parent = header.parentHash, + ptd = ptd + return simpleFCU(PayloadExecutionStatus.invalid, "TDs unavailable for TDD check") + + if td < ttd or (blockNumber > 0'u64 and ptd > ttd): + error "Refusing beacon update to pre-merge", + number = blockNumber, + hash = blockHash, + diff = header.difficulty, + ptd = ptd, + ttd = ttd + + return invalidFCU() + + # If the head block is already in our canonical chain, the beacon client is + # probably resyncing. Ignore the update. + var canonHash: Hash256 + if db.getBlockHash(header.blockNumber, canonHash) and canonHash == blockHash: + # TODO should this be possible? + # If we allow these types of reorgs, we will do lots and lots of reorgs during sync + warn "Reorg to previous block" + if chain.setCanonical(header) != ValidationResult.OK: + return invalidFCU(com, header) + elif chain.setCanonical(header) != ValidationResult.OK: + return invalidFCU(com, header) + + # If the beacon client also advertised a finalized block, mark the local + # chain final and completely in PoS mode. 
+ let finalizedBlockHash = update.finalizedBlockHash.asEthHash + if finalizedBlockHash != Hash256(): + if not api.merger.posFinalized: + api.merger.finalizePoS() + + # TODO: If the finalized block is not in our canonical tree, somethings wrong + var finalBlock: EthBlockHeader + if not db.getBlockHeader(finalizedBlockHash, finalBlock): + warn "Final block not available in database", + hash=finalizedBlockHash + raise invalidParams("finalized block header not available") + var finalHash: Hash256 + if not db.getBlockHash(finalBlock.blockNumber, finalHash): + warn "Final block not in canonical chain", + number=finalBlock.blockNumber, + hash=finalizedBlockHash + raise invalidParams("finalized block hash not available") + if finalHash != finalizedBlockHash: + warn "Final block not in canonical chain", + number=finalBlock.blockNumber, + expect=finalizedBlockHash, + get=finalHash + raise invalidParams("finalilized block not canonical") + db.finalizedHeaderHash(finalizedBlockHash) + + let safeBlockHash = update.safeBlockHash.asEthHash + if safeBlockHash != Hash256(): + var safeBlock: EthBlockHeader + if not db.getBlockHeader(safeBlockHash, safeBlock): + warn "Safe block not available in database", + hash = safeBlockHash + raise invalidParams("safe head not available") + var safeHash: Hash256 + if not db.getBlockHash(safeBlock.blockNumber, safeHash): + warn "Safe block hash not available in database", + hash = safeHash + raise invalidParams("safe block hash not available") + if safeHash != safeBlockHash: + warn "Safe block not in canonical chain", + blockNumber=safeBlock.blockNumber, + expect=safeBlockHash, + get=safeHash + raise invalidParams("safe head not canonical") + db.safeHeaderHash(safeBlockHash) + + # If payload generation was requested, create a new block to be potentially + # sealed by the beacon client. The payload will be requested later, and we + # might replace it arbitrarilly many times in between. 
+ if payloadAttributes.isSome: + let payloadAttrs = payloadAttributes.get() + let res = sealingEngine.generateExecutionPayload(payloadAttrs) + + if res.isErr: + error "Failed to create sealing payload", err = res.error + raise invalidAttr(res.error) + + let payload = res.get + + let id = computePayloadId(blockHash, payloadAttrs) + api.put(id, payload) + + info "Created payload for sealing", + id = id.toHex, + hash = payload.blockHash, + number = payload.blockNumber.uint64 + + return validFCU(some(id), blockHash) + + return validFCU(none(PayloadID), blockHash) + +func toHash(value: array[32, byte]): Hash256 = + result.data = value + +proc handle_getPayloadBodiesByHash(sealingEngine: SealingEngineRef, hashes: seq[BlockHash]): seq[Option[ExecutionPayloadBodyV1]] {.raises: [CatchableError].} = + let db = sealingEngine.chain.db + var body: BlockBody + for h in hashes: + if db.getBlockBody(toHash(distinctBase(h)), body): + var typedTransactions: seq[TypedTransaction] + for tx in body.transactions: + typedTransactions.add(tx.toTypedTransaction) + var withdrawals: seq[WithdrawalV1] + for w in body.withdrawals.get: + withdrawals.add(w.toWithdrawalV1) + result.add( + some(ExecutionPayloadBodyV1( + transactions: typedTransactions, + withdrawals: withdrawals + )) + ) + else: + result.add(none[ExecutionPayloadBodyV1]()) + +const supportedMethods: HashSet[string] = + toHashSet([ + "engine_newPayloadV1", + "engine_newPayloadV2", + "engine_getPayloadV1", + "engine_getPayloadV2", + "engine_exchangeTransitionConfigurationV1", + "engine_forkchoiceUpdatedV1", + "engine_forkchoiceUpdatedV2", + "engine_getPayloadBodiesByHashV1" + ]) + +# I'm trying to keep the handlers below very thin, and move the +# bodies up to the various procs above. Once we have multiple +# versions, they'll need to be able to share code. proc setupEngineApi*( sealingEngine: SealingEngineRef, server: RpcServer, @@ -54,316 +469,48 @@ proc setupEngineApi*( api = EngineApiRef.new(merger) com = sealingEngine.chain.com - # https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1 + server.rpc("engine_exchangeCapabilities") do(methods: seq[string]) -> seq[string]: + return methods.filterIt(supportedMethods.contains(it)) + # cannot use `params` as param name. see https:#github.com/status-im/nim-json-rpc/issues/128 server.rpc("engine_newPayloadV1") do(payload: ExecutionPayloadV1) -> PayloadStatusV1: - trace "Engine API request received", - meth = "newPayloadV1", number = $(distinctBase payload.blockNumber), hash = payload.blockHash + return handle_newPayload(sealingEngine, api, com, payload) + + server.rpc("engine_newPayloadV2") do(payload: ExecutionPayloadV1OrV2) -> PayloadStatusV1: + let p = payload.toExecutionPayloadV1OrExecutionPayloadV2 + if p.isOk: + return handle_newPayload(sealingEngine, api, com, p.get) + else: + return handle_newPayload(sealingEngine, api, com, p.error) - var header = toBlockHeader(payload) - let blockHash = payload.blockHash.asEthHash - var res = header.validateBlockHash(blockHash) - if res.isErr: - return res.error - - let db = sealingEngine.chain.db - - # If we already have the block locally, ignore the entire execution and just - # return a fake success. - if db.getBlockHeader(blockHash, header): - warn "Ignoring already known beacon payload", - number = header.blockNumber, hash = blockHash - return validStatus(blockHash) - - # If the parent is missing, we - in theory - could trigger a sync, but that - # would also entail a reorg. 
That is problematic if multiple sibling blocks - # are being fed to us, and even moreso, if some semi-distant uncle shortens - # our live chain. As such, payload execution will not permit reorgs and thus - # will not trigger a sync cycle. That is fine though, if we get a fork choice - # update after legit payload executions. - var parent: EthBlockHeader - if not db.getBlockHeader(header.parentHash, parent): - # Stash the block away for a potential forced forckchoice update to it - # at a later time. - api.put(blockHash, header) - - # Although we don't want to trigger a sync, if there is one already in - # progress, try to extend if with the current payload request to relieve - # some strain from the forkchoice update. - #if err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header()); err == nil { - # log.Debug("Payload accepted for sync extension", "number", params.Number, "hash", params.BlockHash) - # return beacon.PayloadStatusV1{Status: beacon.SYNCING}, nil - - # Either no beacon sync was started yet, or it rejected the delivered - # payload as non-integratable on top of the existing sync. We'll just - # have to rely on the beacon client to forcefully update the head with - # a forkchoice update request. - warn "Ignoring payload with missing parent", - number = header.blockNumber, - hash = blockHash, - parent = header.parentHash - return acceptedStatus() - - # We have an existing parent, do some sanity checks to avoid the beacon client - # triggering too early - let - td = db.getScore(header.parentHash) - ttd = com.ttd.get(high(common.BlockNumber)) - - if td < ttd: - warn "Ignoring pre-merge payload", - number = header.blockNumber, hash = blockHash, td, ttd - return invalidStatus() - - if header.timestamp <= parent.timestamp: - warn "Invalid timestamp", - parent = header.timestamp, header = header.timestamp - return invalidStatus(db.getHeadBlockHash(), "Invalid timestamp") - - if not db.haveBlockAndState(header.parentHash): - api.put(blockHash, header) - warn "State not available, ignoring new payload", - hash = blockHash, - number = header.blockNumber - let blockHash = latestValidHash(db, parent, ttd) - return acceptedStatus(blockHash) - - trace "Inserting block without sethead", - hash = blockHash, number = header.blockNumber - let body = toBlockBody(payload) - let vres = sealingEngine.chain.insertBlockWithoutSetHead(header, body) - if vres != ValidationResult.OK: - let blockHash = latestValidHash(db, parent, ttd) - return invalidStatus(blockHash, "Failed to insert block") - - # We've accepted a valid payload from the beacon client. Mark the local - # chain transitions to notify other subsystems (e.g. downloader) of the - # behavioral change. 
- if not api.merger.ttdReached(): - api.merger.reachTTD() - # TODO: cancel downloader - - return validStatus(blockHash) - - # https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_getpayloadv1 server.rpc("engine_getPayloadV1") do(payloadId: PayloadID) -> ExecutionPayloadV1: - trace "Engine API request received", - meth = "GetPayload", id = payloadId.toHex + let r = handle_getPayload(api, payloadId) + return r.executionPayload.toExecutionPayloadV1 - var payload: ExecutionPayloadV1 - if not api.get(payloadId, payload): - raise unknownPayload("Unknown payload") - return payload + server.rpc("engine_getPayloadV2") do(payloadId: PayloadID) -> GetPayloadV2Response: + return handle_getPayload(api, payloadId) - # https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_exchangetransitionconfigurationv1 server.rpc("engine_exchangeTransitionConfigurationV1") do(conf: TransitionConfigurationV1) -> TransitionConfigurationV1: - trace "Engine API request received", - meth = "exchangeTransitionConfigurationV1", - ttd = conf.terminalTotalDifficulty, - number = uint64(conf.terminalBlockNumber), - blockHash = conf.terminalBlockHash + return handle_exchangeTransitionConfiguration(sealingEngine, com, conf) - let db = sealingEngine.chain.db - let ttd = com.ttd - - if ttd.isNone: - raise newException(ValueError, "invalid ttd: EL (none) CL ($2)" % [$conf.terminalTotalDifficulty]) - - if conf.terminalTotalDifficulty != ttd.get: - raise newException(ValueError, "invalid ttd: EL ($1) CL ($2)" % [$ttd.get, $conf.terminalTotalDifficulty]) - - let terminalBlockNumber = uint64(conf.terminalBlockNumber).toBlockNumber - let terminalBlockHash = conf.terminalBlockHash.asEthHash - - if terminalBlockHash != Hash256(): - var headerHash: Hash256 - - if not db.getBlockHash(terminalBlockNumber, headerHash): - raise newException(ValueError, "cannot get terminal block hash, number $1" % - [$terminalBlockNumber]) - - if terminalBlockHash != headerHash: - raise newException(ValueError, "invalid terminal block hash, got $1 want $2" % - [$terminalBlockHash, $headerHash]) - - var header: EthBlockHeader - if not db.getBlockHeader(headerHash, header): - raise newException(ValueError, "cannot get terminal block header, hash $1" % - [$terminalBlockHash]) - - return TransitionConfigurationV1( - terminalTotalDifficulty: ttd.get, - terminalBlockHash : BlockHash headerHash.data, - terminalBlockNumber : Quantity header.blockNumber.truncate(uint64) - ) - - if terminalBlockNumber != 0: - raise newException(ValueError, "invalid terminal block number: $1" % [$terminalBlockNumber]) - - if terminalBlockHash != Hash256(): - raise newException(ValueError, "invalid terminal block hash, no terminal header set") - - return TransitionConfigurationV1(terminalTotalDifficulty: ttd.get) - - # ForkchoiceUpdatedV1 has several responsibilities: - # If the method is called with an empty head block: - # we return success, which can be used to check if the catalyst mode is enabled - # If the total difficulty was not reached: - # we return INVALID - # If the finalizedBlockHash is set: - # we check if we have the finalizedBlockHash in our db, if not we start a sync - # We try to set our blockchain to the headBlock - # If there are payloadAttributes: - # we try to assemble a block with the payloadAttributes and return its payloadID - # https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_forkchoiceupdatedv1 server.rpc("engine_forkchoiceUpdatedV1") do( update: 
ForkchoiceStateV1, payloadAttributes: Option[PayloadAttributesV1]) -> ForkchoiceUpdatedResponse: - let - chain = sealingEngine.chain - db = chain.db - blockHash = update.headBlockHash.asEthHash + return handle_forkchoiceUpdated(sealingEngine, com, api, update, payloadAttributes) - if blockHash == Hash256(): - warn "Forkchoice requested update to zero hash" - return simpleFCU(PayloadExecutionStatus.invalid) + server.rpc("engine_forkchoiceUpdatedV2") do( + update: ForkchoiceStateV1, + payloadAttributes: Option[PayloadAttributesV1OrV2]) -> ForkchoiceUpdatedResponse: + if payloadAttributes.isNone: + return handle_forkchoiceUpdated(sealingEngine, com, api, update, none[PayloadAttributesV2]()) + else: + let a = payloadAttributes.get.toPayloadAttributesV1OrPayloadAttributesV2 + if a.isOk: + return handle_forkchoiceUpdated(sealingEngine, com, api, update, some(a.get)) + else: + return handle_forkchoiceUpdated(sealingEngine, com, api, update, some(a.error)) - # Check whether we have the block yet in our database or not. If not, we'll - # need to either trigger a sync, or to reject this forkchoice update for a - # reason. - var header: EthBlockHeader - if not db.getBlockHeader(blockHash, header): - # If the head hash is unknown (was not given to us in a newPayload request), - # we cannot resolve the header, so not much to do. This could be extended in - # the future to resolve from the `eth` network, but it's an unexpected case - # that should be fixed, not papered over. - if not api.get(blockHash, header): - warn "Forkchoice requested unknown head", - hash = blockHash - return simpleFCU(PayloadExecutionStatus.syncing) + server.rpc("engine_getPayloadBodiesByHashV1") do( + hashes: seq[BlockHash]) -> seq[Option[ExecutionPayloadBodyV1]]: + return handle_getPayloadBodiesByHash(sealingEngine, hashes) - # Header advertised via a past newPayload request. Start syncing to it. - # Before we do however, make sure any legacy sync in switched off so we - # don't accidentally have 2 cycles running. - if not api.merger.ttdReached(): - api.merger.reachTTD() - # TODO: cancel downloader - - info "Forkchoice requested sync to new head", - number = header.blockNumber, - hash = blockHash - - # Update sync header (if any) - com.syncReqNewHead(header) - - return simpleFCU(PayloadExecutionStatus.syncing) - - # Block is known locally, just sanity check that the beacon client does not - # attempt to push us back to before the merge. - let blockNumber = header.blockNumber.truncate(uint64) - if header.difficulty > 0.u256 or blockNumber == 0'u64: - var - td, ptd: DifficultyInt - ttd = com.ttd.get(high(common.BlockNumber)) - - if not db.getTd(blockHash, td) or (blockNumber > 0'u64 and not db.getTd(header.parentHash, ptd)): - error "TDs unavailable for TTD check", - number = blockNumber, - hash = blockHash, - td = td, - parent = header.parentHash, - ptd = ptd - return simpleFCU(PayloadExecutionStatus.invalid, "TDs unavailable for TDD check") - - if td < ttd or (blockNumber > 0'u64 and ptd > ttd): - error "Refusing beacon update to pre-merge", - number = blockNumber, - hash = blockHash, - diff = header.difficulty, - ptd = ptd, - ttd = ttd - - return invalidFCU() - - # If the head block is already in our canonical chain, the beacon client is - # probably resyncing. Ignore the update. - var canonHash: Hash256 - if db.getBlockHash(header.blockNumber, canonHash) and canonHash == blockHash: - # TODO should this be possible? 
- # If we allow these types of reorgs, we will do lots and lots of reorgs during sync - warn "Reorg to previous block" - if chain.setCanonical(header) != ValidationResult.OK: - return invalidFCU(com, header) - elif chain.setCanonical(header) != ValidationResult.OK: - return invalidFCU(com, header) - - # If the beacon client also advertised a finalized block, mark the local - # chain final and completely in PoS mode. - let finalizedBlockHash = update.finalizedBlockHash.asEthHash - if finalizedBlockHash != Hash256(): - if not api.merger.posFinalized: - api.merger.finalizePoS() - - # TODO: If the finalized block is not in our canonical tree, somethings wrong - var finalBlock: EthBlockHeader - if not db.getBlockHeader(finalizedBlockHash, finalBlock): - warn "Final block not available in database", - hash=finalizedBlockHash - raise invalidParams("finalized block header not available") - var finalHash: Hash256 - if not db.getBlockHash(finalBlock.blockNumber, finalHash): - warn "Final block not in canonical chain", - number=finalBlock.blockNumber, - hash=finalizedBlockHash - raise invalidParams("finalized block hash not available") - if finalHash != finalizedBlockHash: - warn "Final block not in canonical chain", - number=finalBlock.blockNumber, - expect=finalizedBlockHash, - get=finalHash - raise invalidParams("finalilized block not canonical") - db.finalizedHeaderHash(finalizedBlockHash) - - let safeBlockHash = update.safeBlockHash.asEthHash - if safeBlockHash != Hash256(): - var safeBlock: EthBlockHeader - if not db.getBlockHeader(safeBlockHash, safeBlock): - warn "Safe block not available in database", - hash = safeBlockHash - raise invalidParams("safe head not available") - var safeHash: Hash256 - if not db.getBlockHash(safeBlock.blockNumber, safeHash): - warn "Safe block hash not available in database", - hash = safeHash - raise invalidParams("safe block hash not available") - if safeHash != safeBlockHash: - warn "Safe block not in canonical chain", - blockNumber=safeBlock.blockNumber, - expect=safeBlockHash, - get=safeHash - raise invalidParams("safe head not canonical") - db.safeHeaderHash(safeBlockHash) - - # If payload generation was requested, create a new block to be potentially - # sealed by the beacon client. The payload will be requested later, and we - # might replace it arbitrarilly many times in between. 
- if payloadAttributes.isSome: - let payloadAttrs = payloadAttributes.get() - var payload: ExecutionPayloadV1 - let res = sealingEngine.generateExecutionPayload(payloadAttrs, payload) - - if res.isErr: - error "Failed to create sealing payload", err = res.error - raise invalidAttr(res.error) - - let id = computePayloadId(blockHash, payloadAttrs) - api.put(id, payload) - - info "Created payload for sealing", - id = id.toHex, - hash = payload.blockHash, - number = payload.blockNumber.uint64 - - return validFCU(some(id), blockHash) - - return validFCU(none(PayloadID), blockHash) diff --git a/nimbus/rpc/merge/mergetypes.nim b/nimbus/rpc/merge/mergetypes.nim index 603637eaf..7c6fc041d 100644 --- a/nimbus/rpc/merge/mergetypes.nim +++ b/nimbus/rpc/merge/mergetypes.nim @@ -18,6 +18,7 @@ export merger, eth_types type EthBlockHeader* = eth_types.BlockHeader + Hash256 = eth_types.Hash256 const # maxTrackedPayloads is the maximum number of prepared payloads the execution @@ -40,7 +41,7 @@ type PayloadItem = object id: PayloadID - payload: ExecutionPayloadV1 + payload: ExecutionPayloadV1OrV2 HeaderItem = object hash: Hash256 @@ -81,15 +82,27 @@ proc get*(api: EngineApiRef, hash: Hash256, header: var EthBlockHeader): bool = return true false -proc put*(api: EngineApiRef, id: PayloadID, payload: ExecutionPayloadV1) = +proc put*(api: EngineApiRef, id: PayloadID, payload: ExecutionPayloadV1OrV2) = api.payloadQueue.put(PayloadItem(id: id, payload: payload)) -proc get*(api: EngineApiRef, id: PayloadID, payload: var ExecutionPayloadV1): bool = +proc put*(api: EngineApiRef, id: PayloadID, payload: ExecutionPayloadV1) = + api.put(id, payload.toExecutionPayloadV1OrV2) + +proc put*(api: EngineApiRef, id: PayloadID, payload: ExecutionPayloadV2) = + api.put(id, payload.toExecutionPayloadV1OrV2) + +proc get*(api: EngineApiRef, id: PayloadID, payload: var ExecutionPayloadV1OrV2): bool = for x in api.payloadQueue: if x.id == id: payload = x.payload return true false +proc get*(api: EngineApiRef, id: PayloadID, payload: var ExecutionPayloadV1): bool = + var p: ExecutionPayloadV1OrV2 + let found = api.get(id, p) + payload = p.toExecutionPayloadV1 + return found + proc merger*(api: EngineApiRef): MergerRef = api.merger diff --git a/nimbus/rpc/merge/mergeutils.nim b/nimbus/rpc/merge/mergeutils.nim index f967a4cae..c5113dbb2 100644 --- a/nimbus/rpc/merge/mergeutils.nim +++ b/nimbus/rpc/merge/mergeutils.nim @@ -8,16 +8,18 @@ # those terms. import - std/[typetraits, times, strutils], + std/[typetraits, times, strutils, sequtils], nimcrypto/[hash, sha2], web3/engine_api_types, json_rpc/errors, - eth/[trie, rlp, common, trie/db], + eth/[trie, rlp, common, common/eth_types, trie/db], stew/[results, byteutils], ../../constants, ./mergetypes -proc computePayloadId*(headBlockHash: Hash256, params: PayloadAttributesV1): PayloadID = +type Hash256 = eth_types.Hash256 + +proc computePayloadId*(headBlockHash: Hash256, params: PayloadAttributesV1 | PayloadAttributesV2): PayloadID = var dest: Hash256 var ctx: sha256 ctx.init() @@ -25,10 +27,22 @@ proc computePayloadId*(headBlockHash: Hash256, params: PayloadAttributesV1): Pay ctx.update(toBytesBE distinctBase params.timestamp) ctx.update(distinctBase params.prevRandao) ctx.update(distinctBase params.suggestedFeeRecipient) + # FIXME-Adam: Do we need to include the withdrawals in this calculation? + # https://github.com/ethereum/go-ethereum/pull/25838#discussion_r1024340383 + # "The execution api specs define that this ID can be completely random. 
It + # used to be derived from payload attributes in the past, but maybe it's + # time to use a randomized ID to not break it with any changes to the + # attributes?" ctx.finish dest.data ctx.clear() (distinctBase result)[0..7] = dest.data[0..7] +proc append*(w: var RlpWriter, q: Quantity) = + w.append(uint64(q)) + +proc append*(w: var RlpWriter, a: Address) = + w.append(distinctBase(a)) + template unsafeQuantityToInt64(q: Quantity): int64 = int64 q @@ -41,33 +55,69 @@ proc calcRootHashRlp*(items: openArray[seq[byte]]): Hash256 = tr.put(rlp.encode(i), t) return tr.rootHash() -proc toBlockHeader*(payload: ExecutionPayloadV1): EthBlockHeader = +proc calcWithdrawalsRoot(withdrawals: seq[WithdrawalV1]): Hash256 = + calcRootHashRlp(withdrawals.map(writer.encode)) + +func maybeWithdrawals*(payload: ExecutionPayloadV1 | ExecutionPayloadV2): Option[seq[WithdrawalV1]] = + when payload is ExecutionPayloadV1: + none[seq[WithdrawalV1]]() + else: + some(payload.withdrawals) + +proc toBlockHeader*(payload: ExecutionPayloadV1 | ExecutionPayloadV2): EthBlockHeader = let transactions = seq[seq[byte]](payload.transactions) let txRoot = calcRootHashRlp(transactions) - + EthBlockHeader( - parentHash : payload.parentHash.asEthHash, - ommersHash : EMPTY_UNCLE_HASH, - coinbase : EthAddress payload.feeRecipient, - stateRoot : payload.stateRoot.asEthHash, - txRoot : txRoot, - receiptRoot : payload.receiptsRoot.asEthHash, - bloom : distinctBase(payload.logsBloom), - difficulty : default(DifficultyInt), - blockNumber : payload.blockNumber.distinctBase.u256, - gasLimit : payload.gasLimit.unsafeQuantityToInt64, - gasUsed : payload.gasUsed.unsafeQuantityToInt64, - timestamp : fromUnix payload.timestamp.unsafeQuantityToInt64, - extraData : bytes payload.extraData, - mixDigest : payload.prevRandao.asEthHash, # EIP-4399 redefine `mixDigest` -> `prevRandao` - nonce : default(BlockNonce), - fee : some payload.baseFeePerGas + parentHash : payload.parentHash.asEthHash, + ommersHash : EMPTY_UNCLE_HASH, + coinbase : EthAddress payload.feeRecipient, + stateRoot : payload.stateRoot.asEthHash, + txRoot : txRoot, + receiptRoot : payload.receiptsRoot.asEthHash, + bloom : distinctBase(payload.logsBloom), + difficulty : default(DifficultyInt), + blockNumber : payload.blockNumber.distinctBase.u256, + gasLimit : payload.gasLimit.unsafeQuantityToInt64, + gasUsed : payload.gasUsed.unsafeQuantityToInt64, + timestamp : fromUnix payload.timestamp.unsafeQuantityToInt64, + extraData : bytes payload.extraData, + mixDigest : payload.prevRandao.asEthHash, # EIP-4399 redefine `mixDigest` -> `prevRandao` + nonce : default(BlockNonce), + fee : some payload.baseFeePerGas, + withdrawalsRoot: payload.maybeWithdrawals.map(calcWithdrawalsRoot) # EIP-4895 ) -proc toBlockBody*(payload: ExecutionPayloadV1): BlockBody = +proc toWithdrawal*(w: WithdrawalV1): Withdrawal = + Withdrawal( + index: uint64(w.index), + validatorIndex: uint64(w.validatorIndex), + address: distinctBase(w.address), + amount: uint64(w.amount) # AARDVARK: is this wei or gwei or what? + ) + +proc toWithdrawalV1*(w: Withdrawal): WithdrawalV1 = + WithdrawalV1( + index: Quantity(w.index), + validatorIndex: Quantity(w.validatorIndex), + address: Address(w.address), + amount: Quantity(w.amount) # AARDVARK: is this wei or gwei or what? 
+ ) + +proc toTypedTransaction*(tx: Transaction): TypedTransaction = + TypedTransaction(rlp.encode(tx)) + +proc toBlockBody*(payload: ExecutionPayloadV1 | ExecutionPayloadV2): BlockBody = result.transactions.setLen(payload.transactions.len) for i, tx in payload.transactions: result.transactions[i] = rlp.decode(distinctBase tx, Transaction) + when payload is ExecutionPayloadV2: + let ws = payload.maybeWithdrawals + result.withdrawals = + if ws.isSome: + some(ws.get.map(toWithdrawal)) + else: + none[seq[Withdrawal]]() proc `$`*(x: BlockHash): string = toHex(x) @@ -79,7 +129,10 @@ proc validateBlockHash*(header: EthBlockHeader, gotHash: Hash256): Result[void, let wantHash = header.blockHash if wantHash != gotHash: let status = PayloadStatusV1( - status: PayloadExecutionStatus.invalid_block_hash, + # This used to say invalid_block_hash, but see here: + # https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_newpayloadv2 + # "INVALID_BLOCK_HASH status value is supplanted by INVALID." + status: PayloadExecutionStatus.invalid, validationError: some("blockhash mismatch, want $1, got $2" % [$wantHash, $gotHash]) ) return err(status) diff --git a/nimbus/utils/utils.nim b/nimbus/utils/utils.nim index 844acbd30..e20d8dd24 100644 --- a/nimbus/utils/utils.nim +++ b/nimbus/utils/utils.nim @@ -16,6 +16,9 @@ proc calcRootHash[T](items: openArray[T]): Hash256 template calcTxRoot*(transactions: openArray[Transaction]): Hash256 = calcRootHash(transactions) +template calcWithdrawalsRoot*(withdrawals: openArray[Withdrawal]): Hash256 = + calcRootHash(withdrawals) + template calcReceiptRoot*(receipts: openArray[Receipt]): Hash256 = calcRootHash(receipts) diff --git a/nimbus_verified_proxy/nimbus_verified_proxy_conf.nim b/nimbus_verified_proxy/nimbus_verified_proxy_conf.nim index b14a0423e..6e94f23ed 100644 --- a/nimbus_verified_proxy/nimbus_verified_proxy_conf.nim +++ b/nimbus_verified_proxy/nimbus_verified_proxy_conf.nim @@ -204,7 +204,7 @@ func asLightClientConf*(pc: VerifiedProxyConf): LightClientConf = directPeers: pc.directPeers, trustedBlockRoot: pc.trustedBlockRoot, web3Urls: @[], - jwtSecret: none(string), + jwtSecret: none(InputFile), stopAtEpoch: 0 ) diff --git a/nimbus_verified_proxy/rpc/rpc_eth_api.nim b/nimbus_verified_proxy/rpc/rpc_eth_api.nim index 1061fa32a..bf4841a6b 100644 --- a/nimbus_verified_proxy/rpc/rpc_eth_api.nim +++ b/nimbus_verified_proxy/rpc/rpc_eth_api.nim @@ -284,6 +284,41 @@ proc new*( blockCache: blockCache, chainId: chainId) +# Used to be in eth1_monitor.nim; not sure why it was deleted, +# so I copied it here. --Adam +template awaitWithRetries*[T](lazyFutExpr: Future[T], + retries = 3, + timeout = 60.seconds): untyped = + const + reqType = astToStr(lazyFutExpr) + var + retryDelayMs = 16000 + f: Future[T] + attempts = 0 + + while true: + f = lazyFutExpr + yield f or sleepAsync(timeout) + if not f.finished: + await cancelAndWait(f) + elif f.failed: + when not (f.error of CatchableError): + static: doAssert false, "f.error not CatchableError" + debug "Web3 request failed", req = reqType, err = f.error.msg + else: + break + + inc attempts + if attempts >= retries: + var errorMsg = reqType & " failed " & $retries & " times" + if f.failed: errorMsg &= ". 
Last error: " & f.error.msg + raise newException(DataProviderFailure, errorMsg) + + await sleepAsync(chronos.milliseconds(retryDelayMs)) + retryDelayMs *= 2 + + read(f) + proc verifyChaindId*(p: VerifiedRpcProxy): Future[void] {.async.} = let localId = p.chainId diff --git a/nimbus_verified_proxy/rpc/rpc_utils.nim b/nimbus_verified_proxy/rpc/rpc_utils.nim index fdfe693e2..464c14e97 100644 --- a/nimbus_verified_proxy/rpc/rpc_utils.nim +++ b/nimbus_verified_proxy/rpc/rpc_utils.nim @@ -78,8 +78,8 @@ template unsafeQuantityToInt64(q: Quantity): int64 = func toFixedBytes(d: MDigest[256]): FixedBytes[32] = FixedBytes[32](d.data) -template asEthHash(hash: BlockHash): Hash256 = - Hash256(data: distinctBase(hash)) +template asEthHash(hash: BlockHash): etypes.Hash256 = + etypes.Hash256(data: distinctBase(hash)) proc calculateTransactionData( items: openArray[TypedTransaction]): @@ -121,7 +121,7 @@ func blockHeaderSize( return uint64(len(rlp.encode(bh))) proc asBlockObject*( - p: ExecutionData): BlockObject {.raises: [RlpError].} = + p: ExecutionData): BlockObject {.raises: [RlpError, ValueError].} = # TODO: currently we always calculate txHashes as BlockObject does not have # option of returning full transactions. It needs fixing at nim-web3 library # level @@ -139,7 +139,7 @@ proc asBlockObject*( receiptsRoot: p.receiptsRoot, miner: p.feeRecipient, difficulty: UInt256.zero, - extraData: p.extraData.toHex, + extraData: fromHex(DynamicBytes[0, 32], p.extraData.toHex), gasLimit: p.gasLimit, gasUsed: p.gasUsed, timestamp: p.timestamp, diff --git a/premix/parser.nim b/premix/parser.nim index 4d9b96271..36f98701b 100644 --- a/premix/parser.nim +++ b/premix/parser.nim @@ -108,6 +108,7 @@ proc parseBlockHeader*(n: JsonNode): BlockHeader = n.fromJson "mixHash", result.mixDigest n.fromJson "nonce", result.nonce n.fromJson "baseFeePerGas", result.fee + n.fromJson "withdrawalsRoot", result.withdrawalsRoot if result.baseFee == 0.u256: # probably geth bug @@ -154,6 +155,12 @@ proc parseTransaction*(n: JsonNode): Transaction = tx.accessList.add parseAccessPair(acn) tx +proc parseWithdrawal*(n: JsonNode): Withdrawal = + n.fromJson "index", result.index + n.fromJson "validatorIndex", result.validatorIndex + n.fromJson "address", result.address + n.fromJson "amount", result.amount + proc validateTxSenderAndHash*(n: JsonNode, tx: Transaction) = var sender = tx.getSender() var fromAddr: EthAddress diff --git a/tests/test_blockchain_json.nim b/tests/test_blockchain_json.nim index d906f886c..bf55fc2ab 100644 --- a/tests/test_blockchain_json.nim +++ b/tests/test_blockchain_json.nim @@ -40,6 +40,7 @@ type header : BlockHeader body : BlockBody hasException: bool + withdrawals: Option[seq[Withdrawal]] Tester = object lastBlockHash: Hash256 @@ -95,12 +96,30 @@ func normalizeBlockHeader(node: JsonNode): JsonNode = else: discard result = node +func normalizeWithdrawal(node: JsonNode): JsonNode = + for k, v in node: + case k + of "address", "amount", "index", "validatorIndex": + node[k] = normalizeNumber(v) + else: discard + result = node + proc parseHeader(blockHeader: JsonNode, testStatusIMPL: var TestStatus): BlockHeader = result = normalizeBlockHeader(blockHeader).parseBlockHeader var blockHash: Hash256 blockHeader.fromJson "hash", blockHash check blockHash == hash(result) +proc parseWithdrawals(withdrawals: JsonNode): Option[seq[Withdrawal]] = + case withdrawals.kind + of JArray: + var ws: seq[Withdrawal] + for v in withdrawals: + ws.add(parseWithdrawal(normalizeWithdrawal(v))) + some(ws) + else: + none[seq[Withdrawal]]() 
 proc parseBlocks(blocks: JsonNode): seq[TestBlock] =
   for fixture in blocks:
     var t: TestBlock
@@ -120,6 +139,8 @@ proc parseBlocks(blocks: JsonNode): seq[TestBlock] =
           let valid = tx["valid"].getStr == "true"
           noError = noError and valid
         doAssert(noError == false, "NOT A VALID TEST CASE")
+      of "withdrawals":
+        t.withdrawals = parseWithdrawals(value)
       else:
         doAssert("expectException" in key, key)
         t.hasException = true
@@ -206,6 +227,7 @@ proc applyFixtureBlockToChain(tester: var Tester, tb: var TestBlock,
   var rlp = rlpFromBytes(tb.blockRLP)
   tb.header = rlp.read(EthHeader).header
   tb.body = rlp.readRecordType(BlockBody, false)
+  tb.body.withdrawals = tb.withdrawals
   tester.importBlock(com, tb, checkSeal, validation)
 
 func shouldCheckSeal(tester: Tester): bool =
diff --git a/vendor/nim-json-rpc b/vendor/nim-json-rpc
index 38950a786..af1276443 160000
--- a/vendor/nim-json-rpc
+++ b/vendor/nim-json-rpc
@@ -1 +1 @@
-Subproject commit 38950a786d00d4b97e7550b25a32eb14fdbc790d
+Subproject commit af1276443618974a95dd3c83e57a1ecd70df2c5e
diff --git a/vendor/nim-presto b/vendor/nim-presto
index c784f3afb..18837545f 160000
--- a/vendor/nim-presto
+++ b/vendor/nim-presto
@@ -1 +1 @@
-Subproject commit c784f3afb58740d5c203c987e9c9ba9ef8e642f9
+Subproject commit 18837545f3234f2eae187b2fd1ea24477398775e
diff --git a/vendor/nim-web3 b/vendor/nim-web3
index 98fba0fb0..610dda642 160000
--- a/vendor/nim-web3
+++ b/vendor/nim-web3
@@ -1 +1 @@
-Subproject commit 98fba0fb0471abffdbe69fb8e66bb59152a7075c
+Subproject commit 610dda642c3d7e5b0f50bba5457f0da490219001
diff --git a/vendor/nimbus-eth2 b/vendor/nimbus-eth2
index cdca07908..8771e91d5 160000
--- a/vendor/nimbus-eth2
+++ b/vendor/nimbus-eth2
@@ -1 +1 @@
-Subproject commit cdca07908b489a7445aa10d2776f21dd9f8ba264
+Subproject commit 8771e91d53072373cde1b2241092c5d6b2e5f3ab
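
Note on the withdrawal accounting added in nimbus/core/executor/process_block.nim: per EIP-4895 a withdrawal's amount field is denominated in gwei, while account balances are tracked in wei, so each withdrawal is an unconditional balance credit of amount * 10^9 wei. The snippet below is a minimal sketch of that conversion, not code from this diff; it assumes the stint and nim-eth packages this repository already depends on, and the totalWithdrawnWei helper is purely illustrative. It widens to UInt256 before multiplying, which avoids uint64 overflow for full-withdrawal-sized amounts (~32e9 gwei); whether the in-tree gwei helper needs the same widening is worth double-checking.

# Illustrative sketch only (not part of this diff); assumes stint and nim-eth.
import
  stint,
  eth/common/eth_types   # Withdrawal, EthAddress

func gweiToWei(amountGwei: uint64): UInt256 =
  # Widen to 256 bits before multiplying: 32_000_000_000 gwei * 10^9
  # does not fit in uint64.
  u256(amountGwei) * u256(1_000_000_000)

func totalWithdrawnWei(withdrawals: openArray[Withdrawal]): UInt256 =
  # Withdrawals are unconditional balance credits: they are not
  # transactions, consume no gas and cannot fail.
  for w in withdrawals:
    result = result + gweiToWei(w.amount)

when isMainModule:
  let ws = @[Withdrawal(index: 0, validatorIndex: 7,
                        address: default(EthAddress),
                        amount: 32_000_000_000'u64)]
  doAssert totalWithdrawnWei(ws) == gweiToWei(32_000_000_000'u64)
  doAssert gweiToWei(high(uint64)) > u256(high(uint64))  # no overflow

This mirrors the loop in procBlkPreamble that calls vmState.stateDB.addBalance for each withdrawal once the header's withdrawalsRoot has been checked against calcWithdrawalsRoot of the block body.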