Introduce wrapper type for EIP-4844 transactions (#2177)
* Introduce wrapper type for EIP-4844 transactions

  EIP-4844 blob sidecars are a concept that only exists in the mempool. After
  inclusion of a transaction into an execution block, only the versioned hash
  within the transaction remains. To improve type safety, replace the
  `Transaction.networkPayload` member with a wrapper type `PooledTransaction`
  that is used in contexts where blob sidecars exist.

* Bump nimbus-eth2 to 87605d08a7f9cfc3b223bd32143e93a6cdf351ac
* IPv6 'listen-address' in `nimbus_verified_proxy`
* Bump nim-libp2p to 21cbe3a91a70811522554e89e6a791172cebfef2
* Fix beacon_lc_bridge payload conversion and conf.listenAddress type
* Change nimbus_verified_proxy.asExecutionData param to SomeExecutionPayload
* Rerun nph to fix asExecutionData style format
* nimbus_verified_proxy listenAddress
* Use PooledTransaction in nimbus-eth1 tests

---------

Co-authored-by: jangko <jangko128@gmail.com>
parent 766823cd49
commit c4c37302b1
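For orientation, the wrapper type that the hunks below build against has roughly the
following shape. This is a minimal sketch inferred from how the diff constructs and
consumes the type; the exact module layout and the sidecar element type names
(NetworkBlob, KzgCommitment, KzgProof) are assumptions, not taken from this commit.

  type
    NetworkPayload* = ref object
      ## Blob sidecar data; it exists only while the transaction sits in the mempool.
      blobs*      : seq[NetworkBlob]
      commitments*: seq[KzgCommitment]
      proofs*     : seq[KzgProof]

    PooledTransaction* = object
      ## Mempool-level view of a transaction: the execution-layer transaction
      ## plus its optional blob sidecar.
      tx*            : Transaction
      networkPayload*: NetworkPayload   # nil for transactions without blobs

Construction throughout the test helpers below follows the same pattern:

    PooledTransaction(
      tx: signTransaction(unsignedTx, key, chainId, eip155 = true),
      networkPayload: NetworkPayload(
        blobs: ..., commitments: ..., proofs: ...))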
@@ -154,7 +154,7 @@ proc asPortalBlockData*(
   (hash, headerWithProof, body)

 proc asPortalBlockData*(
-    payload: ExecutionPayloadV2 | ExecutionPayloadV3
+    payload: ExecutionPayloadV2 | ExecutionPayloadV3 | ExecutionPayloadV4
 ): (common_types.BlockHash, BlockHeaderWithProof, PortalBlockBodyShanghai) =
   let
     txRoot = calculateTransactionData(payload.transactions)
@@ -99,10 +99,9 @@ type BeaconBridgeConf* = object # Config

   listenAddress* {.
     desc: "Listening address for the Ethereum LibP2P and Discovery v5 traffic",
-    defaultValue: defaultListenAddress,
-    defaultValueDesc: $defaultListenAddressDesc,
+    defaultValueDesc: "*",
     name: "listen-address"
-  .}: IpAddress
+  .}: Option[IpAddress]

   tcpPort* {.
     desc: "Listening TCP port for Ethereum LibP2P traffic",
@@ -336,7 +336,7 @@ func getTimestamp*(cust: CustomPayloadData, basePayload: ExecutionPayload): uint
 # Construct a customized payload by taking an existing payload as base and mixing it CustomPayloadData
 # blockHash is calculated automatically.
 proc customizePayload*(cust: CustomPayloadData, data: ExecutableData): ExecutableData {.gcsafe.} =
-  var customHeader = blockHeader(data.basePayload, removeBlobs = false, beaconRoot = data.beaconRoot)
+  var customHeader = blockHeader(data.basePayload, beaconRoot = data.beaconRoot)
   if cust.transactions.isSome:
     customHeader.txRoot = calcTxRoot(cust.transactions.get)
@@ -29,7 +29,7 @@ type
   TestBlobTxPool* = ref object
     currentBlobID* : BlobID
     currentTxIndex*: int
-    transactions*  : Table[common.Hash256, Transaction]
+    transactions*  : Table[common.Hash256, PooledTransaction]
     hashesByIndex* : Table[int, common.Hash256]

 const
@@ -53,7 +53,7 @@ func getMinExcessBlobGasForBlobGasPrice(data_gas_price: uint64): uint64 =
 func getMinExcessBlobsForBlobGasPrice*(data_gas_price: uint64): uint64 =
   return getMinExcessBlobGasForBlobGasPrice(data_gas_price) div GAS_PER_BLOB.uint64

-proc addBlobTransaction*(pool: TestBlobTxPool, tx: Transaction) =
+proc addBlobTransaction*(pool: TestBlobTxPool, tx: PooledTransaction) =
   let txHash = rlpHash(tx)
   pool.transactions[txHash] = tx
@@ -178,19 +178,19 @@ proc getBlobDataInPayload*(pool: TestBlobTxPool, payload: ExecutionPayload): Res
       return err("blob data is nil")

     let np = blobTx.networkPayload
-    if blobTx.versionedHashes.len != np.commitments.len or
+    if blobTx.tx.versionedHashes.len != np.commitments.len or
        np.commitments.len != np.blobs.len or
        np.blobs.len != np.proofs.len:
       return err("invalid blob wrap data")

-    for i in 0..<blobTx.versionedHashes.len:
+    for i in 0..<blobTx.tx.versionedHashes.len:
       blobData.data.add BlobWrapData(
-        versionedHash: blobTx.versionedHashes[i],
+        versionedHash: blobTx.tx.versionedHashes[i],
         commitment   : np.commitments[i],
         blob         : np.blobs[i],
         proof        : np.proofs[i],
       )
-    blobData.txs.add blobTx
+    blobData.txs.add blobTx.tx

   return ok(blobData)
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -40,7 +40,7 @@ method execute*(step: DevP2PRequestPooledTransactionHash, ctx: CancunTestContext

   var
     txHashes = newSeq[common.Hash256](step.transactionIndexes.len)
-    txs = newSeq[Transaction](step.transactionIndexes.len)
+    txs = newSeq[PooledTransaction](step.transactionIndexes.len)

   for i, txIndex in step.transactionIndexes:
     if not ctx.txPool.hashesByIndex.hasKey(txIndex):
@@ -80,7 +80,7 @@ method execute*(step: SendBlobTransactions, ctx: CancunTestContext): bool =

     let blobTx = res.get
     if not step.skipVerificationFromNode:
-      let r = verifyTransactionFromNode(engine.client, blobTx)
+      let r = verifyTransactionFromNode(engine.client, blobTx.tx)
       if r.isErr:
         error "verify tx from node", msg=r.error
         return false
@@ -333,7 +333,7 @@ proc getNextPayload(cl: CLMocker): bool =
   cl.latestShouldOverrideBuilder = x.shouldOverrideBuilder

   let beaconRoot = ethHash cl.latestPayloadAttributes.parentBeaconblockRoot
-  let header = blockHeader(cl.latestPayloadBuilt, removeBlobs = true, beaconRoot = beaconRoot)
+  let header = blockHeader(cl.latestPayloadBuilt, beaconRoot = beaconRoot)
   let blockHash = w3Hash header.blockHash
   if blockHash != cl.latestPayloadBuilt.blockHash:
     error "CLMocker: getNextPayload blockHash mismatch",
@@ -192,7 +192,7 @@ method getName(cs: InvalidMissingAncestorReOrgSyncTest): string =
     $cs.invalidField, $cs.emptyTransactions, $cs.reOrgFromCanonical, $cs.invalidIndex]

 proc executableDataToBlock(ex: ExecutableData): EthBlock =
-  ethBlock(ex.basePayload, removeBlobs = true, beaconRoot = ex.beaconRoot)
+  ethBlock(ex.basePayload, beaconRoot = ex.beaconRoot)

 method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
   var sec = env.addEngine(true, cs.reOrgFromCanonical)
@@ -380,7 +380,7 @@ method execute(cs: PayloadBuildAfterInvalidPayloadTest, env: TestEnv): bool =
 type
   InvalidTxChainIDTest* = ref object of EngineSpec
   InvalidTxChainIDShadow = ref object
-    invalidTx: Transaction
+    invalidTx: PooledTransaction

 method withMainFork(cs: InvalidTxChainIDTest, fork: EngineFork): BaseSpec =
   var res = cs.clone()
@@ -430,7 +430,9 @@ method execute(cs: InvalidTxChainIDTest, env: TestEnv): bool =
       chainID: some((chainId.uint64 + 1'u64).ChainId)
     )

-    shadow.invalidTx = env.customizeTransaction(sender, tx, txCustomizerData)
+    shadow.invalidTx = tx
+    shadow.invalidTx.tx = env.customizeTransaction(
+      sender, shadow.invalidTx.tx, txCustomizerData)
     testCond env.sendTx(shadow.invalidTx):
       info "Error on sending transaction with incorrect chain ID"
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
 #    http://www.apache.org/licenses/LICENSE-2.0)
@@ -23,7 +23,7 @@ type
     startBlockNumber: uint64
     blockCount: int
     currentTxIndex: int
-    txs: seq[Transaction]
+    txs: seq[PooledTransaction]

 method withMainFork(cs: PrevRandaoTransactionTest, fork: EngineFork): BaseSpec =
   var res = cs.clone()
@@ -119,9 +119,9 @@ type

   ShadowTx = ref object
     payload: ExecutionPayload
-    nextTx: Transaction
-    tx: Option[Transaction]
-    sendTransaction: proc(i: int): Transaction {.gcsafe.}
+    nextTx: PooledTransaction
+    tx: Option[PooledTransaction]
+    sendTransaction: proc(i: int): PooledTransaction {.gcsafe.}

 method withMainFork(cs: TransactionReOrgTest, fork: EngineFork): BaseSpec =
   var res = cs.clone()
@@ -153,7 +153,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool =
   var shadow = ShadowTx()

   # Send a transaction on each payload of the canonical chain
-  shadow.sendTransaction = proc(i: int): Transaction {.gcsafe.} =
+  shadow.sendTransaction = proc(i: int): PooledTransaction {.gcsafe.} =
     let sstoreContractAddr = hexToByteArray[20]("0000000000000000000000000000000000000317")
     var data: array[32, byte]
     data[^1] = i.byte
@@ -332,7 +332,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool =
       txt.expectBlockHash(ethHash env.clMock.latestForkchoice.headBlockHash)

     if cs.scenario != TransactionReOrgScenarioReOrgBackIn:
-      shadow.tx = none(Transaction)
+      shadow.tx = none(PooledTransaction)

     if cs.scenario == TransactionReOrgScenarioReOrgBackIn and i > 0:
       # Reasoning: Most of the clients do not re-add blob transactions to the pool
@@ -512,7 +512,8 @@ proc namedHeader*(client: RpcClient, name: string): Result[common.BlockHeader, s
     return err("failed to get named blockHeader")
   return ok(res.toBlockHeader)

-proc sendTransaction*(client: RpcClient, tx: common.Transaction): Result[void, string] =
+proc sendTransaction*(
+    client: RpcClient, tx: common.PooledTransaction): Result[void, string] =
   wrapTry:
     let encodedTx = rlp.encode(tx)
     let res = waitFor client.eth_sendRawTransaction(encodedTx)
@@ -603,7 +604,10 @@ TraceOpts.useDefaultSerializationIn JrpcConv
 createRpcSigsFromNim(RpcClient):
   proc debug_traceTransaction(hash: TxHash, opts: TraceOpts): JsonNode

-proc debugPrevRandaoTransaction*(client: RpcClient, tx: Transaction, expectedPrevRandao: Hash256): Result[void, string] =
+proc debugPrevRandaoTransaction*(
+    client: RpcClient,
+    tx: PooledTransaction,
+    expectedPrevRandao: Hash256): Result[void, string] =
   wrapTry:
     let hash = w3Hash tx.rlpHash
     # we only interested in stack, disable all other elems
@@ -116,18 +116,20 @@ func numEngines*(env: TestEnv): int =
 func accounts*(env: TestEnv, idx: int): TestAccount =
   env.sender.getAccount(idx)

-proc makeTx*(env: TestEnv, tc: BaseTx, nonce: AccountNonce): Transaction =
+proc makeTx*(
+    env: TestEnv, tc: BaseTx, nonce: AccountNonce): PooledTransaction =
   env.sender.makeTx(tc, nonce)

-proc makeTx*(env: TestEnv, tc: BigInitcodeTx, nonce: AccountNonce): Transaction =
+proc makeTx*(
+    env: TestEnv, tc: BigInitcodeTx, nonce: AccountNonce): PooledTransaction =
   env.sender.makeTx(tc, nonce)

-proc makeTxs*(env: TestEnv, tc: BaseTx, num: int): seq[Transaction] =
-  result = newSeqOfCap[Transaction](num)
+proc makeTxs*(env: TestEnv, tc: BaseTx, num: int): seq[PooledTransaction] =
+  result = newSeqOfCap[PooledTransaction](num)
   for _ in 0..<num:
     result.add env.sender.makeNextTx(tc)

-proc makeNextTx*(env: TestEnv, tc: BaseTx): Transaction =
+proc makeNextTx*(env: TestEnv, tc: BaseTx): PooledTransaction =
   env.sender.makeNextTx(tc)

 proc sendNextTx*(env: TestEnv, eng: EngineEnv, tc: BaseTx): bool =
@@ -145,7 +147,8 @@ proc sendTx*(env: TestEnv, eng: EngineEnv, tc: BaseTx, nonce: AccountNonce): boo
 proc sendTx*(env: TestEnv, eng: EngineEnv, tc: BigInitcodeTx, nonce: AccountNonce): bool =
   env.sender.sendTx(eng.client, tc, nonce)

-proc sendTxs*(env: TestEnv, eng: EngineEnv, txs: openArray[Transaction]): bool =
+proc sendTxs*(
+    env: TestEnv, eng: EngineEnv, txs: openArray[PooledTransaction]): bool =
   for tx in txs:
     if not sendTx(eng.client, tx):
       return false
@@ -163,17 +166,29 @@ proc sendTx*(env: TestEnv, tc: BigInitcodeTx, nonce: AccountNonce): bool =
   let client = env.engine.client
   env.sender.sendTx(client, tc, nonce)

-proc sendTx*(env: TestEnv, tx: Transaction): bool =
+proc sendTx*(env: TestEnv, tx: PooledTransaction): bool =
   let client = env.engine.client
   sendTx(client, tx)

-proc sendTx*(env: TestEnv, sender: TestAccount, eng: EngineEnv, tc: BlobTx): Result[Transaction, void] =
+proc sendTx*(
+    env: TestEnv,
+    sender: TestAccount,
+    eng: EngineEnv,
+    tc: BlobTx): Result[PooledTransaction, void] =
   env.sender.sendTx(sender, eng.client, tc)

-proc replaceTx*(env: TestEnv, sender: TestAccount, eng: EngineEnv, tc: BlobTx): Result[Transaction, void] =
+proc replaceTx*(
+    env: TestEnv,
+    sender: TestAccount,
+    eng: EngineEnv,
+    tc: BlobTx): Result[PooledTransaction, void] =
   env.sender.replaceTx(sender, eng.client, tc)

-proc makeTx*(env: TestEnv, tc: BaseTx, sender: TestAccount, nonce: AccountNonce): Transaction =
+proc makeTx*(
+    env: TestEnv,
+    tc: BaseTx,
+    sender: TestAccount,
+    nonce: AccountNonce): PooledTransaction =
   env.sender.makeTx(tc, sender, nonce)

 proc customizeTransaction*(env: TestEnv,
@@ -130,7 +130,7 @@ proc getTxType(tc: BaseTx, nonce: uint64): TxType =
   else:
     tc.txType.get

-proc makeTxOfType(params: MakeTxParams, tc: BaseTx): Transaction =
+proc makeTxOfType(params: MakeTxParams, tc: BaseTx): PooledTransaction =
   let
     gasFeeCap = if tc.gasFee != 0.GasInt: tc.gasFee
                 else: gasPrice
@@ -140,26 +140,30 @@ proc makeTxOfType(params: MakeTxParams, tc: BaseTx): Transaction =
   let txType = tc.getTxType(params.nonce)
   case txType
   of TxLegacy:
-    Transaction(
-      txType  : TxLegacy,
-      nonce   : params.nonce,
-      to      : tc.recipient,
-      value   : tc.amount,
-      gasLimit: tc.gasLimit,
-      gasPrice: gasPrice,
-      payload : tc.payload
+    PooledTransaction(
+      tx: Transaction(
+        txType  : TxLegacy,
+        nonce   : params.nonce,
+        to      : tc.recipient,
+        value   : tc.amount,
+        gasLimit: tc.gasLimit,
+        gasPrice: gasPrice,
+        payload : tc.payload
+      )
     )
   of TxEip1559:
-    Transaction(
-      txType  : TxEIP1559,
-      nonce   : params.nonce,
-      gasLimit: tc.gasLimit,
-      maxFee  : gasFeeCap,
-      maxPriorityFee: gasTipCap,
-      to      : tc.recipient,
-      value   : tc.amount,
-      payload : tc.payload,
-      chainId : params.chainId
+    PooledTransaction(
+      tx: Transaction(
+        txType  : TxEIP1559,
+        nonce   : params.nonce,
+        gasLimit: tc.gasLimit,
+        maxFee  : gasFeeCap,
+        maxPriorityFee: gasTipCap,
+        to      : tc.recipient,
+        value   : tc.amount,
+        payload : tc.payload,
+        chainId : params.chainId
+      )
     )
   of TxEip4844:
     doAssert(tc.recipient.isSome, "recipient must be some")
@@ -173,19 +177,21 @@ proc makeTxOfType(params: MakeTxParams, tc: BaseTx): Transaction =
     var blobData = blobDataGenerator(tc.blobID, blobCount)
     #tc.blobID += BlobID(blobCount)

-    Transaction(
-      txType  : TxEIP4844,
-      nonce   : params.nonce,
-      chainId : params.chainId,
-      maxFee  : gasFeeCap,
-      maxPriorityFee: gasTipCap,
-      gasLimit: tc.gasLimit,
-      to      : tc.recipient,
-      value   : tc.amount,
-      payload : tc.payload,
-      #AccessList: tc.AccessList,
-      maxFeePerBlobGas: blobFeeCap,
-      versionedHashes: system.move(blobData.hashes),
+    PooledTransaction(
+      tx: Transaction(
+        txType  : TxEIP4844,
+        nonce   : params.nonce,
+        chainId : params.chainId,
+        maxFee  : gasFeeCap,
+        maxPriorityFee: gasTipCap,
+        gasLimit: tc.gasLimit,
+        to      : tc.recipient,
+        value   : tc.amount,
+        payload : tc.payload,
+        #AccessList: tc.AccessList,
+        maxFeePerBlobGas: blobFeeCap,
+        versionedHashes: system.move(blobData.hashes),
+      ),
       networkPayload: NetworkPayload(
         blobs: system.move(blobData.blobs),
         commitments: system.move(blobData.commitments),
@@ -193,15 +199,16 @@ proc makeTxOfType(params: MakeTxParams, tc: BaseTx): Transaction =
       )
     )
   else:
-    doAssert(false, "unsupported tx type")
-    Transaction()
+    raiseAssert "unsupported tx type"

-proc makeTx(params: MakeTxParams, tc: BaseTx): Transaction =
+proc makeTx(params: MakeTxParams, tc: BaseTx): PooledTransaction =
   # Build the transaction depending on the specified type
   let tx = makeTxOfType(params, tc)
-  signTransaction(tx, params.key, params.chainId, eip155 = true)
+  PooledTransaction(
+    tx: signTransaction(tx.tx, params.key, params.chainId, eip155 = true),
+    networkPayload: tx.networkPayload)

-proc makeTx(params: MakeTxParams, tc: BigInitcodeTx): Transaction =
+proc makeTx(params: MakeTxParams, tc: BigInitcodeTx): PooledTransaction =
   var tx = tc
   if tx.payload.len == 0:
     # Prepare initcode payload
|
||||||
doAssert(tx.recipient.isNone, "invalid configuration for big contract tx creator")
|
doAssert(tx.recipient.isNone, "invalid configuration for big contract tx creator")
|
||||||
params.makeTx(tx.BaseTx)
|
params.makeTx(tx.BaseTx)
|
||||||
|
|
||||||
proc makeTx*(sender: TxSender, tc: BaseTx, nonce: AccountNonce): Transaction =
|
proc makeTx*(
|
||||||
|
sender: TxSender, tc: BaseTx, nonce: AccountNonce): PooledTransaction =
|
||||||
let acc = sender.getNextAccount()
|
let acc = sender.getNextAccount()
|
||||||
let params = MakeTxParams(
|
let params = MakeTxParams(
|
||||||
chainId: sender.chainId,
|
chainId: sender.chainId,
|
||||||
|
@ -224,7 +232,10 @@ proc makeTx*(sender: TxSender, tc: BaseTx, nonce: AccountNonce): Transaction =
|
||||||
)
|
)
|
||||||
params.makeTx(tc)
|
params.makeTx(tc)
|
||||||
|
|
||||||
proc makeTx*(sender: TxSender, tc: BigInitcodeTx, nonce: AccountNonce): Transaction =
|
proc makeTx*(
|
||||||
|
sender: TxSender,
|
||||||
|
tc: BigInitcodeTx,
|
||||||
|
nonce: AccountNonce): PooledTransaction =
|
||||||
let acc = sender.getNextAccount()
|
let acc = sender.getNextAccount()
|
||||||
let params = MakeTxParams(
|
let params = MakeTxParams(
|
||||||
chainId: sender.chainId,
|
chainId: sender.chainId,
|
||||||
|
@@ -233,7 +244,7 @@ proc makeTx*(sender: TxSender, tc: BigInitcodeTx, nonce: AccountNonce): Transact
   )
   params.makeTx(tc)

-proc makeNextTx*(sender: TxSender, tc: BaseTx): Transaction =
+proc makeNextTx*(sender: TxSender, tc: BaseTx): PooledTransaction =
   let
     acc = sender.getNextAccount()
     nonce = sender.getNextNonce(acc.address)
|
||||||
inc sender.txSent
|
inc sender.txSent
|
||||||
return true
|
return true
|
||||||
|
|
||||||
proc sendTx*(client: RpcClient, tx: Transaction): bool =
|
proc sendTx*(client: RpcClient, tx: PooledTransaction): bool =
|
||||||
let rr = client.sendTransaction(tx)
|
let rr = client.sendTransaction(tx)
|
||||||
if rr.isErr:
|
if rr.isErr:
|
||||||
error "Unable to send transaction", msg=rr.error
|
error "Unable to send transaction", msg=rr.error
|
||||||
return false
|
return false
|
||||||
return true
|
return true
|
||||||
|
|
||||||
proc makeTx*(params: MakeTxParams, tc: BlobTx): Transaction =
|
proc makeTx*(params: MakeTxParams, tc: BlobTx): PooledTransaction =
|
||||||
# Need tx wrap data that will pass blob verification
|
# Need tx wrap data that will pass blob verification
|
||||||
let data = blobDataGenerator(tc.blobID, tc.blobCount)
|
let data = blobDataGenerator(tc.blobID, tc.blobCount)
|
||||||
doAssert(tc.recipient.isSome, "nil recipient address")
|
doAssert(tc.recipient.isSome, "nil recipient address")
|
||||||
|
@@ -323,19 +334,23 @@ proc makeTx*(params: MakeTxParams, tc: BlobTx): Transaction =
     versionedHashes: data.hashes,
   )

-  var tx = signTransaction(unsignedTx, params.key, params.chainId, eip155 = true)
-  tx.networkPayload = NetworkPayload(
-    blobs      : data.blobs,
-    commitments: data.commitments,
-    proofs     : data.proofs,
+  PooledTransaction(
+    tx: signTransaction(unsignedTx, params.key, params.chainId, eip155 = true),
+    networkPayload: NetworkPayload(
+      blobs      : data.blobs,
+      commitments: data.commitments,
+      proofs     : data.proofs,
+    ),
   )

-  tx
-
 proc getAccount*(sender: TxSender, idx: int): TestAccount =
   sender.accounts[idx]

-proc sendTx*(sender: TxSender, acc: TestAccount, client: RpcClient, tc: BlobTx): Result[Transaction, void] =
+proc sendTx*(
+    sender: TxSender,
+    acc: TestAccount,
+    client: RpcClient,
+    tc: BlobTx): Result[PooledTransaction, void] =
   let
     params = MakeTxParams(
       chainId: sender.chainId,
@@ -352,7 +367,11 @@ proc sendTx*(sender: TxSender, acc: TestAccount, client: RpcClient, tc: BlobTx):
     inc sender.txSent
     return ok(tx)

-proc replaceTx*(sender: TxSender, acc: TestAccount, client: RpcClient, tc: BlobTx): Result[Transaction, void] =
+proc replaceTx*(
+    sender: TxSender,
+    acc: TestAccount,
+    client: RpcClient,
+    tc: BlobTx): Result[PooledTransaction, void] =
   let
     params = MakeTxParams(
       chainId: sender.chainId,
@@ -369,7 +388,11 @@ proc replaceTx*(sender: TxSender, acc: TestAccount, client: RpcClient, tc: BlobT
     inc sender.txSent
     return ok(tx)

-proc makeTx*(sender: TxSender, tc: BaseTx, acc: TestAccount, nonce: AccountNonce): Transaction =
+proc makeTx*(
+    sender: TxSender,
+    tc: BaseTx,
+    acc: TestAccount,
+    nonce: AccountNonce): PooledTransaction =
   let
     params = MakeTxParams(
       chainId: sender.chainId,
@@ -112,7 +112,7 @@ proc execute*(ws: MaxInitcodeSizeSpec, env: TestEnv): bool =
     # Customize the payload to include a tx with an invalid initcode
     let customizer = CustomPayloadData(
       parentBeaconRoot: ethHash env.clMock.latestPayloadAttributes.parentBeaconBlockRoot,
-      transactions: some( @[invalidTx] ),
+      transactions: some( @[invalidTx.tx] ),
     )

     let customPayload = customizer.customizePayload(env.clMock.latestExecutableData).basePayload
@@ -18,7 +18,8 @@ import

 export eth_api

-proc sendTransaction*(client: RpcClient, tx: Transaction): Future[bool] {.async.} =
+proc sendTransaction*(
+    client: RpcClient, tx: PooledTransaction): Future[bool] {.async.} =
   let data = rlp.encode(tx)
   let txHash = keccakHash(data)
   let hex = await client.eth_sendRawTransaction(data)
@@ -90,7 +90,7 @@ proc balanceAndNonceAtTest(t: TestEnv): Future[TestStatus] {.async.} =

   let txHash = rlpHash(tx)
   echo "BalanceAt: send $1 wei from 0x$2 to 0x$3 in 0x$4" % [
-    $tx.value, sourceAddr.toHex, targetAddr.toHex, txHash.data.toHex]
+    $tx.tx.value, sourceAddr.toHex, targetAddr.toHex, txHash.data.toHex]

   let ok = await client.sendTransaction(tx)
   if not ok:
@@ -117,14 +117,16 @@ proc balanceAndNonceAtTest(t: TestEnv): Future[TestStatus] {.async.} =
   let balanceTargetAccountAfter = await client.balanceAt(targetAddr)

   # expected balance is previous balance - tx amount - tx fee (gasUsed * gasPrice)
-  let exp = sourceAddressBalanceBefore - amount - (gasUsed * tx.gasPrice).u256
+  let exp =
+    sourceAddressBalanceBefore - amount - (gasUsed * tx.tx.gasPrice).u256

   if exp != accountBalanceAfter:
     echo "Expected sender account to have a balance of $1, got $2" % [$exp, $accountBalanceAfter]
     return TestStatus.Failed

   if balanceTargetAccountAfter != amount:
-    echo "Expected new account to have a balance of $1, got $2" % [$tx.value, $balanceTargetAccountAfter]
+    echo "Expected new account to have a balance of $1, got $2" % [
+      $tx.tx.value, $balanceTargetAccountAfter]
     return TestStatus.Failed

   # ensure nonce is incremented by 1
@@ -79,7 +79,8 @@ proc sendSome(address: EthAddress, amount: UInt256): seq[byte] =
   result.add amount.toBytesBE
   doAssert(result.len == 68) # 4 + 32 + 32

-proc makeFundingTx*(v: Vault, recipient: EthAddress, amount: UInt256): Transaction =
+proc makeFundingTx*(
+    v: Vault, recipient: EthAddress, amount: UInt256): PooledTransaction =
   let
     unsignedTx = Transaction(
       txType  : TxLegacy,
@@ -92,7 +93,8 @@ proc makeFundingTx*(v: Vault, recipient: EthAddress, amount: UInt256): Transacti
       payload : sendSome(recipient, amount)
     )

-  signTransaction(unsignedTx, v.vaultKey, v.chainId, eip155 = true)
+  PooledTransaction(
+    tx: signTransaction(unsignedTx, v.vaultKey, v.chainId, eip155 = true))

 proc signTx*(v: Vault,
              sender: EthAddress,
@@ -100,7 +102,7 @@ proc signTx*(v: Vault,
              recipient: EthAddress,
              amount: UInt256,
              gasLimit, gasPrice: GasInt,
-             payload: seq[byte] = @[]): Transaction =
+             payload: seq[byte] = @[]): PooledTransaction =

   let
     unsignedTx = Transaction(
|
||||||
)
|
)
|
||||||
|
|
||||||
let key = v.accounts[sender]
|
let key = v.accounts[sender]
|
||||||
signTransaction(unsignedTx, key, v.chainId, eip155 = true)
|
PooledTransaction(
|
||||||
|
tx: signTransaction(unsignedTx, key, v.chainId, eip155 = true))
|
||||||
|
|
||||||
# createAccount creates a new account that is funded from the vault contract.
|
# createAccount creates a new account that is funded from the vault contract.
|
||||||
# It will panic when the account could not be created and funded.
|
# It will panic when the account could not be created and funded.
|
||||||
|
|
|
@@ -188,17 +188,17 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
     let attrs = attrsOpt.get()
     validateVersion(attrs, com, apiVersion)

-    let payload = ben.generatePayload(attrs).valueOr:
+    let bundle = ben.generatePayload(attrs).valueOr:
       error "Failed to create sealing payload", err = error
       raise invalidAttr(error)

     let id = computePayloadId(blockHash, attrs)
-    ben.put(id, ben.blockValue, payload)
+    ben.put(id, ben.blockValue, bundle.executionPayload, bundle.blobsBundle)

     info "Created payload for sealing",
       id = id.toHex,
-      hash = payload.blockHash.short,
-      number = payload.blockNumber
+      hash = bundle.executionPayload.blockHash.short,
+      number = bundle.executionPayload.blockNumber

     return validFCU(some(id), blockHash)
@@ -26,14 +26,18 @@ proc getPayload*(ben: BeaconEngineRef,

   var payloadGeneric: ExecutionPayload
   var blockValue: UInt256
-  if not ben.get(id, blockValue, payloadGeneric):
+  var blobsBundle: Option[BlobsBundleV1]
+  if not ben.get(id, blockValue, payloadGeneric, blobsBundle):
     raise unknownPayload("Unknown payload")

   let version = payloadGeneric.version
   if version > expectedVersion:
     raise unsupportedFork("getPayload" & $expectedVersion &
       " expect ExecutionPayload" & $expectedVersion &
       " but get ExecutionPayload" & $version)
+  if blobsBundle.isSome:
+    raise unsupportedFork("getPayload" & $expectedVersion &
+      " contains unsupported BlobsBundleV1")

   GetPayloadV2Response(
     executionPayload: payloadGeneric.V1V2,
@@ -46,38 +50,25 @@ proc getPayloadV3*(ben: BeaconEngineRef, id: PayloadID): GetPayloadV3Response =

   var payloadGeneric: ExecutionPayload
   var blockValue: UInt256
-  if not ben.get(id, blockValue, payloadGeneric):
+  var blobsBundle: Option[BlobsBundleV1]
+  if not ben.get(id, blockValue, payloadGeneric, blobsBundle):
     raise unknownPayload("Unknown payload")

   let version = payloadGeneric.version
   if version != Version.V3:
     raise unsupportedFork("getPayloadV3 expect ExecutionPayloadV3 but get ExecutionPayload" & $version)
+  if blobsBundle.isNone:
+    raise unsupportedFork("getPayloadV3 is missing BlobsBundleV1")

   let payload = payloadGeneric.V3
   let com = ben.com
   if not com.isCancunOrLater(ethTime payload.timestamp):
     raise unsupportedFork("payload timestamp is less than Cancun activation")

-  var
-    blobsBundle: BlobsBundleV1
-
-  try:
-    for ttx in payload.transactions:
-      let tx = rlp.decode(distinctBase(ttx), Transaction)
-      if tx.networkPayload.isNil.not:
-        for blob in tx.networkPayload.blobs:
-          blobsBundle.blobs.add Web3Blob(blob)
-        for p in tx.networkPayload.proofs:
-          blobsBundle.proofs.add Web3KZGProof(p)
-        for k in tx.networkPayload.commitments:
-          blobsBundle.commitments.add Web3KZGCommitment(k)
-  except RlpError:
-    doAssert(false, "found TypedTransaction that RLP failed to decode")
-
   GetPayloadV3Response(
     executionPayload: payload,
     blockValue: blockValue,
-    blobsBundle: blobsBundle,
+    blobsBundle: blobsBundle.get,
     shouldOverrideBuilder: false
   )
@@ -87,37 +78,24 @@ proc getPayloadV4*(ben: BeaconEngineRef, id: PayloadID): GetPayloadV4Response =

   var payloadGeneric: ExecutionPayload
   var blockValue: UInt256
-  if not ben.get(id, blockValue, payloadGeneric):
+  var blobsBundle: Option[BlobsBundleV1]
+  if not ben.get(id, blockValue, payloadGeneric, blobsBundle):
     raise unknownPayload("Unknown payload")

   let version = payloadGeneric.version
   if version != Version.V4:
     raise unsupportedFork("getPayloadV4 expect ExecutionPayloadV4 but get ExecutionPayload" & $version)
+  if blobsBundle.isNone:
+    raise unsupportedFork("getPayloadV4 is missing BlobsBundleV1")

   let payload = payloadGeneric.V4
   let com = ben.com
   if not com.isPragueOrLater(ethTime payload.timestamp):
     raise unsupportedFork("payload timestamp is less than Prague activation")

-  var
-    blobsBundle: BlobsBundleV1
-
-  try:
-    for ttx in payload.transactions:
-      let tx = rlp.decode(distinctBase(ttx), Transaction)
-      if tx.networkPayload.isNil.not:
-        for blob in tx.networkPayload.blobs:
-          blobsBundle.blobs.add Web3Blob(blob)
-        for p in tx.networkPayload.proofs:
-          blobsBundle.proofs.add Web3KZGProof(p)
-        for k in tx.networkPayload.commitments:
-          blobsBundle.commitments.add Web3KZGCommitment(k)
-  except RlpError:
-    doAssert(false, "found TypedTransaction that RLP failed to decode")
-
   GetPayloadV4Response(
     executionPayload: payload,
     blockValue: blockValue,
-    blobsBundle: blobsBundle,
+    blobsBundle: blobsBundle.get,
     shouldOverrideBuilder: false
   )
@@ -118,20 +118,20 @@ proc newPayload*(ben: BeaconEngineRef,

     validatePayload(apiVersion, version, payload)
     validateVersion(com, timestamp, version, apiVersion)

-    var header = blockHeader(payload, removeBlobs = true, beaconRoot = ethHash beaconRoot)
+    var header = blockHeader(payload, beaconRoot = ethHash beaconRoot)

     if apiVersion >= Version.V3:
       if versionedHashes.isNone:
         raise invalidParams("newPayload" & $apiVersion &
           " expect blobVersionedHashes but got none")
       if not validateVersionedHashed(payload, versionedHashes.get):
         return invalidStatus(header.parentHash, "invalid blob versionedHashes")

     let blockHash = ethHash payload.blockHash
     header.validateBlockHash(blockHash, version).isOkOr:
       return error

     # If we already have the block locally, ignore the entire execution and just
     # return a fake success.
     if db.getBlockHeader(blockHash, header):
@@ -195,7 +195,7 @@ proc newPayload*(ben: BeaconEngineRef,

     trace "Inserting block without sethead",
       hash = blockHash, number = header.blockNumber
-    let body = blockBody(payload, removeBlobs = true)
+    let body = blockBody(payload)
     let vres = ben.chain.insertBlockWithoutSetHead(header, body)
     if vres != ValidationResult.OK:
       let blockHash = latestValidHash(db, parent, ttd)
@@ -8,6 +8,7 @@
 # those terms.

 import
+  std/sequtils,
   ./web3_eth_conv,
   ./payload_conv,
   web3/execution_types,
@@ -80,12 +81,22 @@ proc put*(ben: BeaconEngineRef,
   ben.queue.put(hash, header)

 proc put*(ben: BeaconEngineRef, id: PayloadID,
-          blockValue: UInt256, payload: ExecutionPayload) =
-  ben.queue.put(id, blockValue, payload)
+          blockValue: UInt256, payload: ExecutionPayload,
+          blobsBundle: Option[BlobsBundleV1]) =
+  ben.queue.put(id, blockValue, payload, blobsBundle)

 proc put*(ben: BeaconEngineRef, id: PayloadID,
-          blockValue: UInt256, payload: SomeExecutionPayload) =
-  ben.queue.put(id, blockValue, payload)
+          blockValue: UInt256, payload: SomeExecutionPayload,
+          blobsBundle: Option[BlobsBundleV1]) =
+  doAssert blobsBundle.isNone == (payload is
+    ExecutionPayloadV1 | ExecutionPayloadV2)
+  ben.queue.put(id, blockValue, payload, blobsBundle)
+
+proc put*(ben: BeaconEngineRef, id: PayloadID,
+          blockValue: UInt256,
+          payload: ExecutionPayloadV1 | ExecutionPayloadV2) =
+  ben.queue.put(
+    id, blockValue, payload, blobsBundle = options.none(BlobsBundleV1))

 # ------------------------------------------------------------------------------
 # Public functions, getters
@@ -115,8 +126,9 @@ proc get*(ben: BeaconEngineRef, hash: common.Hash256,

 proc get*(ben: BeaconEngineRef, id: PayloadID,
           blockValue: var UInt256,
-          payload: var ExecutionPayload): bool =
-  ben.queue.get(id, blockValue, payload)
+          payload: var ExecutionPayload,
+          blobsBundle: var Option[BlobsBundleV1]): bool =
+  ben.queue.get(id, blockValue, payload, blobsBundle)

 proc get*(ben: BeaconEngineRef, id: PayloadID,
           blockValue: var UInt256,
@@ -130,8 +142,9 @@ proc get*(ben: BeaconEngineRef, id: PayloadID,

 proc get*(ben: BeaconEngineRef, id: PayloadID,
           blockValue: var UInt256,
-          payload: var ExecutionPayloadV3): bool =
-  ben.queue.get(id, blockValue, payload)
+          payload: var ExecutionPayloadV3,
+          blobsBundle: var BlobsBundleV1): bool =
+  ben.queue.get(id, blockValue, payload, blobsBundle)

 proc get*(ben: BeaconEngineRef, id: PayloadID,
           blockValue: var UInt256,
@@ -142,9 +155,13 @@ proc get*(ben: BeaconEngineRef, id: PayloadID,
 # Public functions
 # ------------------------------------------------------------------------------

+type ExecutionPayloadAndBlobsBundle* = object
+  executionPayload*: ExecutionPayload
+  blobsBundle*: Option[BlobsBundleV1]
+
 proc generatePayload*(ben: BeaconEngineRef,
                       attrs: PayloadAttributes):
-                        Result[ExecutionPayload, string] =
+                        Result[ExecutionPayloadAndBlobsBundle, string] =
   wrapException:
     let
       xp = ben.txPool
@@ -168,12 +185,22 @@ proc generatePayload*(ben: BeaconEngineRef,
     if pos.timestamp <= headBlock.timestamp:
       return err "timestamp must be strictly later than parent"

-    # someBaseFee = true: make sure blk.header
+    # someBaseFee = true: make sure bundle.blk.header
     # have the same blockHash with generated payload
-    let blk = xp.assembleBlock(someBaseFee = true).valueOr:
+    let bundle = xp.assembleBlock(someBaseFee = true).valueOr:
       return err(error)

-    if blk.header.extraData.len > 32:
+    if bundle.blk.header.extraData.len > 32:
       return err "extraData length should not exceed 32 bytes"

-    ok(executionPayload(blk))
+    var blobsBundle: Option[BlobsBundleV1]
+    if bundle.blobsBundle.isSome:
+      template blobData: untyped = bundle.blobsBundle.get
+      blobsBundle = options.some BlobsBundleV1(
+        commitments: blobData.commitments.mapIt it.Web3KZGCommitment,
+        proofs: blobData.proofs.mapIt it.Web3KZGProof,
+        blobs: blobData.blobs.mapIt it.Web3Blob)
+
+    ok ExecutionPayloadAndBlobsBundle(
+      executionPayload: executionPayload(bundle.blk),
+      blobsBundle: blobsBundle)
@@ -28,10 +28,10 @@ func wdRoot(x: Option[seq[WithdrawalV1]]): Option[common.Hash256]
   if x.isNone: none(common.Hash256)
   else: some(wdRoot x.get)

-func txRoot(list: openArray[Web3Tx], removeBlobs: bool): common.Hash256
+func txRoot(list: openArray[Web3Tx]): common.Hash256
     {.gcsafe, raises:[RlpError].} =
   {.noSideEffect.}:
-    calcTxRoot(ethTxs(list, removeBlobs))
+    calcTxRoot(ethTxs(list))

 # ------------------------------------------------------------------------------
 # Public functions
@@ -80,15 +80,14 @@ func executionPayloadV1V2*(blk: EthBlock): ExecutionPayloadV1OrV2 =
   )

 func blockHeader*(p: ExecutionPayload,
-                  removeBlobs: bool,
                   beaconRoot: Option[common.Hash256]):
                     common.BlockHeader {.gcsafe, raises:[CatchableError].} =
   common.BlockHeader(
     parentHash  : ethHash p.parentHash,
     ommersHash  : EMPTY_UNCLE_HASH,
     coinbase    : ethAddr p.feeRecipient,
     stateRoot   : ethHash p.stateRoot,
-    txRoot      : txRoot(p.transactions, removeBlobs),
+    txRoot      : txRoot p.transactions,
     receiptRoot : ethHash p.receiptsRoot,
     bloom       : ethBloom p.logsBloom,
     difficulty  : 0.u256,
@@ -106,21 +105,20 @@ func blockHeader*(p: ExecutionPayload,
     parentBeaconBlockRoot: beaconRoot
   )

-func blockBody*(p: ExecutionPayload, removeBlobs: bool):
+func blockBody*(p: ExecutionPayload):
     common.BlockBody {.gcsafe, raises:[RlpError].} =
   common.BlockBody(
     uncles      : @[],
-    transactions: ethTxs(p.transactions, removeBlobs),
+    transactions: ethTxs p.transactions,
     withdrawals : ethWithdrawals p.withdrawals,
   )

 func ethBlock*(p: ExecutionPayload,
-               removeBlobs: bool,
                beaconRoot: Option[common.Hash256]):
                  common.EthBlock {.gcsafe, raises:[CatchableError].} =
-  common.Ethblock(
-    header     : blockHeader(p, removeBlobs, beaconRoot),
+  common.EthBlock(
+    header     : blockHeader(p, beaconRoot),
     uncles     : @[],
-    txs        : ethTxs(p.transactions, removeBlobs),
+    txs        : ethTxs p.transactions,
     withdrawals: ethWithdrawals p.withdrawals,
   )
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2022-2023 Status Research & Development GmbH
+# Copyright (c) 2022-2024 Status Research & Development GmbH
 # Licensed under either of
 #  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
 #  * MIT license ([LICENSE-MIT](LICENSE-MIT))
@@ -35,6 +35,7 @@ type
     id: PayloadID
     payload: ExecutionPayload
     blockValue: UInt256
+    blobsBundle: Option[BlobsBundleV1]

   HeaderItem = object
     hash: common.Hash256
@@ -71,13 +72,22 @@ proc put*(api: var PayloadQueue,
   api.headerQueue.put(HeaderItem(hash: hash, header: header))

 proc put*(api: var PayloadQueue, id: PayloadID,
-          blockValue: UInt256, payload: ExecutionPayload) =
+          blockValue: UInt256, payload: ExecutionPayload,
+          blobsBundle: Option[BlobsBundleV1]) =
   api.payloadQueue.put(PayloadItem(id: id,
-    payload: payload, blockValue: blockValue))
+    payload: payload, blockValue: blockValue, blobsBundle: blobsBundle))

 proc put*(api: var PayloadQueue, id: PayloadID,
-          blockValue: UInt256, payload: SomeExecutionPayload) =
-  api.put(id, blockValue, payload.executionPayload)
+          blockValue: UInt256, payload: SomeExecutionPayload,
+          blobsBundle: Option[BlobsBundleV1]) =
+  doAssert blobsBundle.isNone == (payload is
+    ExecutionPayloadV1 | ExecutionPayloadV2)
+  api.put(id, blockValue, payload.executionPayload, blobsBundle = blobsBundle)
+
+proc put*(api: var PayloadQueue, id: PayloadID,
+          blockValue: UInt256,
+          payload: ExecutionPayloadV1 | ExecutionPayloadV2) =
+  api.put(id, blockValue, payload, blobsBundle = options.none(BlobsBundleV1))

 # ------------------------------------------------------------------------------
 # Public functions, getters
@@ -93,46 +103,66 @@ proc get*(api: PayloadQueue, hash: common.Hash256,

proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256,
-payload: var ExecutionPayload): bool =
+payload: var ExecutionPayload,
+blobsBundle: var Option[BlobsBundleV1]): bool =
for x in api.payloadQueue:
if x.id == id:
payload = x.payload
blockValue = x.blockValue
+blobsBundle = x.blobsBundle
return true
false

proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayloadV1): bool =
-var p: ExecutionPayload
-let found = api.get(id, blockValue, p)
-doAssert(p.version == Version.V1)
-payload = p.V1
+var
+p: ExecutionPayload
+blobsBundleOpt: Option[BlobsBundleV1]
+let found = api.get(id, blockValue, p, blobsBundleOpt)
+if found:
+doAssert(p.version == Version.V1)
+payload = p.V1
+doAssert(blobsBundleOpt.isNone)
return found

proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayloadV2): bool =
-var p: ExecutionPayload
-let found = api.get(id, blockValue, p)
-doAssert(p.version == Version.V2)
-payload = p.V2
+var
+p: ExecutionPayload
+blobsBundleOpt: Option[BlobsBundleV1]
+let found = api.get(id, blockValue, p, blobsBundleOpt)
+if found:
+doAssert(p.version == Version.V2)
+payload = p.V2
+doAssert(blobsBundleOpt.isNone)
return found

proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256,
-payload: var ExecutionPayloadV3): bool =
-var p: ExecutionPayload
-let found = api.get(id, blockValue, p)
-doAssert(p.version == Version.V3)
-payload = p.V3
+payload: var ExecutionPayloadV3,
+blobsBundle: var BlobsBundleV1): bool =
+var
+p: ExecutionPayload
+blobsBundleOpt: Option[BlobsBundleV1]
+let found = api.get(id, blockValue, p, blobsBundleOpt)
+if found:
+doAssert(p.version == Version.V3)
+payload = p.V3
+doAssert(blobsBundleOpt.isSome)
+blobsBundle = blobsBundleOpt.unsafeGet
return found

proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayloadV1OrV2): bool =
-var p: ExecutionPayload
-let found = api.get(id, blockValue, p)
-doAssert(p.version in {Version.V1, Version.V2})
-payload = p.V1V2
+var
+p: ExecutionPayload
+blobsBundleOpt: Option[BlobsBundleV1]
+let found = api.get(id, blockValue, p, blobsBundleOpt)
+if found:
+doAssert(p.version in {Version.V1, Version.V2})
+payload = p.V1V2
+doAssert(blobsBundleOpt.isNone)
return found
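Purely for illustration, not part of the commit: a minimal sketch of driving the extended queue API above; `api` (a PayloadQueue), `id`, `blockValue`, `payloadV3` and `bundle` are assumed to come from the payload builder.

# Sketch only; assumed identifiers: api, id, blockValue, payloadV3, bundle.
api.put(id, blockValue, payloadV3, blobsBundle = options.some(bundle))

var
  value: UInt256
  restored: ExecutionPayloadV3
  restoredBundle: BlobsBundleV1
if api.get(id, value, restored, restoredBundle):
  # The V1/V2 getters assert that no bundle is stored; the V3 getter requires one.
  discard restoredBundle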
@@ -151,15 +151,11 @@ func ethWithdrawals*(x: Option[seq[WithdrawalV1]]):
func ethTx*(x: Web3Tx): common.Transaction {.gcsafe, raises:[RlpError].} =
result = rlp.decode(distinctBase x, common.Transaction)

-func ethTxs*(list: openArray[Web3Tx], removeBlobs = false):
+func ethTxs*(list: openArray[Web3Tx]):
seq[common.Transaction] {.gcsafe, raises:[RlpError].} =
result = newSeqOfCap[common.Transaction](list.len)
-if removeBlobs:
-for x in list:
-result.add ethTx(x).removeNetworkPayload
-else:
-for x in list:
-result.add ethTx(x)
+for x in list:
+result.add ethTx(x)

func storageKeys(list: seq[FixedBytes[32]]): seq[StorageKey] =
for x in list:
@@ -167,17 +167,17 @@ func validateEip4844Header*(

return ok()

-proc validateBlobTransactionWrapper*(tx: Transaction):
+proc validateBlobTransactionWrapper*(tx: PooledTransaction):
Result[void, string] {.raises: [].} =
if tx.networkPayload.isNil:
return err("tx wrapper is none")

# note: assert blobs are not malformatted
-let goodFormatted = tx.versionedHashes.len ==
+let goodFormatted = tx.tx.versionedHashes.len ==
tx.networkPayload.commitments.len and
-tx.versionedHashes.len ==
+tx.tx.versionedHashes.len ==
tx.networkPayload.blobs.len and
-tx.versionedHashes.len ==
+tx.tx.versionedHashes.len ==
tx.networkPayload.proofs.len

if not goodFormatted:

@@ -194,12 +194,13 @@ proc validateBlobTransactionWrapper*(tx: Transaction):
return err("Failed to verify network payload of a transaction")

# Now that all commitments have been verified, check that versionedHashes matches the commitments
-for i in 0 ..< tx.versionedHashes.len:
+for i in 0 ..< tx.tx.versionedHashes.len:
# this additional check also done in tx validation
-if tx.versionedHashes[i].data[0] != VERSIONED_HASH_VERSION_KZG:
+if tx.tx.versionedHashes[i].data[0] != VERSIONED_HASH_VERSION_KZG:
return err("wrong kzg version in versioned hash at index " & $i)

-if tx.versionedHashes[i] != kzgToVersionedHash(tx.networkPayload.commitments[i]):
+if tx.tx.versionedHashes[i] !=
+kzgToVersionedHash(tx.networkPayload.commitments[i]):
return err("tx versioned hash not match commitments at index " & $i)

ok()
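As an illustration of the wrapper type checked above, not part of the commit: building a PooledTransaction from a signed EIP-4844 transaction and its sidecar and validating it before pool admission; `signedTx`, `blobs`, `commitments` and `proofs` are assumed inputs.

# Sketch only; `signedTx`, `blobs`, `commitments`, `proofs` are assumed inputs.
let ptx = PooledTransaction(
  tx: signedTx,
  networkPayload: NetworkPayload(
    blobs: blobs,
    commitments: commitments,
    proofs: proofs))
let check = ptx.validateBlobTransactionWrapper()
if check.isErr:
  echo "blob transaction rejected: ", check.error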
@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2018-2023 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))

@@ -61,8 +61,9 @@ proc validateSealer*(conf: NimbusConf, ctx: EthContext, chain: ChainRef): Result
proc generateBlock(engine: SealingEngineRef,
outBlock: var EthBlock): Result[void, string] =

-outBlock = engine.txPool.assembleBlock().valueOr:
+let bundle = engine.txPool.assembleBlock().valueOr:
return err(error)
+outBlock = bundle.blk

if engine.chain.com.consensus == ConsensusType.POS:
# Stop the block generator if we reach TTD
@@ -423,7 +423,7 @@
##

import
-std/[sequtils, tables],
+std/[options, sequtils, tables],
./tx_pool/[tx_chain, tx_desc, tx_info, tx_item],
./tx_pool/tx_tabs,
./tx_pool/tx_tasks/[

@@ -517,7 +517,7 @@ proc new*(T: type TxPoolRef; com: CommonRef; miner: EthAddress): T

# core/tx_pool.go(848): func (pool *TxPool) AddLocals(txs []..
# core/tx_pool.go(864): func (pool *TxPool) AddRemotes(txs []..
-proc add*(xp: TxPoolRef; txs: openArray[Transaction]; info = "")
+proc add*(xp: TxPoolRef; txs: openArray[PooledTransaction]; info = "")
{.gcsafe,raises: [CatchableError].} =
## Add a list of transactions to be processed and added to the buckets
## database. It is OK pass an empty list in which case some maintenance

@@ -533,7 +533,7 @@ proc add*(xp: TxPoolRef; txs: openArray[Transaction]; info = "")

# core/tx_pool.go(854): func (pool *TxPool) AddLocals(txs []..
# core/tx_pool.go(883): func (pool *TxPool) AddRemotes(txs []..
-proc add*(xp: TxPoolRef; tx: Transaction; info = "")
+proc add*(xp: TxPoolRef; tx: PooledTransaction; info = "")
{.gcsafe,raises: [CatchableError].} =
## Variant of `add()` for a single transaction.
xp.add(@[tx], info)
@@ -607,8 +607,14 @@ proc dirtyBuckets*(xp: TxPoolRef): bool =
## flag is also set.
xp.pDirtyBuckets

-proc assembleBlock*(xp: TxPoolRef, someBaseFee: bool = false): Result[EthBlock, string]
-{.gcsafe,raises: [CatchableError].} =
+type EthBlockAndBlobsBundle* = object
+blk*: EthBlock
+blobsBundle*: Option[BlobsBundle]
+
+proc assembleBlock*(
+xp: TxPoolRef,
+someBaseFee: bool = false
+): Result[EthBlockAndBlobsBundle, string] {.gcsafe,raises: [CatchableError].} =
## Getter, retrieves a packed block ready for mining and signing depending
## on the internally cached block chain head, the txs in the pool and some
## tuning parameters. The following block header fields are left

@@ -627,19 +633,41 @@ proc assembleBlock*(xp: TxPoolRef, someBaseFee: bool = false): Result[EthBlock,
var blk = EthBlock(
header: xp.chain.getHeader # uses updated vmState
)
+var blobsBundle: BlobsBundle

-for (_,nonceList) in xp.txDB.packingOrderAccounts(txItemPacked):
-blk.txs.add toSeq(nonceList.incNonce).mapIt(it.tx)
+for _, nonceList in xp.txDB.packingOrderAccounts(txItemPacked):
+for item in nonceList.incNonce:
+let tx = item.pooledTx
+blk.txs.add tx.tx
+if tx.networkPayload != nil:
+for k in tx.networkPayload.commitments:
+blobsBundle.commitments.add k
+for p in tx.networkPayload.proofs:
+blobsBundle.proofs.add p
+for blob in tx.networkPayload.blobs:
+blobsBundle.blobs.add blob

let com = xp.chain.com
if com.forkGTE(Shanghai):
blk.withdrawals = some(com.pos.withdrawals)

+if not com.forkGTE(Cancun) and blobsBundle.commitments.len > 0:
+return err("PooledTransaction contains blobs prior to Cancun")
+let blobsBundleOpt =
+if com.forkGTE(Cancun):
+doAssert blobsBundle.commitments.len == blobsBundle.blobs.len
+doAssert blobsBundle.proofs.len == blobsBundle.blobs.len
+options.some blobsBundle
+else:
+options.none BlobsBundle

if someBaseFee:
# make sure baseFee always has something
blk.header.fee = some(blk.header.fee.get(0.u256))

-ok(blk)
+ok EthBlockAndBlobsBundle(
+blk: blk,
+blobsBundle: blobsBundleOpt)

proc gasCumulative*(xp: TxPoolRef): GasInt =
## Getter, retrieves the gas that will be burned in the block after
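For illustration only, not part of the commit: a caller-side sketch of the new `assembleBlock` result, mirroring the sealer change earlier in this commit; the wrapping proc is hypothetical.

# Sketch only; mirrors the generateBlock usage shown above.
proc buildBlock(xp: TxPoolRef): Result[EthBlock, string] =
  let bundle = xp.assembleBlock().valueOr:
    return err(error)
  if bundle.blobsBundle.isSome:
    # post-Cancun the bundle would be returned alongside the payload
    discard bundle.blobsBundle.get
  ok(bundle.blk)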
@@ -856,7 +884,7 @@ proc accountRanks*(xp: TxPoolRef): TxTabsLocality =
xp.txDB.locality

proc addRemote*(xp: TxPoolRef;
-tx: Transaction; force = false): Result[void,TxInfo]
+tx: PooledTransaction; force = false): Result[void,TxInfo]
{.gcsafe,raises: [CatchableError].} =
## Adds the argument transaction `tx` to the buckets database.
##

@@ -890,7 +918,7 @@ proc addRemote*(xp: TxPoolRef;
ok()

proc addLocal*(xp: TxPoolRef;
-tx: Transaction; force = false): Result[void,TxInfo]
+tx: PooledTransaction; force = false): Result[void,TxInfo]
{.gcsafe,raises: [CatchableError].} =
## Adds the argument transaction `tx` to the buckets database.
##
@@ -42,7 +42,7 @@ type
TxItemRef* = ref object of RootObj ##\
## Data container with transaction and meta data. Entries are *read-only*\
## by default, for some there is a setter available.
-tx: Transaction ## Transaction data
+tx: PooledTransaction ## Transaction data
itemID: Hash256 ## Transaction hash
timeStamp: Time ## Time when added
sender: EthAddress ## Sender account address

@@ -112,10 +112,10 @@ proc init*(item: TxItemRef; status: TxItemStatus; info: string) =
item.timeStamp = utcTime()
item.reject = txInfoOk

-proc new*(T: type TxItemRef; tx: Transaction; itemID: Hash256;
+proc new*(T: type TxItemRef; tx: PooledTransaction; itemID: Hash256;
status: TxItemStatus; info: string): Result[T,void] {.gcsafe,raises: [].} =
## Create item descriptor.
-let rc = tx.ecRecover
+let rc = tx.tx.ecRecover
if rc.isErr:
return err()
ok(T(itemID: itemID,

@@ -125,7 +125,7 @@ proc new*(T: type TxItemRef; tx: Transaction; itemID: Hash256;
info: info,
status: status))

-proc new*(T: type TxItemRef; tx: Transaction;
+proc new*(T: type TxItemRef; tx: PooledTransaction;
reject: TxInfo; status: TxItemStatus; info: string): T {.gcsafe,raises: [].} =
## Create incomplete item descriptor, so meta-data can be stored (e.g.
## for holding in the waste basket to be investigated later.)

@@ -150,6 +150,10 @@ proc itemID*(tx: Transaction): Hash256 =
## Getter, transaction ID
tx.rlpHash

+proc itemID*(tx: PooledTransaction): Hash256 =
+## Getter, transaction ID
+tx.tx.rlpHash
+
# core/types/transaction.go(297): func (tx *Transaction) Cost() *big.Int {
proc cost*(tx: Transaction): UInt256 =
## Getter (go/ref compat): gas * gasPrice + value.

@@ -210,10 +214,14 @@ proc timeStamp*(item: TxItemRef): Time =
## Getter
item.timeStamp

-proc tx*(item: TxItemRef): Transaction =
+proc pooledTx*(item: TxItemRef): PooledTransaction =
## Getter
item.tx

+proc tx*(item: TxItemRef): Transaction =
+## Getter
+item.tx.tx
+
func rejectInfo*(item: TxItemRef): string =
## Getter
result = $item.reject
@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -138,7 +138,7 @@ proc new*(T: type TxTabsRef): T {.gcsafe,raises: [].} =

proc insert*(
xp: TxTabsRef;
-tx: var Transaction;
+tx: var PooledTransaction;
status = txItemPending;
info = ""): Result[void,TxInfo]
{.gcsafe,raises: [CatchableError].} =

@@ -221,7 +221,7 @@ proc dispose*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo): bool
xp.byRejects[item.itemID] = item
return true

-proc reject*(xp: TxTabsRef; tx: var Transaction;
+proc reject*(xp: TxTabsRef; tx: var PooledTransaction;
reason: TxInfo; status = txItemPending; info = "") =
## Similar to dispose but for a tx without the item wrapper, the function
## imports the tx into the waste basket (e.g. after it could not

@@ -239,7 +239,7 @@ proc reject*(xp: TxTabsRef; item: TxItemRef; reason: TxInfo) =
item.reject = reason
xp.byRejects[item.itemID] = item

-proc reject*(xp: TxTabsRef; tx: Transaction;
+proc reject*(xp: TxTabsRef; tx: PooledTransaction;
reason: TxInfo; status = txItemPending; info = "") =
## Variant of `reject()`
var ty = tx
@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -160,7 +160,7 @@ proc addTx*(xp: TxPoolRef; item: TxItemRef): bool
# core/tx_pool.go(883): func (pool *TxPool) AddRemotes(txs []..
# core/tx_pool.go(889): func (pool *TxPool) addTxs(txs []*types.Transaction, ..
proc addTxs*(xp: TxPoolRef;
-txs: openArray[Transaction]; info = ""): TxAddStats
+txs: openArray[PooledTransaction]; info = ""): TxAddStats
{.discardable,gcsafe,raises: [CatchableError].} =
## Add a list of transactions. The list is sorted after nonces and txs are
## tested and stored into either of the `pending` or `staged` buckets, or

@@ -181,7 +181,7 @@ proc addTxs*(xp: TxPoolRef;
for tx in txs.items:
var reason: TxInfo

-if tx.txType == TxEip4844:
+if tx.tx.txType == TxEip4844:
let res = tx.validateBlobTransactionWrapper()
if res.isErr:
# move item to waste basket
@@ -38,7 +38,7 @@ logScope:

proc checkTxBasic(xp: TxPoolRef; item: TxItemRef): bool =
let res = validateTxBasic(
-item.tx.removeNetworkPayload,
+item.tx,
xp.chain.nextFork,
# A new transaction of the next fork may be
# coming before the fork activated

@@ -234,7 +234,8 @@ proc classifyValidatePacked*(xp: TxPoolRef;
tx = item.tx.eip1559TxNormalization(xp.chain.baseFee.GasInt)
excessBlobGas = calcExcessBlobGas(vmState.parent)

-roDB.validateTransaction(tx.removeNetworkPayload, item.sender, gasLimit, baseFee, excessBlobGas, fork).isOk
+roDB.validateTransaction(
+tx, item.sender, gasLimit, baseFee, excessBlobGas, fork).isOk

proc classifyPacked*(xp: TxPoolRef; gasBurned, moreBurned: GasInt): bool =
## Classifier for *packing* (i.e. adding up `gasUsed` values after executing
@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -31,7 +31,7 @@ type
## Diff data, txs changes that apply after changing the head\
## insertion point of the block chain

-addTxs*: KeyedQueue[Hash256,Transaction] ##\
+addTxs*: KeyedQueue[Hash256, PooledTransaction] ##\
## txs to add; using a queue makes it more intuive to delete
## items while travesing the queue in a loop.

@@ -50,7 +50,13 @@ proc insert(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
{.gcsafe,raises: [CatchableError].} =
let db = xp.chain.com.db
for tx in db.getBlockBody(blockHash).transactions:
-kq.addTxs[tx.itemID] = tx
+if tx.versionedHashes.len > 0:
+# EIP-4844 blobs are not persisted and cannot be re-broadcasted.
+# Note that it is also not possible to crete a cache in all cases,
+# as we may have never seen the actual blob sidecar while syncing.
+# Only the consensus layer persists the blob sidecar.
+continue
+kq.addTxs[tx.itemID] = PooledTransaction(tx: tx)

proc remove(xp: TxPoolRef; kq: TxHeadDiffRef; blockHash: Hash256)
{.gcsafe,raises: [CatchableError].} =

@@ -141,7 +141,7 @@ proc runTxCommit(pst: TxPackerStateRef; item: TxItemRef; gasBurned: GasInt)
pst.blobGasUsed += item.tx.getTotalBlobGas

# Update txRoot
-pst.tr.put(rlp.encode(inx), rlp.encode(item.tx.removeNetworkPayload))
+pst.tr.put(rlp.encode(inx), rlp.encode(item.tx))

# Add the item to the `packed` bucket. This implicitely increases the
# receipts index `inx` at the next visit of this function.
@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -35,7 +35,7 @@ let
# Public functions
# ------------------------------------------------------------------------------

-proc recoverItem*(xp: TxPoolRef; tx: Transaction; status = txItemPending;
+proc recoverItem*(xp: TxPoolRef; tx: PooledTransaction; status = txItemPending;
info = ""; acceptExisting = false): Result[TxItemRef,TxInfo] =
## Recover item from waste basket or create new. It is an error if the item
## is in the buckets database, already.

@@ -267,9 +267,6 @@ proc validateTxBasic*(
"index=$1, len=$2" % [$i, $acl.storageKeys.len])

if tx.txType >= TxEip4844:
-if tx.networkPayload.isNil.not:
-return err("invalid tx: network payload should not appear in block validation")

if tx.to.isNone:
return err("invalid tx: destination must be not empty")
@@ -558,8 +558,8 @@ proc persistTransactions*(
for idx, tx in transactions:
let
encodedKey = rlp.encode(idx)
-encodedTx = rlp.encode(tx.removeNetworkPayload)
-txHash = rlpHash(tx) # beware EIP-4844
+encodedTx = rlp.encode(tx)
+txHash = rlpHash(tx)
blockKey = transactionHashToBlockKey(txHash)
txKey: TransactionKey = (blockNumber, idx)
mpt.merge(encodedKey, encodedTx).isOkOr:

@@ -1364,8 +1364,8 @@ proc sendRawTransaction(ud: RootRef, params: Args, parent: Node): RespResult {.a
let ctx = GraphqlContextRef(ud)
try:
let data = hexToSeqByte(params[0].val.stringVal)
-let tx = decodeTx(data) # we want to know if it is a valid tx blob
-let txHash = rlpHash(tx) # beware EIP-4844
+let tx = decodePooledTx(data) # we want to know if it is a valid tx blob
+let txHash = rlpHash(tx)

ctx.txPool.add(tx)
@@ -10,7 +10,7 @@
{.push raises: [].}

import
-std/[times, tables, typetraits],
+std/[sequtils, times, tables, typetraits],
json_rpc/rpcserver, stint, stew/byteutils,
json_serialization, web3/conversions, json_serialization/std/options,
eth/common/eth_types_json_serialization,

@@ -282,8 +282,27 @@ proc setupEthRpc*(
tx = unsignedTx(data, chainDB, accDB.getNonce(address) + 1)
eip155 = com.isEIP155(com.syncCurrent)
signedTx = signTransaction(tx, acc.privateKey, com.chainId, eip155)
+networkPayload =
+if signedTx.txType == TxEip4844:
+if data.blobs.isNone or data.commitments.isNone or data.proofs.isNone:
+raise newException(ValueError, "EIP-4844 transaction needs blobs")
+if data.blobs.get.len != signedTx.versionedHashes.len:
+raise newException(ValueError, "Incorrect number of blobs")
+if data.commitments.get.len != signedTx.versionedHashes.len:
+raise newException(ValueError, "Incorrect number of commitments")
+if data.proofs.get.len != signedTx.versionedHashes.len:
+raise newException(ValueError, "Incorrect number of proofs")
+NetworkPayload(
+blobs: data.blobs.get.mapIt it.NetworkBlob,
+commitments: data.commitments.get.mapIt eth_types.KzgCommitment(it),
+proofs: data.proofs.get.mapIt eth_types.KzgProof(it))
+else:
+if data.blobs.isSome or data.commitments.isSome or data.proofs.isSome:
+raise newException(ValueError, "Blobs require EIP-4844 transaction")
+nil
+pooledTx = PooledTransaction(tx: signedTx, networkPayload: networkPayload)

-txPool.add(signedTx)
+txPool.add(pooledTx)
result = rlpHash(signedTx).w3Hash

server.rpc("eth_sendRawTransaction") do(txBytes: seq[byte]) -> Web3Hash:

@@ -293,10 +312,10 @@ proc setupEthRpc*(
## Returns the transaction hash, or the zero hash if the transaction is not yet available.
## Note: Use eth_getTransactionReceipt to get the contract address, after the transaction was mined, when you created a contract.
let
-signedTx = decodeTx(txBytes)
-txHash = rlpHash(signedTx)
+pooledTx = decodePooledTx(txBytes)
+txHash = rlpHash(pooledTx)

-txPool.add(signedTx)
+txPool.add(pooledTx)
let res = txPool.inPoolAndReason(txHash)
if res.isErr:
raise newException(ValueError, res.error)
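For illustration only, not part of the commit: the resulting submission path for a raw pooled transaction; `txBytes` and `txPool` are assumed, the calls are the ones used in the handler above.

# Sketch only; `txBytes` and `txPool` are assumed inputs.
let pooledTx = decodePooledTx(txBytes)   # raises RlpError on malformed input
txPool.add(pooledTx)
let txHash = rlpHash(pooledTx)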
@@ -442,14 +442,14 @@ method getReceipts*(ctx: EthWireRef,

method getPooledTxs*(ctx: EthWireRef,
hashes: openArray[Hash256]):
-Result[seq[Transaction], string]
+Result[seq[PooledTransaction], string]
{.gcsafe.} =
let txPool = ctx.txPool
-var list: seq[Transaction]
+var list: seq[PooledTransaction]
for txHash in hashes:
let res = txPool.getItem(txHash)
if res.isOk:
-list.add res.value.tx
+list.add res.value.pooledTx
else:
trace "handlers.getPooledTxs: tx not found", txHash
ok(list)

@@ -522,7 +522,11 @@ method handleAnnouncedTxs*(ctx: EthWireRef,
txHashes.add rlpHash(tx)

ctx.addToKnownByPeer(txHashes, peer)
-ctx.txPool.add(txs)
+for tx in txs:
+if tx.versionedHashes.len > 0:
+# EIP-4844 blobs are not persisted and cannot be broadcasted
+continue
+ctx.txPool.add PooledTransaction(tx: tx)

var newTxHashes = newSeqOfCap[Hash256](txHashes.len)
var validTxs = newSeqOfCap[Transaction](txHashes.len)

@@ -65,7 +65,7 @@ method getReceipts*(ctx: EthWireBase,

method getPooledTxs*(ctx: EthWireBase,
hashes: openArray[Hash256]):
-Result[seq[Transaction], string]
+Result[seq[PooledTransaction], string]
{.base, gcsafe.} =
notImplemented("getPooledTxs")
@@ -266,7 +266,8 @@ p2pProtocol eth66(version = ethVersion,
await response.send(txs.get)

# User message 0x0a: PooledTransactions.
-proc pooledTransactions(peer: Peer, transactions: openArray[Transaction])
+proc pooledTransactions(
+peer: Peer, transactions: openArray[PooledTransaction])

nextId 0x0d

@@ -267,7 +267,8 @@ p2pProtocol eth67(version = ethVersion,
await response.send(txs.get)

# User message 0x0a: PooledTransactions.
-proc pooledTransactions(peer: Peer, transactions: openArray[Transaction])
+proc pooledTransactions(
+peer: Peer, transactions: openArray[PooledTransaction])

# User message 0x0d: GetNodeData -- removed, was so 66ish
# User message 0x0e: NodeData -- removed, was so 66ish

@@ -270,7 +270,8 @@ p2pProtocol eth68(version = ethVersion,
await response.send(txs.get)

# User message 0x0a: PooledTransactions.
-proc pooledTransactions(peer: Peer, transactions: openArray[Transaction])
+proc pooledTransactions(
+peer: Peer, transactions: openArray[PooledTransaction])

# User message 0x0d: GetNodeData -- removed, was so 66ish
# User message 0x0e: NodeData -- removed, was so 66ish
@@ -233,3 +233,9 @@ proc decodeTx*(bytes: openArray[byte]): Transaction =
result = rlp.read(Transaction)
if rlp.hasData:
raise newException(RlpError, "rlp: input contains more than one value")
+
+proc decodePooledTx*(bytes: openArray[byte]): PooledTransaction =
+var rlp = rlpFromBytes(bytes)
+result = rlp.read(PooledTransaction)
+if rlp.hasData:
+raise newException(RlpError, "rlp: input contains more than one value")
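For illustration only, not part of the commit: a round-trip sketch for the new decoder; `ptx` is an assumed PooledTransaction, and encoding it via `rlp.encode` is an assumption about the nim-eth codec that `decodePooledTx` relies on.

# Sketch only; `ptx` is assumed, the encode side is an assumption.
let bytes = rlp.encode(ptx)
let again = decodePooledTx(bytes)
doAssert again.tx.itemID == ptx.tx.itemID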
@@ -134,7 +134,12 @@ proc debug*(tx: Transaction): string =
result.add "accessList : " & $tx.accessList & "\n"
result.add "maxFeePerBlobGas: " & $tx.maxFeePerBlobGas & "\n"
result.add "versionedHashes.len: " & $tx.versionedHashes.len & "\n"
+result.add "V : " & $tx.V & "\n"
+result.add "R : " & $tx.R & "\n"
+result.add "S : " & $tx.S & "\n"
+
+proc debug*(tx: PooledTransaction): string =
+result.add debug(tx.tx)
if tx.networkPayload.isNil:
result.add "networkPaylod : nil\n"
else:

@@ -143,10 +148,6 @@ proc debug*(tx: Transaction): string =
result.add " - commitments : " & $tx.networkPayload.commitments.len & "\n"
result.add " - proofs : " & $tx.networkPayload.proofs.len & "\n"

-result.add "V : " & $tx.V & "\n"
-result.add "R : " & $tx.R & "\n"
-result.add "S : " & $tx.S & "\n"

proc debugSum*(h: BlockHeader): string =
result.add "txRoot : " & $h.txRoot & "\n"
result.add "ommersHash : " & $h.ommersHash & "\n"
@@ -28,6 +28,7 @@ proc initLib() =
nimGC_setStackBottom(locals)

proc runContext(ctx: ptr Context) {.thread.} =
+const defaultListenAddress = (static parseIpAddress("0.0.0.0"))
let str = $ctx.configJson
try:
let jsonNode = parseJson(str)

@@ -35,7 +36,7 @@ proc runContext(ctx: ptr Context) {.thread.} =
let rpcAddr = jsonNode["RpcAddress"].getStr()
let myConfig = VerifiedProxyConf(
rpcAddress: parseIpAddress(rpcAddr),
-listenAddress: defaultListenAddress,
+listenAddress: some(defaultListenAddress),
eth2Network: some(jsonNode["Eth2Network"].getStr()),
trustedBlockRoot: Eth2Digest.fromHex(jsonNode["TrustedBlockRoot"].getStr()),
web3Url: parseCmdArg(Web3Url, jsonNode["Web3Url"].getStr()),

@@ -108,10 +108,8 @@ type VerifiedProxyConf* = object # Config

listenAddress* {.
desc: "Listening address for the Ethereum LibP2P and Discovery v5 traffic",
-defaultValue: defaultListenAddress,
-defaultValueDesc: $defaultListenAddressDesc,
name: "listen-address"
-.}: IpAddress
+.}: Option[IpAddress]

tcpPort* {.
desc: "Listening TCP port for Ethereum LibP2P traffic",
@@ -34,9 +34,7 @@ type ExecutionData* = object
transactions*: seq[TypedTransaction]
withdrawals*: seq[WithdrawalV1]

-proc asExecutionData*(
-payload: ExecutionPayloadV1 | ExecutionPayloadV2 | ExecutionPayloadV3
-): ExecutionData =
+proc asExecutionData*(payload: SomeExecutionPayload): ExecutionData =
when payload is ExecutionPayloadV1:
return ExecutionData(
parentHash: payload.parentHash,

@@ -100,7 +100,6 @@ func pp*(t: Transaction; sep = " "): string =
&"accessList=[#{t.accessList.len}]{sep}" &
&"maxFeePerBlobGas={t.maxFeePerBlobGas}{sep}" &
&"versionedHashes=[#{t.versionedHashes.len}]{sep}" &
-&"networkPayload={t.networkPayload.pp}{sep}" &
&"V={t.V}{sep}" &
&"R={t.R}{sep}" &
&"S={t.S}{sep}"
@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2023 Status Research & Development GmbH
+# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

@@ -16,11 +16,9 @@ import

const
recipient = hexToByteArray[20]("095e7baea6a6c7c4c2dfeb977efac326af552d87")
-zeroG1 = hexToByteArray[48]("0xc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
source = hexToByteArray[20]("0x0000000000000000000000000000000000000001")
storageKey= default(StorageKey)
accesses = @[AccessPair(address: source, storageKeys: @[storageKey])]
-blob = default(NetworkBlob)
abcdef = hexToSeqByte("abcdef")
hexKey = "af1a9be9f1a54421cac82943820a0fe0f601bb5f4f6d0bccc81c613f0ce6ae22"
senderTop = hexToByteArray[20]("73cf19657412508833f618a15e8251306b3e6ee5")

@@ -98,12 +96,7 @@ proc tx6(i: int): Transaction =
maxPriorityFee: 42.GasInt,
maxFee: 10.GasInt,
accessList: accesses,
-versionedHashes: @[digest],
-networkPayload: NetworkPayload(
-commitments: @[zeroG1],
-blobs: @[blob],
-proofs: @[zeroG1],
-)
+versionedHashes: @[digest]
)

proc tx7(i: int): Transaction =
@@ -287,7 +287,7 @@ proc runTxPoolTests(noisy = true) =

# insert some txs
for triple in testTxs:
-xq.add(triple[1], triple[0].info)
+xq.add(PooledTransaction(tx: triple[1]), triple[0].info)

check xq.nItems.total == testTxs.len
check xq.nItems.disposed == 0

@@ -296,7 +296,7 @@ proc runTxPoolTests(noisy = true) =

# re-insert modified transactions
for triple in testTxs:
-xq.add(triple[2], "alt " & triple[0].info)
+xq.add(PooledTransaction(tx: triple[2]), "alt " & triple[0].info)

check xq.nItems.total == testTxs.len
check xq.nItems.disposed == testTxs.len

@@ -505,7 +505,7 @@ proc runTxPoolTests(noisy = true) =
check txList.len == xq.nItems.total + xq.nItems.disposed

# re-add item
-xq.add(thisItem.tx)
+xq.add(thisItem.pooledTx)

# verify that the pivot item was moved out from the waste basket
check not xq.txDB.byRejects.hasKey(thisItem.itemID)

@@ -793,7 +793,7 @@ proc runTxPackerTests(noisy = true) =
check false
return

-let blk = r.get
+let blk = r.get.blk
# Make sure that there are at least two txs on the packed block so
# this test does not degenerate.
check 1 < xq.chain.receipts.len
@@ -103,7 +103,7 @@ proc toTxPool*(
status = statusInfo[getStatus()]
info = &"{txCount} #{num}({chainNo}) {n}/{txs.len} {status}"
noisy.showElapsed(&"insert: {info}"):
-result[0].add(txs[n], info)
+result[0].add(PooledTransaction(tx: txs[n]), info)

if loadTxs <= txCount:
break

@@ -132,11 +132,11 @@ proc toTxPool*(
noisy.showElapsed(&"Loading {itList.len} transactions"):
for item in itList:
if noLocals:
-result.add(item.tx, item.info)
+result.add(item.pooledTx, item.info)
elif localAddr.hasKey(item.sender):
-doAssert result.addLocal(item.tx, true).isOk
+doAssert result.addLocal(item.pooledTx, true).isOk
else:
-doAssert result.addRemote(item.tx, true).isOk
+doAssert result.addRemote(item.pooledTx, true).isOk
doAssert result.nItems.total == itList.len

@@ -174,11 +174,11 @@ proc toTxPool*(
for n in 0 ..< itList.len:
let item = itList[n]
if noLocals:
-result.add(item.tx, item.info)
+result.add(item.pooledTx, item.info)
elif localAddr.hasKey(item.sender):
-doAssert result.addLocal(item.tx, true).isOk
+doAssert result.addLocal(item.pooledTx, true).isOk
else:
-doAssert result.addRemote(item.tx, true).isOk
+doAssert result.addRemote(item.pooledTx, true).isOk
if n < 3 or delayAt-3 <= n and n <= delayAt+3 or itList.len-4 < n:
let t = result.getItem(item.itemID).value.timeStamp.format(tFmt, utc())
noisy.say &"added item {n} time={t}"
@@ -156,7 +156,7 @@ proc runTxPoolCliqueTest*() =

suite "Test TxPool with Clique sealer":
test "TxPool addLocal":
-let res = xp.addLocal(tx, force = true)
+let res = xp.addLocal(PooledTransaction(tx: tx), force = true)
check res.isOk
if res.isErr:
debugEcho res.error

@@ -172,7 +172,7 @@ proc runTxPoolCliqueTest*() =
check false
return

-blk = res.get
+blk = res.get.blk
body = BlockBody(
transactions: blk.txs,
uncles: blk.uncles

@@ -201,7 +201,7 @@ proc runTxPoolCliqueTest*() =
check xp.smartHead(blk.header)

let tx = env.makeTx(recipient, amount)
-let res = xp.addLocal(tx, force = true)
+let res = xp.addLocal(PooledTransaction(tx: tx), force = true)
check res.isOk
if res.isErr:
debugEcho res.error

@@ -214,7 +214,7 @@ proc runTxPoolCliqueTest*() =
check false
return

-blk = r.get
+blk = r.get.blk
body = BlockBody(
transactions: blk.txs,
uncles: blk.uncles

@@ -249,7 +249,7 @@ proc runTxPoolPosTest*() =

suite "Test TxPool with PoS block":
test "TxPool addLocal":
-let res = xp.addLocal(tx, force = true)
+let res = xp.addLocal(PooledTransaction(tx: tx), force = true)
check res.isOk
if res.isErr:
debugEcho res.error
@@ -269,7 +269,7 @@ proc runTxPoolPosTest*() =
check false
return

-blk = r.get
+blk = r.get.blk
check com.isBlockAfterTtd(blk.header)

body = BlockBody(

@@ -310,12 +310,12 @@ proc runTxPoolBlobhashTest*() =

suite "Test TxPool with blobhash block":
test "TxPool addLocal":
-let res = xp.addLocal(tx1, force = true)
+let res = xp.addLocal(PooledTransaction(tx: tx1), force = true)
check res.isOk
if res.isErr:
debugEcho res.error
return
-let res2 = xp.addLocal(tx2, force = true)
+let res2 = xp.addLocal(PooledTransaction(tx: tx2), force = true)
check res2.isOk

test "TxPool jobCommit":

@@ -332,7 +332,7 @@ proc runTxPoolBlobhashTest*() =
check false
return

-blk = r.get
+blk = r.get.blk
check com.isBlockAfterTtd(blk.header)

body = BlockBody(

@@ -366,7 +366,7 @@ proc runTxPoolBlobhashTest*() =
xp = env.xp

check xp.smartHead(blk.header)
-let res = xp.addLocal(tx4, force = true)
+let res = xp.addLocal(PooledTransaction(tx: tx4), force = true)
check res.isOk
if res.isErr:
debugEcho res.error

@@ -400,7 +400,7 @@ proc runTxHeadDelta*(noisy = true) =
let tx = env.makeTx(recipient, amount)
# Instead of `add()`, the functions `addRemote()` or `addLocal()`
# also would do.
-xp.add(tx)
+xp.add(PooledTransaction(tx: tx))

noisy.say "***", "txDB",
&" n={n}",

@@ -418,7 +418,7 @@ proc runTxHeadDelta*(noisy = true) =
check false
return

-let blk = r.get
+let blk = r.get.blk
check com.isBlockAfterTtd(blk.header)

let body = BlockBody(
@@ -1 +1 @@
-Subproject commit d8209f623f837d14c43a9e3fd464b0e199c5d180
+Subproject commit c482b4c5b658a77cc96b49d4a397aa6d98472ac7

@@ -1 +1 @@
-Subproject commit 7faa0fac238c2209e7bbbb4755af8f8a8c3834ad
+Subproject commit 21cbe3a91a70811522554e89e6a791172cebfef2

@@ -1 +1 @@
-Subproject commit fc9bc1da3ae7dde04f4591eba302f9e8b20c3924
+Subproject commit 87605d08a7f9cfc3b223bd32143e93a6cdf351ac