diff --git a/hive_integration/nodocker/engine/base_spec.nim b/hive_integration/nodocker/engine/base_spec.nim new file mode 100644 index 000000000..be502a018 --- /dev/null +++ b/hive_integration/nodocker/engine/base_spec.nim @@ -0,0 +1,70 @@ +import + std/[options], + eth/common, + ./clmock, + ./types, + ../../../tools/common/helpers, + ../../../nimbus/common/chain_config + +func getBlockTimeIncrements*(s: BaseSpec): int = + if s.blockTimestampIncrement == 0: + return 1 + return s.blockTimestampIncrement + +proc configureCLMock*(s: BaseSpec, cl: CLMocker) = + if s.slotsToSafe != 0: + cl.slotsToSafe = s.slotsToSafe + + if s.slotsToFinalized != 0: + cl.slotsToFinalized = s.slotsToFinalized + + if s.safeSlotsToImportOptimistically != 0: + cl.safeSlotsToImportOptimistically = s.safeSlotsToImportOptimistically + + cl.blockTimestampIncrement = some(s.getBlockTimeIncrements()) + +func getMainFork*(s: BaseSpec): string = + let mainFork = s.mainFork + if mainFork == "": + return ForkParis + return mainFork + +func getGenesisTimestamp*(s: BaseSpec): uint64 = + var genesisTimestamp = GenesisTimestamp + if s.genesisTimestamp != 0: + genesisTimestamp = s.genesisTimestamp + return genesisTimestamp.uint64 + +func getBlockTime*(s: BaseSpec, blockNumber: uint64): uint64 = + return s.getGenesisTimestamp() + blockNumber*s.getBlockTimeIncrements().uint64 + +func getForkTime*(s: BaseSpec): uint64 = + var forkTime = s.forkTime + if s.forkHeight > 0: + forkTime = s.getBlockTime(s.forkHeight.uint64) + return forkTime + +func getForkConfig*(s: BaseSpec): ChainConfig = + let + forkTime = s.getForkTime() + previousForkTime = s.previousForkTime + mainFork = s.getMainFork() + forkConfig = getChainConfig(mainFork) + genesisTimestamp = s.getGenesisTimestamp() + + doAssert(previousForkTime <= forkTime, + "previous fork time cannot be greater than fork time") + + if mainFork == ForkParis: + let cond = forkTime > genesisTimestamp or previousForkTime != 0 + doAssert(not cond, "Cannot configure a fork before Paris, skip test") + elif mainFork == ForkShanghai: + doAssert(previousForkTime == 0, "Cannot configure a fork before Shanghai") + forkConfig.shanghaiTime = some(forkTime.EthTime) + elif mainFork == ForkCancun: + forkConfig.shanghaiTime = some(previousForkTime.EthTime) + forkConfig.cancunTime = some(forkTime.EthTime) + else: + doAssert(false, "unknown fork: " & mainFork) + + return forkConfig diff --git a/hive_integration/nodocker/engine/cancun/blobs.nim b/hive_integration/nodocker/engine/cancun/blobs.nim new file mode 100644 index 000000000..1e59b962c --- /dev/null +++ b/hive_integration/nodocker/engine/cancun/blobs.nim @@ -0,0 +1,144 @@ +import + eth/common/eth_types, + stint, + kzg4844/kzg_ex as kzg, + stew/endians2, + nimcrypto/sha2, + stew/results, + ../../../nimbus/core/eip4844 + +type + BlobID* = uint64 + BlobIDs* = seq[BlobID] + + BlobCommitment* = object + blob*: kzg.KzgBlob + commitment*: kzg.KZGCommitment + + BlobTxWrapData* = object + hashes*: seq[Hash256] + blobs*: seq[kzg.KzgBlob] + commitments*: seq[kzg.KZGCommitment] + proofs*: seq[kzg.KzgProof] + +func getBlobList*(startId: BlobID, count: int): BlobIDs = + result = newSeq[BlobID](count) + for i in 0.. 
startIndex: + count = uint64(endIndex - startIndex + 1) + else: + count = uint64(startIndex - endIndex + 1) + + result = newSeq[BlobID](count) + if endIndex > startIndex: + for i in 0..= BLS_MODULUS[i]: + if BLS_MODULUS[i] > 0: + # This chunk is greater than the modulus, and we can reduce it in this byte position + expectedFieldElem[blobByteIdx] = BLS_MODULUS[i] - 1 + # done with this field element + break + else: + # This chunk is greater than the modulus, but we can't reduce it in this byte position, so we will try in the next byte position + expectedFieldElem[blobByteIdx] = BLS_MODULUS[i] + + if not equalMem(blob[chunkIdx*32].unsafeaddr, expectedFieldElem[0].addr, 32): + return false + + # Hash the current hash + currentHashed = sha256.digest(currentHashed.data) + + return true + +proc fillBlob(blobid: BlobID): KzgBlob = + if blobId == 0: + # Blob zero is empty blob, so leave as is + return + + # Fill the blob with deterministic data + let blobIdBytes = toBytesBE blobId + + # First 32 bytes are the hash of the blob ID + var currentHashed = sha256.digest(blobIdBytes) + + for chunkIdx in 0..= BLS_MODULUS[i]: + if BLS_MODULUS[i] > 0: + # This chunk is greater than the modulus, and we can reduce it in this byte position + result[blobByteIdx] = BLS_MODULUS[i] - 1 + # go to next chunk + break + else: + # This chunk is greater than the modulus, but we can't reduce it in this byte position, so we will try in the next byte position + result[blobByteIdx] = BLS_MODULUS[i] + + # Hash the current hash + currentHashed = sha256.digest(currentHashed.data) + +proc generateBlob(blobid: BlobID): BlobCommitment = + result.blob = blobId.fillBlob() + let res = blobToKzgCommitment(result.blob) + if res.isErr: + doAssert(false, res.error) + result.commitment = res.get + +proc getVersionedHash*(blobid: BlobID, commitmentVersion: byte): Hash256 = + let res = blobId.generateBlob() + result = sha256.digest(res.commitment) + result.data[0] = commitmentVersion + +proc blobDataGenerator*(startBlobId: BlobID, blobCount: int): BlobTxWrapData = + result.blobs = newSeq[kzg.KzgBlob](blobCount) + result.commitments = newSeq[kzg.KZGCommitment](blobCount) + result.hashes = newSeq[Hash256](blobCount) + result.proofs = newSeq[kzg.KzgProof](blobCount) + + for i in 0.. 
i: + version = v.hashVersions[i] + result[i] = blobID.getVersionedHash(version) + +proc description*(v: VersionedHashRef): string = + result = "VersionedHashes: " + for x in v.blobs: + result.add x.toHex + + if v.hashVersions.len > 0: + result.add " with versions " + result.add v.hashVersions.toHex + +type + VersionedHashesCustomizer* = ref object of RootRef + IncreaseVersionVersionedHashes* = ref object of VersionedHashesCustomizer + +method getVersionedHashes*(cust: VersionedHashesCustomizer, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] {.base.} = + doAssert(false, "getVersionedHashes unimplemented") + +method getVersionedHashes(cust: IncreaseVersionVersionedHashes, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] = + doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification") + + result = newSeq[common.Hash256](baseVersionedHashes.len) + for i, h in baseVersionedHashes: + result[i] = h + result[i].data[0] = result[i].data[0] + 1 + +type + CorruptVersionedHashes* = ref object of VersionedHashesCustomizer + +method getVersionedHashes(cust: CorruptVersionedHashes, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] = + doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification") + + result = newSeq[common.Hash256](baseVersionedHashes.len) + for i, h in baseVersionedHashes: + result[i] = h + result[i].data[h.data.len-1] = result[i].data[h.data.len-1] + 1 + +type + RemoveVersionedHash* = ref object of VersionedHashesCustomizer + +method getVersionedHashes(cust: RemoveVersionedHash, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] = + doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification") + + result = newSeq[common.Hash256](baseVersionedHashes.len - 1) + for i, h in baseVersionedHashes: + if i < baseVersionedHashes.len-1: + result[i] = h + result[i].data[h.data.len-1] = result[i].data[h.data.len-1] + 1 + +type + ExtraVersionedHash* = ref object of VersionedHashesCustomizer + +method getVersionedHashes(cust: ExtraVersionedHash, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] = + result = newSeq[common.Hash256](baseVersionedHashes.len + 1) + for i, h in baseVersionedHashes: + result[i] = h + + var extraHash: common.Hash256 + doAssert randomBytes(extraHash.data) == 32 + extraHash.data[0] = VERSIONED_HASH_VERSION_KZG + result[^1] = extraHash + + +type + PayloadCustomizer* = ref object of EngineAPIVersionResolver + + ExecutableData* = object + basePayload*: ExecutionPayload + beaconRoot* : Option[common.Hash256] + attr* : PayloadAttributes + versionedHashes*: seq[common.Hash256] + +method customizePayload(cust: PayloadCustomizer, data: ExecutableData): ExecutableData {.base.} = + doAssert(false, "customizePayload unimplemented") + +method getTimestamp(cust: PayloadCustomizer, basePayload: ExecutionPayload): uint64 {.base.} = + doAssert(false, "getTimestamp unimplemented") + +type + NewPayloadCustomizer* = ref object of PayloadCustomizer + +method getExpectedError(cust: NewPayloadCustomizer): int {.base.} = + doAssert(false, "getExpectedError unimplemented") + +method getExpectInvalidStatus(cust: NewPayloadCustomizer): bool {.base.} = + doAssert(false, "getExpectInvalidStatus unimplemented") + +type + CustomPayloadData = object + parentHash* : Option[common.Hash256] + feeRecipient* : Option[common.EthAddress] + stateRoot* : Option[common.Hash256] + receiptsRoot* : Option[common.Hash256] + logsBloom* : 
Option[BloomFilter] + prevRandao* : Option[common.Hash256] + number* : Option[uint64] + gasLimit* : Option[GasInt] + gasUsed* : Option[GasInt] + timestamp* : Option[uint64] + extraData* : Option[common.Blob] + baseFeePerGas* : Option[UInt256] + blockHash* : Option[common.Hash256] + transactions* : Option[seq[Transaction]] + withdrawals* : Option[seq[Withdrawal]] + removeWithdrawals* : bool + blobGasUsed* : Option[uint64] + removeBlobGasUsed* : bool + excessBlobGas* : Option[uint64] + removeExcessBlobGas* : bool + parentBeaconRoot* : Option[common.Hash256] + removeParentBeaconRoot* : bool + versionedHashesCustomizer*: VersionedHashesCustomizer + +func getTimestamp*(cust: CustomPayloadData, basePayload: ExecutionPayload): uint64 = + if cust.timestamp.isSome: + return cust.timestamp.get + return basePayload.timestamp.uint64 + +# Construct a customized payload by taking an existing payload as base and mixing it CustomPayloadData +# blockHash is calculated automatically. +proc customizePayload*(cust: CustomPayloadData, data: ExecutableData): ExecutableData = + var customHeader = blockHeader(data.basePayload, data.beaconRoot) + + if cust.transactions.isSome: + customHeader.txRoot = calcTxRoot(cust.transactions.get) + + # Overwrite custom information + if cust.parentHash.isSome: + customHeader.parentHash = cust.parentHash.get + + if cust.feeRecipient.isSome: + customHeader.coinbase = cust.feeRecipient.get + + if cust.stateRoot.isSome: + customHeader.stateRoot = cust.stateRoot.get + + if cust.receiptsRoot.isSome: + customHeader.receiptRoot = cust.receiptsRoot.get + + if cust.logsBloom.isSome: + customHeader.bloom = cust.logsBloom.get + + if cust.prevRandao.isSome: + customHeader.mixDigest = cust.prevRandao.get + + if cust.number.isSome: + customHeader.blockNumber = cust.number.get.u256 + + if cust.gasLimit.isSome: + customHeader.gasLimit = cust.gasLimit.get + + if cust.gasUsed.isSome: + customHeader.gasUsed = cust.gasUsed.get + + if cust.timestamp.isSome: + customHeader.timestamp = cust.timestamp.get.EthTime + + if cust.extraData.isSome: + customHeader.extraData = cust.extraData.get + + if cust.baseFeePerGas.isSome: + customHeader.fee = cust.baseFeePerGas + + if cust.removeWithdrawals: + customHeader.withdrawalsRoot = none(common.Hash256) + elif cust.withdrawals.isSome: + let h = calcWithdrawalsRoot(cust.withdrawals.get) + customHeader.withdrawalsRoot = some(h) + + if cust.removeBlobGasUsed: + customHeader.blobGasUsed = none(uint64) + elif cust.blobGasUsed.isSome: + customHeader.blobGasUsed = cust.blobGasUsed + + if cust.removeExcessBlobGas: + customHeader.excessBlobGas = none(uint64) + elif cust.excessBlobGas.isSome: + customHeader.excessBlobGas = cust.excessBlobGas + + if cust.removeParentBeaconRoot: + customHeader.parentBeaconBlockRoot = none(common.Hash256) + elif cust.parentBeaconRoot.isSome: + customHeader.parentBeaconBlockRoot = cust.parentBeaconRoot + + var blk = EthBlock( + header: customHeader, + ) + + if cust.removeWithdrawals: + blk.withdrawals = none(seq[Withdrawal]) + elif cust.withdrawals.isSome: + blk.withdrawals = cust.withdrawals + elif data.basePayload.withdrawals.isSome: + blk.withdrawals = ethWithdrawals data.basePayload.withdrawals + + result = ExecutableData( + basePayload : executionPayload(blk), + beaconRoot : blk.header.parentBeaconBlockRoot, + attr : data.attr, + versionedHashes: data.versionedHashes, + ) + + if cust.versionedHashesCustomizer.isNil.not: + result.versionedHashes = cust.versionedHashesCustomizer.getVersionedHashes(data.versionedHashes) + + +# Base new 
payload directive call cust. +# Used as base to other customizers. +type + BaseNewPayloadVersionCustomizer* = ref object of NewPayloadCustomizer + payloadCustomizer* : CustomPayloadData + expectedError* : int + expectInvalidStatus*: bool + +method customizePayload(cust: BaseNewPayloadVersionCustomizer, data: ExecutableData): ExecutableData = + cust.payloadCustomizer.customizePayload(data) + +method getExpectedError(cust: BaseNewPayloadVersionCustomizer): int = + cust.expectedError + +method getExpectInvalidStatus(cust: BaseNewPayloadVersionCustomizer): bool = + cust.expectInvalidStatus + +# Customizer that upgrades the version of the payload to the next version. +type + UpgradeNewPayloadVersion* = ref object of NewPayloadCustomizer + +method newPayloadVersion(cust: UpgradeNewPayloadVersion, timestamp: uint64): Version = + let version = procCall newPayloadVersion(EngineAPIVersionResolver(cust), timestamp) + doAssert(version != Version.high, "cannot upgrade version " & $Version.high) + version.succ + +# Customizer that downgrades the version of the payload to the previous version. +type + DowngradeNewPayloadVersion* = ref object of NewPayloadCustomizer + +method newPayloadVersion(cust: DowngradeNewPayloadVersion, timestamp: uint64): Version = + let version = procCall newPayloadVersion(EngineAPIVersionResolver(cust), timestamp) + doAssert(version != Version.V1, "cannot downgrade version 1") + version.pred + +proc customizePayloadTransactions*(data: ExecutableData, customTransactions: openArray[Transaction]): ExecutableData = + let cpd = CustomPayloadData( + transactions: some(@customTransactions), + ) + customizePayload(cpd, data) + +proc `$`*(cust: CustomPayloadData): string = + var fieldList = newSeq[string]() + + if cust.parentHash.isSome: + fieldList.add "parentHash=" & cust.parentHash.get.short + + if cust.feeRecipient.isSome: + fieldList.add "Coinbase=" & $cust.feeRecipient.get + + if cust.stateRoot.isSome: + fieldList.add "stateRoot=" & cust.stateRoot.get.short + + if cust.receiptsRoot.isSome: + fieldList.add "receiptsRoot=" & cust.receiptsRoot.get.short + + if cust.logsBloom.isSome: + fieldList.add "logsBloom=" & cust.logsBloom.get.toHex + + if cust.prevRandao.isSome: + fieldList.add "prevRandao=" & cust.prevRandao.get.short + + if cust.number.isSome: + fieldList.add "Number=" & $cust.number.get + + if cust.gasLimit.isSome: + fieldList.add "gasLimit=" & $cust.gasLimit.get + + if cust.gasUsed.isSome: + fieldList.add "gasUsed=" & $cust.gasUsed.get + + if cust.timestamp.isSome: + fieldList.add "timestamp=" & $cust.timestamp.get + + if cust.extraData.isSome: + fieldList.add "extraData=" & cust.extraData.get.toHex + + if cust.baseFeePerGas.isSome: + fieldList.add "baseFeePerGas=" & $cust.baseFeePerGas.get + + if cust.transactions.isSome: + fieldList.add "transactions=" & $cust.transactions.get.len + + if cust.withdrawals.isSome: + fieldList.add "withdrawals=" & $cust.withdrawals.get.len + + fieldList.join(", ") + +type + InvalidPayloadBlockField* = enum + InvalidParentHash + InvalidStateRoot + InvalidReceiptsRoot + InvalidNumber + InvalidGasLimit + InvalidGasUsed + InvalidTimestamp + InvalidPrevRandao + RemoveTransaction + InvalidTransactionSignature + InvalidTransactionNonce + InvalidTransactionGas + InvalidTransactionGasPrice + InvalidTransactionValue + InvalidTransactionGasTipPrice + InvalidTransactionChainID + InvalidParentBeaconBlockRoot + InvalidExcessBlobGas + InvalidBlobGasUsed + InvalidBlobCountGasUsed + InvalidVersionedHashesVersion + InvalidVersionedHashes + 
IncompleteVersionedHashes + ExtraVersionedHashes + InvalidWithdrawals + +func scramble(data: Web3Hash): Option[common.Hash256] = + var h = ethHash data + h.data[^1] = byte(255 - h.data[^1]) + some(h) + +func scramble(data: common.Hash256): Option[common.Hash256] = + var h = data + h.data[^1] = byte(255 - h.data[^1]) + some(h) + +# This function generates an invalid payload by taking a base payload and modifying the specified field such that it ends up being invalid. +# One small consideration is that the payload needs to contain transactions and specially transactions using the PREVRANDAO opcode for all the fields to be compatible with this function. +proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadField: InvalidPayloadBlockField): ExecutableData = + var customPayloadMod: CustomPayloadData + let basePayload = data.basePayload + + case payloadField + of InvalidParentHash: + customPayloadMod = CustomPayloadData( + parentHash: scramble(basePayload.parentHash), + ) + of InvalidStateRoot: + customPayloadMod = CustomPayloadData( + stateRoot: scramble(basePayload.stateRoot), + ) + of InvalidReceiptsRoot: + customPayloadMod = CustomPayloadData( + receiptsRoot: scramble(basePayload.receiptsRoot), + ) + of InvalidNumber: + let modNumber = basePayload.blockNumber.uint64 - 1 + customPayloadMod = CustomPayloadData( + number: some(modNumber), + ) + of InvalidGasLimit: + let modGasLimit = basePayload.gasLimit.GasInt * 2 + customPayloadMod = CustomPayloadData( + gasLimit: some(modGasLimit), + ) + of InvalidGasUsed: + let modGasUsed = basePayload.gasUsed.GasInt - 1 + customPayloadMod = CustomPayloadData( + gasUsed: some(modGasUsed), + ) + of InvalidTimestamp: + let modTimestamp = basePayload.timestamp.uint64 - 1 + customPayloadMod = CustomPayloadData( + timestamp: some(modTimestamp), + ) + of InvalidPrevRandao: + # This option potentially requires a transaction that uses the PREVRANDAO opcode. + # Otherwise the payload will still be valid. 
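+    # Note (based on the comment above): when no transaction in the payload
+    # reads PREVRANDAO (opcode 0x44) and stores or acts on the result, the
+    # state transition does not depend on the value replaced below, and the
+    # customized payload is simply a different, still valid, block.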
+ var randomHash: common.Hash256 + doAssert randomBytes(randomHash.data) == 32 + customPayloadMod = CustomPayloadData( + prevRandao: some(randomHash), + ) + of InvalidParentBeaconBlockRoot: + doAssert(data.beaconRoot.isSome, + "no parent beacon block root available for modification") + customPayloadMod = CustomPayloadData( + parentBeaconRoot: scramble(data.beaconRoot.get), + ) + of InvalidBlobGasUsed: + doAssert(basePayload.blobGasUsed.isSome, "no blob gas used available for modification") + let modBlobGasUsed = basePayload.blobGasUsed.get.uint64 + 1 + customPayloadMod = CustomPayloadData( + blobGasUsed: some(modBlobGasUsed), + ) + of InvalidBlobCountGasUsed: + doAssert(basePayload.blobGasUsed.isSome, "no blob gas used available for modification") + let modBlobGasUsed = basePayload.blobGasUsed.get.uint64 + GAS_PER_BLOB + customPayloadMod = CustomPayloadData( + blobGasUsed: some(modBlobGasUsed), + ) + of InvalidExcessBlobGas: + doAssert(basePayload.excessBlobGas.isSome, "no excess blob gas available for modification") + let modExcessBlobGas = basePayload.excessBlobGas.get.uint64 + 1 + customPayloadMod = CustomPayloadData( + excessBlobGas: some(modExcessBlobGas), + ) + of InvalidVersionedHashesVersion: + doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification") + customPayloadMod = CustomPayloadData( + versionedHashesCustomizer: IncreaseVersionVersionedHashes(), + ) + of InvalidVersionedHashes: + doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification") + customPayloadMod = CustomPayloadData( + versionedHashesCustomizer: CorruptVersionedHashes(), + ) + of IncompleteVersionedHashes: + doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification") + customPayloadMod = CustomPayloadData( + versionedHashesCustomizer: RemoveVersionedHash(), + ) + of ExtraVersionedHashes: + doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification") + customPayloadMod = CustomPayloadData( + versionedHashesCustomizer: ExtraVersionedHash(), + ) + of InvalidWithdrawals: + # These options are not supported yet. 
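+    # (randomizeWithdrawalsOrder at the bottom of this module is not a
+    # candidate for this case: it keeps the same accounts and amounts, so a
+    # reordered withdrawals list is expected to remain valid.)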
+ # TODO: Implement + doAssert(false, "invalid payload field not supported yet: " & $payloadField) + of RemoveTransaction: + let emptyTxs = newSeq[Transaction]() + customPayloadMod = CustomPayloadData( + transactions: some(emptyTxs), + ) + of InvalidTransactionSignature, + InvalidTransactionNonce, + InvalidTransactionGas, + InvalidTransactionGasPrice, + InvalidTransactionGasTipPrice, + InvalidTransactionValue, + InvalidTransactionChainID: + + doAssert(basePayload.transactions.len > 0, "no transactions available for modification") + let baseTx = rlp.decode(distinctBase basePayload.transactions[0], Transaction) + var custTx: CustomTransactionData + + case payloadField + of InvalidTransactionSignature: + custTx.signature = some(baseTx.R - 1.u256) + of InvalidTransactionNonce: + custTx.nonce = some(baseTx.nonce - 1) + of InvalidTransactionGas: + custTx.gas = some(0.GasInt) + of InvalidTransactionGasPrice: + custTx.gasPriceOrGasFeeCap = some(0.GasInt) + of InvalidTransactionGasTipPrice: + custTx.gasTipCap = some(gasTipPrice.GasInt * 2.GasInt) + of InvalidTransactionValue: + # Vault account initially has 0x123450000000000000000, so this value should overflow + custTx.value = some(UInt256.fromHex("0x123450000000000000001")) + of InvalidTransactionChainID: + custTx.chainId = some(ChainId(baseTx.chainId.uint64 + 1)) + else: discard + + let modifiedTx = sender.customizeTransaction(baseTx, custTx) + customPayloadMod = CustomPayloadData( + transactions: some(@[modifiedTx]), + ) + + customPayloadMod.customizePayload(data) + +# Generates an alternative withdrawals list that contains the same +# amounts and accounts, but the order in the list is different, so +# stateRoot of the resulting payload should be the same. + +proc randomizeWithdrawalsOrder(src: openArray[Withdrawal]): seq[Withdrawal] = + result = @src + result.shuffle diff --git a/hive_integration/nodocker/engine/cancun/helpers.nim b/hive_integration/nodocker/engine/cancun/helpers.nim new file mode 100644 index 000000000..1105aa731 --- /dev/null +++ b/hive_integration/nodocker/engine/cancun/helpers.nim @@ -0,0 +1,216 @@ +import + std/[tables, strutils, typetraits], + stint, + eth/[common, rlp], + eth/common/eth_types_rlp, + chronicles, + stew/[results, byteutils], + kzg4844/kzg_ex as kzg, + ../types, + ../engine_client, + ../../../../nimbus/constants, + ../../../../nimbus/core/eip4844, + ../../../../nimbus/rpc/rpc_types, + ../../../../nimbus/beacon/execution_types, + ../../../../nimbus/beacon/web3_eth_conv, + ./blobs + +type + TestBlobTxPool* = ref object + currentBlobID* : BlobID + currentTxIndex*: int + transactions* : Table[common.Hash256, Transaction] + hashesByIndex* : Table[int, common.Hash256] + +const + HISTORY_BUFFER_LENGTH* = 8191 + + # Test constants + DATAHASH_START_ADDRESS* = toAddress(0x20000.u256) + DATAHASH_ADDRESS_COUNT* = 1000 + +func getMinExcessBlobGasForBlobGasPrice(data_gas_price: uint64): uint64 = + var + current_excess_data_gas = 0'u64 + current_data_gas_price = 1'u64 + + while current_data_gas_price < data_gas_price: + current_excess_data_gas += GAS_PER_BLOB.uint64 + current_data_gas_price = getBlobGasPrice(current_excess_data_gas).truncate(uint64) + + return current_excess_data_gas + +func getMinExcessBlobsForBlobGasPrice*(data_gas_price: uint64): uint64 = + return getMinExcessBlobGasForBlobGasPrice(data_gas_price) div GAS_PER_BLOB.uint64 + +proc addBlobTransaction*(pool: TestBlobTxPool, tx: Transaction) = + let txHash = rlpHash(tx) + pool.transactions[txHash] = tx + +proc `==`(a: openArray[rpc_types.AccessTuple], b: 
openArray[AccessPair]): bool = + if a.len != b.len: + return false + + for i in 0..= uint64(len(t.TestEngines)) { + return error "invalid client index %d", step.ClientIndex) + } + engine = t.Engines[step.ClientIndex] + conn, err = devp2p.PeerEngineClient(engine, env.clMock) + if err != nil { + return error "error peering engine client: %v", err) + } + defer conn.Close() + info "Connected to client %d, remote public key: %s", step.ClientIndex, conn.RemoteKey()) + + # Sleep + time.Sleep(1 * time.Second) + + # Timeout value for all requests + timeout = 20 * time.Second + + # Send a ping request to verify that we are not immediately disconnected + pingReq = &devp2p.Ping{} + if size, err = conn.Write(pingReq); err != nil { + return errors.Wrap(err, "could not write to conn") + else: + info "Wrote %d bytes to conn", size) + } + + # Finally wait for the pong response + msg, err = conn.WaitForResponse(timeout, 0) + if err != nil { + return errors.Wrap(err, "error waiting for response") + } + switch msg = msg.(type) { + case *devp2p.Pong: + info "Received pong response: %v", msg) + default: + return error "unexpected message type: %T", msg) + } + + return nil +} + +func (step DevP2PClientPeering) Description() string { + return fmt.Sprintf("DevP2PClientPeering: client %d", step.ClientIndex) +} \ No newline at end of file diff --git a/hive_integration/nodocker/engine/cancun/step_devp2p_pooledtx.nim b/hive_integration/nodocker/engine/cancun/step_devp2p_pooledtx.nim new file mode 100644 index 000000000..c0bdbab78 --- /dev/null +++ b/hive_integration/nodocker/engine/cancun/step_devp2p_pooledtx.nim @@ -0,0 +1,134 @@ +import + ./step + +# A step that requests a Transaction hash via P2P and expects the correct full blob tx +type DevP2PRequestPooledTransactionHash struct { + # Client index to request the transaction hash from + ClientIndex uint64 + # Transaction Index to request + TransactionIndexes []uint64 + # Wait for a new pooled transaction message before actually requesting the transaction + WaitForNewPooledTransaction bool +} + +func (step DevP2PRequestPooledTransactionHash) Execute(t *CancunTestContext) error { + # Get client index's enode + if step.ClientIndex >= uint64(len(t.TestEngines)) { + return error "invalid client index %d", step.ClientIndex) + } + engine = t.Engines[step.ClientIndex] + conn, err = devp2p.PeerEngineClient(engine, env.clMock) + if err != nil { + return error "error peering engine client: %v", err) + } + defer conn.Close() + info "Connected to client %d, remote public key: %s", step.ClientIndex, conn.RemoteKey()) + + var ( + txHashes = make([]Hash256, len(step.TransactionIndexes)) + txs = make([]typ.Transaction, len(step.TransactionIndexes)) + ok bool + ) + for i, txIndex = range step.TransactionIndexes { + txHashes[i], ok = t.TestBlobTxPool.HashesByIndex[txIndex] + if !ok { + return error "transaction index %d not found", step.TransactionIndexes[0]) + } + txs[i], ok = t.TestBlobTxPool.transactions[txHashes[i]] + if !ok { + return error "transaction %s not found", txHashes[i].String()) + } + } + + # Timeout value for all requests + timeout = 20 * time.Second + + # Wait for a new pooled transaction message + if step.WaitForNewPooledTransaction { + msg, err = conn.WaitForResponse(timeout, 0) + if err != nil { + return errors.Wrap(err, "error waiting for response") + } + switch msg = msg.(type) { + case *devp2p.NewPooledTransactionHashes: + if len(msg.Hashes) != len(txHashes) { + return error "expected %d hashes, got %d", len(txHashes), len(msg.Hashes)) + } + if len(msg.Types) 
!= len(txHashes) { + return error "expected %d types, got %d", len(txHashes), len(msg.Types)) + } + if len(msg.Sizes) != len(txHashes) { + return error "expected %d sizes, got %d", len(txHashes), len(msg.Sizes)) + } + for i = 0; i < len(txHashes); i++ { + hash, typ, size = msg.Hashes[i], msg.Types[i], msg.Sizes[i] + # Get the transaction + tx, ok = t.TestBlobTxPool.transactions[hash] + if !ok { + return error "transaction %s not found", hash.String()) + } + + if typ != tx.Type() { + return error "expected type %d, got %d", tx.Type(), typ) + } + + b, err = tx.MarshalBinary() + if err != nil { + return errors.Wrap(err, "error marshaling transaction") + } + if size != uint32(len(b)) { + return error "expected size %d, got %d", len(b), size) + } + } + default: + return error "unexpected message type: %T", msg) + } + } + + # Send the request for the pooled transactions + getTxReq = &devp2p.GetPooledTransactions{ + RequestId: 1234, + GetPooledTransactionsPacket: txHashes, + } + if size, err = conn.Write(getTxReq); err != nil { + return errors.Wrap(err, "could not write to conn") + else: + info "Wrote %d bytes to conn", size) + } + + # Wait for the response + msg, err = conn.WaitForResponse(timeout, getTxReq.RequestId) + if err != nil { + return errors.Wrap(err, "error waiting for response") + } + switch msg = msg.(type) { + case *devp2p.PooledTransactions: + if len(msg.PooledTransactionsBytesPacket) != len(txHashes) { + return error "expected %d txs, got %d", len(txHashes), len(msg.PooledTransactionsBytesPacket)) + } + for i, txBytes = range msg.PooledTransactionsBytesPacket { + tx = txs[i] + + expBytes, err = tx.MarshalBinary() + if err != nil { + return errors.Wrap(err, "error marshaling transaction") + } + + if len(expBytes) != len(txBytes) { + return error "expected size %d, got %d", len(expBytes), len(txBytes)) + } + + if !bytes.Equal(expBytes, txBytes) { + return error "expected tx %#x, got %#x", expBytes, txBytes) + } + + } + default: + return error "unexpected message type: %T", msg) + } + return nil +} + +func (step DevP2PRequestPooledTransactionHash) Description() string { + return fmt.Sprintf("DevP2PRequestPooledTransactionHash: client %d, transaction indexes %v", step.ClientIndex, step.TransactionIndexes) +} \ No newline at end of file diff --git a/hive_integration/nodocker/engine/cancun/step_launch_client.nim b/hive_integration/nodocker/engine/cancun/step_launch_client.nim new file mode 100644 index 000000000..46be4b385 --- /dev/null +++ b/hive_integration/nodocker/engine/cancun/step_launch_client.nim @@ -0,0 +1,47 @@ +import + ./step + +# A step that launches a new client +type LaunchClients struct { + client.EngineStarter + ClientCount uint64 + SkipConnectingToBootnode bool + SkipAddingToCLMock bool +} + +func (step LaunchClients) GetClientCount() uint64 { + clientCount = step.ClientCount + if clientCount == 0 { + clientCount = 1 + } + return clientCount +} + +func (step LaunchClients) Execute(t *CancunTestContext) error { + # Launch a new client + var ( + client client.EngineClient + err error + ) + clientCount = step.GetClientCount() + for i = uint64(0); i < clientCount; i++ { + if !step.SkipConnectingToBootnode { + client, err = step.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles, t.Engines[0]) + else: + client, err = step.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles) + } + if err != nil { + return err + } + t.Engines = append(t.Engines, client) + t.TestEngines = append(t.TestEngines, test.NewTestEngineClient(t.Env, client)) 
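+		# Optionally register the freshly launched client with the CL mock.
+		# Tests such as "Blob Transaction Ordering, Multiple Clients" set
+		# SkipAddingToCLMock so that every payload keeps being produced by the
+		# first client only.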
+ if !step.SkipAddingToCLMock { + env.clMock.AddEngineClient(client) + } + } + return nil +} + +func (step LaunchClients) Description() string { + return fmt.Sprintf("Launch %d new engine client(s)", step.GetClientCount()) +} diff --git a/hive_integration/nodocker/engine/cancun/step_newpayloads.nim b/hive_integration/nodocker/engine/cancun/step_newpayloads.nim new file mode 100644 index 000000000..012df10d1 --- /dev/null +++ b/hive_integration/nodocker/engine/cancun/step_newpayloads.nim @@ -0,0 +1,409 @@ +import + std/strutils, + chronicles, + ./step_desc, + ./helpers, + ./customizer, + ./blobs, + ../engine_client, + ../test_env, + ../types, + ../../../../nimbus/core/eip4844, + ../../../../nimbus/common/common + +type + NewPayloads* = ref object of TestStep + # Payload Count + payloadCount*: int + # Number of blob transactions that are expected to be included in the payload + expectedIncludedBlobCount*: int + # Blob IDs expected to be found in the payload + expectedBlobs*: seq[BlobID] + # Delay between FcU and GetPayload calls + getPayloadDelay*: int + # GetPayload modifier when requesting the new Payload + getPayloadCustomizer*: GetPayloadCustomizer + # ForkchoiceUpdate modifier when requesting the new Payload + fcUOnPayloadRequest*: ForkchoiceUpdatedCustomizer + # Extra modifications on NewPayload to potentially generate an invalid payload + newPayloadCustomizer*: NewPayloadCustomizer + # ForkchoiceUpdate modifier when setting the new payload as head + fcUOnHeadSet*: ForkchoiceUpdatedCustomizer + # Expected responses on the NewPayload call + expectationDescription*: string + +func getPayloadCount(step: NewPayloads): int = + var payloadCount = step.payloadCount + if payloadCount == 0: + payloadCount = 1 + return payloadCount + +proc verifyPayload(step: NewPayloads, + com: CommonRef, + client: RpcClient, + blobTxsInPayload: openArray[Transaction], + shouldOverrideBuilder: Option[bool], + payload: ExecutionPayload, + previousPayload = none(ExecutionPayload)): bool = + + var + parentExcessBlobGas = 0'u64 + parentBlobGasUsed = 0'u64 + + if previousPayload.isSome: + let prevPayload = previousPayload.get + if prevPayload.excessBlobGas.isSome: + parentExcessBlobGas = prevPayload.excessBlobGas.get.uint64 + + if prevPayload.blobGasUsed.isSome: + parentBlobGasUsed = prevPayload.blobGasUsed.get.uint64 + + let + parent = common.BlockHeader( + excessBlobGas: some(parentExcessBlobGas), + blobGasUsed: some(parentBlobGasUsed) + ) + expectedExcessBlobGas = calcExcessBlobGas(parent) + + if com.isCancunOrLater(payload.timestamp.EthTime): + if payload.excessBlobGas.isNone: + error "payload contains nil excessDataGas" + return false + + if payload.blobGasUsed.isNone: + error "payload contains nil dataGasUsed" + return false + + if payload.excessBlobGas.get.uint64 != expectedExcessBlobGas: + error "payload contains incorrect excessDataGas", + want=expectedExcessBlobGas, + have=payload.excessBlobGas.get.uint64 + return false + + if shouldOverrideBuilder.isNone: + error "shouldOverrideBuilder was not included in the getPayload response" + return false + + var + totalBlobCount = 0 + expectedBlobGasPrice = getBlobGasPrice(expectedExcessBlobGas) + + for tx in blobTxsInPayload: + let blobCount = tx.versionedHashes.len + totalBlobCount += blobCount + + # Retrieve receipt from client + let r = client.txReceipt(tx.rlpHash) + let expectedBlobGasUsed = blobCount.uint64 * GAS_PER_BLOB + + #r.ExpectBlobGasUsed(expectedBlobGasUsed) + #r.ExpectBlobGasPrice(expectedBlobGasPrice) + + if totalBlobCount != 
step.expectedIncludedBlobCount: + error "expected blobs in transactions", + expect=step.expectedIncludedBlobCount, + got=totalBlobCount + return false + + if not verifyBeaconRootStorage(client, payload): + return false + + else: + if payload.excessBlobGas.isSome: + error "payload contains non-nil excessDataGas pre-fork" + return false + + if payload.blobGasUsed.isSome: + error "payload contains non-nil dataGasUsed pre-fork" + return false + + return true + +proc verifyBlobBundle(step: NewPayloads, + blobDataInPayload: openArray[BlobWrapData], + payload: ExecutionPayload, + blobBundle: BlobsBundleV1): bool = + + if blobBundle.blobs.len != blobBundle.commitments.len or + blobBundle.blobs.len != blobBundle.proofs.len: + error "unexpected length in blob bundle", + blobs=len(blobBundle.blobs), + proofs=len(blobBundle.proofs), + kzgs=len(blobBundle.commitments) + return false + + if len(blobBundle.blobs) != step.expectedIncludedBlobCount: + error "expected", + expect=step.expectedIncludedBlobCount, + get=len(blobBundle.blobs) + return false + + # Verify that the calculated amount of blobs in the payload matches the + # amount of blobs in the bundle + if len(blobDataInPayload) != len(blobBundle.blobs): + error "expected blobs in the bundle", + expect=len(blobDataInPayload), + get=len(blobBundle.blobs) + return false + + for i, blobData in blobDataInPayload: + let bundleCommitment = blobBundle.commitments[i].bytes + let bundleBlob = blobBundle.blobs[i].bytes + let bundleProof = blobBundle.proofs[i].bytes + + if bundleCommitment != blobData.commitment: + error "KZG mismatch at index of the bundle", index=i + return false + + if bundleBlob != blobData.blob: + error "blob mismatch at index of the bundle", index=i + return false + + if bundleProof != blobData.proof: + error "proof mismatch at index of the bundle", index=i + return false + + if len(step.expectedBlobs) != 0: + # Verify that the blobs in the payload match the expected blobs + for expectedBlob in step.expectedBlobs: + var found = false + for blobData in blobDataInPayload: + if not expectedBlob.verifyBlob(blobData.blob): + return false + else: + found = true + break + + if not found: + error "could not find expected blob", expectedBlob + return false + + return true + +type + Shadow = ref object + p: int + payloadCount: int + prevPayload: ExecutionPayload + +method execute*(step: NewPayloads, ctx: CancunTestContext): bool = + # Create a new payload + # Produce the payload + let env = ctx.env + + var originalGetPayloadDelay = env.clMock.payloadProductionClientDelay + if step.getPayloadDelay != 0: + env.clMock.payloadProductionClientDelay = step.getPayloadDelay + + var shadow = Shadow( + payloadCount: step.getPayloadCount(), + prevPayload: env.clMock.latestPayloadBuilt + ) + + for p in 0..= ctx.env.numEngines: + error "invalid client index", index=step.clientIndex + return false + + let engine = ctx.env.engines(step.clientIndex) + # Send the blob transactions + for _ in 0..= uint64(len(t.TestEngines)) { + return error "invalid client index %d", step.ClientID) + } + testEngine = t.TestEngines[step.ClientID].WithEngineAPIVersionResolver(step.NewPayloadCustomizer) + r = env.client.NewPayload(payload) + if expectedError != nil { + r.ExpectErrorCode(*expectedError) + else: + r.ExpectStatus(expectedStatus) + } + return nil +} + +method description*(step: SendModifiedLatestPayload): string = + desc = fmt.Sprintf("SendModifiedLatestPayload: client %d, expected invalid=%T, ", step.ClientID, step.NewPayloadCustomizer.getExpectInvalidStatus()) + /* + 
TODO: Figure out if we need this. + if step.VersionedHashes != nil { + desc += step.VersionedHashes.Description() + } + */ + + return desc +} \ No newline at end of file diff --git a/hive_integration/nodocker/engine/cancun_tests.nim b/hive_integration/nodocker/engine/cancun_tests.nim new file mode 100644 index 000000000..f66a1c7c7 --- /dev/null +++ b/hive_integration/nodocker/engine/cancun_tests.nim @@ -0,0 +1,1992 @@ +import + std/tables, + chronos, + chronicles, + stew/byteutils, + ./types, + ./base_spec, + ./test_env, + ./clmock, + ./cancun/step_desc, + ./cancun/helpers, + ./cancun/blobs, + ../../nimbus/constants, + ../../nimbus/common/chain_config + +import + ./cancun/step_newpayloads, + ./cancun/step_sendblobtx + +# Precalculate the first data gas cost increase +const + DATA_GAS_COST_INCREMENT_EXCEED_BLOBS = getMinExcessBlobsForBlobGasPrice(2) + TARGET_BLOBS_PER_BLOCK = int(TARGET_BLOB_GAS_PER_BLOCK div GAS_PER_BLOB) + +proc getGenesis(param: NetworkParams) = + # Add bytecode pre deploy to the EIP-4788 address. + param.genesis.alloc[BEACON_ROOTS_ADDRESS] = GenesisAccount( + balance: 0.u256, + nonce: 1, + code: hexToSeqByte("3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"), + ) + +# Execution specification reference: +# https:#github.com/ethereum/execution-apis/blob/main/src/engine/cancun.md +proc specExecute(ws: BaseSpec): bool = + ws.mainFork = ForkCancun + let + cs = CancunSpec(ws) + conf = envConfig(ws.getForkConfig()) + + getGenesis(conf.networkParams) + let env = TestEnv.new(conf) + env.engine.setRealTTD(0) + env.setupCLMock() + ws.configureCLMock(env.clMock) + + testCond waitFor env.clMock.waitForTTD() + + let blobTestCtx = CancunTestContext( + env: env, + txPool: TestBlobTxPool(), + ) + + if cs.getPayloadDelay != 0: + env.clMock.payloadProductionClientDelay = cs.getPayloadDelay + + result = true + for stepId, step in cs.testSequence: + echo "INFO: Executing step", stepId+1, ": ", step.description() + if not step.execute(blobTestCtx): + fatal "FAIL: Error executing", step=stepId+1 + result = false + break + + env.close() + +# List of all blob tests +let cancunTestList* = [ + TestDesc( + name: "Blob Transactions On Block 1, Shanghai Genesis", + about: """ + Tests the Cancun fork since Block 1. + + Verifications performed: + - Correct implementation of Engine API changes for Cancun: + - engine_newPayloadV3, engine_forkchoiceUpdatedV3, engine_getPayloadV3 + - Correct implementation of EIP-4844: + - Blob transaction ordering and inclusion + - Blob transaction blob gas cost checks + - Verify Blob bundle on built payload + - Eth RPC changes for Cancun: + - Blob fields in eth_getBlockByNumber + - Beacon root in eth_getBlockByNumber + - Blob fields in transaction receipts from eth_getTransactionReceipt + """, + run: specExecute, + spec: CancunSpec( + forkHeight: 1, + testSequence: @[ + # We are starting at Shanghai genesis so send a couple payloads to reach the fork + NewPayloads().TestStep, + + # First, we send a couple of blob transactions on genesis, + # with enough data gas cost to make sure they are included in the first block. + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + + # We create the first payload, and verify that the blob transactions + # are included in the payload. + # We also verify that the blob transactions are included in the blobs bundle. 
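+      # The follow-up steps below (currently inside a block comment) rely on
+      # DATA_GAS_COST_INCREMENT_EXCEED_BLOBS, i.e. getMinExcessBlobsForBlobGasPrice(2):
+      # each payload filled with MAX_BLOBS_PER_BLOCK blobs raises the excess by
+      # MAX_BLOBS_PER_BLOCK - TARGET_BLOBS_PER_BLOCK blobs, so after roughly
+      # DATA_GAS_COST_INCREMENT_EXCEED_BLOBS div (MAX_BLOBS_PER_BLOCK - TARGET_BLOBS_PER_BLOCK)
+      # such payloads the blob gas price exceeds 1, a max-blob-gas-cost-1
+      # transaction stops being includable, and one empty payload is expected
+      # before it is picked up again.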
+ #[NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + # Try to increase the data gas cost of the blob transactions + # by maxing out the number of blobs for the next payloads. + SendBlobTransactions( + transactionCount: DATA_GAS_COST_INCREMENT_EXCEED_BLOBS div (MAX_BLOBS_PER_BLOCK-TARGET_BLOBS_PER_BLOCK) + 1, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + + # Next payloads will have max data blobs each + NewPayloads( + payloadCount: DATA_GAS_COST_INCREMENT_EXCEED_BLOBS div (MAX_BLOBS_PER_BLOCK - TARGET_BLOBS_PER_BLOCK), + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + ), + + # But there will be an empty payload, since the data gas cost increased + # and the last blob transaction was not included. + NewPayloads( + expectedIncludedBlobCount: 0, + ), + + # But it will be included in the next payload + NewPayloads( + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + ),]# + ] + ) + ), +] + +#[ + TestDesc( + spec: CancunSpec( + + + name: "Blob Transactions On Block 1, Cancun Genesis", + about: """ + Tests the Cancun fork since genesis. + + Verifications performed: + * See Blob Transactions On Block 1, Shanghai Genesis + """, + mainFork: Cancun, + ), + + testSequence: @[ + NewPayloads(), # Create a single empty payload to push the client through the fork. + # First, we send a couple of blob transactions on genesis, + # with enough data gas cost to make sure they are included in the first block. + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + + # We create the first payload, and verify that the blob transactions + # are included in the payload. + # We also verify that the blob transactions are included in the blobs bundle. + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + # Try to increase the data gas cost of the blob transactions + # by maxing out the number of blobs for the next payloads. + SendBlobTransactions( + transactionCount: DATA_GAS_COST_INCREMENT_EXCEED_BLOBS/(MAX_BLOBS_PER_BLOCK-TARGET_BLOBS_PER_BLOCK) + 1, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + + # Next payloads will have max data blobs each + NewPayloads( + payloadCount: DATA_GAS_COST_INCREMENT_EXCEED_BLOBS / (MAX_BLOBS_PER_BLOCK - TARGET_BLOBS_PER_BLOCK), + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + ), + + # But there will be an empty payload, since the data gas cost increased + # and the last blob transaction was not included. + NewPayloads( + expectedIncludedBlobCount: 0, + ), + + # But it will be included in the next payload + NewPayloads( + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + ), + ), + ), + TestDesc( + spec: CancunSpec( + + + name: "Blob Transaction Ordering, Single Account", + about: """ + Send N blob transactions with MAX_BLOBS_PER_BLOCK-1 blobs each, + using account A. + Using same account, and an increased nonce from the previously sent + transactions, send N blob transactions with 1 blob each. + Verify that the payloads are created with the correct ordering: + - The first payloads must include the first N blob transactions + - The last payloads must include the last single-blob transactions + All transactions have sufficient data gas price to be included any + of the payloads. 
+ """, + mainFork: Cancun, + ), + + testSequence: @[ + # First send the MAX_BLOBS_PER_BLOCK-1 blob transactions. + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK - 1, + blobTransactionMaxBlobGasCost: u256(100), + ), + # Then send the single-blob transactions + SendBlobTransactions( + transactionCount: MAX_BLOBS_PER_BLOCK + 1, + blobsPerTransaction: 1, + blobTransactionMaxBlobGasCost: u256(100), + ), + + # First four payloads have MAX_BLOBS_PER_BLOCK-1 blobs each + NewPayloads( + payloadCount: 4, + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK - 1, + ), + + # The rest of the payloads have full blobs + NewPayloads( + payloadCount: 2, + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + ), + ), + ), + TestDesc( + spec: CancunSpec( + + + name: "Blob Transaction Ordering, Single Account 2", + about: """ + Send N blob transactions with MAX_BLOBS_PER_BLOCK-1 blobs each, + using account A. + Using same account, and an increased nonce from the previously sent + transactions, send a single 2-blob transaction, and send N blob + transactions with 1 blob each. + Verify that the payloads are created with the correct ordering: + - The first payloads must include the first N blob transactions + - The last payloads must include the rest of the transactions + All transactions have sufficient data gas price to be included any + of the payloads. + """, + mainFork: Cancun, + ), + + testSequence: @[ + # First send the MAX_BLOBS_PER_BLOCK-1 blob transactions. + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK - 1, + blobTransactionMaxBlobGasCost: u256(100), + ), + + # Then send the dual-blob transaction + SendBlobTransactions( + transactionCount: 1, + blobsPerTransaction: 2, + blobTransactionMaxBlobGasCost: u256(100), + ), + + # Then send the single-blob transactions + SendBlobTransactions( + transactionCount: MAX_BLOBS_PER_BLOCK - 2, + blobsPerTransaction: 1, + blobTransactionMaxBlobGasCost: u256(100), + ), + + # First five payloads have MAX_BLOBS_PER_BLOCK-1 blobs each + NewPayloads( + payloadCount: 5, + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK - 1, + ), + + # The rest of the payloads have full blobs + NewPayloads( + payloadCount: 1, + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + + name: "Blob Transaction Ordering, Multiple Accounts", + about: """ + Send N blob transactions with MAX_BLOBS_PER_BLOCK-1 blobs each, + using account A. + Send N blob transactions with 1 blob each from account B. + Verify that the payloads are created with the correct ordering: + - All payloads must have full blobs. + All transactions have sufficient data gas price to be included any + of the payloads. + """, + mainFork: Cancun, + ), + + testSequence: @[ + # First send the MAX_BLOBS_PER_BLOCK-1 blob transactions from + # account A. 
+ SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK - 1, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 0, + ), + # Then send the single-blob transactions from account B + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: 1, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 1, + ), + + # All payloads have full blobs + NewPayloads( + payloadCount: 5, + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + + name: "Blob Transaction Ordering, Multiple Clients", + about: """ + Send N blob transactions with MAX_BLOBS_PER_BLOCK-1 blobs each, + using account A, to client A. + Send N blob transactions with 1 blob each from account B, to client + B. + Verify that the payloads are created with the correct ordering: + - All payloads must have full blobs. + All transactions have sufficient data gas price to be included any + of the payloads. + """, + mainFork: Cancun, + ), + + testSequence: @[ + # Start a secondary client to also receive blob transactions + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + # Skip adding the second client to the CL Mock to guarantee + # that all payloads are produced by client A. + # This is done to not have client B prioritizing single-blob + # transactions to fill one single payload. + SkipAddingToCLMock: true, + ), + + # Create a block without any blobs to get past genesis + NewPayloads( + payloadCount: 1, + expectedIncludedBlobCount: 0, + ), + + # First send the MAX_BLOBS_PER_BLOCK-1 blob transactions from + # account A, to client A. + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK - 1, + blobTransactionMaxBlobGasCost: u256(120), + AccountIndex: 0, + ClientIndex: 0, + ), + # Then send the single-blob transactions from account B, to client + # B. + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: 1, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 1, + ClientIndex: 1, + ), + + # All payloads have full blobs + NewPayloads( + payloadCount: 5, + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + # Wait a bit more on before requesting the built payload from the client + GetPayloadDelay: 2, + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + + name: "Replace Blob Transactions", + about: """ + Test sending multiple blob transactions with the same nonce, but + higher gas tip so the transaction is replaced. + """, + mainFork: Cancun, + ), + + testSequence: @[ + # Send multiple blob transactions with the same nonce. + SendBlobTransactions( # Blob ID 0 + transactionCount: 1, + blobTransactionMaxBlobGasCost: u256(1), + BlobTransactionGasFeeCap: u256(1e9), + BlobTransactionGasTipCap: u256(1e9), + ), + SendBlobTransactions( # Blob ID 1 + transactionCount: 1, + blobTransactionMaxBlobGasCost: u256(1e2), + BlobTransactionGasFeeCap: u256(1e10), + BlobTransactionGasTipCap: u256(1e10), + ReplaceTransactions: true, + ), + SendBlobTransactions( # Blob ID 2 + transactionCount: 1, + blobTransactionMaxBlobGasCost: u256(1e3), + BlobTransactionGasFeeCap: u256(1e11), + BlobTransactionGasTipCap: u256(1e11), + ReplaceTransactions: true, + ), + SendBlobTransactions( # Blob ID 3 + transactionCount: 1, + blobTransactionMaxBlobGasCost: u256(1e4), + BlobTransactionGasFeeCap: u256(1e12), + BlobTransactionGasTipCap: u256(1e12), + ReplaceTransactions: true, + ), + + # We create the first payload, which must contain the blob tx + # with the higher tip. 
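+        # Every replacement above reuses the same nonce while raising the tip,
+        # fee cap and max blob gas cost by roughly an order of magnitude, so
+        # only the last replacement (carrying blob ID 3) should still be in the
+        # pool when this payload is built.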
+ NewPayloads( + expectedIncludedBlobCount: 1, + expectedBlobs: []helper.BlobID{3), + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + + name: "Parallel Blob Transactions", + about: """ + Test sending multiple blob transactions in parallel from different accounts. + + Verify that a payload is created with the maximum number of blobs. + """, + mainFork: Cancun, + ), + + testSequence: @[ + # Send multiple blob transactions with the same nonce. + ParallelSteps{ + Steps: []TestStep{ + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 0, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 1, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 2, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 3, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 4, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 5, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 6, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 7, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 8, + ), + SendBlobTransactions( + transactionCount: 5, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + AccountIndex: 9, + ), + ), + ), + + # We create the first payload, which is guaranteed to have the first MAX_BLOBS_PER_BLOCK blobs. + NewPayloads( + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, MAX_BLOBS_PER_BLOCK), + ), + ), + ), + + # ForkchoiceUpdatedV3 before cancun + TestDesc( + spec: CancunSpec( + + name: "ForkchoiceUpdatedV3 Set Head to Shanghai Payload, Nil Payload Attributes", + about: """ + Test sending ForkchoiceUpdatedV3 to set the head of the chain to a Shanghai payload: + - Send NewPayloadV2 with Shanghai payload on block 1 + - Use ForkchoiceUpdatedV3 to set the head to the payload, with nil payload attributes + + Verify that client returns no error. + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + FcUOnHeadSet: &helper.UpgradeForkchoiceUpdatedVersion{ + ForkchoiceUpdatedCustomizer: &helper.BaseForkchoiceUpdatedCustomizer{), + ), + ExpectationDescription: """ + ForkchoiceUpdatedV3 before Cancun returns no error without payload attributes + """, + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "ForkchoiceUpdatedV3 To Request Shanghai Payload, Nil Beacon Root", + about: """ + Test sending ForkchoiceUpdatedV3 to request a Shanghai payload: + - Payload Attributes uses Shanghai timestamp + - Payload Attributes' Beacon Root is nil + + Verify that client returns INVALID_PARAMS_ERROR. 
+ """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + FcUOnPayloadRequest: &helper.UpgradeForkchoiceUpdatedVersion{ + ForkchoiceUpdatedCustomizer: &helper.BaseForkchoiceUpdatedCustomizer{ + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + ForkchoiceUpdatedV3 before Cancun with any nil field must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "ForkchoiceUpdatedV3 To Request Shanghai Payload, Zero Beacon Root", + about: """ + Test sending ForkchoiceUpdatedV3 to request a Shanghai payload: + - Payload Attributes uses Shanghai timestamp + - Payload Attributes' Beacon Root zero + + Verify that client returns UNSUPPORTED_FORK_ERROR. + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + FcUOnPayloadRequest: &helper.UpgradeForkchoiceUpdatedVersion{ + ForkchoiceUpdatedCustomizer: &helper.BaseForkchoiceUpdatedCustomizer{ + PayloadAttributesCustomizer: &helper.BasePayloadAttributesCustomizer{ + BeaconRoot: &(common.Hash{}), + ), + ExpectedError: globals.UNSUPPORTED_FORK_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + ForkchoiceUpdatedV3 before Cancun with beacon root must return UNSUPPORTED_FORK_ERROR (code %d) + """, *globals.UNSUPPORTED_FORK_ERROR), + ), + ), + ), + + # ForkchoiceUpdatedV2 before cancun with beacon root + TestDesc( + spec: CancunSpec( + + name: "ForkchoiceUpdatedV2 To Request Shanghai Payload, Zero Beacon Root", + about: """ + Test sending ForkchoiceUpdatedV2 to request a Cancun payload: + - Payload Attributes uses Shanghai timestamp + - Payload Attributes' Beacon Root zero + + Verify that client returns INVALID_PARAMS_ERROR. + """, + mainFork: Cancun, + forkHeight: 1, + ), + + testSequence: @[ + NewPayloads( + FcUOnPayloadRequest: &helper.DowngradeForkchoiceUpdatedVersion{ + ForkchoiceUpdatedCustomizer: &helper.BaseForkchoiceUpdatedCustomizer{ + PayloadAttributesCustomizer: &helper.BasePayloadAttributesCustomizer{ + BeaconRoot: &(common.Hash{}), + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + ForkchoiceUpdatedV2 before Cancun with beacon root field must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + + # ForkchoiceUpdatedV2 after cancun + TestDesc( + spec: CancunSpec( + + name: "ForkchoiceUpdatedV2 To Request Cancun Payload, Zero Beacon Root", + about: """ + Test sending ForkchoiceUpdatedV2 to request a Cancun payload: + - Payload Attributes uses Cancun timestamp + - Payload Attributes' Beacon Root zero + + Verify that client returns INVALID_PARAMS_ERROR. + """, + mainFork: Cancun, + forkHeight: 1, + ), + + testSequence: @[ + NewPayloads( + FcUOnPayloadRequest: &helper.DowngradeForkchoiceUpdatedVersion{ + ForkchoiceUpdatedCustomizer: &helper.BaseForkchoiceUpdatedCustomizer{ + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + ForkchoiceUpdatedV2 after Cancun with beacon root field must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "ForkchoiceUpdatedV2 To Request Cancun Payload, Nil Beacon Root", + about: """ + Test sending ForkchoiceUpdatedV2 to request a Cancun payload: + - Payload Attributes uses Cancun timestamp + - Payload Attributes' Beacon Root nil (not provided) + + Verify that client returns UNSUPPORTED_FORK_ERROR. 
+ """, + mainFork: Cancun, + forkHeight: 1, + ), + + testSequence: @[ + NewPayloads( + FcUOnPayloadRequest: &helper.DowngradeForkchoiceUpdatedVersion{ + ForkchoiceUpdatedCustomizer: &helper.BaseForkchoiceUpdatedCustomizer{ + PayloadAttributesCustomizer: &helper.BasePayloadAttributesCustomizer{ + RemoveBeaconRoot: true, + ), + ExpectedError: globals.UNSUPPORTED_FORK_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + ForkchoiceUpdatedV2 after Cancun must return UNSUPPORTED_FORK_ERROR (code %d) + """, *globals.UNSUPPORTED_FORK_ERROR), + ), + ), + ), + + # ForkchoiceUpdatedV3 with modified BeaconRoot Attribute + TestDesc( + spec: CancunSpec( + + name: "ForkchoiceUpdatedV3 Modifies Payload ID on Different Beacon Root", + about: """ + Test requesting a Cancun Payload using ForkchoiceUpdatedV3 twice with the beacon root + payload attribute as the only change between requests and verify that the payload ID is + different. + """, + mainFork: Cancun, + ), + + testSequence: @[ + SendBlobTransactions( + transactionCount: 1, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + ), + NewPayloads( + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + FcUOnPayloadRequest: &helper.BaseForkchoiceUpdatedCustomizer{ + PayloadAttributesCustomizer: &helper.BasePayloadAttributesCustomizer{ + BeaconRoot: &(common.Hash{}), + ), + ), + ), + SendBlobTransactions( + transactionCount: 1, + blobsPerTransaction: MAX_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(100), + ), + NewPayloads( + expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK, + FcUOnPayloadRequest: &helper.BaseForkchoiceUpdatedCustomizer{ + PayloadAttributesCustomizer: &helper.BasePayloadAttributesCustomizer{ + BeaconRoot: &(common.Hash{1}), + ), + ), + ), + ), + ), + + # GetPayloadV3 Before Cancun, Negative Tests + TestDesc( + spec: CancunSpec( + + name: "GetPayloadV3 To Request Shanghai Payload", + about: """ + Test requesting a Shanghai PayloadID using GetPayloadV3. + Verify that client returns UNSUPPORTED_FORK_ERROR. + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + GetPayloadCustomizer: &helper.UpgradeGetPayloadVersion{ + GetPayloadCustomizer: &helper.BaseGetPayloadCustomizer{ + ExpectedError: globals.UNSUPPORTED_FORK_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + GetPayloadV3 To Request Shanghai Payload must return UNSUPPORTED_FORK_ERROR (code %d) + """, *globals.UNSUPPORTED_FORK_ERROR), + ), + ), + ), + + # GetPayloadV2 After Cancun, Negative Tests + TestDesc( + spec: CancunSpec( + + name: "GetPayloadV2 To Request Cancun Payload", + about: """ + Test requesting a Cancun PayloadID using GetPayloadV2. + Verify that client returns UNSUPPORTED_FORK_ERROR. 
+ """, + mainFork: Cancun, + forkHeight: 1, + ), + + testSequence: @[ + NewPayloads( + GetPayloadCustomizer: &helper.DowngradeGetPayloadVersion{ + GetPayloadCustomizer: &helper.BaseGetPayloadCustomizer{ + ExpectedError: globals.UNSUPPORTED_FORK_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + GetPayloadV2 To Request Cancun Payload must return UNSUPPORTED_FORK_ERROR (code %d) + """, *globals.UNSUPPORTED_FORK_ERROR), + ), + ), + ), + + # NewPayloadV3 Before Cancun, Negative Tests + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Before Cancun, Nil Data Fields, Nil Versioned Hashes, Nil Beacon Root", + about: """ + Test sending NewPayloadV3 Before Cancun with: + - nil ExcessBlobGas + - nil BlobGasUsed + - nil Versioned Hashes Array + - nil Beacon Root + + Verify that client returns INVALID_PARAMS_ERROR + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.UpgradeNewPayloadVersion{ + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: nil, + ), + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 before Cancun with any nil field must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Before Cancun, Nil ExcessBlobGas, 0x00 BlobGasUsed, Nil Versioned Hashes, Nil Beacon Root", + about: """ + Test sending NewPayloadV3 Before Cancun with: + - nil ExcessBlobGas + - 0x00 BlobGasUsed + - nil Versioned Hashes Array + - nil Beacon Root + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.UpgradeNewPayloadVersion{ + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + BlobGasUsed: pUint64(0), + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 before Cancun with any nil field must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Before Cancun, 0x00 ExcessBlobGas, Nil BlobGasUsed, Nil Versioned Hashes, Nil Beacon Root", + about: """ + Test sending NewPayloadV3 Before Cancun with: + - 0x00 ExcessBlobGas + - nil BlobGasUsed + - nil Versioned Hashes Array + - nil Beacon Root + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.UpgradeNewPayloadVersion{ + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + ExcessBlobGas: pUint64(0), + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 before Cancun with any nil field must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Before Cancun, Nil Data Fields, Empty Array Versioned Hashes, Nil Beacon Root", + about: """ + Test sending NewPayloadV3 Before Cancun with: + - nil ExcessBlobGas + - nil BlobGasUsed + - Empty Versioned Hashes Array + - nil Beacon Root + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.UpgradeNewPayloadVersion{ + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + 
payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: []helper.BlobID{), + ), + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 before Cancun with any nil field must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Before Cancun, Nil Data Fields, Nil Versioned Hashes, Zero Beacon Root", + about: """ + Test sending NewPayloadV3 Before Cancun with: + - nil ExcessBlobGas + - nil BlobGasUsed + - nil Versioned Hashes Array + - Zero Beacon Root + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.UpgradeNewPayloadVersion{ + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + ParentBeaconRoot: &(common.Hash{}), + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 before Cancun with any nil field must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Before Cancun, 0x00 Data Fields, Empty Array Versioned Hashes, Zero Beacon Root", + about: """ + Test sending NewPayloadV3 Before Cancun with: + - 0x00 ExcessBlobGas + - 0x00 BlobGasUsed + - Empty Versioned Hashes Array + - Zero Beacon Root + """, + mainFork: Cancun, + forkHeight: 2, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.UpgradeNewPayloadVersion{ + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + ExcessBlobGas: pUint64(0), + BlobGasUsed: pUint64(0), + ParentBeaconRoot: &(common.Hash{}), + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: []helper.BlobID{), + ), + ), + ExpectedError: globals.UNSUPPORTED_FORK_ERROR, + ), + ), + ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 before Cancun with no nil fields must return UNSUPPORTED_FORK_ERROR (code %d) + """, *globals.UNSUPPORTED_FORK_ERROR), + ), + ), + ), + + # NewPayloadV3 After Cancun, Negative Tests + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 After Cancun, Nil ExcessBlobGas, 0x00 BlobGasUsed, Empty Array Versioned Hashes, Zero Beacon Root", + about: """ + Test sending NewPayloadV3 After Cancun with: + - nil ExcessBlobGas + - 0x00 BlobGasUsed + - Empty Versioned Hashes Array + - Zero Beacon Root + """, + mainFork: Cancun, + forkHeight: 1, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + RemoveExcessBlobGas: true, + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 after Cancun with nil ExcessBlobGas must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 After Cancun, 0x00 ExcessBlobGas, Nil BlobGasUsed, Empty Array Versioned Hashes", + about: """ + Test sending NewPayloadV3 After Cancun with: + - 0x00 ExcessBlobGas + - nil BlobGasUsed + - Empty Versioned Hashes Array + """, + mainFork: Cancun, + forkHeight: 1, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + RemoveBlobGasUsed: true, + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + 
ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 after Cancun with nil BlobGasUsed must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 After Cancun, 0x00 Blob Fields, Empty Array Versioned Hashes, Nil Beacon Root", + about: """ + Test sending NewPayloadV3 After Cancun with: + - 0x00 ExcessBlobGas + - nil BlobGasUsed + - Empty Versioned Hashes Array + """, + mainFork: Cancun, + forkHeight: 1, + ), + + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + RemoveParentBeaconRoot: true, + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ExpectationDescription: fmt.Sprintf(""" + NewPayloadV3 after Cancun with nil parentBeaconBlockRoot must return INVALID_PARAMS_ERROR (code %d) + """, *globals.INVALID_PARAMS_ERROR), + ), + ), + ), + + # Fork time tests + TestDesc( + spec: CancunSpec( + + name: "ForkchoiceUpdatedV2 then ForkchoiceUpdatedV3 Valid Payload Building Requests", + about: """ + Test requesting a Shanghai ForkchoiceUpdatedV2 payload followed by a Cancun ForkchoiceUpdatedV3 request. + Verify that client correctly returns the Cancun payload. + """, + mainFork: Cancun, + # We request two blocks from the client, first on shanghai and then on cancun, both with + # the same parent. + # Client must respond correctly to later request. + forkHeight: 1, + BlockTimestampIncrement: 2, + ), + + testSequence: @[ + # First, we send a couple of blob transactions on genesis, + # with enough data gas cost to make sure they are included in the first block. + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + # This customizer only simulates requesting a Shanghai payload 1 second before cancun. + # CL Mock will still request the Cancun payload afterwards + FcUOnPayloadRequest: &helper.BaseForkchoiceUpdatedCustomizer{ + PayloadAttributesCustomizer: &helper.TimestampDeltaPayloadAttributesCustomizer{ + PayloadAttributesCustomizer: &helper.BasePayloadAttributesCustomizer{ + RemoveBeaconRoot: true, + ), + TimestampDelta: -1, + ), + ), + ExpectationDescription: """ + ForkchoiceUpdatedV3 must construct transaction with blob payloads even if a ForkchoiceUpdatedV2 was previously requested + """, + ), + ), + ), + + # Test versioned hashes in Engine API NewPayloadV3 + TestDesc( + spec: CancunSpec( + + + name: "NewPayloadV3 Versioned Hashes, Missing Hash", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is missing one of the hashes. 
+      """,
+      mainFork: Cancun,
+    ),
+    testSequence: @[
+      SendBlobTransactions(
+        transactionCount: TARGET_BLOBS_PER_BLOCK,
+        blobTransactionMaxBlobGasCost: u256(1),
+      ),
+      NewPayloads(
+        expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK,
+        expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK),
+        NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{
+          payloadCustomizer: CustomPayloadData(
+            VersionedHashesCustomizer: &VersionedHashes{
+              Blobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK-1),
+            ),
+          ),
+          ExpectInvalidStatus: true,
+        ),
+        ExpectationDescription: """
+        NewPayloadV3 with incorrect list of versioned hashes must return INVALID status
+        """,
+      ),
+    ),
+  ),
+  TestDesc(
+    spec: CancunSpec(
+
+
+      name: "NewPayloadV3 Versioned Hashes, Extra Hash",
+      about: """
+      Tests VersionedHashes in Engine API NewPayloadV3 where the array
+      has an extra hash for a blob that is not in the payload.
+      """,
+      mainFork: Cancun,
+    ),
+    # TODO: It could be worth it to also test this with a blob that is in the
+    # mempool but was not included in the payload.
+    testSequence: @[
+      SendBlobTransactions(
+        transactionCount: TARGET_BLOBS_PER_BLOCK,
+        blobTransactionMaxBlobGasCost: u256(1),
+      ),
+      NewPayloads(
+        expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK,
+        expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK),
+        NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{
+          payloadCustomizer: CustomPayloadData(
+            VersionedHashesCustomizer: &VersionedHashes{
+              Blobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK+1),
+            ),
+          ),
+          ExpectInvalidStatus: true,
+        ),
+        ExpectationDescription: """
+        NewPayloadV3 with incorrect list of versioned hashes must return INVALID status
+        """,
+      ),
+    ),
+  ),
+
+  TestDesc(
+    spec: CancunSpec(
+
+      name: "NewPayloadV3 Versioned Hashes, Out of Order",
+      about: """
+      Tests VersionedHashes in Engine API NewPayloadV3 where the array
+      is out of order.
+      """,
+      mainFork: Cancun,
+    ),
+    testSequence: @[
+      SendBlobTransactions(
+        transactionCount: TARGET_BLOBS_PER_BLOCK,
+        blobTransactionMaxBlobGasCost: u256(1),
+      ),
+      NewPayloads(
+        expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK,
+        expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK),
+        NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{
+          payloadCustomizer: CustomPayloadData(
+            VersionedHashesCustomizer: &VersionedHashes{
+              Blobs: getBlobListByIndex(helper.BlobID(TARGET_BLOBS_PER_BLOCK-1), 0),
+            ),
+          ),
+          ExpectInvalidStatus: true,
+        ),
+        ExpectationDescription: """
+        NewPayloadV3 with incorrect list of versioned hashes must return INVALID status
+        """,
+      ),
+    ),
+  ),
+
+  TestDesc(
+    spec: CancunSpec(
+
+      name: "NewPayloadV3 Versioned Hashes, Repeated Hash",
+      about: """
+      Tests VersionedHashes in Engine API NewPayloadV3 where the array
+      has a blob that is repeated in the array.
+ """, + mainFork: Cancun, + ), + testSequence: @[ + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: append(getBlobList(0, TARGET_BLOBS_PER_BLOCK), helper.BlobID(TARGET_BLOBS_PER_BLOCK-1)), + ), + ), + ExpectInvalidStatus: true, + ), + ExpectationDescription: """ + NewPayloadV3 with incorrect list of versioned hashes must return INVALID status + """, + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Incorrect Hash", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + has a blob hash that does not belong to any blob contained in the payload. + """, + mainFork: Cancun, + ), + testSequence: @[ + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: append(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1), helper.BlobID(TARGET_BLOBS_PER_BLOCK)), + ), + ), + ExpectInvalidStatus: true, + ), + ExpectationDescription: """ + NewPayloadV3 with incorrect hash in list of versioned hashes must return INVALID status + """, + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Incorrect Version", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + has a single blob that has an incorrect version. + """, + mainFork: Cancun, + ), + testSequence: @[ + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + HashVersions: []byte{VERSIONED_HASH_VERSION_KZG, VERSIONED_HASH_VERSION_KZG + 1), + ), + ), + ExpectInvalidStatus: true, + ), + ExpectationDescription: """ + NewPayloadV3 with incorrect version in list of versioned hashes must return INVALID status + """, + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Nil Hashes", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is nil, even though the fork has already happened. 
+ """, + mainFork: Cancun, + ), + testSequence: @[ + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: nil, + ), + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ExpectationDescription: """ + NewPayloadV3 after Cancun with nil VersionedHashes must return INVALID_PARAMS_ERROR (code -32602) + """, + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Empty Hashes", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is empty, even though there are blobs in the payload. + """, + mainFork: Cancun, + ), + testSequence: @[ + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: []helper.BlobID{), + ), + ), + ExpectInvalidStatus: true, + ), + ExpectationDescription: """ + NewPayloadV3 with incorrect list of versioned hashes must return INVALID status + """, + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Non-Empty Hashes", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is contains hashes, even though there are no blobs in the payload. + """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads( + expectedBlobs: []helper.BlobID{), + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: []helper.BlobID{0), + ), + ), + ExpectInvalidStatus: true, + ), + ExpectationDescription: """ + NewPayloadV3 with incorrect list of versioned hashes must return INVALID status + """, + ), + ), + ), + + # Test versioned hashes in Engine API NewPayloadV3 on syncing clients + TestDesc( + spec: CancunSpec( + + + name: "NewPayloadV3 Versioned Hashes, Missing Hash (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is missing one of the hashes. 
+ """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK-1), + ), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + + name: "NewPayloadV3 Versioned Hashes, Extra Hash (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is has an extra hash for a blob that is not in the payload. + """, + mainFork: Cancun, + ), + # TODO: It could be worth it to also test this with a blob that is in the + # mempool but was not included in the payload. + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK+1), + ), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Out of Order (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is out of order. + """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: getBlobListByIndex(helper.BlobID(TARGET_BLOBS_PER_BLOCK-1), 0), + ), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Repeated Hash (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + has a blob that is repeated in the array. 
+ """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: append(getBlobList(0, TARGET_BLOBS_PER_BLOCK), helper.BlobID(TARGET_BLOBS_PER_BLOCK-1)), + ), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Incorrect Hash (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + has a blob that is repeated in the array. + """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: append(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1), helper.BlobID(TARGET_BLOBS_PER_BLOCK)), + ), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Incorrect Version (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + has a single blob that has an incorrect version. + """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + HashVersions: []byte{VERSIONED_HASH_VERSION_KZG, VERSIONED_HASH_VERSION_KZG + 1), + ), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Nil Hashes (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is nil, even though the fork has already happened. 
+ """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: nil, + ), + ), + ExpectedError: globals.INVALID_PARAMS_ERROR, + ), + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Empty Hashes (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is empty, even though there are blobs in the payload. + """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + SendBlobTransactions( + transactionCount: TARGET_BLOBS_PER_BLOCK, + blobTransactionMaxBlobGasCost: u256(1), + ), + NewPayloads( + expectedIncludedBlobCount: TARGET_BLOBS_PER_BLOCK, + expectedBlobs: getBlobList(0, TARGET_BLOBS_PER_BLOCK), + ), + + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: []helper.BlobID{), + ), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + + TestDesc( + spec: CancunSpec( + + name: "NewPayloadV3 Versioned Hashes, Non-Empty Hashes (Syncing)", + about: """ + Tests VersionedHashes in Engine API NewPayloadV3 where the array + is contains hashes, even though there are no blobs in the payload. + """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads(), # Send new payload so the parent is unknown to the secondary client + NewPayloads( + expectedBlobs: []helper.BlobID{), + ), + + LaunchClients{ + EngineStarter: hive_rpc.HiveRPCEngineStarter{), + SkipAddingToCLMock: true, + SkipConnectingToBootnode: true, # So the client is in a perpetual syncing state + ), + SendModifiedLatestPayload{ + ClientID: 1, + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + VersionedHashesCustomizer: &VersionedHashes{ + Blobs: []helper.BlobID{0), + ), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + + # BlobGasUsed, ExcessBlobGas Negative Tests + # Most cases are contained in https:#github.com/ethereum/execution-spec-tests/tree/main/tests/cancun/eip4844_blobs + # and can be executed using """pyspec""" simulator. + TestDesc( + spec: CancunSpec( + + name: "Incorrect BlobGasUsed: Non-Zero on Zero Blobs", + about: """ + Send a payload with zero blobs, but non-zero BlobGasUsed. 
+ """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + BlobGasUsed: pUint64(1), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + TestDesc( + spec: CancunSpec( + + + name: "Incorrect BlobGasUsed: GAS_PER_BLOB on Zero Blobs", + about: """ + Send a payload with zero blobs, but non-zero BlobGasUsed. + """, + mainFork: Cancun, + ), + testSequence: @[ + NewPayloads( + NewPayloadCustomizer: &helper.BaseNewPayloadVersionCustomizer{ + payloadCustomizer: CustomPayloadData( + BlobGasUsed: pUint64(cancun.GAS_PER_BLOB), + ), + ExpectInvalidStatus: true, + ), + ), + ), + ), + + # DevP2P tests + TestDesc( + spec: CancunSpec( + + name: "Request Blob Pooled Transactions", + about: """ + Requests blob pooled transactions and verify correct encoding. + """, + mainFork: Cancun, + ), + testSequence: @[ + # Get past the genesis + NewPayloads( + payloadCount: 1, + ), + # Send multiple transactions with multiple blobs each + SendBlobTransactions( + transactionCount: 1, + blobTransactionMaxBlobGasCost: u256(1), + ), + DevP2PRequestPooledTransactionHash{ + ClientIndex: 0, + TransactionIndexes: []uint64{0), + WaitForNewPooledTransaction: true, + ), + ), + ), +} + +var EngineAPITests []test.Spec + +func init() { + # Append all engine api tests with Cancun as main fork + for _, test := range suite_engine.Tests { + Tests = append(Tests, test.WithMainFork(Cancun)) + } + + # Cancun specific variants for pre-existing tests + baseSpec := test.BaseSpec{ + mainFork: Cancun, + } + onlyBlobTxsSpec := test.BaseSpec{ + mainFork: Cancun, + TestTransactionType: helper.BlobTxOnly, + } + + # Payload Attributes + for _, t := range []suite_engine.InvalidPayloadAttributesTest{ + { + BaseSpec: baseSpec, + Description: "Missing BeaconRoot", + Customizer: &helper.BasePayloadAttributesCustomizer{ + RemoveBeaconRoot: true, + ), + # Error is expected on syncing because V3 checks all fields to be present + ErrorOnSync: true, + ), + } { + Tests = append(Tests, t) + t.Syncing = true + Tests = append(Tests, t) + } + + # Unique Payload ID Tests + for _, t := range []suite_engine.PayloadAttributesFieldChange{ + suite_engine.PayloadAttributesParentBeaconRoot, + # TODO: Remove when withdrawals suite is refactored + suite_engine.PayloadAttributesAddWithdrawal, + suite_engine.PayloadAttributesModifyWithdrawalAmount, + suite_engine.PayloadAttributesModifyWithdrawalIndex, + suite_engine.PayloadAttributesModifyWithdrawalValidator, + suite_engine.PayloadAttributesModifyWithdrawalAddress, + suite_engine.PayloadAttributesRemoveWithdrawal, + } { + Tests = append(Tests, suite_engine.UniquePayloadIDTest{ + BaseSpec: baseSpec, + FieldModification: t, + }) + } + + # Invalid Payload Tests + for _, invalidField := range []helper.InvalidPayloadBlockField{ + helper.InvalidParentBeaconBlockRoot, + helper.InvalidBlobGasUsed, + helper.InvalidBlobCountGasUsed, + helper.InvalidExcessBlobGas, + helper.InvalidVersionedHashes, + helper.InvalidVersionedHashesVersion, + helper.IncompleteVersionedHashes, + helper.ExtraVersionedHashes, + } { + for _, syncing := range []bool{false, true} { + # Invalidity of payload can be detected even when syncing because the + # blob gas only depends on the transactions contained. 
+ invalidDetectedOnSync := (invalidField == helper.InvalidBlobGasUsed || + invalidField == helper.InvalidBlobCountGasUsed || + invalidField == helper.InvalidVersionedHashes || + invalidField == helper.InvalidVersionedHashesVersion || + invalidField == helper.IncompleteVersionedHashes || + invalidField == helper.ExtraVersionedHashes) + + nilLatestValidHash := (invalidField == helper.InvalidVersionedHashes || + invalidField == helper.InvalidVersionedHashesVersion || + invalidField == helper.IncompleteVersionedHashes || + invalidField == helper.ExtraVersionedHashes) + + Tests = append(Tests, suite_engine.InvalidPayloadTestCase{ + BaseSpec: onlyBlobTxsSpec, + InvalidField: invalidField, + Syncing: syncing, + InvalidDetectedOnSync: invalidDetectedOnSync, + NilLatestValidHash: nilLatestValidHash, + }) + } + } + + # Invalid Transaction ChainID Tests + Tests = append(Tests, + suite_engine.InvalidTxChainIDTest{ + BaseSpec: onlyBlobTxsSpec, + ), + ) + + Tests = append(Tests, suite_engine.PayloadBuildAfterInvalidPayloadTest{ + BaseSpec: onlyBlobTxsSpec, + InvalidField: helper.InvalidParentBeaconBlockRoot, + }) + + # Suggested Fee Recipient Tests (New Transaction Type) + Tests = append(Tests, + suite_engine.SuggestedFeeRecipientTest{ + BaseSpec: onlyBlobTxsSpec, + transactionCount: 1, # Only one blob tx gets through due to blob gas limit + ), + ) + # Prev Randao Tests (New Transaction Type) + Tests = append(Tests, + suite_engine.PrevRandaoTransactionTest{ + BaseSpec: onlyBlobTxsSpec, + ), + ) +} +]# \ No newline at end of file diff --git a/hive_integration/nodocker/engine/clmock.nim b/hive_integration/nodocker/engine/clmock.nim index 39dd54aec..81a845387 100644 --- a/hive_integration/nodocker/engine/clmock.nim +++ b/hive_integration/nodocker/engine/clmock.nim @@ -2,7 +2,7 @@ import std/[tables], chronicles, nimcrypto/sysrand, - stew/[byteutils, endians2], + stew/[byteutils], eth/common, chronos, json_rpc/rpcclient, ../../../nimbus/beacon/execution_types, @@ -12,7 +12,8 @@ import ../../../nimbus/common as nimbus_common, ./client_pool, ./engine_env, - ./engine_client + ./engine_client, + ./types import web3/engine_api_types except Hash256 # conflict with the one from eth/common @@ -24,9 +25,10 @@ type # Number of required slots before a block which was set as Head moves to `safe` and `finalized` respectively slotsToSafe* : int slotsToFinalized*: int + safeSlotsToImportOptimistically*: int # Wait time before attempting to get the payload - payloadProductionClientDelay: int + payloadProductionClientDelay*: int # Block production related blockTimestampIncrement*: Option[int] @@ -52,6 +54,7 @@ type latestPayloadBuilt* : ExecutionPayload latestBlockValue* : Option[UInt256] latestBlobsBundle* : Option[BlobsBundleV1] + latestShouldOverrideBuilder*: Option[bool] latestPayloadAttributes*: PayloadAttributes latestExecutedPayload* : ExecutionPayload latestForkchoice* : ForkchoiceStateV1 @@ -60,7 +63,6 @@ type firstPoSBlockNumber : Option[uint64] ttdReached* : bool transitionPayloadTimestamp: Option[int] - safeSlotsToImportOptimistically: int chainTotalDifficulty : UInt256 # Shanghai related @@ -68,6 +70,7 @@ type BlockProcessCallbacks* = object onPayloadProducerSelected* : proc(): bool {.gcsafe.} + onPayloadAttributesGenerated* : proc(): bool {.gcsafe.} onRequestNextPayload* : proc(): bool {.gcsafe.} onGetPayload* : proc(): bool {.gcsafe.} onNewPayloadBroadcast* : proc(): bool {.gcsafe.} @@ -100,7 +103,7 @@ proc init(cl: CLMocker, eng: EngineEnv, com: CommonRef) = cl.com = com cl.slotsToSafe = 1 
cl.slotsToFinalized = 2 - cl.payloadProductionClientDelay = 1 + cl.payloadProductionClientDelay = 0 cl.headerHistory[0] = com.genesisHeader() proc newClMocker*(eng: EngineEnv, com: CommonRef): CLMocker = @@ -179,11 +182,6 @@ func getNextBlockTimestamp(cl: CLMocker): EthTime = func setNextWithdrawals(cl: CLMocker, nextWithdrawals: Option[seq[WithdrawalV1]]) = cl.nextWithdrawals = nextWithdrawals -func timestampToBeaconRoot(timestamp: Quantity): FixedBytes[32] = - # Generates a deterministic hash from the timestamp - let h = keccakHash(timestamp.uint64.toBytesBE) - FixedBytes[32](h.data) - func isShanghai(cl: CLMocker, timestamp: Quantity): bool = let ts = EthTime(timestamp.uint64) cl.com.isShanghaiOrLater(ts) @@ -222,7 +220,7 @@ proc pickNextPayloadProducer(cl: CLMocker): bool = doAssert cl.nextBlockProducer != nil return true -proc requestNextPayload(cl: CLMocker): bool = +proc generatePayloadAttributes(cl: CLMocker) = # Generate a random value for the PrevRandao field var nextPrevRandao: common.Hash256 doAssert randomBytes(nextPrevRandao.data) == 32 @@ -246,6 +244,7 @@ proc requestNextPayload(cl: CLMocker): bool = let number = cl.latestHeader.blockNumber.truncate(uint64) + 1 cl.prevRandaoHistory[number] = nextPrevRandao +proc requestNextPayload(cl: CLMocker): bool = let version = cl.latestPayloadAttributes.version let client = cl.nextBlockProducer.client let res = client.forkchoiceUpdated(version, cl.latestForkchoice, some(cl.latestPayloadAttributes)) @@ -290,6 +289,7 @@ proc getNextPayload(cl: CLMocker): bool = cl.latestPayloadBuilt = x.executionPayload cl.latestBlockValue = x.blockValue cl.latestBlobsBundle = x.blobsBundle + cl.latestShouldOverrideBuilder = x.shouldOverrideBuilder let beaconRoot = ethHash cl.latestPayloadAttributes.parentBeaconblockRoot let header = blockHeader(cl.latestPayloadBuilt, beaconRoot) @@ -333,7 +333,7 @@ proc getNextPayload(cl: CLMocker): bool = return true func versionedHashes(bb: BlobsBundleV1): seq[Web3Hash] = - doAssert(bb.commitments.len > 0) + #doAssert(bb.commitments.len > 0) result = newSeqOfCap[BlockHash](bb.commitments.len) for com in bb.commitments: @@ -481,6 +481,12 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe if not cb.onPayloadProducerSelected(): return false + cl.generatePayloadAttributes() + + if cb.onPayloadAttributesGenerated != nil: + if not cb.onPayloadAttributesGenerated(): + return false + if not cl.requestNextPayload(): return false @@ -491,7 +497,9 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe return false # Give the client a delay between getting the payload ID and actually retrieving the payload - #time.Sleep(PayloadProductionClientDelay) + if cl.payloadProductionClientDelay != 0: + let period = chronos.seconds(cl.payloadProductionClientDelay) + waitFor sleepAsync(period) if not cl.getNextPayload(): return false diff --git a/hive_integration/nodocker/engine/engine/engine_spec.nim b/hive_integration/nodocker/engine/engine/engine_spec.nim index 1d113a37c..c3cea99eb 100644 --- a/hive_integration/nodocker/engine/engine/engine_spec.nim +++ b/hive_integration/nodocker/engine/engine/engine_spec.nim @@ -16,8 +16,6 @@ type exec*: proc(env: TestEnv): bool ttd*: int64 chainFile*: string - slotsToFinalized*: int - slotsToSafe*: int template testNP(res, cond: untyped, validHash = none(common.Hash256)) = testCond res.isOk diff --git a/hive_integration/nodocker/engine/engine_client.nim b/hive_integration/nodocker/engine/engine_client.nim index 042ee3de3..02bb6e7be 100644 
--- a/hive_integration/nodocker/engine/engine_client.nim +++ b/hive_integration/nodocker/engine/engine_client.nim @@ -81,7 +81,8 @@ proc getPayload*(client: RpcClient, ok(GetPayloadResponse( executionPayload: executionPayload(x.executionPayload), blockValue: some(x.blockValue), - blobsBundle: some(x.blobsBundle) + blobsBundle: some(x.blobsBundle), + shouldOverrideBuilder: some(x.shouldOverrideBuilder), )) elif version == Version.V2: let x = client.getPayloadV2(payloadId).valueOr: @@ -231,6 +232,7 @@ proc toBlockHeader(bc: eth_api.BlockObject): common.BlockHeader = withdrawalsRoot: bc.withdrawalsRoot, blobGasUsed : maybeU64(bc.blobGasUsed), excessBlobGas : maybeU64(bc.excessBlobGas), + parentBeaconBlockRoot: bc.parentBeaconBlockRoot, ) proc toTransactions(txs: openArray[JsonNode]): seq[Transaction] = @@ -293,7 +295,7 @@ type s*: UInt256 chainId*: Option[ChainId] accessList*: Option[seq[rpc_types.AccessTuple]] - maxFeePerBlobGas*: Option[GasInt] + maxFeePerBlobGas*: Option[UInt256] versionedHashes*: Option[VersionedHashes] proc toRPCReceipt(rec: eth_api.ReceiptObject): RPCReceipt = @@ -336,7 +338,7 @@ proc toRPCTx(tx: eth_api.TransactionObject): RPCTx = s: UInt256.fromHex(string tx.s), chainId: maybeChainId(tx.chainId), accessList: tx.accessList, - maxFeePerBlobGas: maybeInt64(tx.maxFeePerBlobGas), + maxFeePerBlobGas: maybeU256(tx.maxFeePerBlobGas), versionedHashes: tx.versionedHashes, ) diff --git a/hive_integration/nodocker/engine/engine_sim.nim b/hive_integration/nodocker/engine/engine_sim.nim index 000ee0199..16ec93596 100644 --- a/hive_integration/nodocker/engine/engine_sim.nim +++ b/hive_integration/nodocker/engine/engine_sim.nim @@ -1,19 +1,24 @@ import std/times, + chronicles, + stew/results, ./types, - ../sim_utils + ../sim_utils, + ../../../nimbus/core/eip4844 import ./engine_tests, ./auths_tests, ./exchange_cap_tests, - ./withdrawal_tests + ./withdrawal_tests, + ./cancun_tests proc combineTests(): seq[TestDesc] = result.add wdTestList result.add ecTestList result.add authTestList result.add engineTestList + result.add cancunTestList let testList = combineTests() @@ -22,6 +27,11 @@ proc main() = var stat: SimStat let start = getTime() + let res = loadKzgTrustedSetup() + if res.isErr: + fatal "Cannot load baked in Kzg trusted setup", msg=res.error + quit(QuitFailure) + for x in testList: let status = if x.run(x.spec): TestStatus.OK diff --git a/hive_integration/nodocker/engine/engine_tests.nim b/hive_integration/nodocker/engine/engine_tests.nim index de9d3d35e..0b4acefbd 100644 --- a/hive_integration/nodocker/engine/engine_tests.nim +++ b/hive_integration/nodocker/engine/engine_tests.nim @@ -1,7 +1,8 @@ import ./engine/engine_spec, ./types, - ./test_env + ./test_env, + ./base_spec proc specExecute(ws: BaseSpec): bool = var @@ -10,12 +11,7 @@ proc specExecute(ws: BaseSpec): bool = env.engine.setRealTTD(ws.ttd) env.setupCLMock() - - if ws.slotsToFinalized != 0: - env.slotsToFinalized(ws.slotsToFinalized) - if ws.slotsToSafe != 0: - env.slotsToSafe(ws.slotsToSafe) - + ws.configureCLMock(env.clMock) result = ws.exec(env) env.close() diff --git a/hive_integration/nodocker/engine/test_env.nim b/hive_integration/nodocker/engine/test_env.nim index a83e59c4c..e5d1f3310 100644 --- a/hive_integration/nodocker/engine/test_env.nim +++ b/hive_integration/nodocker/engine/test_env.nim @@ -1,6 +1,7 @@ import chronicles, eth/keys, + stew/results, json_rpc/rpcclient, ../../../nimbus/config, ../../../nimbus/common, @@ -90,6 +91,15 @@ proc addEngine*(env: TestEnv, addToCL: bool = true): EngineEnv = 
env.clMock.addEngine(eng) eng +func engines*(env: TestEnv, idx: int): EngineEnv = + env.clients[idx] + +func numEngines*(env: TestEnv): int = + env.clients.len + +func accounts*(env: TestEnv, idx: int): TestAccount = + env.sender.getAccount(idx) + proc makeTx*(env: TestEnv, tc: BaseTx, nonce: AccountNonce): Transaction = env.sender.makeTx(tc, nonce) @@ -132,6 +142,12 @@ proc sendTx*(env: TestEnv, tx: Transaction): bool = let client = env.engine.client sendTx(client, tx) +proc sendTx*(env: TestEnv, sender: TestAccount, eng: EngineEnv, tc: BlobTx): Result[Transaction, void] = + env.sender.sendTx(sender, eng.client, tc) + +proc replaceTx*(env: TestEnv, sender: TestAccount, eng: EngineEnv, tc: BlobTx): Result[Transaction, void] = + env.sender.replaceTx(sender, eng.client, tc) + proc verifyPoWProgress*(env: TestEnv, lastBlockHash: common.Hash256): bool = let res = waitFor env.client.verifyPoWProgress(lastBlockHash) if res.isErr: @@ -139,9 +155,3 @@ proc verifyPoWProgress*(env: TestEnv, lastBlockHash: common.Hash256): bool = return false true - -proc slotsToSafe*(env: TestEnv, x: int) = - env.clMock.slotsToSafe = x - -proc slotsToFinalized*(env: TestEnv, x: int) = - env.clMock.slotsToFinalized = x diff --git a/hive_integration/nodocker/engine/tx_sender.nim b/hive_integration/nodocker/engine/tx_sender.nim index 844833803..b29e889d8 100644 --- a/hive_integration/nodocker/engine/tx_sender.nim +++ b/hive_integration/nodocker/engine/tx_sender.nim @@ -5,6 +5,7 @@ import nimcrypto/sha2, chronicles, ./engine_client, + ./cancun/blobs, ../../../nimbus/transaction, ../../../nimbus/common, ../../../nimbus/utils/utils @@ -22,7 +23,15 @@ type padByte* : uint8 initcode* : seq[byte] - TestAccount = object + # Blob transaction creator + BlobTx* = object of BaseTx + gasFee* : GasInt + gasTip* : GasInt + blobGasFee*: UInt256 + blobID* : BlobID + blobCount* : int + + TestAccount* = object key : PrivateKey address: EthAddress index : int @@ -38,8 +47,21 @@ type key* : PrivateKey nonce* : AccountNonce + CustomTransactionData* = object + nonce* : Option[uint64] + gasPriceOrGasFeeCap*: Option[GasInt] + gasTipCap* : Option[GasInt] + gas* : Option[GasInt] + to* : Option[common.EthAddress] + value* : Option[UInt256] + data* : Option[seq[byte]] + chainId* : Option[ChainId] + signature* : Option[UInt256] + const TestAccountCount = 1000 + gasPrice* = 30.gwei + gasTipPrice* = 1.gwei func toAddress(key: PrivateKey): EthAddress = toKeyPair(key).pubkey.toCanonicalAddress() @@ -67,6 +89,9 @@ proc getNextNonce(sender: TxSender, address: EthAddress): uint64 = sender.nonceMap[address] = nonce + 1 nonce +proc getLastNonce(sender: TxSender, address: EthAddress): uint64 = + sender.nonceMap.getOrDefault(address, 0'u64) + proc fillBalance(sender: TxSender, params: NetworkParams) = for x in sender.accounts: params.genesis.alloc[x.address] = GenesisAccount( @@ -89,9 +114,6 @@ proc getTxType(tc: BaseTx, nonce: uint64): TxType = proc makeTx(params: MakeTxParams, tc: BaseTx): Transaction = const - gasPrice = 30.gwei - gasTipPrice = 1.gwei - gasFeeCap = gasPrice gasTipCap = gasTipPrice @@ -212,3 +234,74 @@ proc sendTx*(client: RpcClient, tx: Transaction): bool = error "Unable to send transaction", msg=rr.error return false return true + +proc makeTx*(params: MakeTxParams, tc: BlobTx): Transaction = + # Need tx wrap data that will pass blob verification + let data = blobDataGenerator(tc.blobID, tc.blobCount) + doAssert(tc.recipient.isSome, "nil recipient address") + + # Collect fields for transaction + let + gasFeeCap = if tc.gasFee != 
0.GasInt: tc.gasFee + else: gasPrice + gasTipCap = if tc.gasTip != 0.GasInt: tc.gasTip + else: gasTipPrice + + let unsignedTx = Transaction( + txType : TxEIP4844, + chainId : params.chainId, + nonce : params.nonce, + maxPriorityFee: gasTipCap, + maxFee : gasFeeCap, + gasLimit : tc.gasLimit, + to : tc.recipient, + value : tc.amount, + payload : tc.payload, + maxFeePerBlobGas: tc.blobGasFee, + versionedHashes: data.hashes, + ) + + var tx = signTransaction(unsignedTx, params.key, params.chainId, eip155 = true) + tx.networkPayload = NetworkPayload( + blobs : data.blobs, + commitments: data.commitments, + proofs : data.proofs, + ) + + tx + +proc getAccount*(sender: TxSender, idx: int): TestAccount = + sender.accounts[idx] + +proc sendTx*(sender: TxSender, acc: TestAccount, client: RpcClient, tc: BlobTx): Result[Transaction, void] = + let + params = MakeTxParams( + chainId: sender.chainId, + key: acc.key, + nonce: sender.getNextNonce(acc.address), + ) + tx = params.makeTx(tc) + + let rr = client.sendTransaction(tx) + if rr.isErr: + error "Unable to send transaction", msg=rr.error + return err() + return ok(tx) + +proc replaceTx*(sender: TxSender, acc: TestAccount, client: RpcClient, tc: BlobTx): Result[Transaction, void] = + let + params = MakeTxParams( + chainId: sender.chainId, + key: acc.key, + nonce: sender.getLastNonce(acc.address), + ) + tx = params.makeTx(tc) + + let rr = client.sendTransaction(tx) + if rr.isErr: + error "Unable to send transaction", msg=rr.error + return err() + return ok(tx) + +proc customizeTransaction*(sender: TxSender, baseTx: Transaction, custTx: CustomTransactionData): Transaction = + discard diff --git a/hive_integration/nodocker/engine/types.nim b/hive_integration/nodocker/engine/types.nim index 6663c6618..329561fbb 100644 --- a/hive_integration/nodocker/engine/types.nim +++ b/hive_integration/nodocker/engine/types.nim @@ -1,7 +1,7 @@ import std/[options, typetraits, strutils], eth/common, - stew/byteutils, + stew/[byteutils, endians2], web3/ethtypes, web3/engine_api_types, ../../../nimbus/beacon/execution_types, @@ -11,6 +11,18 @@ type BaseSpec* = ref object of RootObj txType*: Option[TxType] + # CL Mocker configuration for slots to `safe` and `finalized` respectively + slotsToSafe*: int + slotsToFinalized*: int + safeSlotsToImportOptimistically*: int + blockTimestampIncrement*: int + timeoutSeconds*: int + mainFork*: string + genesisTimestamp*: int + forkHeight*: int + forkTime*: uint64 + previousForkTime*: uint64 + TestDesc* = object name* : string about*: string @@ -21,6 +33,29 @@ const DefaultTimeout* = 60 # seconds DefaultSleep* = 1 prevRandaoContractAddr* = hexToByteArray[20]("0000000000000000000000000000000000000316") + GenesisTimestamp* = 0x1234 + ForkParis* = "Paris" + ForkShanghai* = "Shanghai" + ForkCancun* = "Cancun" + +func toAddress*(x: UInt256): EthAddress = + var + mm = x.toByteArrayBE + x = 0 + for i in 12..31: + result[x] = mm[i] + inc x + +func toHash*(x: UInt256): common.Hash256 = + common.Hash256(data: x.toByteArrayBE) + +func timestampToBeaconRoot*(timestamp: Quantity): FixedBytes[32] = + # Generates a deterministic hash from the timestamp + let h = keccakHash(timestamp.uint64.toBytesBE) + FixedBytes[32](h.data) + +func beaconRoot*(x: UInt256): FixedBytes[32] = + FixedBytes[32](x.toByteArrayBE) template testCond*(expr: untyped) = if not (expr): diff --git a/hive_integration/nodocker/engine/withdrawal_tests.nim b/hive_integration/nodocker/engine/withdrawal_tests.nim index 3b57b64db..0a134e50e 100644 --- 
a/hive_integration/nodocker/engine/withdrawal_tests.nim +++ b/hive_integration/nodocker/engine/withdrawal_tests.nim @@ -6,9 +6,11 @@ import withdrawals/wd_reorg_spec, withdrawals/wd_sync_spec, ./types, - ./test_env + ./test_env, + ./base_spec proc specExecute[T](ws: BaseSpec): bool = + ws.mainFork = ForkShanghai let ws = T(ws) conf = envConfig(ws.getForkConfig()) @@ -29,10 +31,10 @@ let wdTestList* = [ about: "Tests a 8 block re-org using NewPayload. Re-org does not change withdrawals fork height", run: specExecute[ReorgSpec], spec: ReorgSpec( - slotsToSafe: u256(32), - slotsToFinalized: u256(64), + slotsToSafe: 32, + slotsToFinalized: 64, timeoutSeconds: 300, - wdForkHeight: 1, # Genesis is Pre-Withdrawals + forkHeight: 1, # Genesis is Pre-Withdrawals wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, reOrgBlockCount: 8, @@ -45,10 +47,10 @@ let wdTestList* = [ # the payload at the height of the fork run: specExecute[ReorgSpec], spec: ReorgSpec( - slotsToSafe: u256(32), - slotsToFinalized: u256(64), + slotsToSafe: 32, + slotsToFinalized: 64, timeoutSeconds: 300, - wdForkHeight: 8, # Genesis is Pre-Withdrawals + forkHeight: 8, # Genesis is Pre-Withdrawals wdBlockCount: 8, wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, reOrgBlockCount: 10, @@ -61,10 +63,10 @@ let wdTestList* = [ # than the canonical chain run: specExecute[ReorgSpec], spec: ReorgSpec( - slotsToSafe: u256(32), - slotsToFinalized: u256(64), + slotsToSafe: 32, + slotsToFinalized: 64, timeoutSeconds: 300, - wdForkHeight: 8, # Genesis is Pre-Withdrawals + forkHeight: 8, # Genesis is Pre-Withdrawals wdBlockCount: 8, wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, reOrgBlockCount: 10, @@ -78,13 +80,13 @@ let wdTestList* = [ # than the canonical chain run: specExecute[ReorgSpec], spec: ReorgSpec( - slotsToSafe: u256(32), - slotsToFinalized: u256(64), + slotsToSafe: 32, + slotsToFinalized: 64, timeoutSeconds: 300, - wdForkHeight: 8, # Genesis is Pre-Withdrawals + forkHeight: 8, # Genesis is Pre-Withdrawals wdBlockCount: 8, wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, - timeIncrements: 2, + blockTimestampIncrement: 2, reOrgBlockCount: 10, reOrgViaSync: true, sidechaintimeIncrements: 1, @@ -94,10 +96,10 @@ let wdTestList* = [ about: "Tests a simple 1 block re-org", run: specExecute[ReorgSpec], spec: ReorgSpec( - slotsToSafe: u256(32), - slotsToFinalized: u256(64), + slotsToSafe: 32, + slotsToFinalized: 64, timeoutSeconds: 300, - wdForkHeight: 1, # Genesis is Pre-Withdrawals + forkHeight: 1, # Genesis is Pre-Withdrawals wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, reOrgBlockCount: 1, @@ -108,10 +110,10 @@ let wdTestList* = [ about: "Tests a 8 block re-org using NewPayload. 
      Re-org does not change withdrawals fork height",
    run: specExecute[ReorgSpec],
    spec: ReorgSpec(
-      slotsToSafe: u256(32),
-      slotsToFinalized: u256(64),
+      slotsToSafe: 32,
+      slotsToFinalized: 64,
       timeoutSeconds: 300,
-      wdForkHeight: 1, # Genesis is Pre-Withdrawals
+      forkHeight: 1, # Genesis is Pre-Withdrawals
       wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       reOrgBlockCount: 8,
@@ -124,10 +126,10 @@ let wdTestList* = [
       "the payload at the height of the fork\n",
     run: specExecute[ReorgSpec],
     spec: ReorgSpec(
-      slotsToSafe: u256(32),
-      slotsToFinalized: u256(64),
+      slotsToSafe: 32,
+      slotsToFinalized: 64,
       timeoutSeconds: 300,
-      wdForkHeight: 8, # Genesis is Pre-Withdrawals
+      forkHeight: 8, # Genesis is Pre-Withdrawals
       wdBlockCount: 8,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       reOrgBlockCount: 10,
@@ -140,10 +142,10 @@ let wdTestList* = [
       # than the canonical chain
     run: specExecute[ReorgSpec],
     spec: ReorgSpec(
-      slotsToSafe: u256(32),
-      slotsToFinalized: u256(64),
+      slotsToSafe: 32,
+      slotsToFinalized: 64,
       timeoutSeconds: 300,
-      wdForkHeight: 8, # Genesis is Pre-Withdrawals
+      forkHeight: 8, # Genesis is Pre-Withdrawals
       wdBlockCount: 8,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       reOrgBlockCount: 10,
@@ -157,13 +159,13 @@ let wdTestList* = [
       # than the canonical chain
     run: specExecute[ReorgSpec],
     spec: ReorgSpec(
-      slotsToSafe: u256(32),
-      slotsToFinalized: u256(64),
+      slotsToSafe: 32,
+      slotsToFinalized: 64,
       timeoutSeconds: 300,
-      wdForkHeight: 8, # Genesis is Pre-Withdrawals
+      forkHeight: 8, # Genesis is Pre-Withdrawals
       wdBlockCount: 8,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
-      timeIncrements: 2,
+      blockTimestampIncrement: 2,
       reOrgBlockCount: 10,
       reOrgViaSync: false,
       sidechaintimeIncrements: 1,
@@ -180,7 +182,7 @@ let wdTestList* = [
     run: specExecute[SyncSpec],
     spec: SyncSpec(
       timeoutSeconds: 6,
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 2,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: 1,
@@ -196,7 +198,7 @@ let wdTestList* = [
       "- Wait for sync and verify withdrawn account's balance\n",
     run: specExecute[SyncSpec],
     spec: SyncSpec(
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 2,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: 1,
@@ -210,7 +212,7 @@ let wdTestList* = [
       "- Wait for sync and verify withdrawn account's balance\n",
     run: specExecute[SyncSpec],
     spec: SyncSpec(
-      wdForkHeight: 0,
+      forkHeight: 0,
       wdBlockCount: 2,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: 1,
@@ -225,7 +227,7 @@ let wdTestList* = [
       "- Wait for sync, which include syncing a pre-Withdrawals block, and verify withdrawn account's balance\n",
     run: specExecute[SyncSpec],
     spec: SyncSpec(
-      wdForkHeight: 2,
+      forkHeight: 2,
       wdBlockCount: 2,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
@@ -241,7 +243,7 @@ let wdTestList* = [
       "- Wait for sync, which include syncing a pre-Withdrawals block, and verify withdrawn account's balance\n",
     run: specExecute[SyncSpec],
     spec: SyncSpec(
-      wdForkHeight: 2,
+      forkHeight: 2,
       wdBlockCount: 2,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
@@ -257,7 +259,7 @@ let wdTestList* = [
     run: specExecute[SyncSpec],
     spec: SyncSpec(
       timeoutSeconds: 100,
-      wdForkHeight: 2,
+      forkHeight: 2,
       wdBlockCount: 128,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: 1024,
@@ -269,7 +271,7 @@ let wdTestList* = [
     name: "Max Initcode Size",
     run: specExecute[MaxInitcodeSizeSpec],
     spec: MaxInitcodeSizeSpec(
-      wdForkHeight: 2, # Block 1 is Pre-Withdrawals
+      forkHeight: 2, # Block 1 is Pre-Withdrawals
       wdBlockCount: 2,
       overflowMaxInitcodeTxCountBeforeFork: 0,
       overflowMaxInitcodeTxCountAfterFork: 1,
@@ -280,7 +282,7 @@ let wdTestList* = [
     about: "Verify the block value returned in GetPayloadV2.",
     run: specExecute[BlockValueSpec],
     spec: BlockValueSpec(
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 1,
     )),
   # Base tests
@@ -289,7 +291,7 @@ let wdTestList* = [
     about: "Tests the withdrawals fork happening since genesis (e.g. on a testnet).",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 0,
+      forkHeight: 0,
       wdBlockCount: 2, # Genesis is a withdrawals block
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
     )),
@@ -298,7 +300,7 @@ let wdTestList* = [
     about: "Tests the withdrawals fork happening directly after genesis.",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 1, # Only Genesis is Pre-Withdrawals
+      forkHeight: 1, # Only Genesis is Pre-Withdrawals
       wdBlockCount: 1,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
     )),
@@ -309,7 +311,7 @@ let wdTestList* = [
       " client is expected to respond with the appropriate error.",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 2, # Genesis and Block 1 are Pre-Withdrawals
+      forkHeight: 2, # Genesis and Block 1 are Pre-Withdrawals
       wdBlockCount: 1,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
     )),
@@ -320,7 +322,7 @@ let wdTestList* = [
       " client is expected to respond with the appropriate error.",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 3, # Genesis, Block 1 and 2 are Pre-Withdrawals
+      forkHeight: 3, # Genesis, Block 1 and 2 are Pre-Withdrawals
       wdBlockCount: 1,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
     )),
@@ -329,7 +331,7 @@ let wdTestList* = [
     about: "Make multiple withdrawals to a single account.",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 1,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: 1,
@@ -342,7 +344,7 @@ let wdTestList* = [
       " is not in ordered sequence.",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 1,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: 2,
@@ -354,7 +356,7 @@ let wdTestList* = [
     # TimeoutSeconds: 240,
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 4,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK * 5,
       wdAbleAccountCount: 1024,
@@ -364,7 +366,7 @@ let wdTestList* = [
     about: "Make multiple withdrawals where the amount withdrawn is 0.",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 1,
       wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
       wdAbleAccountCount: 2,
@@ -375,7 +377,7 @@ let wdTestList* = [
     about: "Produce withdrawals block with zero withdrawals.",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 1,
       wdPerBlock: 0,
     )),
@@ -384,7 +386,7 @@ let wdTestList* = [
     about: "Send a valid payload with a corrupted hash using engine_newPayloadV2.",
     run: specExecute[WDBaseSpec],
     spec: WDBaseSpec(
-      wdForkHeight: 1,
+      forkHeight: 1,
       wdBlockCount: 1,
       testCorrupedHashPayloads: true,
     )
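
Note (illustration only, not part of the diff): every renamed field above feeds the same fork-time arithmetic, namely fork timestamp = genesis timestamp + forkHeight * blockTimestampIncrement. A minimal, self-contained Nim sketch with values assumed from the test cases above (the 0x1234 genesis timestamp is the constant removed from wd_base_spec.nim below; forkHeight 8 and a 2-second increment come from the "fork on block 8" re-org case):

# Illustration only: how the spec fields map to a Shanghai/withdrawals fork timestamp.
# The concrete values are assumptions taken from the test list, not a real spec object.
const
  genesisTimestamp = 0x1234        # test genesis timestamp used by these specs
  forkHeight = 8                   # withdrawals fork activates at block 8
  blockTimestampIncrement = 2      # seconds between consecutive blocks

let forkTime = genesisTimestamp + forkHeight * blockTimestampIncrement
echo forkTime                      # 4676 (0x1244); blocks 8 and later carry withdrawals
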
diff --git a/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim
index ec2dab572..f0a680258 100644
--- a/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim
+++ b/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim
@@ -11,18 +11,15 @@ import
   ../test_env,
   ../engine_client,
   ../types,
+  ../base_spec,
   ../../../nimbus/common/common,
   ../../../nimbus/utils/utils,
   ../../../nimbus/common/chain_config,
   ../../../nimbus/beacon/execution_types,
   ../../../nimbus/beacon/web3_eth_conv

-import ../../../tools/common/helpers except LogLevel
-
 type
   WDBaseSpec* = ref object of BaseSpec
-    timeIncrements*: int     # Timestamp increments per block throughout the test
-    wdForkHeight*: int       # Withdrawals activation fork height
     wdBlockCount*: int       # Number of blocks on and after withdrawals fork activation
     wdPerBlock*: int         # Number of withdrawals per block
     wdAbleAccountCount*: int # Number of accounts to withdraw to (round-robin)
@@ -37,7 +34,6 @@ type
     nextIndex*: int

 const
-  GenesisTimestamp = 0x1234
   WARM_COINBASE_ADDRESS = hexToByteArray[20]("0x0101010101010101010101010101010101010101")
   PUSH0_ADDRESS = hexToByteArray[20]("0x0202020202020202020202020202020202020202")
   MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK* = 16
@@ -46,34 +42,14 @@ const
     PUSH0_ADDRESS,
   ]

-# Get the per-block timestamp increments configured for this test
-func getBlockTimeIncrements*(ws: WDBaseSpec): int =
-  if ws.timeIncrements == 0:
-    return 1
-  ws.timeIncrements
-
 # Timestamp delta between genesis and the withdrawals fork
 func getWithdrawalsGenesisTimeDelta*(ws: WDBaseSpec): int =
-  ws.wdForkHeight * ws.getBlockTimeIncrements()
-
-# Calculates Shanghai fork timestamp given the amount of blocks that need to be
-# produced beforehand.
-func getWithdrawalsForkTime(ws: WDBaseSpec): int =
-  GenesisTimestamp + ws.getWithdrawalsGenesisTimeDelta()
-
-# Generates the fork config, including withdrawals fork timestamp.
-func getForkConfig*(ws: WDBaseSpec): ChainConfig =
-  result = getChainConfig("Shanghai")
-  result.shanghaiTime = some(ws.getWithdrawalsForkTime().EthTime)
+  ws.forkHeight * ws.getBlockTimeIncrements()

 # Get the start account for all withdrawals.
 func getWithdrawalsStartAccount*(ws: WDBaseSpec): UInt256 =
   0x1000.u256

-func toAddress(x: UInt256): EthAddress =
-  var mm = x.toByteArrayBE
-  copyMem(result[0].addr, mm[11].addr, 20)
-
 # Adds bytecode that unconditionally sets an storage key to specified account range
 func addUnconditionalBytecode(g: Genesis, start, stop: UInt256) =
   var acc = start
@@ -177,7 +153,7 @@ proc verifyContractsStorage(ws: WDBaseSpec, env: TestEnv): Result[void, string]
     r = env.client.storageAt(WARM_COINBASE_ADDRESS, latestPayloadNumber, latestPayloadNumber)
     p = env.client.storageAt(PUSH0_ADDRESS, 0.u256, latestPayloadNumber)

-  if latestPayloadNumber.truncate(int) >= ws.wdForkHeight:
+  if latestPayloadNumber.truncate(int) >= ws.forkHeight:
     # Shanghai
     r.expectStorageEqual(WARM_COINBASE_ADDRESS, 100.u256)    # WARM_STORAGE_READ_COST
     p.expectStorageEqual(PUSH0_ADDRESS, latestPayloadNumber) # tx succeeded
@@ -188,18 +164,13 @@ proc verifyContractsStorage(ws: WDBaseSpec, env: TestEnv): Result[void, string]

   ok()

-# Changes the CL Mocker default time increments of 1 to the value specified
-# in the test spec.
-proc configureCLMock*(ws: WDBaseSpec, cl: CLMocker) =
-  cl.blockTimestampIncrement = some(ws.getBlockTimeIncrements())
-
 # Number of blocks to be produced (not counting genesis) before withdrawals
 # fork.
 func getPreWithdrawalsBlockCount*(ws: WDBaseSpec): int =
-  if ws.wdForkHeight == 0:
+  if ws.forkHeight == 0:
     0
   else:
-    ws.wdForkHeight - 1
+    ws.forkHeight - 1

 # Number of payloads to be produced (pre and post withdrawals) during the entire test
 func getTotalPayloadCount*(ws: WDBaseSpec): int =
@@ -235,7 +206,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
     testCond ok

   # Check if we have pre-Shanghai blocks
-  if ws.getWithdrawalsForkTime() > GenesisTimestamp:
+  if ws.getForkTime() > GenesisTimestamp:
     # Check `latest` during all pre-shanghai blocks, none should
     # contain `withdrawalsRoot`, including genesis.

@@ -538,7 +509,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
       let r = env.client.headerByNumber(bn, h)
       var expectedWithdrawalsRoot: Option[common.Hash256]

-      if bn >= ws.wdForkHeight.uint64:
+      if bn >= ws.forkHeight.uint64:
         let wds = ws.wdHistory.getWithdrawals(bn)
         expectedWithdrawalsRoot = some(calcWithdrawalsRoot(wds.list))
diff --git a/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim
index fc3e708ca..39f3bd6db 100644
--- a/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim
+++ b/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim
@@ -9,6 +9,7 @@ import
   ../test_env,
   ../engine_client,
   ../types,
+  ../base_spec,
   ../../../nimbus/beacon/web3_eth_conv

 # Withdrawals re-org spec:
@@ -22,9 +23,6 @@ type
     # Whether the client should fetch the sidechain by syncing from the secondary client
     reOrgViaSync*           : bool
     sidechainTimeIncrements*: int
-    slotsToSafe*            : UInt256
-    slotsToFinalized*       : UInt256
-    timeoutSeconds*         : int

   Sidechain = ref object
     startAccount: UInt256
@@ -48,14 +46,14 @@ proc getSidechainBlockTimeIncrements(ws: ReorgSpec): int=
     return ws.getBlockTimeIncrements()
   ws.sidechainTimeIncrements

-proc getSidechainWdForkHeight(ws: ReorgSpec): int =
+proc getSidechainforkHeight(ws: ReorgSpec): int =
   if ws.getSidechainBlockTimeIncrements() != ws.getBlockTimeIncrements():
     # Block timestamp increments in both chains are different so need to
     # calculate different heights, only if split happens before fork.
     # We cannot split by having two different genesis blocks.
     doAssert(ws.getSidechainSplitHeight() != 0, "invalid sidechain split height")

-    if ws.getSidechainSplitHeight() <= ws.wdForkHeight:
+    if ws.getSidechainSplitHeight() <= ws.forkHeight:
       # We need to calculate the height of the fork on the sidechain
       let sidechainSplitBlocktimestamp = (ws.getSidechainSplitHeight() - 1) * ws.getBlockTimeIncrements()
       let remainingTime = ws.getWithdrawalsGenesisTimeDelta() - sidechainSplitBlocktimestamp
@@ -64,7 +62,7 @@ proc getSidechainWdForkHeight(ws: ReorgSpec): int =
       return ((remainingTime - 1) div ws.sidechainTimeIncrements) + ws.getSidechainSplitHeight()

-  return ws.wdForkHeight
+  return ws.forkHeight

 proc execute*(ws: ReorgSpec, env: TestEnv): bool =
   result = true

@@ -94,7 +92,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
     onPayloadProducerSelected: proc(): bool =
       env.clMock.nextWithdrawals = none(seq[WithdrawalV1])

-      if env.clMock.currentPayloadNumber >= ws.wdForkHeight.uint64:
+      if env.clMock.currentPayloadNumber >= ws.forkHeight.uint64:
        # Prepare some withdrawals
        let wfb = ws.generateWithdrawalsForBlock(canonical.nextIndex, canonical.startAccount)
        env.clMock.nextWithdrawals = some(w3Withdrawals wfb.wds)
@@ -103,7 +101,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
       if env.clMock.currentPayloadNumber >= ws.getSidechainSplitHeight().uint64:
         # We have split

-        if env.clMock.currentPayloadNumber >= ws.getSidechainWdForkHeight().uint64:
+        if env.clMock.currentPayloadNumber >= ws.getSidechainforkHeight().uint64:
           # And we are past the withdrawals fork on the sidechain
           let wfb = ws.generateWithdrawalsForBlock(sidechain.nextIndex, sidechain.startAccount)
           sidechain.wdHistory.put(env.clMock.currentPayloadNumber, wfb.wds)
@@ -156,7 +154,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
       else:
         attr.timestamp = env.clMock.latestPayloadAttributes.timestamp

-      if env.clMock.currentPayloadNumber >= ws.getSidechainwdForkHeight().uint64:
+      if env.clMock.currentPayloadNumber >= ws.getSidechainforkHeight().uint64:
         # Withdrawals
         let rr = sidechain.wdHistory.get(env.clMock.currentPayloadNumber)
         testCond rr.isOk:
@@ -207,12 +205,12 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
     sidechain.height = env.clMock.latestExecutedPayload.blockNumber.uint64

-  if ws.wdForkHeight < ws.getSidechainwdForkHeight():
+  if ws.forkHeight < ws.getSidechainforkHeight():
     # This means the canonical chain forked before the sidechain.
     # Therefore we need to produce more sidechain payloads to reach
     # at least`ws.WithdrawalsBlockCount` withdrawals payloads produced on
     # the sidechain.
-    let height = ws.getSidechainwdForkHeight()-ws.wdForkHeight
+    let height = ws.getSidechainforkHeight()-ws.forkHeight
     for i in 0..<height:
-      if payloadNumber >= ws.getSidechainwdForkHeight():
+      if payloadNumber >= ws.getSidechainforkHeight():
         version = Version.V2
       info "Sending sidechain",
@@ -322,7 +320,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
   # We are using different accounts credited between the canonical chain
   # and the fork.
   # We check on `latest`.
-  let r3 = ws.wdHistory.verifyWithdrawals(uint64(ws.wdForkHeight-1), none(UInt256), env.client)
+  let r3 = ws.wdHistory.verifyWithdrawals(uint64(ws.forkHeight-1), none(UInt256), env.client)
   testCond r3.isOk

   # Re-Org back to the canonical chain
diff --git a/hive_integration/nodocker/engine/withdrawals/wd_sync_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_sync_spec.nim
index d5f2c9204..5b613ac3c 100644
--- a/hive_integration/nodocker/engine/withdrawals/wd_sync_spec.nim
+++ b/hive_integration/nodocker/engine/withdrawals/wd_sync_spec.nim
@@ -14,7 +14,6 @@ type
   SyncSpec* = ref object of WDBaseSpec
     syncSteps*: int # Sync block chunks that will be passed as head through FCUs to the syncing client
     syncShouldFail*: bool
-    timeoutSeconds*: int
     sleep*: int

 proc doSync(ws: SyncSpec, client: RpcClient, clMock: CLMocker): Future[bool] {.async.} =
diff --git a/nimbus/constants.nim b/nimbus/constants.nim
index 2a774d310..491e4df92 100644
--- a/nimbus/constants.nim
+++ b/nimbus/constants.nim
@@ -85,7 +85,7 @@ const
   BLOB_GASPRICE_UPDATE_FRACTION* = 3338477'u64
   MAX_BLOB_GAS_PER_BLOCK* = 786432
   MAX_ALLOWED_BLOB* = MAX_BLOB_GAS_PER_BLOCK div GAS_PER_BLOB
-  
+
   # EIP-4788 addresses
   # BEACON_ROOTS_ADDRESS is the address where historical beacon roots are stored as per EIP-4788
   BEACON_ROOTS_ADDRESS* = hexToByteArray[20]("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
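
Note (illustration only, not part of the diff): the least obvious piece above is getSidechainforkHeight in wd_reorg_spec.nim, which recomputes the withdrawals fork height for a sidechain that ticks at a different timestamp increment than the canonical chain. A self-contained Nim sketch of that arithmetic, using the "fork on block 8, canonical 2-second blocks, sidechain 1-second blocks" re-org case from the test list; the split height of 5 is an assumed value, not taken from the tests:

# Sketch of the sidechain fork-height arithmetic mirrored from wd_reorg_spec.nim.
# All names are local stand-ins; sidechainSplitHeight is an assumption for illustration.
let
  forkHeight           = 8    # canonical withdrawals fork height
  blockTimeIncrement   = 2    # canonical seconds per block
  sidechainIncrement   = 1    # sidechain seconds per block
  sidechainSplitHeight = 5    # block at which the sidechain diverges (assumed)

  genesisTimeDelta    = forkHeight * blockTimeIncrement                  # 16
  splitTimestampDelta = (sidechainSplitHeight - 1) * blockTimeIncrement  # 8
  remainingTime       = genesisTimeDelta - splitTimestampDelta           # 8

# Same expression as the diff's return statement above.
let sidechainForkHeight = ((remainingTime - 1) div sidechainIncrement) + sidechainSplitHeight
echo sidechainForkHeight  # 12 with these assumed values: slower timestamps push the fork to a higher block
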