From 38c0c3433142bacb3b857cee9180f1dd5368ffad Mon Sep 17 00:00:00 2001 From: jangko Date: Mon, 21 Aug 2023 09:08:54 +0700 Subject: [PATCH] hive: Engine api simulator overhaul --- .../nodocker/engine/auths_tests.nim | 79 +- hive_integration/nodocker/engine/clmock.nim | 333 ++- .../nodocker/engine/engine/engine_spec.nim | 1748 +++++++++++++ .../nodocker/engine/engine_callsigs.nim | 7 +- .../nodocker/engine/engine_client.nim | 247 +- .../nodocker/engine/engine_sim.nim | 32 +- .../nodocker/engine/engine_tests.nim | 2223 +++-------------- .../nodocker/engine/exchange_cap_tests.nim | 58 +- hive_integration/nodocker/engine/helper.nim | 86 +- .../nodocker/engine/{ => init}/genesis.json | 0 .../nodocker/engine/{ => init}/sealer.key | 0 hive_integration/nodocker/engine/test_env.nim | 144 +- hive_integration/nodocker/engine/types.nim | 193 +- .../nodocker/engine/withdrawal_tests.nim | 390 +++ .../engine/withdrawals/wd_base_spec.nim | 550 ++++ .../withdrawals/wd_block_value_spec.nim | 43 + .../engine/withdrawals/wd_history.nim | 95 + .../withdrawals/wd_max_init_code_spec.nim | 116 + .../withdrawals/wd_payload_body_spec.nim | 259 ++ .../engine/withdrawals/wd_reorg_spec.nim | 323 +++ .../engine/withdrawals/wd_sync_spec.nim | 53 + 21 files changed, 4830 insertions(+), 2149 deletions(-) create mode 100644 hive_integration/nodocker/engine/engine/engine_spec.nim rename hive_integration/nodocker/engine/{ => init}/genesis.json (100%) rename hive_integration/nodocker/engine/{ => init}/sealer.key (100%) create mode 100644 hive_integration/nodocker/engine/withdrawal_tests.nim create mode 100644 hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim create mode 100644 hive_integration/nodocker/engine/withdrawals/wd_block_value_spec.nim create mode 100644 hive_integration/nodocker/engine/withdrawals/wd_history.nim create mode 100644 hive_integration/nodocker/engine/withdrawals/wd_max_init_code_spec.nim create mode 100644 hive_integration/nodocker/engine/withdrawals/wd_payload_body_spec.nim create mode 100644 hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim create mode 100644 hive_integration/nodocker/engine/withdrawals/wd_sync_spec.nim diff --git a/hive_integration/nodocker/engine/auths_tests.nim b/hive_integration/nodocker/engine/auths_tests.nim index 729354c58..3d9a313c5 100644 --- a/hive_integration/nodocker/engine/auths_tests.nim +++ b/hive_integration/nodocker/engine/auths_tests.nim @@ -1,7 +1,6 @@ import std/[base64, times, strutils], test_env, - unittest2, chronicles, nimcrypto/[hmac], json_rpc/[rpcclient], @@ -32,9 +31,7 @@ proc getClient(t: TestEnv, token: string): RpcHttpClient = return client template genAuthTest(procName: untyped, timeDriftSeconds: int64, customAuthSecretBytes: string, authOK: bool) = - proc procName(t: TestEnv): TestStatus = - result = TestStatus.OK - + proc procName(t: TestEnv): bool = # Default values var # All test cases send a simple TransitionConfigurationV1 to check the Authentication mechanism (JWT) @@ -57,9 +54,10 @@ template genAuthTest(procName: untyped, timeDriftSeconds: int64, customAuthSecre discard waitFor client.call("engine_exchangeTransitionConfigurationV1", %[%tConf]) testCond authOk: error "Authentication was supposed to fail authentication but passed" - except CatchableError: + except CatchableError as ex: testCond not authOk: error "Authentication was supposed to pass authentication but failed" + return true genAuthTest(authTest1, 0'i64, "", true) genAuthTest(authTest2, 0'i64, "secretsecretsecretsecretsecrets", false) @@ -69,41 +67,68 @@ 
genAuthTest(authTest5, 1 - maxTimeDriftSeconds, "", true) genAuthTest(authTest6, maxTimeDriftSeconds + 1, "", false) genAuthTest(authTest7, maxTimeDriftSeconds - 1, "", true) +type + AuthSpec* = ref object of BaseSpec + exec*: proc(t: TestEnv): bool + +proc specExecute(ws: BaseSpec): bool = + let + ws = AuthSpec(ws) + env = setupELClient("", true) + + env.setRealTTD(0) + result = ws.exec(env) + env.stopELClient() + # JWT Authentication Tests -const authTestList* = [ - TestSpec( +let authTestList* = [ + TestDesc( name: "JWT Authentication: No time drift, correct secret", - run: authTest1, - enableAuth: true + run: specExecute, + spec: AuthSpec( + exec: authTest1, + ) ), - TestSpec( + TestDesc( name: "JWT Authentication: No time drift, incorrect secret (shorter)", - run: authTest2, - enableAuth: true + run: specExecute, + spec: AuthSpec( + exec: authTest2, + ) ), - TestSpec( + TestDesc( name: "JWT Authentication: No time drift, incorrect secret (longer)", - run: authTest3, - enableAuth: true + run: specExecute, + spec: AuthSpec( + exec: authTest3, + ) ), - TestSpec( + TestDesc( name: "JWT Authentication: Negative time drift, exceeding limit, correct secret", - run: authTest4, - enableAuth: true + run: specExecute, + spec: AuthSpec( + exec: authTest4, + ) ), - TestSpec( + TestDesc( name: "JWT Authentication: Negative time drift, within limit, correct secret", - run: authTest5, - enableAuth: true + run: specExecute, + spec: AuthSpec( + exec: authTest5, + ) ), - TestSpec( + TestDesc( name: "JWT Authentication: Positive time drift, exceeding limit, correct secret", - run: authTest6, - enableAuth: true + run: specExecute, + spec: AuthSpec( + exec: authTest6, + ) ), - TestSpec( + TestDesc( name: "JWT Authentication: Positive time drift, within limit, correct secret", - run: authTest7, - enableAuth: true + run: specExecute, + spec: AuthSpec( + exec: authTest7, + ) ) ] diff --git a/hive_integration/nodocker/engine/clmock.nim b/hive_integration/nodocker/engine/clmock.nim index 85083f36a..1a6eb64aa 100644 --- a/hive_integration/nodocker/engine/clmock.nim +++ b/hive_integration/nodocker/engine/clmock.nim @@ -2,11 +2,13 @@ import std/[times, tables], chronicles, nimcrypto/sysrand, - stew/byteutils, + stew/[byteutils, endians2], eth/common, chronos, json_rpc/rpcclient, ../../../nimbus/rpc/merge/mergeutils, + ../../../nimbus/rpc/execution_types, ../../../nimbus/[constants], + ../../../nimbus/common as nimbus_common, ./engine_client import web3/engine_api_types except Hash256 # conflict with the one from eth/common @@ -14,30 +16,51 @@ import web3/engine_api_types except Hash256 # conflict with the one from eth/co # Consensus Layer Client Mock used to sync the Execution Clients once the TTD has been reached type CLMocker* = ref object - nextFeeRecipient*: EthAddress - nextPayloadID: PayloadID + com: CommonRef + + # Number of required slots before a block which was set as Head moves to `safe` and `finalized` respectively + slotsToSafe* : int + slotsToFinalized*: int + + # Wait time before attempting to get the payload + payloadProductionClientDelay: int + + # Block production related + blockTimestampIncrement*: Option[int] + + # Block Production State + client : RpcClient + nextFeeRecipient* : EthAddress + nextPayloadID* : PayloadID + currentPayloadNumber* : uint64 + + # Chain History + headerHistory : Table[uint64, common.BlockHeader] # PoS Chain History Information - prevRandaoHistory*: Table[uint64, Hash256] - executedPayloadHistory*: Table[uint64, ExecutionPayloadV1] + prevRandaoHistory* : Table[uint64, 
common.Hash256] + executedPayloadHistory* : Table[uint64, ExecutionPayload] + headHashHistory : seq[BlockHash] # Latest broadcasted data using the PoS Engine API - latestHeadNumber*: uint64 - latestHeader*: common.BlockHeader - latestPayloadBuilt* : ExecutionPayloadV1 - latestExecutedPayload*: ExecutionPayloadV1 - latestForkchoice* : ForkchoiceStateV1 + latestHeadNumber* : uint64 + latestHeader* : common.BlockHeader + latestPayloadBuilt* : ExecutionPayload + latestBlockValue* : Option[UInt256] + latestBlobsBundle* : Option[BlobsBundleV1] + latestPayloadAttributes*: PayloadAttributes + latestExecutedPayload* : ExecutionPayload + latestForkchoice* : ForkchoiceStateV1 # Merge related - firstPoSBlockNumber : Option[uint64] - ttdReached* : bool + firstPoSBlockNumber : Option[uint64] + ttdReached* : bool + transitionPayloadTimestamp: Option[int] + safeSlotsToImportOptimistically: int + chainTotalDifficulty : UInt256 - client : RpcClient - ttd : DifficultyInt - - slotsToSafe* : int - slotsToFinalized* : int - headHashHistory : seq[BlockHash] + # Shanghai related + nextWithdrawals* : Option[seq[WithdrawalV1]] BlockProcessCallbacks* = object onPayloadProducerSelected* : proc(): bool {.gcsafe.} @@ -48,36 +71,66 @@ type onSafeBlockChange * : proc(): bool {.gcsafe.} onFinalizedBlockChange* : proc(): bool {.gcsafe.} + GetPayloadResponse = object + executionPayload: ExecutionPayload + blockValue: Option[UInt256] + blobsBundle: Option[BlobsBundleV1] -proc init*(cl: CLMocker, client: RpcClient, ttd: DifficultyInt) = +func latestPayloadNumber(h: Table[uint64, ExecutionPayload]): uint64 = + result = 0'u64 + for n, _ in h: + if n > result: + result = n + +func latestWithdrawalsIndex(h: Table[uint64, ExecutionPayload]): uint64 = + result = 0'u64 + for n, p in h: + if p.withdrawals.isNone: + continue + let wds = p.withdrawals.get + for w in wds: + if w.index.uint64 > result: + result = w.index.uint64 + +proc init*(cl: CLMocker, client: RpcClient, com: CommonRef) = cl.client = client - cl.ttd = ttd + cl.com = com cl.slotsToSafe = 1 cl.slotsToFinalized = 2 + cl.payloadProductionClientDelay = 1 + cl.headerHistory[0] = com.genesisHeader() -proc newClMocker*(client: RpcClient, ttd: DifficultyInt): CLMocker = +proc newClMocker*(client: RpcClient, com: CommonRef): CLMocker = new result - result.init(client, ttd) + result.init(client, com) proc waitForTTD*(cl: CLMocker): Future[bool] {.async.} = - let (header, waitRes) = await cl.client.waitForTTD(cl.ttd) + let ttd = cl.com.ttd() + doAssert(ttd.isSome) + let (header, waitRes) = await cl.client.waitForTTD(ttd.get) if not waitRes: - error "timeout while waiting for TTD" + error "CLMocker: timeout while waiting for TTD" return false cl.latestHeader = header + cl.headerHistory[header.blockNumber.truncate(uint64)] = header cl.ttdReached = true let headerHash = BlockHash(common.blockHash(cl.latestHeader).data) - cl.latestForkchoice.headBlockHash = headerHash - if cl.slotsToSafe == 0: cl.latestForkchoice.safeBlockHash = headerHash if cl.slotsToFinalized == 0: cl.latestForkchoice.finalizedBlockHash = headerHash + # Reset transition values cl.latestHeadNumber = cl.latestHeader.blockNumber.truncate(uint64) + cl.headHashHistory = @[] + cl.firstPoSBlockNumber = none(uint64) + + # Prepare initial forkchoice, to be sent to the transition payload producer + cl.latestForkchoice = ForkchoiceStateV1() + cl.latestForkchoice.headBlockHash = headerHash let res = cl.client.forkchoiceUpdatedV1(cl.latestForkchoice) if res.isErr: @@ -93,6 +146,38 @@ proc waitForTTD*(cl: CLMocker): 
Future[bool] {.async.} = return true +# Check whether a block number is a PoS block +proc isBlockPoS*(cl: CLMocker, bn: common.BlockNumber): bool = + if cl.firstPoSBlockNumber.isNone: + return false + + let number = cl.firstPoSBlockNumber.get() + let bn = bn.truncate(uint64) + if number > bn: + return false + + return true + +# Return the per-block timestamp value increment +func getTimestampIncrement(cl: CLMocker): int = + cl.blockTimestampIncrement.get(1) + +# Returns the timestamp value to be included in the next payload attributes +func getNextBlockTimestamp(cl: CLMocker): int64 = + if cl.firstPoSBlockNumber.isNone and cl.transitionPayloadTimestamp.isSome: + # We are producing the transition payload and there's a value specified + # for this specific payload + return cl.transitionPayloadTimestamp.get + return cl.latestHeader.timestamp.toUnix + cl.getTimestampIncrement().int64 + +func setNextWithdrawals(cl: CLMocker, nextWithdrawals: Option[seq[WithdrawalV1]]) = + cl.nextWithdrawals = nextWithdrawals + +func timestampToBeaconRoot(timestamp: Quantity): FixedBytes[32] = + # Generates a deterministic hash from the timestamp + let h = keccakHash(timestamp.uint64.toBytesBE) + FixedBytes[32](h.data) + proc pickNextPayloadProducer(cl: CLMocker): bool = let nRes = cl.client.blockNumber() if nRes.isErr: @@ -119,25 +204,66 @@ proc pickNextPayloadProducer(cl: CLMocker): bool = return true +func isShanghai(cl: CLMocker, timestamp: Quantity): bool = + let ts = fromUnix(timestamp.int64) + cl.com.isShanghaiOrLater(ts) + +func isCancun(cl: CLMocker, timestamp: Quantity): bool = + let ts = fromUnix(timestamp.int64) + cl.com.isCancunOrLater(ts) + +func V1(attr: Option[PayloadAttributes]): Option[PayloadAttributesV1] = + if attr.isNone: + return none(PayloadAttributesV1) + some(attr.get.V1) + +func V2(attr: Option[PayloadAttributes]): Option[PayloadAttributesV2] = + if attr.isNone: + return none(PayloadAttributesV2) + some(attr.get.V2) + +func V3(attr: Option[PayloadAttributes]): Option[PayloadAttributesV3] = + if attr.isNone: + return none(PayloadAttributesV3) + some(attr.get.V3) + +proc fcu(cl: CLMocker, version: Version, + update: ForkchoiceStateV1, + attr: Option[PayloadAttributes]): + Result[ForkchoiceUpdatedResponse, string] = + case version + of Version.V1: cl.client.forkchoiceUpdatedV1(update, attr.V1) + of Version.V2: cl.client.forkchoiceUpdatedV2(update, attr.V2) + of Version.V3: cl.client.forkchoiceUpdatedV3(update, attr.V3) + proc getNextPayloadID*(cl: CLMocker): bool = # Generate a random value for the PrevRandao field - var nextPrevRandao: Hash256 + var nextPrevRandao: common.Hash256 doAssert randomBytes(nextPrevRandao.data) == 32 - let timestamp = Quantity toUnix(cl.latestHeader.timestamp + 1.seconds) - let payloadAttributes = PayloadAttributesV1( + let timestamp = Quantity cl.getNextBlockTimestamp.uint64 + cl.latestPayloadAttributes = PayloadAttributes( timestamp: timestamp, prevRandao: FixedBytes[32] nextPrevRandao.data, suggestedFeeRecipient: Address cl.nextFeeRecipient, ) + if cl.isShanghai(timestamp): + cl.latestPayloadAttributes.withdrawals = cl.nextWithdrawals + + if cl.isCancun(timestamp): + # Write a deterministic hash based on the block number + let beaconRoot = timestampToBeaconRoot(timestamp) + cl.latestPayloadAttributes.parentBeaconBlockRoot = some(beaconRoot) + # Save random value let number = cl.latestHeader.blockNumber.truncate(uint64) + 1 cl.prevRandaoHistory[number] = nextPrevRandao - let res = cl.client.forkchoiceUpdatedV1(cl.latestForkchoice, some(payloadAttributes)) + 
let version = cl.latestPayloadAttributes.version + let res = cl.fcu(version, cl.latestForkchoice, some(cl.latestPayloadAttributes)) if res.isErr: - error "CLMocker: Could not send forkchoiceUpdatedV1", msg=res.error + error "CLMocker: Could not send forkchoiceUpdated", version=version, msg=res.error return false let s = res.get() @@ -145,31 +271,118 @@ proc getNextPayloadID*(cl: CLMocker): bool = error "CLMocker: Unexpected forkchoiceUpdated Response from Payload builder", status=s.payloadStatus.status + if s.payloadStatus.latestValidHash.isNone or s.payloadStatus.latestValidHash.get != cl.latestForkchoice.headBlockHash: + error "CLMocker: Unexpected forkchoiceUpdated LatestValidHash Response from Payload builder", + latest=s.payloadStatus.latestValidHash, + head=cl.latestForkchoice.headBlockHash + doAssert s.payLoadID.isSome cl.nextPayloadID = s.payloadID.get() return true +proc getPayload(cl: CLMocker, payloadId: PayloadID): Result[GetPayloadResponse, string] = + let ts = cl.latestPayloadAttributes.timestamp + if cl.isCancun(ts): + let res = cl.client.getPayloadV3(payloadId) + if res.isErr: + return err(res.error) + let x = res.get + return ok(GetPayloadResponse( + executionPayload: executionPayload(x.executionPayload), + blockValue: some(x.blockValue), + blobsBundle: some(x.blobsBundle) + )) + + if cl.isShanghai(ts): + let res = cl.client.getPayloadV2(payloadId) + if res.isErr: + return err(res.error) + let x = res.get + return ok(GetPayloadResponse( + executionPayload: executionPayload(x.executionPayload), + blockValue: some(x.blockValue) + )) + + let res = cl.client.getPayloadV1(payloadId) + if res.isErr: + return err(res.error) + return ok(GetPayloadResponse( + executionPayload: executionPayload(res.get), + )) + proc getNextPayload*(cl: CLMocker): bool = - let res = cl.client.getPayloadV1(cl.nextPayloadID) + let res = cl.getPayload(cl.nextPayloadID) if res.isErr: error "CLMocker: Could not getPayload", payloadID=toHex(cl.nextPayloadID) return false - cl.latestPayloadBuilt = res.get() + let x = res.get() + cl.latestPayloadBuilt = x.executionPayload + cl.latestBlockValue = x.blockValue + cl.latestBlobsBundle = x.blobsBundle + let header = toBlockHeader(cl.latestPayloadBuilt) let blockHash = BlockHash header.blockHash.data if blockHash != cl.latestPayloadBuilt.blockHash: - error "getNextPayload blockHash mismatch", + error "CLMocker: getNextPayload blockHash mismatch", expected=cl.latestPayloadBuilt.blockHash.toHex, get=blockHash.toHex return false + if cl.latestPayloadBuilt.timestamp != cl.latestPayloadAttributes.timestamp: + error "CLMocker: Incorrect Timestamp on payload built", + expect=cl.latestPayloadBuilt.timestamp.uint64, + get=cl.latestPayloadAttributes.timestamp.uint64 + return false + + if cl.latestPayloadBuilt.feeRecipient != cl.latestPayloadAttributes.suggestedFeeRecipient: + error "CLMocker: Incorrect SuggestedFeeRecipient on payload built", + expect=cl.latestPayloadBuilt.feeRecipient.toHex, + get=cl.latestPayloadAttributes.suggestedFeeRecipient.toHex + return false + + if cl.latestPayloadBuilt.prevRandao != cl.latestPayloadAttributes.prevRandao: + error "CLMocker: Incorrect PrevRandao on payload built", + expect=cl.latestPayloadBuilt.prevRandao.toHex, + get=cl.latestPayloadAttributes.prevRandao.toHex + return false + + if cl.latestPayloadBuilt.parentHash != BlockHash cl.latestHeader.blockHash.data: + error "CLMocker: Incorrect ParentHash on payload built", + expect=cl.latestPayloadBuilt.parentHash.toHex, + get=cl.latestHeader.blockHash + return false + + if 
cl.latestPayloadBuilt.blockNumber.uint64.toBlockNumber != cl.latestHeader.blockNumber + 1.toBlockNumber: + error "CLMocker: Incorrect Number on payload built", + expect=cl.latestPayloadBuilt.blockNumber.uint64, + get=cl.latestHeader.blockNumber+1.toBlockNumber + return false + return true -proc broadcastNewPayload(cl: CLMocker, payload: ExecutionPayloadV1): Result[PayloadStatusV1, string] = - let res = cl.client.newPayloadV1(payload) - return res +func versionedHashes(bb: BlobsBundleV1): seq[BlockHash] = + doAssert(bb.commitments.len > 0) + result = newSeqOfCap[BlockHash](bb.commitments.len) + + for com in bb.commitments: + var h = keccakHash(com.bytes) + h.data[0] = BLOB_COMMITMENT_VERSION_KZG + result.add BlockHash(h.data) + +proc broadcastNewPayload(cl: CLMocker, payload: ExecutionPayload): Result[PayloadStatusV1, string] = + var versionedHashes: seq[BlockHash] + if cl.latestBlobsBundle.isSome: + # Broadcast the blob bundle to all clients + versionedHashes = versionedHashes(cl.latestBlobsBundle.get) + + case payload.version + of Version.V1: return cl.client.newPayloadV1(payload.V1) + of Version.V2: return cl.client.newPayloadV2(payload.V2) + of Version.V3: return cl.client.newPayloadV3(payload.V3, + versionedHashes, + cl.latestPayloadAttributes.parentBeaconBlockRoot.get) proc broadcastNextNewPayload(cl: CLMocker): bool = let res = cl.broadcastNewPayload(cl.latestPayloadBuilt) @@ -180,7 +393,7 @@ proc broadcastNextNewPayload(cl: CLMocker): bool = let s = res.get() if s.status == PayloadExecutionStatus.valid: # The client is synced and the payload was immediately validated - # https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md: + # https:#github.com/ethereum/execution-apis/blob/main/src/engine/specification.md: # - If validation succeeds, the response MUST contain {status: VALID, latestValidHash: payload.blockHash} let blockHash = cl.latestPayloadBuilt.blockHash if s.latestValidHash.isNone: @@ -196,12 +409,12 @@ proc broadcastNextNewPayload(cl: CLMocker): bool = elif s.status == PayloadExecutionStatus.accepted: # The client is not synced but the payload was accepted - # https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md: + # https:#github.com/ethereum/execution-apis/blob/main/src/engine/specification.md: # - {status: ACCEPTED, latestValidHash: null, validationError: null} if the following conditions are met: # the blockHash of the payload is valid # the payload doesn't extend the canonical chain # the payload hasn't been fully validated. 
- let nullHash = BlockHash Hash256().data + let nullHash = BlockHash common.Hash256().data let latestValidHash = s.latestValidHash.get(nullHash) if s.latestValidHash.isSome and latestValidHash != nullHash: error "CLMocker: NewPayload returned ACCEPTED status with incorrect LatestValidHash", @@ -220,8 +433,8 @@ proc broadcastNextNewPayload(cl: CLMocker): bool = proc broadcastForkchoiceUpdated*(cl: CLMocker, update: ForkchoiceStateV1): Result[ForkchoiceUpdatedResponse, string] = - let res = cl.client.forkchoiceUpdatedV1(update) - return res + let version = cl.latestExecutedPayload.version + cl.fcu(version, update, none(PayloadAttributes)) proc broadcastLatestForkchoice(cl: CLMocker): bool = let res = cl.broadcastForkchoiceUpdated(cl.latestForkchoice) @@ -235,14 +448,35 @@ proc broadcastLatestForkchoice(cl: CLMocker): bool = status=s.payloadStatus.status return false + if s.payloadStatus.latestValidHash.get != cl.latestForkchoice.headBlockHash: + error "CLMocker: Incorrect LatestValidHash from ForkchoiceUpdated", + get=s.payloadStatus.latestValidHash.get.toHex, + expect=cl.latestForkchoice.headBlockHash.toHex + + if s.payloadStatus.validationError.isSome: + error "CLMocker: Expected empty validationError", + msg=s.payloadStatus.validationError.get + + if s.payloadID.isSome: + error "CLMocker: Expected empty PayloadID", + msg=s.payloadID.get.toHex + return true + proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe.} = doAssert(cl.ttdReached) + cl.currentPayloadNumber = cl.latestHeader.blockNumber.truncate(uint64) + 1'u64 if not cl.pickNextPayloadProducer(): return false + # Check if next withdrawals necessary, test can override this value on + # `OnPayloadProducerSelected` callback + if cl.nextWithdrawals.isNone: + var nw: seq[WithdrawalV1] + cl.setNextWithdrawals(some(nw)) + if cb.onPayloadProducerSelected != nil: if not cb.onPayloadProducerSelected(): return false @@ -250,6 +484,8 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe if not cl.getNextPayloadID(): return false + cl.setNextWithdrawals(none(seq[WithdrawalV1])) + if cb.onGetPayloadID != nil: if not cb.onGetPayloadID(): return false @@ -354,6 +590,7 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe return false cl.latestHeader = newHeader + cl.headerHistory[cl.latestHeadNumber] = cl.latestHeader return true @@ -365,17 +602,5 @@ proc produceBlocks*(cl: CLMocker, blockCount: int, cb: BlockProcessCallbacks): b return false return true -# Check whether a block number is a PoS block -proc isBlockPoS*(cl: CLMocker, bn: common.BlockNumber): bool = - if cl.firstPoSBlockNumber.isNone: - return false - - let number = cl.firstPoSBlockNumber.get() - let bn = bn.truncate(uint64) - if number > bn: - return false - - return true - proc posBlockNumber*(cl: CLMocker): uint64 = cl.firstPoSBlockNumber.get(0'u64) diff --git a/hive_integration/nodocker/engine/engine/engine_spec.nim b/hive_integration/nodocker/engine/engine/engine_spec.nim new file mode 100644 index 000000000..0dcb81f1f --- /dev/null +++ b/hive_integration/nodocker/engine/engine/engine_spec.nim @@ -0,0 +1,1748 @@ +import + std/tables, + stew/byteutils, + chronicles, + nimcrypto/sysrand, + chronos, + ".."/[test_env, helper, types], + ../../../nimbus/transaction, + ../../../nimbus/rpc/rpc_types, + ../../../nimbus/rpc/merge/mergeutils + +import eth/common/eth_types as common_eth_types +type Hash256 = common_eth_types.Hash256 + +type + EngineSpec* = ref object of BaseSpec + exec*: proc(t: 
TestEnv): bool + ttd*: int64 + chainFile*: string + slotsToFinalized*: int + slotsToSafe*: int + +const + prevRandaoContractAddr = hexToByteArray[20]("0000000000000000000000000000000000000316") + +template testNP(res, cond: untyped, validHash = none(Hash256)) = + testCond res.isOk + let s = res.get() + testCond s.status == PayloadExecutionStatus.cond: + error "Unexpected NewPayload status", expect=PayloadExecutionStatus.cond, get=s.status + testCond s.latestValidHash == validHash: + error "Unexpected NewPayload latestValidHash", expect=validHash, get=s.latestValidHash + +template testNPEither(res, cond: untyped, validHash = none(Hash256)) = + testCond res.isOk + let s = res.get() + testCond s.status in cond: + error "Unexpected NewPayload status", expect=cond, get=s.status + testCond s.latestValidHash == validHash: + error "Unexpected NewPayload latestValidHash", expect=validHash, get=s.latestValidHash + +template testLatestHeader(client: untyped, expectedHash: BlockHash) = + var lastHeader: EthBlockHeader + var hRes = client.latestHeader(lastHeader) + testCond hRes.isOk: + error "unable to get latest header", msg=hRes.error + + let lastHash = BlockHash lastHeader.blockHash.data + # Latest block header available via Eth RPC should not have changed at this point + testCond lastHash == expectedHash: + error "latest block header incorrect", + expect = expectedHash, + get = lastHash + +#proc sendTx(t: TestEnv, recipient: EthAddress, val: UInt256, data: openArray[byte] = []): bool = +# t.tx = t.makeTx(recipient, val, data) +# let rr = t.rpcClient.sendTransaction(t.tx) +# if rr.isErr: +# error "Unable to send transaction", msg=rr.error +# return false +# return true +# +#proc sendTx(t: TestEnv, val: UInt256): bool = +# t.sendTx(prevRandaoContractAddr, val) + +# Invalid Terminal Block in ForkchoiceUpdated: +# Client must reject ForkchoiceUpdated directives if the referenced HeadBlockHash does not meet the TTD requirement. +proc invalidTerminalBlockForkchoiceUpdated*(t: TestEnv): bool = + let + gHash = w3Hash t.gHeader.blockHash + forkchoiceState = ForkchoiceStateV1( + headBlockHash: gHash, + safeBlockHash: gHash, + finalizedBlockHash: gHash, + ) + + let res = t.rpcClient.forkchoiceUpdatedV1(forkchoiceState) + # Execution specification: + # {payloadStatus: {status: INVALID, latestValidHash=0x00..00}, payloadId: null} + # either obtained from the Payload validation process or as a result of + # validating a PoW block referenced by forkchoiceState.headBlockHash + + testFCU(res, invalid, some(Hash256())) + # ValidationError is not validated since it can be either null or a string message + + # Check that PoW chain progresses + testCond t.verifyPoWProgress(t.gHeader.blockHash) + return true + +#[ +# Invalid GetPayload Under PoW: Client must reject GetPayload directives under PoW. +proc invalidGetPayloadUnderPoW(t: TestEnv): TestStatus = + result = TestStatus.OK + + # We start in PoW and try to get an invalid Payload, which should produce an error but nothing should be disrupted. + let id = PayloadID [1.byte, 2,3,4,5,6,7,8] + let res = t.rpcClient.getPayloadV1(id) + testCond res.isErr + + # Check that PoW chain progresses + testCond t.verifyPoWProgress(t.gHeader.blockHash) + +# Invalid Terminal Block in NewPayload: +# Client must reject NewPayload directives if the referenced ParentHash does not meet the TTD requirement. 
+proc invalidTerminalBlockNewPayload(t: TestEnv): TestStatus = + result = TestStatus.OK + + let gBlock = t.gHeader + let payload = ExecutableData( + parentHash: gBlock.blockHash, + stateRoot: gBlock.stateRoot, + receiptsRoot: EMPTY_ROOT_HASH, + number: 1, + gasLimit: gBlock.gasLimit, + gasUsed: 0, + timestamp: gBlock.timestamp + 1.seconds, + baseFeePerGas:gBlock.baseFee + ) + let hashedPayload = customizePayload(payload, CustomPayload()) + let res = t.rpcClient.newPayloadV1(hashedPayload) + + # Execution specification: + # {status: INVALID, latestValidHash=0x00..00} + # if terminal block conditions are not satisfied + testNP(res, invalid, some(Hash256())) + + # Check that PoW chain progresses + testCond t.verifyPoWProgress(t.gHeader.blockHash) + +proc unknownHeadBlockHash(t: TestEnv): TestStatus = + result = TestStatus.OK + + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + var randomHash: Hash256 + testCond randomBytes(randomHash.data) == 32 + + let clMock = t.clMock + let forkchoiceStateUnknownHeadHash = ForkchoiceStateV1( + headBlockHash: BlockHash randomHash.data, + safeBlockHash: clMock.latestForkchoice.finalizedBlockHash, + finalizedBlockHash: clMock.latestForkchoice.finalizedBlockHash, + ) + + var res = t.rpcClient.forkchoiceUpdatedV1(forkchoiceStateUnknownHeadHash) + testCond res.isOk + + let s = res.get() + # Execution specification:: + # - {payloadStatus: {status: SYNCING, latestValidHash: null, validationError: null}, payloadId: null} + # if forkchoiceState.headBlockHash references an unknown payload or a payload that can't be validated + # because requisite data for the validation is missing + testCond s.payloadStatus.status == PayloadExecutionStatus.syncing + + # Test again using PayloadAttributes, should also return SYNCING and no PayloadID + let timestamp = uint64 clMock.latestExecutedPayload.timestamp + let payloadAttr = PayloadAttributesV1( + timestamp: Quantity(timestamp + 1) + ) + + res = t.rpcClient.forkchoiceUpdatedV1(forkchoiceStateUnknownHeadHash, some(payloadAttr)) + testCond res.isOk + testCond s.payloadStatus.status == PayloadExecutionStatus.syncing + testCond s.payloadId.isNone + +proc unknownSafeBlockHash(t: TestEnv): TestStatus = + result = TestStatus.OK + + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # Produce blocks before starting the test + let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) + testCond produce5BlockRes + + let clMock = t.clMock + let client = t.rpcClient + let produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( + # Run test after a new payload has been broadcast + onNewPayloadBroadcast: proc(): bool = + # Generate a random SafeBlock hash + var randomSafeBlockHash: Hash256 + doAssert randomBytes(randomSafeBlockHash.data) == 32 + + # Send forkchoiceUpdated with random SafeBlockHash + let forkchoiceStateUnknownSafeHash = ForkchoiceStateV1( + headBlockHash: clMock.latestExecutedPayload.blockHash, + safeBlockHash: BlockHash randomSafeBlockHash.data, + finalizedBlockHash: clMock.latestForkchoice.finalizedBlockHash, + ) + # Execution specification: + # - This value MUST be either equal to or an ancestor of headBlockHash + let res = client.forkchoiceUpdatedV1(forkchoiceStateUnknownSafeHash) + return res.isErr + )) + + testCond produceSingleBlockRes + +proc unknownFinalizedBlockHash(t: TestEnv): TestStatus = + result = TestStatus.OK + + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # Produce blocks before starting the test + let produce5BlockRes = t.clMock.produceBlocks(5, 
BlockProcessCallbacks()) + testCond produce5BlockRes + + let clMock = t.clMock + let client = t.rpcClient + let produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( + # Run test after a new payload has been broadcast + onNewPayloadBroadcast: proc(): bool = + # Generate a random SafeBlock hash + var randomFinalBlockHash: Hash256 + doAssert randomBytes(randomFinalBlockHash.data) == 32 + + # Send forkchoiceUpdated with random SafeBlockHash + let forkchoiceStateUnknownFinalizedHash = ForkchoiceStateV1( + headBlockHash: clMock.latestExecutedPayload.blockHash, + safeBlockHash: clMock.latestForkchoice.safeBlockHash, + finalizedBlockHash: BlockHash randomFinalBlockHash.data, + ) + # Execution specification: + # - This value MUST be either equal to or an ancestor of headBlockHash + var res = client.forkchoiceUpdatedV1(forkchoiceStateUnknownFinalizedHash) + if res.isOk: + return false + + # Test again using PayloadAttributes, should also return INVALID and no PayloadID + let timestamp = uint64 clMock.latestExecutedPayload.timestamp + let payloadAttr = PayloadAttributesV1( + timestamp: Quantity(timestamp + 1) + ) + res = client.forkchoiceUpdatedV1(forkchoiceStateUnknownFinalizedHash, some(payloadAttr)) + return res.isErr + )) + + testCond produceSingleBlockRes + +# Send an inconsistent ForkchoiceState with a known payload that belongs to a side chain as head, safe or finalized. +type + Inconsistency {.pure.} = enum + Head + Safe + Finalized + + PayloadList = ref object + canonicalPayloads : seq[ExecutableData] + alternativePayloads: seq[ExecutableData] + +template inconsistentForkchoiceStateGen(procname: untyped, inconsistency: Inconsistency) = + proc procName(t: TestEnv): TestStatus = + result = TestStatus.OK + + # Wait until TTD is reached by this client + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + var pList = PayloadList() + let clMock = t.clMock + let client = t.rpcClient + + # Produce blocks before starting the test + let produceBlockRes = clMock.produceBlocks(3, BlockProcessCallbacks( + onGetPayload: proc(): bool = + # Generate and send an alternative side chain + var customData = CustomPayload( + extraData: some(@[0x01.byte]) + ) + + if pList.alternativePayloads.len > 0: + customData.parentHash = some(pList.alternativePayloads[^1].blockHash) + + let executableData = toExecutableData(clMock.latestPayloadBuilt) + let alternativePayload = customizePayload(executableData, customData) + pList.alternativePayloads.add(alternativePayload.toExecutableData) + + let latestCanonicalPayload = toExecutableData(clMock.latestPayloadBuilt) + pList.canonicalPayloads.add(latestCanonicalPayload) + + # Send the alternative payload + let res = client.newPayloadV1(alternativePayload) + if res.isErr: + return false + + let s = res.get() + s.status == PayloadExecutionStatus.valid or s.status == PayloadExecutionStatus.accepted + )) + + testCond produceBlockRes + + # Send the invalid ForkchoiceStates + let len = pList.alternativePayloads.len + var inconsistentFcU = ForkchoiceStateV1( + headBlockHash: Web3BlockHash pList.canonicalPayloads[len-1].blockHash.data, + safeBlockHash: Web3BlockHash pList.canonicalPayloads[len-2].blockHash.data, + finalizedBlockHash: Web3BlockHash pList.canonicalPayloads[len-3].blockHash.data, + ) + + when inconsistency == Inconsistency.Head: + inconsistentFcU.headBlockHash = Web3BlockHash pList.alternativePayloads[len-1].blockHash.data + elif inconsistency == Inconsistency.Safe: + inconsistentFcU.safeBlockHash = Web3BlockHash 
pList.alternativePayloads[len-2].blockHash.data + else: + inconsistentFcU.finalizedBlockHash = Web3BlockHash pList.alternativePayloads[len-3].blockHash.data + + var r = client.forkchoiceUpdatedV1(inconsistentFcU) + testCond r.isErr + + # Return to the canonical chain + r = client.forkchoiceUpdatedV1(clMock.latestForkchoice) + testCond r.isOk + let s = r.get() + testCond s.payloadStatus.status == PayloadExecutionStatus.valid + +inconsistentForkchoiceStateGen(inconsistentForkchoiceState1, Inconsistency.Head) +inconsistentForkchoiceStateGen(inconsistentForkchoiceState2, Inconsistency.Safe) +inconsistentForkchoiceStateGen(inconsistentForkchoiceState3, Inconsistency.Finalized) + +# Verify behavior on a forkchoiceUpdated with invalid payload attributes +template invalidPayloadAttributesGen(procname: untyped, syncingCond: bool) = + proc procName(t: TestEnv): TestStatus = + result = TestStatus.OK + + # Wait until TTD is reached by this client + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + let clMock = t.clMock + let client = t.rpcClient + + # Produce blocks before starting the test + var produceBlockRes = clMock.produceBlocks(5, BlockProcessCallbacks()) + testCond produceBlockRes + + # Send a forkchoiceUpdated with invalid PayloadAttributes + produceBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( + onNewPayloadBroadcast: proc(): bool = + # Try to apply the new payload with invalid attributes + var blockHash: Hash256 + when syncingCond: + # Setting a random hash will put the client into `SYNCING` + doAssert randomBytes(blockHash.data) == 32 + else: + # Set the block hash to the next payload that was broadcasted + blockHash = hash256(clMock.latestPayloadBuilt.blockHash) + + let fcu = ForkchoiceStateV1( + headBlockHash: Web3BlockHash blockHash.data, + safeBlockHash: Web3BlockHash blockHash.data, + finalizedBlockHash: Web3BlockHash blockHash.data, + ) + + let attr = PayloadAttributesV1() + + # 0) Check headBlock is known and there is no missing data, if not respond with SYNCING + # 1) Check headBlock is VALID, if not respond with INVALID + # 2) Apply forkchoiceState + # 3) Check payloadAttributes, if invalid respond with error: code: Invalid payload attributes + # 4) Start payload build process and respond with VALID + when syncingCond: + # If we are SYNCING, the outcome should be SYNCING regardless of the validity of the payload atttributes + let r = client.forkchoiceUpdatedV1(fcu, some(attr)) + testFCU(r, syncing) + else: + let r = client.forkchoiceUpdatedV1(fcu, some(attr)) + testCond r.isOk: + error "Unexpected error", msg = r.error + + # Check that the forkchoice was applied, regardless of the error + testLatestHeader(client, BlockHash blockHash.data) + return true + )) + + testCond produceBlockRes + +invalidPayloadAttributesGen(invalidPayloadAttributes1, false) +invalidPayloadAttributesGen(invalidPayloadAttributes2, true) + +proc preTTDFinalizedBlockHash(t: TestEnv): TestStatus = + result = TestStatus.OK + + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # Produce blocks before starting the test + let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) + testCond produce5BlockRes + + let + gHash = Web3BlockHash t.gHeader.blockHash.data + forkchoiceState = ForkchoiceStateV1( + headBlockHash: gHash, + safeBlockHash: gHash, + finalizedBlockHash: gHash, + ) + client = t.rpcClient + clMock = t.clMock + + var res = client.forkchoiceUpdatedV1(forkchoiceState) + testFCU(res, invalid, some(Hash256())) + + res = 
client.forkchoiceUpdatedV1(clMock.latestForkchoice) + testFCU(res, valid) + +# Corrupt the hash of a valid payload, client should reject the payload. +# All possible scenarios: +# (fcU) +# ┌────────┐ ┌────────────────────────┐ +# │ HEAD │◄───────┤ Bad Hash (!Sync,!Side) │ +# └────┬───┘ └────────────────────────┘ +# │ +# │ +# ┌────▼───┐ ┌────────────────────────┐ +# │ HEAD-1 │◄───────┤ Bad Hash (!Sync, Side) │ +# └────┬───┘ └────────────────────────┘ +# │ +# +# +# (fcU) +# ******************** ┌───────────────────────┐ +# * (Unknown) HEAD *◄─┤ Bad Hash (Sync,!Side) │ +# ******************** └───────────────────────┘ +# │ +# │ +# ┌────▼───┐ ┌───────────────────────┐ +# │ HEAD-1 │◄───────────┤ Bad Hash (Sync, Side) │ +# └────┬───┘ └───────────────────────┘ +# │ +# + +type + Shadow = ref object + hash: Hash256 + +template badHashOnNewPayloadGen(procname: untyped, syncingCond: bool, sideChain: bool) = + proc procName(t: TestEnv): TestStatus = + result = TestStatus.OK + + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # Produce blocks before starting the test + let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) + testCond produce5BlockRes + + let clMock = t.clMock + let client = t.rpcClient + let shadow = Shadow() + + var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( + # Run test after the new payload has been obtained + onGetPayload: proc(): bool = + # Alter hash on the payload and send it to client, should produce an error + var alteredPayload = clMock.latestPayloadBuilt + var invalidPayloadHash = hash256(alteredPayload.blockHash) + let lastByte = int invalidPayloadHash.data[^1] + invalidPayloadHash.data[^1] = byte(not lastByte) + shadow.hash = invalidPayloadHash + alteredPayload.blockHash = BlockHash invalidPayloadHash.data + + when not syncingCond and sideChain: + # We alter the payload by setting the parent to a known past block in the + # canonical chain, which makes this payload a side chain payload, and also an invalid block hash + # (because we did not update the block hash appropriately) + alteredPayload.parentHash = Web3BlockHash clMock.latestHeader.parentHash.data + elif syncingCond: + # We need to send an fcU to put the client in SYNCING state. + var randomHeadBlock: Hash256 + doAssert randomBytes(randomHeadBlock.data) == 32 + + let latestHeaderHash = clMock.latestHeader.blockHash + let fcU = ForkchoiceStateV1( + headBlockHash: Web3BlockHash randomHeadBlock.data, + safeBlockHash: Web3BlockHash latestHeaderHash.data, + finalizedBlockHash: Web3BlockHash latestHeaderHash.data + ) + + let r = client.forkchoiceUpdatedV1(fcU) + if r.isErr: + return false + let z = r.get() + if z.payloadStatus.status != PayloadExecutionStatus.syncing: + return false + + when sidechain: + # Syncing and sidechain, the caonincal head is an unknown payload to us, + # but this specific bad hash payload is in theory part of a side chain. + # Therefore the parent we use is the head hash. + alteredPayload.parentHash = Web3BlockHash latestHeaderHash.data + else: + # The invalid bad-hash payload points to the unknown head, but we know it is + # indeed canonical because the head was set using forkchoiceUpdated. 
+ alteredPayload.parentHash = Web3BlockHash randomHeadBlock.data + + let res = client.newPayloadV1(alteredPayload) + # Execution specification:: + # - {status: INVALID_BLOCK_HASH, latestValidHash: null, validationError: null} if the blockHash validation has failed + if res.isErr: + return false + let s = res.get() + if s.status != PayloadExecutionStatus.invalid_block_hash: + return false + s.latestValidHash.isNone + )) + testCond produceSingleBlockRes + + # Lastly, attempt to build on top of the invalid payload + produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( + # Run test after the new payload has been obtained + onGetPayload: proc(): bool = + let payload = toExecutableData(clMock.latestPayloadBuilt) + let alteredPayload = customizePayload(payload, CustomPayload( + parentHash: some(shadow.hash), + )) + let res = client.newPayloadV1(alteredPayload) + if res.isErr: + return false + # Response status can be ACCEPTED (since parent payload could have been thrown out by the client) + # or INVALID (client still has the payload and can verify that this payload is incorrectly building on top of it), + # but a VALID response is incorrect. + let s = res.get() + s.status != PayloadExecutionStatus.valid + )) + testCond produceSingleBlockRes + +badHashOnNewPayloadGen(badHashOnNewPayload1, false, false) +badHashOnNewPayloadGen(badHashOnNewPayload2, true, false) +badHashOnNewPayloadGen(badHashOnNewPayload3, false, true) +badHashOnNewPayloadGen(badHashOnNewPayload4, true, true) + +proc parentHashOnExecPayload(t: TestEnv): TestStatus = + result = TestStatus.OK + + # Wait until TTD is reached by this client + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # Produce blocks before starting the test + let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) + testCond produce5BlockRes + + let clMock = t.clMock + let client = t.rpcClient + var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( + # Run test after the new payload has been obtained + onGetPayload: proc(): bool = + # Alter hash on the payload and send it to client, should produce an error + var alteredPayload = clMock.latestPayloadBuilt + alteredPayload.blockHash = alteredPayload.parentHash + let res = client.newPayloadV1(alteredPayload) + if res.isErr: + return false + # Execution specification:: + # - {status: INVALID_BLOCK_HASH, latestValidHash: null, validationError: null} if the blockHash validation has failed + let s = res.get() + s.status == PayloadExecutionStatus.invalid_block_hash + )) + testCond produceSingleBlockRes + +# Attempt to re-org to a chain containing an invalid transition payload +proc invalidTransitionPayload(t: TestEnv): TestStatus = + result = TestStatus.OK + + # Wait until TTD is reached by main client + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + let clMock = t.clMock + let client = t.rpcClient + + # Produce two blocks before trying to re-org + t.nonce = 2 # Initial PoW chain already contains 2 transactions + var pbRes = clMock.produceBlocks(2, BlockProcessCallbacks( + onPayloadProducerSelected: proc(): bool = + t.sendTx(1.u256) + )) + + testCond pbRes + + # Introduce the invalid transition payload + pbRes = clMock.produceSingleBlock(BlockProcessCallbacks( + # This is being done in the middle of the block building + # process simply to be able to re-org back. 
+ onGetPayload: proc(): bool = + let basePayload = clMock.executedPayloadHistory[clMock.posBlockNumber] + let alteredPayload = generateInvalidPayload(basePayload, InvalidStateRoot) + + let res = client.newPayloadV1(alteredPayload) + let cond = {PayloadExecutionStatus.invalid, PayloadExecutionStatus.accepted} + testNPEither(res, cond, some(Hash256())) + + let rr = client.forkchoiceUpdatedV1( + ForkchoiceStateV1(headBlockHash: alteredPayload.blockHash) + ) + testFCU(rr, invalid, some(Hash256())) + + testLatestHeader(client, clMock.latestExecutedPayload.blockHash) + return true + )) + + testCond pbRes + +template invalidPayloadTestCaseGen(procName: untyped, payloadField: InvalidPayloadField, emptyTxs: bool = false) = + proc procName(t: TestEnv): TestStatus = + result = TestStatus.OK + + # Wait until TTD is reached by this client + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + let clMock = t.clMock + let client = t.rpcClient + + template txProc(): bool = + when not emptyTxs: + t.sendTx(0.u256) + else: + true + + # Produce blocks before starting the test + var pbRes = clMock.produceBlocks(5, BlockProcessCallbacks( + # Make sure at least one transaction is included in each block + onPayloadProducerSelected: proc(): bool = + txProc() + )) + + testCond pbRes + + let invalidPayload = Shadow() + + pbRes = clMock.produceSingleBlock(BlockProcessCallbacks( + # Make sure at least one transaction is included in the payload + onPayloadProducerSelected: proc(): bool = + txProc() + , + # Run test after the new payload has been obtained + onGetPayload: proc(): bool = + # Alter the payload while maintaining a valid hash and send it to the client, should produce an error + + # We need at least one transaction for most test cases to work + when not emptyTxs: + if clMock.latestPayloadBuilt.transactions.len == 0: + # But if the payload has no transactions, the test is invalid + error "No transactions in the base payload" + return false + + let alteredPayload = generateInvalidPayload(clMock.latestPayloadBuilt, payloadField, t.vaultKey) + invalidPayload.hash = hash256(alteredPayload.blockHash) + + # Depending on the field we modified, we expect a different status + let rr = client.newPayloadV1(alteredPayload) + if rr.isErr: + error "unable to send altered payload", msg=rr.error + return false + let s = rr.get() + + when payloadField == InvalidParentHash: + # Execution specification:: + # {status: ACCEPTED, latestValidHash: null, validationError: null} if the following conditions are met: + # - the blockHash of the payload is valid + # - the payload doesn't extend the canonical chain + # - the payload hasn't been fully validated + # {status: SYNCING, latestValidHash: null, validationError: null} + # if the payload extends the canonical chain and requisite data for its validation is missing + # (the client can assume the payload extends the canonical because the linking payload could be missing) + if s.status notin {PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted}: + error "newPayloadV1 status expect syncing or accepted", get=s.status + return false + + if s.latestValidHash.isSome: + error "newPayloadV1 latestValidHash not empty" + return false + else: + if s.status != PayloadExecutionStatus.invalid: + error "newPayloadV1 status expect invalid", get=s.status + return false + + if s.latestValidHash.isNone: + return false + + let latestValidHash = s.latestValidHash.get + if latestValidHash != alteredPayload.parentHash: + error "latestValidHash is not the same with parentHash", + expected = 
alteredPayload.parentHash, get = latestValidHash + return false + + # Send the forkchoiceUpdated with a reference to the invalid payload. + let fcState = ForkchoiceStateV1( + headBlockHash: alteredPayload.blockHash, + safeBlockHash: alteredPayload.blockHash, + finalizedBlockHash: alteredPayload.blockHash, + ) + + let timestamp = Quantity(alteredPayload.timestamp.int64 + 1) + let payloadAttr = PayloadAttributesV1(timestamp: timestamp) + + # Execution specification: + # {payloadStatus: {status: INVALID, latestValidHash: null, validationError: errorMessage | null}, payloadId: null} + # obtained from the Payload validation process if the payload is deemed INVALID + let rs = client.forkchoiceUpdatedV1(fcState, some(payloadAttr)) + # Execution specification: + # {payloadStatus: {status: INVALID, latestValidHash: null, validationError: errorMessage | null}, payloadId: null} + # obtained from the Payload validation process if the payload is deemed INVALID + # Note: SYNCING/ACCEPTED is acceptable here as long as the block produced after this test is produced successfully + if rs.isErr: + error "unable to send altered payload", msg=rs.error + return false + + let z = rs.get() + if z.payloadStatus.status notin {PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted, PayloadExecutionStatus.invalid}: + return false + + # Finally, attempt to fetch the invalid payload using the JSON-RPC endpoint + var header: rpc_types.BlockHeader + let rp = client.headerByHash(alteredPayload.blockHash.hash256, header) + rp.isErr + )) + + testCond pbRes + + # Lastly, attempt to build on top of the invalid payload + let psb = clMock.produceSingleBlock(BlockProcessCallbacks( + # Run test after the new payload has been obtained + onGetPayload: proc(): bool = + let alteredPayload = customizePayload(clMock.latestPayloadBuilt.toExecutableData, CustomPayload( + parentHash: some(invalidPayload.hash), + )) + + info "Sending customized NewPayload: ParentHash", + fromHash=clMock.latestPayloadBuilt.parentHash, toHash=invalidPayload.hash + # Response status can be ACCEPTED (since parent payload could have been thrown out by the client) + # or SYNCING (parent payload is thrown out and also client assumes that the parent is part of canonical chain) + # or INVALID (client still has the payload and can verify that this payload is incorrectly building on top of it), + # but a VALID response is incorrect. 
+ let rr = client.newPayloadV1(alteredPayload) + if rr.isErr: + error "unable to send altered payload", msg=rr.error + return false + + let z = rr.get() + z.status in {PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted, PayloadExecutionStatus.invalid} + )) + + testCond psb + +invalidPayloadTestCaseGen(invalidPayload1, InvalidParentHash) +invalidPayloadTestCaseGen(invalidPayload2, InvalidStateRoot) +invalidPayloadTestCaseGen(invalidPayload3, InvalidStateRoot, true) +invalidPayloadTestCaseGen(invalidPayload4, InvalidReceiptsRoot) +invalidPayloadTestCaseGen(invalidPayload5, InvalidNumber) +invalidPayloadTestCaseGen(invalidPayload6, InvalidGasLimit) +invalidPayloadTestCaseGen(invalidPayload7, InvalidGasUsed) +invalidPayloadTestCaseGen(invalidPayload8, InvalidTimestamp) +invalidPayloadTestCaseGen(invalidPayload9, InvalidPrevRandao) +invalidPayloadTestCaseGen(invalidPayload10, RemoveTransaction) +invalidPayloadTestCaseGen(invalidPayload11, InvalidTransactionSignature) +invalidPayloadTestCaseGen(invalidPayload12, InvalidTransactionNonce) +invalidPayloadTestCaseGen(invalidPayload13, InvalidTransactionGasPrice) +invalidPayloadTestCaseGen(invalidPayload14, InvalidTransactionGas) +invalidPayloadTestCaseGen(invalidPayload15, InvalidTransactionValue) + +# Test to verify Block information available at the Eth RPC after NewPayload +template blockStatusExecPayloadGen(procname: untyped, transitionBlock: bool) = + proc procName(t: TestEnv): TestStatus = + result = TestStatus.OK + + # Wait until TTD is reached by this client + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # Produce blocks before starting the test, only if we are not testing the transition block + when not transitionBlock: + let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) + testCond produce5BlockRes + + let clMock = t.clMock + let client = t.rpcClient + let shadow = Shadow() + + var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( + onPayloadProducerSelected: proc(): bool = + var address: EthAddress + testCond t.sendTx(address, 1.u256) + shadow.hash = rlpHash(t.tx) + return true + , + onNewPayloadBroadcast: proc(): bool = + testLatestHeader(client, clMock.latestForkchoice.headBlockHash) + + let nRes = client.blockNumber() + if nRes.isErr: + error "Unable to get latest block number", msg=nRes.error + return false + + # Latest block number available via Eth RPC should not have changed at this point + let latestNumber = nRes.get + if latestNumber != clMock.latestHeadNumber: + error "latest block number incorrect after newPayload", + expected=clMock.latestHeadNumber, + get=latestNumber + return false + + # Check that the receipt for the transaction we just sent is still not available + let rr = client.txReceipt(shadow.hash) + if rr.isOk: + error "not expecting receipt" + return false + + return true + )) + testCond produceSingleBlockRes + +blockStatusExecPayloadGen(blockStatusExecPayload1, false) +blockStatusExecPayloadGen(blockStatusExecPayload2, true) + +type + MissingAncestorShadow = ref object + cA: ExecutionPayloadV1 + n: int + altChainPayloads: seq[ExecutionPayloadV1] + +# Attempt to re-org to a chain which at some point contains an unknown payload which is also invalid. +# Then reveal the invalid payload and expect that the client rejects it and rejects forkchoice updated calls to this chain. 
+# The invalid_index parameter determines how many payloads apart is the common ancestor from the block that invalidates the chain, +# with a value of 1 meaning that the immediate payload after the common ancestor will be invalid. +template invalidMissingAncestorReOrgGen(procName: untyped, + invalid_index: int, payloadField: InvalidPayloadField, p2psync: bool, emptyTxs: bool) = + + proc procName(t: TestEnv): TestStatus = + result = TestStatus.OK + + # Wait until TTD is reached by this client + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + let clMock = t.clMock + let client = t.rpcClient + + # Produce blocks before starting the test + testCond clMock.produceBlocks(5, BlockProcessCallbacks()) + + let shadow = MissingAncestorShadow( + # Save the common ancestor + cA: clMock.latestPayloadBuilt, + + # Amount of blocks to deviate starting from the common ancestor + n: 10, + + # Slice to save the alternate B chain + altChainPayloads: @[] + ) + + # Append the common ancestor + shadow.altChainPayloads.add shadow.cA + + # Produce blocks but at the same time create an alternate chain which contains an invalid payload at some point (INV_P) + # CommonAncestor◄─▲── P1 ◄─ P2 ◄─ P3 ◄─ ... ◄─ Pn + # │ + # └── P1' ◄─ P2' ◄─ ... ◄─ INV_P ◄─ ... ◄─ Pn' + var pbRes = clMock.produceBlocks(shadow.n, BlockProcessCallbacks( + onPayloadProducerSelected: proc(): bool = + # Function to send at least one transaction each block produced. + # Empty Txs Payload with invalid stateRoot discovered an issue in geth sync, hence this is customizable. + when not emptyTxs: + # Send the transaction to the prevRandaoContractAddr + t.sendTx(1.u256) + return true + , + onGetPayload: proc(): bool = + # Insert extraData to ensure we deviate from the main payload, which contains empty extradata + var alternatePayload = customizePayload(clMock.latestPayloadBuilt, CustomPayload( + parentHash: some(shadow.altChainPayloads[^1].blockHash.hash256), + extraData: some(@[1.byte]), + )) + + if shadow.altChainPayloads.len == invalid_index: + alternatePayload = generateInvalidPayload(alternatePayload, payloadField) + + shadow.altChainPayloads.add alternatePayload + return true + )) + testCond pbRes + + pbRes = clMock.produceSingleBlock(BlockProcessCallbacks( + # Note: We perform the test in the middle of payload creation by the CL Mock, in order to be able to + # re-org back into this chain and use the new payload without issues. + onGetPayload: proc(): bool = + # Now let's send the alternate chain to the client using newPayload/sync + for i in 1..shadow.n: + # Send the payload + var payloadValidStr = "VALID" + if i == invalid_index: + payloadValidStr = "INVALID" + elif i > invalid_index: + payloadValidStr = "VALID with INVALID ancestor" + + info "Invalid chain payload", + i = i, + payloadValidStr = payloadValidStr, + hash = shadow.altChainPayloads[i].blockHash + + let rr = client.newPayloadV1(shadow.altChainPayloads[i]) + testCond rr.isOk + + let rs = client.forkchoiceUpdatedV1(ForkchoiceStateV1( + headBlockHash: shadow.altChainPayloads[i].blockHash, + safeBlockHash: shadow.altChainPayloads[i].blockHash + )) + + if i == invalid_index: + # If this is the first payload after the common ancestor, and this is the payload we invalidated, + # then we have all the information to determine that this payload is invalid. + testNP(rr, invalid, some(shadow.altChainPayloads[i-1].blockHash.hash256)) + elif i > invalid_index: + # We have already sent the invalid payload, but the client could've discarded it. 
+ # In reality the CL will not get to this point because it will have already received the `INVALID` + # response from the previous payload. + let cond = {PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing, PayloadExecutionStatus.invalid} + testNPEither(rr, cond) + else: + # This is one of the payloads before the invalid one, therefore is valid. + let latestValidHash = some(shadow.altChainPayloads[i].blockHash.hash256) + testNP(rr, valid, latestValidHash) + testFCU(rs, valid, latestValidHash) + + + # Resend the latest correct fcU + let rx = client.forkchoiceUpdatedV1(clMock.latestForkchoice) + testCond rx.isOk: + error "Unexpected error ", msg=rx.error + + # After this point, the CL Mock will send the next payload of the canonical chain + return true + )) + + testCond pbRes + +invalidMissingAncestorReOrgGen(invalidMissingAncestor1, 1, InvalidStateRoot, false, true) +invalidMissingAncestorReOrgGen(invalidMissingAncestor2, 9, InvalidStateRoot, false, true) +invalidMissingAncestorReOrgGen(invalidMissingAncestor3, 10, InvalidStateRoot, false, true) + +template blockStatusHeadBlockGen(procname: untyped, transitionBlock: bool) = + proc procName(t: TestEnv): TestStatus = + result = TestStatus.OK + + # Wait until TTD is reached by this client + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # Produce blocks before starting the test, only if we are not testing the transition block + when not transitionBlock: + let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) + testCond produce5BlockRes + + let clMock = t.clMock + let client = t.rpcClient + let shadow = Shadow() + + var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( + onPayloadProducerSelected: proc(): bool = + var address: EthAddress + testCond t.sendTx(address, 1.u256) + shadow.hash = rlpHash(t.tx) + return true + , + # Run test after a forkchoice with new HeadBlockHash has been broadcasted + onForkchoiceBroadcast: proc(): bool = + testLatestHeader(client, clMock.latestForkchoice.headBlockHash) + + let rr = client.txReceipt(shadow.hash) + if rr.isErr: + error "unable to get transaction receipt" + return false + + return true + )) + testCond produceSingleBlockRes + +blockStatusHeadBlockGen(blockStatusHeadBlock1, false) +blockStatusHeadBlockGen(blockStatusHeadBlock2, true) + +proc blockStatusSafeBlock(t: TestEnv): TestStatus = + result = TestStatus.OK + + let clMock = t.clMock + let client = t.rpcClient + + # On PoW mode, `safe` tag shall return error. + var header: EthBlockHeader + var rr = client.namedHeader("safe", header) + testCond rr.isErr + + # Wait until this client catches up with latest PoS Block + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # First ForkchoiceUpdated sent was equal to 0x00..00, `safe` should return error now + rr = client.namedHeader("safe", header) + testCond rr.isErr + + let pbres = clMock.produceBlocks(3, BlockProcessCallbacks( + # Run test after a forkchoice with new SafeBlockHash has been broadcasted + onSafeBlockChange: proc(): bool = + var header: EthBlockHeader + let rr = client.namedHeader("safe", header) + testCond rr.isOk + let safeBlockHash = hash256(clMock.latestForkchoice.safeBlockHash) + header.blockHash == safeBlockHash + )) + + testCond pbres + +proc blockStatusFinalizedBlock(t: TestEnv): TestStatus = + result = TestStatus.OK + + let clMock = t.clMock + let client = t.rpcClient + + # On PoW mode, `finalized` tag shall return error. 
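+  # (The query below is expected to fail: the `finalized` tag only resolves after a PoS forkchoiceUpdated sets a non-zero finalized block hash.)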
+  var header: EthBlockHeader
+  var rr = client.namedHeader("finalized", header)
+  testCond rr.isErr
+
+  # Wait until this client catches up with latest PoS Block
+  let ok = waitFor t.clMock.waitForTTD()
+  testCond ok
+
+  # First ForkchoiceUpdated sent was equal to 0x00..00, `finalized` should return error now
+  rr = client.namedHeader("finalized", header)
+  testCond rr.isErr
+
+  let pbres = clMock.produceBlocks(3, BlockProcessCallbacks(
+    # Run test after a forkchoice with new FinalizedBlockHash has been broadcasted
+    onFinalizedBlockChange: proc(): bool =
+      var header: EthBlockHeader
+      let rr = client.namedHeader("finalized", header)
+      testCond rr.isOk
+      let finalizedBlockHash = hash256(clMock.latestForkchoice.finalizedBlockHash)
+      header.blockHash == finalizedBlockHash
+  ))
+
+  testCond pbres
+
+proc blockStatusReorg(t: TestEnv): TestStatus =
+  result = TestStatus.OK
+
+  # Wait until TTD is reached by this client
+  let ok = waitFor t.clMock.waitForTTD()
+  testCond ok
+
+  # Produce blocks before starting the test
+  let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks())
+  testCond produce5BlockRes
+
+  let clMock = t.clMock
+  let client = t.rpcClient
+  var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks(
+    # Run test after a forkchoice with new HeadBlockHash has been broadcasted
+    onForkchoiceBroadcast: proc(): bool =
+      # Verify the client is serving the latest HeadBlock
+      var currHeader: EthBlockHeader
+      var hRes = client.latestHeader(currHeader)
+      if hRes.isErr:
+        error "unable to get latest header", msg=hRes.error
+        return false
+
+      var currHash = BlockHash currHeader.blockHash.data
+      if currHash != clMock.latestForkchoice.headBlockHash or
+         currHash == clMock.latestForkchoice.safeBlockHash or
+         currHash == clMock.latestForkchoice.finalizedBlockHash:
+        error "latest block header doesn't match HeadBlock hash", hash=currHash
+        return false
+
+      # Reorg back to the previous block (FinalizedBlock)
+      let reorgForkchoice = ForkchoiceStateV1(
+        headBlockHash: clMock.latestForkchoice.finalizedBlockHash,
+        safeBlockHash: clMock.latestForkchoice.finalizedBlockHash,
+        finalizedBlockHash: clMock.latestForkchoice.finalizedBlockHash
+      )
+
+      var res = client.forkchoiceUpdatedV1(reorgForkchoice)
+      if res.isErr:
+        error "Could not send forkchoiceUpdatedV1", msg=res.error
+        return false
+
+      var s = res.get()
+      if s.payloadStatus.status != PayloadExecutionStatus.valid:
+        error "Incorrect status returned after a HeadBlockHash reorg", status=s.payloadStatus.status
+        return false
+
+      if s.payloadStatus.latestValidHash.isNone:
+        error "Cannot get latestValidHash from payloadStatus"
+        return false
+
+      var latestValidHash = s.payloadStatus.latestValidHash.get
+      if latestValidHash != reorgForkchoice.headBlockHash:
+        error "Incorrect latestValidHash returned after a HeadBlockHash reorg",
+          expected=reorgForkchoice.headBlockHash,
+          get=latestValidHash
+        return false
+
+      # Check that we reorged back to the previous block
+      testLatestHeader(client, reorgForkchoice.headBlockHash)
+
+      # Send the HeadBlock again to leave everything back the way it was
+      res = client.forkchoiceUpdatedV1(clMock.latestForkchoice)
+      if res.isErr:
+        error "Could not send forkchoiceUpdatedV1", msg=res.error
+        return false
+
+      s = res.get()
+      if s.payloadStatus.status != PayloadExecutionStatus.valid:
+        error "Incorrect status returned after a HeadBlockHash reorg",
+          status=s.payloadStatus.status
+        return false
+
+      if s.payloadStatus.latestValidHash.isNone:
+        error "Cannot get latestValidHash from payloadStatus"
+        return false
+
+      latestValidHash = s.payloadStatus.latestValidHash.get
+      if latestValidHash != clMock.latestForkchoice.headBlockHash:
+        error "Incorrect latestValidHash returned after a HeadBlockHash reorg",
+          expected=clMock.latestForkchoice.headBlockHash,
+          get=latestValidHash
+        return false
+      return true
+  ))
+  testCond produceSingleBlockRes
+
+proc reExecPayloads(t: TestEnv): TestStatus =
+  result = TestStatus.OK
+
+  # Wait until this client catches up with latest PoS Block
+  let ok = waitFor t.clMock.waitForTTD()
+  testCond ok
+
+  # How many Payloads we are going to re-execute
+  var payloadReExecCount = 10
+
+  # Create those blocks
+  let produceBlockRes = t.clMock.produceBlocks(payloadReExecCount, BlockProcessCallbacks())
+  testCond produceBlockRes
+
+  # Re-execute the payloads
+  let client = t.rpcClient
+  var hRes = client.blockNumber()
+  testCond hRes.isOk:
+    error "unable to get blockNumber", msg=hRes.error
+
+  let lastBlock = int(hRes.get)
+  info "Started re-executing payloads at block", number=lastBlock
+
+  let
+    clMock = t.clMock
+    start = lastBlock - payloadReExecCount + 1
+
+  for i in start..lastBlock:
+    if clMock.executedPayloadHistory.hasKey(uint64 i):
+      let payload = clMock.executedPayloadHistory[uint64 i]
+      let res = client.newPayloadV1(payload)
+      testCond res.isOk:
+        error "Unable to re-execute valid payload", msg=res.error
+
+      let s = res.get()
+      testCond s.status == PayloadExecutionStatus.valid:
+        error "Unexpected status after re-execute valid payload", status=s.status
+    else:
+      testCond true:
+        error "(test issue) Payload does not exist", index=i
+
+proc multipleNewCanonicalPayloads(t: TestEnv): TestStatus =
+  result = TestStatus.OK
+
+  # Wait until TTD is reached by this client
+  let ok = waitFor t.clMock.waitForTTD()
+  testCond ok
+
+  # Produce blocks before starting the test
+  let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks())
+  testCond produce5BlockRes
+
+  let clMock = t.clMock
+  let client = t.rpcClient
+  var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks(
+    # Run test after a new payload has been obtained
+    onGetPayload: proc(): bool =
+      let payloadCount = 80
+      let basePayload = toExecutableData(clMock.latestPayloadBuilt)
+      var newPrevRandao: Hash256
+
+      # Fabricate and send multiple new payloads by changing the PrevRandao field
+      for i in 0..
0:
+        altParentHash = pList.sidechainPayloads[^1].blockHash
+
+      let executableData = toExecutableData(clMock.latestPayloadBuilt)
+      let altPayload = customizePayload(executableData,
+        CustomPayload(
+          parentHash: some(altParentHash.hash256),
+          extraData: some(@[0x01.byte]),
+        ))
+
+      pList.sidechainPayloads.add(altPayload)
+      return true
+  ))
+
+  testCond r1
+
+
+  # Produce blocks before starting the test (So we don't try to reorg back to the genesis block)
+  let r2 = clMock.produceSingleBlock(BlockProcessCallbacks(
+    onGetPayload: proc(): bool =
+      let r = client.newPayloadV1(pList.sidechainPayloads[^1])
+      if r.isErr:
+        return false
+      let s = r.get()
+      if s.status notin {PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted}:
+        return false
+
+      # We are going to send one of the alternative payloads and fcU to it
+      let len = pList.sidechainPayloads.len
+      let forkchoiceUpdatedBack = ForkchoiceStateV1(
+        headBlockHash: pList.sidechainPayloads[len-1].blockHash,
+        safeBlockHash: pList.sidechainPayloads[len-2].blockHash,
+        finalizedBlockHash: pList.sidechainPayloads[len-3].blockHash,
+      )
+
+      # It is only expected that the client does not produce an error and the CL Mocker is able to progress after the re-org
+      let res = client.forkchoiceUpdatedV1(forkchoiceUpdatedBack)
+      if res.isErr:
+        return false
+
+      let rs = res.get()
+      if rs.payloadStatus.status != PayloadExecutionStatus.syncing:
+        return false
+
+      rs.payloadStatus.latestValidHash.isNone
+      # After this, the clMocker will continue and try to re-org to canonical chain once again
+      # clMocker will fail the test if this is not possible, so nothing left to do.
+  ))
+
+  testCond r2
+
+type
+  TxReorgShadow = ref object
+    noTxnPayload: ExecutionPayloadV1
+    txHash: Hash256
+
+proc transactionReorg(t: TestEnv): TestStatus =
+  result = TestStatus.OK
+
+  # Wait until TTD is reached by this client
+  let ok = waitFor t.clMock.waitForTTD()
+  testCond ok
+
+  # Produce blocks before starting the test
+  testCond t.clMock.produceBlocks(5, BlockProcessCallbacks())
+
+  # Create transactions that modify the state in order to check the state after the reorg.
+  const
+    txCount = 5
+    contractAddr = hexToByteArray[20]("0000000000000000000000000000000000000317")
+
+  let
+    client = t.rpcClient
+    clMock = t.clMock
+    shadow = TxReorgShadow()
+
+  for i in 0..
0: + error "(Test issue) no transactions went in block" + + let storageKey = i.u256 + let rr = client.storageAt(prevRandaoContractAddr, storageKey) + testCond rr.isOk: + error "Unable to get storage", msg=rr.error + + let opcodeValueAtBlock = rr.get() + testCond opcodeValueAtBlock == 2.u256: + error "Incorrect difficulty value in block", + expect=2, + get=opcodeValueAtBlock + + # Send transactions now past TTD, the value of the storage in these blocks must match the prevRandao value + type + ShadowTx = ref object + currentTxIndex: int + txs: seq[Transaction] + + let shadow = ShadowTx(currentTxIndex: 0) + + let produceBlockRes = clMock.produceBlocks(10, BlockProcessCallbacks( + onPayloadProducerSelected: proc(): bool = + testCond t.sendTx(0.u256) + shadow.txs.add t.tx + inc shadow.currentTxIndex + return true + , + onForkchoiceBroadcast: proc(): bool = + # Check the transaction tracing, which is client specific + let expectedPrevRandao = clMock.prevRandaoHistory[clMock.latestHeadNumber + 1'u64] + let res = debugPrevRandaoTransaction(client, shadow.txs[shadow.currentTxIndex-1], expectedPrevRandao) + if res.isErr: + error "unable to debug prev randao", msg=res.error + return false + return true + )) + + testCond produceBlockRes + + let rr = client.blockNumber() + testCond rr.isOk: + error "Unable to get latest block number" + + let lastBlockNumber = rr.get() + for i in ttdBlockNumber + 1 ..< lastBlockNumber: + let expectedPrevRandao = UInt256.fromBytesBE(clMock.prevRandaoHistory[i].data) + let storageKey = i.u256 + + let rz = client.storageAt(prevRandaoContractAddr, storageKey) + testCond rz.isOk: + error "Unable to get storage", msg=rz.error + + let storage = rz.get() + testCond storage == expectedPrevRandao: + error "Unexpected storage", expected=expectedPrevRandao, get=storage + +]# + diff --git a/hive_integration/nodocker/engine/engine_callsigs.nim b/hive_integration/nodocker/engine/engine_callsigs.nim index 01378eadc..be9b0cb21 100644 --- a/hive_integration/nodocker/engine/engine_callsigs.nim +++ b/hive_integration/nodocker/engine/engine_callsigs.nim @@ -1,3 +1,8 @@ -import ethtypes, engine_api_types +import + web3/ethtypes, + web3/engine_api_types, + ../../../nimbus/rpc/execution_types proc engine_newPayloadV2(payload: ExecutionPayloadV1OrV2): PayloadStatusV1 +proc engine_forkchoiceUpdatedV2(forkchoiceState: ForkchoiceStateV1, payloadAttributes: Option[PayloadAttributes]): ForkchoiceUpdatedResponse +proc engine_forkchoiceUpdatedV3(forkchoiceState: ForkchoiceStateV1, payloadAttributes: Option[PayloadAttributes]): ForkchoiceUpdatedResponse diff --git a/hive_integration/nodocker/engine/engine_client.nim b/hive_integration/nodocker/engine/engine_client.nim index c474b32a4..a623096d8 100644 --- a/hive_integration/nodocker/engine/engine_client.nim +++ b/hive_integration/nodocker/engine/engine_client.nim @@ -6,11 +6,14 @@ import json_rpc/[rpcclient, errors, jsonmarshal], ../../../tests/rpcclient/eth_api, ../../../premix/parser, - ../../../nimbus/rpc/hexstrings + ../../../nimbus/rpc/hexstrings, + ../../../nimbus/rpc/execution_types import web3/engine_api as web3_engine_api -type Hash256 = eth_types.Hash256 +type + Hash256 = eth_types.Hash256 + VersionedHash = engine_api_types.VersionedHash from os import DirSep, AltSep const @@ -45,10 +48,39 @@ proc forkchoiceUpdatedV2*(client: RpcClient, wrapTrySimpleRes: client.engine_forkchoiceUpdatedV2(update, payloadAttributes) +proc forkchoiceUpdatedV3*(client: RpcClient, + update: ForkchoiceStateV1, + payloadAttributes = none(PayloadAttributesV3)): + 
Result[ForkchoiceUpdatedResponse, string] = + wrapTrySimpleRes: + client.engine_forkchoiceUpdatedV3(update, payloadAttributes) + +proc forkchoiceUpdatedV2*(client: RpcClient, + update: ForkchoiceStateV1, + payloadAttributes = none(PayloadAttributes)): + Result[ForkchoiceUpdatedResponse, string] = + wrapTrySimpleRes: + client.engine_forkchoiceUpdatedV2(update, payloadAttributes) + +proc forkchoiceUpdatedV3*(client: RpcClient, + update: ForkchoiceStateV1, + payloadAttributes = none(PayloadAttributes)): + Result[ForkchoiceUpdatedResponse, string] = + wrapTrySimpleRes: + client.engine_forkchoiceUpdatedV3(update, payloadAttributes) + proc getPayloadV1*(client: RpcClient, payloadId: PayloadID): Result[ExecutionPayloadV1, string] = wrapTrySimpleRes: client.engine_getPayloadV1(payloadId) +proc getPayloadV2*(client: RpcClient, payloadId: PayloadID): Result[GetPayloadV2Response, string] = + wrapTrySimpleRes: + client.engine_getPayloadV2(payloadId) + +proc getPayloadV3*(client: RpcClient, payloadId: PayloadID): Result[GetPayloadV3Response, string] = + wrapTrySimpleRes: + client.engine_getPayloadV3(payloadId) + proc newPayloadV1*(client: RpcClient, payload: ExecutionPayloadV1): Result[PayloadStatusV1, string] = @@ -67,12 +99,21 @@ proc newPayloadV2*(client: RpcClient, wrapTrySimpleRes: client.engine_newPayloadV2(payload) +proc newPayloadV3*(client: RpcClient, + payload: ExecutionPayloadV3, + versionedHashes: seq[VersionedHash], + parentBeaconBlockRoot: FixedBytes[32] + ): + Result[PayloadStatusV1, string] = + wrapTrySimpleRes: + client.engine_newPayloadV3(payload, versionedHashes, parentBeaconBlockRoot) + proc exchangeCapabilities*(client: RpcClient, methods: seq[string]): Result[seq[string], string] = wrapTrySimpleRes: client.engine_exchangeCapabilities(methods) - + proc toBlockNumber(n: Option[HexQuantityStr]): common.BlockNumber = if n.isNone: return 0.toBlockNumber @@ -83,35 +124,166 @@ proc toBlockNonce(n: Option[HexDataStr]): common.BlockNonce = return default(BlockNonce) hexToByteArray(string n.get, result) -proc toBaseFeePerGas(n: Option[HexQuantityStr]): Option[UInt256] = +proc maybeU256(n: Option[HexQuantityStr]): Option[UInt256] = if n.isNone: return none(UInt256) some(UInt256.fromHex(string n.get)) +proc maybeU64(n: Option[HexQuantityStr]): Option[uint64] = + if n.isNone: + return none(uint64) + some(hexToInt(string n.get, uint64)) + +proc maybeBool(n: Option[HexQuantityStr]): Option[bool] = + if n.isNone: + return none(bool) + some(hexToInt(string n.get, int).bool) + +proc maybeChainId(n: Option[HexQuantityStr]): Option[ChainId] = + if n.isNone: + return none(ChainId) + some(hexToInt(string n.get, int).ChainId) + +proc maybeInt64(n: Option[HexQuantityStr]): Option[int64] = + if n.isNone: + return none(int64) + some(hexToInt(string n.get, int64)) + +proc maybeInt(n: Option[HexQuantityStr]): Option[int] = + if n.isNone: + return none(int) + some(hexToInt(string n.get, int)) + proc toBlockHeader(bc: eth_api.BlockObject): common.BlockHeader = common.BlockHeader( - blockNumber: toBlockNumber(bc.number), - parentHash : bc.parentHash, - nonce : toBlockNonce(bc.nonce), - ommersHash : bc.sha3Uncles, - bloom : BloomFilter bc.logsBloom, - txRoot : bc.transactionsRoot, - stateRoot : bc.stateRoot, - receiptRoot: bc.receiptsRoot, - coinbase : bc.miner, - difficulty : UInt256.fromHex(string bc.difficulty), - extraData : hexToSeqByte(string bc.extraData), - mixDigest : bc.mixHash, - gasLimit : hexToInt(string bc.gasLimit, GasInt), - gasUsed : hexToInt(string bc.gasUsed, GasInt), - timestamp : 
initTime(hexToInt(string bc.timestamp, int64), 0), - fee : toBaseFeePerGas(bc.baseFeePerGas) + blockNumber : toBlockNumber(bc.number), + parentHash : bc.parentHash, + nonce : toBlockNonce(bc.nonce), + ommersHash : bc.sha3Uncles, + bloom : BloomFilter bc.logsBloom, + txRoot : bc.transactionsRoot, + stateRoot : bc.stateRoot, + receiptRoot : bc.receiptsRoot, + coinbase : bc.miner, + difficulty : UInt256.fromHex(string bc.difficulty), + extraData : hexToSeqByte(string bc.extraData), + mixDigest : bc.mixHash, + gasLimit : hexToInt(string bc.gasLimit, GasInt), + gasUsed : hexToInt(string bc.gasUsed, GasInt), + timestamp : initTime(hexToInt(string bc.timestamp, int64), 0), + fee : maybeU256(bc.baseFeePerGas), + withdrawalsRoot: bc.withdrawalsRoot, + blobGasUsed : maybeU64(bc.blobGasUsed), + excessBlobGas : maybeU64(bc.excessBlobGas), ) proc toTransactions(txs: openArray[JsonNode]): seq[Transaction] = for x in txs: result.add parseTransaction(x) +proc toWithdrawal(wd: WithdrawalObject): Withdrawal = + Withdrawal( + index: hexToInt(string wd.index, uint64), + validatorIndex: hexToInt(string wd.validatorIndex, uint64), + address: wd.address, + amount: hexToInt(string wd.amount, uint64), + ) + +proc toWithdrawals(list: seq[WithdrawalObject]): seq[Withdrawal] = + result = newSeqOfCap[Withdrawal](list.len) + for wd in list: + result.add toWithdrawal(wd) + +proc toWithdrawals(list: Option[seq[WithdrawalObject]]): Option[seq[Withdrawal]] = + if list.isNone: + return none(seq[Withdrawal]) + some(toWithdrawals(list.get)) + +type + RPCReceipt* = object + txHash*: Hash256 + txIndex*: int + blockHash*: Hash256 + blockNumber*: uint64 + sender*: EthAddress + to*: Option[EthAddress] + cumulativeGasUsed*: GasInt + gasUsed*: GasInt + contractAddress*: Option[EthAddress] + logs*: seq[FilterLog] + logsBloom*: FixedBytes[256] + recType*: ReceiptType + stateRoot*: Option[Hash256] + status*: Option[bool] + effectiveGasPrice*: GasInt + + RPCTx* = object + txType*: TxType + blockHash*: Option[Hash256] # none if pending + blockNumber*: Option[uint64] + sender*: EthAddress + gasLimit*: GasInt + gasPrice*: GasInt + maxFeePerGas*: GasInt + maxPriorityFeePerGas*: GasInt + hash*: Hash256 + payload*: seq[byte] + nonce*: AccountNonce + to*: Option[EthAddress] + txIndex*: Option[int] + value*: UInt256 + v*: int64 + r*: UInt256 + s*: UInt256 + chainId*: Option[ChainId] + accessList*: Option[seq[rpc_types.AccessTuple]] + maxFeePerBlobGas*: Option[GasInt] + versionedHashes*: Option[VersionedHashes] + +proc toRPCReceipt(rec: eth_api.ReceiptObject): RPCReceipt = + RPCReceipt( + txHash: rec.transactionHash, + txIndex: hexToInt(string rec.transactionIndex, int), + blockHash: rec.blockHash, + blockNumber: hexToInt(string rec.blockNumber, uint64), + sender: rec.`from`, + to: rec.to, + cumulativeGasUsed: hexToInt(string rec.cumulativeGasUsed, GasInt), + gasUsed: hexToInt(string rec.gasUsed, GasInt), + contractAddress: rec.contractAddress, + logs: rec.logs, + logsBloom: rec.logsBloom, + recType: hexToInt(string rec.`type`, int).ReceiptType, + stateRoot: rec.root, + status: maybeBool(rec.status), + effectiveGasPrice: hexToInt(string rec.effectiveGasPrice, GasInt), + ) + +proc toRPCTx(tx: eth_api.TransactionObject): RPCTx = + RPCTx( + txType: hexToInt(string tx.`type`, int).TxType, + blockHash: tx.blockHash, + blockNumber: maybeU64 tx.blockNumber, + sender: tx.`from`, + gasLimit: hexToInt(string tx.gas, GasInt), + gasPrice: hexToInt(string tx.gasPrice, GasInt), + maxFeePerGas: hexToInt(string tx.maxFeePerGas, GasInt), + maxPriorityFeePerGas: 
hexToInt(string tx.maxPriorityFeePerGas, GasInt), + hash: tx.hash, + payload: tx.input, + nonce: hexToInt(string tx.nonce, AccountNonce), + to: tx.to, + txIndex: maybeInt(tx.transactionIndex), + value: UInt256.fromHex(string tx.value), + v: hexToInt(string tx.v, int64), + r: UInt256.fromHex(string tx.r), + s: UInt256.fromHex(string tx.s), + chainId: maybeChainId(tx.chainId), + accessList: tx.accessList, + maxFeePerBlobGas: maybeInt64(tx.maxFeePerBlobGas), + versionedHashes: tx.versionedHashes, + ) + proc waitForTTD*(client: RpcClient, ttd: DifficultyInt): Future[(common.BlockHeader, bool)] {.async.} = let period = chronos.seconds(5) @@ -153,6 +325,7 @@ proc blockByNumber*(client: RpcClient, number: uint64, output: var common.EthBlo let blk = res.get() output.header = toBlockHeader(blk) output.txs = toTransactions(blk.transactions) + output.withdrawals = toWithdrawals(blk.withdrawals) return ok() proc headerByHash*(client: RpcClient, hash: Hash256, output: var common.BlockHeader): Result[void, string] = @@ -180,6 +353,7 @@ proc latestBlock*(client: RpcClient, output: var common.EthBlock): Result[void, let blk = res.get() output.header = toBlockHeader(blk) output.txs = toTransactions(blk.transactions) + output.withdrawals = toWithdrawals(blk.withdrawals) return ok() proc namedHeader*(client: RpcClient, name: string, output: var common.BlockHeader): Result[void, string] = @@ -205,17 +379,30 @@ proc balanceAt*(client: RpcClient, address: EthAddress): Result[UInt256, string] let res = waitFor client.eth_getBalance(ethAddressStr(address), "latest") return ok(UInt256.fromHex(res.string)) +proc balanceAt*(client: RpcClient, address: EthAddress, blockNumber: UInt256): Result[UInt256, string] = + wrapTry: + let qty = encodeQuantity(blockNumber) + let res = waitFor client.eth_getBalance(ethAddressStr(address), qty.string) + return ok(UInt256.fromHex(res.string)) + proc nonceAt*(client: RpcClient, address: EthAddress): Result[AccountNonce, string] = wrapTry: let res = waitFor client.eth_getTransactionCount(ethAddressStr(address), "latest") return ok(fromHex[AccountNonce](res.string)) -proc txReceipt*(client: RpcClient, txHash: Hash256): Result[eth_api.ReceiptObject, string] = +proc txReceipt*(client: RpcClient, txHash: Hash256): Result[RPCReceipt, string] = wrapTry: let res = waitFor client.eth_getTransactionReceipt(txHash) if res.isNone: return err("failed to get receipt: " & txHash.data.toHex) - return ok(res.get) + return ok(toRPCReceipt res.get) + +proc txByHash*(client: RpcClient, txHash: Hash256): Result[RPCTx, string] = + wrapTry: + let res = waitFor client.eth_getTransactionByHash(txHash) + if res.isNone: + return err("failed to get transaction: " & txHash.data.toHex) + return ok(toRPCTx res.get) proc toDataStr(slot: UInt256): HexDataStr = let hex = slot.toHex @@ -299,3 +486,19 @@ proc debugPrevRandaoTransaction*(client: RpcClient, tx: Transaction, expectedPre return err("PREVRANDAO opcode not found") return ok() + +template expectBalanceEqual*(res: Result[UInt256, string], account: EthAddress, + expectedBalance: UInt256): auto = + if res.isErr: + return err(res.error) + if res.get != expectedBalance: + return err("invalid wd balance at $1, expect $2, get $3" % [ + account.toHex, $expectedBalance, $res.get]) + +template expectStorageEqual*(res: Result[UInt256, string], account: EthAddress, + expectedValue: UInt256): auto = + if res.isErr: + return err(res.error) + if res.get != expectedValue: + return err("invalid wd storage at $1 is $2, expect $3" % [ + account.toHex, $res.get, 
$expectedValue]) diff --git a/hive_integration/nodocker/engine/engine_sim.nim b/hive_integration/nodocker/engine/engine_sim.nim index b39d46e7e..a5b891ebf 100644 --- a/hive_integration/nodocker/engine/engine_sim.nim +++ b/hive_integration/nodocker/engine/engine_sim.nim @@ -5,33 +5,27 @@ import import ./engine_tests, ./auths_tests, - ./exchange_cap_tests + ./exchange_cap_tests, + ./withdrawal_tests -proc combineTests(): seq[TestSpec] = - result = @engineTestList - result.add @authTestList +proc combineTests(): seq[TestDesc] = + result = @wdTestList + result.add ecTestList + result.add authTestList + result.add engineTestList -const testList = combineTests() +let + testList = combineTests() proc main() = var stat: SimStat let start = getTime() for x in testList: - var t = setupELClient(x.chainFile, x.enableAuth) - t.setRealTTD(x.ttd) - if x.slotsToFinalized != 0: - t.slotsToFinalized(x.slotsToFinalized) - if x.slotsToSafe != 0: - t.slotsToSafe(x.slotsToSafe) - let status = x.run(t) - t.stopELClient() - stat.inc(x.name, status) - - for x in exchangeCapTestList: - let env = setupELClient(x.conf) - let status = x.run(env) - env.stopELClient() + let status = if x.run(x.spec): + TestStatus.OK + else: + TestStatus.Failed stat.inc(x.name, status) let elpd = getTime() - start diff --git a/hive_integration/nodocker/engine/engine_tests.nim b/hive_integration/nodocker/engine/engine_tests.nim index 44e43676a..f1e03c389 100644 --- a/hive_integration/nodocker/engine/engine_tests.nim +++ b/hive_integration/nodocker/engine/engine_tests.nim @@ -1,2009 +1,370 @@ import - std/tables, - stew/byteutils, - chronicles, - unittest2, - nimcrypto/sysrand, - chronos, - "."/[test_env, helper, types], - ../../../nimbus/transaction, - ../../../nimbus/rpc/rpc_types, - ../../../nimbus/rpc/merge/mergeutils + ./engine/engine_spec, + ./types, + ./test_env -import eth/common/eth_types as common_eth_types -type Hash256 = common_eth_types.Hash256 +proc specExecute(ws: BaseSpec): bool = + var + ws = EngineSpec(ws) + env = setupELClient(ws.chainFile, false) -const - prevRandaoContractAddr = hexToByteArray[20]("0000000000000000000000000000000000000316") + env.setRealTTD(ws.ttd) + if ws.slotsToFinalized != 0: + env.slotsToFinalized(ws.slotsToFinalized) + if ws.slotsToSafe != 0: + env.slotsToSafe(ws.slotsToSafe) -proc `==`(a: Option[BlockHash], b: Option[Hash256]): bool = - if a.isNone and b.isNone: - return true - if a.isSome and b.isSome: - return a.get() == b.get().data.BlockHash + result = ws.exec(env) + env.stopELClient() -template testFCU(res, cond: untyped, validHash: Option[Hash256], id = none(PayloadID)) = - testCond res.isOk - let s = res.get() - testCond s.payloadStatus.status == PayloadExecutionStatus.cond: - error "Unexpected FCU status", expect=PayloadExecutionStatus.cond, get=s.payloadStatus.status - testCond s.payloadStatus.latestValidHash == validHash: - error "Unexpected FCU latestValidHash", expect=validHash, get=s.payloadStatus.latestValidHash - testCond s.payloadId == id: - error "Unexpected FCU payloadID", expect=id, get=s.payloadId - -template testFCU(res, cond: untyped) = - testCond res.isOk - let s = res.get() - testCond s.payloadStatus.status == PayloadExecutionStatus.cond: - error "Unexpected FCU status", expect=PayloadExecutionStatus.cond, get=s.payloadStatus.status - -template testNP(res, cond: untyped, validHash = none(Hash256)) = - testCond res.isOk - let s = res.get() - testCond s.status == PayloadExecutionStatus.cond: - error "Unexpected NewPayload status", expect=PayloadExecutionStatus.cond, 
get=s.status - testCond s.latestValidHash == validHash: - error "Unexpected NewPayload latestValidHash", expect=validHash, get=s.latestValidHash - -template testNPEither(res, cond: untyped, validHash = none(Hash256)) = - testCond res.isOk - let s = res.get() - testCond s.status in cond: - error "Unexpected NewPayload status", expect=cond, get=s.status - testCond s.latestValidHash == validHash: - error "Unexpected NewPayload latestValidHash", expect=validHash, get=s.latestValidHash - -template testLatestHeader(client: untyped, expectedHash: BlockHash) = - var lastHeader: EthBlockHeader - var hRes = client.latestHeader(lastHeader) - testCond hRes.isOk: - error "unable to get latest header", msg=hRes.error - - let lastHash = BlockHash lastHeader.blockHash.data - # Latest block header available via Eth RPC should not have changed at this point - testCond lastHash == expectedHash: - error "latest block header incorrect", - expect = expectedHash, - get = lastHash - -proc sendTx(t: TestEnv, recipient: EthAddress, val: UInt256, data: openArray[byte] = []): bool = - t.tx = t.makeNextTransaction(recipient, val, data) - let rr = t.rpcClient.sendTransaction(t.tx) - if rr.isErr: - error "Unable to send transaction", msg=rr.error - return false - return true - -proc sendTx(t: TestEnv, val: UInt256): bool = - t.sendTx(prevRandaoContractAddr, val) - -# Invalid Terminal Block in ForkchoiceUpdated: -# Client must reject ForkchoiceUpdated directives if the referenced HeadBlockHash does not meet the TTD requirement. -proc invalidTerminalBlockForkchoiceUpdated(t: TestEnv): TestStatus = - result = TestStatus.OK - - let - gHash = Web3BlockHash t.gHeader.blockHash.data - forkchoiceState = ForkchoiceStateV1( - headBlockHash: gHash, - safeBlockHash: gHash, - finalizedBlockHash: gHash, - ) - - let res = t.rpcClient.forkchoiceUpdatedV1(forkchoiceState) - # Execution specification: - # {payloadStatus: {status: INVALID, latestValidHash=0x00..00}, payloadId: null} - # either obtained from the Payload validation process or as a result of validating a PoW block referenced by forkchoiceState.headBlockHash - - testFCU(res, invalid, some(Hash256())) - # ValidationError is not validated since it can be either null or a string message - - # Check that PoW chain progresses - testCond t.verifyPoWProgress(t.gHeader.blockHash) - -# Invalid GetPayload Under PoW: Client must reject GetPayload directives under PoW. -proc invalidGetPayloadUnderPoW(t: TestEnv): TestStatus = - result = TestStatus.OK - - # We start in PoW and try to get an invalid Payload, which should produce an error but nothing should be disrupted. - let id = PayloadID [1.byte, 2,3,4,5,6,7,8] - let res = t.rpcClient.getPayloadV1(id) - testCond res.isErr - - # Check that PoW chain progresses - testCond t.verifyPoWProgress(t.gHeader.blockHash) - -# Invalid Terminal Block in NewPayload: -# Client must reject NewPayload directives if the referenced ParentHash does not meet the TTD requirement. 
-proc invalidTerminalBlockNewPayload(t: TestEnv): TestStatus = - result = TestStatus.OK - - let gBlock = t.gHeader - let payload = ExecutableData( - parentHash: gBlock.blockHash, - stateRoot: gBlock.stateRoot, - receiptsRoot: EMPTY_ROOT_HASH, - number: 1, - gasLimit: gBlock.gasLimit, - gasUsed: 0, - timestamp: gBlock.timestamp + 1.seconds, - baseFeePerGas:gBlock.baseFee - ) - let hashedPayload = customizePayload(payload, CustomPayload()) - let res = t.rpcClient.newPayloadV1(hashedPayload) - - # Execution specification: - # {status: INVALID, latestValidHash=0x00..00} - # if terminal block conditions are not satisfied - testNP(res, invalid, some(Hash256())) - - # Check that PoW chain progresses - testCond t.verifyPoWProgress(t.gHeader.blockHash) - -proc unknownHeadBlockHash(t: TestEnv): TestStatus = - result = TestStatus.OK - - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - var randomHash: Hash256 - testCond randomBytes(randomHash.data) == 32 - - let clMock = t.clMock - let forkchoiceStateUnknownHeadHash = ForkchoiceStateV1( - headBlockHash: BlockHash randomHash.data, - safeBlockHash: clMock.latestForkchoice.finalizedBlockHash, - finalizedBlockHash: clMock.latestForkchoice.finalizedBlockHash, - ) - - var res = t.rpcClient.forkchoiceUpdatedV1(forkchoiceStateUnknownHeadHash) - testCond res.isOk - - let s = res.get() - # Execution specification:: - # - {payloadStatus: {status: SYNCING, latestValidHash: null, validationError: null}, payloadId: null} - # if forkchoiceState.headBlockHash references an unknown payload or a payload that can't be validated - # because requisite data for the validation is missing - testCond s.payloadStatus.status == PayloadExecutionStatus.syncing - - # Test again using PayloadAttributes, should also return SYNCING and no PayloadID - let timestamp = uint64 clMock.latestExecutedPayload.timestamp - let payloadAttr = PayloadAttributesV1( - timestamp: Quantity(timestamp + 1) - ) - - res = t.rpcClient.forkchoiceUpdatedV1(forkchoiceStateUnknownHeadHash, some(payloadAttr)) - testCond res.isOk - testCond s.payloadStatus.status == PayloadExecutionStatus.syncing - testCond s.payloadId.isNone - -proc unknownSafeBlockHash(t: TestEnv): TestStatus = - result = TestStatus.OK - - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test - let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produce5BlockRes - - let clMock = t.clMock - let client = t.rpcClient - let produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Run test after a new payload has been broadcast - onNewPayloadBroadcast: proc(): bool = - # Generate a random SafeBlock hash - var randomSafeBlockHash: Hash256 - doAssert randomBytes(randomSafeBlockHash.data) == 32 - - # Send forkchoiceUpdated with random SafeBlockHash - let forkchoiceStateUnknownSafeHash = ForkchoiceStateV1( - headBlockHash: clMock.latestExecutedPayload.blockHash, - safeBlockHash: BlockHash randomSafeBlockHash.data, - finalizedBlockHash: clMock.latestForkchoice.finalizedBlockHash, - ) - # Execution specification: - # - This value MUST be either equal to or an ancestor of headBlockHash - let res = client.forkchoiceUpdatedV1(forkchoiceStateUnknownSafeHash) - return res.isErr - )) - - testCond produceSingleBlockRes - -proc unknownFinalizedBlockHash(t: TestEnv): TestStatus = - result = TestStatus.OK - - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test - let produce5BlockRes = t.clMock.produceBlocks(5, 
BlockProcessCallbacks()) - testCond produce5BlockRes - - let clMock = t.clMock - let client = t.rpcClient - let produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Run test after a new payload has been broadcast - onNewPayloadBroadcast: proc(): bool = - # Generate a random SafeBlock hash - var randomFinalBlockHash: Hash256 - doAssert randomBytes(randomFinalBlockHash.data) == 32 - - # Send forkchoiceUpdated with random SafeBlockHash - let forkchoiceStateUnknownFinalizedHash = ForkchoiceStateV1( - headBlockHash: clMock.latestExecutedPayload.blockHash, - safeBlockHash: clMock.latestForkchoice.safeBlockHash, - finalizedBlockHash: BlockHash randomFinalBlockHash.data, - ) - # Execution specification: - # - This value MUST be either equal to or an ancestor of headBlockHash - var res = client.forkchoiceUpdatedV1(forkchoiceStateUnknownFinalizedHash) - if res.isOk: - return false - - # Test again using PayloadAttributes, should also return INVALID and no PayloadID - let timestamp = uint64 clMock.latestExecutedPayload.timestamp - let payloadAttr = PayloadAttributesV1( - timestamp: Quantity(timestamp + 1) - ) - res = client.forkchoiceUpdatedV1(forkchoiceStateUnknownFinalizedHash, some(payloadAttr)) - return res.isErr - )) - - testCond produceSingleBlockRes - -# Send an inconsistent ForkchoiceState with a known payload that belongs to a side chain as head, safe or finalized. -type - Inconsistency {.pure.} = enum - Head - Safe - Finalized - - PayloadList = ref object - canonicalPayloads : seq[ExecutableData] - alternativePayloads: seq[ExecutableData] - -template inconsistentForkchoiceStateGen(procname: untyped, inconsistency: Inconsistency) = - proc procName(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - var pList = PayloadList() - let clMock = t.clMock - let client = t.rpcClient - - # Produce blocks before starting the test - let produceBlockRes = clMock.produceBlocks(3, BlockProcessCallbacks( - onGetPayload: proc(): bool = - # Generate and send an alternative side chain - var customData = CustomPayload( - extraData: some(@[0x01.byte]) - ) - - if pList.alternativePayloads.len > 0: - customData.parentHash = some(pList.alternativePayloads[^1].blockHash) - - let executableData = toExecutableData(clMock.latestPayloadBuilt) - let alternativePayload = customizePayload(executableData, customData) - pList.alternativePayloads.add(alternativePayload.toExecutableData) - - let latestCanonicalPayload = toExecutableData(clMock.latestPayloadBuilt) - pList.canonicalPayloads.add(latestCanonicalPayload) - - # Send the alternative payload - let res = client.newPayloadV1(alternativePayload) - if res.isErr: - return false - - let s = res.get() - s.status == PayloadExecutionStatus.valid or s.status == PayloadExecutionStatus.accepted - )) - - testCond produceBlockRes - - # Send the invalid ForkchoiceStates - let len = pList.alternativePayloads.len - var inconsistentFcU = ForkchoiceStateV1( - headBlockHash: Web3BlockHash pList.canonicalPayloads[len-1].blockHash.data, - safeBlockHash: Web3BlockHash pList.canonicalPayloads[len-2].blockHash.data, - finalizedBlockHash: Web3BlockHash pList.canonicalPayloads[len-3].blockHash.data, - ) - - when inconsistency == Inconsistency.Head: - inconsistentFcU.headBlockHash = Web3BlockHash pList.alternativePayloads[len-1].blockHash.data - elif inconsistency == Inconsistency.Safe: - inconsistentFcU.safeBlockHash = Web3BlockHash 
pList.alternativePayloads[len-2].blockHash.data - else: - inconsistentFcU.finalizedBlockHash = Web3BlockHash pList.alternativePayloads[len-3].blockHash.data - - var r = client.forkchoiceUpdatedV1(inconsistentFcU) - testCond r.isErr - - # Return to the canonical chain - r = client.forkchoiceUpdatedV1(clMock.latestForkchoice) - testCond r.isOk - let s = r.get() - testCond s.payloadStatus.status == PayloadExecutionStatus.valid - -inconsistentForkchoiceStateGen(inconsistentForkchoiceState1, Inconsistency.Head) -inconsistentForkchoiceStateGen(inconsistentForkchoiceState2, Inconsistency.Safe) -inconsistentForkchoiceStateGen(inconsistentForkchoiceState3, Inconsistency.Finalized) - -# Verify behavior on a forkchoiceUpdated with invalid payload attributes -template invalidPayloadAttributesGen(procname: untyped, syncingCond: bool) = - proc procName(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - let clMock = t.clMock - let client = t.rpcClient - - # Produce blocks before starting the test - var produceBlockRes = clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produceBlockRes - - # Send a forkchoiceUpdated with invalid PayloadAttributes - produceBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - onNewPayloadBroadcast: proc(): bool = - # Try to apply the new payload with invalid attributes - var blockHash: Hash256 - when syncingCond: - # Setting a random hash will put the client into `SYNCING` - doAssert randomBytes(blockHash.data) == 32 - else: - # Set the block hash to the next payload that was broadcasted - blockHash = hash256(clMock.latestPayloadBuilt.blockHash) - - let fcu = ForkchoiceStateV1( - headBlockHash: Web3BlockHash blockHash.data, - safeBlockHash: Web3BlockHash blockHash.data, - finalizedBlockHash: Web3BlockHash blockHash.data, - ) - - let attr = PayloadAttributesV1() - - # 0) Check headBlock is known and there is no missing data, if not respond with SYNCING - # 1) Check headBlock is VALID, if not respond with INVALID - # 2) Apply forkchoiceState - # 3) Check payloadAttributes, if invalid respond with error: code: Invalid payload attributes - # 4) Start payload build process and respond with VALID - when syncingCond: - # If we are SYNCING, the outcome should be SYNCING regardless of the validity of the payload atttributes - let r = client.forkchoiceUpdatedV1(fcu, some(attr)) - testFCU(r, syncing) - else: - let r = client.forkchoiceUpdatedV1(fcu, some(attr)) - testCond r.isOk: - error "Unexpected error", msg = r.error - - # Check that the forkchoice was applied, regardless of the error - testLatestHeader(client, BlockHash blockHash.data) - return true - )) - - testCond produceBlockRes - -invalidPayloadAttributesGen(invalidPayloadAttributes1, false) -invalidPayloadAttributesGen(invalidPayloadAttributes2, true) - -proc preTTDFinalizedBlockHash(t: TestEnv): TestStatus = - result = TestStatus.OK - - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test - let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produce5BlockRes - - let - gHash = Web3BlockHash t.gHeader.blockHash.data - forkchoiceState = ForkchoiceStateV1( - headBlockHash: gHash, - safeBlockHash: gHash, - finalizedBlockHash: gHash, - ) - client = t.rpcClient - clMock = t.clMock - - var res = client.forkchoiceUpdatedV1(forkchoiceState) - testFCU(res, invalid, some(Hash256())) - - res = 
client.forkchoiceUpdatedV1(clMock.latestForkchoice) - testFCU(res, valid) - -# Corrupt the hash of a valid payload, client should reject the payload. -# All possible scenarios: -# (fcU) -# ┌────────┐ ┌────────────────────────┐ -# │ HEAD │◄───────┤ Bad Hash (!Sync,!Side) │ -# └────┬───┘ └────────────────────────┘ -# │ -# │ -# ┌────▼───┐ ┌────────────────────────┐ -# │ HEAD-1 │◄───────┤ Bad Hash (!Sync, Side) │ -# └────┬───┘ └────────────────────────┘ -# │ -# -# -# (fcU) -# ******************** ┌───────────────────────┐ -# * (Unknown) HEAD *◄─┤ Bad Hash (Sync,!Side) │ -# ******************** └───────────────────────┘ -# │ -# │ -# ┌────▼───┐ ┌───────────────────────┐ -# │ HEAD-1 │◄───────────┤ Bad Hash (Sync, Side) │ -# └────┬───┘ └───────────────────────┘ -# │ -# - -type - Shadow = ref object - hash: Hash256 - -template badHashOnNewPayloadGen(procname: untyped, syncingCond: bool, sideChain: bool) = - proc procName(t: TestEnv): TestStatus = - result = TestStatus.OK - - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test - let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produce5BlockRes - - let clMock = t.clMock - let client = t.rpcClient - let shadow = Shadow() - - var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Run test after the new payload has been obtained - onGetPayload: proc(): bool = - # Alter hash on the payload and send it to client, should produce an error - var alteredPayload = clMock.latestPayloadBuilt - var invalidPayloadHash = hash256(alteredPayload.blockHash) - let lastByte = int invalidPayloadHash.data[^1] - invalidPayloadHash.data[^1] = byte(not lastByte) - shadow.hash = invalidPayloadHash - alteredPayload.blockHash = BlockHash invalidPayloadHash.data - - when not syncingCond and sideChain: - # We alter the payload by setting the parent to a known past block in the - # canonical chain, which makes this payload a side chain payload, and also an invalid block hash - # (because we did not update the block hash appropriately) - alteredPayload.parentHash = Web3BlockHash clMock.latestHeader.parentHash.data - elif syncingCond: - # We need to send an fcU to put the client in SYNCING state. - var randomHeadBlock: Hash256 - doAssert randomBytes(randomHeadBlock.data) == 32 - - let latestHeaderHash = clMock.latestHeader.blockHash - let fcU = ForkchoiceStateV1( - headBlockHash: Web3BlockHash randomHeadBlock.data, - safeBlockHash: Web3BlockHash latestHeaderHash.data, - finalizedBlockHash: Web3BlockHash latestHeaderHash.data - ) - - let r = client.forkchoiceUpdatedV1(fcU) - if r.isErr: - return false - let z = r.get() - if z.payloadStatus.status != PayloadExecutionStatus.syncing: - return false - - when sidechain: - # Syncing and sidechain, the caonincal head is an unknown payload to us, - # but this specific bad hash payload is in theory part of a side chain. - # Therefore the parent we use is the head hash. - alteredPayload.parentHash = Web3BlockHash latestHeaderHash.data - else: - # The invalid bad-hash payload points to the unknown head, but we know it is - # indeed canonical because the head was set using forkchoiceUpdated. 
- alteredPayload.parentHash = Web3BlockHash randomHeadBlock.data - - let res = client.newPayloadV1(alteredPayload) - # Execution specification:: - # - {status: INVALID_BLOCK_HASH, latestValidHash: null, validationError: null} if the blockHash validation has failed - if res.isErr: - return false - let s = res.get() - if s.status != PayloadExecutionStatus.invalid_block_hash: - return false - s.latestValidHash.isNone - )) - testCond produceSingleBlockRes - - # Lastly, attempt to build on top of the invalid payload - produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Run test after the new payload has been obtained - onGetPayload: proc(): bool = - let payload = toExecutableData(clMock.latestPayloadBuilt) - let alteredPayload = customizePayload(payload, CustomPayload( - parentHash: some(shadow.hash), - )) - let res = client.newPayloadV1(alteredPayload) - if res.isErr: - return false - # Response status can be ACCEPTED (since parent payload could have been thrown out by the client) - # or INVALID (client still has the payload and can verify that this payload is incorrectly building on top of it), - # but a VALID response is incorrect. - let s = res.get() - s.status != PayloadExecutionStatus.valid - )) - testCond produceSingleBlockRes - -badHashOnNewPayloadGen(badHashOnNewPayload1, false, false) -badHashOnNewPayloadGen(badHashOnNewPayload2, true, false) -badHashOnNewPayloadGen(badHashOnNewPayload3, false, true) -badHashOnNewPayloadGen(badHashOnNewPayload4, true, true) - -proc parentHashOnExecPayload(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test - let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produce5BlockRes - - let clMock = t.clMock - let client = t.rpcClient - var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Run test after the new payload has been obtained - onGetPayload: proc(): bool = - # Alter hash on the payload and send it to client, should produce an error - var alteredPayload = clMock.latestPayloadBuilt - alteredPayload.blockHash = alteredPayload.parentHash - let res = client.newPayloadV1(alteredPayload) - if res.isErr: - return false - # Execution specification:: - # - {status: INVALID_BLOCK_HASH, latestValidHash: null, validationError: null} if the blockHash validation has failed - let s = res.get() - s.status == PayloadExecutionStatus.invalid_block_hash - )) - testCond produceSingleBlockRes - -# Attempt to re-org to a chain containing an invalid transition payload -proc invalidTransitionPayload(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by main client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - let clMock = t.clMock - let client = t.rpcClient - - # Produce two blocks before trying to re-org - t.nonce = 2 # Initial PoW chain already contains 2 transactions - var pbRes = clMock.produceBlocks(2, BlockProcessCallbacks( - onPayloadProducerSelected: proc(): bool = - t.sendTx(1.u256) - )) - - testCond pbRes - - # Introduce the invalid transition payload - pbRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # This is being done in the middle of the block building - # process simply to be able to re-org back. 
- onGetPayload: proc(): bool = - let basePayload = clMock.executedPayloadHistory[clMock.posBlockNumber] - let alteredPayload = generateInvalidPayload(basePayload, InvalidStateRoot) - - let res = client.newPayloadV1(alteredPayload) - let cond = {PayloadExecutionStatus.invalid, PayloadExecutionStatus.accepted} - testNPEither(res, cond, some(Hash256())) - - let rr = client.forkchoiceUpdatedV1( - ForkchoiceStateV1(headBlockHash: alteredPayload.blockHash) - ) - testFCU(rr, invalid, some(Hash256())) - - testLatestHeader(client, clMock.latestExecutedPayload.blockHash) - return true - )) - - testCond pbRes - -template invalidPayloadTestCaseGen(procName: untyped, payloadField: InvalidPayloadField, emptyTxs: bool = false) = - proc procName(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - let clMock = t.clMock - let client = t.rpcClient - - template txProc(): bool = - when not emptyTxs: - t.sendTx(0.u256) - else: - true - - # Produce blocks before starting the test - var pbRes = clMock.produceBlocks(5, BlockProcessCallbacks( - # Make sure at least one transaction is included in each block - onPayloadProducerSelected: proc(): bool = - txProc() - )) - - testCond pbRes - - let invalidPayload = Shadow() - - pbRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Make sure at least one transaction is included in the payload - onPayloadProducerSelected: proc(): bool = - txProc() - , - # Run test after the new payload has been obtained - onGetPayload: proc(): bool = - # Alter the payload while maintaining a valid hash and send it to the client, should produce an error - - # We need at least one transaction for most test cases to work - when not emptyTxs: - if clMock.latestPayloadBuilt.transactions.len == 0: - # But if the payload has no transactions, the test is invalid - error "No transactions in the base payload" - return false - - let alteredPayload = generateInvalidPayload(clMock.latestPayloadBuilt, payloadField, t.vaultKey) - invalidPayload.hash = hash256(alteredPayload.blockHash) - - # Depending on the field we modified, we expect a different status - let rr = client.newPayloadV1(alteredPayload) - if rr.isErr: - error "unable to send altered payload", msg=rr.error - return false - let s = rr.get() - - when payloadField == InvalidParentHash: - # Execution specification:: - # {status: ACCEPTED, latestValidHash: null, validationError: null} if the following conditions are met: - # - the blockHash of the payload is valid - # - the payload doesn't extend the canonical chain - # - the payload hasn't been fully validated - # {status: SYNCING, latestValidHash: null, validationError: null} - # if the payload extends the canonical chain and requisite data for its validation is missing - # (the client can assume the payload extends the canonical because the linking payload could be missing) - if s.status notin {PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted}: - error "newPayloadV1 status expect syncing or accepted", get=s.status - return false - - if s.latestValidHash.isSome: - error "newPayloadV1 latestValidHash not empty" - return false - else: - if s.status != PayloadExecutionStatus.invalid: - error "newPayloadV1 status expect invalid", get=s.status - return false - - if s.latestValidHash.isNone: - return false - - let latestValidHash = s.latestValidHash.get - if latestValidHash != alteredPayload.parentHash: - error "latestValidHash is not the same with parentHash", - expected = 
alteredPayload.parentHash, get = latestValidHash - return false - - # Send the forkchoiceUpdated with a reference to the invalid payload. - let fcState = ForkchoiceStateV1( - headBlockHash: alteredPayload.blockHash, - safeBlockHash: alteredPayload.blockHash, - finalizedBlockHash: alteredPayload.blockHash, - ) - - let timestamp = Quantity(alteredPayload.timestamp.int64 + 1) - let payloadAttr = PayloadAttributesV1(timestamp: timestamp) - - # Execution specification: - # {payloadStatus: {status: INVALID, latestValidHash: null, validationError: errorMessage | null}, payloadId: null} - # obtained from the Payload validation process if the payload is deemed INVALID - let rs = client.forkchoiceUpdatedV1(fcState, some(payloadAttr)) - # Execution specification: - # {payloadStatus: {status: INVALID, latestValidHash: null, validationError: errorMessage | null}, payloadId: null} - # obtained from the Payload validation process if the payload is deemed INVALID - # Note: SYNCING/ACCEPTED is acceptable here as long as the block produced after this test is produced successfully - if rs.isErr: - error "unable to send altered payload", msg=rs.error - return false - - let z = rs.get() - if z.payloadStatus.status notin {PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted, PayloadExecutionStatus.invalid}: - return false - - # Finally, attempt to fetch the invalid payload using the JSON-RPC endpoint - var header: rpc_types.BlockHeader - let rp = client.headerByHash(alteredPayload.blockHash.hash256, header) - rp.isErr - )) - - testCond pbRes - - # Lastly, attempt to build on top of the invalid payload - let psb = clMock.produceSingleBlock(BlockProcessCallbacks( - # Run test after the new payload has been obtained - onGetPayload: proc(): bool = - let alteredPayload = customizePayload(clMock.latestPayloadBuilt.toExecutableData, CustomPayload( - parentHash: some(invalidPayload.hash), - )) - - info "Sending customized NewPayload: ParentHash", - fromHash=clMock.latestPayloadBuilt.parentHash, toHash=invalidPayload.hash - # Response status can be ACCEPTED (since parent payload could have been thrown out by the client) - # or SYNCING (parent payload is thrown out and also client assumes that the parent is part of canonical chain) - # or INVALID (client still has the payload and can verify that this payload is incorrectly building on top of it), - # but a VALID response is incorrect. 
- let rr = client.newPayloadV1(alteredPayload) - if rr.isErr: - error "unable to send altered payload", msg=rr.error - return false - - let z = rr.get() - z.status in {PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted, PayloadExecutionStatus.invalid} - )) - - testCond psb - -invalidPayloadTestCaseGen(invalidPayload1, InvalidParentHash) -invalidPayloadTestCaseGen(invalidPayload2, InvalidStateRoot) -invalidPayloadTestCaseGen(invalidPayload3, InvalidStateRoot, true) -invalidPayloadTestCaseGen(invalidPayload4, InvalidReceiptsRoot) -invalidPayloadTestCaseGen(invalidPayload5, InvalidNumber) -invalidPayloadTestCaseGen(invalidPayload6, InvalidGasLimit) -invalidPayloadTestCaseGen(invalidPayload7, InvalidGasUsed) -invalidPayloadTestCaseGen(invalidPayload8, InvalidTimestamp) -invalidPayloadTestCaseGen(invalidPayload9, InvalidPrevRandao) -invalidPayloadTestCaseGen(invalidPayload10, RemoveTransaction) -invalidPayloadTestCaseGen(invalidPayload11, InvalidTransactionSignature) -invalidPayloadTestCaseGen(invalidPayload12, InvalidTransactionNonce) -invalidPayloadTestCaseGen(invalidPayload13, InvalidTransactionGasPrice) -invalidPayloadTestCaseGen(invalidPayload14, InvalidTransactionGas) -invalidPayloadTestCaseGen(invalidPayload15, InvalidTransactionValue) - -# Test to verify Block information available at the Eth RPC after NewPayload -template blockStatusExecPayloadGen(procname: untyped, transitionBlock: bool) = - proc procName(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test, only if we are not testing the transition block - when not transitionBlock: - let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produce5BlockRes - - let clMock = t.clMock - let client = t.rpcClient - let shadow = Shadow() - - var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - onPayloadProducerSelected: proc(): bool = - var address: EthAddress - testCond t.sendTx(address, 1.u256) - shadow.hash = rlpHash(t.tx) - return true - , - onNewPayloadBroadcast: proc(): bool = - testLatestHeader(client, clMock.latestForkchoice.headBlockHash) - - let nRes = client.blockNumber() - if nRes.isErr: - error "Unable to get latest block number", msg=nRes.error - return false - - # Latest block number available via Eth RPC should not have changed at this point - let latestNumber = nRes.get - if latestNumber != clMock.latestHeadNumber: - error "latest block number incorrect after newPayload", - expected=clMock.latestHeadNumber, - get=latestNumber - return false - - # Check that the receipt for the transaction we just sent is still not available - let rr = client.txReceipt(shadow.hash) - if rr.isOk: - error "not expecting receipt" - return false - - return true - )) - testCond produceSingleBlockRes - -blockStatusExecPayloadGen(blockStatusExecPayload1, false) -blockStatusExecPayloadGen(blockStatusExecPayload2, true) - -type - MissingAncestorShadow = ref object - cA: ExecutionPayloadV1 - n: int - altChainPayloads: seq[ExecutionPayloadV1] - -# Attempt to re-org to a chain which at some point contains an unknown payload which is also invalid. -# Then reveal the invalid payload and expect that the client rejects it and rejects forkchoice updated calls to this chain. 
-# The invalid_index parameter determines how many payloads apart is the common ancestor from the block that invalidates the chain, -# with a value of 1 meaning that the immediate payload after the common ancestor will be invalid. -template invalidMissingAncestorReOrgGen(procName: untyped, - invalid_index: int, payloadField: InvalidPayloadField, p2psync: bool, emptyTxs: bool) = - - proc procName(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - let clMock = t.clMock - let client = t.rpcClient - - # Produce blocks before starting the test - testCond clMock.produceBlocks(5, BlockProcessCallbacks()) - - let shadow = MissingAncestorShadow( - # Save the common ancestor - cA: clMock.latestPayloadBuilt, - - # Amount of blocks to deviate starting from the common ancestor - n: 10, - - # Slice to save the alternate B chain - altChainPayloads: @[] - ) - - # Append the common ancestor - shadow.altChainPayloads.add shadow.cA - - # Produce blocks but at the same time create an alternate chain which contains an invalid payload at some point (INV_P) - # CommonAncestor◄─▲── P1 ◄─ P2 ◄─ P3 ◄─ ... ◄─ Pn - # │ - # └── P1' ◄─ P2' ◄─ ... ◄─ INV_P ◄─ ... ◄─ Pn' - var pbRes = clMock.produceBlocks(shadow.n, BlockProcessCallbacks( - onPayloadProducerSelected: proc(): bool = - # Function to send at least one transaction each block produced. - # Empty Txs Payload with invalid stateRoot discovered an issue in geth sync, hence this is customizable. - when not emptyTxs: - # Send the transaction to the prevRandaoContractAddr - t.sendTx(1.u256) - return true - , - onGetPayload: proc(): bool = - # Insert extraData to ensure we deviate from the main payload, which contains empty extradata - var alternatePayload = customizePayload(clMock.latestPayloadBuilt, CustomPayload( - parentHash: some(shadow.altChainPayloads[^1].blockHash.hash256), - extraData: some(@[1.byte]), - )) - - if shadow.altChainPayloads.len == invalid_index: - alternatePayload = generateInvalidPayload(alternatePayload, payloadField) - - shadow.altChainPayloads.add alternatePayload - return true - )) - testCond pbRes - - pbRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Note: We perform the test in the middle of payload creation by the CL Mock, in order to be able to - # re-org back into this chain and use the new payload without issues. - onGetPayload: proc(): bool = - # Now let's send the alternate chain to the client using newPayload/sync - for i in 1..shadow.n: - # Send the payload - var payloadValidStr = "VALID" - if i == invalid_index: - payloadValidStr = "INVALID" - elif i > invalid_index: - payloadValidStr = "VALID with INVALID ancestor" - - info "Invalid chain payload", - i = i, - payloadValidStr = payloadValidStr, - hash = shadow.altChainPayloads[i].blockHash - - let rr = client.newPayloadV1(shadow.altChainPayloads[i]) - testCond rr.isOk - - let rs = client.forkchoiceUpdatedV1(ForkchoiceStateV1( - headBlockHash: shadow.altChainPayloads[i].blockHash, - safeBlockHash: shadow.altChainPayloads[i].blockHash - )) - - if i == invalid_index: - # If this is the first payload after the common ancestor, and this is the payload we invalidated, - # then we have all the information to determine that this payload is invalid. - testNP(rr, invalid, some(shadow.altChainPayloads[i-1].blockHash.hash256)) - elif i > invalid_index: - # We have already sent the invalid payload, but the client could've discarded it. 
- # In reality the CL will not get to this point because it will have already received the `INVALID` - # response from the previous payload. - let cond = {PayloadExecutionStatus.accepted, PayloadExecutionStatus.syncing, PayloadExecutionStatus.invalid} - testNPEither(rr, cond) - else: - # This is one of the payloads before the invalid one, therefore is valid. - let latestValidHash = some(shadow.altChainPayloads[i].blockHash.hash256) - testNP(rr, valid, latestValidHash) - testFCU(rs, valid, latestValidHash) - - - # Resend the latest correct fcU - let rx = client.forkchoiceUpdatedV1(clMock.latestForkchoice) - testCond rx.isOk: - error "Unexpected error ", msg=rx.error - - # After this point, the CL Mock will send the next payload of the canonical chain - return true - )) - - testCond pbRes - -invalidMissingAncestorReOrgGen(invalidMissingAncestor1, 1, InvalidStateRoot, false, true) -invalidMissingAncestorReOrgGen(invalidMissingAncestor2, 9, InvalidStateRoot, false, true) -invalidMissingAncestorReOrgGen(invalidMissingAncestor3, 10, InvalidStateRoot, false, true) - -template blockStatusHeadBlockGen(procname: untyped, transitionBlock: bool) = - proc procName(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test, only if we are not testing the transition block - when not transitionBlock: - let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produce5BlockRes - - let clMock = t.clMock - let client = t.rpcClient - let shadow = Shadow() - - var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - onPayloadProducerSelected: proc(): bool = - var address: EthAddress - testCond t.sendTx(address, 1.u256) - shadow.hash = rlpHash(t.tx) - return true - , - # Run test after a forkchoice with new HeadBlockHash has been broadcasted - onForkchoiceBroadcast: proc(): bool = - testLatestHeader(client, clMock.latestForkchoice.headBlockHash) - - let rr = client.txReceipt(shadow.hash) - if rr.isErr: - error "unable to get transaction receipt" - return false - - return true - )) - testCond produceSingleBlockRes - -blockStatusHeadBlockGen(blockStatusHeadBlock1, false) -blockStatusHeadBlockGen(blockStatusHeadBlock2, true) - -proc blockStatusSafeBlock(t: TestEnv): TestStatus = - result = TestStatus.OK - - let clMock = t.clMock - let client = t.rpcClient - - # On PoW mode, `safe` tag shall return error. - var header: EthBlockHeader - var rr = client.namedHeader("safe", header) - testCond rr.isErr - - # Wait until this client catches up with latest PoS Block - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # First ForkchoiceUpdated sent was equal to 0x00..00, `safe` should return error now - rr = client.namedHeader("safe", header) - testCond rr.isErr - - let pbres = clMock.produceBlocks(3, BlockProcessCallbacks( - # Run test after a forkchoice with new SafeBlockHash has been broadcasted - onSafeBlockChange: proc(): bool = - var header: EthBlockHeader - let rr = client.namedHeader("safe", header) - testCond rr.isOk - let safeBlockHash = hash256(clMock.latestForkchoice.safeBlockHash) - header.blockHash == safeBlockHash - )) - - testCond pbres - -proc blockStatusFinalizedBlock(t: TestEnv): TestStatus = - result = TestStatus.OK - - let clMock = t.clMock - let client = t.rpcClient - - # On PoW mode, `finalized` tag shall return error. 
- var header: EthBlockHeader - var rr = client.namedHeader("finalized", header) - testCond rr.isErr - - # Wait until this client catches up with latest PoS Block - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # First ForkchoiceUpdated sent was equal to 0x00..00, `finalized` should return error now - rr = client.namedHeader("finalized", header) - testCond rr.isErr - - let pbres = clMock.produceBlocks(3, BlockProcessCallbacks( - # Run test after a forkchoice with new FinalizedBlockHash has been broadcasted - onFinalizedBlockChange: proc(): bool = - var header: EthBlockHeader - let rr = client.namedHeader("finalized", header) - testCond rr.isOk - let finalizedBlockHash = hash256(clMock.latestForkchoice.finalizedBlockHash) - header.blockHash == finalizedBlockHash - )) - - testCond pbres - -proc blockStatusReorg(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test - let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produce5BlockRes - - let clMock = t.clMock - let client = t.rpcClient - var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Run test after a forkchoice with new HeadBlockHash has been broadcasted - onForkchoiceBroadcast: proc(): bool = - # Verify the client is serving the latest HeadBlock - var currHeader: EthBlockHeader - var hRes = client.latestHeader(currHeader) - if hRes.isErr: - error "unable to get latest header", msg=hRes.error - return false - - var currHash = BlockHash currHeader.blockHash.data - if currHash != clMock.latestForkchoice.headBlockHash or - currHash == clMock.latestForkchoice.safeBlockHash or - currHash == clMock.latestForkchoice.finalizedBlockHash: - error "latest block header doesn't match HeadBlock hash", hash=currHash - return false - - # Reorg back to the previous block (FinalizedBlock) - let reorgForkchoice = ForkchoiceStateV1( - headBlockHash: clMock.latestForkchoice.finalizedBlockHash, - safeBlockHash: clMock.latestForkchoice.finalizedBlockHash, - finalizedBlockHash: clMock.latestForkchoice.finalizedBlockHash - ) - - var res = client.forkchoiceUpdatedV1(reorgForkchoice) - if res.isErr: - error "Could not send forkchoiceUpdatedV1", msg=res.error - return false - - var s = res.get() - if s.payloadStatus.status != PayloadExecutionStatus.valid: - error "Incorrect status returned after a HeadBlockHash reorg", status=s.payloadStatus.status - return false - - if s.payloadStatus.latestValidHash.isNone: - error "Cannot get latestValidHash from payloadStatus" - return false - - var latestValidHash = s.payloadStatus.latestValidHash.get - if latestValidHash != reorgForkchoice.headBlockHash: - error "Incorrect latestValidHash returned after a HeadBlockHash reorg", - expected=reorgForkchoice.headBlockHash, - get=latestValidHash - return false - - # testCond that we reorg to the previous block - testLatestHeader(client, reorgForkchoice.headBlockHash) - - # Send the HeadBlock again to leave everything back the way it was - res = client.forkchoiceUpdatedV1(clMock.latestForkchoice) - if res.isErr: - error "Could not send forkchoiceUpdatedV1", msg=res.error - return false - - s = res.get() - if s.payloadStatus.status != PayloadExecutionStatus.valid: - error "Incorrect status returned after a HeadBlockHash reorg", - status=s.payloadStatus.status - return false - - if s.payloadStatus.latestValidHash.isNone: - error "Cannot get latestValidHash from 
payloadStatus" - return false - - latestValidHash = s.payloadStatus.latestValidHash.get - if latestValidHash != clMock.latestForkchoice.headBlockHash: - error "Incorrect latestValidHash returned after a HeadBlockHash reorg", - expected=clMock.latestForkchoice.headBlockHash, - get=latestValidHash - return false - return true - )) - testCond produceSingleBlockRes - -proc reExecPayloads(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until this client catches up with latest PoS - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # How many Payloads we are going to re-execute - var payloadReExecCount = 10 - - # Create those blocks - let produceBlockRes = t.clMock.produceBlocks(payloadReExecCount, BlockProcessCallbacks()) - testCond produceBlockRes - - # Re-execute the payloads - let client = t.rpcClient - var hRes = client.blockNumber() - testCond hRes.isOk: - error "unable to get blockNumber", msg=hRes.error - - let lastBlock = int(hRes.get) - info "Started re-executing payloads at block", number=lastBlock - - let - clMock = t.clMock - start = lastBlock - payloadReExecCount + 1 - - for i in start..lastBlock: - if clMock.executedPayloadHistory.hasKey(uint64 i): - let payload = clMock.executedPayloadHistory[uint64 i] - let res = client.newPayloadV1(payload) - testCond res.isOk: - error "FAIL (%s): Unable to re-execute valid payload", msg=res.error - - let s = res.get() - testCond s.status == PayloadExecutionStatus.valid: - error "Unexpected status after re-execute valid payload", status=s.status - else: - testCond true: - error "(test issue) Payload does not exist", index=i - -proc multipleNewCanonicalPayloads(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test - let produce5BlockRes = t.clMock.produceBlocks(5, BlockProcessCallbacks()) - testCond produce5BlockRes - - let clMock = t.clMock - let client = t.rpcClient - var produceSingleBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks( - # Run test after a new payload has been obtained - onGetPayload: proc(): bool = - let payloadCount = 80 - let basePayload = toExecutableData(clMock.latestPayloadBuilt) - var newPrevRandao: Hash256 - - # Fabricate and send multiple new payloads by changing the PrevRandao field - for i in 0.. 
0: - altParentHash = pList.sidechainPayloads[^1].blockHash - - let executableData = toExecutableData(clMock.latestPayloadBuilt) - let altPayload = customizePayload(executableData, - CustomPayload( - parentHash: some(altParentHash.hash256), - extraData: some(@[0x01.byte]), - )) - - pList.sidechainPayloads.add(altPayload) - return true - )) - - testCond r1 - - - # Produce blocks before starting the test (So we don't try to reorg back to the genesis block) - let r2= clMock.produceSingleBlock(BlockProcessCallbacks( - onGetPayload: proc(): bool = - let r = client.newPayloadV1(pList.sidechainPayloads[^1]) - if r.isErr: - return false - let s = r.get() - if s.status notin {PayloadExecutionStatus.syncing, PayloadExecutionStatus.accepted}: - return false - - # We are going to send one of the alternative payloads and fcU to it - let len = pList.sidechainPayloads.len - let forkchoiceUpdatedBack = ForkchoiceStateV1( - headBlockHash: pList.sidechainPayloads[len-1].blockHash, - safeBlockHash: pList.sidechainPayloads[len-2].blockHash, - finalizedBlockHash: pList.sidechainPayloads[len-3].blockHash, - ) - - # It is only expected that the client does not produce an error and the CL Mocker is able to progress after the re-org - let res = client.forkchoiceUpdatedV1(forkchoiceUpdatedBack) - if res.isErr: - return false - - let rs = res.get() - if rs.payloadStatus.status != PayloadExecutionStatus.syncing: - return false - - rs.payloadStatus.latestValidHash.isNone - # After this, the clMocker will continue and try to re-org to canonical chain once again - # clMocker will fail the test if this is not possible, so nothing left to do. - )) - - testCond r2 - -type - TxReorgShadow = ref object - noTxnPayload: ExecutionPayloadV1 - txHash: Hash256 - -proc transactionReorg(t: TestEnv): TestStatus = - result = TestStatus.OK - - # Wait until TTD is reached by this client - let ok = waitFor t.clMock.waitForTTD() - testCond ok - - # Produce blocks before starting the test - testCond t.clMock.produceBlocks(5, BlockProcessCallbacks()) - - # Create transactions that modify the state in order to testCond after the reorg. - const - txCount = 5 - contractAddr = hexToByteArray[20]("0000000000000000000000000000000000000317") - - let - client = t.rpcClient - clMock = t.clMock - shadow = TxReorgShadow() - - for i in 0.. 
0: - error "(Test issue) no transactions went in block" - - let storageKey = i.u256 - let rr = client.storageAt(prevRandaoContractAddr, storageKey) - testCond rr.isOk: - error "Unable to get storage", msg=rr.error - - let opcodeValueAtBlock = rr.get() - testCond opcodeValueAtBlock == 2.u256: - error "Incorrect difficulty value in block", - expect=2, - get=opcodeValueAtBlock - - # Send transactions now past TTD, the value of the storage in these blocks must match the prevRandao value - type - ShadowTx = ref object - currentTxIndex: int - txs: seq[Transaction] - - let shadow = ShadowTx(currentTxIndex: 0) - - let produceBlockRes = clMock.produceBlocks(10, BlockProcessCallbacks( - onPayloadProducerSelected: proc(): bool = - testCond t.sendTx(0.u256) - shadow.txs.add t.tx - inc shadow.currentTxIndex - return true - , - onForkchoiceBroadcast: proc(): bool = - # Check the transaction tracing, which is client specific - let expectedPrevRandao = clMock.prevRandaoHistory[clMock.latestHeadNumber + 1'u64] - let res = debugPrevRandaoTransaction(client, shadow.txs[shadow.currentTxIndex-1], expectedPrevRandao) - if res.isErr: - error "unable to debug prev randao", msg=res.error - return false - return true - )) - - testCond produceBlockRes - - let rr = client.blockNumber() - testCond rr.isOk: - error "Unable to get latest block number" - - let lastBlockNumber = rr.get() - for i in ttdBlockNumber + 1 ..< lastBlockNumber: - let expectedPrevRandao = UInt256.fromBytesBE(clMock.prevRandaoHistory[i].data) - let storageKey = i.u256 - - let rz = client.storageAt(prevRandaoContractAddr, storageKey) - testCond rz.isOk: - error "Unable to get storage", msg=rz.error - - let storage = rz.get() - testCond storage == expectedPrevRandao: - error "Unexpected storage", expected=expectedPrevRandao, get=storage - -proc postMergeSync(t: TestEnv): TestStatus = - result = TestStatus.SKIPPED - # TODO: need multiple client - -const engineTestList* = [ +let engineTestList* = [ # Engine API Negative Test Cases - TestSpec( + TestDesc( name: "Invalid Terminal Block in ForkchoiceUpdated", - run: invalidTerminalBlockForkchoiceUpdated, - ttd: 1000000 - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidTerminalBlockForkchoiceUpdated, + ttd: 1000000 + ))#[, + TestDesc( name: "Invalid GetPayload Under PoW", - run: invalidGetPayloadUnderPoW, - ttd: 1000000 - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidGetPayloadUnderPoW, + ttd: 1000000 + )), + TestDesc( name: "Invalid Terminal Block in NewPayload", - run: invalidTerminalBlockNewPayload, - ttd: 1000000, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidTerminalBlockNewPayload, + ttd: 1000000, + )), + TestDesc( name: "Inconsistent Head in ForkchoiceState", - run: inconsistentForkchoiceState1, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: inconsistentForkchoiceState1, + )), + TestDesc( name: "Inconsistent Safe in ForkchoiceState", - run: inconsistentForkchoiceState2, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: inconsistentForkchoiceState2, + )), + TestDesc( name: "Inconsistent Finalized in ForkchoiceState", - run: inconsistentForkchoiceState3, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: inconsistentForkchoiceState3, + )), + TestDesc( name: "Unknown HeadBlockHash", - run: unknownHeadBlockHash, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: unknownHeadBlockHash, + )), + TestDesc( name: "Unknown SafeBlockHash", - run: unknownSafeBlockHash, - ), - 
TestSpec( + run: specExecute, + spec: EngineSpec( + exec: unknownSafeBlockHash, + )), + TestDesc( name: "Unknown FinalizedBlockHash", - run: unknownFinalizedBlockHash, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: unknownFinalizedBlockHash, + )), + TestDesc( name: "ForkchoiceUpdated Invalid Payload Attributes", - run: invalidPayloadAttributes1, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayloadAttributes1, + )), + TestDesc( name: "ForkchoiceUpdated Invalid Payload Attributes (Syncing)", - run: invalidPayloadAttributes2, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayloadAttributes2, + )), + TestDesc( name: "Pre-TTD ForkchoiceUpdated After PoS Switch", - run: preTTDFinalizedBlockHash, - ttd: 2, - ), + run: specExecute, + spec: EngineSpec( + exec: preTTDFinalizedBlockHash, + ttd: 2, + )), # Invalid Payload Tests - TestSpec( + TestDesc( name: "Bad Hash on NewPayload", - run: badHashOnNewPayload1, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: badHashOnNewPayload1, + )), + TestDesc( name: "Bad Hash on NewPayload Syncing", - run: badHashOnNewPayload2, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: badHashOnNewPayload2, + )), + TestDesc( name: "Bad Hash on NewPayload Side Chain", - run: badHashOnNewPayload3, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: badHashOnNewPayload3, + )), + TestDesc( name: "Bad Hash on NewPayload Side Chain Syncing", - run: badHashOnNewPayload4, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: badHashOnNewPayload4, + )), + TestDesc( name: "ParentHash==BlockHash on NewPayload", - run: parentHashOnExecPayload, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: parentHashOnExecPayload, + )), + TestDesc( name: "Invalid Transition Payload", - run: invalidTransitionPayload, - ttd: 393504, - chainFile: "blocks_2_td_393504.rlp", - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidTransitionPayload, + ttd: 393504, + chainFile: "blocks_2_td_393504.rlp", + )), + TestDesc( name: "Invalid ParentHash NewPayload", - run: invalidPayload1, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload1, + )), + TestDesc( name: "Invalid StateRoot NewPayload", - run: invalidPayload2, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload2, + )), + TestDesc( name: "Invalid StateRoot NewPayload, Empty Transactions", - run: invalidPayload3, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload3, + )), + TestDesc( name: "Invalid ReceiptsRoot NewPayload", - run: invalidPayload4, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload4, + )), + TestDesc( name: "Invalid Number NewPayload", - run: invalidPayload5, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload5, + )), + TestDesc( name: "Invalid GasLimit NewPayload", - run: invalidPayload6, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload6, + )), + TestDesc( name: "Invalid GasUsed NewPayload", - run: invalidPayload7, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload7, + )), + TestDesc( name: "Invalid Timestamp NewPayload", - run: invalidPayload8, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload8, + )), + TestDesc( name: "Invalid PrevRandao NewPayload", - run: invalidPayload9, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload9, 
+ )), + TestDesc( name: "Invalid Incomplete Transactions NewPayload", - run: invalidPayload10, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload10, + )), + TestDesc( name: "Invalid Transaction Signature NewPayload", - run: invalidPayload11, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload11, + )), + TestDesc( name: "Invalid Transaction Nonce NewPayload", - run: invalidPayload12, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload12, + )), + TestDesc( name: "Invalid Transaction GasPrice NewPayload", - run: invalidPayload13, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload13, + )), + TestDesc( name: "Invalid Transaction Gas NewPayload", - run: invalidPayload14, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidPayload14, + )), + TestDesc( name: "Invalid Transaction Value NewPayload", - run: invalidPayload15, - ), + run: specExecute, + spec: EngineSpec( + exec: invalidPayload15, + )), # Invalid Ancestor Re-Org Tests (Reveal via newPayload) - TestSpec( + TestDesc( name: "Invalid Ancestor Chain Re-Org, Invalid StateRoot, Invalid P1', Reveal using newPayload", slotsToFinalized: 20, - run: invalidMissingAncestor1, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidMissingAncestor1, + )), + TestDesc( name: "Invalid Ancestor Chain Re-Org, Invalid StateRoot, Invalid P9', Reveal using newPayload", slotsToFinalized: 20, - run: invalidMissingAncestor2, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: invalidMissingAncestor2, + )), + TestDesc( name: "Invalid Ancestor Chain Re-Org, Invalid StateRoot, Invalid P10', Reveal using newPayload", slotsToFinalized: 20, - run: invalidMissingAncestor3, - ), + run: specExecute, + spec: EngineSpec( + exec: invalidMissingAncestor3, + )), # Eth RPC Status on ForkchoiceUpdated Events - TestSpec( + TestDesc( name: "Latest Block after NewPayload", - run: blockStatusExecPayload1, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: blockStatusExecPayload1, + )), + TestDesc( name: "Latest Block after NewPayload (Transition Block)", - run: blockStatusExecPayload2, - ttd: 5, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: blockStatusExecPayload2, + ttd: 5, + )), + TestDesc( name: "Latest Block after New HeadBlock", - run: blockStatusHeadBlock1, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: blockStatusHeadBlock1, + )), + TestDesc( name: "Latest Block after New HeadBlock (Transition Block)", - run: blockStatusHeadBlock2, - ttd: 5, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: blockStatusHeadBlock2, + ttd: 5, + )), + TestDesc( name: "safe Block after New SafeBlockHash", - run: blockStatusSafeBlock, - ttd: 5, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: blockStatusSafeBlock, + ttd: 5, + )), + TestDesc( name: "finalized Block after New FinalizedBlockHash", - run: blockStatusFinalizedBlock, - ttd: 5, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: blockStatusFinalizedBlock, + ttd: 5, + )), + TestDesc( name: "Latest Block after Reorg", - run: blockStatusReorg, - ), + run: specExecute, + spec: EngineSpec( + exec: blockStatusReorg, + )), # Payload Tests - TestSpec( + TestDesc( name: "Re-Execute Payload", - run: reExecPayloads, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: reExecPayloads, + )), + TestDesc( name: "Multiple New Payloads Extending Canonical Chain", - run: 
multipleNewCanonicalPayloads, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: multipleNewCanonicalPayloads, + )), + TestDesc( name: "Out of Order Payload Execution", - run: outOfOrderPayloads, - ), + run: specExecute, + spec: EngineSpec( + exec: outOfOrderPayloads, + )), # Transaction Reorg using Engine API - TestSpec( + TestDesc( name: "Transaction Reorg", - run: transactionReorg, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: transactionReorg, + )), + TestDesc( name: "Sidechain Reorg", - run: sidechainReorg, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: sidechainReorg, + )), + TestDesc( name: "Re-Org Back into Canonical Chain", - run: reorgBack, - ), - TestSpec( + run: specExecute, + spec: EngineSpec( + exec: reorgBack, + )), + TestDesc( name: "Re-Org Back to Canonical Chain From Syncing Chain", - run: reorgBackFromSyncing, - ), + run: specExecute, + spec: EngineSpec( + exec: reorgBackFromSyncing, + )), # Suggested Fee Recipient in Payload creation - TestSpec( + TestDesc( name: "Suggested Fee Recipient Test", - run: suggestedFeeRecipient, - ), + run: specExecute, + spec: EngineSpec( + exec: suggestedFeeRecipient, + )), # PrevRandao opcode tests - TestSpec( + TestDesc( name: "PrevRandao Opcode Transactions", - run: prevRandaoOpcodeTx, - ttd: 10, - ), + run: specExecute, + spec: EngineSpec( + exec: prevRandaoOpcodeTx, + ttd: 10, + )), # Multi-Client Sync tests - TestSpec( + TestDesc( name: "Sync Client Post Merge", - run: postMergeSync, - ttd: 10, - ) + run: specExecute, + spec: EngineSpec( + exec: postMergeSync, + ttd: 10, + )),]# ] diff --git a/hive_integration/nodocker/engine/exchange_cap_tests.nim b/hive_integration/nodocker/engine/exchange_cap_tests.nim index 166b7b657..16c142f11 100644 --- a/hive_integration/nodocker/engine/exchange_cap_tests.nim +++ b/hive_integration/nodocker/engine/exchange_cap_tests.nim @@ -1,15 +1,13 @@ import ./test_env, ./types, - unittest2, chronicles, ../../tools/common/helpers, ../../nimbus/common/hardforks type - ECTestSpec* = object - name*: string - run*: proc(t: TestEnv): TestStatus + ECSpec* = ref object of BaseSpec + exec*: proc(t: TestEnv): bool conf*: ChainConfig const @@ -32,8 +30,7 @@ const "engine_getPayloadV3", ] -proc ecImpl(t: TestEnv, minExpectedCaps: openArray[string]): TestStatus = - result = TestStatus.OK +proc ecImpl(t: TestEnv, minExpectedCaps: openArray[string]): bool = let res = t.rpcClient.exchangeCapabilities(@minExpectedCaps) testCond res.isOk: error "Unable request capabilities", msg=res.error @@ -42,11 +39,12 @@ proc ecImpl(t: TestEnv, minExpectedCaps: openArray[string]): TestStatus = for x in minExpectedCaps: testCond x in returnedCaps: error "Expected capability not found", cap=x + return true -proc ecShanghai(env: TestEnv): TestStatus = +proc ecShanghai(env: TestEnv): bool = ecImpl(env, ShanghaiCapabilities) -proc ecCancun(env: TestEnv): TestStatus = +proc ecCancun(env: TestEnv): bool = ecImpl(env, CancunCapabilities) proc getCCShanghai(timestamp: int): ChainConfig = @@ -57,26 +55,44 @@ proc getCCCancun(timestamp: int): ChainConfig = result = getChainConfig("Cancun") result.cancunTime = some(fromUnix(timestamp)) +proc specExecute(ws: BaseSpec): bool = + let ws = ECSpec(ws) + let env = setupELClient(ws.conf) + result = ws.exec(env) + env.stopELClient() + # const doesn't work with ref object -let exchangeCapTestList* = [ - ECTestSpec( +let ecTestList* = [ + TestDesc( name: "Exchange Capabilities - Shanghai", - run: ecShanghai, - conf: getCCShanghai(0) + run: specExecute, + 
spec: ECSpec( + exec: ecShanghai, + conf: getCCShanghai(0) + ) ), - ECTestSpec( + TestDesc( name: "Exchange Capabilities - Shanghai (Not active)", - run: ecShanghai, - conf: getCCShanghai(1000) + run: specExecute, + spec: ECSpec( + exec: ecShanghai, + conf: getCCShanghai(1000) + ) ), - ECTestSpec( + TestDesc( name: "Exchange Capabilities - Cancun", - run: ecCancun, - conf: getCCCancun(0) + run: specExecute, + spec: ECSpec( + exec: ecCancun, + conf: getCCCancun(0) + ) ), - ECTestSpec( + TestDesc( name: "Exchange Capabilities - Cancun (Not active)", - run: ecCancun, - conf: getCCCancun(1000) + run: specExecute, + spec: ECSpec( + exec: ecCancun, + conf: getCCCancun(1000) + ) ) ] diff --git a/hive_integration/nodocker/engine/helper.nim b/hive_integration/nodocker/engine/helper.nim index 467339c69..b8d50946c 100644 --- a/hive_integration/nodocker/engine/helper.nim +++ b/hive_integration/nodocker/engine/helper.nim @@ -1,13 +1,17 @@ import - std/[typetraits], + std/[typetraits, times], nimcrypto/sysrand, - test_env, eth/[common, rlp, keys], json_rpc/[rpcclient], - ../../../nimbus/transaction + ../../../nimbus/transaction, + ../../../nimbus/utils/utils, + ../../../nimbus/rpc/execution_types, + ./types import eth/common/eth_types as common_eth_types -type Hash256 = common_eth_types.Hash256 +type + Hash256 = common_eth_types.Hash256 + EthBlockHeader = common_eth_types.BlockHeader type ExecutableData* = object @@ -25,6 +29,9 @@ type baseFeePerGas*: UInt256 blockHash* : Hash256 transactions* : seq[Transaction] + withdrawals* : Option[seq[Withdrawal]] + blobGasUsed* : Option[uint64] + excessBlobGas*: Option[uint64] CustomPayload* = object parentHash* : Option[Hash256] @@ -41,6 +48,11 @@ type baseFeePerGas*: Option[UInt256] blockHash* : Option[Hash256] transactions* : Option[seq[Transaction]] + withdrawals* : Option[seq[Withdrawal]] + blobGasUsed* : Option[uint64] + excessBlobGas*: Option[uint64] + beaconRoot* : Option[Hash256] + removeWithdrawals*: bool InvalidPayloadField* = enum InvalidParentHash @@ -72,14 +84,20 @@ type data : Option[seq[byte]] sig : Option[SignatureVal] -proc customizePayload*(basePayload: ExecutableData, customData: CustomPayload): ExecutionPayloadV1 = +proc customizePayload*(basePayload: ExecutableData, customData: CustomPayload): ExecutionPayload = let txs = if customData.transactions.isSome: customData.transactions.get else: basePayload.transactions - let txRoot = calcTxRoot(txs) + let wdRoot = if customData.withdrawals.isSome: + some(calcWithdrawalsRoot(customData.withdrawals.get)) + elif basePayload.withdrawals.isSome: + some(calcWithdrawalsRoot(basePayload.withdrawals.get)) + else: + none(Hash256) + var customHeader = EthBlockHeader( parentHash: basePayload.parentHash, ommersHash: EMPTY_UNCLE_HASH, @@ -96,7 +114,10 @@ proc customizePayload*(basePayload: ExecutableData, customData: CustomPayload): extraData: basePayload.extraData, mixDigest: basePayload.prevRandao, nonce: default(BlockNonce), - fee: some(basePayload.baseFeePerGas) + fee: some(basePayload.baseFeePerGas), + withdrawalsRoot: wdRoot, + blobGasUsed: basePayload.blobGasUsed, + excessBlobGas: basePayload.excessBlobGas, ) # Overwrite custom information @@ -136,12 +157,21 @@ proc customizePayload*(basePayload: ExecutableData, customData: CustomPayload): if customData.baseFeePerGas.isSome: customHeader.baseFee = customData.baseFeePerGas.get + if customData.blobGasUsed.isSome: + customHeader.blobGasUsed = customData.blobGasUsed + + if customData.excessBlobGas.isSome: + customHeader.excessBlobGas = 
customData.excessBlobGas + + if customData.beaconRoot.isSome: + customHeader.parentBeaconBlockRoot = customData.beaconRoot + # Return the new payload - result = ExecutionPayloadV1( - parentHash: Web3BlockHash customHeader.parentHash.data, + result = ExecutionPayload( + parentHash: w3Hash customHeader.parentHash, feeRecipient: Web3Address customHeader.coinbase, - stateRoot: Web3BlockHash customHeader.stateRoot.data, - receiptsRoot: Web3BlockHash customHeader.receiptRoot.data, + stateRoot: w3Hash customHeader.stateRoot, + receiptsRoot: w3Hash customHeader.receiptRoot, logsBloom: Web3Bloom customHeader.bloom, prevRandao: Web3PrevRandao customHeader.mixDigest.data, blockNumber: Web3Quantity customHeader.blockNumber.truncate(uint64), @@ -150,17 +180,26 @@ proc customizePayload*(basePayload: ExecutableData, customData: CustomPayload): timestamp: Web3Quantity toUnix(customHeader.timestamp), extraData: Web3ExtraData customHeader.extraData, baseFeePerGas: customHeader.baseFee, - blockHash: Web3BlockHash customHeader.blockHash.data + blockHash: w3Hash customHeader.blockHash, + blobGasUsed: w3Qty customHeader.blobGasUsed, + excessBlobGas: w3Qty customHeader.excessBlobGas, ) for tx in txs: let txData = rlp.encode(tx) result.transactions.add TypedTransaction(txData) -proc hash256*(h: Web3BlockHash): Hash256 = - Hash256(data: distinctBase h) + let wds = if customData.withdrawals.isSome: + customData.withdrawals + elif basePayload.withdrawals.isSome: + basePayload.withdrawals + else: + none(seq[Withdrawal]) -proc toExecutableData*(payload: ExecutionPayloadV1): ExecutableData = + if wds.isSome and customData.removeWithdrawals.not: + result.withdrawals = some(w3Withdrawals(wds.get)) + +proc toExecutableData*(payload: ExecutionPayload): ExecutableData = result = ExecutableData( parentHash : hash256(payload.parentHash), feeRecipient : distinctBase payload.feeRecipient, @@ -174,14 +213,19 @@ proc toExecutableData*(payload: ExecutionPayloadV1): ExecutableData = timestamp : fromUnix(int64 payload.timestamp), extraData : distinctBase payload.extraData, baseFeePerGas : payload.baseFeePerGas, - blockHash : hash256(payload.blockHash) + blockHash : hash256(payload.blockHash), + blobGasUsed : u64 payload.blobGasUsed, + excessBlobGas : u64 payload.excessBlobGas, ) for data in payload.transactions: let tx = rlp.decode(distinctBase data, Transaction) result.transactions.add tx -proc customizePayload*(basePayload: ExecutionPayloadV1, customData: CustomPayload): ExecutionPayloadV1 = + if payload.withdrawals.isSome: + result.withdrawals = some(withdrawals(payload.withdrawals.get)) + +proc customizePayload*(basePayload: ExecutionPayload, customData: CustomPayload): ExecutionPayload = customizePayload(basePayload.toExecutableData, customData) proc customizeTx(baseTx: Transaction, vaultKey: PrivateKey, customTx: CustomTx): Transaction = @@ -231,7 +275,7 @@ proc modifyHash(x: Hash256): Hash256 = proc generateInvalidPayload*(basePayload: ExecutableData, payloadField: InvalidPayloadField, - vaultKey: PrivateKey): ExecutionPayloadV1 = + vaultKey: PrivateKey): ExecutionPayload = var customPayload: CustomPayload @@ -294,12 +338,12 @@ proc generateInvalidPayload*(basePayload: ExecutableData, customizePayload(basePayload, customPayload) -proc generateInvalidPayload*(basePayload: ExecutionPayloadV1, +proc generateInvalidPayload*(basePayload: ExecutionPayload, payloadField: InvalidPayloadField, - vaultKey = default(PrivateKey)): ExecutionPayloadV1 = + vaultKey = default(PrivateKey)): ExecutionPayload = 
generateInvalidPayload(basePayload.toExecutableData, payloadField, vaultKey) -proc txInPayload*(payload: ExecutionPayloadV1, txHash: Hash256): bool = +proc txInPayload*(payload: ExecutionPayload, txHash: Hash256): bool = for txBytes in payload.transactions: let currTx = rlp.decode(common.Blob txBytes, Transaction) if rlpHash(currTx) == txHash: diff --git a/hive_integration/nodocker/engine/genesis.json b/hive_integration/nodocker/engine/init/genesis.json similarity index 100% rename from hive_integration/nodocker/engine/genesis.json rename to hive_integration/nodocker/engine/init/genesis.json diff --git a/hive_integration/nodocker/engine/sealer.key b/hive_integration/nodocker/engine/init/sealer.key similarity index 100% rename from hive_integration/nodocker/engine/sealer.key rename to hive_integration/nodocker/engine/init/sealer.key diff --git a/hive_integration/nodocker/engine/test_env.nim b/hive_integration/nodocker/engine/test_env.nim index 2ab50960e..b5f453a55 100644 --- a/hive_integration/nodocker/engine/test_env.nim +++ b/hive_integration/nodocker/engine/test_env.nim @@ -20,13 +20,10 @@ import ../../../tests/test_helpers, "."/[clmock, engine_client] -import web3/engine_api_types -from web3/ethtypes as web3types import nil - export - common, engine_api_types, times, + common, times, results, constants, - TypedTransaction, clmock, engine_client + clmock, engine_client type EthBlockHeader* = common.BlockHeader @@ -43,21 +40,26 @@ type gHeader*: EthBlockHeader ttd*: DifficultyInt clMock*: CLMocker - nonce*: uint64 vaultKey*: PrivateKey tx*: Transaction + nonce*: uint64 - Web3BlockHash* = web3types.BlockHash - Web3Address* = web3types.Address - Web3Bloom* = web3types.FixedBytes[256] - Web3Quantity* = web3types.Quantity - Web3PrevRandao* = web3types.FixedBytes[32] - Web3ExtraData* = web3types.DynamicBytes[0, 32] + BaseTx* = object of RootObj + recipient*: Option[EthAddress] + gasLimit* : GasInt + amount* : UInt256 + payload* : seq[byte] + txType* : Option[TxType] + + BigInitcodeTx* = object of BaseTx + initcodeLength*: int + padByte* : uint8 + initcode* : seq[byte] const - baseFolder = "hive_integration" / "nodocker" / "engine" - genesisFile = baseFolder / "genesis.json" - sealerKey = baseFolder / "sealer.key" + baseFolder = "hive_integration/nodocker/engine" + genesisFile = baseFolder / "init/genesis.json" + sealerKey = baseFolder / "init/sealer.key" chainFolder = baseFolder / "chains" # This is the account that sends vault funding transactions. 
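# Illustrative usage sketch (not part of the patch; `env` and the recipient are
# placeholders): the BaseTx/BigInitcodeTx descriptors introduced above are
# consumed by the makeTx/sendTx helpers added further down in this file, so a
# test would presumably drive them roughly like this:
#
#   var tc = BaseTx(
#     recipient: some(prevRandaoContractAddr), # any address the test wants to touch
#     amount   : 1.u256,
#     gasLimit : 75000.GasInt)
#   # signs with the vault key, uses and bumps env.nonce, then submits via RPC
#   doAssert env.sendNextTx(tc)
#
# BigInitcodeTx works the same way, except that makeTx pads `payload` up to
# `initcodeLength` with `padByte` before signing and requires `recipient` to be
# none (contract creation), as asserted below.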
@@ -148,10 +150,20 @@ proc setupELClient*(conf: ChainConfig): TestEnv = result.conf.networkParams.config = conf setupELClient(result, "", false) +proc newTestEnv*(): TestEnv = + TestEnv( + conf: makeConfig(@["--engine-signer:658bdf435d810c91414ec09147daa6db62406379", "--custom-network:" & genesisFile]) + ) + +proc newTestEnv*(conf: ChainConfig): TestEnv = + result = TestEnv( + conf: makeConfig(@["--engine-signer:658bdf435d810c91414ec09147daa6db62406379", "--custom-network:" & genesisFile]) + ) + result.conf.networkParams.config = conf + proc stopELClient*(t: TestEnv) = waitFor t.rpcClient.close() waitFor t.sealingEngine.stop() - #waitFor t.rpcServer.stop() waitFor t.rpcServer.closeWait() # TTD is the value specified in the TestSpec + Genesis.Difficulty @@ -159,7 +171,7 @@ proc setRealTTD*(t: TestEnv, ttdValue: int64) = let realTTD = t.gHeader.difficulty + ttdValue.u256 t.com.setTTD some(realTTD) t.ttd = realTTD - t.clmock = newCLMocker(t.rpcClient, realTTD) + t.clmock = newCLMocker(t.rpcClient, t.com) proc slotsToSafe*(t: TestEnv, x: int) = t.clMock.slotsToSafe = x @@ -170,26 +182,98 @@ proc slotsToFinalized*(t: TestEnv, x: int) = func gwei(n: int64): GasInt {.compileTime.} = GasInt(n * (10 ^ 9)) -proc makeNextTransaction*(t: TestEnv, recipient: EthAddress, amount: UInt256, payload: openArray[byte] = []): Transaction = +proc getTxType(tc: BaseTx, nonce: uint64): TxType = + if tc.txType.isNone: + if nonce mod 2 == 0: + TxLegacy + else: + TxEIP1559 + else: + tc.txType.get + +proc makeTx*(t: TestEnv, tc: BaseTx, nonce: AccountNonce): Transaction = const - gasLimit = 75000.GasInt gasPrice = 30.gwei + gasTipPrice = 1.gwei + + gasFeeCap = gasPrice + gasTipCap = gasTipPrice let chainId = t.conf.networkParams.config.chainId - let tx = Transaction( - txType : TxLegacy, - chainId : chainId, - nonce : AccountNonce(t.nonce), - gasPrice: gasPrice, - gasLimit: gasLimit, - to : some(recipient), - value : amount, - payload : @payload - ) + let txType = tc.getTxType(nonce) + + # Build the transaction depending on the specified type + let tx = if txType == TxLegacy: + Transaction( + txType : TxLegacy, + nonce : nonce, + to : tc.recipient, + value : tc.amount, + gasLimit: tc.gasLimit, + gasPrice: gasPrice, + payload : tc.payload + ) + else: + Transaction( + txType : TxEIP1559, + nonce : nonce, + gasLimit: tc.gasLimit, + maxFee : gasFeeCap, + maxPriorityFee: gasTipCap, + to : tc.recipient, + value : tc.amount, + payload : tc.payload, + chainId : chainId + ) - inc t.nonce signTransaction(tx, t.vaultKey, chainId, eip155 = true) +proc makeTx*(t: TestEnv, tc: var BigInitcodeTx, nonce: AccountNonce): Transaction = + if tc.payload.len == 0: + # Prepare initcode payload + if tc.initcode.len != 0: + doAssert(tc.initcode.len <= tc.initcodeLength, "invalid initcode (too big)") + tc.payload = tc.initcode + + while tc.payload.len < tc.initcodeLength: + tc.payload.add tc.padByte + + doAssert(tc.recipient.isNone, "invalid configuration for big contract tx creator") + t.makeTx(tc.BaseTx, nonce) + +proc sendNextTx*(t: TestEnv, tc: BaseTx): bool = + t.tx = t.makeTx(tc, t.nonce) + inc t.nonce + let rr = t.rpcClient.sendTransaction(t.tx) + if rr.isErr: + error "Unable to send transaction", msg=rr.error + return false + return true + +proc sendTx*(t: TestEnv, tc: BaseTx, nonce: AccountNonce): bool = + t.tx = t.makeTx(tc, nonce) + let rr = t.rpcClient.sendTransaction(t.tx) + if rr.isErr: + error "Unable to send transaction", msg=rr.error + return false + return true + +proc sendTx*(t: TestEnv, tc: BigInitcodeTx, nonce: 
AccountNonce): bool = + t.tx = t.makeTx(tc, nonce) + let rr = t.rpcClient.sendTransaction(t.tx) + if rr.isErr: + error "Unable to send transaction", msg=rr.error + return false + return true + +proc sendTx*(t: TestEnv, tx: Transaction): bool = + t.tx = tx + let rr = t.rpcClient.sendTransaction(t.tx) + if rr.isErr: + error "Unable to send transaction", msg=rr.error + return false + return true + proc verifyPoWProgress*(t: TestEnv, lastBlockHash: ethtypes.Hash256): bool = let res = waitFor verifyPoWProgress(t.rpcClient, lastBlockHash) if res.isErr: diff --git a/hive_integration/nodocker/engine/types.nim b/hive_integration/nodocker/engine/types.nim index 4f3458561..c6b5a509c 100644 --- a/hive_integration/nodocker/engine/types.nim +++ b/hive_integration/nodocker/engine/types.nim @@ -1,40 +1,46 @@ import - std/options, - test_env, - unittest2, + std/[options, times, strutils, typetraits], web3/ethtypes, - ../../../nimbus/rpc/merge/mergeutils + ../../../nimbus/rpc/merge/mergeutils, + ../../../nimbus/rpc/execution_types, + web3/engine_api_types, + eth/common/eth_types_rlp -export ethtypes +from web3/ethtypes as web3types import nil -import eth/common/eth_types as common_eth_types +export + ethtypes, + engine_api_types + +import eth/common/eth_types as common type - TestSpec* = object - name*: string - run*: proc(t: TestEnv): TestStatus - ttd*: int64 - chainFile*: string - slotsToFinalized*: int - slotsToSafe*: int - enableAuth*: bool + BaseSpec* = ref object of RootObj + txType*: Option[TxType] + + TestDesc* = object + name* : string + about*: string + run* : proc(spec: BaseSpec): bool + spec* : BaseSpec + + Web3Hash256* = web3types.Hash256 + Web3Address* = web3types.Address + Web3Bloom* = web3types.FixedBytes[256] + Web3Quantity* = web3types.Quantity + Web3PrevRandao* = web3types.FixedBytes[32] + Web3ExtraData* = web3types.DynamicBytes[0, 32] template testCond*(expr: untyped) = if not (expr): - when result is bool: - return false - else: - return TestStatus.Failed + return false template testCond*(expr, body: untyped) = if not (expr): body - when result is bool: - return false - else: - return TestStatus.Failed + return false -proc `$`*(x: Option[common_eth_types.Hash256]): string = +proc `$`*(x: Option[common.Hash256]): string = if x.isNone: "none" else: @@ -51,3 +57,144 @@ proc `$`*(x: Option[PayloadID]): string = "none" else: x.get().toHex + +func w3Hash*(x: common.Hash256): Web3Hash256 = + Web3Hash256 x.data + +func w3Hash*(x: Option[common.Hash256]): Option[BlockHash] = + if x.isNone: + return none(BlockHash) + some(BlockHash x.get.data) + +proc w3Hash*(x: common.BlockHeader): BlockHash = + BlockHash x.blockHash.data + +func w3Qty*(a: EthTime, b: int): Quantity = + Quantity(a.toUnix + b.int64) + +func w3Qty*(x: Option[uint64]): Option[Quantity] = + if x.isNone: + return none(Quantity) + return some(Quantity x.get) + +func u64*(x: Option[Quantity]): Option[uint64] = + if x.isNone: + return none(uint64) + return some(uint64 x.get) + +func w3PrevRandao*(): Web3PrevRandao = + discard + +func w3Address*(): Web3Address = + discard + +proc hash256*(h: Web3Hash256): common.Hash256 = + common.Hash256(data: distinctBase h) + +proc hash256*(h: Option[Web3Hash256]): Option[common.Hash256] = + if h.isNone: + return none(common.Hash256) + some(hash256(h.get)) + +proc w3Withdrawal*(w: Withdrawal): WithdrawalV1 = + WithdrawalV1( + index: Quantity(w.index), + validatorIndex: Quantity(w.validatorIndex), + address: Address(w.address), + amount: Quantity(w.amount) + ) + +proc w3Withdrawals*(list: 
openArray[Withdrawal]): seq[WithdrawalV1] = + result = newSeqOfCap[WithdrawalV1](list.len) + for x in list: + result.add w3Withdrawal(x) + +proc withdrawal*(w: WithdrawalV1): Withdrawal = + Withdrawal( + index: uint64(w.index), + validatorIndex: uint64(w.validatorIndex), + address: distinctBase(w.address), + amount: uint64(w.amount) + ) + +proc withdrawals*(list: openArray[WithdrawalV1]): seq[Withdrawal] = + result = newSeqOfCap[Withdrawal](list.len) + for x in list: + result.add withdrawal(x) + +proc `==`*(a: Option[BlockHash], b: Option[common.Hash256]): bool = + if a.isNone and b.isNone: + return true + if a.isSome and b.isSome: + return a.get() == b.get().data.BlockHash + +proc `==`*(a, b: TypedTransaction): bool = + distinctBase(a) == distinctBase(b) + +template testFCU*(res, cond: untyped, validHash: Option[common.Hash256], id = none(PayloadID)) = + testCond res.isOk: + error "Unexpected FCU Error", msg=res.error + let s = res.get() + testCond s.payloadStatus.status == PayloadExecutionStatus.cond: + error "Unexpected FCU status", expect=PayloadExecutionStatus.cond, get=s.payloadStatus.status + testCond s.payloadStatus.latestValidHash == validHash: + error "Unexpected FCU latestValidHash", expect=validHash, get=s.payloadStatus.latestValidHash + testCond s.payloadId == id: + error "Unexpected FCU payloadID", expect=id, get=s.payloadId + +template testFCU*(res, cond: untyped) = + testCond res.isOk: + error "Unexpected FCU Error", msg=res.error + let s = res.get() + testCond s.payloadStatus.status == PayloadExecutionStatus.cond: + error "Unexpected FCU status", expect=PayloadExecutionStatus.cond, get=s.payloadStatus.status + +template expectErrorCode*(res: untyped, errCode: int) = + testCond res.isErr: + error "unexpected result, want error, get ok" + testCond res.error.find($errCode) != -1 + +template expectNoError*(res: untyped) = + testCond res.isOk + +template expectPayload*(res: untyped, payload: ExecutionPayload) = + testCond res.isOk: + error "Unexpected getPayload Error", msg=res.error + let x = res.get + when typeof(x) is ExecutionPayloadV1: + testCond x == payload.V1: + error "getPayloadV1 return mismatch payload" + elif typeof(x) is GetPayloadV2Response: + testCond x.executionPayload == payload.V1V2: + error "getPayloadV2 return mismatch payload" + else: + testCond x.executionPayload == payload.V3: + error "getPayloadV3 return mismatch payload" + +template expectStatus*(res, cond: untyped) = + testCond res.isOk: + error "Unexpected newPayload error", msg=res.error + let s = res.get() + testCond s.status == PayloadExecutionStatus.cond: + error "Unexpected newPayload status", expect=PayloadExecutionStatus.cond, get=s.status + +template expectWithdrawalsRoot*(res: untyped, h: common.BlockHeader, wdRoot: Option[common.Hash256]) = + testCond res.isOk: + error "Unexpected error", msg=res.error + testCond h.withdrawalsRoot == wdRoot: + error "wdroot mismatch" + +template expectBalanceEqual*(res: untyped, expectedBalance: UInt256) = + testCond res.isOk: + error "Unexpected error", msg=res.error + testCond res.get == expectedBalance: + error "balance mismatch", expect=expectedBalance, get=res.get + +template expectLatestValidHash*(res: untyped, expectedHash: Web3Hash256) = + testCond res.isOk: + error "Unexpected error", msg=res.error + let s = res.get + testCond s.latestValidHash.isSome: + error "Expect latest valid hash isSome" + testCond s.latestValidHash.get == expectedHash: + error "latest valid hash mismatch", expect=expectedHash, get=s.latestValidHash.get diff --git 
a/hive_integration/nodocker/engine/withdrawal_tests.nim b/hive_integration/nodocker/engine/withdrawal_tests.nim new file mode 100644 index 000000000..92e4c4d0c --- /dev/null +++ b/hive_integration/nodocker/engine/withdrawal_tests.nim @@ -0,0 +1,390 @@ +import + withdrawals/wd_base_spec, + withdrawals/wd_block_value_spec, + withdrawals/wd_max_init_code_spec, + #withdrawals/wd_payload_body_spec, + withdrawals/wd_reorg_spec, + withdrawals/wd_sync_spec, + ./types, + ./test_env + +proc specExecute[T](ws: BaseSpec): bool = + let + ws = T(ws) + conf = ws.getForkConfig() + env = newTestEnv(conf) + discard ws.getGenesis(env.conf.networkParams) + + setupELClient(env, "", false) + env.setRealTTD(0) + ws.configureCLMock(env.clMock) + result = ws.execute(env) + env.stopELClient() + +let wdTestList* = [ + #Re-Org tests + TestDesc( + name: "Withdrawals Fork on Block 1 - 1 Block Re-Org", + about: "Tests a simple 1 block re-org", + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 300, + wdForkHeight: 1, # Genesis is Pre-Withdrawals + wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + reOrgBlockCount: 1, + reOrgViaSync: false, + )), + TestDesc( + name: "Withdrawals Fork on Block 1 - 8 Block Re-Org NewPayload", + about: "Tests a 8 block re-org using NewPayload. Re-org does not change withdrawals fork height", + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 300, + wdForkHeight: 1, # Genesis is Pre-Withdrawals + wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + reOrgBlockCount: 8, + reOrgViaSync: false, + )), + TestDesc( + name: "Withdrawals Fork on Block 1 - 8 Block Re-Org, Sync", + about: "Tests a 8 block re-org using NewPayload. 
Re-org does not change withdrawals fork height", + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 300, + wdForkHeight: 1, # Genesis is Pre-Withdrawals + wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + reOrgBlockCount: 8, + reOrgViaSync: true, + )), + TestDesc( + name: "Withdrawals Fork on Block 8 - 10 Block Re-Org NewPayload", + about: "Tests a 10 block re-org using NewPayload\n" & + "Re-org does not change withdrawals fork height, but changes\n" & + "the payload at the height of the fork\n", + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 300, + wdForkHeight: 8, # Genesis is Pre-Withdrawals + wdBlockCount: 8, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + reOrgBlockCount: 10, + reOrgViaSync: false, + )), + TestDesc( + name: "Withdrawals Fork on Block 8 - 10 Block Re-Org Sync", + about: " Tests a 10 block re-org using sync", + # Re-org does not change withdrawals fork height, but changes + # the payload at the height of the fork + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 300, + wdForkHeight: 8, # Genesis is Pre-Withdrawals + wdBlockCount: 8, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + reOrgBlockCount: 10, + reOrgViaSync: true, + )), + TestDesc( + name: "Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org", + about: "Tests a 10 block re-org using NewPayload", + # Sidechain reaches withdrawals fork at a lower block height + # than the canonical chain + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 300, + wdForkHeight: 8, # Genesis is Pre-Withdrawals + wdBlockCount: 8, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + reOrgBlockCount: 10, + reOrgViaSync: false, + sidechaintimeIncrements: 2, + )), + TestDesc( + name: "Withdrawals Fork on Canonical Block 8 / Side Block 7 - 10 Block Re-Org Sync", + about: "Tests a 10 block re-org using sync", + # Sidechain reaches withdrawals fork at a lower block height + # than the canonical chain + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 300, + wdForkHeight: 8, # Genesis is Pre-Withdrawals + wdBlockCount: 8, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + reOrgBlockCount: 10, + reOrgViaSync: true, + sidechaintimeIncrements: 2, + )), + TestDesc( + name: "Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org", + about: "Tests a 10 block re-org using NewPayload", + # Sidechain reaches withdrawals fork at a higher block height + # than the canonical chain + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 300, + wdForkHeight: 8, # Genesis is Pre-Withdrawals + wdBlockCount: 8, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + timeIncrements: 2, + reOrgBlockCount: 10, + reOrgViaSync: false, + sidechaintimeIncrements: 1, + )), + TestDesc( + name: "Withdrawals Fork on Canonical Block 8 / Side Block 9 - 10 Block Re-Org Sync", + about: "Tests a 10 block re-org using sync", + # Sidechain reaches withdrawals fork at a higher block height + # than the canonical chain + run: specExecute[ReorgSpec], + spec: ReorgSpec( + slotsToSafe: u256(32), + slotsToFinalized: u256(64), + timeoutSeconds: 
300, + wdForkHeight: 8, # Genesis is Pre-Withdrawals + wdBlockCount: 8, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + timeIncrements: 2, + reOrgBlockCount: 10, + reOrgViaSync: true, + sidechaintimeIncrements: 1, + )), + + # Sync Tests + TestDesc( + name: "Sync after 2 blocks - Withdrawals on Block 1 - Single Withdrawal Account - No Transactions", + about: "- Spawn a first client\n" & + "- Go through withdrawals fork on Block 1\n" & + "- Withdraw to a single account MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK times each block for 2 blocks\n" & + "- Spawn a secondary client and send FCUV2(head)\n" & + "- Wait for sync and verify withdrawn account's balance\n", + run: specExecute[SyncSpec], + spec: SyncSpec( + timeoutSeconds: 6000, + wdForkHeight: 1, + wdBlockCount: 2, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: 1, + txPerBlock: some(0), + syncSteps: 1, + )), + TestDesc( + name: "Sync after 2 blocks - Withdrawals on Block 1 - Single Withdrawal Account", + about: "- Spawn a first client\n" & + "- Go through withdrawals fork on Block 1\n" & + "- Withdraw to a single account MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK times each block for 2 blocks\n" & + "- Spawn a secondary client and send FCUV2(head)\n" & + "- Wait for sync and verify withdrawn account's balance\n", + run: specExecute[SyncSpec], + spec: SyncSpec( + wdForkHeight: 1, + wdBlockCount: 2, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: 1, + syncSteps: 1, + )), + TestDesc( + name: "Sync after 2 blocks - Withdrawals on Genesis - Single Withdrawal Account", + about: "- Spawn a first client, with Withdrawals since genesis\n" & + "- Withdraw to a single account MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK times each block for 2 blocks\n" & + "- Spawn a secondary client and send FCUV2(head)\n" & + "- Wait for sync and verify withdrawn account's balance\n", + run: specExecute[SyncSpec], + spec: SyncSpec( + wdForkHeight: 0, + wdBlockCount: 2, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: 1, + syncSteps: 1, + )), + TestDesc( + name: "Sync after 2 blocks - Withdrawals on Block 2 - Multiple Withdrawal Accounts - No Transactions", + about: "- Spawn a first client\n" & + "- Go through withdrawals fork on Block 2\n" & + "- Withdraw to MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK accounts each block for 2 blocks\n" & + "- Spawn a secondary client and send FCUV2(head)\n" & + "- Wait for sync, which include syncing a pre-Withdrawals block, and verify withdrawn account's balance\n", + run: specExecute[SyncSpec], + spec: SyncSpec( + wdForkHeight: 2, + wdBlockCount: 2, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + txPerBlock: some(0), + syncSteps: 1, + )), + TestDesc( + name: "Sync after 2 blocks - Withdrawals on Block 2 - Multiple Withdrawal Accounts", + about: "- Spawn a first client\n" & + "- Go through withdrawals fork on Block 2\n" & + "- Withdraw to MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK accounts each block for 2 blocks\n" & + "- Spawn a secondary client and send FCUV2(head)\n" & + "- Wait for sync, which include syncing a pre-Withdrawals block, and verify withdrawn account's balance\n", + run: specExecute[SyncSpec], + spec: SyncSpec( + wdForkHeight: 2, + wdBlockCount: 2, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + syncSteps: 1, + )), + TestDesc( + name: "Sync after 128 blocks - Withdrawals on Block 2 - Multiple Withdrawal 
Accounts", + about: "- Spawn a first client\n" & + "- Go through withdrawals fork on Block 2\n" & + "- Withdraw to many accounts MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK times each block for 128 blocks\n" & + "- Spawn a secondary client and send FCUV2(head)\n" & + "- Wait for sync, which include syncing a pre-Withdrawals block, and verify withdrawn account's balance\n", + run: specExecute[SyncSpec], + spec: SyncSpec( + timeoutSeconds: 300, + wdForkHeight: 2, + wdBlockCount: 128, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: 1024, + syncSteps: 1, + )), + + # EVM Tests (EIP-3651, EIP-3855, EIP-3860) + TestDesc( + name: "Max Initcode Size", + run: specExecute[MaxInitcodeSizeSpec], + spec: MaxInitcodeSizeSpec( + wdForkHeight: 2, # Block 1 is Pre-Withdrawals + wdBlockCount: 2, + overflowMaxInitcodeTxCountBeforeFork: 0, + overflowMaxInitcodeTxCountAfterFork: 1, + )), + # Block value tests + TestDesc( + name: "GetPayloadV2 Block Value", + about: "Verify the block value returned in GetPayloadV2.", + run: specExecute[BlockValueSpec], + spec: BlockValueSpec( + wdForkHeight: 1, + wdBlockCount: 1, + )), + TestDesc( + name: "Withdrawals Fork On Genesis", + about: "Tests the withdrawals fork happening since genesis (e.g. on a testnet).", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 0, + wdBlockCount: 2, # Genesis is a withdrawals block + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + )), + TestDesc( + name: "Withdrawals Fork on Block 1", + about: "Tests the withdrawals fork happening directly after genesis.", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 1, # Only Genesis is Pre-Withdrawals + wdBlockCount: 1, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + )), + TestDesc( + name: "Withdrawals Fork on Block 2", + about: "Tests the transition to the withdrawals fork after a single block" & + " has happened. Block 1 is sent with invalid non-null withdrawals payload and" & + " client is expected to respond with the appropriate error.", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 2, # Genesis and Block 1 are Pre-Withdrawals + wdBlockCount: 1, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + )), + TestDesc( + name: "Withdrawals Fork on Block 3", + about: "Tests the transition to the withdrawals fork after two blocks" & + " have happened. Block 2 is sent with invalid non-null withdrawals payload and" & + " client is expected to respond with the appropriate error.", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 3, # Genesis, Block 1 and 2 are Pre-Withdrawals + wdBlockCount: 1, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + )), + TestDesc( + name: "Withdraw to a single account", + about: "Make multiple withdrawals to a single account.", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 1, + wdBlockCount: 1, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: 1, + )), + TestDesc( + name: "Withdraw to two accounts", + about: "Make multiple withdrawals to two different accounts, repeated in" & + " round-robin. 
Reasoning: There might be a difference in implementation when an" & + " account appears multiple times in the withdrawals list but the list" & + " is not in ordered sequence.", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 1, + wdBlockCount: 1, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: 2, + )), + TestDesc( + name: "Withdraw many accounts", + about: "Make multiple withdrawals to MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK * 5 different accounts." & + " Execute many blocks this way.", + # TimeoutSeconds: 240, + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 1, + wdBlockCount: 4, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK * 5, + wdAbleAccountCount: 1024, + )), + TestDesc( + name: "Withdraw zero amount", + about: "Make multiple withdrawals where the amount withdrawn is 0.", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 1, + wdBlockCount: 1, + wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK, + wdAbleAccountCount: 2, + wdAmounts: @[0'u64, 1'u64] + )), + TestDesc( + name: "Empty Withdrawals", + about: "Produce withdrawals block with zero withdrawals.", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 1, + wdBlockCount: 1, + wdPerBlock: 0, + )), + TestDesc( + name: "Corrupted Block Hash Payload (INVALID)", + about: "Send a valid payload with a corrupted hash using engine_newPayloadV2.", + run: specExecute[WDBaseSpec], + spec: WDBaseSpec( + wdForkHeight: 1, + wdBlockCount: 1, + testCorrupedHashPayloads: true, + ) + ), +] diff --git a/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim new file mode 100644 index 000000000..a3c796889 --- /dev/null +++ b/hive_integration/nodocker/engine/withdrawals/wd_base_spec.nim @@ -0,0 +1,550 @@ +import + std/[times, options], + stint, + chronicles, + chronos, + stew/byteutils, + nimcrypto/sysrand, + web3/ethtypes, + ./wd_history, + ../helper, + ../test_env, + ../engine_client, + ../types, + ../../../tools/common/helpers, + ../../../nimbus/common/common, + ../../../nimbus/utils/utils, + ../../../nimbus/common/chain_config, + ../../../nimbus/rpc/execution_types + +type + WDBaseSpec* = ref object of BaseSpec + timeIncrements*: int # Timestamp increments per block throughout the test + wdForkHeight*: int # Withdrawals activation fork height + wdBlockCount*: int # Number of blocks on and after withdrawals fork activation + wdPerBlock*: int # Number of withdrawals per block + wdAbleAccountCount*: int # Number of accounts to withdraw to (round-robin) + wdHistory*: WDHistory # Internal withdrawals history that keeps track of all withdrawals + wdAmounts*: seq[uint64] # Amounts of withdrawn wei on each withdrawal (round-robin) + txPerBlock*: Option[int] # Amount of test transactions to include in withdrawal blocks + testCorrupedHashPayloads*: bool # Send a valid payload with corrupted hash + skipBaseVerifications*: bool # For code reuse of the base spec procedure + + WithdrawalsForBlock = object + wds: seq[Withdrawal] + nextIndex: int + +const + GenesisTimestamp = 0x1234 + WARM_COINBASE_ADDRESS = hexToByteArray[20]("0x0101010101010101010101010101010101010101") + PUSH0_ADDRESS = hexToByteArray[20]("0x0202020202020202020202020202020202020202") + MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK* = 16 + TX_CONTRACT_ADDRESSES = [ + WARM_COINBASE_ADDRESS, + PUSH0_ADDRESS, + ] + +# Get the per-block timestamp increments configured for this test +func getBlockTimeIncrements(ws: WDBaseSpec): int = 
+ if ws.timeIncrements == 0: + return 1 + ws.timeIncrements + +# Timestamp delta between genesis and the withdrawals fork +func getWithdrawalsGenesisTimeDelta(ws: WDBaseSpec): int = + ws.wdForkHeight * ws.getBlockTimeIncrements() + +# Calculates Shanghai fork timestamp given the amount of blocks that need to be +# produced beforehand. +func getWithdrawalsForkTime(ws: WDBaseSpec): int = + GenesisTimestamp + ws.getWithdrawalsGenesisTimeDelta() + +# Generates the fork config, including withdrawals fork timestamp. +func getForkConfig*(ws: WDBaseSpec): ChainConfig = + result = getChainConfig("Shanghai") + result.shanghaiTime = some(ws.getWithdrawalsForkTime().fromUnix) + +# Get the start account for all withdrawals. +func getWithdrawalsStartAccount*(ws: WDBaseSpec): UInt256 = + 0x1000.u256 + +func toAddress(x: UInt256): EthAddress = + var mm = x.toByteArrayBE + copyMem(result[0].addr, mm[11].addr, 20) + +# Adds bytecode that unconditionally sets an storage key to specified account range +func addUnconditionalBytecode(g: Genesis, start, stop: UInt256) = + var acc = start + while acc= ws.wdForkHeight: + # Shanghai + r.expectStorageEqual(WARM_COINBASE_ADDRESS, 100.u256) # WARM_STORAGE_READ_COST + p.expectStorageEqual(PUSH0_ADDRESS, latestPayloadNumber) # tx succeeded + else: + # Pre-Shanghai + r.expectStorageEqual(WARM_COINBASE_ADDRESS, 2600.u256) # COLD_ACCOUNT_ACCESS_COST + p.expectStorageEqual(PUSH0_ADDRESS, 0.u256) # tx must've failed + + ok() + +# Changes the CL Mocker default time increments of 1 to the value specified +# in the test spec. +proc configureCLMock*(ws: WDBaseSpec, cl: CLMocker) = + cl.blockTimestampIncrement = some(ws.getBlockTimeIncrements()) + +# Number of blocks to be produced (not counting genesis) before withdrawals +# fork. +func getPreWithdrawalsBlockCount*(ws: WDBaseSpec): int = + if ws.wdForkHeight == 0: + 0 + else: + ws.wdForkHeight - 1 + +# Number of payloads to be produced (pre and post withdrawals) during the entire test +func getTotalPayloadCount(ws: WDBaseSpec): int = + ws.getPreWithdrawalsBlockCount() + ws.wdBlockCount + +# Generates a list of withdrawals based on current configuration +func generateWithdrawalsForBlock(ws: WDBaseSpec, nextIndex: int, startAccount: UInt256): WithdrawalsForBlock = + let + differentAccounts = ws.getWithdrawableAccountCount() + + var wdAmounts = ws.wdAmounts + if wdAmounts.len == 0: + wdAmounts.add(1) + + for i in 0 ..< ws.wdPerBlock: + let + nextAccount = startAccount + (nextIndex mod differentAccounts).u256 + nextWithdrawal = Withdrawal( + index: nextIndex.uint64, + validatorIndex: nextIndex.uint64, + address: nextAccount.toAddress, + amount: wdAmounts[nextIndex mod wdAmounts.len] + ) + + result.wds.add nextWithdrawal + inc result.nextIndex + +# Base test case execution procedure for withdrawals +proc execute*(ws: WDBaseSpec, t: TestEnv): bool = + result = true + + let ok = waitFor t.clMock.waitForTTD() + testCond ok + + # Check if we have pre-Shanghai blocks + if ws.getWithdrawalsForkTime() > GenesisTimestamp: + # Check `latest` during all pre-shanghai blocks, none should + # contain `withdrawalsRoot`, including genesis. 
+ + # Genesis should not contain `withdrawalsRoot` either + var h: common.BlockHeader + let r = t.rpcClient.latestHeader(h) + testCond r.isOk: + error "failed to get latest header", msg=r.error + testCond h.withdrawalsRoot.isNone: + error "genesis should not contain wdsRoot" + else: + # Genesis is post-Shanghai, it should contain EmptyWithdrawalsRoot + var h: common.BlockHeader + let r = t.rpcClient.latestHeader(h) + testCond r.isOk: + error "failed to get latest header", msg=r.error + testCond h.withdrawalsRoot.isSome: + error "genesis should contain wdsRoot" + testCond h.withdrawalsRoot.get == EMPTY_ROOT_HASH: + error "genesis should contain wdsRoot==EMPTY_ROOT_HASH" + + # Produce any blocks necessary to reach withdrawals fork + var pbRes = t.clMock.produceBlocks(ws.getPreWithdrawalsBlockCount, BlockProcessCallbacks( + onPayloadProducerSelected: proc(): bool = + + # Send some transactions + let numTx = ws.getTransactionCountPerPayload() + for i in 0..= ws.wdForkHeight.uint64: + let wds = ws.wdHistory.getWithdrawals(bn) + expectedWithdrawalsRoot = some(calcWithdrawalsRoot(wds.list)) + + #r.ExpectationDescription = fmt.Sprintf(` + # Requested block %d to verify withdrawalsRoot with the + # following withdrawals: + # %s`, block, jsWithdrawals) + r.expectWithdrawalsRoot(h, expectedWithdrawalsRoot) + + # Verify on `latest` + let bnu = t.clMock.latestExecutedPayload.blockNumber.uint64 + let res = ws.wdHistory.verifyWithdrawals(bnu, none(UInt256), t.rpcClient) + testCond res.isOk: + error "verify wd error", msg=res.error diff --git a/hive_integration/nodocker/engine/withdrawals/wd_block_value_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_block_value_spec.nim new file mode 100644 index 000000000..bc46bb60f --- /dev/null +++ b/hive_integration/nodocker/engine/withdrawals/wd_block_value_spec.nim @@ -0,0 +1,43 @@ +import + stint, + chronicles, + eth/common/eth_types_rlp, + ./wd_base_spec, + ../test_env, + ../engine_client, + ../types, + ../../../nimbus/transaction + +type + BlockValueSpec* = ref object of WDBaseSpec + +proc execute*(ws: BlockValueSpec, t: TestEnv): bool = + WDBaseSpec(ws).skipBaseVerifications = true + testCond WDBaseSpec(ws).execute(t) + + # Get the latest block and the transactions included + var blk: EthBlock + let b = t.rpcClient.latestBlock(blk) + b.expectNoError() + + var totalValue: UInt256 + testCond blk.txs.len > 0: + error "No transactions included in latest block" + + for tx in blk.txs: + let txHash = rlpHash(tx) + let r = t.rpcClient.txReceipt(txHash) + r.expectNoError() + + let + rec = r.get + txTip = tx.effectiveGasTip(blk.header.baseFee) + + totalValue += txTip.uint64.u256 * rec.gasUsed.u256 + + doAssert(t.cLMock.latestBlockValue.isSome) + testCond totalValue == t.cLMock.latestBlockValue.get: + error "Unexpected block value returned on GetPayloadV2", + expect=totalValue, + get=t.cLMock.latestBlockValue.get + return true diff --git a/hive_integration/nodocker/engine/withdrawals/wd_history.nim b/hive_integration/nodocker/engine/withdrawals/wd_history.nim new file mode 100644 index 000000000..bcc4d1eb3 --- /dev/null +++ b/hive_integration/nodocker/engine/withdrawals/wd_history.nim @@ -0,0 +1,95 @@ +import + std/[tables, sets, strutils, math], + eth/common/eth_types, + json_rpc/[rpcclient], + stew/[byteutils, results], + ../engine_client + +type + Withdrawals* = ref object + list*: seq[Withdrawal] + + # Helper structure used to keep history of the amounts + # withdrawn to each test account.
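+  # Amounts are recorded in gwei, exactly as they appear in the `Withdrawal`
+  # records; `weiAmount` below converts them to wei when computing the
+  # expected account balances.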
+ WDHistory* = object + map: Table[uint64, Withdrawals] + +proc put*(wh: var WDHistory, blockNumber: uint64, wds: openArray[Withdrawal]) = + wh.map[blockNumber] = Withdrawals( + list: @wds + ) + +proc get*(wh: WDHistory, blockNumber: uint64): Result[seq[Withdrawal], string] = + let wds = wh.map.getOrDefault(blockNumber) + if wds.isNil: + return err("withdrawal not found in block " & $blockNumber) + ok(wds.list) + +# Helper types to convert gwei into wei more easily +func weiAmount(w: Withdrawal): UInt256 = + w.amount.u256 * (10 ^ 9).u256 + +# Gets an account expected value for a given block, taking into account all +# withdrawals that credited the account. +func getExpectedAccountBalance*(wh: WDHistory, account: EthAddress, blockNumber: uint64): UInt256 = + for b in 0..blockNumber: + let wds = wh.map.getOrDefault(b) + if wds.isNil: continue + for wd in wds.list: + if wd.address == account: + result += wd.weiAmount + +# Get a list of all addresses that were credited by withdrawals on a given block. +func getAddressesWithdrawnOnBlock*(wh: WDHistory, blockNumber: uint64): seq[EthAddress] = + var addressMap: HashSet[EthAddress] + let wds = wh.map.getOrDefault(blockNumber) + if wds.isNil.not: + for wd in wds.list: + addressMap.incl wd.address + + for address in addressMap: + result.add address + +# Get the withdrawals list for a given block. +func getWithdrawals*(wh: WDHistory, blockNumber: uint64): Withdrawals = + let wds = wh.map.getOrDefault(blockNumber) + if wds.isNil: + Withdrawals() + else: + wds + +# Get the withdrawn accounts list until a given block height. +func getWithdrawnAccounts*(wh: WDHistory, blockHeight: uint64): Table[EthAddress, UInt256] = + for blockNumber in 0..blockHeight: + let wds = wh.map.getOrDefault(blockNumber) + if wds.isNil: continue + for wd in wds.list: + result.withValue(wd.address, value) do: + value[] += wd.weiAmount + do: + result[wd.address] = wd.weiAmount + +# Verify all withdrawals on a client at a given height +proc verifyWithdrawals*(wh: WDHistory, blockNumber: uint64, rpcBlock: Option[UInt256], client: RpcClient): Result[void, string] = + let accounts = wh.getWithdrawnAccounts(blockNumber) + for account, expectedBalance in accounts: + let res = if rpcBlock.isSome: + client.balanceAt(account, rpcBlock.get) + else: + client.balanceAt(account) + res.expectBalanceEqual(account, expectedBalance) + + # All withdrawals account have a bytecode that unconditionally set the + # zero storage key to one on EVM execution. + # Withdrawals must not trigger EVM so we expect zero. + let s = if rpcBlock.isSome: + client.storageAt(account, 0.u256, rpcBlock.get) + else: + client.storageAt(account, 0.u256) + s.expectStorageEqual(account, 0.u256) + ok() + +# Create a new copy of the withdrawals history +func copy*(wh: WDHistory): WDHistory = + for k, v in wh.map: + result.map[k] = v diff --git a/hive_integration/nodocker/engine/withdrawals/wd_max_init_code_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_max_init_code_spec.nim new file mode 100644 index 000000000..79e9df113 --- /dev/null +++ b/hive_integration/nodocker/engine/withdrawals/wd_max_init_code_spec.nim @@ -0,0 +1,116 @@ +import + std/typetraits, + chronos, + chronicles, + eth/common/eth_types_rlp, + ./wd_base_spec, + ../test_env, + ../engine_client, + ../types, + ../helper, + ../../../nimbus/constants, + ../../../nimbus/rpc/execution_types + +# EIP-3860 Shanghai Tests: +# Send transactions overflowing the MAX_INITCODE_SIZE +# limit set in EIP-3860, before and after the Shanghai +# fork. 
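+# Under EIP-3860 the limit is MAX_INITCODE_SIZE = 2 * MAX_CODE_SIZE = 49152
+# bytes: creation transactions above that size are still valid before the
+# Shanghai fork, but must be rejected (and left out of built payloads) once
+# the fork is active.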
+type + MaxInitcodeSizeSpec* = ref object of WDBaseSpec + overflowMaxInitcodeTxCountBeforeFork*: uint64 + overflowMaxInitcodeTxCountAfterFork *: uint64 + +const + MAX_INITCODE_SIZE = EIP3860_MAX_INITCODE_SIZE + +proc execute*(ws: MaxInitcodeSizeSpec, t: TestEnv): bool = + testCond waitFor t.clMock.waitForTTD() + + var + invalidTxCreator = BigInitcodeTx( + initcodeLength: MAX_INITCODE_SIZE + 1, + gasLimit: 2000000, + ) + + validTxCreator = BigInitcodeTx( + initcodeLength: MAX_INITCODE_SIZE, + gasLimit: 2000000, + ) + + if ws.overflowMaxInitcodeTxCountBeforeFork > 0: + doAssert(ws.getPreWithdrawalsBlockCount > 0, "invalid test configuration") + for i in 0.. 0: + error "No max initcode txs included before Shanghai. Txs must have been included before the MAX_INITCODE_SIZE limit was enabled" + + # Create a payload, no txs should be included + pbRes = t.clMock.produceSingleBlock(BlockProcessCallbacks( + onGetPayload: proc(): bool = + testCond t.clMock.latestPayloadBuilt.transactions.len == 0: + error "Client included tx exceeding the MAX_INITCODE_SIZE in payload" + return true + )) + + testCond pbRes + + # Send transactions after the fork + for i in txIncluded.. latestPayloadNumber { + r.expectationDescription = fmt.Sprintf(` + Sent start=%d and count=%d to engine_getPayloadBodiesByRangeV1, latest known block is %d, hence an empty list is expected. + `, req.Start, req.Count, latestPayloadNumber) + r.expectPayloadBodiesCount(0) + } else { + var count = req.Count + if req.Start+req.Count-1 > latestPayloadNumber { + count = latestPayloadNumber - req.Start + 1 + } + r.expectationDescription = fmt.Sprintf("Sent engine_getPayloadBodiesByRange(start=%d, count=%d), latest payload number in canonical chain is %d", req.Start, req.Count, latestPayloadNumber) + r.expectPayloadBodiesCount(count) + for i := req.Start; i < req.Start+count; i++ { + p := payloadHistory[i] + + r.expectPayloadBody(i-req.Start, ExecutionPayloadBodyV1{ + Transactions: p.Transactions, + Withdrawals: p.Withdrawals, + }) + } + } +} + + + +func (req GetPayloadBodyRequestByHashIndex) Verify(reqIndex int, testEngine *test.TestEngineClient, payloadHistory clmock.ExecutableDataHistory) { + info "Starting GetPayloadBodyByHash request %d", reqIndex) + startTime := time.Now() + defer func() { + info "Ended GetPayloadBodyByHash request %d, %s", reqIndex, time.Since(startTime)) + }() + payloads := make([]ExecutableData, 0) + hashes := make([]common.Hash, 0) + if len(req.BlockNumbers) > 0 { + for _, n := range req.BlockNumbers { + if p, ok := payloadHistory[n]; ok { + payloads = append(payloads, p) + hashes = append(hashes, p.BlockHash) + } else { + # signal to request an unknown hash (random) + randHash := common.Hash{} + rand.Read(randHash[:]) + payloads = append(payloads, nil) + hashes = append(hashes, randHash) + } + } + } + if req.Start > 0 && req.End > 0 { + for n := req.Start; n <= req.End; n++ { + if p, ok := payloadHistory[n]; ok { + payloads = append(payloads, p) + hashes = append(hashes, p.BlockHash) + } else { + # signal to request an unknown hash (random) + randHash := common.Hash{} + rand.Read(randHash[:]) + payloads = append(payloads, nil) + hashes = append(hashes, randHash) + } + } + } + if len(payloads) == 0 { + panic("invalid test") + } + + r := testEngine.TestEngineGetPayloadBodiesByHashV1(hashes) + r.expectPayloadBodiesCount(uint64(len(payloads))) + for i, p := range payloads { + var expectedPayloadBody ExecutionPayloadBodyV1 + if p != nil { + expectedPayloadBody = ExecutionPayloadBodyV1{ + Transactions: p.Transactions, + 
Withdrawals: p.Withdrawals, + } + } + r.expectPayloadBody(uint64(i), expectedPayloadBody) + } + +} +]# + +proc execute*(ws: GetPayloadBodiesSpec, t: TestEnv): bool = + WDBaseSpec(ws).skipBaseVerifications = true + testCond WDBaseSpec(ws).execute(t) + +#[ + payloadHistory := t.clMock.ExecutedPayloadHistory + + testEngine := t.TestEngine + + if ws.GenerateSidechain { + + # First generate an extra payload on top of the canonical chain + # Generate more withdrawals + nextWithdrawals, _ := ws.GenerateWithdrawalsForBlock(payloadHistory.latestWithdrawalsIndex(), ws.getWithdrawalsStartAccount()) + + f := t.rpcClient.forkchoiceUpdatedV2( + &beacon.ForkchoiceStateV1{ + HeadBlockHash: t.clMock.latestHeader.Hash(), + }, + PayloadAttributes{ + Timestamp: t.clMock.latestHeader.Time + ws.getBlockTimeIncrements(), + Withdrawals: nextWithdrawals, + }, + ) + f.expectPayloadStatus(test.Valid) + + # Wait for payload to be built + time.Sleep(time.Second) + + # Get the next canonical payload + p := t.rpcClient.getPayloadV2(f.Response.PayloadID) + p.expectNoError() + nextCanonicalPayload := &p.Payload + + # Now we have an extra payload that follows the canonical chain, + # but we need a side chain for the test. + customizer := &helper.CustomPayloadData{ + Withdrawals: helper.RandomizeWithdrawalsOrder(t.clMock.latestExecutedPayload.Withdrawals), + } + sidechainCurrent, _, err := customizer.CustomizePayload(&t.clMock.latestExecutedPayload, t.clMock.latestPayloadAttributes.BeaconRoot) + if err != nil { + error "Error obtaining custom sidechain payload: %v", t.TestName, err) + } + customizer = &helper.CustomPayloadData{ + ParentHash: &sidechainCurrent.BlockHash, + Withdrawals: helper.RandomizeWithdrawalsOrder(nextCanonicalPayload.Withdrawals), + } + sidechainHead, _, err := customizer.CustomizePayload(nextCanonicalPayload, t.clMock.latestPayloadAttributes.BeaconRoot) + if err != nil { + error "Error obtaining custom sidechain payload: %v", t.TestName, err) + } + + # Send both sidechain payloads as engine_newPayloadV2 + n1 := t.rpcClient.newPayloadV2(sidechainCurrent) + n1.expectStatus(test.Valid) + n2 := t.rpcClient.newPayloadV2(sidechainHead) + n2.expectStatus(test.Valid) + } else if ws.AfterSync { + # Spawn a secondary client which will need to sync to the primary client + secondaryEngine, err := hive_rpc.HiveRPCEngineStarter{}.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles, t.Engine) + if err != nil { + error "Unable to spawn a secondary client: %v", t.TestName, err) + } + secondaryEngineTest := test.NewTestEngineClient(t, secondaryEngine) + t.clMock.AddEngineClient(secondaryEngine) + + loop: + for { + select { + case <-t.TimeoutContext.Done(): + error "Timeout while waiting for secondary client to sync", t.TestName) + case <-time.After(time.Second): + secondaryEngineTest.newPayloadV2( + &t.clMock.latestExecutedPayload, + ) + r := secondaryEngineTest.TestEngineForkchoiceUpdatedV2( + &t.clMock.latestForkchoice, + nil, + ) + if r.Response.PayloadStatus.Status == test.Valid { + break loop + } + if r.Response.PayloadStatus.Status == test.Invalid { + error "Syncing client rejected valid chain: %s", t.TestName, r.Response) + } + } + } + + # GetPayloadBodies will be sent to the secondary client + testEngine = secondaryEngineTest + } + + # Now send the range request, which should ignore any sidechain + if ws.Parallel { + wg := new(sync.WaitGroup) + type RequestIndex struct { + Request GetPayloadBodyRequest + Index int + } + workChan := make(chan *RequestIndex) + workers := 16 + wg.Add(workers) + 
for w := 0; w < workers; w++ { + go func() { + defer wg.Done() + for req := range workChan { + req.Request.Verify(req.Index, testEngine, payloadHistory) + } + }() + } + repeat := 1 + if ws.RequestsRepeat > 0 { + repeat = ws.RequestsRepeat + } + for j := 0; j < repeat; j++ { + for i, req := range ws.getPayloadBodiesRequests { + workChan <- &RequestIndex{ + Request: req, + Index: i + (j * repeat), + } + } + } + + close(workChan) + wg.Wait() + } else { + for i, req := range ws.getPayloadBodiesRequests { + req.Verify(i, testEngine, payloadHistory) +]# diff --git a/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim new file mode 100644 index 000000000..25c5199cf --- /dev/null +++ b/hive_integration/nodocker/engine/withdrawals/wd_reorg_spec.nim @@ -0,0 +1,323 @@ +import + stint, + chronos, + chronicles, + ./wd_base_spec, + ../test_env, + ../engine_client, + ../types + +# Withdrawals re-org spec: +# Specifies a withdrawals test where the withdrawals re-org can happen +# even to a point before withdrawals were enabled, or simply to a previous +# withdrawals block. +type + ReorgSpec* = ref object of WDBaseSpec + reOrgBlockCount* : uint64 # How many blocks the re-org will replace, including the head + reOrgViaSync* : bool # Whether the client should fetch the sidechain by syncing from the secondary client + sidechainTimeIncrements*: uint64 + slotsToSafe* : UInt256 + slotsToFinalized* : UInt256 + timeoutSeconds* : int + +#[ +func (ws *WithdrawalsReorgSpec) GetSidechainSplitHeight() uint64 { + if ws.ReOrgBlockCount > ws.getTotalPayloadCount() { + panic("invalid payload/re-org configuration") + + return ws.getTotalPayloadCount() + 1 - ws.ReOrgBlockCount + +func (ws *WithdrawalsReorgSpec) GetSidechainBlockTimeIncrements() uint64 { + if ws.SidechainTimeIncrements == 0 { + return ws.getBlockTimeIncrements() + + return ws.SidechainTimeIncrements + +func (ws *WithdrawalsReorgSpec) GetSidechainWithdrawalsForkHeight() uint64 { + if ws.getSidechainBlockTimeIncrements() != ws.getBlockTimeIncrements() { + # Block timestamp increments in both chains are different so need to calculate different heights, only if split happens before fork + if ws.getSidechainSplitHeight() == 0 { + # We cannot split by having two different genesis blocks. 
+ panic("invalid sidechain split height") + + if ws.getSidechainSplitHeight() <= ws.WithdrawalsForkHeight { + # We need to calculate the height of the fork on the sidechain + sidechainSplitBlockTimestamp := ((ws.getSidechainSplitHeight() - 1) * ws.getBlockTimeIncrements()) + remainingTime := (ws.getWithdrawalsGenesisTimeDelta() - sidechainSplitBlockTimestamp) + if remainingTime == 0 { + return ws.getSidechainSplitHeight() + + return ((remainingTime - 1) / ws.SidechainTimeIncrements) + ws.getSidechainSplitHeight() + + return ws.WithdrawalsForkHeight +]# + +proc execute*(ws: ReorgSpec, t: TestEnv): bool = + testCond waitFor t.clMock.waitForTTD() + + return true +#[ + # Spawn a secondary client which will produce the sidechain + secondaryEngine, err := hive_rpc.HiveRPCEngineStarter{}.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles, t.Engine) + if err != nil { + error "Unable to spawn a secondary client: %v", t.TestName, err) + } + secondaryEngineTest := test.NewTestEngineClient(t, secondaryEngine) + # t.clMock.AddEngineClient(secondaryEngine) + + var ( + canonicalStartAccount = big.NewInt(0x1000) + canonicalNextIndex = uint64(0) + sidechainStartAccount = new(big.Int).SetBit(common.Big0, 160, 1) + sidechainNextIndex = uint64(0) + sidechainwdHistory = make(wdHistory) + sidechain = make(map[uint64]*typ.ExecutableData) + sidechainPayloadId *beacon.PayloadID + ) + + # Sidechain withdraws on the max account value range 0xffffffffffffffffffffffffffffffffffffffff + sidechainStartAccount.Sub(sidechainStartAccount, big.NewInt(int64(ws.getWithdrawableAccountCount())+1)) + + t.clMock.ProduceBlocks(int(ws.getPreWithdrawalsBlockCount()+ws.WithdrawalsBlockCount), clmock.BlockProcessCallbacks{ + OnPayloadProducerSelected: proc(): bool = + t.clMock.NextWithdrawals = nil + + if t.clMock.CurrentPayloadNumber >= ws.WithdrawalsForkHeight { + # Prepare some withdrawals + t.clMock.NextWithdrawals, canonicalNextIndex = ws.GenerateWithdrawalsForBlock(canonicalNextIndex, canonicalStartAccount) + ws.wdHistory[t.clMock.CurrentPayloadNumber] = t.clMock.NextWithdrawals + } + + if t.clMock.CurrentPayloadNumber >= ws.getSidechainSplitHeight() { + # We have split + if t.clMock.CurrentPayloadNumber >= ws.getSidechainWithdrawalsForkHeight() { + # And we are past the withdrawals fork on the sidechain + sidechainwdHistory[t.clMock.CurrentPayloadNumber], sidechainNextIndex = ws.GenerateWithdrawalsForBlock(sidechainNextIndex, sidechainStartAccount) + } # else nothing to do + } else { + # We have not split + sidechainwdHistory[t.clMock.CurrentPayloadNumber] = t.clMock.NextWithdrawals + sidechainNextIndex = canonicalNextIndex + } + + }, + OnRequestNextPayload: proc(): bool = + # Send transactions to be included in the payload + txs, err := helper.SendNextTransactions( + t.TestContext, + t.clMock.NextBlockProducer, + &helper.BaseTransactionCreator{ + Recipient: &globals.PrevRandaoContractAddr, + Amount: common.Big1, + Payload: nil, + TxType: t.TestTransactionType, + GasLimit: 75000, + }, + ws.getTransactionCountPerPayload(), + ) + if err != nil { + error "Error trying to send transactions: %v", t.TestName, err) + } + + # Error will be ignored here since the tx could have been already relayed + secondaryEngine.SendTransactions(t.TestContext, txs...) 
+ + if t.clMock.CurrentPayloadNumber >= ws.getSidechainSplitHeight() { + # Also request a payload from the sidechain + fcU := beacon.ForkchoiceStateV1{ + HeadBlockHash: t.clMock.latestForkchoice.HeadBlockHash, + } + + if t.clMock.CurrentPayloadNumber > ws.getSidechainSplitHeight() { + if lastSidePayload, ok := sidechain[t.clMock.CurrentPayloadNumber-1]; !ok { + panic("sidechain payload not found") + } else { + fcU.HeadBlockHash = lastSidePayload.BlockHash + } + } + + var version int + pAttributes := typ.PayloadAttributes{ + Random: t.clMock.latestPayloadAttributes.Random, + SuggestedFeeRecipient: t.clMock.latestPayloadAttributes.SuggestedFeeRecipient, + } + if t.clMock.CurrentPayloadNumber > ws.getSidechainSplitHeight() { + pAttributes.Timestamp = sidechain[t.clMock.CurrentPayloadNumber-1].Timestamp + uint64(ws.getSidechainBlockTimeIncrements()) + } else if t.clMock.CurrentPayloadNumber == ws.getSidechainSplitHeight() { + pAttributes.Timestamp = t.clMock.latestHeader.Time + uint64(ws.getSidechainBlockTimeIncrements()) + } else { + pAttributes.Timestamp = t.clMock.latestPayloadAttributes.Timestamp + } + if t.clMock.CurrentPayloadNumber >= ws.getSidechainWithdrawalsForkHeight() { + # Withdrawals + version = 2 + pAttributes.Withdrawals = sidechainwdHistory[t.clMock.CurrentPayloadNumber] + } else { + # No withdrawals + version = 1 + } + + info "Requesting sidechain payload %d: %v", t.TestName, t.clMock.CurrentPayloadNumber, pAttributes) + + r := secondaryEngineTest.forkchoiceUpdated(&fcU, &pAttributes, version) + r.expectNoError() + r.expectPayloadStatus(test.Valid) + if r.Response.PayloadID == nil { + error "Unable to get a payload ID on the sidechain", t.TestName) + } + sidechainPayloadId = r.Response.PayloadID + } + }, + OnGetPayload: proc(): bool = + var ( + version int + payload *typ.ExecutableData + ) + if t.clMock.CurrentPayloadNumber >= ws.getSidechainWithdrawalsForkHeight() { + version = 2 + } else { + version = 1 + } + if t.clMock.latestPayloadBuilt.Number >= ws.getSidechainSplitHeight() { + # This payload is built by the secondary client, hence need to manually fetch it here + r := secondaryEngineTest.getPayload(sidechainPayloadId, version) + r.expectNoError() + payload = &r.Payload + sidechain[payload.Number] = payload + } else { + # This block is part of both chains, simply forward it to the secondary client + payload = &t.clMock.latestPayloadBuilt + } + r := secondaryEngineTest.newPayload(payload, nil, nil, version) + r.expectStatus(test.Valid) + p := secondaryEngineTest.forkchoiceUpdated( + &beacon.ForkchoiceStateV1{ + HeadBlockHash: payload.BlockHash, + }, + nil, + version, + ) + p.expectPayloadStatus(test.Valid) + }, + }) + + sidechainHeight := t.clMock.latestExecutedPayload.Number + + if ws.WithdrawalsForkHeight < ws.getSidechainWithdrawalsForkHeight() { + # This means the canonical chain forked before the sidechain. + # Therefore we need to produce more sidechain payloads to reach + # at least`ws.WithdrawalsBlockCount` withdrawals payloads produced on + # the sidechain. 
+ for i := uint64(0); i < ws.getSidechainWithdrawalsForkHeight()-ws.WithdrawalsForkHeight; i++ { + sidechainwdHistory[sidechainHeight+1], sidechainNextIndex = ws.GenerateWithdrawalsForBlock(sidechainNextIndex, sidechainStartAccount) + pAttributes := typ.PayloadAttributes{ + Timestamp: sidechain[sidechainHeight].Timestamp + ws.getSidechainBlockTimeIncrements(), + Random: t.clMock.latestPayloadAttributes.Random, + SuggestedFeeRecipient: t.clMock.latestPayloadAttributes.SuggestedFeeRecipient, + Withdrawals: sidechainwdHistory[sidechainHeight+1], + } + r := secondaryEngineTest.forkchoiceUpdatedV2(&beacon.ForkchoiceStateV1{ + HeadBlockHash: sidechain[sidechainHeight].BlockHash, + }, &pAttributes) + r.expectPayloadStatus(test.Valid) + time.Sleep(time.Second) + p := secondaryEngineTest.getPayloadV2(r.Response.PayloadID) + p.expectNoError() + s := secondaryEngineTest.newPayloadV2(&p.Payload) + s.expectStatus(test.Valid) + q := secondaryEngineTest.forkchoiceUpdatedV2( + &beacon.ForkchoiceStateV1{ + HeadBlockHash: p.Payload.BlockHash, + }, + nil, + ) + q.expectPayloadStatus(test.Valid) + sidechainHeight++ + sidechain[sidechainHeight] = &p.Payload + } + } + + # Check the withdrawals on the latest + ws.wdHistory.VerifyWithdrawals( + sidechainHeight, + nil, + t.TestEngine, + ) + + if ws.ReOrgViaSync { + # Send latest sidechain payload as NewPayload + FCU and wait for sync + loop: + for { + r := t.rpcClient.newPayloadV2(sidechain[sidechainHeight]) + r.expectNoError() + p := t.rpcClient.forkchoiceUpdatedV2( + &beacon.ForkchoiceStateV1{ + HeadBlockHash: sidechain[sidechainHeight].BlockHash, + }, + nil, + ) + p.expectNoError() + if p.Response.PayloadStatus.Status == test.Invalid { + error "Primary client invalidated side chain", t.TestName) + } + select { + case <-t.TimeoutContext.Done(): + error "Timeout waiting for sync", t.TestName) + case <-time.After(time.Second): + b := t.rpcClient.BlockByNumber(nil) + if b.Block.Hash() == sidechain[sidechainHeight].BlockHash { + # sync successful + break loop + } + } + } + } else { + # Send all payloads one by one to the primary client + for payloadNumber := ws.getSidechainSplitHeight(); payloadNumber <= sidechainHeight; payloadNumber++ { + payload, ok := sidechain[payloadNumber] + if !ok { + error "Invalid payload %d requested.", t.TestName, payloadNumber) + } + var version int + if payloadNumber >= ws.getSidechainWithdrawalsForkHeight() { + version = 2 + } else { + version = 1 + } + info "Sending sidechain payload %d, hash=%s, parent=%s", t.TestName, payloadNumber, payload.BlockHash, payload.ParentHash) + r := t.rpcClient.newPayload(payload, nil, nil, version) + r.expectStatusEither(test.Valid, test.Accepted) + p := t.rpcClient.forkchoiceUpdated( + &beacon.ForkchoiceStateV1{ + HeadBlockHash: payload.BlockHash, + }, + nil, + version, + ) + p.expectPayloadStatus(test.Valid) + } + } + + # Verify withdrawals changed + sidechainwdHistory.VerifyWithdrawals( + sidechainHeight, + nil, + t.TestEngine, + ) + # Verify all balances of accounts in the original chain didn't increase + # after the fork. + # We are using different accounts credited between the canonical chain + # and the fork. + # We check on `latest`. 
+ ws.wdHistory.VerifyWithdrawals( + ws.WithdrawalsForkHeight-1, + nil, + t.TestEngine, + ) + + # Re-Org back to the canonical chain + r := t.rpcClient.forkchoiceUpdatedV2(&beacon.ForkchoiceStateV1{ + HeadBlockHash: t.clMock.latestPayloadBuilt.BlockHash, + }, nil) + r.expectPayloadStatus(test.Valid) +]# diff --git a/hive_integration/nodocker/engine/withdrawals/wd_sync_spec.nim b/hive_integration/nodocker/engine/withdrawals/wd_sync_spec.nim new file mode 100644 index 000000000..dfce7376e --- /dev/null +++ b/hive_integration/nodocker/engine/withdrawals/wd_sync_spec.nim @@ -0,0 +1,53 @@ +import + chronicles, + ./wd_base_spec, + ../test_env, + ../engine_client, + ../types + +# Withdrawals sync spec: +# Specifies a withdrawals test where the withdrawals happen and then a +# client needs to sync and apply the withdrawals. +type + SyncSpec* = ref object of WDBaseSpec + syncSteps*: int # Sync block chunks that will be passed as head through FCUs to the syncing client + syncShouldFail*: bool + timeoutSeconds*: int + +proc execute*(ws: SyncSpec, t: TestEnv): bool = + # Do the base withdrawal test first, skipping base verifications + WDBaseSpec(ws).skipBaseVerifications = true + testCond WDBaseSpec(ws).execute(t) + +#[ + # Spawn a secondary client which will need to sync to the primary client + secondaryEngine, err := hive_rpc.HiveRPCEngineStarter{}.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles, t.Engine) + if err != nil { + error "Unable to spawn a secondary client: %v", t.TestName, err) + + secondaryEngineTest := test.NewTestEngineClient(t, secondaryEngine) + t.clMock.AddEngineClient(secondaryEngine) + + if ws.SyncSteps > 1 { + # TODO + else: + # Send the FCU to trigger sync on the secondary client + loop: + for { + select { + case <-t.TimeoutContext.Done(): + error "Timeout while waiting for secondary client to sync", t.TestName) + case <-time.After(time.Second): + secondaryEngineTest.TestEngineNewPayloadV2( + &t.clMock.latestExecutedPayload, + r := secondaryEngineTest.TestEngineForkchoiceUpdatedV2( + &t.clMock.latestForkchoice, + nil, + if r.Response.PayloadStatus.Status == test.Valid { + break loop + if r.Response.PayloadStatus.Status == test.Invalid { + error "Syncing client rejected valid chain: %s", t.TestName, r.Response) + + ws.wdHistory.VerifyWithdrawals(t.clMock.latestHeader.Number.Uint64(), nil, secondaryEngineTest) +]# + return true
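+
+# NOTE: the Go logic kept in the comment block above is not ported yet. A
+# minimal sketch of the sync wait loop, assuming a hypothetical
+# `setupSecondaryClient` helper and the `newPayloadV2`/`forkchoiceUpdatedV2`
+# wrappers from engine_client (exact signatures to be confirmed when porting),
+# could look roughly like:
+#
+#   proc waitForSecondarySync(t: TestEnv): bool =
+#     let sec = t.setupSecondaryClient()  # hypothetical helper
+#     while true:
+#       # Keep feeding the primary chain's head until the secondary reports VALID
+#       discard sec.newPayloadV2(t.clMock.latestExecutedPayload)
+#       let r = sec.forkchoiceUpdatedV2(t.clMock.latestForkchoice)
+#       if r.isOk:
+#         case r.get.payloadStatus.status
+#         of PayloadExecutionStatus.valid: return true
+#         of PayloadExecutionStatus.invalid:
+#           error "Syncing client rejected valid chain"
+#           return false
+#         else: discard
+#       waitFor sleepAsync(1.seconds)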