hive sim: Prepare Cancun engine API test framework (#1809)
parent c005281391, commit ec08907f05
@ -0,0 +1,70 @@
import
  std/[options],
  eth/common,
  ./clmock,
  ./types,
  ../../../tools/common/helpers,
  ../../../nimbus/common/chain_config

func getBlockTimeIncrements*(s: BaseSpec): int =
  if s.blockTimestampIncrement == 0:
    return 1
  return s.blockTimestampIncrement

proc configureCLMock*(s: BaseSpec, cl: CLMocker) =
  if s.slotsToSafe != 0:
    cl.slotsToSafe = s.slotsToSafe

  if s.slotsToFinalized != 0:
    cl.slotsToFinalized = s.slotsToFinalized

  if s.safeSlotsToImportOptimistically != 0:
    cl.safeSlotsToImportOptimistically = s.safeSlotsToImportOptimistically

  cl.blockTimestampIncrement = some(s.getBlockTimeIncrements())

func getMainFork*(s: BaseSpec): string =
  let mainFork = s.mainFork
  if mainFork == "":
    return ForkParis
  return mainFork

func getGenesisTimestamp*(s: BaseSpec): uint64 =
  var genesisTimestamp = GenesisTimestamp
  if s.genesisTimestamp != 0:
    genesisTimestamp = s.genesisTimestamp
  return genesisTimestamp.uint64

func getBlockTime*(s: BaseSpec, blockNumber: uint64): uint64 =
  return s.getGenesisTimestamp() + blockNumber*s.getBlockTimeIncrements().uint64

func getForkTime*(s: BaseSpec): uint64 =
  var forkTime = s.forkTime
  if s.forkHeight > 0:
    forkTime = s.getBlockTime(s.forkHeight.uint64)
  return forkTime

func getForkConfig*(s: BaseSpec): ChainConfig =
  let
    forkTime = s.getForkTime()
    previousForkTime = s.previousForkTime
    mainFork = s.getMainFork()
    forkConfig = getChainConfig(mainFork)
    genesisTimestamp = s.getGenesisTimestamp()

  doAssert(previousForkTime <= forkTime,
    "previous fork time cannot be greater than fork time")

  if mainFork == ForkParis:
    let cond = forkTime > genesisTimestamp or previousForkTime != 0
    doAssert(not cond, "Cannot configure a fork before Paris, skip test")
  elif mainFork == ForkShanghai:
    doAssert(previousForkTime == 0, "Cannot configure a fork before Shanghai")
    forkConfig.shanghaiTime = some(forkTime.EthTime)
  elif mainFork == ForkCancun:
    forkConfig.shanghaiTime = some(previousForkTime.EthTime)
    forkConfig.cancunTime = some(forkTime.EthTime)
  else:
    doAssert(false, "unknown fork: " & mainFork)

  return forkConfig
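# Usage sketch (illustrative, not part of the commit): deriving a Cancun
# ChainConfig from a spec. The field values below are hypothetical.
when isMainModule:
  let spec = BaseSpec(
    mainFork: ForkCancun,
    forkHeight: 2,              # Cancun activates at block height 2
    blockTimestampIncrement: 3) # 3 seconds per block
  let conf = spec.getForkConfig()
  # Shanghai time falls back to previousForkTime (0); Cancun time is
  # genesis timestamp + forkHeight * block time increment.
  doAssert conf.cancunTime.get == (spec.getGenesisTimestamp() + 6).EthTime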
@ -0,0 +1,144 @@
import
  eth/common/eth_types,
  stint,
  kzg4844/kzg_ex as kzg,
  stew/endians2,
  nimcrypto/sha2,
  stew/results,
  ../../../nimbus/core/eip4844

type
  BlobID* = uint64
  BlobIDs* = seq[BlobID]

  BlobCommitment* = object
    blob*: kzg.KzgBlob
    commitment*: kzg.KZGCommitment

  BlobTxWrapData* = object
    hashes*: seq[Hash256]
    blobs*: seq[kzg.KzgBlob]
    commitments*: seq[kzg.KZGCommitment]
    proofs*: seq[kzg.KzgProof]

func getBlobList*(startId: BlobID, count: int): BlobIDs =
  result = newSeq[BlobID](count)
  for i in 0..<count:
    result[i] = startId + BlobID(i)

func getBlobListByIndex*(startIndex: BlobID, endIndex: BlobID): BlobIDs =
  var count = uint64(0)
  if endIndex > startIndex:
    count = uint64(endIndex - startIndex + 1)
  else:
    count = uint64(startIndex - endIndex + 1)

  result = newSeq[BlobID](count)
  if endIndex > startIndex:
    for i in 0..<count:
      result[i] = startIndex + BlobID(i)
  else:
    # descending list from startIndex down to endIndex
    for i in 0..<count:
      result[i] = startIndex - BlobID(i)
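# Usage sketch (illustrative): both helpers produce inclusive ID ranges,
# ascending or descending.
when isMainModule:
  doAssert getBlobList(BlobID(10), 3) == @[BlobID(10), BlobID(11), BlobID(12)]
  doAssert getBlobListByIndex(BlobID(12), BlobID(10)) == @[BlobID(12), BlobID(11), BlobID(10)]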
func verifyBlob*(blobId: BlobID, blob: kzg.KzgBlob): bool =
  if blobId == 0:
    # Blob zero is empty blob
    var emptyFieldElem: kzg.KzgBlob
    return emptyFieldElem == blob

  # Check the blob against the deterministic data
  let blobIdBytes = toBytesBE blobId

  # First 32 bytes are the hash of the blob ID
  var currentHashed = sha256.digest(blobIdBytes)

  for chunkIdx in 0..<FIELD_ELEMENTS_PER_BLOB:
    var expectedFieldElem = currentHashed.data

    # Check that no 32 bytes chunks are greater than the BLS modulus
    for i in 0..<32:
      # blobByteIdx = 32 - i - 1
      let blobByteIdx = i
      if expectedFieldElem[blobByteIdx] < BLS_MODULUS[i]:
        # done with this field element
        break
      elif expectedFieldElem[blobByteIdx] >= BLS_MODULUS[i]:
        if BLS_MODULUS[i] > 0:
          # This chunk is greater than the modulus, and we can reduce it in this byte position
          expectedFieldElem[blobByteIdx] = BLS_MODULUS[i] - 1
          # done with this field element
          break
        else:
          # This chunk is greater than the modulus, but we can't reduce it in this
          # byte position, so we will try in the next byte position
          expectedFieldElem[blobByteIdx] = BLS_MODULUS[i]

    if not equalMem(blob[chunkIdx*32].unsafeAddr, expectedFieldElem[0].addr, 32):
      return false

    # Hash the current hash
    currentHashed = sha256.digest(currentHashed.data)

  return true

proc fillBlob(blobId: BlobID): KzgBlob =
  if blobId == 0:
    # Blob zero is empty blob, so leave as is
    return

  # Fill the blob with deterministic data
  let blobIdBytes = toBytesBE blobId

  # First 32 bytes are the hash of the blob ID
  var currentHashed = sha256.digest(blobIdBytes)

  for chunkIdx in 0..<FIELD_ELEMENTS_PER_BLOB:
    copyMem(result[chunkIdx*32].addr, currentHashed.data[0].addr, 32)

    # Check that no 32 bytes chunks are greater than the BLS modulus
    for i in 0..<32:
      # blobByteIdx = ((chunkIdx + 1) * 32) - i - 1
      let blobByteIdx = (chunkIdx * 32) + i
      if result[blobByteIdx] < BLS_MODULUS[i]:
        # go to next chunk
        break
      elif result[blobByteIdx] >= BLS_MODULUS[i]:
        if BLS_MODULUS[i] > 0:
          # This chunk is greater than the modulus, and we can reduce it in this byte position
          result[blobByteIdx] = BLS_MODULUS[i] - 1
          # go to next chunk
          break
        else:
          # This chunk is greater than the modulus, but we can't reduce it in this
          # byte position, so we will try in the next byte position
          result[blobByteIdx] = BLS_MODULUS[i]

    # Hash the current hash
    currentHashed = sha256.digest(currentHashed.data)

proc generateBlob(blobId: BlobID): BlobCommitment =
  result.blob = blobId.fillBlob()
  let res = blobToKzgCommitment(result.blob)
  if res.isErr:
    doAssert(false, res.error)
  result.commitment = res.get

proc getVersionedHash*(blobId: BlobID, commitmentVersion: byte): Hash256 =
  let res = blobId.generateBlob()
  result = sha256.digest(res.commitment)
  result.data[0] = commitmentVersion

proc blobDataGenerator*(startBlobId: BlobID, blobCount: int): BlobTxWrapData =
  result.blobs = newSeq[kzg.KzgBlob](blobCount)
  result.commitments = newSeq[kzg.KZGCommitment](blobCount)
  result.hashes = newSeq[Hash256](blobCount)
  result.proofs = newSeq[kzg.KzgProof](blobCount)

  for i in 0..<blobCount:
    let res = generateBlob(startBlobId + BlobID(i))
    result.blobs[i] = res.blob
    result.commitments[i] = res.commitment
    result.hashes[i] = kzgToVersionedHash(result.commitments[i])
    let z = computeBlobKzgProof(result.blobs[i], result.commitments[i])
    if z.isErr:
      doAssert(false, z.error)
    result.proofs[i] = z.get()
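# Usage sketch (illustrative): generate wrap data for two consecutive blob IDs
# and check each blob against its deterministic contents. Assumes the KZG
# trusted setup has already been loaded by the surrounding test harness.
when isMainModule:
  let wrap = blobDataGenerator(BlobID(1), 2)
  for i in 0..<wrap.blobs.len:
    doAssert verifyBlob(BlobID(1) + BlobID(i), wrap.blobs[i])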
@ -0,0 +1,683 @@
import
  std/[options, strutils, typetraits, random],
  nimcrypto/sysrand,
  stew/byteutils,
  ./blobs,
  ../tx_sender,
  ../../../../nimbus/constants,
  ../../../../nimbus/utils/utils,
  ../../../../nimbus/common as nimbus_common,
  ../../../../nimbus/beacon/web3_eth_conv,
  ../../../../nimbus/beacon/payload_conv,
  ../../../../nimbus/beacon/execution_types

type
  EngineAPIVersionResolver* = ref object of RootRef
    com: CommonRef

method setEngineAPIVersionResolver*(cust: EngineAPIVersionResolver, v: CommonRef) {.base.} =
  cust.com = v

method forkchoiceUpdatedVersion*(cust: EngineAPIVersionResolver,
    headTimestamp: uint64, payloadAttributesTimestamp: Option[uint64]): Version {.base.} =
  let ts = if payloadAttributesTimestamp.isNone: headTimestamp.EthTime
           else: payloadAttributesTimestamp.get().EthTime
  if cust.com.isCancunOrLater(ts):
    Version.V3
  elif cust.com.isShanghaiOrLater(ts):
    Version.V2
  else:
    Version.V1

method newPayloadVersion*(cust: EngineAPIVersionResolver, timestamp: uint64): Version {.base.} =
  let ts = timestamp.EthTime
  if cust.com.isCancunOrLater(ts):
    Version.V3
  elif cust.com.isShanghaiOrLater(ts):
    Version.V2
  else:
    Version.V1

method getPayloadVersion*(cust: EngineAPIVersionResolver, timestamp: uint64): Version {.base.} =
  let ts = timestamp.EthTime
  if cust.com.isCancunOrLater(ts):
    Version.V3
  elif cust.com.isShanghaiOrLater(ts):
    Version.V2
  else:
    Version.V1
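# Usage sketch (illustrative): every resolver method maps a timestamp to the
# engine API version of the fork active at that time, so a Cancun-era
# timestamp yields V3 and a pre-Shanghai one yields V1. `com` (a CommonRef
# from the test environment) and `cancunTime` are assumed names.
when isMainModule:
  let resolver = EngineAPIVersionResolver()
  resolver.setEngineAPIVersionResolver(com)
  doAssert resolver.newPayloadVersion(cancunTime) == Version.V3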
type
  GetPayloadCustomizer* = ref object of EngineAPIVersionResolver

method getPayloadID*(cust: GetPayloadCustomizer,
    basePayloadID: PayloadID): PayloadID {.base.} =
  doAssert(false, "getPayloadID unimplemented")

method getExpectedError*(cust: GetPayloadCustomizer): int {.base.} =
  doAssert(false, "getExpectedError unimplemented")

type
  BaseGetPayloadCustomizer* = ref object of GetPayloadCustomizer
    customPayloadID: Option[PayloadID]
    expectedError  : int

method getPayloadID(cust: BaseGetPayloadCustomizer,
    basePayloadID: PayloadID): PayloadID =
  if cust.customPayloadID.isSome:
    return cust.customPayloadID.get
  return basePayloadID

method getExpectedError(cust: BaseGetPayloadCustomizer): int =
  cust.expectedError

type
  UpgradegetPayloadVersion* = ref object of GetPayloadCustomizer

method getPayloadVersion(cust: UpgradegetPayloadVersion, timestamp: uint64): Version =
  let version = procCall getPayloadVersion(cust.GetPayloadCustomizer, timestamp)
  doAssert(version != Version.high, "cannot upgrade version " & $Version.high)
  version.succ

type
  DowngradegetPayloadVersion* = ref object of GetPayloadCustomizer

method getPayloadVersion(cust: DowngradegetPayloadVersion, timestamp: uint64): Version =
  let version = procCall getPayloadVersion(cust.GetPayloadCustomizer, timestamp)
  doAssert(version != Version.V1, "cannot downgrade version 1")
  version.pred

type
  PayloadAttributesCustomizer* = ref object of GetPayloadCustomizer

method getPayloadAttributes*(cust: PayloadAttributesCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes {.base.} =
  doAssert(false, "getPayloadAttributes unimplemented")

type
  BasePayloadAttributesCustomizer* = ref object of PayloadAttributesCustomizer
    timestamp            : Option[uint64]
    prevRandao           : Option[common.Hash256]
    suggestedFeeRecipient: Option[common.EthAddress]
    withdrawals          : Option[seq[Withdrawal]]
    removeWithdrawals    : bool
    beaconRoot           : Option[common.Hash256]
    removeBeaconRoot     : bool

method getPayloadAttributes(cust: BasePayloadAttributesCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes =
  var customPayloadAttributes = PayloadAttributes(
    timestamp: basePayloadAttributes.timestamp,
    prevRandao: basePayloadAttributes.prevRandao,
    suggestedFeeRecipient: basePayloadAttributes.suggestedFeeRecipient,
    withdrawals: basePayloadAttributes.withdrawals,
    parentBeaconBlockRoot: basePayloadAttributes.parentBeaconBlockRoot,
  )

  if cust.timestamp.isSome:
    customPayloadAttributes.timestamp = w3Qty cust.timestamp.get

  if cust.prevRandao.isSome:
    customPayloadAttributes.prevRandao = w3Hash cust.prevRandao.get

  if cust.suggestedFeeRecipient.isSome:
    customPayloadAttributes.suggestedFeeRecipient = w3Addr cust.suggestedFeeRecipient.get

  if cust.removeWithdrawals:
    customPayloadAttributes.withdrawals = none(seq[WithdrawalV1])
  elif cust.withdrawals.isSome:
    customPayloadAttributes.withdrawals = w3Withdrawals cust.withdrawals

  if cust.removeBeaconRoot:
    customPayloadAttributes.parentBeaconBlockRoot = none(Web3Hash)
  elif cust.beaconRoot.isSome:
    customPayloadAttributes.parentBeaconBlockRoot = w3Hash cust.beaconRoot

  return customPayloadAttributes
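# Usage sketch (illustrative): drop withdrawals from otherwise-valid payload
# attributes; `baseAttrs` stands in for attributes taken from the CL mock.
when isMainModule:
  let cust = BasePayloadAttributesCustomizer(removeWithdrawals: true)
  let attrs = cust.getPayloadAttributes(baseAttrs)
  doAssert attrs.withdrawals.isNone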
type
  TimestampDeltaPayloadAttributesCustomizer* = ref object of BasePayloadAttributesCustomizer
    timestampDelta: uint64

method getPayloadAttributes(cust: TimestampDeltaPayloadAttributesCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes =
  var customPayloadAttributes = procCall getPayloadAttributes(cust.BasePayloadAttributesCustomizer, basePayloadAttributes)
  customPayloadAttributes.timestamp = w3Qty(customPayloadAttributes.timestamp, cust.timestampDelta)
  return customPayloadAttributes

type
  ForkchoiceUpdatedCustomizer* = ref object of BasePayloadAttributesCustomizer

method getForkchoiceState*(cust: ForkchoiceUpdatedCustomizer,
    baseForkchoiceUpdate: ForkchoiceStateV1): ForkchoiceStateV1 {.base.} =
  doAssert(false, "getForkchoiceState unimplemented")

method getExpectInvalidStatus*(cust: ForkchoiceUpdatedCustomizer): bool {.base.} =
  doAssert(false, "getExpectInvalidStatus unimplemented")

# Customizer that makes no modifications to the forkchoice directive call.
# Used as base to other customizers.
type
  BaseForkchoiceUpdatedCustomizer* = ref object of ForkchoiceUpdatedCustomizer
    expectedError      : int
    expectInvalidStatus: bool

method getPayloadAttributes(cust: BaseForkchoiceUpdatedCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes =
  var customPayloadAttributes = procCall getPayloadAttributes(cust.BasePayloadAttributesCustomizer, basePayloadAttributes)
  return customPayloadAttributes

method getForkchoiceState(cust: BaseForkchoiceUpdatedCustomizer, baseForkchoiceUpdate: ForkchoiceStateV1): ForkchoiceStateV1 =
  return baseForkchoiceUpdate

method getExpectedError(cust: BaseForkchoiceUpdatedCustomizer): int =
  return cust.expectedError

method getExpectInvalidStatus(cust: BaseForkchoiceUpdatedCustomizer): bool =
  return cust.expectInvalidStatus

# Customizer that upgrades the version of the forkchoice directive call to the next version.
type
  UpgradeforkchoiceUpdatedVersion* = ref object of BaseForkchoiceUpdatedCustomizer

method forkchoiceUpdatedVersion(cust: UpgradeforkchoiceUpdatedVersion, headTimestamp: uint64, payloadAttributesTimestamp: Option[uint64]): Version =
  let version = procCall forkchoiceUpdatedVersion(EngineAPIVersionResolver(cust), headTimestamp, payloadAttributesTimestamp)
  doAssert(version != Version.high, "cannot upgrade version " & $Version.high)
  version.succ

# Customizer that downgrades the version of the forkchoice directive call to the previous version.
type
  DowngradeforkchoiceUpdatedVersion* = ref object of BaseForkchoiceUpdatedCustomizer

method forkchoiceUpdatedVersion(cust: DowngradeforkchoiceUpdatedVersion, headTimestamp: uint64, payloadAttributesTimestamp: Option[uint64]): Version =
  let version = procCall forkchoiceUpdatedVersion(EngineAPIVersionResolver(cust), headTimestamp, payloadAttributesTimestamp)
  doAssert(version != Version.V1, "cannot downgrade version 1")
  version.pred

type
  VersionedHashRef* = ref object of RootRef
    blobs*: seq[BlobID]
    hashVersions*: seq[byte]

proc getVersionedHashes*(v: VersionedHashRef): seq[common.Hash256] =
  if v.blobs.len == 0:
    return @[]

  result = newSeq[common.Hash256](v.blobs.len)

  var version: byte
  for i, blobID in v.blobs:
    if v.hashVersions.len > i:
      version = v.hashVersions[i]
    result[i] = blobID.getVersionedHash(version)

proc description*(v: VersionedHashRef): string =
  result = "VersionedHashes: "
  for x in v.blobs:
    result.add x.toHex

  if v.hashVersions.len > 0:
    result.add " with versions "
    result.add v.hashVersions.toHex
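# Usage sketch (illustrative): compute the versioned hashes for two blob IDs;
# version byte 1 corresponds to VERSIONED_HASH_VERSION_KZG.
when isMainModule:
  let v = VersionedHashRef(blobs: @[BlobID(1), BlobID(2)], hashVersions: @[1'u8, 1'u8])
  echo v.description
  doAssert v.getVersionedHashes().len == 2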
type
  VersionedHashesCustomizer* = ref object of RootRef
  IncreaseVersionVersionedHashes* = ref object of VersionedHashesCustomizer

method getVersionedHashes*(cust: VersionedHashesCustomizer, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] {.base.} =
  doAssert(false, "getVersionedHashes unimplemented")

method getVersionedHashes(cust: IncreaseVersionVersionedHashes, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] =
  doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")

  result = newSeq[common.Hash256](baseVersionedHashes.len)
  for i, h in baseVersionedHashes:
    result[i] = h
    result[i].data[0] = result[i].data[0] + 1

type
  CorruptVersionedHashes* = ref object of VersionedHashesCustomizer

method getVersionedHashes(cust: CorruptVersionedHashes, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] =
  doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")

  result = newSeq[common.Hash256](baseVersionedHashes.len)
  for i, h in baseVersionedHashes:
    result[i] = h
    result[i].data[h.data.len-1] = result[i].data[h.data.len-1] + 1

type
  RemoveVersionedHash* = ref object of VersionedHashesCustomizer

method getVersionedHashes(cust: RemoveVersionedHash, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] =
  doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")

  result = newSeq[common.Hash256](baseVersionedHashes.len - 1)
  for i, h in baseVersionedHashes:
    if i < baseVersionedHashes.len-1:
      result[i] = h
      result[i].data[h.data.len-1] = result[i].data[h.data.len-1] + 1

type
  ExtraVersionedHash* = ref object of VersionedHashesCustomizer

method getVersionedHashes(cust: ExtraVersionedHash, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] =
  result = newSeq[common.Hash256](baseVersionedHashes.len + 1)
  for i, h in baseVersionedHashes:
    result[i] = h

  var extraHash: common.Hash256
  doAssert randomBytes(extraHash.data) == 32
  extraHash.data[0] = VERSIONED_HASH_VERSION_KZG
  result[^1] = extraHash
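# Usage sketch (illustrative): corrupt the last byte of every versioned hash,
# which should make newPayload report the block INVALID. `hashes` is assumed
# to come from a VersionedHashRef as above.
when isMainModule:
  let bad = CorruptVersionedHashes().getVersionedHashes(hashes)
  doAssert bad.len == hashes.len and bad[0] != hashes[0]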
type
  PayloadCustomizer* = ref object of EngineAPIVersionResolver

  ExecutableData* = object
    basePayload*: ExecutionPayload
    beaconRoot* : Option[common.Hash256]
    attr*       : PayloadAttributes
    versionedHashes*: seq[common.Hash256]

method customizePayload(cust: PayloadCustomizer, data: ExecutableData): ExecutableData {.base.} =
  doAssert(false, "customizePayload unimplemented")

method getTimestamp(cust: PayloadCustomizer, basePayload: ExecutionPayload): uint64 {.base.} =
  doAssert(false, "getTimestamp unimplemented")

type
  NewPayloadCustomizer* = ref object of PayloadCustomizer

method getExpectedError(cust: NewPayloadCustomizer): int {.base.} =
  doAssert(false, "getExpectedError unimplemented")

method getExpectInvalidStatus(cust: NewPayloadCustomizer): bool {.base.} =
  doAssert(false, "getExpectInvalidStatus unimplemented")

type
  CustomPayloadData = object
    parentHash*    : Option[common.Hash256]
    feeRecipient*  : Option[common.EthAddress]
    stateRoot*     : Option[common.Hash256]
    receiptsRoot*  : Option[common.Hash256]
    logsBloom*     : Option[BloomFilter]
    prevRandao*    : Option[common.Hash256]
    number*        : Option[uint64]
    gasLimit*      : Option[GasInt]
    gasUsed*       : Option[GasInt]
    timestamp*     : Option[uint64]
    extraData*     : Option[common.Blob]
    baseFeePerGas* : Option[UInt256]
    blockHash*     : Option[common.Hash256]
    transactions*  : Option[seq[Transaction]]
    withdrawals*   : Option[seq[Withdrawal]]
    removeWithdrawals*  : bool
    blobGasUsed*        : Option[uint64]
    removeBlobGasUsed*  : bool
    excessBlobGas*      : Option[uint64]
    removeExcessBlobGas*: bool
    parentBeaconRoot*   : Option[common.Hash256]
    removeParentBeaconRoot*   : bool
    versionedHashesCustomizer*: VersionedHashesCustomizer
func getTimestamp*(cust: CustomPayloadData, basePayload: ExecutionPayload): uint64 =
  if cust.timestamp.isSome:
    return cust.timestamp.get
  return basePayload.timestamp.uint64

# Construct a customized payload by taking an existing payload as base and
# mixing in the fields set in CustomPayloadData.
# blockHash is calculated automatically.
proc customizePayload*(cust: CustomPayloadData, data: ExecutableData): ExecutableData =
  var customHeader = blockHeader(data.basePayload, data.beaconRoot)

  if cust.transactions.isSome:
    customHeader.txRoot = calcTxRoot(cust.transactions.get)

  # Overwrite custom information
  if cust.parentHash.isSome:
    customHeader.parentHash = cust.parentHash.get

  if cust.feeRecipient.isSome:
    customHeader.coinbase = cust.feeRecipient.get

  if cust.stateRoot.isSome:
    customHeader.stateRoot = cust.stateRoot.get

  if cust.receiptsRoot.isSome:
    customHeader.receiptRoot = cust.receiptsRoot.get

  if cust.logsBloom.isSome:
    customHeader.bloom = cust.logsBloom.get

  if cust.prevRandao.isSome:
    customHeader.mixDigest = cust.prevRandao.get

  if cust.number.isSome:
    customHeader.blockNumber = cust.number.get.u256

  if cust.gasLimit.isSome:
    customHeader.gasLimit = cust.gasLimit.get

  if cust.gasUsed.isSome:
    customHeader.gasUsed = cust.gasUsed.get

  if cust.timestamp.isSome:
    customHeader.timestamp = cust.timestamp.get.EthTime

  if cust.extraData.isSome:
    customHeader.extraData = cust.extraData.get

  if cust.baseFeePerGas.isSome:
    customHeader.fee = cust.baseFeePerGas

  if cust.removeWithdrawals:
    customHeader.withdrawalsRoot = none(common.Hash256)
  elif cust.withdrawals.isSome:
    let h = calcWithdrawalsRoot(cust.withdrawals.get)
    customHeader.withdrawalsRoot = some(h)

  if cust.removeBlobGasUsed:
    customHeader.blobGasUsed = none(uint64)
  elif cust.blobGasUsed.isSome:
    customHeader.blobGasUsed = cust.blobGasUsed

  if cust.removeExcessBlobGas:
    customHeader.excessBlobGas = none(uint64)
  elif cust.excessBlobGas.isSome:
    customHeader.excessBlobGas = cust.excessBlobGas

  if cust.removeParentBeaconRoot:
    customHeader.parentBeaconBlockRoot = none(common.Hash256)
  elif cust.parentBeaconRoot.isSome:
    customHeader.parentBeaconBlockRoot = cust.parentBeaconRoot

  var blk = EthBlock(
    header: customHeader,
  )

  if cust.removeWithdrawals:
    blk.withdrawals = none(seq[Withdrawal])
  elif cust.withdrawals.isSome:
    blk.withdrawals = cust.withdrawals
  elif data.basePayload.withdrawals.isSome:
    blk.withdrawals = ethWithdrawals data.basePayload.withdrawals

  result = ExecutableData(
    basePayload : executionPayload(blk),
    beaconRoot  : blk.header.parentBeaconBlockRoot,
    attr        : data.attr,
    versionedHashes: data.versionedHashes,
  )

  if cust.versionedHashesCustomizer.isNil.not:
    result.versionedHashes = cust.versionedHashesCustomizer.getVersionedHashes(data.versionedHashes)
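# Usage sketch (illustrative): replace the state root of a base payload with
# the zero hash; the block hash is recomputed, so the result is a well-formed
# but invalid payload. `data` is assumed to be an ExecutableData captured
# from the CL mock.
when isMainModule:
  let cpd = CustomPayloadData(stateRoot: some(common.Hash256()))
  let bad = cpd.customizePayload(data)
  doAssert bad.basePayload.blockHash != data.basePayload.blockHash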
# Base customizer for the new payload directive call.
# Used as base to other customizers.
type
  BaseNewPayloadVersionCustomizer* = ref object of NewPayloadCustomizer
    payloadCustomizer*  : CustomPayloadData
    expectedError*      : int
    expectInvalidStatus*: bool

method customizePayload(cust: BaseNewPayloadVersionCustomizer, data: ExecutableData): ExecutableData =
  cust.payloadCustomizer.customizePayload(data)

method getExpectedError(cust: BaseNewPayloadVersionCustomizer): int =
  cust.expectedError

method getExpectInvalidStatus(cust: BaseNewPayloadVersionCustomizer): bool =
  cust.expectInvalidStatus

# Customizer that upgrades the version of the payload to the next version.
type
  UpgradeNewPayloadVersion* = ref object of NewPayloadCustomizer

method newPayloadVersion(cust: UpgradeNewPayloadVersion, timestamp: uint64): Version =
  let version = procCall newPayloadVersion(EngineAPIVersionResolver(cust), timestamp)
  doAssert(version != Version.high, "cannot upgrade version " & $Version.high)
  version.succ

# Customizer that downgrades the version of the payload to the previous version.
type
  DowngradeNewPayloadVersion* = ref object of NewPayloadCustomizer

method newPayloadVersion(cust: DowngradeNewPayloadVersion, timestamp: uint64): Version =
  let version = procCall newPayloadVersion(EngineAPIVersionResolver(cust), timestamp)
  doAssert(version != Version.V1, "cannot downgrade version 1")
  version.pred

proc customizePayloadTransactions*(data: ExecutableData, customTransactions: openArray[Transaction]): ExecutableData =
  let cpd = CustomPayloadData(
    transactions: some(@customTransactions),
  )
  customizePayload(cpd, data)
proc `$`*(cust: CustomPayloadData): string =
  var fieldList = newSeq[string]()

  if cust.parentHash.isSome:
    fieldList.add "parentHash=" & cust.parentHash.get.short

  if cust.feeRecipient.isSome:
    fieldList.add "Coinbase=" & $cust.feeRecipient.get

  if cust.stateRoot.isSome:
    fieldList.add "stateRoot=" & cust.stateRoot.get.short

  if cust.receiptsRoot.isSome:
    fieldList.add "receiptsRoot=" & cust.receiptsRoot.get.short

  if cust.logsBloom.isSome:
    fieldList.add "logsBloom=" & cust.logsBloom.get.toHex

  if cust.prevRandao.isSome:
    fieldList.add "prevRandao=" & cust.prevRandao.get.short

  if cust.number.isSome:
    fieldList.add "Number=" & $cust.number.get

  if cust.gasLimit.isSome:
    fieldList.add "gasLimit=" & $cust.gasLimit.get

  if cust.gasUsed.isSome:
    fieldList.add "gasUsed=" & $cust.gasUsed.get

  if cust.timestamp.isSome:
    fieldList.add "timestamp=" & $cust.timestamp.get

  if cust.extraData.isSome:
    fieldList.add "extraData=" & cust.extraData.get.toHex

  if cust.baseFeePerGas.isSome:
    fieldList.add "baseFeePerGas=" & $cust.baseFeePerGas.get

  if cust.transactions.isSome:
    fieldList.add "transactions=" & $cust.transactions.get.len

  if cust.withdrawals.isSome:
    fieldList.add "withdrawals=" & $cust.withdrawals.get.len

  fieldList.join(", ")

type
  InvalidPayloadBlockField* = enum
    InvalidParentHash
    InvalidStateRoot
    InvalidReceiptsRoot
    InvalidNumber
    InvalidGasLimit
    InvalidGasUsed
    InvalidTimestamp
    InvalidPrevRandao
    RemoveTransaction
    InvalidTransactionSignature
    InvalidTransactionNonce
    InvalidTransactionGas
    InvalidTransactionGasPrice
    InvalidTransactionValue
    InvalidTransactionGasTipPrice
    InvalidTransactionChainID
    InvalidParentBeaconBlockRoot
    InvalidExcessBlobGas
    InvalidBlobGasUsed
    InvalidBlobCountGasUsed
    InvalidVersionedHashesVersion
    InvalidVersionedHashes
    IncompleteVersionedHashes
    ExtraVersionedHashes
    InvalidWithdrawals
func scramble(data: Web3Hash): Option[common.Hash256] =
  var h = ethHash data
  h.data[^1] = byte(255 - h.data[^1])
  some(h)

func scramble(data: common.Hash256): Option[common.Hash256] =
  var h = data
  h.data[^1] = byte(255 - h.data[^1])
  some(h)

# This function generates an invalid payload by taking a base payload and
# modifying the specified field such that it ends up being invalid.
# One small consideration is that the payload needs to contain transactions,
# especially transactions using the PREVRANDAO opcode, for all the fields to
# be compatible with this function.
proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadField: InvalidPayloadBlockField): ExecutableData =
  var customPayloadMod: CustomPayloadData
  let basePayload = data.basePayload

  case payloadField
  of InvalidParentHash:
    customPayloadMod = CustomPayloadData(
      parentHash: scramble(basePayload.parentHash),
    )
  of InvalidStateRoot:
    customPayloadMod = CustomPayloadData(
      stateRoot: scramble(basePayload.stateRoot),
    )
  of InvalidReceiptsRoot:
    customPayloadMod = CustomPayloadData(
      receiptsRoot: scramble(basePayload.receiptsRoot),
    )
  of InvalidNumber:
    let modNumber = basePayload.blockNumber.uint64 - 1
    customPayloadMod = CustomPayloadData(
      number: some(modNumber),
    )
  of InvalidGasLimit:
    let modGasLimit = basePayload.gasLimit.GasInt * 2
    customPayloadMod = CustomPayloadData(
      gasLimit: some(modGasLimit),
    )
  of InvalidGasUsed:
    let modGasUsed = basePayload.gasUsed.GasInt - 1
    customPayloadMod = CustomPayloadData(
      gasUsed: some(modGasUsed),
    )
  of InvalidTimestamp:
    let modTimestamp = basePayload.timestamp.uint64 - 1
    customPayloadMod = CustomPayloadData(
      timestamp: some(modTimestamp),
    )
  of InvalidPrevRandao:
    # This option potentially requires a transaction that uses the PREVRANDAO opcode.
    # Otherwise the payload will still be valid.
    var randomHash: common.Hash256
    doAssert randomBytes(randomHash.data) == 32
    customPayloadMod = CustomPayloadData(
      prevRandao: some(randomHash),
    )
  of InvalidParentBeaconBlockRoot:
    doAssert(data.beaconRoot.isSome,
      "no parent beacon block root available for modification")
    customPayloadMod = CustomPayloadData(
      parentBeaconRoot: scramble(data.beaconRoot.get),
    )
  of InvalidBlobGasUsed:
    doAssert(basePayload.blobGasUsed.isSome, "no blob gas used available for modification")
    let modBlobGasUsed = basePayload.blobGasUsed.get.uint64 + 1
    customPayloadMod = CustomPayloadData(
      blobGasUsed: some(modBlobGasUsed),
    )
  of InvalidBlobCountGasUsed:
    doAssert(basePayload.blobGasUsed.isSome, "no blob gas used available for modification")
    let modBlobGasUsed = basePayload.blobGasUsed.get.uint64 + GAS_PER_BLOB
    customPayloadMod = CustomPayloadData(
      blobGasUsed: some(modBlobGasUsed),
    )
  of InvalidExcessBlobGas:
    doAssert(basePayload.excessBlobGas.isSome, "no excess blob gas available for modification")
    let modExcessBlobGas = basePayload.excessBlobGas.get.uint64 + 1
    customPayloadMod = CustomPayloadData(
      excessBlobGas: some(modExcessBlobGas),
    )
  of InvalidVersionedHashesVersion:
    doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification")
    customPayloadMod = CustomPayloadData(
      versionedHashesCustomizer: IncreaseVersionVersionedHashes(),
    )
  of InvalidVersionedHashes:
    doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification")
    customPayloadMod = CustomPayloadData(
      versionedHashesCustomizer: CorruptVersionedHashes(),
    )
  of IncompleteVersionedHashes:
    doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification")
    customPayloadMod = CustomPayloadData(
      versionedHashesCustomizer: RemoveVersionedHash(),
    )
  of ExtraVersionedHashes:
    doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification")
    customPayloadMod = CustomPayloadData(
      versionedHashesCustomizer: ExtraVersionedHash(),
    )
  of InvalidWithdrawals:
    # These options are not supported yet.
    # TODO: Implement
    doAssert(false, "invalid payload field not supported yet: " & $payloadField)
  of InvalidTransactionSignature,
    InvalidTransactionNonce,
    InvalidTransactionGas,
    InvalidTransactionGasPrice,
    InvalidTransactionGasTipPrice,
    InvalidTransactionValue,
    InvalidTransactionChainID:

    doAssert(basePayload.transactions.len > 0, "no transactions available for modification")
    let baseTx = rlp.decode(distinctBase basePayload.transactions[0], Transaction)
    var custTx: CustomTransactionData

    case payloadField
    of InvalidTransactionSignature:
      custTx.signature = some(baseTx.R - 1.u256)
    of InvalidTransactionNonce:
      custTx.nonce = some(baseTx.nonce - 1)
    of InvalidTransactionGas:
      custTx.gas = some(0.GasInt)
    of InvalidTransactionGasPrice:
      custTx.gasPriceOrGasFeeCap = some(0.GasInt)
    of InvalidTransactionGasTipPrice:
      custTx.gasTipCap = some(gasTipPrice.GasInt * 2.GasInt)
    of InvalidTransactionValue:
      # Vault account initially has 0x123450000000000000000, so this value should overflow
      custTx.value = some(UInt256.fromHex("0x123450000000000000001"))
    of InvalidTransactionChainID:
      custTx.chainId = some(ChainId(baseTx.chainId.uint64 + 1))
    else: discard

    let modifiedTx = sender.customizeTransaction(baseTx, custTx)
    customPayloadMod = CustomPayloadData(
      transactions: some(@[modifiedTx]),
    )
  of RemoveTransaction:
    let emptyTxs = newSeq[Transaction]()
    customPayloadMod = CustomPayloadData(
      transactions: some(emptyTxs),
    )

  customPayloadMod.customizePayload(data)
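# Usage sketch (illustrative): build a payload whose declared blobGasUsed is
# off by one and hand it to the client, expecting an INVALID status. `sender`
# and `data` are assumed to come from the running test case.
when isMainModule:
  let invalid = sender.generateInvalidPayload(data, InvalidBlobGasUsed)
  doAssert invalid.basePayload.blobGasUsed != data.basePayload.blobGasUsed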
# Generates an alternative withdrawals list that contains the same
# amounts and accounts, but the order in the list is different, so
# stateRoot of the resulting payload should be the same.
proc randomizeWithdrawalsOrder(src: openArray[Withdrawal]): seq[Withdrawal] =
  result = @src
  result.shuffle
@ -0,0 +1,216 @@
import
  std/[tables, strutils, typetraits],
  stint,
  eth/[common, rlp],
  eth/common/eth_types_rlp,
  chronicles,
  stew/[results, byteutils],
  kzg4844/kzg_ex as kzg,
  ../types,
  ../engine_client,
  ../../../../nimbus/constants,
  ../../../../nimbus/core/eip4844,
  ../../../../nimbus/rpc/rpc_types,
  ../../../../nimbus/beacon/execution_types,
  ../../../../nimbus/beacon/web3_eth_conv,
  ./blobs

type
  TestBlobTxPool* = ref object
    currentBlobID* : BlobID
    currentTxIndex*: int
    transactions*  : Table[common.Hash256, Transaction]
    hashesByIndex* : Table[int, common.Hash256]

const
  HISTORY_BUFFER_LENGTH* = 8191

  # Test constants
  DATAHASH_START_ADDRESS* = toAddress(0x20000.u256)
  DATAHASH_ADDRESS_COUNT* = 1000

func getMinExcessBlobGasForBlobGasPrice(data_gas_price: uint64): uint64 =
  var
    current_excess_data_gas = 0'u64
    current_data_gas_price = 1'u64

  while current_data_gas_price < data_gas_price:
    current_excess_data_gas += GAS_PER_BLOB.uint64
    current_data_gas_price = getBlobGasPrice(current_excess_data_gas).truncate(uint64)

  return current_excess_data_gas

func getMinExcessBlobsForBlobGasPrice*(data_gas_price: uint64): uint64 =
  return getMinExcessBlobGasForBlobGasPrice(data_gas_price) div GAS_PER_BLOB.uint64
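# Worked example (illustrative): getMinExcessBlobGasForBlobGasPrice walks the
# EIP-4844 price curve one blob's worth of excess gas at a time until the
# target blob gas price is reached, so the returned excess (converted to a
# blob count by the public helper) prices at or above the target.
when isMainModule:
  let minBlobs = getMinExcessBlobsForBlobGasPrice(2'u64)
  doAssert getBlobGasPrice(minBlobs * GAS_PER_BLOB.uint64).truncate(uint64) >= 2'u64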
proc addBlobTransaction*(pool: TestBlobTxPool, tx: Transaction) =
  let txHash = rlpHash(tx)
  pool.transactions[txHash] = tx

proc `==`(a: openArray[rpc_types.AccessTuple], b: openArray[AccessPair]): bool =
  if a.len != b.len:
    return false

  for i in 0..<a.len:
    if a[i].address != b[i].address:
      return false
    if a[i].storageKeys.len != b[i].storageKeys.len:
      return false
    for j in 0..<a[i].storageKeys.len:
      if a[i].storageKeys[j].data != b[i].storageKeys[j]:
        return false

  return true

# Retrieve a transaction from the node by hash and check that every field
# matches the local copy.
proc verifyTransactionFromNode*(client: RpcClient, tx: Transaction): Result[void, string] =
  let txHash = tx.rlpHash
  let res = client.txByHash(txHash)
  if res.isErr:
    return err(res.error)
  let returnedTx = res.get()

  # Verify that the tx fields are all the same
  if returnedTx.nonce != tx.nonce:
    return err("nonce mismatch: $1 != $2" % [$returnedTx.nonce, $tx.nonce])

  if returnedTx.gasLimit != tx.gasLimit:
    return err("gas mismatch: $1 != $2" % [$returnedTx.gasLimit, $tx.gasLimit])

  if returnedTx.gasPrice != tx.gasPrice:
    return err("gas price mismatch: $1 != $2" % [$returnedTx.gasPrice, $tx.gasPrice])

  if returnedTx.value != tx.value:
    return err("value mismatch: $1 != $2" % [$returnedTx.value, $tx.value])

  if returnedTx.to != tx.to:
    return err("to mismatch: $1 != $2" % [$returnedTx.to, $tx.to])

  if returnedTx.payload != tx.payload:
    return err("data mismatch: $1 != $2" % [returnedTx.payload.toHex, tx.payload.toHex])

  if returnedTx.accessList.isNone:
    return err("expect accessList is some")

  let ac = returnedTx.accessList.get
  if ac != tx.accessList:
    return err("access list mismatch")

  if returnedTx.chainId.isNone:
    return err("chain id is none, expect is some")

  if returnedTx.chainId.get.uint64 != tx.chainId.uint64:
    return err("chain id mismatch: $1 != $2" % [$returnedTx.chainId.get.uint64, $tx.chainId.uint64])

  if returnedTx.maxFeePerGas != tx.maxFee:
    return err("max fee per gas mismatch: $1 != $2" % [$returnedTx.maxFeePerGas, $tx.maxFee])

  if returnedTx.maxPriorityFeePerGas != tx.maxPriorityFee:
    return err("max priority fee per gas mismatch: $1 != $2" % [$returnedTx.maxPriorityFeePerGas, $tx.maxPriorityFee])

  if returnedTx.maxFeePerBlobGas.isNone:
    return err("expect maxFeePerBlobGas is some")

  if returnedTx.maxFeePerBlobGas.get != tx.maxFeePerBlobGas:
    return err("max fee per data gas mismatch: $1 != $2" % [$returnedTx.maxFeePerBlobGas.get, $tx.maxFeePerBlobGas])

  if returnedTx.versionedHashes.isNone:
    return err("expect versioned hashes is some")

  let vs = returnedTx.versionedHashes.get
  if vs != tx.versionedHashes:
    return err("blob versioned hashes mismatch")

  if returnedTx.txType != tx.txType:
    return err("type mismatch: $1 != $2" % [$returnedTx.txType, $tx.txType])

  ok()
proc beaconRootStorageIndexes*(timestamp: uint64): (UInt256, UInt256) =
  # Calculate keys
  let
    timestampReduced = timestamp mod HISTORY_BUFFER_LENGTH
    timestampExtended = timestampReduced + HISTORY_BUFFER_LENGTH

  (timestampReduced.u256, timestampExtended.u256)
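# Worked example (illustrative): the EIP-4788 ring buffer has
# HISTORY_BUFFER_LENGTH (8191) slots, so timestamp 8200 maps to timestamp
# slot 8200 mod 8191 = 9 and root slot 9 + 8191 = 8200.
when isMainModule:
  let (tsKey, rootKey) = beaconRootStorageIndexes(8200'u64)
  doAssert tsKey == 9.u256 and rootKey == 8200.u256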
type
  BlobWrapData* = object
    versionedHash*: common.Hash256
    blob*         : kzg.KzgBlob
    commitment*   : kzg.KZGCommitment
    proof*        : kzg.KzgProof

  BlobData* = ref object
    txs* : seq[Transaction]
    data*: seq[BlobWrapData]

proc getBlobDataInPayload*(pool: TestBlobTxPool, payload: ExecutionPayload): Result[BlobData, string] =
  var blobData = BlobData()

  # Find all blob transactions included in the payload
  for binaryTx in payload.transactions:
    # Unmarshal the tx from the payload, which should be the minimal version
    # of the blob transaction
    let txData = rlp.decode(distinctBase binaryTx, Transaction)
    if txData.txType != TxEIP4844:
      continue

    let txHash = rlpHash(txData)

    # Find the transaction in the current pool of known transactions
    if not pool.transactions.hasKey(txHash):
      return err("could not find transaction in the pool")

    let blobTx = pool.transactions[txHash]
    if blobTx.networkPayload.isNil:
      return err("blob data is nil")

    let np = blobTx.networkPayload
    if blobTx.versionedHashes.len != np.commitments.len or
       np.commitments.len != np.blobs.len or
       np.blobs.len != np.proofs.len:
      return err("invalid blob wrap data")

    for i in 0..<blobTx.versionedHashes.len:
      blobData.data.add BlobWrapData(
        versionedHash: blobTx.versionedHashes[i],
        commitment   : np.commitments[i],
        blob         : np.blobs[i],
        proof        : np.proofs[i],
      )
    blobData.txs.add blobTx

  return ok(blobData)

proc verifyBeaconRootStorage*(client: RpcClient, payload: ExecutionPayload): bool =
  # Read the storage keys from the stateful precompile that stores the beacon
  # roots and verify that the beacon root is the same as the one in the payload
  let
    blockNumber = u256 payload.blockNumber
    precompileAddress = BEACON_ROOTS_ADDRESS
    (timestampKey, beaconRootKey) = beaconRootStorageIndexes(payload.timestamp.uint64)

  # Verify the timestamp key
  var r = client.storageAt(precompileAddress, timestampKey, blockNumber)
  if r.isErr:
    error "verifyBeaconRootStorage", msg=r.error
    return false

  if r.get != payload.timestamp.uint64.u256:
    error "verifyBeaconRootStorage storage 1",
      expect=payload.timestamp.uint64.u256,
      get=r.get
    return false

  # Verify the beacon root key
  r = client.storageAt(precompileAddress, beaconRootKey, blockNumber)
  let parentBeaconBlockRoot = timestampToBeaconRoot(payload.timestamp)
  if parentBeaconBlockRoot != beaconRoot(r.get):
    error "verifyBeaconRootStorage storage 2",
      expect=parentBeaconBlockRoot.toHex,
      get=beaconRoot(r.get).toHex
    return false

  return true
@ -0,0 +1,24 @@
import
  ../types,
  ../test_env,
  ./helpers

type
  CancunTestContext* = object
    env*: TestEnv
    txPool*: TestBlobTxPool

  # Interface to represent a single step in a test vector
  TestStep* = ref object of RootRef
    # Executes the step

  # Contains the base spec for all cancun tests.
  CancunSpec* = ref object of BaseSpec
    getPayloadDelay*: int # Delay between FcU and GetPayload calls
    testSequence*: seq[TestStep]

method execute*(step: TestStep, ctx: CancunTestContext): bool {.base.} =
  true

method description*(step: TestStep): string {.base.} =
  discard
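# Sketch of a concrete step (illustrative, not part of the commit): a no-op
# step, showing how TestStep subclasses plug into a CancunSpec test sequence.
type
  NoopStep* = ref object of TestStep

method execute*(step: NoopStep, ctx: CancunTestContext): bool =
  # A real step would drive ctx.env / ctx.txPool here.
  true

method description*(step: NoopStep): string =
  "NoopStep: does nothing"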
@ -0,0 +1,54 @@
import
  ./step

# A step that attempts to peer to the client using devp2p, and checks the forkid of the client.
# Go reference implementation (not yet ported to Nim), kept commented out:
#
# type DevP2PClientPeering struct {
#   // Client index to peer to
#   ClientIndex uint64
# }
#
# func (step DevP2PClientPeering) Execute(t *CancunTestContext) error {
#   // Get client index's enode
#   if step.ClientIndex >= uint64(len(t.TestEngines)) {
#     return fmt.Errorf("invalid client index %d", step.ClientIndex)
#   }
#   engine := t.Engines[step.ClientIndex]
#   conn, err := devp2p.PeerEngineClient(engine, env.clMock)
#   if err != nil {
#     return fmt.Errorf("error peering engine client: %v", err)
#   }
#   defer conn.Close()
#   t.Logf("Connected to client %d, remote public key: %s", step.ClientIndex, conn.RemoteKey())
#
#   // Sleep
#   time.Sleep(1 * time.Second)
#
#   // Timeout value for all requests
#   timeout := 20 * time.Second
#
#   // Send a ping request to verify that we are not immediately disconnected
#   pingReq := &devp2p.Ping{}
#   if size, err := conn.Write(pingReq); err != nil {
#     return errors.Wrap(err, "could not write to conn")
#   } else {
#     t.Logf("Wrote %d bytes to conn", size)
#   }
#
#   // Finally wait for the pong response
#   msg, err := conn.WaitForResponse(timeout, 0)
#   if err != nil {
#     return errors.Wrap(err, "error waiting for response")
#   }
#   switch msg := msg.(type) {
#   case *devp2p.Pong:
#     t.Logf("Received pong response: %v", msg)
#   default:
#     return fmt.Errorf("unexpected message type: %T", msg)
#   }
#
#   return nil
# }
#
# func (step DevP2PClientPeering) Description() string {
#   return fmt.Sprintf("DevP2PClientPeering: client %d", step.ClientIndex)
# }
@ -0,0 +1,134 @@
|
||||||
|
import
|
||||||
|
./step
|
||||||
|
|
||||||
|
# A step that requests a Transaction hash via P2P and expects the correct full blob tx
|
||||||
|
type DevP2PRequestPooledTransactionHash struct {
|
||||||
|
# Client index to request the transaction hash from
|
||||||
|
ClientIndex uint64
|
||||||
|
# Transaction Index to request
|
||||||
|
TransactionIndexes []uint64
|
||||||
|
# Wait for a new pooled transaction message before actually requesting the transaction
|
||||||
|
WaitForNewPooledTransaction bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func (step DevP2PRequestPooledTransactionHash) Execute(t *CancunTestContext) error {
|
||||||
|
# Get client index's enode
|
||||||
|
if step.ClientIndex >= uint64(len(t.TestEngines)) {
|
||||||
|
return error "invalid client index %d", step.ClientIndex)
|
||||||
|
}
|
||||||
|
engine = t.Engines[step.ClientIndex]
|
||||||
|
conn, err = devp2p.PeerEngineClient(engine, env.clMock)
|
||||||
|
if err != nil {
|
||||||
|
return error "error peering engine client: %v", err)
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
info "Connected to client %d, remote public key: %s", step.ClientIndex, conn.RemoteKey())
|
||||||
|
|
||||||
|
var (
|
||||||
|
txHashes = make([]Hash256, len(step.TransactionIndexes))
|
||||||
|
txs = make([]typ.Transaction, len(step.TransactionIndexes))
|
||||||
|
ok bool
|
||||||
|
)
|
||||||
|
for i, txIndex = range step.TransactionIndexes {
|
||||||
|
txHashes[i], ok = t.TestBlobTxPool.HashesByIndex[txIndex]
|
||||||
|
if !ok {
|
||||||
|
return error "transaction index %d not found", step.TransactionIndexes[0])
|
||||||
|
}
|
||||||
|
txs[i], ok = t.TestBlobTxPool.transactions[txHashes[i]]
|
||||||
|
if !ok {
|
||||||
|
return error "transaction %s not found", txHashes[i].String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
	// Timeout value for all requests
	timeout = 20 * time.Second

	// Wait for a new pooled transaction message
	if step.WaitForNewPooledTransaction {
		msg, err := conn.WaitForResponse(timeout, 0)
		if err != nil {
			return errors.Wrap(err, "error waiting for response")
		}
		switch msg := msg.(type) {
		case *devp2p.NewPooledTransactionHashes:
			if len(msg.Hashes) != len(txHashes) {
				return fmt.Errorf("expected %d hashes, got %d", len(txHashes), len(msg.Hashes))
			}
			if len(msg.Types) != len(txHashes) {
				return fmt.Errorf("expected %d types, got %d", len(txHashes), len(msg.Types))
			}
			if len(msg.Sizes) != len(txHashes) {
				return fmt.Errorf("expected %d sizes, got %d", len(txHashes), len(msg.Sizes))
			}
			for i := 0; i < len(txHashes); i++ {
				hash, typ, size := msg.Hashes[i], msg.Types[i], msg.Sizes[i]
				// Get the transaction
				tx, ok := t.TestBlobTxPool.transactions[hash]
				if !ok {
					return fmt.Errorf("transaction %s not found", hash.String())
				}

				if typ != tx.Type() {
					return fmt.Errorf("expected type %d, got %d", tx.Type(), typ)
				}

				b, err := tx.MarshalBinary()
				if err != nil {
					return errors.Wrap(err, "error marshaling transaction")
				}
				if size != uint32(len(b)) {
					return fmt.Errorf("expected size %d, got %d", len(b), size)
				}
			}
		default:
			return fmt.Errorf("unexpected message type: %T", msg)
		}
	}

	// Send the request for the pooled transactions
	getTxReq := &devp2p.GetPooledTransactions{
		RequestId:                   1234,
		GetPooledTransactionsPacket: txHashes,
	}
	if size, err := conn.Write(getTxReq); err != nil {
		return errors.Wrap(err, "could not write to conn")
	} else {
		log.Printf("Wrote %d bytes to conn", size)
	}

	// Wait for the response
	msg, err := conn.WaitForResponse(timeout, getTxReq.RequestId)
	if err != nil {
		return errors.Wrap(err, "error waiting for response")
	}
	switch msg := msg.(type) {
	case *devp2p.PooledTransactions:
		if len(msg.PooledTransactionsBytesPacket) != len(txHashes) {
			return fmt.Errorf("expected %d txs, got %d", len(txHashes), len(msg.PooledTransactionsBytesPacket))
		}
		for i, txBytes := range msg.PooledTransactionsBytesPacket {
			tx := txs[i]

			expBytes, err := tx.MarshalBinary()
			if err != nil {
				return errors.Wrap(err, "error marshaling transaction")
			}

			if len(expBytes) != len(txBytes) {
				return fmt.Errorf("expected size %d, got %d", len(expBytes), len(txBytes))
			}

			if !bytes.Equal(expBytes, txBytes) {
				return fmt.Errorf("expected tx %#x, got %#x", expBytes, txBytes)
			}
		}
	default:
		return fmt.Errorf("unexpected message type: %T", msg)
	}
	return nil
}

func (step DevP2PRequestPooledTransactionHash) Description() string {
	return fmt.Sprintf("DevP2PRequestPooledTransactionHash: client %d, transaction indexes %v", step.ClientIndex, step.TransactionIndexes)
}
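The three parallel lists in an eth/68 NewPooledTransactionHashes announcement must stay aligned: one type byte and one size entry per hash, which is exactly what the length checks above enforce. A minimal Nim sketch of the same invariant; the `Announcement` type and its field names are illustrative only, since the Nim port of this step does not exist yet in this commit:

type
  Announcement = object     # hypothetical, for illustration
    types : seq[byte]
    sizes : seq[uint32]
    hashes: seq[array[32, byte]]

func validAnnouncement(a: Announcement): bool =
  # every announced hash needs exactly one type byte and one size entry
  a.types.len == a.hashes.len and a.sizes.len == a.hashes.len

when isMainModule:
  let a = Announcement(
    types : @[0x03'u8],
    sizes : @[131_500'u32],
    hashes: @[default(array[32, byte])])
  doAssert validAnnouncement(a)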
@ -0,0 +1,47 @@
import
  ./step

// A step that launches a new client
type LaunchClients struct {
	client.EngineStarter
	ClientCount              uint64
	SkipConnectingToBootnode bool
	SkipAddingToCLMock       bool
}

func (step LaunchClients) GetClientCount() uint64 {
	clientCount := step.ClientCount
	if clientCount == 0 {
		clientCount = 1
	}
	return clientCount
}

func (step LaunchClients) Execute(t *CancunTestContext) error {
	// Launch a new client
	var (
		client client.EngineClient
		err    error
	)
	clientCount := step.GetClientCount()
	for i := uint64(0); i < clientCount; i++ {
		if !step.SkipConnectingToBootnode {
			client, err = step.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles, t.Engines[0])
		} else {
			client, err = step.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles)
		}
		if err != nil {
			return err
		}
		t.Engines = append(t.Engines, client)
		t.TestEngines = append(t.TestEngines, test.NewTestEngineClient(t.Env, client))
		if !step.SkipAddingToCLMock {
			env.clMock.AddEngineClient(client)
		}
	}
	return nil
}

func (step LaunchClients) Description() string {
	return fmt.Sprintf("Launch %d new engine client(s)", step.GetClientCount())
}
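Several step types in this framework share the same defaulting convention: a zero-valued count means "one". A tiny self-contained Nim sketch of the guard, which the framework repeats per step (GetClientCount, getPayloadCount, getBlobsPerTransaction) rather than factoring into a helper:

func getClientCount(clientCount: uint64): uint64 =
  # zero means "not configured": fall back to a single client
  if clientCount == 0: 1'u64
  else: clientCount

when isMainModule:
  doAssert getClientCount(0) == 1
  doAssert getClientCount(3) == 3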
@ -0,0 +1,409 @@
import
  std/strutils,
  chronicles,
  ./step_desc,
  ./helpers,
  ./customizer,
  ./blobs,
  ../engine_client,
  ../test_env,
  ../types,
  ../../../../nimbus/core/eip4844,
  ../../../../nimbus/common/common

type
  NewPayloads* = ref object of TestStep
    # Payload count
    payloadCount*: int
    # Number of blob transactions that are expected to be included in the payload
    expectedIncludedBlobCount*: int
    # Blob IDs expected to be found in the payload
    expectedBlobs*: seq[BlobID]
    # Delay between FcU and GetPayload calls
    getPayloadDelay*: int
    # GetPayload modifier when requesting the new payload
    getPayloadCustomizer*: GetPayloadCustomizer
    # ForkchoiceUpdated modifier when requesting the new payload
    fcUOnPayloadRequest*: ForkchoiceUpdatedCustomizer
    # Extra modifications on NewPayload to potentially generate an invalid payload
    newPayloadCustomizer*: NewPayloadCustomizer
    # ForkchoiceUpdated modifier when setting the new payload as head
    fcUOnHeadSet*: ForkchoiceUpdatedCustomizer
    # Expected responses on the NewPayload call
    expectationDescription*: string

func getPayloadCount(step: NewPayloads): int =
  var payloadCount = step.payloadCount
  if payloadCount == 0:
    payloadCount = 1
  return payloadCount

proc verifyPayload(step: NewPayloads,
                   com: CommonRef,
                   client: RpcClient,
                   blobTxsInPayload: openArray[Transaction],
                   shouldOverrideBuilder: Option[bool],
                   payload: ExecutionPayload,
                   previousPayload = none(ExecutionPayload)): bool =

  var
    parentExcessBlobGas = 0'u64
    parentBlobGasUsed = 0'u64

  if previousPayload.isSome:
    let prevPayload = previousPayload.get
    if prevPayload.excessBlobGas.isSome:
      parentExcessBlobGas = prevPayload.excessBlobGas.get.uint64

    if prevPayload.blobGasUsed.isSome:
      parentBlobGasUsed = prevPayload.blobGasUsed.get.uint64

  let
    parent = common.BlockHeader(
      excessBlobGas: some(parentExcessBlobGas),
      blobGasUsed: some(parentBlobGasUsed)
    )
    expectedExcessBlobGas = calcExcessBlobGas(parent)

  if com.isCancunOrLater(payload.timestamp.EthTime):
    if payload.excessBlobGas.isNone:
      error "payload contains nil excessBlobGas post-Cancun"
      return false

    if payload.blobGasUsed.isNone:
      error "payload contains nil blobGasUsed post-Cancun"
      return false

    if payload.excessBlobGas.get.uint64 != expectedExcessBlobGas:
      error "payload contains incorrect excessBlobGas",
        want=expectedExcessBlobGas,
        have=payload.excessBlobGas.get.uint64
      return false

    if shouldOverrideBuilder.isNone:
      error "shouldOverrideBuilder was not included in the getPayload response"
      return false

    var
      totalBlobCount = 0
      expectedBlobGasPrice = getBlobGasPrice(expectedExcessBlobGas)

    for tx in blobTxsInPayload:
      let blobCount = tx.versionedHashes.len
      totalBlobCount += blobCount

      # Retrieve receipt from client
      let r = client.txReceipt(tx.rlpHash)
      let expectedBlobGasUsed = blobCount.uint64 * GAS_PER_BLOB

      #r.ExpectBlobGasUsed(expectedBlobGasUsed)
      #r.ExpectBlobGasPrice(expectedBlobGasPrice)

    if totalBlobCount != step.expectedIncludedBlobCount:
      error "unexpected number of blobs in payload transactions",
        expect=step.expectedIncludedBlobCount,
        got=totalBlobCount
      return false

    if not verifyBeaconRootStorage(client, payload):
      return false

  else:
    if payload.excessBlobGas.isSome:
      error "payload contains non-nil excessBlobGas pre-fork"
      return false

    if payload.blobGasUsed.isSome:
      error "payload contains non-nil blobGasUsed pre-fork"
      return false

  return true
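For reference, `calcExcessBlobGas` and `getBlobGasPrice` above implement the EIP-4844 fee-market formulas; the framework takes them from `nimbus/core/eip4844`, so the following is only an illustrative, self-contained sketch using the Cancun mainnet constants:

const
  GAS_PER_BLOB                  = 131072'u64   # 2^17
  TARGET_BLOB_GAS_PER_BLOCK     = 393216'u64   # 3 blobs
  MIN_BLOB_GASPRICE             = 1'u64
  BLOB_GASPRICE_UPDATE_FRACTION = 3338477'u64

func calcExcessBlobGas(parentExcess, parentUsed: uint64): uint64 =
  # excess accumulates whenever a block uses more than the target
  if parentExcess + parentUsed < TARGET_BLOB_GAS_PER_BLOCK:
    0'u64
  else:
    parentExcess + parentUsed - TARGET_BLOB_GAS_PER_BLOCK

func fakeExponential(factor, numerator, denominator: uint64): uint64 =
  # integer approximation of factor * e^(numerator/denominator)
  var
    i = 1'u64
    output = 0'u64
    accum = factor * denominator
  while accum > 0:
    output += accum
    accum = (accum * numerator) div (denominator * i)
    inc i
  output div denominator

func getBlobGasPrice(excessBlobGas: uint64): uint64 =
  fakeExponential(MIN_BLOB_GASPRICE, excessBlobGas, BLOB_GASPRICE_UPDATE_FRACTION)

when isMainModule:
  doAssert calcExcessBlobGas(0, 6 * GAS_PER_BLOB) == TARGET_BLOB_GAS_PER_BLOCK
  doAssert getBlobGasPrice(0) == 1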
proc verifyBlobBundle(step: NewPayloads,
                      blobDataInPayload: openArray[BlobWrapData],
                      payload: ExecutionPayload,
                      blobBundle: BlobsBundleV1): bool =

  if blobBundle.blobs.len != blobBundle.commitments.len or
     blobBundle.blobs.len != blobBundle.proofs.len:
    error "unexpected length in blob bundle",
      blobs=len(blobBundle.blobs),
      proofs=len(blobBundle.proofs),
      kzgs=len(blobBundle.commitments)
    return false

  if len(blobBundle.blobs) != step.expectedIncludedBlobCount:
    error "unexpected number of blobs in bundle",
      expect=step.expectedIncludedBlobCount,
      got=len(blobBundle.blobs)
    return false

  # Verify that the calculated amount of blobs in the payload matches the
  # amount of blobs in the bundle
  if len(blobDataInPayload) != len(blobBundle.blobs):
    error "blob count in payload does not match bundle",
      expect=len(blobDataInPayload),
      got=len(blobBundle.blobs)
    return false

  for i, blobData in blobDataInPayload:
    let bundleCommitment = blobBundle.commitments[i].bytes
    let bundleBlob = blobBundle.blobs[i].bytes
    let bundleProof = blobBundle.proofs[i].bytes

    if bundleCommitment != blobData.commitment:
      error "KZG commitment mismatch at index of the bundle", index=i
      return false

    if bundleBlob != blobData.blob:
      error "blob mismatch at index of the bundle", index=i
      return false

    if bundleProof != blobData.proof:
      error "proof mismatch at index of the bundle", index=i
      return false

  if len(step.expectedBlobs) != 0:
    # Verify that the blobs in the payload match the expected blobs
    for expectedBlob in step.expectedBlobs:
      var found = false
      for blobData in blobDataInPayload:
        if expectedBlob.verifyBlob(blobData.blob):
          found = true
          break

      if not found:
        error "could not find expected blob", expectedBlob
        return false

  return true
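Each blob transaction commits to its blobs via versioned hashes: under EIP-4844 a versioned hash is the SHA-256 of the 48-byte KZG commitment with the first byte overwritten by the version tag 0x01. The framework gets this from `nimbus/core/eip4844`; a minimal standalone sketch:

import nimcrypto/sha2

const VERSIONED_HASH_VERSION_KZG = 0x01'u8

proc kzgToVersionedHash(commitment: openArray[byte]): array[32, byte] =
  # sha256(commitment) with byte 0 replaced by the version tag
  let digest = sha256.digest(commitment)
  result = digest.data
  result[0] = VERSIONED_HASH_VERSION_KZG

when isMainModule:
  var commitment: array[48, byte]   # KZG commitments are 48 bytes
  doAssert kzgToVersionedHash(commitment)[0] == 0x01'u8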
type
  Shadow = ref object
    p: int
    payloadCount: int
    prevPayload: ExecutionPayload

method execute*(step: NewPayloads, ctx: CancunTestContext): bool =
  # Produce a number of payloads and verify each one
  let env = ctx.env

  var originalGetPayloadDelay = env.clMock.payloadProductionClientDelay
  if step.getPayloadDelay != 0:
    env.clMock.payloadProductionClientDelay = step.getPayloadDelay

  var shadow = Shadow(
    payloadCount: step.getPayloadCount(),
    prevPayload: env.clMock.latestPayloadBuilt
  )

  for p in 0..<shadow.payloadCount:
    shadow.p = p
    let pbRes = env.clMock.produceSingleBlock(BlockProcessCallbacks(
      onPayloadAttributesGenerated: proc(): bool =
        # Not ported yet; kept from the Go original for reference:
        #[if step.fcUOnPayloadRequest != nil:
          var
            payloadAttributes = env.clMock.latestPayloadAttributes
            forkchoiceState   = env.clMock.latestForkchoice
            expectedError     *int
            expectedStatus    = test.Valid
            err               error

          step.fcUOnPayloadRequest.setEngineAPIVersionResolver(t.ForkConfig)
          testEngine = t.TestEngine.WithEngineAPIVersionResolver(step.FcUOnPayloadRequest)

          payloadAttributes, err = step.FcUOnPayloadRequest.getPayloadAttributes(payloadAttributes)
          if err != nil:
            fatal "Error getting custom payload attributes", payload=shadow.p+1, count=shadow.payloadCount, err

          expectedError, err = step.FcUOnPayloadRequest.getExpectedError()
          if err != nil:
            fatal "Error getting custom expected error", payload=shadow.p+1, count=shadow.payloadCount, err

          if step.FcUOnPayloadRequest.getExpectInvalidStatus():
            expectedStatus = test.Invalid

          r = env.client.ForkchoiceUpdated(&forkchoiceState, payloadAttributes, env.clMock.LatestHeader.Time)
          r.ExpectationDescription = step.ExpectationDescription
          if expectedError != nil:
            r.ExpectErrorCode(*expectedError)
          else:
            r.ExpectNoError()
            r.ExpectPayloadStatus(expectedStatus)

          if r.Response.PayloadID != nil:
            env.clMock.AddPayloadID(t.Engine, r.Response.PayloadID)
        ]#
        return true
      ,
      onRequestNextPayload: proc(): bool =
        # Get the next payload. Not ported yet; kept from the Go original:
        #[if step.GetPayloadCustomizer != nil:
          var
            payloadAttributes = env.clMock.latestPayloadAttributes
            payloadID         = env.clMock.NextPayloadID
            expectedError     *int
            err               error

          step.GetPayloadCustomizer.setEngineAPIVersionResolver(t.ForkConfig)
          testEngine = t.TestEngine.WithEngineAPIVersionResolver(step.GetPayloadCustomizer)

          # We are going to sleep twice because there is no way to skip the CL Mock's sleep
          time.Sleep(time.Duration(step.GetPayloadDelay) * time.Second)

          payloadID, err = step.GetPayloadCustomizer.getPayloadID(payloadID)
          if err != nil:
            fatal "Error getting custom payload ID", payload=shadow.p+1, count=shadow.payloadCount, err

          expectedError, err = step.GetPayloadCustomizer.getExpectedError()
          if err != nil:
            fatal "Error getting custom expected error", payload=shadow.p+1, count=shadow.payloadCount, err

          r = env.client.GetPayload(payloadID, payloadAttributes)
          r.ExpectationDescription = step.ExpectationDescription
          if expectedError != nil:
            r.ExpectErrorCode(*expectedError)
          else:
            r.ExpectNoError()
        ]#
        return true
      ,
      onGetPayload: proc(): bool =
        # Get the latest blob bundle
        var
          blobBundle = env.clMock.latestBlobsBundle
          payload = env.clMock.latestPayloadBuilt

        if not env.engine.com.isCancunOrLater(payload.timestamp.EthTime):
          # Nothing to do
          return true

        if blobBundle.isNone:
          fatal "Error getting blobs bundle", payload=shadow.p+1, count=shadow.payloadCount
          return false

        let res = getBlobDataInPayload(ctx.txPool, payload)
        if res.isErr:
          fatal "Error retrieving blob bundle", payload=shadow.p+1, count=shadow.payloadCount, msg=res.error
          return false

        let blobData = res.get

        if not step.verifyBlobBundle(blobData.data, payload, blobBundle.get):
          fatal "Error verifying blob bundle", payload=shadow.p+1, count=shadow.payloadCount
          return false

        return true
      ,
      onNewPayloadBroadcast: proc(): bool =
        # Not ported yet; kept from the Go original for reference:
        #[if step.NewPayloadCustomizer != nil:
          # Send a test NewPayload directive with either a modified payload or modified versioned hashes
          var
            payload        = env.clMock.latestPayloadBuilt
            r              *test.NewPayloadResponseExpectObject
            expectedError  *int
            expectedStatus test.PayloadStatus = test.Valid
            err            error

          # Send a custom new payload
          step.NewPayloadCustomizer.setEngineAPIVersionResolver(t.ForkConfig)
          testEngine = t.TestEngine.WithEngineAPIVersionResolver(step.NewPayloadCustomizer)

          payload, err = step.NewPayloadCustomizer.customizePayload(payload)
          if err != nil:
            fatal "Error customizing payload", payload=shadow.p+1, count=shadow.payloadCount, err
          expectedError, err = step.NewPayloadCustomizer.getExpectedError()
          if err != nil:
            fatal "Error getting custom expected error", payload=shadow.p+1, count=shadow.payloadCount, err
          if step.NewPayloadCustomizer.getExpectInvalidStatus():
            expectedStatus = test.Invalid

          r = env.client.NewPayload(payload)
          r.ExpectationDescription = step.ExpectationDescription
          if expectedError != nil:
            r.ExpectErrorCode(*expectedError)
          else:
            r.ExpectNoError()
            r.ExpectStatus(expectedStatus)

        if step.FcUOnHeadSet != nil:
          var
            forkchoiceState api.ForkchoiceStateV1 = env.clMock.latestForkchoice
            expectedError   *int
            expectedStatus  test.PayloadStatus = test.Valid
            err             error

          step.FcUOnHeadSet.setEngineAPIVersionResolver(t.ForkConfig)
          testEngine = t.TestEngine.WithEngineAPIVersionResolver(step.FcUOnHeadSet)
          expectedError, err = step.FcUOnHeadSet.getExpectedError()
          if err != nil:
            fatal "Error getting custom expected error", payload=shadow.p+1, count=shadow.payloadCount, err
          if step.FcUOnHeadSet.getExpectInvalidStatus():
            expectedStatus = test.Invalid

          forkchoiceState.HeadBlockHash = env.clMock.latestPayloadBuilt.blockHash

          r = env.client.ForkchoiceUpdated(&forkchoiceState, nil, env.clMock.latestPayloadBuilt.Timestamp)
          r.ExpectationDescription = step.ExpectationDescription
          if expectedError != nil:
            r.ExpectErrorCode(*expectedError)
          else:
            r.ExpectNoError()
            r.ExpectPayloadStatus(expectedStatus)
        ]#
        return true
      ,
      onForkchoiceBroadcast: proc(): bool =
        # Verify the transaction receipts on incorporated transactions
        let payload = env.clMock.latestPayloadBuilt

        let res = getBlobDataInPayload(ctx.txPool, payload)
        if res.isErr:
          fatal "Error retrieving blob bundle", payload=shadow.p+1, count=shadow.payloadCount, msg=res.error
          return false

        let blobData = res.get
        if not step.verifyPayload(env.engine.com, env.engine.client,
              blobData.txs, env.clMock.latestShouldOverrideBuilder,
              payload, some(shadow.prevPayload)):
          fatal "Error verifying payload", payload=shadow.p+1, count=shadow.payloadCount
          return false

        shadow.prevPayload = env.clMock.latestPayloadBuilt
        return true
    ))

    testCond pbRes
    info "Correctly produced payload", payload=shadow.p+1, count=shadow.payloadCount

  if step.getPayloadDelay != 0:
    # Restore the original delay
    env.clMock.payloadProductionClientDelay = originalGetPayloadDelay

  return true

method description*(step: NewPayloads): string =
  #[
  TODO: Figure out if we need this.
  if step.VersionedHashes != nil {
    return fmt.Sprintf("NewPayloads: %d payloads, %d blobs expected, %s", step.getPayloadCount(), step.ExpectedIncludedBlobCount, step.VersionedHashes.Description())
  ]#
  "NewPayloads: $1 payloads, $2 blobs expected" % [
    $step.getPayloadCount(), $step.expectedIncludedBlobCount
  ]
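The `Shadow` ref object above appears to exist so that the `{.gcsafe.}` callbacks can share mutable per-iteration state: boxing the state in a `ref` lets every closure see and update the same copy. A self-contained sketch of the pattern:

type Shadow = ref object
  counter: int

proc runCallback(cb: proc(): bool {.gcsafe.}): bool =
  cb()

when isMainModule:
  let shadow = Shadow()
  for i in 0 ..< 3:
    doAssert runCallback(proc(): bool =
      shadow.counter.inc   # every closure mutates the shared box
      true)
  doAssert shadow.counter == 3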
@ -0,0 +1,37 @@
import
  ./step

// A step that runs two or more steps in parallel
type ParallelSteps struct {
	Steps []TestStep
}

func (step ParallelSteps) Execute(t *CancunTestContext) error {
	// Run the steps in parallel
	wg := sync.WaitGroup{}
	errs := make(chan error, len(step.Steps))
	for _, s := range step.Steps {
		wg.Add(1)
		go func(s TestStep) {
			defer wg.Done()
			if err := s.Execute(t); err != nil {
				errs <- err
			}
		}(s)
	}
	wg.Wait()
	close(errs)
	// Surface the first error, if any step failed
	for err := range errs {
		return err
	}
	return nil
}

func (step ParallelSteps) Description() string {
	desc := "ParallelSteps: running steps in parallel:\n"
	for i, step := range step.Steps {
		desc += fmt.Sprintf("%d: %s\n", i, step.Description())
	}
	return desc
}
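When this step is ported, the Go WaitGroup-and-channel pattern maps naturally onto chronos futures, which the framework already uses elsewhere. A rough, self-contained sketch under that assumption; `runStep` is a stand-in, not framework code:

import chronos

proc runStep(id: int): Future[bool] {.async.} =
  # stand-in for TestStep.execute
  await sleepAsync(chronos.milliseconds(1))
  return true

when isMainModule:
  let futs = @[runStep(1), runStep(2), runStep(3)]
  waitFor allFutures(futs)   # WaitGroup-style join
  for f in futs:
    doAssert f.read()        # surface the first failure, if any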
@ -0,0 +1,89 @@
import
  std/strutils,
  eth/common,
  stint,
  chronicles,
  ./step_desc,
  ./helpers,
  ./blobs,
  ../test_env,
  ../tx_sender,
  ../../../../nimbus/utils/utils

type
  # A step that sends multiple new blobs to the client
  SendBlobTransactions* = ref object of TestStep
    # Number of blob transactions to send before this block's GetPayload request
    transactionCount*: int
    # Blobs per transaction
    blobsPerTransaction*: int
    # Max blob gas cost for every blob transaction
    blobTransactionMaxBlobGasCost*: UInt256
    # Gas fee cap for every blob transaction
    blobTransactionGasFeeCap*: GasInt
    # Gas tip cap for every blob transaction
    blobTransactionGasTipCap*: GasInt
    # Replace transactions
    replaceTransactions*: bool
    # Skip verification of retrieving the tx from node
    skipVerificationFromNode*: bool
    # Account index to send the blob transactions from
    accountIndex*: int
    # Client index to send the blob transactions to
    clientIndex*: int

func getBlobsPerTransaction(step: SendBlobTransactions): int =
  var blobCountPerTx = step.blobsPerTransaction
  if blobCountPerTx == 0:
    blobCountPerTx = 1
  return blobCountPerTx

method execute*(step: SendBlobTransactions, ctx: CancunTestContext): bool =
  # Send a blob transaction
  let blobCountPerTx = step.getBlobsPerTransaction()

  if step.clientIndex >= ctx.env.numEngines:
    error "invalid client index", index=step.clientIndex
    return false

  let engine = ctx.env.engines(step.clientIndex)
  # Send the blob transactions
  for _ in 0..<step.transactionCount:
    let tc = BlobTx(
      recipient: some(DATAHASH_START_ADDRESS),
      gasLimit: 100000.GasInt,
      gasTip: step.blobTransactionGasTipCap,
      gasFee: step.blobTransactionGasFeeCap,
      blobGasFee: step.blobTransactionMaxBlobGasCost,
      blobCount: blobCountPerTx,
      blobID: ctx.txPool.currentBlobID,
    )

    let sender = ctx.env.accounts(step.accountIndex)
    let res = if step.replaceTransactions:
                ctx.env.replaceTx(sender, engine, tc)
              else:
                ctx.env.sendTx(sender, engine, tc)

    if res.isErr:
      return false

    let blobTx = res.get
    if not step.skipVerificationFromNode:
      let r = verifyTransactionFromNode(engine.client, blobTx)
      if r.isErr:
        error "verify tx from node", msg=r.error
        return false

    let txHash = rlpHash(blobTx)
    ctx.txPool.addBlobTransaction(blobTx)
    ctx.txPool.hashesByIndex[ctx.txPool.currentTxIndex] = txHash
    ctx.txPool.currentTxIndex += 1
    info "Sent blob transaction", txHash=txHash.short
    ctx.txPool.currentBlobID += BlobID(blobCountPerTx)

  return true

method description*(step: SendBlobTransactions): string =
  "SendBlobTransactions: $1 transactions, $2 blobs each, $3 max blob gas fee" % [
    $step.transactionCount, $step.getBlobsPerTransaction(), $step.blobTransactionMaxBlobGasCost]
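The blob accounting behind these parameters is simple Cancun arithmetic: each blob consumes GAS_PER_BLOB, and a block may carry at most six blobs, so a single transaction with blobsPerTransaction = 6 already saturates a block. A quick standalone check:

const
  GAS_PER_BLOB           = 131072'u64   # 2^17
  MAX_BLOB_GAS_PER_BLOCK = 786432'u64   # 6 blobs on Cancun mainnet

func blobGasUsed(blobCount: int): uint64 =
  blobCount.uint64 * GAS_PER_BLOB

when isMainModule:
  doAssert blobGasUsed(1) == GAS_PER_BLOB
  doAssert blobGasUsed(6) == MAX_BLOB_GAS_PER_BLOCK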
@ -0,0 +1,63 @@
# Send a modified version of the latest payload produced using NewPayloadV3
type
  SendModifiedLatestPayload* = ref object of TestStep
    clientID*: uint64
    newPayloadCustomizer*: NewPayloadCustomizer

method execute*(step: SendModifiedLatestPayload, ctx: CancunTestContext): bool =
  # Get the latest payload. Not ported yet; kept from the Go original:
  #[
  var (
    payload                           = &env.clMock.latestPayloadBuilt
    expectedError  *int               = nil
    expectedStatus test.PayloadStatus = test.Valid
    err            error              = nil
  )
  if payload == nil {
    return fmt.Errorf("TEST-FAIL: no payload available")
  }
  if env.clMock.LatestBlobBundle == nil {
    return fmt.Errorf("TEST-FAIL: no blob bundle available")
  }
  if step.NewPayloadCustomizer == nil {
    return fmt.Errorf("TEST-FAIL: no payload customizer available")
  }

  // Send a custom new payload
  step.NewPayloadCustomizer.setEngineAPIVersionResolver(t.ForkConfig)
  payload, err = step.NewPayloadCustomizer.customizePayload(payload)
  if err != nil {
    fatal "Error customizing payload: %v", err
  }
  expectedError, err = step.NewPayloadCustomizer.getExpectedError()
  if err != nil {
    fatal "Error getting custom expected error: %v", err
  }
  if step.NewPayloadCustomizer.getExpectInvalidStatus() {
    expectedStatus = test.Invalid
  }

  // Send the payload
  if step.ClientID >= uint64(len(t.TestEngines)) {
    return fmt.Errorf("invalid client index %d", step.ClientID)
  }
  testEngine = t.TestEngines[step.ClientID].WithEngineAPIVersionResolver(step.NewPayloadCustomizer)
  r = env.client.NewPayload(payload)
  if expectedError != nil {
    r.ExpectErrorCode(*expectedError)
  } else {
    r.ExpectStatus(expectedStatus)
  }
  ]#
  return true

method description*(step: SendModifiedLatestPayload): string =
  #[
  TODO: Figure out if we need this.
  desc = fmt.Sprintf("SendModifiedLatestPayload: client %d, expected invalid=%T, ", step.ClientID, step.NewPayloadCustomizer.getExpectInvalidStatus())
  if step.VersionedHashes != nil {
    desc += step.VersionedHashes.Description()
  }
  ]#
  "SendModifiedLatestPayload: client $1" % [$step.clientID]
File diff suppressed because it is too large
@ -2,7 +2,7 @@ import
   std/[tables],
   chronicles,
   nimcrypto/sysrand,
-  stew/[byteutils, endians2],
+  stew/[byteutils],
   eth/common, chronos,
   json_rpc/rpcclient,
   ../../../nimbus/beacon/execution_types,
@ -12,7 +12,8 @@ import
   ../../../nimbus/common as nimbus_common,
   ./client_pool,
   ./engine_env,
-  ./engine_client
+  ./engine_client,
+  ./types

 import web3/engine_api_types except Hash256 # conflict with the one from eth/common

@ -24,9 +25,10 @@ type
   # Number of required slots before a block which was set as Head moves to `safe` and `finalized` respectively
   slotsToSafe*     : int
   slotsToFinalized*: int
+  safeSlotsToImportOptimistically*: int

   # Wait time before attempting to get the payload
-  payloadProductionClientDelay: int
+  payloadProductionClientDelay*: int

   # Block production related
   blockTimestampIncrement*: Option[int]
@ -52,6 +54,7 @@ type
   latestPayloadBuilt*  : ExecutionPayload
   latestBlockValue*    : Option[UInt256]
   latestBlobsBundle*   : Option[BlobsBundleV1]
+  latestShouldOverrideBuilder*: Option[bool]
   latestPayloadAttributes*: PayloadAttributes
   latestExecutedPayload*  : ExecutionPayload
   latestForkchoice*       : ForkchoiceStateV1
@ -60,7 +63,6 @@ type
   firstPoSBlockNumber       : Option[uint64]
   ttdReached*               : bool
   transitionPayloadTimestamp: Option[int]
-  safeSlotsToImportOptimistically: int
   chainTotalDifficulty      : UInt256

   # Shanghai related
@ -68,6 +70,7 @@ type
   BlockProcessCallbacks* = object
     onPayloadProducerSelected*   : proc(): bool {.gcsafe.}
+    onPayloadAttributesGenerated*: proc(): bool {.gcsafe.}
     onRequestNextPayload*        : proc(): bool {.gcsafe.}
     onGetPayload*                : proc(): bool {.gcsafe.}
     onNewPayloadBroadcast*       : proc(): bool {.gcsafe.}
@ -100,7 +103,7 @@ proc init(cl: CLMocker, eng: EngineEnv, com: CommonRef) =
   cl.com = com
   cl.slotsToSafe = 1
   cl.slotsToFinalized = 2
-  cl.payloadProductionClientDelay = 1
+  cl.payloadProductionClientDelay = 0
   cl.headerHistory[0] = com.genesisHeader()

 proc newClMocker*(eng: EngineEnv, com: CommonRef): CLMocker =
@ -179,11 +182,6 @@ func getNextBlockTimestamp(cl: CLMocker): EthTime =
 func setNextWithdrawals(cl: CLMocker, nextWithdrawals: Option[seq[WithdrawalV1]]) =
   cl.nextWithdrawals = nextWithdrawals

-func timestampToBeaconRoot(timestamp: Quantity): FixedBytes[32] =
-  # Generates a deterministic hash from the timestamp
-  let h = keccakHash(timestamp.uint64.toBytesBE)
-  FixedBytes[32](h.data)
-
 func isShanghai(cl: CLMocker, timestamp: Quantity): bool =
   let ts = EthTime(timestamp.uint64)
   cl.com.isShanghaiOrLater(ts)
@ -222,7 +220,7 @@ proc pickNextPayloadProducer(cl: CLMocker): bool =
   doAssert cl.nextBlockProducer != nil
   return true

-proc requestNextPayload(cl: CLMocker): bool =
+proc generatePayloadAttributes(cl: CLMocker) =
   # Generate a random value for the PrevRandao field
   var nextPrevRandao: common.Hash256
   doAssert randomBytes(nextPrevRandao.data) == 32
@ -246,6 +244,7 @@ proc requestNextPayload(cl: CLMocker): bool =
   let number = cl.latestHeader.blockNumber.truncate(uint64) + 1
   cl.prevRandaoHistory[number] = nextPrevRandao

+proc requestNextPayload(cl: CLMocker): bool =
   let version = cl.latestPayloadAttributes.version
   let client = cl.nextBlockProducer.client
   let res = client.forkchoiceUpdated(version, cl.latestForkchoice, some(cl.latestPayloadAttributes))
@ -290,6 +289,7 @@ proc getNextPayload(cl: CLMocker): bool =
   cl.latestPayloadBuilt = x.executionPayload
   cl.latestBlockValue = x.blockValue
   cl.latestBlobsBundle = x.blobsBundle
+  cl.latestShouldOverrideBuilder = x.shouldOverrideBuilder

   let beaconRoot = ethHash cl.latestPayloadAttributes.parentBeaconblockRoot
   let header = blockHeader(cl.latestPayloadBuilt, beaconRoot)
@ -333,7 +333,7 @@ proc getNextPayload(cl: CLMocker): bool =
   return true

 func versionedHashes(bb: BlobsBundleV1): seq[Web3Hash] =
-  doAssert(bb.commitments.len > 0)
+  #doAssert(bb.commitments.len > 0)
   result = newSeqOfCap[BlockHash](bb.commitments.len)

   for com in bb.commitments:
@ -481,6 +481,12 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
   if not cb.onPayloadProducerSelected():
     return false

+  cl.generatePayloadAttributes()
+
+  if cb.onPayloadAttributesGenerated != nil:
+    if not cb.onPayloadAttributesGenerated():
+      return false
+
   if not cl.requestNextPayload():
     return false

@ -491,7 +497,9 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
     return false

   # Give the client a delay between getting the payload ID and actually retrieving the payload
-  #time.Sleep(PayloadProductionClientDelay)
+  if cl.payloadProductionClientDelay != 0:
+    let period = chronos.seconds(cl.payloadProductionClientDelay)
+    waitFor sleepAsync(period)

   if not cl.getNextPayload():
     return false
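The new delay is plain chronos: an integer number of seconds, skipped entirely when zero. As a standalone sketch of the same guard:

import chronos

proc maybeDelay(seconds: int) =
  # zero means "no configured delay": return immediately
  if seconds != 0:
    waitFor sleepAsync(chronos.seconds(seconds))

when isMainModule:
  maybeDelay(0)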
@ -16,8 +16,6 @@ type
   exec*: proc(env: TestEnv): bool
   ttd*: int64
   chainFile*: string
-  slotsToFinalized*: int
-  slotsToSafe*: int

 template testNP(res, cond: untyped, validHash = none(common.Hash256)) =
   testCond res.isOk
@ -81,7 +81,8 @@ proc getPayload*(client: RpcClient,
     ok(GetPayloadResponse(
       executionPayload: executionPayload(x.executionPayload),
       blockValue: some(x.blockValue),
-      blobsBundle: some(x.blobsBundle)
+      blobsBundle: some(x.blobsBundle),
+      shouldOverrideBuilder: some(x.shouldOverrideBuilder),
     ))
   elif version == Version.V2:
     let x = client.getPayloadV2(payloadId).valueOr:
@ -231,6 +232,7 @@ proc toBlockHeader(bc: eth_api.BlockObject): common.BlockHeader =
     withdrawalsRoot: bc.withdrawalsRoot,
     blobGasUsed    : maybeU64(bc.blobGasUsed),
     excessBlobGas  : maybeU64(bc.excessBlobGas),
+    parentBeaconBlockRoot: bc.parentBeaconBlockRoot,
   )

 proc toTransactions(txs: openArray[JsonNode]): seq[Transaction] =
@ -293,7 +295,7 @@ type
     s*: UInt256
     chainId*: Option[ChainId]
     accessList*: Option[seq[rpc_types.AccessTuple]]
-    maxFeePerBlobGas*: Option[GasInt]
+    maxFeePerBlobGas*: Option[UInt256]
     versionedHashes*: Option[VersionedHashes]

 proc toRPCReceipt(rec: eth_api.ReceiptObject): RPCReceipt =
@ -336,7 +338,7 @@ proc toRPCTx(tx: eth_api.TransactionObject): RPCTx =
     s: UInt256.fromHex(string tx.s),
     chainId: maybeChainId(tx.chainId),
     accessList: tx.accessList,
-    maxFeePerBlobGas: maybeInt64(tx.maxFeePerBlobGas),
+    maxFeePerBlobGas: maybeU256(tx.maxFeePerBlobGas),
     versionedHashes: tx.versionedHashes,
   )
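The GasInt to UInt256 switch matters because EIP-4844 blob fee caps are full 256-bit quantities that can overflow an int64. A sketch of a `maybeU256`-style helper under an assumed input shape (the real helper decodes the RPC quantity type, not a raw string):

import std/options, stint

func maybeU256(hex: Option[string]): Option[UInt256] =
  # map an optional 0x-prefixed quantity to an optional UInt256
  if hex.isNone: none(UInt256)
  else: some(UInt256.fromHex(hex.get))

when isMainModule:
  doAssert maybeU256(none(string)).isNone
  doAssert maybeU256(some("0x01")).get == 1.u256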
@ -1,19 +1,24 @@
 import
   std/times,
+  chronicles,
+  stew/results,
   ./types,
-  ../sim_utils
+  ../sim_utils,
+  ../../../nimbus/core/eip4844

 import
   ./engine_tests,
   ./auths_tests,
   ./exchange_cap_tests,
-  ./withdrawal_tests
+  ./withdrawal_tests,
+  ./cancun_tests

 proc combineTests(): seq[TestDesc] =
   result.add wdTestList
   result.add ecTestList
   result.add authTestList
   result.add engineTestList
+  result.add cancunTestList

 let
   testList = combineTests()
@ -22,6 +27,11 @@ proc main() =
   var stat: SimStat
   let start = getTime()

+  let res = loadKzgTrustedSetup()
+  if res.isErr:
+    fatal "Cannot load baked in Kzg trusted setup", msg=res.error
+    quit(QuitFailure)
+
   for x in testList:
     let status = if x.run(x.spec):
       TestStatus.OK
@ -1,7 +1,8 @@
 import
   ./engine/engine_spec,
   ./types,
-  ./test_env
+  ./test_env,
+  ./base_spec

 proc specExecute(ws: BaseSpec): bool =
   var
@ -10,12 +11,7 @@ proc specExecute(ws: BaseSpec): bool =

   env.engine.setRealTTD(ws.ttd)
   env.setupCLMock()
-  if ws.slotsToFinalized != 0:
-    env.slotsToFinalized(ws.slotsToFinalized)
-  if ws.slotsToSafe != 0:
-    env.slotsToSafe(ws.slotsToSafe)
+  ws.configureCLMock(env.clMock)
   result = ws.exec(env)
   env.close()
@ -1,6 +1,7 @@
 import
   chronicles,
   eth/keys,
+  stew/results,
   json_rpc/rpcclient,
   ../../../nimbus/config,
   ../../../nimbus/common,
@ -90,6 +91,15 @@ proc addEngine*(env: TestEnv, addToCL: bool = true): EngineEnv =
     env.clMock.addEngine(eng)
   eng

+func engines*(env: TestEnv, idx: int): EngineEnv =
+  env.clients[idx]
+
+func numEngines*(env: TestEnv): int =
+  env.clients.len
+
+func accounts*(env: TestEnv, idx: int): TestAccount =
+  env.sender.getAccount(idx)
+
 proc makeTx*(env: TestEnv, tc: BaseTx, nonce: AccountNonce): Transaction =
   env.sender.makeTx(tc, nonce)

@ -132,6 +142,12 @@ proc sendTx*(env: TestEnv, tx: Transaction): bool =
   let client = env.engine.client
   sendTx(client, tx)

+proc sendTx*(env: TestEnv, sender: TestAccount, eng: EngineEnv, tc: BlobTx): Result[Transaction, void] =
+  env.sender.sendTx(sender, eng.client, tc)
+
+proc replaceTx*(env: TestEnv, sender: TestAccount, eng: EngineEnv, tc: BlobTx): Result[Transaction, void] =
+  env.sender.replaceTx(sender, eng.client, tc)
+
 proc verifyPoWProgress*(env: TestEnv, lastBlockHash: common.Hash256): bool =
   let res = waitFor env.client.verifyPoWProgress(lastBlockHash)
   if res.isErr:
@ -139,9 +155,3 @@ proc verifyPoWProgress*(env: TestEnv, lastBlockHash: common.Hash256): bool =
     return false

   true
-
-proc slotsToSafe*(env: TestEnv, x: int) =
-  env.clMock.slotsToSafe = x
-
-proc slotsToFinalized*(env: TestEnv, x: int) =
-  env.clMock.slotsToFinalized = x
@ -5,6 +5,7 @@ import
   nimcrypto/sha2,
   chronicles,
   ./engine_client,
+  ./cancun/blobs,
   ../../../nimbus/transaction,
   ../../../nimbus/common,
   ../../../nimbus/utils/utils
@ -22,7 +23,15 @@ type
   padByte*  : uint8
   initcode* : seq[byte]

-  TestAccount = object
+  # Blob transaction creator
+  BlobTx* = object of BaseTx
+    gasFee*    : GasInt
+    gasTip*    : GasInt
+    blobGasFee*: UInt256
+    blobID*    : BlobID
+    blobCount* : int
+
+  TestAccount* = object
     key    : PrivateKey
     address: EthAddress
     index  : int
@ -38,8 +47,21 @@ type
     key*  : PrivateKey
     nonce*: AccountNonce

+  CustomTransactionData* = object
+    nonce*              : Option[uint64]
+    gasPriceOrGasFeeCap*: Option[GasInt]
+    gasTipCap*          : Option[GasInt]
+    gas*                : Option[GasInt]
+    to*                 : Option[common.EthAddress]
+    value*              : Option[UInt256]
+    data*               : Option[seq[byte]]
+    chainId*            : Option[ChainId]
+    signature*          : Option[UInt256]
+
 const
   TestAccountCount = 1000
+  gasPrice* = 30.gwei
+  gasTipPrice* = 1.gwei

 func toAddress(key: PrivateKey): EthAddress =
   toKeyPair(key).pubkey.toCanonicalAddress()
@ -67,6 +89,9 @@ proc getNextNonce(sender: TxSender, address: EthAddress): uint64 =
   sender.nonceMap[address] = nonce + 1
   nonce

+proc getLastNonce(sender: TxSender, address: EthAddress): uint64 =
+  sender.nonceMap.getOrDefault(address, 0'u64)
+
 proc fillBalance(sender: TxSender, params: NetworkParams) =
   for x in sender.accounts:
     params.genesis.alloc[x.address] = GenesisAccount(
@ -89,9 +114,6 @@ proc getTxType(tc: BaseTx, nonce: uint64): TxType =

 proc makeTx(params: MakeTxParams, tc: BaseTx): Transaction =
   const
-    gasPrice = 30.gwei
-    gasTipPrice = 1.gwei
-
     gasFeeCap = gasPrice
     gasTipCap = gasTipPrice
@ -212,3 +234,74 @@ proc sendTx*(client: RpcClient, tx: Transaction): bool =
     error "Unable to send transaction", msg=rr.error
     return false
   return true

+proc makeTx*(params: MakeTxParams, tc: BlobTx): Transaction =
+  # Need tx wrap data that will pass blob verification
+  let data = blobDataGenerator(tc.blobID, tc.blobCount)
+  doAssert(tc.recipient.isSome, "nil recipient address")
+
+  # Collect fields for transaction
+  let
+    gasFeeCap = if tc.gasFee != 0.GasInt: tc.gasFee
+                else: gasPrice
+    gasTipCap = if tc.gasTip != 0.GasInt: tc.gasTip
+                else: gasTipPrice
+
+  let unsignedTx = Transaction(
+    txType          : TxEIP4844,
+    chainId         : params.chainId,
+    nonce           : params.nonce,
+    maxPriorityFee  : gasTipCap,
+    maxFee          : gasFeeCap,
+    gasLimit        : tc.gasLimit,
+    to              : tc.recipient,
+    value           : tc.amount,
+    payload         : tc.payload,
+    maxFeePerBlobGas: tc.blobGasFee,
+    versionedHashes : data.hashes,
+  )
+
+  var tx = signTransaction(unsignedTx, params.key, params.chainId, eip155 = true)
+  tx.networkPayload = NetworkPayload(
+    blobs      : data.blobs,
+    commitments: data.commitments,
+    proofs     : data.proofs,
+  )
+
+  tx
+
+proc getAccount*(sender: TxSender, idx: int): TestAccount =
+  sender.accounts[idx]
+
+proc sendTx*(sender: TxSender, acc: TestAccount, client: RpcClient, tc: BlobTx): Result[Transaction, void] =
+  let
+    params = MakeTxParams(
+      chainId: sender.chainId,
+      key: acc.key,
+      nonce: sender.getNextNonce(acc.address),
+    )
+    tx = params.makeTx(tc)
+
+  let rr = client.sendTransaction(tx)
+  if rr.isErr:
+    error "Unable to send transaction", msg=rr.error
+    return err()
+  return ok(tx)
+
+proc replaceTx*(sender: TxSender, acc: TestAccount, client: RpcClient, tc: BlobTx): Result[Transaction, void] =
+  let
+    params = MakeTxParams(
+      chainId: sender.chainId,
+      key: acc.key,
+      nonce: sender.getLastNonce(acc.address),
+    )
+    tx = params.makeTx(tc)
+
+  let rr = client.sendTransaction(tx)
+  if rr.isErr:
+    error "Unable to send transaction", msg=rr.error
+    return err()
+  return ok(tx)
+
+proc customizeTransaction*(sender: TxSender, baseTx: Transaction, custTx: CustomTransactionData): Transaction =
+  # TODO: apply the custom fields to a copy of baseTx
+  discard
@ -1,7 +1,7 @@
 import
   std/[options, typetraits, strutils],
   eth/common,
-  stew/byteutils,
+  stew/[byteutils, endians2],
   web3/ethtypes,
   web3/engine_api_types,
   ../../../nimbus/beacon/execution_types,
@ -11,6 +11,18 @@ type
   BaseSpec* = ref object of RootObj
     txType*: Option[TxType]

+    # CL Mocker configuration for slots to `safe` and `finalized` respectively
+    slotsToSafe*: int
+    slotsToFinalized*: int
+    safeSlotsToImportOptimistically*: int
+    blockTimestampIncrement*: int
+    timeoutSeconds*: int
+    mainFork*: string
+    genesisTimestamp*: int
+    forkHeight*: int
+    forkTime*: uint64
+    previousForkTime*: uint64
+
   TestDesc* = object
     name* : string
     about*: string
@ -21,6 +33,29 @@ const
   DefaultTimeout* = 60 # seconds
   DefaultSleep* = 1
   prevRandaoContractAddr* = hexToByteArray[20]("0000000000000000000000000000000000000316")
+  GenesisTimestamp* = 0x1234
+  ForkParis*    = "Paris"
+  ForkShanghai* = "Shanghai"
+  ForkCancun*   = "Cancun"
+
+func toAddress*(x: UInt256): EthAddress =
+  # take the low 20 bytes of the 32-byte big-endian representation
+  let mm = x.toByteArrayBE
+  var i = 0
+  for j in 12..31:
+    result[i] = mm[j]
+    inc i
+
+func toHash*(x: UInt256): common.Hash256 =
+  common.Hash256(data: x.toByteArrayBE)
+
+func timestampToBeaconRoot*(timestamp: Quantity): FixedBytes[32] =
+  # Generates a deterministic hash from the timestamp
+  let h = keccakHash(timestamp.uint64.toBytesBE)
+  FixedBytes[32](h.data)
+
+func beaconRoot*(x: UInt256): FixedBytes[32] =
+  FixedBytes[32](x.toByteArrayBE)
+
 template testCond*(expr: untyped) =
   if not (expr):
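`toAddress` relies on the fact that a 20-byte address is the low-order tail of a 32-byte big-endian word. A standalone check of that slice arithmetic; `lowBytes20` is a hypothetical mirror of the helper, not framework code:

import stint, stew/byteutils

func lowBytes20(x: UInt256): array[20, byte] =
  # bytes 12..31 of the big-endian encoding are the address tail
  let mm = x.toByteArrayBE
  for i in 0 ..< 20:
    result[i] = mm[i + 12]

when isMainModule:
  doAssert lowBytes20(0x0316.u256).toHex ==
    "0000000000000000000000000000000000000316"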
@ -6,9 +6,11 @@ import
|
||||||
withdrawals/wd_reorg_spec,
|
withdrawals/wd_reorg_spec,
|
||||||
withdrawals/wd_sync_spec,
|
withdrawals/wd_sync_spec,
|
||||||
./types,
|
./types,
|
||||||
./test_env
|
./test_env,
|
||||||
|
./base_spec
|
||||||
|
|
||||||
proc specExecute[T](ws: BaseSpec): bool =
|
proc specExecute[T](ws: BaseSpec): bool =
|
||||||
|
ws.mainFork = ForkShanghai
|
||||||
let
|
let
|
||||||
ws = T(ws)
|
ws = T(ws)
|
||||||
conf = envConfig(ws.getForkConfig())
|
conf = envConfig(ws.getForkConfig())
|
||||||
|
@ -29,10 +31,10 @@ let wdTestList* = [
|
||||||
about: "Tests a 8 block re-org using NewPayload. Re-org does not change withdrawals fork height",
|
about: "Tests a 8 block re-org using NewPayload. Re-org does not change withdrawals fork height",
|
||||||
run: specExecute[ReorgSpec],
|
run: specExecute[ReorgSpec],
|
||||||
spec: ReorgSpec(
|
spec: ReorgSpec(
|
||||||
slotsToSafe: u256(32),
|
slotsToSafe: 32,
|
||||||
slotsToFinalized: u256(64),
|
slotsToFinalized: 64,
|
||||||
timeoutSeconds: 300,
|
timeoutSeconds: 300,
|
||||||
wdForkHeight: 1, # Genesis is Pre-Withdrawals
|
forkHeight: 1, # Genesis is Pre-Withdrawals
|
||||||
wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
||||||
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
||||||
reOrgBlockCount: 8,
|
reOrgBlockCount: 8,
|
||||||
|
@ -45,10 +47,10 @@ let wdTestList* = [
|
||||||
# the payload at the height of the fork
|
# the payload at the height of the fork
|
||||||
run: specExecute[ReorgSpec],
|
run: specExecute[ReorgSpec],
|
||||||
spec: ReorgSpec(
|
spec: ReorgSpec(
|
||||||
slotsToSafe: u256(32),
|
slotsToSafe: 32,
|
||||||
slotsToFinalized: u256(64),
|
slotsToFinalized: 64,
|
||||||
timeoutSeconds: 300,
|
timeoutSeconds: 300,
|
||||||
wdForkHeight: 8, # Genesis is Pre-Withdrawals
|
forkHeight: 8, # Genesis is Pre-Withdrawals
|
||||||
wdBlockCount: 8,
|
wdBlockCount: 8,
|
||||||
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
||||||
reOrgBlockCount: 10,
|
reOrgBlockCount: 10,
|
||||||
|
@ -61,10 +63,10 @@ let wdTestList* = [
|
||||||
# than the canonical chain
|
# than the canonical chain
|
||||||
run: specExecute[ReorgSpec],
|
run: specExecute[ReorgSpec],
|
||||||
spec: ReorgSpec(
|
spec: ReorgSpec(
|
||||||
slotsToSafe: u256(32),
|
slotsToSafe: 32,
|
||||||
slotsToFinalized: u256(64),
|
slotsToFinalized: 64,
|
||||||
timeoutSeconds: 300,
|
timeoutSeconds: 300,
|
||||||
wdForkHeight: 8, # Genesis is Pre-Withdrawals
|
forkHeight: 8, # Genesis is Pre-Withdrawals
|
||||||
wdBlockCount: 8,
|
wdBlockCount: 8,
|
||||||
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
||||||
reOrgBlockCount: 10,
|
reOrgBlockCount: 10,
|
||||||
|
@ -78,13 +80,13 @@ let wdTestList* = [
|
||||||
# than the canonical chain
|
# than the canonical chain
|
||||||
run: specExecute[ReorgSpec],
|
run: specExecute[ReorgSpec],
|
||||||
spec: ReorgSpec(
|
spec: ReorgSpec(
|
||||||
slotsToSafe: u256(32),
|
slotsToSafe: 32,
|
||||||
slotsToFinalized: u256(64),
|
slotsToFinalized: 64,
|
||||||
timeoutSeconds: 300,
|
timeoutSeconds: 300,
|
||||||
wdForkHeight: 8, # Genesis is Pre-Withdrawals
|
forkHeight: 8, # Genesis is Pre-Withdrawals
|
||||||
wdBlockCount: 8,
|
wdBlockCount: 8,
|
||||||
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
|
||||||
timeIncrements: 2,
|
blockTimestampIncrement: 2,
|
||||||
reOrgBlockCount: 10,
|
reOrgBlockCount: 10,
|
||||||
reOrgViaSync: true,
|
reOrgViaSync: true,
|
||||||
sidechaintimeIncrements: 1,
|
sidechaintimeIncrements: 1,
|
||||||
|
@ -94,10 +96,10 @@ let wdTestList* = [
about: "Tests a simple 1 block re-org",
run: specExecute[ReorgSpec],
spec: ReorgSpec(
-slotsToSafe: u256(32),
+slotsToSafe: 32,
-slotsToFinalized: u256(64),
+slotsToFinalized: 64,
timeoutSeconds: 300,
-wdForkHeight: 1, # Genesis is Pre-Withdrawals
+forkHeight: 1, # Genesis is Pre-Withdrawals
wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
reOrgBlockCount: 1,
@ -108,10 +110,10 @@ let wdTestList* = [
about: "Tests a 8 block re-org using NewPayload. Re-org does not change withdrawals fork height",
run: specExecute[ReorgSpec],
spec: ReorgSpec(
-slotsToSafe: u256(32),
+slotsToSafe: 32,
-slotsToFinalized: u256(64),
+slotsToFinalized: 64,
timeoutSeconds: 300,
-wdForkHeight: 1, # Genesis is Pre-Withdrawals
+forkHeight: 1, # Genesis is Pre-Withdrawals
wdBlockCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
reOrgBlockCount: 8,
@ -124,10 +126,10 @@ let wdTestList* = [
"the payload at the height of the fork\n",
run: specExecute[ReorgSpec],
spec: ReorgSpec(
-slotsToSafe: u256(32),
+slotsToSafe: 32,
-slotsToFinalized: u256(64),
+slotsToFinalized: 64,
timeoutSeconds: 300,
-wdForkHeight: 8, # Genesis is Pre-Withdrawals
+forkHeight: 8, # Genesis is Pre-Withdrawals
wdBlockCount: 8,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
reOrgBlockCount: 10,
@ -140,10 +142,10 @@ let wdTestList* = [
# than the canonical chain
run: specExecute[ReorgSpec],
spec: ReorgSpec(
-slotsToSafe: u256(32),
+slotsToSafe: 32,
-slotsToFinalized: u256(64),
+slotsToFinalized: 64,
timeoutSeconds: 300,
-wdForkHeight: 8, # Genesis is Pre-Withdrawals
+forkHeight: 8, # Genesis is Pre-Withdrawals
wdBlockCount: 8,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
reOrgBlockCount: 10,
@ -157,13 +159,13 @@ let wdTestList* = [
# than the canonical chain
run: specExecute[ReorgSpec],
spec: ReorgSpec(
-slotsToSafe: u256(32),
+slotsToSafe: 32,
-slotsToFinalized: u256(64),
+slotsToFinalized: 64,
timeoutSeconds: 300,
-wdForkHeight: 8, # Genesis is Pre-Withdrawals
+forkHeight: 8, # Genesis is Pre-Withdrawals
wdBlockCount: 8,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
-timeIncrements: 2,
+blockTimestampIncrement: 2,
reOrgBlockCount: 10,
reOrgViaSync: false,
sidechaintimeIncrements: 1,
@ -180,7 +182,7 @@ let wdTestList* = [
run: specExecute[SyncSpec],
spec: SyncSpec(
timeoutSeconds: 6,
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 2,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: 1,
@ -196,7 +198,7 @@ let wdTestList* = [
"- Wait for sync and verify withdrawn account's balance\n",
run: specExecute[SyncSpec],
spec: SyncSpec(
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 2,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: 1,
@ -210,7 +212,7 @@ let wdTestList* = [
"- Wait for sync and verify withdrawn account's balance\n",
run: specExecute[SyncSpec],
spec: SyncSpec(
-wdForkHeight: 0,
+forkHeight: 0,
wdBlockCount: 2,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: 1,
@ -225,7 +227,7 @@ let wdTestList* = [
"- Wait for sync, which include syncing a pre-Withdrawals block, and verify withdrawn account's balance\n",
run: specExecute[SyncSpec],
spec: SyncSpec(
-wdForkHeight: 2,
+forkHeight: 2,
wdBlockCount: 2,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
@ -241,7 +243,7 @@ let wdTestList* = [
"- Wait for sync, which include syncing a pre-Withdrawals block, and verify withdrawn account's balance\n",
run: specExecute[SyncSpec],
spec: SyncSpec(
-wdForkHeight: 2,
+forkHeight: 2,
wdBlockCount: 2,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
@ -257,7 +259,7 @@ let wdTestList* = [
run: specExecute[SyncSpec],
spec: SyncSpec(
timeoutSeconds: 100,
-wdForkHeight: 2,
+forkHeight: 2,
wdBlockCount: 128,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: 1024,
@ -269,7 +271,7 @@ let wdTestList* = [
name: "Max Initcode Size",
run: specExecute[MaxInitcodeSizeSpec],
spec: MaxInitcodeSizeSpec(
-wdForkHeight: 2, # Block 1 is Pre-Withdrawals
+forkHeight: 2, # Block 1 is Pre-Withdrawals
wdBlockCount: 2,
overflowMaxInitcodeTxCountBeforeFork: 0,
overflowMaxInitcodeTxCountAfterFork: 1,
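The two overflow counters above control how many EIP-3860-violating transactions the test sends on each side of the fork. As a minimal standalone sketch of the limit being probed (constants per EIP-170 and EIP-3860; the names are illustrative, not the framework's):

# A creation tx carrying initcode one byte over the EIP-3860 cap
# must be rejected once Shanghai is active.
const
  MAX_CODE_SIZE = 24576                  # EIP-170 runtime code cap
  MAX_INITCODE_SIZE = 2 * MAX_CODE_SIZE  # EIP-3860 initcode cap, 49152 bytes

doAssert MAX_INITCODE_SIZE + 1 == 49153  # smallest overflowing initcode length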
@ -280,7 +282,7 @@ let wdTestList* = [
about: "Verify the block value returned in GetPayloadV2.",
run: specExecute[BlockValueSpec],
spec: BlockValueSpec(
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 1,
)),
# Base tests
@ -289,7 +291,7 @@ let wdTestList* = [
about: "Tests the withdrawals fork happening since genesis (e.g. on a testnet).",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 0,
+forkHeight: 0,
wdBlockCount: 2, # Genesis is a withdrawals block
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
)),
@ -298,7 +300,7 @@ let wdTestList* = [
about: "Tests the withdrawals fork happening directly after genesis.",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 1, # Only Genesis is Pre-Withdrawals
+forkHeight: 1, # Only Genesis is Pre-Withdrawals
wdBlockCount: 1,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
)),
@ -309,7 +311,7 @@ let wdTestList* = [
" client is expected to respond with the appropriate error.",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 2, # Genesis and Block 1 are Pre-Withdrawals
+forkHeight: 2, # Genesis and Block 1 are Pre-Withdrawals
wdBlockCount: 1,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
)),
@ -320,7 +322,7 @@ let wdTestList* = [
" client is expected to respond with the appropriate error.",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 3, # Genesis, Block 1 and 2 are Pre-Withdrawals
+forkHeight: 3, # Genesis, Block 1 and 2 are Pre-Withdrawals
wdBlockCount: 1,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
)),
@ -329,7 +331,7 @@ let wdTestList* = [
about: "Make multiple withdrawals to a single account.",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 1,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: 1,
@ -342,7 +344,7 @@ let wdTestList* = [
" is not in ordered sequence.",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 1,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: 2,
@ -354,7 +356,7 @@ let wdTestList* = [
# TimeoutSeconds: 240,
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 4,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK * 5,
wdAbleAccountCount: 1024,
@ -364,7 +366,7 @@ let wdTestList* = [
about: "Make multiple withdrawals where the amount withdrawn is 0.",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 1,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: 2,
@ -375,7 +377,7 @@ let wdTestList* = [
about: "Produce withdrawals block with zero withdrawals.",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 1,
wdPerBlock: 0,
)),
@ -384,7 +386,7 @@ let wdTestList* = [
about: "Send a valid payload with a corrupted hash using engine_newPayloadV2.",
run: specExecute[WDBaseSpec],
spec: WDBaseSpec(
-wdForkHeight: 1,
+forkHeight: 1,
wdBlockCount: 1,
testCorrupedHashPayloads: true,
)

@ -11,18 +11,15 @@ import
../test_env,
../engine_client,
../types,
+../base_spec,
../../../nimbus/common/common,
../../../nimbus/utils/utils,
../../../nimbus/common/chain_config,
../../../nimbus/beacon/execution_types,
../../../nimbus/beacon/web3_eth_conv

-import ../../../tools/common/helpers except LogLevel

type
WDBaseSpec* = ref object of BaseSpec
-timeIncrements*: int # Timestamp increments per block throughout the test
-wdForkHeight*: int # Withdrawals activation fork height
wdBlockCount*: int # Number of blocks on and after withdrawals fork activation
wdPerBlock*: int # Number of withdrawals per block
wdAbleAccountCount*: int # Number of accounts to withdraw to (round-robin)
@ -37,7 +34,6 @@ type
nextIndex*: int

const
-GenesisTimestamp = 0x1234
WARM_COINBASE_ADDRESS = hexToByteArray[20]("0x0101010101010101010101010101010101010101")
PUSH0_ADDRESS = hexToByteArray[20]("0x0202020202020202020202020202020202020202")
MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK* = 16
@ -46,34 +42,14 @@ const
PUSH0_ADDRESS,
]

-# Get the per-block timestamp increments configured for this test
-func getBlockTimeIncrements*(ws: WDBaseSpec): int =
-if ws.timeIncrements == 0:
-return 1
-ws.timeIncrements

# Timestamp delta between genesis and the withdrawals fork
func getWithdrawalsGenesisTimeDelta*(ws: WDBaseSpec): int =
-ws.wdForkHeight * ws.getBlockTimeIncrements()
+ws.forkHeight * ws.getBlockTimeIncrements()

-# Calculates Shanghai fork timestamp given the amount of blocks that need to be
-# produced beforehand.
-func getWithdrawalsForkTime(ws: WDBaseSpec): int =
-GenesisTimestamp + ws.getWithdrawalsGenesisTimeDelta()

-# Generates the fork config, including withdrawals fork timestamp.
-func getForkConfig*(ws: WDBaseSpec): ChainConfig =
-result = getChainConfig("Shanghai")
-result.shanghaiTime = some(ws.getWithdrawalsForkTime().EthTime)

# Get the start account for all withdrawals.
func getWithdrawalsStartAccount*(ws: WDBaseSpec): UInt256 =
0x1000.u256

-func toAddress(x: UInt256): EthAddress =
-var mm = x.toByteArrayBE
-copyMem(result[0].addr, mm[11].addr, 20)

# Adds bytecode that unconditionally sets an storage key to specified account range
func addUnconditionalBytecode(g: Genesis, start, stop: UInt256) =
var acc = start
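The helpers removed here (getBlockTimeIncrements, getWithdrawalsForkTime, getForkConfig, toAddress) move behind the new ../base_spec import; only the genesis-to-fork time delta stays local. A minimal standalone sketch of that arithmetic, with illustrative values rather than the framework's types:

# Fork delta = fork height x per-block timestamp increment.
func genesisTimeDelta(forkHeight, blockTimeIncrement: int): int =
  forkHeight * blockTimeIncrement

doAssert genesisTimeDelta(8, 1) == 8    # default 1s increment
doAssert genesisTimeDelta(8, 2) == 16   # specs with blockTimestampIncrement: 2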
@ -177,7 +153,7 @@ proc verifyContractsStorage(ws: WDBaseSpec, env: TestEnv): Result[void, string]
r = env.client.storageAt(WARM_COINBASE_ADDRESS, latestPayloadNumber, latestPayloadNumber)
p = env.client.storageAt(PUSH0_ADDRESS, 0.u256, latestPayloadNumber)

-if latestPayloadNumber.truncate(int) >= ws.wdForkHeight:
+if latestPayloadNumber.truncate(int) >= ws.forkHeight:
# Shanghai
r.expectStorageEqual(WARM_COINBASE_ADDRESS, 100.u256) # WARM_STORAGE_READ_COST
p.expectStorageEqual(PUSH0_ADDRESS, latestPayloadNumber) # tx succeeded
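The two probes assert Shanghai EVM rules: 100 is the EIP-2929 WARM_STORAGE_READ_COST, so reading it back from the coinbase-touching contract implies EIP-3651 kept the coinbase warm, while a PUSH0 slot equal to the block number implies the EIP-3855 transaction executed. A standalone sketch of the gas delta involved (constants per EIP-2929, names illustrative):

const
  COLD_ACCOUNT_ACCESS_COST = 2600  # EIP-2929 cold account touch
  WARM_STORAGE_READ_COST = 100     # EIP-2929 warm access

# Pre-Shanghai the coinbase is cold on first touch; post-Shanghai
# (EIP-3651) it starts warm, saving 2500 gas per access.
doAssert COLD_ACCOUNT_ACCESS_COST - WARM_STORAGE_READ_COST == 2500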
@ -188,18 +164,13 @@ proc verifyContractsStorage(ws: WDBaseSpec, env: TestEnv): Result[void, string]

ok()

-# Changes the CL Mocker default time increments of 1 to the value specified
-# in the test spec.
-proc configureCLMock*(ws: WDBaseSpec, cl: CLMocker) =
-cl.blockTimestampIncrement = some(ws.getBlockTimeIncrements())

# Number of blocks to be produced (not counting genesis) before withdrawals
# fork.
func getPreWithdrawalsBlockCount*(ws: WDBaseSpec): int =
-if ws.wdForkHeight == 0:
+if ws.forkHeight == 0:
0
else:
-ws.wdForkHeight - 1
+ws.forkHeight - 1

# Number of payloads to be produced (pre and post withdrawals) during the entire test
func getTotalPayloadCount*(ws: WDBaseSpec): int =
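A worked check of the rule above, as a standalone sketch: genesis is not counted, so with the fork at genesis nothing precedes it, and with forkHeight 8 the blocks 1 through 7 are produced pre-fork:

func preWithdrawalsBlockCount(forkHeight: int): int =
  if forkHeight == 0: 0
  else: forkHeight - 1

doAssert preWithdrawalsBlockCount(0) == 0
doAssert preWithdrawalsBlockCount(8) == 7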
@ -235,7 +206,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
testCond ok

# Check if we have pre-Shanghai blocks
-if ws.getWithdrawalsForkTime() > GenesisTimestamp:
+if ws.getForkTime() > GenesisTimestamp:
# Check `latest` during all pre-shanghai blocks, none should
# contain `withdrawalsRoot`, including genesis.

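The guard now goes through the shared getForkTime helper; the pre-Shanghai checks only make sense when the fork lands strictly after genesis. A standalone sketch of the comparison, reusing the 0x1234 value of the removed GenesisTimestamp constant:

const GenesisTimestamp = 0x1234

func forkTime(forkHeight, increment: int): int =
  GenesisTimestamp + forkHeight * increment

doAssert forkTime(2, 1) > GenesisTimestamp    # pre-Shanghai blocks exist
doAssert forkTime(0, 1) == GenesisTimestamp   # fork at genesis, checks skipped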
@ -538,7 +509,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
let r = env.client.headerByNumber(bn, h)

var expectedWithdrawalsRoot: Option[common.Hash256]
-if bn >= ws.wdForkHeight.uint64:
+if bn >= ws.forkHeight.uint64:
let wds = ws.wdHistory.getWithdrawals(bn)
expectedWithdrawalsRoot = some(calcWithdrawalsRoot(wds.list))

@ -9,6 +9,7 @@ import
../test_env,
../engine_client,
../types,
+../base_spec,
../../../nimbus/beacon/web3_eth_conv

# Withdrawals re-org spec:
@ -22,9 +23,6 @@ type
# Whether the client should fetch the sidechain by syncing from the secondary client
reOrgViaSync* : bool
sidechainTimeIncrements*: int
-slotsToSafe* : UInt256
-slotsToFinalized* : UInt256
-timeoutSeconds* : int

Sidechain = ref object
startAccount: UInt256
@ -48,14 +46,14 @@ proc getSidechainBlockTimeIncrements(ws: ReorgSpec): int=
return ws.getBlockTimeIncrements()
ws.sidechainTimeIncrements

-proc getSidechainWdForkHeight(ws: ReorgSpec): int =
+proc getSidechainforkHeight(ws: ReorgSpec): int =
if ws.getSidechainBlockTimeIncrements() != ws.getBlockTimeIncrements():
# Block timestamp increments in both chains are different so need to
# calculate different heights, only if split happens before fork.
# We cannot split by having two different genesis blocks.
doAssert(ws.getSidechainSplitHeight() != 0, "invalid sidechain split height")

-if ws.getSidechainSplitHeight() <= ws.wdForkHeight:
+if ws.getSidechainSplitHeight() <= ws.forkHeight:
# We need to calculate the height of the fork on the sidechain
let sidechainSplitBlocktimestamp = (ws.getSidechainSplitHeight() - 1) * ws.getBlockTimeIncrements()
let remainingTime = ws.getWithdrawalsGenesisTimeDelta() - sidechainSplitBlocktimestamp
@ -64,7 +62,7 @@ proc getSidechainWdForkHeight(ws: ReorgSpec): int =

return ((remainingTime - 1) div ws.sidechainTimeIncrements) + ws.getSidechainSplitHeight()

-return ws.wdForkHeight
+return ws.forkHeight

proc execute*(ws: ReorgSpec, env: TestEnv): bool =
result = true
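A worked instance of the sidechain fork-height arithmetic above, as a standalone sketch with illustrative numbers: canonical increment 2s, sidechain increment 1s, split at height 2, canonical fork height 8:

func sidechainForkHeight(forkHeight, splitHeight, incr, sideIncr: int): int =
  let genesisTimeDelta = forkHeight * incr               # 16
  let splitBlockTimestamp = (splitHeight - 1) * incr     # 2
  let remainingTime = genesisTimeDelta - splitBlockTimestamp
  ((remainingTime - 1) div sideIncr) + splitHeight

# The slower-ticking sidechain needs 15 blocks to reach the fork
# timestamp the canonical chain reaches at height 8.
doAssert sidechainForkHeight(8, 2, 2, 1) == 15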
@ -94,7 +92,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
onPayloadProducerSelected: proc(): bool =
env.clMock.nextWithdrawals = none(seq[WithdrawalV1])

-if env.clMock.currentPayloadNumber >= ws.wdForkHeight.uint64:
+if env.clMock.currentPayloadNumber >= ws.forkHeight.uint64:
# Prepare some withdrawals
let wfb = ws.generateWithdrawalsForBlock(canonical.nextIndex, canonical.startAccount)
env.clMock.nextWithdrawals = some(w3Withdrawals wfb.wds)
@ -103,7 +101,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =

if env.clMock.currentPayloadNumber >= ws.getSidechainSplitHeight().uint64:
# We have split
-if env.clMock.currentPayloadNumber >= ws.getSidechainWdForkHeight().uint64:
+if env.clMock.currentPayloadNumber >= ws.getSidechainforkHeight().uint64:
# And we are past the withdrawals fork on the sidechain
let wfb = ws.generateWithdrawalsForBlock(sidechain.nextIndex, sidechain.startAccount)
sidechain.wdHistory.put(env.clMock.currentPayloadNumber, wfb.wds)
@ -156,7 +154,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
else:
attr.timestamp = env.clMock.latestPayloadAttributes.timestamp

-if env.clMock.currentPayloadNumber >= ws.getSidechainwdForkHeight().uint64:
+if env.clMock.currentPayloadNumber >= ws.getSidechainforkHeight().uint64:
# Withdrawals
let rr = sidechain.wdHistory.get(env.clMock.currentPayloadNumber)
testCond rr.isOk:
@ -207,12 +205,12 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =

sidechain.height = env.clMock.latestExecutedPayload.blockNumber.uint64

-if ws.wdForkHeight < ws.getSidechainwdForkHeight():
+if ws.forkHeight < ws.getSidechainforkHeight():
# This means the canonical chain forked before the sidechain.
# Therefore we need to produce more sidechain payloads to reach
# at least`ws.WithdrawalsBlockCount` withdrawals payloads produced on
# the sidechain.
-let height = ws.getSidechainwdForkHeight()-ws.wdForkHeight
+let height = ws.getSidechainforkHeight()-ws.forkHeight
for i in 0..<height:
let
wfb = ws.generateWithdrawalsForBlock(sidechain.nextIndex, sidechain.startAccount)
@ -296,7 +294,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
while payloadNumber.uint64 <= sidechain.height:
let payload = sidechain.sidechain[payloadNumber.uint64]
var version = Version.V1
-if payloadNumber >= ws.getSidechainwdForkHeight():
+if payloadNumber >= ws.getSidechainforkHeight():
version = Version.V2

info "Sending sidechain",
@ -322,7 +320,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
# We are using different accounts credited between the canonical chain
# and the fork.
# We check on `latest`.
-let r3 = ws.wdHistory.verifyWithdrawals(uint64(ws.wdForkHeight-1), none(UInt256), env.client)
+let r3 = ws.wdHistory.verifyWithdrawals(uint64(ws.forkHeight-1), none(UInt256), env.client)
testCond r3.isOk

# Re-Org back to the canonical chain

@ -14,7 +14,6 @@ type
SyncSpec* = ref object of WDBaseSpec
syncSteps*: int # Sync block chunks that will be passed as head through FCUs to the syncing client
syncShouldFail*: bool
-timeoutSeconds*: int
sleep*: int

proc doSync(ws: SyncSpec, client: RpcClient, clMock: CLMocker): Future[bool] {.async.} =

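syncSteps chunks the canonical chain and feeds each chunk's tip to the syncing client as a forkchoiceUpdated head. A hypothetical sketch of that chunking, not the framework's actual doSync logic:

func chunkedHeads(chainHeight, syncSteps: int): seq[int] =
  # Walk the chain in syncSteps roughly equal chunks; each entry is
  # the next head the syncing client would be pointed at via FCU.
  let step = max(1, chainHeight div syncSteps)
  var head = 0
  while head < chainHeight:
    head = min(head + step, chainHeight)
    result.add head

doAssert chunkedHeads(128, 4) == @[32, 64, 96, 128]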
@ -85,7 +85,7 @@ const
BLOB_GASPRICE_UPDATE_FRACTION* = 3338477'u64
MAX_BLOB_GAS_PER_BLOCK* = 786432
MAX_ALLOWED_BLOB* = MAX_BLOB_GAS_PER_BLOCK div GAS_PER_BLOB

# EIP-4788 addresses
# BEACON_ROOTS_ADDRESS is the address where historical beacon roots are stored as per EIP-4788
BEACON_ROOTS_ADDRESS* = hexToByteArray[20]("0x000F3df6D732807Ef1319fB7B8bB8522d0Beac02")
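A quick consistency check on the blob budget: with the EIP-4844 value GAS_PER_BLOB = 2^17 (131072), the division caps a block at 6 blobs (standalone sketch):

const
  GAS_PER_BLOB = 1 shl 17  # 131072, per EIP-4844
  MAX_BLOB_GAS_PER_BLOCK = 786432
  MAX_ALLOWED_BLOB = MAX_BLOB_GAS_PER_BLOCK div GAS_PER_BLOB

doAssert MAX_ALLOWED_BLOB == 6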