More Cancun tests (#1843)

* Engine API simulator: More Cancun tests
* Fix Cancun validation in Engine API and TxPool

parent 5048c87679
commit 77289c7795
@@ -26,6 +26,12 @@ func getBlobList*(startId: BlobID, count: int): BlobIDs =
   for i in 0..<count:
     result[i] = startId + BlobID(i)
 
+func getBlobList*(startId: BlobID, count: int, addition: BlobID): BlobIDs =
+  result = newSeq[BlobID](count+1)
+  for i in 0..<count:
+    result[i] = startId + BlobID(i)
+  result[^1] = addition
+
 func getBlobListByIndex*(startIndex: BlobID, endIndex: BlobID): BlobIDs =
   var count = uint64(0)
   if endIndex > startIndex:
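The new `getBlobList` overload appends one extra `BlobID` after the sequential range. A minimal usage sketch, assuming the `BlobID`/`BlobIDs` types from this file; the concrete values are invented for illustration:

  # Hypothetical: build IDs 10..14 plus the extra ID 99.
  let ids = getBlobList(BlobID(10), 5, BlobID(99))
  doAssert ids.len == 6
  doAssert ids[0] == BlobID(10)
  doAssert ids[^1] == BlobID(99)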
@@ -3,6 +3,7 @@ import
   nimcrypto/sysrand,
   stew/byteutils,
   ./blobs,
+  ../types,
   ../tx_sender,
   ../../../../nimbus/constants,
   ../../../../nimbus/utils/utils,
@@ -19,7 +20,7 @@ method setEngineAPIVersionResolver*(cust: EngineAPIVersionResolver, v: CommonRef
   cust.com = v
 
 method forkchoiceUpdatedVersion*(cust: EngineAPIVersionResolver,
-    headTimestamp: uint64, payloadAttributesTimestamp: Option[uint64]): Version {.base.} =
+    headTimestamp: uint64, payloadAttributesTimestamp: Option[uint64] = none(uint64)): Version {.base.} =
   let ts = if payloadAttributesTimestamp.isNone: headTimestamp.EthTime
            else: payloadAttributesTimestamp.get().EthTime
   if cust.com.isCancunOrLater(ts):
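Giving `payloadAttributesTimestamp` a `none(uint64)` default lets callers resolve the fork version from the head timestamp alone. A hedged sketch of the two call shapes; the `resolver` instance and the timestamps are placeholders, not values from this commit:

  # Assuming `resolver` is an EngineAPIVersionResolver whose CommonRef is set:
  let vHead = resolver.forkchoiceUpdatedVersion(1_700_000_000'u64)
  let vAttr = resolver.forkchoiceUpdatedVersion(1_700_000_000'u64, some(1_700_000_012'u64))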
@@ -59,8 +60,8 @@ method getExpectedError*(cust: GetPayloadCustomizer): int {.base.} =
 
 type
   BaseGetPayloadCustomizer* = ref object of GetPayloadCustomizer
-    customPayloadID: Option[PayloadID]
-    expectedError : int
+    customPayloadID*: Option[PayloadID]
+    expectedError* : int
 
 method getPayloadID(cust: BaseGetPayloadCustomizer,
     basePayloadID: PayloadID): PayloadID =
@@ -72,7 +73,7 @@ method getExpectedError(cust: BaseGetPayloadCustomizer): int =
   cust.expectedError
 
 type
-  UpgradegetPayloadVersion* = ref object of GetPayloadCustomizer
+  UpgradegetPayloadVersion* = ref object of BaseGetPayloadCustomizer
 
 method getPayloadVersion(cust: UpgradegetPayloadVersion, timestamp: uint64): Version =
   let version = procCall getPayloadVersion(cust.GetPayloadCustomizer, timestamp)
@@ -80,7 +81,7 @@ method getPayloadVersion(cust: UpgradegetPayloadVersion, timestamp: uint64): Ver
   version.succ
 
 type
-  DowngradegetPayloadVersion* = ref object of GetPayloadCustomizer
+  DowngradegetPayloadVersion* = ref object of BaseGetPayloadCustomizer
 
 method getPayloadVersion(cust: DowngradegetPayloadVersion, timestamp: uint64): Version =
   let version = procCall getPayloadVersion(cust.GetPayloadCustomizer, timestamp)
@@ -88,20 +89,20 @@ method getPayloadVersion(cust: DowngradegetPayloadVersion, timestamp: uint64): V
   version.pred
 
 type
-  PayloadAttributesCustomizer* = ref object of GetPayloadCustomizer
+  PayloadAttributesCustomizer* = ref object of BaseGetPayloadCustomizer
 
 method getPayloadAttributes*(cust: PayloadAttributesCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes {.base.} =
   doAssert(false, "getPayloadAttributes unimplemented")
 
 type
   BasePayloadAttributesCustomizer* = ref object of PayloadAttributesCustomizer
-    timestamp             : Option[uint64]
-    prevRandao            : Option[common.Hash256]
-    suggestedFeeRecipient : Option[common.EthAddress]
-    withdrawals           : Option[seq[Withdrawal]]
-    removeWithdrawals     : bool
-    beaconRoot            : Option[common.Hash256]
-    removeBeaconRoot      : bool
+    timestamp*             : Option[uint64]
+    prevRandao*            : Option[common.Hash256]
+    suggestedFeeRecipient* : Option[common.EthAddress]
+    withdrawals*           : Option[seq[Withdrawal]]
+    removeWithdrawals*     : bool
+    beaconRoot*            : Option[common.Hash256]
+    removeBeaconRoot*      : bool
 
 method getPayloadAttributes(cust: BasePayloadAttributesCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes =
   var customPayloadAttributes = PayloadAttributes(
@@ -133,15 +134,6 @@ method getPayloadAttributes(cust: BasePayloadAttributesCustomizer, basePayloadAt
 
   return customPayloadAttributes
 
-type
-  TimestampDeltaPayloadAttributesCustomizer* = ref object of BasePayloadAttributesCustomizer
-    timestampDelta: uint64
-
-method getPayloadAttributes(cust: TimestampDeltaPayloadAttributesCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes =
-  var customPayloadAttributes = procCall getPayloadAttributes(cust.BasePayloadAttributesCustomizer, basePayloadAttributes)
-  customPayloadAttributes.timestamp = w3Qty(customPayloadAttributes.timestamp, cust.timestampDelta)
-  return customPayloadAttributes
-
 type
   ForkchoiceUpdatedCustomizer* = ref object of BasePayloadAttributesCustomizer
 
@@ -156,8 +148,7 @@ method getExpectInvalidStatus*(cust: ForkchoiceUpdatedCustomizer): bool {.base.}
 # Used as base to other customizers.
 type
   BaseForkchoiceUpdatedCustomizer* = ref object of ForkchoiceUpdatedCustomizer
-    expectedError : int
-    expectInvalidStatus: bool
+    expectInvalidStatus*: bool
 
 method getPayloadAttributes(cust: BaseForkchoiceUpdatedCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes =
   var customPayloadAttributes = procCall getPayloadAttributes(cust.BasePayloadAttributesCustomizer, basePayloadAttributes)
@@ -166,9 +157,6 @@ method getPayloadAttributes(cust: BaseForkchoiceUpdatedCustomizer, basePayloadAt
 method getForkchoiceState(cust: BaseForkchoiceUpdatedCustomizer, baseForkchoiceUpdate: ForkchoiceStateV1): ForkchoiceStateV1 =
   return baseForkchoiceUpdate
 
-method getExpectedError(cust: BaseForkchoiceUpdatedCustomizer): int =
-  return cust.expectedError
-
 method getExpectInvalidStatus(cust: BaseForkchoiceUpdatedCustomizer): bool =
   return cust.expectInvalidStatus
 
@@ -176,7 +164,8 @@ method getExpectInvalidStatus(cust: BaseForkchoiceUpdatedCustomizer): bool =
 type
   UpgradeforkchoiceUpdatedVersion* = ref object of BaseForkchoiceUpdatedCustomizer
 
-method forkchoiceUpdatedVersion(cust: UpgradeforkchoiceUpdatedVersion, headTimestamp: uint64, payloadAttributesTimestamp: Option[uint64]): Version =
+method forkchoiceUpdatedVersion(cust: UpgradeforkchoiceUpdatedVersion, headTimestamp:
+    uint64, payloadAttributesTimestamp: Option[uint64] = none(uint64)): Version =
   let version = procCall forkchoiceUpdatedVersion(EngineAPIVersionResolver(cust), headTimestamp, payloadAttributesTimestamp)
   doAssert(version != Version.high, "cannot upgrade version " & $Version.high)
   version.succ
@@ -185,99 +174,110 @@ method forkchoiceUpdatedVersion(cust: UpgradeforkchoiceUpdatedVersion, headTimes
 type
   DowngradeforkchoiceUpdatedVersion* = ref object of BaseForkchoiceUpdatedCustomizer
 
-method forkchoiceUpdatedVersion(cust: DowngradeforkchoiceUpdatedVersion, headTimestamp: uint64, payloadAttributesTimestamp: Option[uint64]): Version =
+method forkchoiceUpdatedVersion(cust: DowngradeforkchoiceUpdatedVersion, headTimestamp: uint64,
+    payloadAttributesTimestamp: Option[uint64] = none(uint64)): Version =
   let version = procCall forkchoiceUpdatedVersion(EngineAPIVersionResolver(cust), headTimestamp, payloadAttributesTimestamp)
   doAssert(version != Version.V1, "cannot downgrade version 1")
   version.pred
 
 type
-  VersionedHashRef* = ref object of RootRef
-    blobs*: seq[BlobID]
-    hashVersions*: seq[byte]
+  TimestampDeltaPayloadAttributesCustomizer* = ref object of BaseForkchoiceUpdatedCustomizer
+    timestampDelta*: int
 
-proc getVersionedHashes*(v: VersionedHashRef): seq[common.Hash256] =
-  if v.blobs.len == 0:
-    return @[]
-
-  result = newSeq[common.Hash256](v.blobs.len)
-
-  var version: byte
-  for i, blobID in v.blobs:
-    if v.hashVersions.len > i:
-      version = v.hashVersions[i]
-    result[i] = blobID.getVersionedHash(version)
-
-proc description*(v: VersionedHashRef): string =
-  result = "VersionedHashes: "
-  for x in v.blobs:
-    result.add x.toHex
-
-  if v.hashVersions.len > 0:
-    result.add " with versions "
-    result.add v.hashVersions.toHex
+method getPayloadAttributes(cust: TimestampDeltaPayloadAttributesCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes =
+  var customPayloadAttributes = procCall getPayloadAttributes(cust.BasePayloadAttributesCustomizer, basePayloadAttributes)
+  customPayloadAttributes.timestamp = w3Qty(customPayloadAttributes.timestamp, cust.timestampDelta)
+  return customPayloadAttributes
 
 type
   VersionedHashesCustomizer* = ref object of RootRef
+    blobs*: Option[seq[BlobID]]
+    hashVersions*: seq[byte]
+
+method getVersionedHashes*(cust: VersionedHashesCustomizer,
+    baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] {.base.} =
+  if cust.blobs.isNone:
+    return none(seq[common.Hash256])
+
+  let blobs = cust.blobs.get
+  var v = newSeq[common.Hash256](blobs.len)
+
+  var version: byte
+  for i, blobID in blobs:
+    if cust.hashVersions.len > i:
+      version = cust.hashVersions[i]
+    v[i] = blobID.getVersionedHash(version)
+  some(v)
+
+method description*(cust: VersionedHashesCustomizer): string {.base.} =
+  result = "VersionedHashes: "
+  if cust.blobs.isSome:
+    for x in cust.blobs.get:
+      result.add x.toHex
+
+  if cust.hashVersions.len > 0:
+    result.add " with versions "
+    result.add cust.hashVersions.toHex
+
+type
   IncreaseVersionVersionedHashes* = ref object of VersionedHashesCustomizer
 
-method getVersionedHashes*(cust: VersionedHashesCustomizer, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] {.base.} =
-  doAssert(false, "getVersionedHashes unimplemented")
-
-method getVersionedHashes(cust: IncreaseVersionVersionedHashes, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] =
+method getVersionedHashes(cust: IncreaseVersionVersionedHashes,
+    baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] =
   doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")
 
-  result = newSeq[common.Hash256](baseVersionedHashes.len)
+  var v = newSeq[common.Hash256](baseVersionedHashes.len)
   for i, h in baseVersionedHashes:
-    result[i] = h
-    result[i].data[0] = result[i].data[0] + 1
+    v[i] = h
+    v[i].data[0] = v[i].data[0] + 1
+  some(v)
 
 type
   CorruptVersionedHashes* = ref object of VersionedHashesCustomizer
 
-method getVersionedHashes(cust: CorruptVersionedHashes, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] =
+method getVersionedHashes(cust: CorruptVersionedHashes,
+    baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] =
   doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")
 
-  result = newSeq[common.Hash256](baseVersionedHashes.len)
+  var v = newSeq[common.Hash256](baseVersionedHashes.len)
   for i, h in baseVersionedHashes:
-    result[i] = h
-    result[i].data[h.data.len-1] = result[i].data[h.data.len-1] + 1
+    v[i] = h
+    v[i].data[h.data.len-1] = v[i].data[h.data.len-1] + 1
+  some(v)
 
 type
   RemoveVersionedHash* = ref object of VersionedHashesCustomizer
 
-method getVersionedHashes(cust: RemoveVersionedHash, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] =
+method getVersionedHashes(cust: RemoveVersionedHash,
+    baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] =
   doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")
 
-  result = newSeq[common.Hash256](baseVersionedHashes.len - 1)
+  var v = newSeq[common.Hash256](baseVersionedHashes.len - 1)
   for i, h in baseVersionedHashes:
     if i < baseVersionedHashes.len-1:
-      result[i] = h
-      result[i].data[h.data.len-1] = result[i].data[h.data.len-1] + 1
+      v[i] = h
+      v[i].data[h.data.len-1] = v[i].data[h.data.len-1] + 1
+  some(v)
 
 type
   ExtraVersionedHash* = ref object of VersionedHashesCustomizer
 
-method getVersionedHashes(cust: ExtraVersionedHash, baseVersionedHashes: openArray[common.Hash256]): seq[common.Hash256] =
-  result = newSeq[common.Hash256](baseVersionedHashes.len + 1)
+method getVersionedHashes(cust: ExtraVersionedHash,
+    baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] =
+  var v = newSeq[common.Hash256](baseVersionedHashes.len + 1)
   for i, h in baseVersionedHashes:
-    result[i] = h
+    v[i] = h
 
   var extraHash: common.Hash256
   doAssert randomBytes(extraHash.data) == 32
   extraHash.data[0] = VERSIONED_HASH_VERSION_KZG
-  result[^1] = extraHash
+  v[^1] = extraHash
+  some(v)
 
 type
   PayloadCustomizer* = ref object of EngineAPIVersionResolver
 
-  ExecutableData* = object
-    basePayload*: ExecutionPayload
-    beaconRoot* : Option[common.Hash256]
-    attr* : PayloadAttributes
-    versionedHashes*: seq[common.Hash256]
-
-method customizePayload(cust: PayloadCustomizer, data: ExecutableData): ExecutableData {.base.} =
+method customizePayload*(cust: PayloadCustomizer, data: ExecutableData): ExecutableData {.base.} =
   doAssert(false, "customizePayload unimplemented")
 
 method getTimestamp(cust: PayloadCustomizer, basePayload: ExecutionPayload): uint64 {.base.} =
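The `VersionedHashesCustomizer` family now returns `Option[seq[common.Hash256]]`, so a test can distinguish "no override" from an explicit, deliberately broken hash list. A minimal sketch of driving one customizer directly; the base hashes below are placeholder zero values, not data from this commit:

  # Hypothetical: corrupt the last byte of each base versioned hash.
  let cust = CorruptVersionedHashes()
  var baseHashes = newSeq[common.Hash256](2)   # placeholder hashes
  let tampered = cust.getVersionedHashes(baseHashes)
  doAssert tampered.isSome and tampered.get.len == baseHashes.len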
@@ -285,15 +285,17 @@ method getTimestamp(cust: PayloadCustomizer, basePayload: ExecutionPayload): uin
 
 type
   NewPayloadCustomizer* = ref object of PayloadCustomizer
+    expectedError*      : int
+    expectInvalidStatus*: bool
 
-method getExpectedError(cust: NewPayloadCustomizer): int {.base.} =
-  doAssert(false, "getExpectedError unimplemented")
+method getExpectedError*(cust: NewPayloadCustomizer): int {.base.} =
+  cust.expectedError
 
-method getExpectInvalidStatus(cust: NewPayloadCustomizer): bool {.base.} =
-  doAssert(false, "getExpectInvalidStatus unimplemented")
+method getExpectInvalidStatus*(cust: NewPayloadCustomizer): bool {.base.} =
+  cust.expectInvalidStatus
 
 type
-  CustomPayloadData = object
+  CustomPayloadData* = object
     parentHash*   : Option[common.Hash256]
     feeRecipient* : Option[common.EthAddress]
     stateRoot*    : Option[common.Hash256]
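With `expectedError` and `expectInvalidStatus` promoted to fields on `NewPayloadCustomizer`, the base accessors simply return them, so a test can declare its expectation when it builds the customizer. A hedged sketch using names from this diff; the particular configuration is illustrative only:

  # Hypothetical: expect the engine to reject the customized payload as INVALID.
  let npCust = BaseNewPayloadVersionCustomizer(
    payloadCustomizer: CustomPayloadData(
      versionedHashesCustomizer: ExtraVersionedHash()),
    expectInvalidStatus: true)
  doAssert npCust.getExpectedError() == 0
  doAssert npCust.getExpectInvalidStatus()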
@@ -408,29 +410,21 @@ proc customizePayload*(cust: CustomPayloadData, data: ExecutableData): Executabl
   )
 
   if cust.versionedHashesCustomizer.isNil.not:
-    result.versionedHashes = cust.versionedHashesCustomizer.getVersionedHashes(data.versionedHashes)
+    doAssert(data.versionedHashes.isSome)
+    result.versionedHashes = cust.versionedHashesCustomizer.getVersionedHashes(data.versionedHashes.get)
 
 # Base new payload directive call cust.
 # Used as base to other customizers.
 type
   BaseNewPayloadVersionCustomizer* = ref object of NewPayloadCustomizer
     payloadCustomizer*  : CustomPayloadData
-    expectedError*      : int
-    expectInvalidStatus*: bool
 
 method customizePayload(cust: BaseNewPayloadVersionCustomizer, data: ExecutableData): ExecutableData =
   cust.payloadCustomizer.customizePayload(data)
 
-method getExpectedError(cust: BaseNewPayloadVersionCustomizer): int =
-  cust.expectedError
-
-method getExpectInvalidStatus(cust: BaseNewPayloadVersionCustomizer): bool =
-  cust.expectInvalidStatus
-
 # Customizer that upgrades the version of the payload to the next version.
 type
-  UpgradeNewPayloadVersion* = ref object of NewPayloadCustomizer
+  UpgradeNewPayloadVersion* = ref object of BaseNewPayloadVersionCustomizer
 
 method newPayloadVersion(cust: UpgradeNewPayloadVersion, timestamp: uint64): Version =
   let version = procCall newPayloadVersion(EngineAPIVersionResolver(cust), timestamp)
@@ -439,7 +433,7 @@ method newPayloadVersion(cust: UpgradeNewPayloadVersion, timestamp: uint64): Ver
 
 # Customizer that downgrades the version of the payload to the previous version.
 type
-  DowngradeNewPayloadVersion* = ref object of NewPayloadCustomizer
+  DowngradeNewPayloadVersion* = ref object of BaseNewPayloadVersionCustomizer
 
 method newPayloadVersion(cust: DowngradeNewPayloadVersion, timestamp: uint64): Version =
   let version = procCall newPayloadVersion(EngineAPIVersionResolver(cust), timestamp)
@@ -609,22 +603,22 @@ proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadFiel
       excessBlobGas: some(modExcessBlobGas),
     )
   of InvalidVersionedHashesVersion:
-    doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification")
+    doAssert(data.versionedHashes.isNone, "no versioned hashes available for modification")
     customPayloadMod = CustomPayloadData(
       versionedHashesCustomizer: IncreaseVersionVersionedHashes(),
     )
   of InvalidVersionedHashes:
-    doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification")
+    doAssert(data.versionedHashes.isNone, "no versioned hashes available for modification")
     customPayloadMod = CustomPayloadData(
       versionedHashesCustomizer: CorruptVersionedHashes(),
     )
   of IncompleteVersionedHashes:
-    doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification")
+    doAssert(data.versionedHashes.isNone, "no versioned hashes available for modification")
     customPayloadMod = CustomPayloadData(
      versionedHashesCustomizer: RemoveVersionedHash(),
     )
   of ExtraVersionedHashes:
-    doAssert(data.versionedHashes.len > 0, "no versioned hashes available for modification")
+    doAssert(data.versionedHashes.isNone, "no versioned hashes available for modification")
     customPayloadMod = CustomPayloadData(
       versionedHashesCustomizer: ExtraVersionedHash(),
     )
@@ -1,134 +1,114 @@
 import
-  ./step
+  std/strutils,
+  eth/common,
+  chronicles,
+  ./step_desc,
+  ./helpers,
+  ../types,
+  ../test_env,
+  ../../../../nimbus/utils/utils,
+  ../../../../nimbus/sync/protocol
 
 # A step that requests a Transaction hash via P2P and expects the correct full blob tx
-type DevP2PRequestPooledTransactionHash struct {
+type
+  DevP2PRequestPooledTransactionHash* = ref object of TestStep
   # Client index to request the transaction hash from
-  ClientIndex uint64
+    clientIndex*: int
   # Transaction Index to request
-  TransactionIndexes []uint64
+    transactionIndexes*: seq[int]
   # Wait for a new pooled transaction message before actually requesting the transaction
-  WaitForNewPooledTransaction bool
-}
+    waitForNewPooledTransaction*: bool
 
-func (step DevP2PRequestPooledTransactionHash) Execute(t *CancunTestContext) error {
+method execute*(step: DevP2PRequestPooledTransactionHash, ctx: CancunTestContext): bool =
   # Get client index's enode
-  if step.ClientIndex >= uint64(len(t.TestEngines)) {
-    return error "invalid client index %d", step.ClientIndex)
-  }
-  engine = t.Engines[step.ClientIndex]
-  conn, err = devp2p.PeerEngineClient(engine, env.clMock)
-  if err != nil {
-    return error "error peering engine client: %v", err)
-  }
-  defer conn.Close()
-  info "Connected to client %d, remote public key: %s", step.ClientIndex, conn.RemoteKey())
+  let env = ctx.env
+  doAssert(step.clientIndex < env.numEngines, "invalid client index" & $step.clientIndex)
+  let engine = env.engines(step.clientIndex)
+  let sec = env.addEngine(false, false)
 
-  var (
-    txHashes = make([]Hash256, len(step.TransactionIndexes))
-    txs = make([]typ.Transaction, len(step.TransactionIndexes))
-    ok bool
-  )
-  for i, txIndex = range step.TransactionIndexes {
-    txHashes[i], ok = t.TestBlobTxPool.HashesByIndex[txIndex]
-    if !ok {
-      return error "transaction index %d not found", step.TransactionIndexes[0])
-    }
-    txs[i], ok = t.TestBlobTxPool.transactions[txHashes[i]]
-    if !ok {
-      return error "transaction %s not found", txHashes[i].String())
-    }
-  }
+  engine.connect(sec.node)
 
-  # Timeout value for all requests
-  timeout = 20 * time.Second
+  var
+    txHashes = newSeq[common.Hash256](step.transactionIndexes.len)
+    txs = newSeq[Transaction](step.transactionIndexes.len)
+
+  for i, txIndex in step.transactionIndexes:
+    if not ctx.txPool.hashesByIndex.hasKey(txIndex):
+      error "transaction not found", index=step.transactionIndexes[i]
+      return false
+
+    txHashes[i] = ctx.txPool.hashesByIndex[txIndex]
+
+    if not ctx.txPool.transactions.hasKey(txHashes[i]):
+      error "transaction not found", hash=txHashes[i].short
+      return false
+
+    txs[i] = ctx.txPool.transactions[txHashes[i]]
 
   # Wait for a new pooled transaction message
-  if step.WaitForNewPooledTransaction {
-    msg, err = conn.WaitForResponse(timeout, 0)
-    if err != nil {
-      return errors.Wrap(err, "error waiting for response")
-    }
-    switch msg = msg.(type) {
-    case *devp2p.NewPooledTransactionHashes:
-      if len(msg.Hashes) != len(txHashes) {
-        return error "expected %d hashes, got %d", len(txHashes), len(msg.Hashes))
-      }
-      if len(msg.Types) != len(txHashes) {
-        return error "expected %d types, got %d", len(txHashes), len(msg.Types))
-      }
-      if len(msg.Sizes) != len(txHashes) {
-        return error "expected %d sizes, got %d", len(txHashes), len(msg.Sizes))
-      }
-      for i = 0; i < len(txHashes); i++ {
-        hash, typ, size = msg.Hashes[i], msg.Types[i], msg.Sizes[i]
-        # Get the transaction
-        tx, ok = t.TestBlobTxPool.transactions[hash]
-        if !ok {
-          return error "transaction %s not found", hash.String())
-        }
-
-        if typ != tx.Type() {
-          return error "expected type %d, got %d", tx.Type(), typ)
-        }
-
-        b, err = tx.MarshalBinary()
-        if err != nil {
-          return errors.Wrap(err, "error marshaling transaction")
-        }
-        if size != uint32(len(b)) {
-          return error "expected size %d, got %d", len(b), size)
-        }
-      }
-    default:
-      return error "unexpected message type: %T", msg)
-    }
-  }
+  if step.waitForNewPooledTransaction:
+    let period = chronos.seconds(1)
+    var loop = 0
+    while loop < 20:
+      if sec.numTxsInPool >= txs.len:
+        break
+      waitFor sleepAsync(period)
+      inc loop
+
+    # those txs above should have been relayed to second client
+    # when it first connected
+    let secTxs = sec.getTxsInPool(txHashes)
+    if secTxs.len != txHashes.len:
+      error "expected txs from newPooledTxs num mismatch",
+        expect=txHashes.len,
+        get=secTxs.len
+      return false
+
+    for i, secTx in secTxs:
+      let secTxBytes = rlp.encode(secTx)
+      let localTxBytes = rlp.encode(txs[i])
+
+      if secTxBytes.len != localTxBytes.len:
+        error "expected tx from newPooledTxs size mismatch",
+          expect=localTxBytes.len,
+          get=secTxBytes.len
+        return false
+
+      if secTxBytes != localTxBytes:
+        error "expected tx from gnewPooledTxs bytes not equal"
+        return false
 
   # Send the request for the pooled transactions
-  getTxReq = &devp2p.GetPooledTransactions{
-    RequestId: 1234,
-    GetPooledTransactionsPacket: txHashes,
-  }
-  if size, err = conn.Write(getTxReq); err != nil {
-    return errors.Wrap(err, "could not write to conn")
-  else:
-    info "Wrote %d bytes to conn", size)
-  }
+  let peer = sec.peer
+  let res = waitFor peer.getPooledTransactions(txHashes)
+  if res.isNone:
+    error "getPooledTransactions returns none"
+    return false
 
-  # Wait for the response
-  msg, err = conn.WaitForResponse(timeout, getTxReq.RequestId)
-  if err != nil {
-    return errors.Wrap(err, "error waiting for response")
-  }
-  switch msg = msg.(type) {
-  case *devp2p.PooledTransactions:
-    if len(msg.PooledTransactionsBytesPacket) != len(txHashes) {
-      return error "expected %d txs, got %d", len(txHashes), len(msg.PooledTransactionsBytesPacket))
-    }
-    for i, txBytes = range msg.PooledTransactionsBytesPacket {
-      tx = txs[i]
+  let remoteTxs = res.get
+  if remoteTxs.transactions.len != txHashes.len:
+    error "expected txs from getPooledTransactions num mismatch",
+      expect=txHashes.len,
+      get=remoteTxs.transactions.len
+    return false
 
-      expBytes, err = tx.MarshalBinary()
-      if err != nil {
-        return errors.Wrap(err, "error marshaling transaction")
-      }
+  for i, remoteTx in remoteTxs.transactions:
+    let remoteTxBytes = rlp.encode(remoteTx)
+    let localTxBytes = rlp.encode(txs[i])
 
-      if len(expBytes) != len(txBytes) {
-        return error "expected size %d, got %d", len(expBytes), len(txBytes))
-      }
+    if remoteTxBytes.len != localTxBytes.len:
+      error "expected tx from getPooledTransactions size mismatch",
+        expect=localTxBytes.len,
+        get=remoteTxBytes.len
+      return false
 
-      if !bytes.Equal(expBytes, txBytes) {
-        return error "expected tx %#x, got %#x", expBytes, txBytes)
-      }
-    }
-  default:
-    return error "unexpected message type: %T", msg)
-  }
-  return nil
-}
+    if remoteTxBytes != localTxBytes:
+      error "expected tx from getPooledTransactions bytes not equal"
+      return false
+
+  return true
 
-func (step DevP2PRequestPooledTransactionHash) Description() string {
-  return fmt.Sprintf("DevP2PRequestPooledTransactionHash: client %d, transaction indexes %v", step.ClientIndex, step.TransactionIndexes)
-}
+method description*(step: DevP2PRequestPooledTransactionHash): string =
+  "DevP2PRequestPooledTransactionHash: client $1, transaction indexes $1" % [
+    $step.clientIndex, $step.transactionIndexes]
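A hedged sketch of how a Cancun test spec might instantiate this step; the surrounding test-list wiring is assumed and not shown in this hunk:

  # Hypothetical test step: ask client 0 for the first two blob transactions
  # over devp2p, after waiting for a NewPooledTransactionHashes announcement.
  DevP2PRequestPooledTransactionHash(
    clientIndex: 0,
    transactionIndexes: @[0, 1],
    waitForNewPooledTransaction: true)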
@@ -1,47 +1,30 @@
 import
-  ./step
+  std/strutils,
+  ./step_desc,
+  ../test_env
 
 # A step that launches a new client
-type LaunchClients struct {
-  client.EngineStarter
-  ClientCount uint64
-  SkipConnectingToBootnode bool
-  SkipAddingToCLMock bool
-}
+type
+  LaunchClients* = ref object of TestStep
+    clientCount*             : int
+    skipConnectingToBootnode*: bool
+    skipAddingToCLMock*      : bool
 
-func (step LaunchClients) GetClientCount() uint64 {
-  clientCount = step.ClientCount
-  if clientCount == 0 {
+func getClientCount(step: LaunchClients): int =
+  var clientCount = step.clientCount
+  if clientCount == 0:
     clientCount = 1
-  }
   return clientCount
-}
 
-func (step LaunchClients) Execute(t *CancunTestContext) error {
+method execute*(step: LaunchClients, ctx: CancunTestContext): bool =
   # Launch a new client
-  var (
-    client client.EngineClient
-    err error
-  )
-  clientCount = step.GetClientCount()
-  for i = uint64(0); i < clientCount; i++ {
-    if !step.SkipConnectingToBootnode {
-      client, err = step.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles, t.Engines[0])
-    else:
-      client, err = step.StartClient(t.T, t.TestContext, t.Genesis, t.ClientParams, t.ClientFiles)
-    }
-    if err != nil {
-      return err
-    }
-    t.Engines = append(t.Engines, client)
-    t.TestEngines = append(t.TestEngines, test.NewTestEngineClient(t.Env, client))
-    if !step.SkipAddingToCLMock {
-      env.clMock.AddEngineClient(client)
-    }
-  }
-  return nil
-}
+  let clientCount = step.getClientCount()
+  for i in 0..<clientCount:
+    let connectBootNode = not step.skipConnectingToBootnode
+    let addToClMock = not step.skipAddingToCLMock
+    discard ctx.env.addEngine(addToClMock, connectBootNode)
 
-func (step LaunchClients) Description() string {
-  return fmt.Sprintf("Launch %d new engine client(s)", step.GetClientCount())
-}
+  return true
+
+method description*(step: LaunchClients): string =
+  "Launch $1 new engine client(s)" % [$step.getClientCount()]
@@ -202,75 +202,58 @@ method execute*(step: NewPayloads, ctx: CancunTestContext): bool =
     shadow.p = p
     let pbRes = env.clMock.produceSingleBlock(BlockProcessCallbacks(
       onPayloadAttributesGenerated: proc(): bool =
-        #[if step.fcUOnPayloadRequest != nil:
+        if step.fcUOnPayloadRequest != nil:
+          step.fcUOnPayloadRequest.setEngineAPIVersionResolver(env.engine.com)
+
           var
             payloadAttributes = env.clMock.latestPayloadAttributes
             forkchoiceState = env.clMock.latestForkchoice
-            expectedError *int
-            expectedStatus = test.Valid
-            err error
-          )
-          step.fcUOnPayloadRequest.setEngineAPIVersionResolver(t.ForkConfig)
-          testEngine = t.TestEngine.WithEngineAPIVersionResolver(step.FcUOnPayloadRequest)
+            expectedError = step.fcUOnPayloadRequest.getExpectedError()
+            expectedStatus = PayloadExecutionStatus.valid
+            timestamp = env.clMock.latestHeader.timestamp.uint64
+            version = step.fcUOnPayloadRequest.forkchoiceUpdatedVersion(timestamp)
 
-          payloadAttributes, err = step.FcUOnPayloadRequest.getPayloadAttributes(payloadAttributes)
-          if err != nil {
-            fatal "Error getting custom payload attributes (payload %d/%d): %v", payload=shadow.p+1, count=shadow.payloadCount, err)
+          payloadAttributes = step.fcUOnPayloadRequest.getPayloadAttributes(payloadAttributes)
 
-          expectedError, err = step.FcUOnPayloadRequest.getExpectedError()
-          if err != nil {
-            fatal "Error getting custom expected error (payload %d/%d): %v", payload=shadow.p+1, count=shadow.payloadCount, err)
-
-          if step.FcUOnPayloadRequest.getExpectInvalidStatus() {
-            expectedStatus = test.Invalid
+          if step.fcUOnPayloadRequest.getExpectInvalidStatus():
+            expectedStatus = PayloadExecutionStatus.invalid
 
-          r = env.client.ForkchoiceUpdated(&forkchoiceState, payloadAttributes, env.clMock.LatestHeader.Time)
-          r.ExpectationDescription = step.ExpectationDescription
-          if expectedError != nil {
-            r.ExpectErrorCode(*expectedError)
+          let r = env.engine.client.forkchoiceUpdated(version, forkchoiceState, some(payloadAttributes))
+          if expectedError != 0:
+            r.expectErrorCode(expectedError, step.expectationDescription)
           else:
-            r.ExpectNoError()
-            r.ExpectPayloadStatus(expectedStatus)
+            r.expectNoError(step.expectationDescription)
+            r.expectPayloadStatus(expectedStatus)
 
-          if r.Response.PayloadID != nil {
-            env.clMock.AddPayloadID(t.Engine, r.Response.PayloadID)
-        ]#
+          if r.get().payloadID.isSome:
+            testCond env.clMock.addPayloadID(env.engine, r.get().payloadID.get())
+
         return true
       ,
       onRequestNextPayload: proc(): bool =
         # Get the next payload
-        #[if step.GetPayloadCustomizer != nil {
-          var (
-            payloadAttributes = env.clMock.latestPayloadAttributes
-            payloadID = env.clMock.NextPayloadID
-            expectedError *int
-            err error
-          )
+        if step.getPayloadCustomizer != nil:
+          step.getPayloadCustomizer.setEngineAPIVersionResolver(env.engine.com)
 
-          step.GetPayloadCustomizer.setEngineAPIVersionResolver(t.ForkConfig)
-          testEngine = t.TestEngine.WithEngineAPIVersionResolver(step.GetPayloadCustomizer)
+          var
+            payloadAttributes = env.clMock.latestPayloadAttributes
+            payloadID = env.clMock.nextPayloadID
+            expectedError = step.getPayloadCustomizer.getExpectedError()
+            timestamp = payloadAttributes.timestamp.uint64
+            version = step.getPayloadCustomizer.getPayloadVersion(timestamp)
+
+          payloadID = step.getPayloadCustomizer.getPayloadID(payloadID)
 
           # We are going to sleep twice because there is no way to skip the CL Mock's sleep
-          time.Sleep(time.Duration(step.GetPayloadDelay) * time.Second)
+          let period = chronos.seconds(step.getPayloadDelay)
+          waitFor sleepAsync(period)
 
-          payloadID, err = step.GetPayloadCustomizer.getPayloadID(payloadID)
-          if err != nil {
-            fatal "Error getting custom payload ID (payload %d/%d): %v", payload=shadow.p+1, count=shadow.payloadCount, err)
-          }
-
-          expectedError, err = step.GetPayloadCustomizer.getExpectedError()
-          if err != nil {
-            fatal "Error getting custom expected error (payload %d/%d): %v", payload=shadow.p+1, count=shadow.payloadCount, err)
-          }
-
-          r = env.client.GetPayload(payloadID, payloadAttributes)
-          r.ExpectationDescription = step.ExpectationDescription
-          if expectedError != nil {
-            r.ExpectErrorCode(*expectedError)
+          let r = env.engine.client.getPayload(payloadID, version)
+          if expectedError != 0:
+            r.expectErrorCode(expectedError, step.expectationDescription)
           else:
-            r.ExpectNoError()
-        ]#
+            r.expectNoError(step.expectationDescription)
+
         return true
       ,
       onGetPayload: proc(): bool =
@@ -301,69 +284,51 @@ method execute*(step: NewPayloads, ctx: CancunTestContext): bool =
         return true
       ,
       onNewPayloadBroadcast: proc(): bool =
-        #[if step.NewPayloadCustomizer != nil {
+        if step.newPayloadCustomizer != nil:
+          step.newPayloadCustomizer.setEngineAPIVersionResolver(env.engine.com)
+
           # Send a test NewPayload directive with either a modified payload or modifed versioned hashes
-          var (
-            payload = env.clMock.latestPayloadBuilt
-            r *test.NewPayloadResponseExpectObject
-            expectedError *int
-            expectedStatus test.PayloadStatus = test.Valid
-            err error
-          )
+          var
+            payload = env.clMock.latestExecutableData
+            expectedError = step.newPayloadCustomizer.getExpectedError()
+            expectedStatus = PayloadExecutionStatus.valid
 
           # Send a custom new payload
-          step.NewPayloadCustomizer.setEngineAPIVersionResolver(t.ForkConfig)
-          testEngine = t.TestEngine.WithEngineAPIVersionResolver(step.NewPayloadCustomizer)
+          payload = step.newPayloadCustomizer.customizePayload(payload)
+          let
+            version = step.newPayloadCustomizer.newPayloadVersion(payload.basePayload.timestamp.uint64)
 
-          payload, err = step.NewPayloadCustomizer.customizePayload(payload)
-          if err != nil {
-            fatal "Error customizing payload (payload %d/%d): %v", payload=shadow.p+1, count=shadow.payloadCount, err)
-          }
-          expectedError, err = step.NewPayloadCustomizer.getExpectedError()
-          if err != nil {
-            fatal "Error getting custom expected error (payload %d/%d): %v", payload=shadow.p+1, count=shadow.payloadCount, err)
-          }
-          if step.NewPayloadCustomizer.getExpectInvalidStatus() {
-            expectedStatus = test.Invalid
-          }
+          if step.newPayloadCustomizer.getExpectInvalidStatus():
+            expectedStatus = PayloadExecutionStatus.invalid
 
-          r = env.client.NewPayload(payload)
-          r.ExpectationDescription = step.ExpectationDescription
-          if expectedError != nil {
-            r.ExpectErrorCode(*expectedError)
+          let r = env.client.newPayload(version, payload)
+          if expectedError != 0:
+            r.expectErrorCode(expectedError, step.expectationDescription)
           else:
-            r.ExpectNoError()
-            r.ExpectStatus(expectedStatus)
-          }
-        }
+            r.expectNoError(step.expectationDescription)
+            r.expectNPStatus(expectedStatus)
 
-        if step.FcUOnHeadSet != nil {
-          var (
-            forkchoiceState api.ForkchoiceStateV1 = env.clMock.latestForkchoice
-            expectedError *int
-            expectedStatus test.PayloadStatus = test.Valid
-            err error
-          )
-          step.FcUOnHeadSet.setEngineAPIVersionResolver(t.ForkConfig)
-          testEngine = t.TestEngine.WithEngineAPIVersionResolver(step.FcUOnHeadSet)
-          expectedError, err = step.FcUOnHeadSet.getExpectedError()
-          if err != nil {
-            fatal "Error getting custom expected error (payload %d/%d): %v", payload=shadow.p+1, count=shadow.payloadCount, err)
-          }
-          if step.FcUOnHeadSet.getExpectInvalidStatus() {
-            expectedStatus = test.Invalid
-          }
+        if step.fcUOnHeadSet != nil:
+          step.fcUOnHeadSet.setEngineAPIVersionResolver(env.engine.com)
 
-          forkchoiceState.HeadBlockHash = env.clMock.latestPayloadBuilt.blockHash
+          var
+            forkchoiceState = env.clMock.latestForkchoice
+            expectedError = step.fcUOnHeadSet.getExpectedError()
+            expectedStatus = PayloadExecutionStatus.valid
+            timestamp = env.clMock.latestPayloadBuilt.timestamp.uint64
+            version = step.fcUOnHeadSet.forkchoiceUpdatedVersion(timestamp)
 
-          r = env.client.ForkchoiceUpdated(&forkchoiceState, nil, env.clMock.latestPayloadBuilt.Timestamp)
-          r.ExpectationDescription = step.ExpectationDescription
-          if expectedError != nil {
-            r.ExpectErrorCode(*expectedError)
+          if step.fcUOnHeadSet.getExpectInvalidStatus():
+            expectedStatus = PayloadExecutionStatus.invalid
+
+          forkchoiceState.headBlockHash = env.clMock.latestPayloadBuilt.blockHash
+
+          let r = env.engine.client.forkchoiceUpdated(version, forkchoiceState)
+          if expectedError != 0:
+            r.expectErrorCode(expectedError, step.expectationDescription)
           else:
-            r.ExpectNoError()
-            r.ExpectPayloadStatus(expectedStatus)
-        ]#
+            r.expectNoError(step.expectationDescription)
+            r.expectPayloadStatus(expectedStatus)
+
         return true
       ,
       onForkchoiceBroadcast: proc(): bool =
@@ -1,63 +1,57 @@
+import
+  std/strutils,
+  chronicles,
+  ./step_desc,
+  ./customizer,
+  ../test_env,
+  ../types
+
 # Send a modified version of the latest payload produced using NewPayloadV3
-type SendModifiedLatestPayload struct {
-  ClientID uint64
-  NewPayloadCustomizer helper.NewPayloadCustomizer
-}
+type
+  SendModifiedLatestPayload* = ref object of TestStep
+    clientID*            : int
+    newPayloadCustomizer*: NewPayloadCustomizer
 
 method execute*(step: SendModifiedLatestPayload, ctx: CancunTestContext): bool =
   # Get the latest payload
-  var (
-    payload = &env.clMock.latestPayloadBuilt
-    expectedError *int = nil
-    expectedStatus test.PayloadStatus = test.Valid
-    err error = nil
-  )
-  if payload == nil {
-    return error "TEST-FAIL: no payload available")
-  }
-  if env.clMock.LatestBlobBundle == nil {
-    return error "TEST-FAIL: no blob bundle available")
-  }
-  if step.NewPayloadCustomizer == nil {
-    return error "TEST-FAIL: no payload customizer available")
-  }
+  doAssert(step.newPayloadCustomizer.isNil.not, "TEST-FAIL: no payload customizer available")
+
+  var
+    env = ctx.env
+    payload = env.clMock.latestExecutableData
+    expectedError = step.newPayloadCustomizer.getExpectedError()
+    expectedStatus = PayloadExecutionStatus.valid
+
+  doAssert(env.clMock.latestBlobsBundle.isSome, "TEST-FAIL: no blob bundle available")
 
   # Send a custom new payload
-  step.NewPayloadCustomizer.setEngineAPIVersionResolver(t.ForkConfig)
-  payload, err = step.NewPayloadCustomizer.customizePayload(payload)
-  if err != nil {
-    fatal "Error customizing payload: %v", err)
-  }
-  expectedError, err = step.NewPayloadCustomizer.getExpectedError()
-  if err != nil {
-    fatal "Error getting custom expected error: %v", err)
-  }
-  if step.NewPayloadCustomizer.getExpectInvalidStatus() {
-    expectedStatus = test.Invalid
-  }
+  step.newPayloadCustomizer.setEngineAPIVersionResolver(env.engine.com)
+  payload = step.newPayloadCustomizer.customizePayload(payload)
+  let version = step.newPayloadCustomizer.newPayloadVersion(payload.basePayload.timestamp.uint64)
+
+  if step.newPayloadCustomizer.getExpectInvalidStatus():
+    expectedStatus = PayloadExecutionStatus.invalid
 
   # Send the payload
-  if step.ClientID >= uint64(len(t.TestEngines)) {
-    return error "invalid client index %d", step.ClientID)
-  }
-  testEngine = t.TestEngines[step.ClientID].WithEngineAPIVersionResolver(step.NewPayloadCustomizer)
-  r = env.client.NewPayload(payload)
-  if expectedError != nil {
-    r.ExpectErrorCode(*expectedError)
+  doAssert(step.clientID < env.numEngines(), "invalid client index " & $step.clientID)
+  let eng = env.engines(step.clientID)
+  let r = eng.client.newPayload(version, payload)
+  if expectedError != 0:
+    r.expectErrorCode(expectedError)
   else:
-    r.ExpectStatus(expectedStatus)
-  }
-  return nil
-}
+    r.expectNPStatus(expectedStatus)
+
+  return true
 
 method description*(step: SendModifiedLatestPayload): string =
-  desc = fmt.Sprintf("SendModifiedLatestPayload: client %d, expected invalid=%T, ", step.ClientID, step.NewPayloadCustomizer.getExpectInvalidStatus())
-  /*
+  let desc = "SendModifiedLatestPayload: client $1, expected invalid=$2" % [
+    $step.clientID, $step.newPayloadCustomizer.getExpectInvalidStatus()]
+  #[
   TODO: Figure out if we need this.
   if step.VersionedHashes != nil {
     desc += step.VersionedHashes.Description()
   }
-  */
+  ]#
   return desc
-}
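A hedged sketch of composing this step with the customizers introduced earlier in the commit; the exact test-spec wiring is assumed, not shown here:

  # Hypothetical: replay the latest payload to client 1 with an extra, random
  # versioned hash and expect the engine to report it as INVALID.
  SendModifiedLatestPayload(
    clientID: 1,
    newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
      payloadCustomizer: CustomPayloadData(
        versionedHashesCustomizer: ExtraVersionedHash()),
      expectInvalidStatus: true))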
(One file diff in this commit is suppressed because it is too large.)
@@ -43,6 +43,9 @@ type
     # Chain History
     headerHistory          : Table[uint64, common.BlockHeader]
 
+    # Payload ID History
+    payloadIDHistory       : Table[string, PayloadID]
+
     # PoS Chain History Information
     prevRandaoHistory*     : Table[uint64, common.Hash256]
     executedPayloadHistory*: Table[uint64, ExecutionPayload]
@@ -78,6 +81,21 @@ type
     onSafeBlockChange *     : proc(): bool {.gcsafe.}
     onFinalizedBlockChange* : proc(): bool {.gcsafe.}
 
+proc collectBlobHashes(list: openArray[Web3Tx]): seq[common.Hash256] =
+  for w3tx in list:
+    let tx = ethTx(w3Tx)
+    for h in tx.versionedHashes:
+      result.add h
+
+func latestExecutableData*(cl: CLMocker): ExecutableData =
+  ExecutableData(
+    basePayload: cl.latestPayloadBuilt,
+    beaconRoot : ethHash cl.latestPayloadAttributes.parentBeaconBlockRoot,
+    attr       : cl.latestPayloadAttributes,
+    versionedHashes: some(collectBlobHashes(cl.latestPayloadBuilt.transactions)),
+  )
+
 func latestPayloadNumber*(h: Table[uint64, ExecutionPayload]): uint64 =
   result = 0'u64
   for n, _ in h:
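`latestExecutableData` bundles the last built payload with its beacon root, payload attributes and the blob hashes collected from its transactions, which is the shape the payload customizers operate on. A hedged sketch of that flow, reusing names from this diff; the customizer choice is illustrative only:

  # Hypothetical: take the CL mock's latest payload and corrupt its versioned
  # hashes before re-sending it through newPayload.
  let data = env.clMock.latestExecutableData
  let custom = CustomPayloadData(
    versionedHashesCustomizer: CorruptVersionedHashes())
  let tampered = custom.customizePayload(data)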
@@ -167,6 +185,19 @@ proc isBlockPoS*(cl: CLMocker, bn: common.BlockNumber): bool =
 
   return true
 
+proc addPayloadID*(cl: CLMocker, eng: EngineEnv, newPayloadID: PayloadID): bool =
+  # Check if payload ID has been used before
+  var zeroPayloadID: PayloadID
+  if cl.payloadIDHistory.getOrDefault(eng.ID(), zeroPayloadID) == newPayloadID:
+    error "reused payload ID", ID = newPayloadID.toHex
+    return false
+
+  # Add payload ID to history
+  cl.payloadIDHistory[eng.ID()] = newPayloadID
+  info "CLMocker: Added payload for client",
+    ID=newPayloadID.toHex, ID=eng.ID()
+  return true
+
 # Return the per-block timestamp value increment
 func getTimestampIncrement(cl: CLMocker): EthTime =
   EthTime cl.blockTimestampIncrement.get(1)
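The check enforces the Engine API expectation that a client must not hand back the same payload ID for two different forkchoiceUpdated requests. A hedged sketch of how a test callback might consume it, mirroring the NewPayloads step earlier in this commit:

  # Hypothetical: record the payload ID returned by forkchoiceUpdated and fail
  # the test step if the engine reused an earlier ID.
  let r = env.engine.client.forkchoiceUpdated(version, forkchoiceState, some(payloadAttributes))
  if r.get().payloadID.isSome:
    testCond env.clMock.addPayloadID(env.engine, r.get().payloadID.get())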
@@ -472,12 +503,14 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
 
   if cb.onPayloadProducerSelected != nil:
     if not cb.onPayloadProducerSelected():
+      debugEcho "***PAYLOAD PRODUCER SELECTED ERROR***"
       return false
 
   cl.generatePayloadAttributes()
 
   if cb.onPayloadAttributesGenerated != nil:
     if not cb.onPayloadAttributesGenerated():
+      debugEcho "***ON PAYLOAD ATTRIBUTES ERROR***"
       return false
 
   if not cl.requestNextPayload():
@@ -487,6 +520,7 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
 
   if cb.onRequestNextPayload != nil:
     if not cb.onRequestNextPayload():
+      debugEcho "***ON REQUEST NEXT PAYLOAD ERROR***"
       return false
 
   # Give the client a delay between getting the payload ID and actually retrieving the payload
@@ -499,13 +533,16 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
 
   if cb.onGetPayload != nil:
     if not cb.onGetPayload():
+      debugEcho "***ON GET PAYLOAD ERROR***"
       return false
 
   if not cl.broadcastNextNewPayload():
+    debugEcho "***ON BROADCAST NEXT NEW PAYLOAD ERROR***"
     return false
 
   if cb.onNewPayloadBroadcast != nil:
     if not cb.onNewPayloadBroadcast():
+      debugEcho "***ON NEW PAYLOAD BROADCAST ERROR***"
       return false
 
   # Broadcast forkchoice updated with new HeadBlock to all clients

@@ -523,20 +560,24 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
    cl.latestForkchoice.finalizedBlockHash = cl.headHashHistory[hhLen - cl.slotsToFinalized - 1]

  if not cl.broadcastLatestForkchoice():
    debugEcho "***ON BROADCAST LATEST FORK CHOICE ERROR***"
    return false

  if cb.onForkchoiceBroadcast != nil:
    if not cb.onForkchoiceBroadcast():
      debugEcho "***ON FORK CHOICE BROADCAST ERROR***"
      return false

  # Broadcast forkchoice updated with new SafeBlock to all clients
  if cb.onSafeBlockChange != nil and cl.latestForkchoice.safeBlockHash != previousForkchoice.safeBlockHash:
    if not cb.onSafeBlockChange():
      debugEcho "***ON SAFE BLOCK CHANGE ERROR***"
      return false

  # Broadcast forkchoice updated with new FinalizedBlock to all clients
  if cb.onFinalizedBlockChange != nil and cl.latestForkchoice.finalizedBlockHash != previousForkchoice.finalizedBlockHash:
    if not cb.onFinalizedBlockChange():
      debugEcho "***ON FINALIZED BLOCK CHANGE ERROR***"
      return false

  # Broadcast forkchoice updated with new FinalizedBlock to all clients

@@ -3,6 +3,12 @@ import
  web3/engine_api_types,
  ../../../nimbus/rpc/execution_types

proc engine_newPayloadV1(payload: ExecutionPayload): PayloadStatusV1
proc engine_newPayloadV2(payload: ExecutionPayload): PayloadStatusV1
proc engine_newPayloadV3(payload: ExecutionPayload,
  expectedBlobVersionedHashes: Option[seq[VersionedHash]],
  parentBeaconBlockRoot: Option[FixedBytes[32]]): PayloadStatusV1

proc engine_newPayloadV2(payload: ExecutionPayloadV1OrV2): PayloadStatusV1
proc engine_forkchoiceUpdatedV2(forkchoiceState: ForkchoiceStateV1, payloadAttributes: Option[PayloadAttributes]): ForkchoiceUpdatedResponse
proc engine_forkchoiceUpdatedV3(forkchoiceState: ForkchoiceStateV1, payloadAttributes: Option[PayloadAttributes]): ForkchoiceUpdatedResponse

@@ -8,7 +8,8 @@ import
  ../../../premix/parser,
  ../../../nimbus/rpc/hexstrings,
  ../../../nimbus/beacon/execution_types,
  ../../../nimbus/beacon/web3_eth_conv
  ../../../nimbus/beacon/web3_eth_conv,
  ./types

import web3/engine_api as web3_engine_api

@@ -144,6 +145,27 @@ proc newPayloadV3*(client: RpcClient,
  wrapTrySimpleRes:
    client.engine_newPayloadV3(payload, versionedHashes, parentBeaconBlockRoot)

proc newPayloadV1*(client: RpcClient,
      payload: ExecutionPayload):
      Result[PayloadStatusV1, string] =
  wrapTrySimpleRes:
    client.engine_newPayloadV1(payload)

proc newPayloadV2*(client: RpcClient,
      payload: ExecutionPayload):
      Result[PayloadStatusV1, string] =
  wrapTrySimpleRes:
    client.engine_newPayloadV2(payload)

proc newPayloadV3*(client: RpcClient,
      payload: ExecutionPayload,
      versionedHashes: Option[seq[VersionedHash]],
      parentBeaconBlockRoot: Option[FixedBytes[32]]
      ):
      Result[PayloadStatusV1, string] =
  wrapTrySimpleRes:
    client.engine_newPayloadV3(payload, versionedHashes, parentBeaconBlockRoot)

proc collectBlobHashes(list: openArray[Web3Tx]): seq[Web3Hash] =
  for w3tx in list:
    let tx = ethTx(w3Tx)

@@ -165,6 +187,17 @@ proc newPayload*(client: RpcClient,
    versionedHashes,
    w3Hash beaconRoot.get)

proc newPayload*(client: RpcClient,
               version: Version,
               payload: ExecutableData): Result[PayloadStatusV1, string] =
  case version
  of Version.V1: return client.newPayloadV1(payload.basePayload)
  of Version.V2: return client.newPayloadV2(payload.basePayload)
  of Version.V3:
    return client.newPayloadV3(payload.basePayload,
      w3Hashes payload.versionedHashes,
      w3Hash payload.beaconRoot)

proc exchangeCapabilities*(client: RpcClient,
      methods: seq[string]):
      Result[seq[string], string] =
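
The version-dispatching newPayload wrapper above lets test specs stay fork-agnostic: they build one ExecutableData (defined later in this commit) and the wrapper picks the matching engine_newPayloadVx call. A hedged usage sketch; client and executableData are assumed to be in scope as elsewhere in the simulator.

  # Sketch: drive the same payload through every endpoint version and only
  # check that the call itself did not fail at the transport level.
  for version in [Version.V1, Version.V2, Version.V3]:
    let res = client.newPayload(version, executableData)
    if res.isErr:
      error "newPayload transport failure", version, msg=res.error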

@@ -11,6 +11,7 @@ import
  core/sealer,
  core/chain,
  core/tx_pool,
  core/tx_pool/tx_item,
  core/block_import,
  rpc,
  sync/protocol,

@@ -34,6 +35,7 @@ type
    ttd    : DifficultyInt
    client : RpcHttpClient
    sync   : BeaconSyncRef
    txPool : TxPoolRef

const
  baseFolder = "hive_integration/nodocker/engine"

@@ -135,7 +137,8 @@ proc newEngineEnv*(conf: var NimbusConf, chainFile: string, enableAuth: bool): E
    server : server,
    sealer : sealer,
    client : client,
    sync   : sync
    sync   : sync,
    txPool : txPool
  )

proc close*(env: EngineEnv) =

@@ -169,3 +172,24 @@ func node*(env: EngineEnv): ENode =

proc connect*(env: EngineEnv, node: ENode) =
  waitFor env.node.connectToNode(node)

func ID*(env: EngineEnv): string =
  $env.node.listeningAddress

proc peer*(env: EngineEnv): Peer =
  doAssert(env.node.numPeers > 0)
  for peer in env.node.peers:
    return peer

proc getTxsInPool*(env: EngineEnv, txHashes: openArray[Hash256]): seq[Transaction] =
  result = newSeqOfCap[Transaction](txHashes.len)
  for txHash in txHashes:
    let res = env.txPool.getItem(txHash)
    if res.isErr: continue
    let item = res.get
    if item.reject == txInfoOk:
      result.add item.tx

proc numTxsInPool*(env: EngineEnv): int =
  env.txPool.numTxs
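
A hedged sketch of how the new pool accessors might be used from a test, e.g. to confirm that blob transactions propagated to a secondary engine; the verifyPoolContent helper and its hash list are illustrative, only getTxsInPool and numTxsInPool come from this commit.

  # Sketch: every broadcast tx hash should be present (and not rejected)
  # in the peer's txpool.
  proc verifyPoolContent(sec: EngineEnv, txHashes: openArray[Hash256]): bool =
    let pooled = sec.getTxsInPool(txHashes)
    if pooled.len != txHashes.len:
      error "missing transactions in pool",
        want=txHashes.len, get=pooled.len
      return false
    sec.numTxsInPool >= txHashes.len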

@@ -82,10 +82,11 @@ func engine*(env: TestEnv): EngineEnv =
proc setupCLMock*(env: TestEnv) =
  env.clmock = newCLMocker(env.engine, env.engine.com)

proc addEngine*(env: TestEnv, addToCL: bool = true): EngineEnv =
proc addEngine*(env: TestEnv, addToCL: bool = true, connectBootNode: bool = true): EngineEnv =
  doAssert(env.clMock.isNil.not)
  var conf = env.conf # clone the conf
  let eng = env.addEngine(conf)
  if connectBootNode:
    eng.connect(env.engine.node)
  if addToCL:
    env.clMock.addEngine(eng)

@@ -29,6 +29,12 @@ type
    run*  : proc(spec: BaseSpec): bool
    spec* : BaseSpec

  ExecutableData* = object
    basePayload*    : ExecutionPayload
    beaconRoot*     : Option[common.Hash256]
    attr*           : PayloadAttributes
    versionedHashes*: Option[seq[common.Hash256]]

const
  DefaultTimeout* = 60 # seconds
  DefaultSleep* = 1
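
ExecutableData groups everything a test needs to replay or mutate a payload: the payload itself, the attributes it was built from, the blob hashes and the beacon root. A minimal construction sketch, mirroring the way the CL mocker fills it in the first hunk of this commit (cl is a CLMocker; the pre-Cancun value for beaconRoot is illustrative):

  let executableData = ExecutableData(
    basePayload    : cl.latestPayloadBuilt,
    beaconRoot     : none(common.Hash256),   # pre-Cancun payloads carry no root
    attr           : cl.latestPayloadAttributes,
    versionedHashes: some(collectBlobHashes(cl.latestPayloadBuilt.transactions)),
  )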

@@ -152,3 +158,27 @@ template expectLatestValidHash*(res: untyped, expectedHash: Web3Hash) =
    error "Expect latest valid hash isSome"
  testCond s.latestValidHash.get == expectedHash:
    error "latest valid hash mismatch", expect=expectedHash, get=s.latestValidHash.get

template expectErrorCode*(res: untyped, errCode: int, expectedDesc: string) =
  testCond res.isErr:
    error "unexpected result, want error, get ok"
  testCond res.error.find($errCode) != -1:
    fatal "DEBUG", msg=expectedDesc

template expectNoError*(res: untyped, expectedDesc: string) =
  testCond res.isOk:
    fatal "DEBUG", msg=expectedDesc, err=res.error

template expectPayloadStatus*(res: untyped, cond: PayloadExecutionStatus) =
  testCond res.isOk:
    error "Unexpected FCU Error", msg=res.error
  let s = res.get()
  testCond s.payloadStatus.status == cond:
    error "Unexpected FCU status", expect=cond, get=s.payloadStatus.status

template expectNPStatus*(res: untyped, cond: PayloadExecutionStatus) =
  testCond res.isOk:
    error "Unexpected newPayload error", msg=res.error
  let s = res.get()
  testCond s.status == cond:
    error "Unexpected newPayload status", expect=cond, get=s.status
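
These templates follow the existing testCond convention: on failure they log and make the enclosing test proc return false. A hedged sketch of how a spec body might combine them; fcState, attrs and the forkchoiceUpdatedV2/V3 client wrappers are assumed to exist as elsewhere in the simulator and are not defined in this hunk.

  # Sketch: a V3 fcU with pre-Cancun attributes should fail with invalid params,
  # while a plain V2 fcU should report a VALID payload status.
  let bad = client.forkchoiceUpdatedV3(fcState, some(attrs))
  bad.expectErrorCode(engineApiInvalidParams, "V3 fcU with pre-Cancun attributes")

  let ok = client.forkchoiceUpdatedV2(fcState, none(PayloadAttributes))
  ok.expectPayloadStatus(PayloadExecutionStatus.valid)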

@@ -231,7 +231,6 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
  # Produce any blocks necessary to reach withdrawals fork
  var pbRes = env.clMock.produceBlocks(ws.getPreWithdrawalsBlockCount, BlockProcessCallbacks(
    onPayloadProducerSelected: proc(): bool =
      # Send some transactions
      let numTx = ws.getTransactionCountPerPayload()
      for i in 0..<numTx:

@@ -262,8 +261,8 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
          withdrawals: some(newSeq[WithdrawalV1]()),
        ))
      )
      #r.ExpectationDescription = "Sent pre-shanghai Forkchoice using ForkchoiceUpdatedV2 + Withdrawals, error is expected"
      let expectationDescription = "Sent pre-shanghai Forkchoice using ForkchoiceUpdatedV2 + Withdrawals, error is expected"
      r.expectErrorCode(engineApiInvalidParams)
      r.expectErrorCode(engineApiInvalidParams, expectationDescription)

      # Send a valid Pre-Shanghai request using ForkchoiceUpdatedV2
      # (clMock uses V1 by default)

@@ -278,8 +277,8 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
          withdrawals: none(seq[WithdrawalV1]),
        ))
      )
      #r.ExpectationDescription = "Sent pre-shanghai Forkchoice ForkchoiceUpdatedV2 + null withdrawals, no error is expected"
      let expectationDescription2 = "Sent pre-shanghai Forkchoice ForkchoiceUpdatedV2 + null withdrawals, no error is expected"
      r.expectNoError()
      r.expectNoError(expectationDescription2)

      return true
    ,

@@ -351,8 +350,8 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
          withdrawals: none(seq[WithdrawalV1]),
        ))
      )
      #r.ExpectationDescription = "Sent shanghai fcu using PayloadAttributesV1, error is expected"
      let expectationDescription = "Sent shanghai fcu using PayloadAttributesV1, error is expected"
      r.expectErrorCode(engineApiInvalidParams)
      r.expectErrorCode(engineApiInvalidParams, expectationDescription)

      # Send some withdrawals
      let wfb = ws.generateWithdrawalsForBlock(nextIndex, startAccount)

@@ -39,7 +39,7 @@ template validateVersion(attrsOpt, com, expectedVersion) =
    raise invalidParams("if timestamp is earlier than Shanghai," &
      " payloadAttributes must be PayloadAttributesV1")

  if version != expectedVersion:
  if expectedVersion == Version.V3 and version != expectedVersion:
    raise invalidParams("forkChoiceUpdated" & $expectedVersion &
      " expect PayloadAttributes" & $expectedVersion &
      " but got PayloadAttributes" & $version)
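
The relaxed check above means forkchoiceUpdatedV1/V2 now tolerate older attribute versions and only the V3 handler insists on an exact match, while the Shanghai/Cancun timestamp checks earlier in the same template still apply. A standalone sketch of the rule; the helper name is illustrative:

  # Sketch: only a V3 handler rejects a PayloadAttributes version mismatch.
  func attrsVersionAccepted(handlerVersion, attrVersion: Version): bool =
    if handlerVersion == Version.V3:
      attrVersion == handlerVersion
    else:
      true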

@@ -18,17 +18,25 @@ import

{.push gcsafe, raises:[CatchableError].}

proc getPayload*(ben: BeaconEngineRef, id: PayloadID): GetPayloadV2Response =
proc getPayload*(ben: BeaconEngineRef,
                 expectedVersion: Version,
                 id: PayloadID): GetPayloadV2Response =
  trace "Engine API request received",
    meth = "GetPayload", id

  var payload: ExecutionPayloadV1OrV2
  var payloadGeneric: ExecutionPayload
  var blockValue: UInt256
  if not ben.get(id, blockValue, payload):
  if not ben.get(id, blockValue, payloadGeneric):
    raise unknownPayload("Unknown payload")

  let version = payloadGeneric.version
  if version > expectedVersion:
    raise unsupportedFork("getPayload" & $expectedVersion &
      " expect ExecutionPayload" & $expectedVersion &
      " but get ExecutionPayload" & $version)

  GetPayloadV2Response(
    executionPayload: payload,
    executionPayload: payloadGeneric.V1V2,
    blockValue: blockValue
  )

@@ -36,11 +44,16 @@ proc getPayloadV3*(ben: BeaconEngineRef, id: PayloadID): GetPayloadV3Response =
  trace "Engine API request received",
    meth = "GetPayload", id

  var payload: ExecutionPayloadV3
  var payloadGeneric: ExecutionPayload
  var blockValue: UInt256
  if not ben.get(id, blockValue, payload):
  if not ben.get(id, blockValue, payloadGeneric):
    raise unknownPayload("Unknown payload")

  let version = payloadGeneric.version
  if version != Version.V3:
    raise unsupportedFork("getPayloadV3 expect ExecutionPayloadV3 but get ExecutionPayload" & $version)

  let payload = payloadGeneric.V3
  let com = ben.com
  if not com.isCancunOrLater(ethTime payload.timestamp):
    raise unsupportedFork("payload timestamp is less than Cancun activation")

@@ -38,7 +38,7 @@ template validateVersion(com, timestamp, version, expectedVersion) =
    raise invalidParams("if timestamp is earlier than Shanghai, " &
      "payload must be ExecutionPayloadV1")

  if version != expectedVersion:
  if expectedVersion == Version.V3 and version != expectedVersion:
    raise invalidParams("newPayload" & $expectedVersion &
      " expect ExecutionPayload" & $expectedVersion &
      " but got ExecutionPayload" & $version)

@@ -54,6 +54,10 @@ proc newPayload*(ben: BeaconEngineRef,
    number = payload.blockNumber,
    hash = payload.blockHash

  if expectedVersion == Version.V3:
    if beaconRoot.isNone:
      raise invalidParams("newPayloadV3 expect beaconRoot but got none")

  let
    com = ben.com
    db  = com.db

@@ -71,6 +71,9 @@ func w3PrevRandao*(): Web3PrevRandao =
func w3Address*(): Web3Address =
  discard

func w3Hash*(): Web3Hash =
  discard

# ------------------------------------------------------------------------------
# Web3 types to Eth types
# ------------------------------------------------------------------------------

@@ -144,6 +147,19 @@ func ethTxs*(list: openArray[Web3Tx], removeBlobs = false):
func w3Hash*(x: common.Hash256): Web3Hash =
  Web3Hash x.data

func w3Hashes*(list: openArray[common.Hash256]): seq[Web3Hash] =
  for x in list:
    result.add Web3Hash x.data

func w3Hashes*(z: Option[seq[common.Hash256]]): Option[seq[Web3Hash]] =
  if z.isNone: none(seq[Web3Hash])
  else:
    let list = z.get
    var v = newSeqOfCap[Web3Hash](list.len)
    for x in list:
      v.add Web3Hash x.data
    some(v)

func w3Hash*(x: Option[common.Hash256]): Option[BlockHash] =
  if x.isNone: none(BlockHash)
  else: some(BlockHash x.get.data)
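
A short usage sketch of the new converters, e.g. when turning locally collected blob hashes into the Web3 form expected by engine_newPayloadV3; the blobHashes value is illustrative.

  let blobHashes: seq[common.Hash256] = @[]          # e.g. gathered from blob txs
  let asWeb3    : seq[Web3Hash] = w3Hashes blobHashes
  let asOption  : Option[seq[Web3Hash]] = w3Hashes some(blobHashes)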

@@ -37,7 +37,13 @@ logScope:
# ------------------------------------------------------------------------------

proc checkTxBasic(xp: TxPoolRef; item: TxItemRef): bool =
  let res = validateTxBasic(item.tx.removeNetworkPayload, xp.chain.nextFork)
  let res = validateTxBasic(
    item.tx.removeNetworkPayload,
    xp.chain.nextFork,
    # A transaction targeting the next fork may arrive
    # before that fork is activated
    validateFork = false
  )
  if res.isOk:
    return true
  item.info = res.error

@@ -226,8 +226,10 @@ func gasCost*(tx: Transaction): UInt256 =

proc validateTxBasic*(
    tx:   Transaction;     ## tx to validate
    fork: EVMFork): Result[void, string] =
    fork: EVMFork,
    validateFork: bool = true): Result[void, string] =

  if validateFork:
    if tx.txType == TxEip2930 and fork < FkBerlin:
      return err("invalid tx: Eip2930 Tx type detected before Berlin")
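
With the new flag, callers that legitimately see next-fork transactions ahead of activation (the txpool change above) can skip the fork gate, while block validation keeps the strict default. A hedged sketch; tx and fork are assumed to be in scope.

  let strict  = validateTxBasic(tx, fork)                        # validateFork defaults to true
  let relaxed = validateTxBasic(tx, fork, validateFork = false)  # pool-side pre-check
  if strict.isErr and relaxed.isOk:
    echo "accepted ahead of fork activation: ", strict.error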

@@ -15,7 +15,8 @@ import
  ../beacon/api_handler,
  ../beacon/beacon_engine,
  ../beacon/web3_eth_conv,
  ../beacon/execution_types
  ../beacon/execution_types,
  ../beacon/api_handler/api_utils

{.push raises: [].}

@@ -50,17 +51,19 @@ proc setupEngineAPI*(engine: BeaconEngineRef, server: RpcServer) =
    return engine.newPayload(Version.V2, payload)

  server.rpc("engine_newPayloadV3") do(payload: ExecutionPayload,
      expectedBlobVersionedHashes: seq[Web3Hash],
      expectedBlobVersionedHashes: Option[seq[Web3Hash]],
      parentBeaconBlockRoot: Web3Hash) -> PayloadStatusV1:
      parentBeaconBlockRoot: Option[Web3Hash]) -> PayloadStatusV1:
    if not validateVersionedHashed(payload, expectedBlobVersionedHashes):
    if expectedBlobVersionedHashes.isNone:
      raise invalidParams("newPayloadV3 expect blobVersionedHashes but got none")
    if not validateVersionedHashed(payload, expectedBlobVersionedHashes.get):
      return invalidStatus()
    return engine.newPayload(Version.V3, payload, some(parentBeaconBlockRoot))
    return engine.newPayload(Version.V3, payload, parentBeaconBlockRoot)

  server.rpc("engine_getPayloadV1") do(payloadId: PayloadID) -> ExecutionPayloadV1:
    return engine.getPayload(payloadId).executionPayload.V1
    return engine.getPayload(Version.V1, payloadId).executionPayload.V1

  server.rpc("engine_getPayloadV2") do(payloadId: PayloadID) -> GetPayloadV2Response:
    return engine.getPayload(payloadId)
    return engine.getPayload(Version.V2, payloadId)

  server.rpc("engine_getPayloadV3") do(payloadId: PayloadID) -> GetPayloadV3Response:
    return engine.getPayloadV3(payloadId)