Refactor engine api and cleanup web3 types conversion

jangko 2023-08-27 08:23:45 +07:00
parent 7d113b839f
commit 7a1fe5707c
No known key found for this signature in database
GPG Key ID: 31702AE10541E6B9
38 changed files with 1683 additions and 1406 deletions

View File

@@ -3,6 +3,7 @@ import
  test_env,
  chronicles,
  nimcrypto/[hmac],
+ web3/engine_api_types,
  json_rpc/[rpcclient],
  ./types

View File

@@ -5,8 +5,9 @@ import
  stew/[byteutils, endians2],
  eth/common, chronos,
  json_rpc/rpcclient,
- ../../../nimbus/rpc/merge/mergeutils,
- ../../../nimbus/rpc/execution_types,
+ ../../../nimbus/beacon/execution_types,
+ ../../../nimbus/beacon/web3_eth_conv,
+ ../../../nimbus/beacon/payload_conv,
  ../../../nimbus/[constants],
  ../../../nimbus/common as nimbus_common,
  ./engine_client

@@ -322,11 +323,12 @@ proc getNextPayload*(cl: CLMocker): bool =
  cl.latestBlockValue = x.blockValue
  cl.latestBlobsBundle = x.blobsBundle

- let header = toBlockHeader(cl.latestPayloadBuilt)
- let blockHash = BlockHash header.blockHash.data
+ let beaconRoot = ethHash cl.latestPayloadAttributes.parentBeaconblockRoot
+ let header = blockHeader(cl.latestPayloadBuilt, beaconRoot)
+ let blockHash = w3Hash header.blockHash
  if blockHash != cl.latestPayloadBuilt.blockHash:
    error "CLMocker: getNextPayload blockHash mismatch",
-     expected=cl.latestPayloadBuilt.blockHash.toHex,
+     expected=cl.latestPayloadBuilt.blockHash,
      get=blockHash.toHex
    return false

@@ -338,19 +340,19 @@ proc getNextPayload*(cl: CLMocker): bool =
  if cl.latestPayloadBuilt.feeRecipient != cl.latestPayloadAttributes.suggestedFeeRecipient:
    error "CLMocker: Incorrect SuggestedFeeRecipient on payload built",
-     expect=cl.latestPayloadBuilt.feeRecipient.toHex,
-     get=cl.latestPayloadAttributes.suggestedFeeRecipient.toHex
+     expect=cl.latestPayloadBuilt.feeRecipient,
+     get=cl.latestPayloadAttributes.suggestedFeeRecipient
    return false

  if cl.latestPayloadBuilt.prevRandao != cl.latestPayloadAttributes.prevRandao:
    error "CLMocker: Incorrect PrevRandao on payload built",
-     expect=cl.latestPayloadBuilt.prevRandao.toHex,
-     get=cl.latestPayloadAttributes.prevRandao.toHex
+     expect=cl.latestPayloadBuilt.prevRandao,
+     get=cl.latestPayloadAttributes.prevRandao
    return false

  if cl.latestPayloadBuilt.parentHash != BlockHash cl.latestHeader.blockHash.data:
    error "CLMocker: Incorrect ParentHash on payload built",
-     expect=cl.latestPayloadBuilt.parentHash.toHex,
+     expect=cl.latestPayloadBuilt.parentHash,
      get=cl.latestHeader.blockHash
    return false

View File

@@ -2,15 +2,14 @@ import
  std/tables,
  stew/byteutils,
  chronicles,
+ eth/common,
  nimcrypto/sysrand,
  chronos,
  ".."/[test_env, helper, types],
  ../../../nimbus/transaction,
  ../../../nimbus/rpc/rpc_types,
- ../../../nimbus/rpc/merge/mergeutils
-
- import eth/common/eth_types as common_eth_types
- type Hash256 = common_eth_types.Hash256
+ ../../../nimbus/beacon/web3_eth_conv,
+ ../../../nimbus/beacon/execution_types

type
  EngineSpec* = ref object of BaseSpec

@@ -23,7 +22,7 @@ type
const
  prevRandaoContractAddr = hexToByteArray[20]("0000000000000000000000000000000000000316")

- template testNP(res, cond: untyped, validHash = none(Hash256)) =
+ template testNP(res, cond: untyped, validHash = none(common.Hash256)) =
  testCond res.isOk
  let s = res.get()
  testCond s.status == PayloadExecutionStatus.cond:

@@ -31,7 +30,7 @@ template testNP(res, cond: untyped, validHash = none(Hash256)) =
  testCond s.latestValidHash == validHash:
    error "Unexpected NewPayload latestValidHash", expect=validHash, get=s.latestValidHash

- template testNPEither(res, cond: untyped, validHash = none(Hash256)) =
+ template testNPEither(res, cond: untyped, validHash = none(common.Hash256)) =
  testCond res.isOk
  let s = res.get()
  testCond s.status in cond:

@@ -39,13 +38,13 @@ template testNPEither(res, cond: untyped, validHash = none(Hash256)) =
  testCond s.latestValidHash == validHash:
    error "Unexpected NewPayload latestValidHash", expect=validHash, get=s.latestValidHash

- template testLatestHeader(client: untyped, expectedHash: BlockHash) =
-   var lastHeader: EthBlockHeader
+ template testLatestHeader(client: untyped, expectedHash: Web3Hash) =
+   var lastHeader: common.BlockHeader
  var hRes = client.latestHeader(lastHeader)
  testCond hRes.isOk:
    error "unable to get latest header", msg=hRes.error

-   let lastHash = BlockHash lastHeader.blockHash.data
+   let lastHash = w3Hash lastHeader.blockHash
  # Latest block header available via Eth RPC should not have changed at this point
  testCond lastHash == expectedHash:
    error "latest block header incorrect",

@@ -80,7 +79,7 @@ proc invalidTerminalBlockForkchoiceUpdated*(t: TestEnv): bool =
  # either obtained from the Payload validation process or as a result of
  # validating a PoW block referenced by forkchoiceState.headBlockHash
- testFCU(res, invalid, some(Hash256()))
+ testFCU(res, invalid, some(common.Hash256()))
  # ValidationError is not validated since it can be either null or a string message

  # Check that PoW chain progresses

@@ -122,7 +121,7 @@ proc invalidTerminalBlockNewPayload(t: TestEnv): TestStatus =
  # Execution specification:
  # {status: INVALID, latestValidHash=0x00..00}
  # if terminal block conditions are not satisfied
- testNP(res, invalid, some(Hash256()))
+ testNP(res, invalid, some(common.Hash256()))

  # Check that PoW chain progresses
  testCond t.verifyPoWProgress(t.gHeader.blockHash)

@@ -133,7 +132,7 @@ proc unknownHeadBlockHash(t: TestEnv): TestStatus =
  let ok = waitFor t.clMock.waitForTTD()
  testCond ok

- var randomHash: Hash256
+ var randomHash: common.Hash256
  testCond randomBytes(randomHash.data) == 32

  let clMock = t.clMock

@@ -180,7 +179,7 @@ proc unknownSafeBlockHash(t: TestEnv): TestStatus =
    # Run test after a new payload has been broadcast
    onNewPayloadBroadcast: proc(): bool =
      # Generate a random SafeBlock hash
-     var randomSafeBlockHash: Hash256
+     var randomSafeBlockHash: common.Hash256
      doAssert randomBytes(randomSafeBlockHash.data) == 32

      # Send forkchoiceUpdated with random SafeBlockHash

@@ -213,7 +212,7 @@ proc unknownFinalizedBlockHash(t: TestEnv): TestStatus =
    # Run test after a new payload has been broadcast
    onNewPayloadBroadcast: proc(): bool =
      # Generate a random SafeBlock hash
-     var randomFinalBlockHash: Hash256
+     var randomFinalBlockHash: common.Hash256
      doAssert randomBytes(randomFinalBlockHash.data) == 32

      # Send forkchoiceUpdated with random SafeBlockHash

@@ -339,13 +338,13 @@ template invalidPayloadAttributesGen(procname: untyped, syncingCond: bool) =
    produceBlockRes = clMock.produceSingleBlock(BlockProcessCallbacks(
      onNewPayloadBroadcast: proc(): bool =
        # Try to apply the new payload with invalid attributes
-       var blockHash: Hash256
+       var blockHash: common.Hash256
        when syncingCond:
          # Setting a random hash will put the client into `SYNCING`
          doAssert randomBytes(blockHash.data) == 32
        else:
          # Set the block hash to the next payload that was broadcasted
-         blockHash = hash256(clMock.latestPayloadBuilt.blockHash)
+         blockHash = common.Hash256(clMock.latestPayloadBuilt.blockHash)

        let fcu = ForkchoiceStateV1(
          headBlockHash: Web3BlockHash blockHash.data,

@@ -400,7 +399,7 @@ proc preTTDFinalizedBlockHash(t: TestEnv): TestStatus =
    clMock = t.clMock

  var res = client.forkchoiceUpdatedV1(forkchoiceState)
- testFCU(res, invalid, some(Hash256()))
+ testFCU(res, invalid, some(common.Hash256()))

  res = client.forkchoiceUpdatedV1(clMock.latestForkchoice)
  testFCU(res, valid)

@@ -433,7 +432,7 @@ proc preTTDFinalizedBlockHash(t: TestEnv): TestStatus =
type
  Shadow = ref object
-   hash: Hash256
+   hash: common.Hash256

template badHashOnNewPayloadGen(procname: untyped, syncingCond: bool, sideChain: bool) =
  proc procName(t: TestEnv): TestStatus =

@@ -455,7 +454,7 @@ template badHashOnNewPayloadGen(procname: untyped, syncingCond: bool, sideChain:
      onGetPayload: proc(): bool =
        # Alter hash on the payload and send it to client, should produce an error
        var alteredPayload = clMock.latestPayloadBuilt
-       var invalidPayloadHash = hash256(alteredPayload.blockHash)
+       var invalidPayloadHash = common.Hash256(alteredPayload.blockHash)
        let lastByte = int invalidPayloadHash.data[^1]
        invalidPayloadHash.data[^1] = byte(not lastByte)
        shadow.hash = invalidPayloadHash

@@ -468,7 +467,7 @@ template badHashOnNewPayloadGen(procname: untyped, syncingCond: bool, sideChain:
          alteredPayload.parentHash = Web3BlockHash clMock.latestHeader.parentHash.data
        elif syncingCond:
          # We need to send an fcU to put the client in SYNCING state.
-         var randomHeadBlock: Hash256
+         var randomHeadBlock: common.Hash256
          doAssert randomBytes(randomHeadBlock.data) == 32

          let latestHeaderHash = clMock.latestHeader.blockHash

@@ -590,12 +589,12 @@ proc invalidTransitionPayload(t: TestEnv): TestStatus =
      let res = client.newPayloadV1(alteredPayload)
      let cond = {PayloadExecutionStatus.invalid, PayloadExecutionStatus.accepted}
-     testNPEither(res, cond, some(Hash256()))
+     testNPEither(res, cond, some(common.Hash256()))

      let rr = client.forkchoiceUpdatedV1(
        ForkchoiceStateV1(headBlockHash: alteredPayload.blockHash)
      )
-     testFCU(rr, invalid, some(Hash256()))
+     testFCU(rr, invalid, some(common.Hash256()))

      testLatestHeader(client, clMock.latestExecutedPayload.blockHash)
      return true

@@ -648,7 +647,7 @@ template invalidPayloadTestCaseGen(procName: untyped, payloadField: InvalidPaylo
          return false

        let alteredPayload = generateInvalidPayload(clMock.latestPayloadBuilt, payloadField, t.vaultKey)
-       invalidPayload.hash = hash256(alteredPayload.blockHash)
+       invalidPayload.hash = common.Hash256(alteredPayload.blockHash)

        # Depending on the field we modified, we expect a different status
        let rr = client.newPayloadV1(alteredPayload)

@@ -715,7 +714,7 @@ template invalidPayloadTestCaseGen(procName: untyped, payloadField: InvalidPaylo
        # Finally, attempt to fetch the invalid payload using the JSON-RPC endpoint
        var header: rpc_types.BlockHeader
-       let rp = client.headerByHash(alteredPayload.blockHash.hash256, header)
+       let rp = client.headerByHash(alteredPayload.blockHash.common.Hash256, header)
        rp.isErr
      ))

@@ -872,7 +871,7 @@ template invalidMissingAncestorReOrgGen(procName: untyped,
      onGetPayload: proc(): bool =
        # Insert extraData to ensure we deviate from the main payload, which contains empty extradata
        var alternatePayload = customizePayload(clMock.latestPayloadBuilt, CustomPayload(
-         parentHash: some(shadow.altChainPayloads[^1].blockHash.hash256),
+         parentHash: some(shadow.altChainPayloads[^1].blockHash.common.Hash256),
          extraData: some(@[1.byte]),
        ))

@@ -913,7 +912,7 @@ template invalidMissingAncestorReOrgGen(procName: untyped,
        if i == invalid_index:
          # If this is the first payload after the common ancestor, and this is the payload we invalidated,
          # then we have all the information to determine that this payload is invalid.
-         testNP(rr, invalid, some(shadow.altChainPayloads[i-1].blockHash.hash256))
+         testNP(rr, invalid, some(shadow.altChainPayloads[i-1].blockHash.common.Hash256))
        elif i > invalid_index:
          # We have already sent the invalid payload, but the client could've discarded it.
          # In reality the CL will not get to this point because it will have already received the `INVALID`

@@ -922,7 +921,7 @@ template invalidMissingAncestorReOrgGen(procName: untyped,
          testNPEither(rr, cond)
        else:
          # This is one of the payloads before the invalid one, therefore is valid.
-         let latestValidHash = some(shadow.altChainPayloads[i].blockHash.hash256)
+         let latestValidHash = some(shadow.altChainPayloads[i].blockHash.common.Hash256)
          testNP(rr, valid, latestValidHash)
          testFCU(rs, valid, latestValidHash)

@@ -989,7 +988,7 @@ proc blockStatusSafeBlock(t: TestEnv): TestStatus =
  let client = t.rpcClient

  # On PoW mode, `safe` tag shall return error.
- var header: EthBlockHeader
+ var header: common.BlockHeader
  var rr = client.namedHeader("safe", header)
  testCond rr.isErr

@@ -1004,10 +1003,10 @@ proc blockStatusSafeBlock(t: TestEnv): TestStatus =
  let pbres = clMock.produceBlocks(3, BlockProcessCallbacks(
    # Run test after a forkchoice with new SafeBlockHash has been broadcasted
    onSafeBlockChange: proc(): bool =
-     var header: EthBlockHeader
+     var header: common.BlockHeader
      let rr = client.namedHeader("safe", header)
      testCond rr.isOk
-     let safeBlockHash = hash256(clMock.latestForkchoice.safeBlockHash)
+     let safeBlockHash = common.Hash256(clMock.latestForkchoice.safeBlockHash)
      header.blockHash == safeBlockHash
  ))

@@ -1020,7 +1019,7 @@ proc blockStatusFinalizedBlock(t: TestEnv): TestStatus =
  let client = t.rpcClient

  # On PoW mode, `finalized` tag shall return error.
- var header: EthBlockHeader
+ var header: common.BlockHeader
  var rr = client.namedHeader("finalized", header)
  testCond rr.isErr

@@ -1035,10 +1034,10 @@ proc blockStatusFinalizedBlock(t: TestEnv): TestStatus =
  let pbres = clMock.produceBlocks(3, BlockProcessCallbacks(
    # Run test after a forkchoice with new FinalizedBlockHash has been broadcasted
    onFinalizedBlockChange: proc(): bool =
-     var header: EthBlockHeader
+     var header: common.BlockHeader
      let rr = client.namedHeader("finalized", header)
      testCond rr.isOk
-     let finalizedBlockHash = hash256(clMock.latestForkchoice.finalizedBlockHash)
+     let finalizedBlockHash = common.Hash256(clMock.latestForkchoice.finalizedBlockHash)
      header.blockHash == finalizedBlockHash
  ))

@@ -1061,7 +1060,7 @@ proc blockStatusReorg(t: TestEnv): TestStatus =
    # Run test after a forkchoice with new HeadBlockHash has been broadcasted
    onForkchoiceBroadcast: proc(): bool =
      # Verify the client is serving the latest HeadBlock
-     var currHeader: EthBlockHeader
+     var currHeader: common.BlockHeader
      var hRes = client.latestHeader(currHeader)
      if hRes.isErr:
        error "unable to get latest header", msg=hRes.error

@@ -1190,7 +1189,7 @@ proc multipleNewCanonicalPayloads(t: TestEnv): TestStatus =
    onGetPayload: proc(): bool =
      let payloadCount = 80
      let basePayload = toExecutableData(clMock.latestPayloadBuilt)
-     var newPrevRandao: Hash256
+     var newPrevRandao: common.Hash256

      # Fabricate and send multiple new payloads by changing the PrevRandao field
      for i in 0..<payloadCount:

@@ -1322,7 +1321,7 @@ proc reorgBackFromSyncing(t: TestEnv): TestStatus =
      let executableData = toExecutableData(clMock.latestPayloadBuilt)
      let altPayload = customizePayload(executableData,
        CustomPayload(
-         parentHash: some(altParentHash.hash256),
+         parentHash: some(altParentHash.common.Hash256),
          extraData: some(@[0x01.byte]),
        ))

@@ -1370,7 +1369,7 @@ proc reorgBackFromSyncing(t: TestEnv): TestStatus =
type
  TxReorgShadow = ref object
    noTxnPayload: ExecutionPayloadV1
-   txHash: Hash256
+   txHash: common.Hash256

proc transactionReorg(t: TestEnv): TestStatus =
  result = TestStatus.OK

@@ -1446,7 +1445,7 @@ proc transactionReorg(t: TestEnv): TestStatus =
        return false

      let rz = client.newPayloadV1(shadow.noTxnPayload)
-     testNP(rz, valid, some(hash256(shadow.noTxnPayload.blockHash)))
+     testNP(rz, valid, some(common.Hash256(shadow.noTxnPayload.blockHash)))

      let rx = client.forkchoiceUpdatedV1(ForkchoiceStateV1(
        headBlockHash: shadow.noTxnPayload.blockHash,

@@ -1469,7 +1468,7 @@ proc transactionReorg(t: TestEnv): TestStatus =
  testCond pbres

- proc testCondPrevRandaoValue(t: TestEnv, expectedPrevRandao: Hash256, blockNumber: uint64): bool =
+ proc testCondPrevRandaoValue(t: TestEnv, expectedPrevRandao: common.Hash256, blockNumber: uint64): bool =
  let storageKey = blockNumber.u256
  let client = t.rpcClient

@@ -1478,7 +1477,7 @@ proc testCondPrevRandaoValue(t: TestEnv, expectedPrevRandao: Hash256, blockNumbe
    error "Unable to get storage", msg=res.error
    return false

- let opcodeValueAtBlock = Hash256(data: res.get().toBytesBE)
+ let opcodeValueAtBlock = common.Hash256(data: res.get().toBytesBE)
  if opcodeValueAtBlock != expectedPrevRandao:
    error "Storage does not match prevRandao",
      expected=expectedPrevRandao.data,

@@ -1509,7 +1508,7 @@ proc sidechainReorg(t: TestEnv): TestStatus =
    onNewPayloadBroadcast: proc(): bool =
      # At this point the clMocker has a payload that will result in a specific outcome,
      # we can produce an alternative payload, send it, fcU to it, and verify the changes
-     var alternativePrevRandao: Hash256
+     var alternativePrevRandao: common.Hash256
      doAssert randomBytes(alternativePrevRandao.data) == 32

      let timestamp = Quantity toUnix(clMock.latestHeader.timestamp + 1.seconds)

View File

@@ -7,7 +7,7 @@ import
  ../../../tests/rpcclient/eth_api,
  ../../../premix/parser,
  ../../../nimbus/rpc/hexstrings,
- ../../../nimbus/rpc/execution_types
+ ../../../nimbus/beacon/execution_types

import web3/engine_api as web3_engine_api

View File

@@ -5,53 +5,48 @@ import
  json_rpc/[rpcclient],
  ../../../nimbus/transaction,
  ../../../nimbus/utils/utils,
- ../../../nimbus/rpc/execution_types,
- ./types
+ ../../../nimbus/beacon/execution_types,
+ ../../../nimbus/beacon/web3_eth_conv

- import eth/common/eth_types as common_eth_types
- type
-   Hash256 = common_eth_types.Hash256
-   EthBlockHeader = common_eth_types.BlockHeader

type
  ExecutableData* = object
-   parentHash*   : Hash256
+   parentHash*   : common.Hash256
    feeRecipient* : EthAddress
-   stateRoot*    : Hash256
-   receiptsRoot* : Hash256
+   stateRoot*    : common.Hash256
+   receiptsRoot* : common.Hash256
    logsBloom*    : BloomFilter
-   prevRandao*   : Hash256
+   prevRandao*   : common.Hash256
    number*       : uint64
    gasLimit*     : GasInt
    gasUsed*      : GasInt
    timestamp*    : EthTime
    extraData*    : common.Blob
    baseFeePerGas*: UInt256
-   blockHash*    : Hash256
+   blockHash*    : common.Hash256
    transactions* : seq[Transaction]
    withdrawals*  : Option[seq[Withdrawal]]
    blobGasUsed*  : Option[uint64]
    excessBlobGas*: Option[uint64]

  CustomPayload* = object
-   parentHash*   : Option[Hash256]
+   parentHash*   : Option[common.Hash256]
    feeRecipient* : Option[EthAddress]
-   stateRoot*    : Option[Hash256]
-   receiptsRoot* : Option[Hash256]
+   stateRoot*    : Option[common.Hash256]
+   receiptsRoot* : Option[common.Hash256]
    logsBloom*    : Option[BloomFilter]
-   prevRandao*   : Option[Hash256]
+   prevRandao*   : Option[common.Hash256]
    number*       : Option[uint64]
    gasLimit*     : Option[GasInt]
    gasUsed*      : Option[GasInt]
    timestamp*    : Option[EthTime]
    extraData*    : Option[common.Blob]
    baseFeePerGas*: Option[UInt256]
-   blockHash*    : Option[Hash256]
+   blockHash*    : Option[common.Hash256]
    transactions* : Option[seq[Transaction]]
    withdrawals*  : Option[seq[Withdrawal]]
    blobGasUsed*  : Option[uint64]
    excessBlobGas*: Option[uint64]
-   beaconRoot*   : Option[Hash256]
+   beaconRoot*   : Option[common.Hash256]
    removeWithdrawals*: bool

  InvalidPayloadField* = enum

@@ -96,9 +91,9 @@ proc customizePayload*(basePayload: ExecutableData, customData: CustomPayload):
    elif basePayload.withdrawals.isSome:
      some(calcWithdrawalsRoot(basePayload.withdrawals.get))
    else:
-     none(Hash256)
+     none(common.Hash256)

- var customHeader = EthBlockHeader(
+ var customHeader = common.BlockHeader(
    parentHash: basePayload.parentHash,
    ommersHash: EMPTY_UNCLE_HASH,
    coinbase: basePayload.feeRecipient,

@@ -169,16 +164,16 @@ proc customizePayload*(basePayload: ExecutableData, customData: CustomPayload):
  # Return the new payload
  result = ExecutionPayload(
    parentHash: w3Hash customHeader.parentHash,
-   feeRecipient: Web3Address customHeader.coinbase,
+   feeRecipient: w3Addr customHeader.coinbase,
    stateRoot: w3Hash customHeader.stateRoot,
    receiptsRoot: w3Hash customHeader.receiptRoot,
-   logsBloom: Web3Bloom customHeader.bloom,
-   prevRandao: Web3PrevRandao customHeader.mixDigest.data,
-   blockNumber: Web3Quantity customHeader.blockNumber.truncate(uint64),
-   gasLimit: Web3Quantity customHeader.gasLimit,
-   gasUsed: Web3Quantity customHeader.gasUsed,
-   timestamp: Web3Quantity toUnix(customHeader.timestamp),
-   extraData: Web3ExtraData customHeader.extraData,
+   logsBloom: w3Bloom customHeader.bloom,
+   prevRandao: w3PrevRandao customHeader.mixDigest,
+   blockNumber: w3Qty customHeader.blockNumber,
+   gasLimit: w3Qty customHeader.gasLimit,
+   gasUsed: w3Qty customHeader.gasUsed,
+   timestamp: w3Qty customHeader.timestamp,
+   extraData: w3ExtraData customHeader.extraData,
    baseFeePerGas: customHeader.baseFee,
    blockHash: w3Hash customHeader.blockHash,
    blobGasUsed: w3Qty customHeader.blobGasUsed,

@@ -201,30 +196,25 @@ proc customizePayload*(basePayload: ExecutableData, customData: CustomPayload):
proc toExecutableData*(payload: ExecutionPayload): ExecutableData =
  result = ExecutableData(
-   parentHash    : hash256(payload.parentHash),
+   parentHash    : ethHash payload.parentHash,
    feeRecipient  : distinctBase payload.feeRecipient,
-   stateRoot     : hash256(payload.stateRoot),
-   receiptsRoot  : hash256(payload.receiptsRoot),
+   stateRoot     : ethHash payload.stateRoot,
+   receiptsRoot  : ethHash payload.receiptsRoot,
    logsBloom     : distinctBase payload.logsBloom,
-   prevRandao    : hash256(payload.prevRandao),
+   prevRandao    : ethHash payload.prevRandao,
    number        : uint64 payload.blockNumber,
    gasLimit      : GasInt payload.gasLimit,
    gasUsed       : GasInt payload.gasUsed,
-   timestamp     : fromUnix(int64 payload.timestamp),
+   timestamp     : ethTime payload.timestamp,
    extraData     : distinctBase payload.extraData,
    baseFeePerGas : payload.baseFeePerGas,
-   blockHash     : hash256(payload.blockHash),
+   blockHash     : ethHash payload.blockHash,
    blobGasUsed   : u64 payload.blobGasUsed,
    excessBlobGas : u64 payload.excessBlobGas,
+   transactions  : ethTxs payload.transactions,
+   withdrawals   : ethWithdrawals payload.withdrawals,
  )

- for data in payload.transactions:
-   let tx = rlp.decode(distinctBase data, Transaction)
-   result.transactions.add tx
-
- if payload.withdrawals.isSome:
-   result.withdrawals = some(withdrawals(payload.withdrawals.get))

proc customizePayload*(basePayload: ExecutionPayload, customData: CustomPayload): ExecutionPayload =
  customizePayload(basePayload.toExecutableData, customData)

@@ -269,7 +259,7 @@ proc customizeTx(baseTx: Transaction, vaultKey: PrivateKey, customTx: CustomTx):
  let chainId = baseTx.chainId
  signTransaction(modTx, vaultKey, chainId, eip155 = true)

- proc modifyHash(x: Hash256): Hash256 =
+ proc modifyHash(x: common.Hash256): common.Hash256 =
  result = x
  result.data[^1] = byte(255 - x.data[^1].int)

@@ -297,7 +287,7 @@ proc generateInvalidPayload*(basePayload: ExecutableData,
  of InvalidPrevRandao:
    # This option potentially requires a transaction that uses the PREVRANDAO opcode.
    # Otherwise the payload will still be valid.
-   var randomHash: Hash256
+   var randomHash: common.Hash256
    doAssert randomBytes(randomHash.data) == 32
    customPayload.prevRandao = some(randomHash)
  of RemoveTransaction:

@@ -343,7 +333,7 @@ proc generateInvalidPayload*(basePayload: ExecutionPayload,
                             vaultKey = default(PrivateKey)): ExecutionPayload =
  generateInvalidPayload(basePayload.toExecutableData, payloadField, vaultKey)

- proc txInPayload*(payload: ExecutionPayload, txHash: Hash256): bool =
+ proc txInPayload*(payload: ExecutionPayload, txHash: common.Hash256): bool =
  for txBytes in payload.transactions:
    let currTx = rlp.decode(common.Blob txBytes, Transaction)
    if rlpHash(currTx) == txHash:

View File

@@ -14,7 +14,7 @@ import
    core/block_import,
    rpc,
    sync/protocol,
-   rpc/merge/merger,
+   beacon/beacon_engine,
    common
  ],
  ../../../tests/test_helpers,

@@ -112,9 +112,9 @@ proc setupELClient*(t: TestEnv, chainFile: string, enableAuth: bool) =
    txPool, EngineStopped
  )

- let merger = MergerRef.new(t.com.db)
+ let beaconEngine = BeaconEngineRef.new(txPool, t.chainRef)
  setupEthRpc(t.ethNode, t.ctx, t.com, txPool, t.rpcServer)
- setupEngineAPI(t.sealingEngine, t.rpcServer, merger)
+ setupEngineAPI(beaconEngine, t.rpcServer)
  setupDebugRpc(t.com, t.rpcServer)

  # Do not start clique sealing engine if we are using a Proof of Work chain file

View File

@@ -1,18 +1,10 @@
import
- std/[options, times, strutils, typetraits],
+ std/[options, typetraits, strutils],
+ eth/common,
  web3/ethtypes,
- ../../../nimbus/rpc/merge/mergeutils,
- ../../../nimbus/rpc/execution_types,
  web3/engine_api_types,
- eth/common/eth_types_rlp
-
- from web3/ethtypes as web3types import nil
-
- export
-   ethtypes,
-   engine_api_types
-
- import eth/common/eth_types as common
+ ../../../nimbus/beacon/execution_types,
+ ../../../nimbus/beacon/web3_eth_conv

type
  BaseSpec* = ref object of RootObj

@@ -24,13 +16,6 @@ type
    run* : proc(spec: BaseSpec): bool
    spec* : BaseSpec

- Web3Hash256* = web3types.Hash256
- Web3Address* = web3types.Address
- Web3Bloom* = web3types.FixedBytes[256]
- Web3Quantity* = web3types.Quantity
- Web3PrevRandao* = web3types.FixedBytes[32]
- Web3ExtraData* = web3types.DynamicBytes[0, 32]

template testCond*(expr: untyped) =
  if not (expr):
    return false

@@ -40,88 +25,6 @@ template testCond*(expr, body: untyped) =
    body
    return false

- proc `$`*(x: Option[common.Hash256]): string =
-   if x.isNone:
-     "none"
-   else:
-     $x.get()
-
- proc `$`*(x: Option[BlockHash]): string =
-   if x.isNone:
-     "none"
-   else:
-     $x.get()
-
- proc `$`*(x: Option[PayloadID]): string =
-   if x.isNone:
-     "none"
-   else:
-     x.get().toHex
-
- func w3Hash*(x: common.Hash256): Web3Hash256 =
-   Web3Hash256 x.data
-
- func w3Hash*(x: Option[common.Hash256]): Option[BlockHash] =
-   if x.isNone:
-     return none(BlockHash)
-   some(BlockHash x.get.data)
-
- proc w3Hash*(x: common.BlockHeader): BlockHash =
-   BlockHash x.blockHash.data
-
- func w3Qty*(a: EthTime, b: int): Quantity =
-   Quantity(a.toUnix + b.int64)
-
- func w3Qty*(x: Option[uint64]): Option[Quantity] =
-   if x.isNone:
-     return none(Quantity)
-   return some(Quantity x.get)
-
- func u64*(x: Option[Quantity]): Option[uint64] =
-   if x.isNone:
-     return none(uint64)
-   return some(uint64 x.get)
-
- func w3PrevRandao*(): Web3PrevRandao =
-   discard
-
- func w3Address*(): Web3Address =
-   discard
-
- proc hash256*(h: Web3Hash256): common.Hash256 =
-   common.Hash256(data: distinctBase h)
-
- proc hash256*(h: Option[Web3Hash256]): Option[common.Hash256] =
-   if h.isNone:
-     return none(common.Hash256)
-   some(hash256(h.get))
-
- proc w3Withdrawal*(w: Withdrawal): WithdrawalV1 =
-   WithdrawalV1(
-     index: Quantity(w.index),
-     validatorIndex: Quantity(w.validatorIndex),
-     address: Address(w.address),
-     amount: Quantity(w.amount)
-   )
-
- proc w3Withdrawals*(list: openArray[Withdrawal]): seq[WithdrawalV1] =
-   result = newSeqOfCap[WithdrawalV1](list.len)
-   for x in list:
-     result.add w3Withdrawal(x)
-
- proc withdrawal*(w: WithdrawalV1): Withdrawal =
-   Withdrawal(
-     index: uint64(w.index),
-     validatorIndex: uint64(w.validatorIndex),
-     address: distinctBase(w.address),
-     amount: uint64(w.amount)
-   )
-
- proc withdrawals*(list: openArray[WithdrawalV1]): seq[Withdrawal] =
-   result = newSeqOfCap[Withdrawal](list.len)
-   for x in list:
-     result.add withdrawal(x)

proc `==`*(a: Option[BlockHash], b: Option[common.Hash256]): bool =
  if a.isNone and b.isNone:
    return true

@@ -190,7 +93,7 @@ template expectBalanceEqual*(res: untyped, expectedBalance: UInt256) =
  testCond res.get == expectedBalance:
    error "balance mismatch", expect=expectedBalance, get=res.get

- template expectLatestValidHash*(res: untyped, expectedHash: Web3Hash256) =
+ template expectLatestValidHash*(res: untyped, expectedHash: Web3Hash) =
  testCond res.isOk:
    error "Unexpected error", msg=res.error
  let s = res.get
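
The local conversion helpers deleted above (w3Hash, hash256, w3Withdrawal, withdrawals, ...) are replaced by the shared nimbus/beacon/web3_eth_conv module imported at the top of this file. A minimal usage sketch, not part of the diff, assuming the import path and the w3*/eth* helper names exactly as they appear at the call sites in this commit:

```nim
# Hedged sketch: round-tripping a hash through the shared conversion module
# that replaces the local helpers removed above.
import
  eth/common,
  ../../../nimbus/beacon/web3_eth_conv   # path as used by the hive sources

proc roundTrip(h: common.Hash256): bool =
  let w3 = w3Hash h      # common.Hash256 -> Web3Hash
  let eh = ethHash w3    # Web3Hash -> common.Hash256
  eh == h
```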

View File

@@ -15,7 +15,8 @@ import
  ../../../nimbus/common/common,
  ../../../nimbus/utils/utils,
  ../../../nimbus/common/chain_config,
- ../../../nimbus/rpc/execution_types
+ ../../../nimbus/beacon/execution_types,
+ ../../../nimbus/beacon/web3_eth_conv

type
  WDBaseSpec* = ref object of BaseSpec

@@ -321,7 +322,7 @@ proc execute*(ws: WDBaseSpec, t: TestEnv): bool =
      let emptyWithdrawalsList = newSeq[Withdrawal]()
      let customizer = CustomPayload(
        withdrawals: some(emptyWithdrawalsList),
-       beaconRoot: hash256 t.clMock.latestPayloadAttributes.parentBeaconBlockRoot
+       beaconRoot: ethHash t.clMock.latestPayloadAttributes.parentBeaconBlockRoot
      )
      let payloadPlusWithdrawals = customizePayload(t.clMock.latestPayloadBuilt, customizer)
      var r = t.rpcClient.newPayloadV2(payloadPlusWithdrawals.V1V2)

@@ -410,7 +411,7 @@ proc execute*(ws: WDBaseSpec, t: TestEnv): bool =
      # be checked first instead of responding `INVALID`
      let customizer = CustomPayload(
        removeWithdrawals: true,
-       beaconRoot: hash256 t.clMock.latestPayloadAttributes.parentBeaconBlockRoot
+       beaconRoot: ethHash t.clMock.latestPayloadAttributes.parentBeaconBlockRoot
      )
      let nilWithdrawalsPayload = customizePayload(t.clMock.latestPayloadBuilt, customizer)
      let r = t.rpcClient.newPayloadV2(nilWithdrawalsPayload.V1V2)

@@ -431,7 +432,7 @@ proc execute*(ws: WDBaseSpec, t: TestEnv): bool =
        get=wdList.len

      for i, x in sentList:
-       let z = withdrawal wdList[i]
+       let z = ethWithdrawal wdList[i]
        testCond z == x:
          error "Incorrect withdrawal", index=i
      return true

View File

@@ -9,7 +9,8 @@ import
  ../types,
  ../helper,
  ../../../nimbus/constants,
- ../../../nimbus/rpc/execution_types
+ ../../../nimbus/beacon/execution_types,
+ ../../../nimbus/beacon/web3_eth_conv

# EIP-3860 Shanghai Tests:
# Send transactions overflowing the MAX_INITCODE_SIZE

@@ -100,7 +101,7 @@ proc execute*(ws: MaxInitcodeSizeSpec, t: TestEnv): bool =
    # Customize the payload to include a tx with an invalid initcode
    let customData = CustomPayload(
-     beaconRoot: hash256 t.clMock.latestPayloadAttributes.parentBeaconBlockRoot,
+     beaconRoot: ethHash t.clMock.latestPayloadAttributes.parentBeaconBlockRoot,
      transactions: some( @[invalidTx] ),
    )

View File

@@ -1,58 +0,0 @@
import
  eth/[common],
  json_rpc/[rpcclient],
  web3/ethtypes,
  ../../../nimbus/transaction

import eth/common/eth_types as common_eth_types
type Hash256 = common_eth_types.Hash256

import web3/engine_api_types
from web3/ethtypes as web3types import nil

type
  Web3BlockHash* = web3types.BlockHash
  Web3Address* = web3types.Address
  Web3Bloom* = web3types.FixedBytes[256]
  Web3Quantity* = web3types.Quantity
  Web3PrevRandao* = web3types.FixedBytes[32]
  Web3ExtraData* = web3types.DynamicBytes[0, 32]

func toWdV1(wd: Withdrawal): WithdrawalV1 =
  result = WithdrawalV1(
    index: Web3Quantity wd.index,
    validatorIndex: Web3Quantity wd.validatorIndex,
    address: Web3Address wd.address,
    amount: Web3Quantity wd.amount
  )

func toPayloadV1OrV2*(blk: EthBlock): ExecutionPayloadV1OrV2 =
  let header = blk.header

  # Return the new payload
  result = ExecutionPayloadV1OrV2(
    parentHash: Web3BlockHash header.parentHash.data,
    feeRecipient: Web3Address header.coinbase,
    stateRoot: Web3BlockHash header.stateRoot.data,
    receiptsRoot: Web3BlockHash header.receiptRoot.data,
    logsBloom: Web3Bloom header.bloom,
    prevRandao: Web3PrevRandao header.mixDigest.data,
    blockNumber: Web3Quantity header.blockNumber.truncate(uint64),
    gasLimit: Web3Quantity header.gasLimit,
    gasUsed: Web3Quantity header.gasUsed,
    timestamp: Web3Quantity toUnix(header.timestamp),
    extraData: Web3ExtraData header.extraData,
    baseFeePerGas: header.baseFee,
    blockHash: Web3BlockHash header.blockHash.data
  )

  for tx in blk.txs:
    let txData = rlp.encode(tx)
    result.transactions.add TypedTransaction(txData)

  if blk.withdrawals.isSome:
    let withdrawals = blk.withdrawals.get
    var wds = newSeqOfCap[WithdrawalV1](withdrawals.len)
    for wd in withdrawals:
      wds.add toWdV1(wd)
    result.withdrawals = some(wds)

View File

@@ -11,29 +11,24 @@ import
  std/[os, json, strutils, times, typetraits, options],
  stew/[byteutils, results],
  eth/common,
+ web3/engine_api_types,
  ../sim_utils,
  ../../../tools/common/helpers as chp,
  ../../../tools/evmstate/helpers as ehp,
  ../../../tests/test_helpers,
+ ../../../nimbus/beacon/web3_eth_conv,
+ ../../../nimbus/beacon/execution_types,
+ ../../../nimbus/beacon/payload_conv,
  ../engine/engine_client,
- ./test_env,
- ./helpers
+ ./test_env

const
  baseFolder = "hive_integration/nodocker/pyspec"
  caseFolder = baseFolder & "/testcases"
  supportedNetwork = ["Merge", "Shanghai", "MergeToShanghaiAtTime15k"]

- type
-   Hash256 = common.Hash256

proc getPayload(node: JsonNode): ExecutionPayloadV1OrV2 =
  let rlpBytes = hexToSeqByte(node.getStr)
-   toPayloadV1OrV2(rlp.decode(rlpBytes, EthBlock))
+   executionPayloadV1V2(rlp.decode(rlpBytes, EthBlock))

- proc hash256(h: Web3BlockHash): Hash256 =
-   Hash256(data: distinctBase h)

proc validatePostState(node: JsonNode, t: TestEnv): bool =
  # check nonce, balance & storage of accounts in final block against fixture values

@@ -101,7 +96,7 @@ proc runTest(node: JsonNode, network: string): TestStatus =
  t.setupELClient(conf, node)

  let blks = node["blocks"]
- var latestValidHash = Hash256()
+ var latestValidHash = common.Hash256()
  result = TestStatus.OK
  for blkNode in blks:
    let expectedStatus = if "expectException" in blkNode:

@@ -117,7 +112,7 @@
    let pStatus = res.value
    if pStatus.status == PayloadExecutionStatus.valid:
-     latestValidHash = hash256(pStatus.latestValidHash.get)
+     latestValidHash = ethHash pStatus.latestValidHash.get

    if pStatus.status != expectedStatus:
      result = TestStatus.Failed

@@ -128,7 +123,7 @@
  block:
    # only update head of beacon chain if valid response occurred
-   if latestValidHash != Hash256():
+   if latestValidHash != common.Hash256():
      # update with latest valid response
      let fcState = ForkchoiceStateV1(headBlockHash: BlockHash latestValidHash.data)
      let res = t.rpcClient.forkchoiceUpdatedV2(fcState)

View File

@@ -14,7 +14,7 @@ import
    core/tx_pool,
    rpc,
    sync/protocol,
-   rpc/merge/merger,
+   beacon/beacon_engine,
    common
  ],
  ../../../tests/test_helpers,

@@ -68,10 +68,9 @@ proc setupELClient*(t: TestEnv, conf: ChainConfig, node: JsonNode) =
    txPool, EngineStopped
  )

- let merger = MergerRef.new(t.com.db)
+ let beaconEngine = BeaconEngineRef.new(txPool, t.chainRef)
  setupEthRpc(t.ethNode, t.ctx, t.com, txPool, t.rpcServer)
- setupEngineAPI(t.sealingEngine, t.rpcServer, merger)
+ setupEngineAPI(beaconEngine, t.rpcServer)
- #setupDebugRpc(t.com, t.rpcServer)

  t.rpcServer.start()

View File

@@ -0,0 +1,50 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  std/[options, typetraits],
  eth/common,
  ./web3_eth_conv,
  ./beacon_engine,
  ./execution_types,
  ./api_handler/api_utils,
  ./api_handler/api_getpayload,
  ./api_handler/api_getbodies,
  ./api_handler/api_exchangeconf,
  ./api_handler/api_newpayload,
  ./api_handler/api_forkchoice

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

{.push gcsafe, raises:[CatchableError].}

func validateVersionedHashed*(payload: ExecutionPayload,
                              expected: openArray[Web3Hash]): bool =
  var versionedHashes: seq[common.Hash256]
  for x in payload.transactions:
    let tx = rlp.decode(distinctBase(x), Transaction)
    versionedHashes.add tx.versionedHashes

  for i, x in expected:
    if distinctBase(x) != versionedHashes[i].data:
      return false
  true

{.pop.}

export
  invalidStatus,
  getPayload,
  getPayloadV3,
  getPayloadBodiesByHash,
  getPayloadBodiesByRange,
  exchangeConf,
  newPayload,
  forkchoiceUpdated

View File

@@ -0,0 +1,75 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  std/[strutils],
  eth/common,
  ../web3_eth_conv,
  ../beacon_engine,
  ../execution_types,
  chronicles

{.push gcsafe, raises:[CatchableError].}

proc exchangeConf*(ben: BeaconEngineRef,
                   conf: TransitionConfigurationV1):
                     TransitionConfigurationV1 =
  trace "Engine API request received",
    meth = "exchangeTransitionConfigurationV1",
    ttd = conf.terminalTotalDifficulty,
    number = uint64(conf.terminalBlockNumber),
    blockHash = conf.terminalBlockHash

  let
    com = ben.com
    db  = com.db
    ttd = com.ttd

  if ttd.isNone:
    raise newException(ValueError, "invalid ttd: EL (none) CL ($1)" % [
      $conf.terminalTotalDifficulty])

  if conf.terminalTotalDifficulty != ttd.get:
    raise newException(ValueError, "invalid ttd: EL ($1) CL ($2)" % [
      $ttd.get, $conf.terminalTotalDifficulty])

  let
    terminalBlockNumber = u256 conf.terminalBlockNumber
    terminalBlockHash   = ethHash conf.terminalBlockHash

  if terminalBlockHash != common.Hash256():
    var headerHash: common.Hash256

    if not db.getBlockHash(terminalBlockNumber, headerHash):
      raise newException(ValueError, "cannot get terminal block hash, number $1" %
        [$terminalBlockNumber])

    if terminalBlockHash != headerHash:
      raise newException(ValueError, "invalid terminal block hash, got $1 want $2" %
        [$terminalBlockHash, $headerHash])

    var header: common.BlockHeader
    if not db.getBlockHeader(headerHash, header):
      raise newException(ValueError, "cannot get terminal block header, hash $1" %
        [$terminalBlockHash])

    return TransitionConfigurationV1(
      terminalTotalDifficulty: ttd.get,
      terminalBlockHash      : w3Hash headerHash,
      terminalBlockNumber    : w3Qty header.blockNumber
    )

  if terminalBlockNumber.isZero.not:
    raise newException(ValueError, "invalid terminal block number: $1" % [
      $terminalBlockNumber])

  if terminalBlockHash != common.Hash256():
    raise newException(ValueError, "invalid terminal block hash, no terminal header set")

  TransitionConfigurationV1(terminalTotalDifficulty: ttd.get)

View File

@@ -0,0 +1,192 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
#  * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.

import
  std/[typetraits],
  eth/common,
  stew/results,
  ../web3_eth_conv,
  ../beacon_engine,
  ../execution_types,
  ./api_utils,
  chronicles

{.push gcsafe, raises:[CatchableError].}

template validateVersion(attrsOpt, com) =
  if attrsOpt.isSome:
    let
      attr      = attrsOpt.get
      version   = attr.version
      timestamp = ethTime attr.timestamp

    if com.isCancunOrLater(timestamp):
      if version != Version.V3:
        raise invalidParams("if timestamp is Cancun or later," &
          " payloadAttributes must be PayloadAttributesV3")
    elif com.isShanghaiOrLater(timestamp):
      if version != Version.V2:
        raise invalidParams("if timestamp is Shanghai or later," &
          " payloadAttributes must be PayloadAttributesV2")
    else:
      if version != Version.V1:
        raise invalidParams("if timestamp is earlier than Shanghai," &
          " payloadAttributes must be PayloadAttributesV1")

proc forkchoiceUpdated*(ben: BeaconEngineRef,
                        update: ForkchoiceStateV1,
                        attrsOpt: Option[PayloadAttributes]):
                          ForkchoiceUpdatedResponse =
  let
    com       = ben.com
    db        = com.db
    chain     = ben.chain
    blockHash = ethHash update.headBlockHash

  validateVersion(attrsOpt, com)

  if blockHash == common.Hash256():
    warn "Forkchoice requested update to zero hash"
    return simpleFCU(PayloadExecutionStatus.invalid)

  # Check whether we have the block yet in our database or not. If not, we'll
  # need to either trigger a sync, or to reject this forkchoice update for a
  # reason.
  var header: common.BlockHeader
  if not db.getBlockHeader(blockHash, header):
    # If the head hash is unknown (was not given to us in a newPayload request),
    # we cannot resolve the header, so not much to do. This could be extended in
    # the future to resolve from the `eth` network, but it's an unexpected case
    # that should be fixed, not papered over.
    if not ben.get(blockHash, header):
      warn "Forkchoice requested unknown head",
        hash = blockHash
      return simpleFCU(PayloadExecutionStatus.syncing)

    # Header advertised via a past newPayload request. Start syncing to it.
    # Before we do however, make sure any legacy sync is switched off so we
    # don't accidentally have 2 cycles running.
    if not ben.ttdReached():
      ben.reachTTD()
      # TODO: cancel downloader

    info "Forkchoice requested sync to new head",
      number = header.blockNumber,
      hash   = blockHash

    # Update sync header (if any)
    com.syncReqNewHead(header)
    return simpleFCU(PayloadExecutionStatus.syncing)

  # Block is known locally, just sanity check that the beacon client does not
  # attempt to push us back to before the merge.
  let blockNumber = header.blockNumber.truncate(uint64)
  if header.difficulty > 0.u256 or blockNumber == 0'u64:
    var
      td, ptd: DifficultyInt
      ttd = com.ttd.get(high(common.BlockNumber))

    if not db.getTd(blockHash, td) or (blockNumber > 0'u64 and not db.getTd(header.parentHash, ptd)):
      error "TDs unavailable for TTD check",
        number = blockNumber,
        hash = blockHash,
        td = td,
        parent = header.parentHash,
        ptd = ptd
      return simpleFCU(PayloadExecutionStatus.invalid, "TDs unavailable for TDD check")

    if td < ttd or (blockNumber > 0'u64 and ptd > ttd):
      error "Refusing beacon update to pre-merge",
        number = blockNumber,
        hash = blockHash,
        diff = header.difficulty,
        ptd = ptd,
        ttd = ttd
      return invalidFCU()

  # If the head block is already in our canonical chain, the beacon client is
  # probably resyncing. Ignore the update.
  var canonHash: common.Hash256
  if db.getBlockHash(header.blockNumber, canonHash) and canonHash == blockHash:
    # TODO should this be possible?
    # If we allow these types of reorgs, we will do lots and lots of reorgs during sync
    warn "Reorg to previous block"
    if chain.setCanonical(header) != ValidationResult.OK:
      return invalidFCU(com, header)
  elif chain.setCanonical(header) != ValidationResult.OK:
    return invalidFCU(com, header)

  # If the beacon client also advertised a finalized block, mark the local
  # chain final and completely in PoS mode.
  let finalizedBlockHash = ethHash update.finalizedBlockHash
  if finalizedBlockHash != common.Hash256():
    if not ben.posFinalized:
      ben.finalizePoS()

    # TODO: If the finalized block is not in our canonical tree, something's wrong
    var finalBlock: common.BlockHeader
    if not db.getBlockHeader(finalizedBlockHash, finalBlock):
      warn "Final block not available in database",
        hash=finalizedBlockHash
      raise invalidParams("finalized block header not available")

    var finalHash: common.Hash256
    if not db.getBlockHash(finalBlock.blockNumber, finalHash):
      warn "Final block not in canonical chain",
        number=finalBlock.blockNumber,
        hash=finalizedBlockHash
      raise invalidParams("finalized block hash not available")

    if finalHash != finalizedBlockHash:
      warn "Final block not in canonical chain",
        number=finalBlock.blockNumber,
        expect=finalizedBlockHash,
        get=finalHash
      raise invalidParams("finalilized block not canonical")
    db.finalizedHeaderHash(finalizedBlockHash)

  let safeBlockHash = ethHash update.safeBlockHash
  if safeBlockHash != common.Hash256():
    var safeBlock: common.BlockHeader
    if not db.getBlockHeader(safeBlockHash, safeBlock):
      warn "Safe block not available in database",
        hash = safeBlockHash
      raise invalidParams("safe head not available")

    var safeHash: common.Hash256
    if not db.getBlockHash(safeBlock.blockNumber, safeHash):
      warn "Safe block hash not available in database",
        hash = safeHash
      raise invalidParams("safe block hash not available")

    if safeHash != safeBlockHash:
      warn "Safe block not in canonical chain",
        blockNumber=safeBlock.blockNumber,
        expect=safeBlockHash,
        get=safeHash
      raise invalidParams("safe head not canonical")
    db.safeHeaderHash(safeBlockHash)

  # If payload generation was requested, create a new block to be potentially
  # sealed by the beacon client. The payload will be requested later, and we
  # might replace it arbitrarily many times in between.
  if attrsOpt.isSome:
    let attrs = attrsOpt.get()
    let payload = ben.generatePayload(attrs).valueOr:
      error "Failed to create sealing payload", err = error
      raise invalidAttr(error)

    let id = computePayloadId(blockHash, attrs)
    ben.put(id, ben.blockValue, payload)

    info "Created payload for sealing",
      id = id.toHex,
      hash = payload.blockHash,
      number = payload.blockNumber

    return validFCU(some(id), blockHash)

  return validFCU(none(PayloadID), blockHash)
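
For orientation, a hedged sketch of driving the handler above directly; it is not part of the commit. `ben` would be a BeaconEngineRef as constructed in the test setups earlier in this diff, `head` an existing canonical header, and the response field names (`payloadStatus.status`) follow web3's engine_api_types and are an assumption here.

```nim
# Hedged sketch, not part of the diff. Assumes `ben: BeaconEngineRef` and
# `head: common.BlockHeader` already exist (see the test_env setup above).
let update = ForkchoiceStateV1(
  headBlockHash: w3Hash head.blockHash  # safe/finalized left zero: skipped above
)
let resp = ben.forkchoiceUpdated(update, none(PayloadAttributes))
doAssert resp.payloadStatus.status in
  {PayloadExecutionStatus.valid, PayloadExecutionStatus.syncing}
```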

View File

@ -0,0 +1,93 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
std/[options, typetraits],
eth/common,
../web3_eth_conv,
../beacon_engine,
../execution_types,
../../db/core_db,
./api_utils
{.push gcsafe, raises:[CatchableError].}
const
maxBodyRequest = 32
proc getPayloadBodyByHeader(db: CoreDbRef,
header: common.BlockHeader,
output: var seq[Option[ExecutionPayloadBodyV1]]) =
var body: common.BlockBody
if not db.getBlockBody(header, body):
output.add none(ExecutionPayloadBodyV1)
return
let txs = w3Txs body.transactions
var wds: seq[WithdrawalV1]
if body.withdrawals.isSome:
for w in body.withdrawals.get:
wds.add w3Withdrawal(w)
output.add(
some(ExecutionPayloadBodyV1(
transactions: txs,
# pre-Shanghai blocks return null withdrawals
# post-Shanghai blocks return at least an empty slice
withdrawals: if header.withdrawalsRoot.isSome:
some(wds)
else:
none(seq[WithdrawalV1])
))
)
proc getPayloadBodiesByHash*(ben: BeaconEngineRef,
hashes: seq[Web3Hash]):
seq[Option[ExecutionPayloadBodyV1]] =
if hashes.len > maxBodyRequest:
raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest)
let db = ben.com.db
var header: common.BlockHeader
for h in hashes:
if not db.getBlockHeader(ethHash h, header):
result.add none(ExecutionPayloadBodyV1)
continue
db.getPayloadBodyByHeader(header, result)
proc getPayloadBodiesByRange*(ben: BeaconEngineRef,
start: uint64, count: uint64):
seq[Option[ExecutionPayloadBodyV1]] =
if start == 0:
raise invalidParams("start block should greater than zero")
if count == 0:
raise invalidParams("blocks count should greater than zero")
if count > maxBodyRequest:
raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest)
let
com = ben.com
db = com.db
current = com.syncCurrent.truncate(uint64)
var
header: common.BlockHeader
last = start+count-1
if last > current:
last = current
for bn in start..last:
if not db.getBlockHeader(bn.toBlockNumber, header):
result.add none(ExecutionPayloadBodyV1)
continue
db.getPayloadBodyByHeader(header, result)
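For callers, the effect of the clamping above is that the returned sequence can be shorter than count when the requested range runs past the current head. A small usage sketch (the module name holding getPayloadBodiesByRange and the import paths are assumptions; ben is the node's engine instance):

import
  ../beacon_engine,      # path assumed
  ../execution_types,    # path assumed
  ./api_getbodies        # module name assumed; provides getPayloadBodiesByRange

proc sketchBodiesByRange(ben: BeaconEngineRef) =
  # ask for 32 bodies starting at block 100; if the head is at 120,
  # only 100..120 are returned, and unknown blocks come back as none()
  let bodies = ben.getPayloadBodiesByRange(start = 100'u64, count = 32'u64)
  doAssert bodies.len <= 32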

View File

@ -0,0 +1,68 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
std/[typetraits],
eth/common,
../web3_eth_conv,
../beacon_engine,
../execution_types,
./api_utils,
chronicles
{.push gcsafe, raises:[CatchableError].}
proc getPayload*(ben: BeaconEngineRef, id: PayloadID): GetPayloadV2Response =
trace "Engine API request received",
meth = "GetPayload", id
var payload: ExecutionPayloadV1OrV2
var blockValue: UInt256
if not ben.get(id, blockValue, payload):
raise unknownPayload("Unknown payload")
GetPayloadV2Response(
executionPayload: payload,
blockValue: blockValue
)
proc getPayloadV3*(ben: BeaconEngineRef, id: PayloadID): GetPayloadV3Response =
trace "Engine API request received",
meth = "GetPayload", id
var payload: ExecutionPayloadV3
var blockValue: UInt256
if not ben.get(id, blockValue, payload):
raise unknownPayload("Unknown payload")
let com = ben.com
if not com.isCancunOrLater(ethTime payload.timestamp):
raise unsupportedFork("payload timestamp is less than Cancun activation")
var
blobsBundle: BlobsBundleV1
try:
for ttx in payload.transactions:
let tx = rlp.decode(distinctBase(ttx), Transaction)
if tx.networkPayload.isNil.not:
for blob in tx.networkPayload.blobs:
blobsBundle.blobs.add Web3Blob(blob)
for p in tx.networkPayload.proofs:
blobsBundle.proofs.add Web3KZGProof(p)
for k in tx.networkPayload.commitments:
blobsBundle.commitments.add Web3KZGCommitment(k)
except RlpError:
doAssert(false, "found TypedTransaction that RLP failed to decode")
GetPayloadV3Response(
executionPayload: payload,
blockValue: blockValue,
blobsBundle: blobsBundle
)

View File

@ -0,0 +1,140 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
std/[typetraits, times],
eth/common,
stew/results,
../web3_eth_conv,
../beacon_engine,
../execution_types,
../payload_conv,
./api_utils,
chronicles
{.push gcsafe, raises:[CatchableError].}
template validateVersion(com, timestamp, version) =
if com.isCancunOrLater(timestamp):
if version != Version.V3:
raise invalidParams("if timestamp is Cancun or later, " &
"payload must be ExecutionPayloadV3")
elif com.isShanghaiOrLater(timestamp):
if version != Version.V2:
raise invalidParams("if timestamp is Shanghai or later, " &
"payload must be ExecutionPayloadV2")
elif version != Version.V1:
if com.syncReqRelaxV2:
trace "Relaxed mode, treating payload as V1"
discard
else:
raise invalidParams("if timestamp is earlier than Shanghai, " &
"payload must be ExecutionPayloadV1")
proc newPayload*(ben: BeaconEngineRef,
payload: ExecutionPayload,
beaconRoot = none(Web3Hash)): PayloadStatusV1 =
trace "Engine API request received",
meth = "newPayload",
number = payload.blockNumber,
hash = payload.blockHash
let
com = ben.com
db = com.db
timestamp = ethTime payload.timestamp
version = payload.version
validateVersion(com, timestamp, version)
var header = blockHeader(payload, ethHash beaconRoot)
let blockHash = ethHash payload.blockHash
header.validateBlockHash(blockHash, version).isOkOr:
return error
# If we already have the block locally, ignore the entire execution and just
# return a fake success.
if db.getBlockHeader(blockHash, header):
warn "Ignoring already known beacon payload",
number = header.blockNumber, hash = blockHash
return validStatus(blockHash)
# If the parent is missing, we - in theory - could trigger a sync, but that
# would also entail a reorg. That is problematic if multiple sibling blocks
# are being fed to us, and even more so, if some semi-distant uncle shortens
# our live chain. As such, payload execution will not permit reorgs and thus
# will not trigger a sync cycle. That is fine though, if we get a fork choice
# update after legit payload executions.
var parent: common.BlockHeader
if not db.getBlockHeader(header.parentHash, parent):
# Stash the block away for a potential forced forkchoice update to it
# at a later time.
ben.put(blockHash, header)
# Although we don't want to trigger a sync, if there is one already in
# progress, try to extend it with the current payload request to relieve
# some strain from the forkchoice update.
#if err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header()); err == nil {
# log.Debug("Payload accepted for sync extension", "number", params.Number, "hash", params.BlockHash)
# return beacon.PayloadStatusV1{Status: beacon.SYNCING}, nil
# Either no beacon sync was started yet, or it rejected the delivered
# payload as non-integratable on top of the existing sync. We'll just
# have to rely on the beacon client to forcefully update the head with
# a forkchoice update request.
warn "Ignoring payload with missing parent",
number = header.blockNumber,
hash = blockHash,
parent = header.parentHash
return acceptedStatus()
# We have an existing parent, do some sanity checks to avoid the beacon client
# triggering too early
let ttd = com.ttd.get(high(common.BlockNumber))
if version == Version.V1:
let td = db.getScore(header.parentHash)
if (not com.forkGTE(MergeFork)) and td < ttd:
warn "Ignoring pre-merge payload",
number = header.blockNumber, hash = blockHash, td, ttd
return invalidStatus()
if header.timestamp <= parent.timestamp:
warn "Invalid timestamp",
parent = parent.timestamp, header = header.timestamp
return invalidStatus(db.getHeadBlockHash(), "Invalid timestamp")
if not db.haveBlockAndState(header.parentHash):
ben.put(blockHash, header)
warn "State not available, ignoring new payload",
hash = blockHash,
number = header.blockNumber
let blockHash = latestValidHash(db, parent, ttd)
return acceptedStatus(blockHash)
trace "Inserting block without sethead",
hash = blockHash, number = header.blockNumber
let body = blockBody(payload)
let vres = ben.chain.insertBlockWithoutSetHead(header, body)
if vres != ValidationResult.OK:
let blockHash = latestValidHash(db, parent, ttd)
return invalidStatus(blockHash, "Failed to insert block")
# We've accepted a valid payload from the beacon client. Mark the local
# chain transitions to notify other subsystems (e.g. downloader) of the
# behavioral change.
if not ben.ttdReached():
ben.reachTTD()
# TODO: cancel downloader
return validStatus(blockHash)
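From the RPC layer the result of newPayload is consumed purely through the returned PayloadStatusV1. A hedged sketch of how a caller might branch on it (the module name providing newPayload and the import paths are assumptions):

import
  std/options,
  ../beacon_engine,      # path assumed
  ../execution_types,    # path assumed
  ../web3_eth_conv,      # path assumed; provides the `$` pretty printers
  ./api_newpayload       # module name assumed; provides newPayload

proc sketchNewPayload(ben: BeaconEngineRef, payload: ExecutionPayload) =
  let status = ben.newPayload(payload)
  case status.status
  of PayloadExecutionStatus.valid:
    echo "imported, latest valid hash: ", status.latestValidHash
  of PayloadExecutionStatus.accepted:
    echo "stashed: parent or state missing, waiting for a forkchoice update"
  else:
    echo "rejected: ", status.validationError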

View File

@ -0,0 +1,183 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
std/[typetraits, strutils],
eth/[rlp],
json_rpc/errors,
nimcrypto/[hash, sha2],
stew/[results],
../../constants,
../../db/core_db,
../../utils/utils,
../../common/common,
../execution_types,
../web3_eth_conv
{.push gcsafe, raises:[].}
proc update(ctx: var sha256, wd: WithdrawalV1) =
ctx.update(toBytesBE distinctBase wd.index)
ctx.update(toBytesBE distinctBase wd.validatorIndex)
ctx.update(distinctBase wd.address)
ctx.update(toBytesBE distinctBase wd.amount)
proc computePayloadId*(blockHash: common.Hash256,
params: PayloadAttributes): PayloadID =
var dest: common.Hash256
var ctx: sha256
ctx.init()
ctx.update(blockHash.data)
ctx.update(toBytesBE distinctBase params.timestamp)
ctx.update(distinctBase params.prevRandao)
ctx.update(distinctBase params.suggestedFeeRecipient)
if params.withdrawals.isSome:
for wd in params.withdrawals.get:
ctx.update(wd)
if params.parentBeaconBlockRoot.isSome:
ctx.update(distinctBase params.parentBeaconBlockRoot.get)
ctx.finish dest.data
ctx.clear()
(distinctBase result)[0..7] = dest.data[0..7]
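In other words the payload id is deterministic: it is the first 8 bytes of a SHA-256 over the head hash and the attribute fields, so the same head and attributes always map to the same id. A tiny sketch (hypothetical values; import paths assumed):

import
  eth/common,
  ../execution_types,   # path assumed
  ../web3_eth_conv,     # path assumed; provides w3PrevRandao/w3Address and `$`
  ./api_utils           # path assumed

proc sketchPayloadId(headHash: common.Hash256) =
  let attrs = PayloadAttributes(
    timestamp: Web3Quantity 1_700_000_000'u64,
    prevRandao: w3PrevRandao(),
    suggestedFeeRecipient: w3Address())
  # same inputs, same id
  doAssert $computePayloadId(headHash, attrs) == $computePayloadId(headHash, attrs)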
proc validateBlockHash*(header: common.BlockHeader,
gotHash: common.Hash256,
version: Version): Result[void, PayloadStatusV1]
{.gcsafe, raises: [ValueError].} =
let wantHash = header.blockHash
if wantHash != gotHash:
let status = if version == Version.V1:
PayloadExecutionStatus.invalid_block_hash
else:
PayloadExecutionStatus.invalid
let res = PayloadStatusV1(
status: status,
validationError: some("blockhash mismatch, want $1, got $2" % [
$wantHash, $gotHash])
)
return err(res)
return ok()
template toValidHash*(x: common.Hash256): Option[Web3Hash] =
some(w3Hash x)
proc simpleFCU*(status: PayloadExecutionStatus): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(payloadStatus: PayloadStatusV1(status: status))
proc simpleFCU*(status: PayloadExecutionStatus,
msg: string): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(
payloadStatus: PayloadStatusV1(
status: status,
validationError: some(msg)
)
)
proc invalidFCU*(hash = common.Hash256()): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(payloadStatus:
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(hash)
)
)
proc validFCU*(id: Option[PayloadID],
validHash: common.Hash256): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(
payloadStatus: PayloadStatusV1(
status: PayloadExecutionStatus.valid,
latestValidHash: toValidHash(validHash)
),
payloadId: id
)
proc invalidStatus*(validHash: common.Hash256, msg: string): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(validHash),
validationError: some(msg)
)
proc invalidStatus*(validHash = common.Hash256()): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(validHash)
)
proc acceptedStatus*(validHash: common.Hash256): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.accepted,
latestValidHash: toValidHash(validHash)
)
proc acceptedStatus*(): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.accepted
)
proc validStatus*(validHash: common.Hash256): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.valid,
latestValidHash: toValidHash(validHash)
)
proc invalidParams*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiInvalidParams,
msg: msg
)
proc unknownPayload*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiUnknownPayload,
msg: msg
)
proc invalidAttr*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiInvalidPayloadAttributes,
msg: msg
)
proc unsupportedFork*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiUnsupportedFork,
msg: msg
)
proc tooLargeRequest*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiTooLargeRequest,
msg: msg
)
proc latestValidHash*(db: CoreDbRef,
parent: common.BlockHeader,
ttd: DifficultyInt): common.Hash256
{.gcsafe, raises: [RlpError].} =
let ptd = db.getScore(parent.parentHash)
if ptd >= ttd:
parent.blockHash
else:
# If the most recent valid ancestor is a PoW block,
# latestValidHash MUST be set to ZERO
common.Hash256()
proc invalidFCU*(com: CommonRef,
header: common.BlockHeader): ForkchoiceUpdatedResponse
{.gcsafe, raises: [RlpError].} =
var parent: common.BlockHeader
if not com.db.getBlockHeader(header.parentHash, parent):
return invalidFCU(common.Hash256())
let blockHash = latestValidHash(com.db, parent,
com.ttd.get(high(common.BlockNumber)))
invalidFCU(blockHash)

View File

@ -0,0 +1,171 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
./web3_eth_conv,
./payload_conv,
./execution_types,
./merge_tracker,
./payload_queue,
../db/core_db,
../core/[tx_pool, casper, chain],
../common/common
export
common,
chain
type
BeaconEngineRef* = ref object
txPool: TxPoolRef
merge : MergeTrackerRef
queue : PayloadQueue
chain : ChainRef
{.push gcsafe, raises:[].}
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc setWithdrawals(xp: TxPoolRef, attrs: PayloadAttributes) =
case attrs.version
of Version.V2, Version.V3:
xp.withdrawals = ethWithdrawals attrs.withdrawals.get
else:
xp.withdrawals = @[]
template wrapException(body: untyped): auto =
try:
body
except CatchableError as ex:
err(ex.msg)
# ------------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------------
proc new*(_: type BeaconEngineRef,
txPool: TxPoolRef,
chain: ChainRef): BeaconEngineRef =
BeaconEngineRef(
txPool: txPool,
merge : MergeTrackerRef.new(txPool.com.db),
queue : PayloadQueue(),
chain : chain,
)
# ------------------------------------------------------------------------------
# Public functions, setters
# ------------------------------------------------------------------------------
proc reachTTD*(ben: BeaconEngineRef) =
## ReachTTD is called whenever the first NewHead message is received
## from the consensus layer.
ben.merge.reachTTD()
proc finalizePoS*(ben: BeaconEngineRef) =
## FinalizePoS is called whenever the first FinalisedBlock message is received
## from the consensus layer.
ben.merge.finalizePos()
proc put*(ben: BeaconEngineRef,
hash: common.Hash256, header: common.BlockHeader) =
ben.queue.put(hash, header)
proc put*(ben: BeaconEngineRef, id: PayloadID,
blockValue: UInt256, payload: ExecutionPayload) =
ben.queue.put(id, blockValue, payload)
proc put*(ben: BeaconEngineRef, id: PayloadID,
blockValue: UInt256, payload: SomeExecutionPayload) =
ben.queue.put(id, blockValue, payload)
# ------------------------------------------------------------------------------
# Public functions, getters
# ------------------------------------------------------------------------------
func com*(ben: BeaconEngineRef): CommonRef =
ben.txPool.com
func chain*(ben: BeaconEngineRef): ChainRef =
ben.chain
func ttdReached*(ben: BeaconEngineRef): bool =
## TTDReached reports whether the chain has left the PoW stage.
ben.merge.ttdReached
func posFinalized*(ben: BeaconEngineRef): bool =
## PoSFinalized reports whether the chain has entered the PoS stage.
ben.merge.posFinalized
func blockValue*(ben: BeaconEngineRef): UInt256 =
## Return the sum of the rewards paid to feeRecipient for each
## tx included in a block
ben.txPool.blockValue
proc get*(ben: BeaconEngineRef, hash: common.Hash256,
header: var common.BlockHeader): bool =
ben.queue.get(hash, header)
proc get*(ben: BeaconEngineRef, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayload): bool =
ben.queue.get(id, blockValue, payload)
proc get*(ben: BeaconEngineRef, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayloadV1): bool =
ben.queue.get(id, blockValue, payload)
proc get*(ben: BeaconEngineRef, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayloadV2): bool =
ben.queue.get(id, blockValue, payload)
proc get*(ben: BeaconEngineRef, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayloadV3): bool =
ben.queue.get(id, blockValue, payload)
proc get*(ben: BeaconEngineRef, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayloadV1OrV2): bool =
ben.queue.get(id, blockValue, payload)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc generatePayload*(ben: BeaconEngineRef,
attrs: PayloadAttributes):
Result[ExecutionPayload, string] =
wrapException:
let
xp = ben.txPool
db = xp.com.db
pos = xp.com.pos
headBlock = db.getCanonicalHead()
pos.prevRandao = ethHash attrs.prevRandao
pos.timestamp = ethTime attrs.timestamp
pos.feeRecipient = ethAddr attrs.suggestedFeeRecipient
xp.setWithdrawals(attrs)
if headBlock.blockHash != xp.head.blockHash:
# reorg
discard xp.smartHead(headBlock)
# someBaseFee = true: make sure blk.header
# has the same blockHash as the generated payload
let blk = xp.ethBlock(someBaseFee = true)
if blk.header.extraData.len > 32:
return err "extraData length should not exceed 32 bytes"
ok(executionPayload(blk))
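A single BeaconEngineRef instance is created at node startup and then shared by every Engine API handler. A minimal construction sketch (txPool and chain are assumed to come from the usual node setup; import paths assumed):

import
  ../core/[tx_pool, chain],   # paths assumed
  ./beacon_engine             # path assumed

proc makeBeaconEngine(txPool: TxPoolRef, chain: ChainRef): BeaconEngineRef =
  # the engine owns the merge tracker (persisted in txPool.com.db)
  # and the prepared-payload queue
  BeaconEngineRef.new(txPool, chain)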

View File

@ -1,3 +1,12 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import import
stint, stint,
web3/ethtypes, web3/ethtypes,
@ -98,6 +107,17 @@ func V3*(attr: PayloadAttributes): PayloadAttributesV3 =
parentBeaconBlockRoot: attr.parentBeaconBlockRoot.get parentBeaconBlockRoot: attr.parentBeaconBlockRoot.get
) )
func payloadAttributes*(attr: PayloadAttributesV1): PayloadAttributes =
PayloadAttributes(
timestamp: attr.timestamp,
prevRandao: attr.prevRandao,
suggestedFeeRecipient: attr.suggestedFeeRecipient
)
func payloadAttributes*(x: Option[PayloadAttributesV1]): Option[PayloadAttributes] =
if x.isNone: none(PayloadAttributes)
else: some(payloadAttributes x.get)
func V1V2*(p: ExecutionPayload): ExecutionPayloadV1OrV2 = func V1V2*(p: ExecutionPayload): ExecutionPayloadV1OrV2 =
ExecutionPayloadV1OrV2( ExecutionPayloadV1OrV2(
parentHash: p.parentHash, parentHash: p.parentHash,
@ -175,6 +195,43 @@ func V3*(p: ExecutionPayload): ExecutionPayloadV3 =
excessBlobGas: p.excessBlobGas.get excessBlobGas: p.excessBlobGas.get
) )
func V1*(p: ExecutionPayloadV1OrV2): ExecutionPayloadV1 =
ExecutionPayloadV1(
parentHash: p.parentHash,
feeRecipient: p.feeRecipient,
stateRoot: p.stateRoot,
receiptsRoot: p.receiptsRoot,
logsBloom: p.logsBloom,
prevRandao: p.prevRandao,
blockNumber: p.blockNumber,
gasLimit: p.gasLimit,
gasUsed: p.gasUsed,
timestamp: p.timestamp,
extraData: p.extraData,
baseFeePerGas: p.baseFeePerGas,
blockHash: p.blockHash,
transactions: p.transactions
)
func V2*(p: ExecutionPayloadV1OrV2): ExecutionPayloadV2 =
ExecutionPayloadV2(
parentHash: p.parentHash,
feeRecipient: p.feeRecipient,
stateRoot: p.stateRoot,
receiptsRoot: p.receiptsRoot,
logsBloom: p.logsBloom,
prevRandao: p.prevRandao,
blockNumber: p.blockNumber,
gasLimit: p.gasLimit,
gasUsed: p.gasUsed,
timestamp: p.timestamp,
extraData: p.extraData,
baseFeePerGas: p.baseFeePerGas,
blockHash: p.blockHash,
transactions: p.transactions,
withdrawals: p.withdrawals.get
)
func executionPayload*(p: ExecutionPayloadV1): ExecutionPayload = func executionPayload*(p: ExecutionPayloadV1): ExecutionPayload =
ExecutionPayload( ExecutionPayload(
parentHash: p.parentHash, parentHash: p.parentHash,

View File

@ -0,0 +1,97 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push gcsafe, raises: [].}
import
chronicles,
eth/rlp,
../db/[core_db, storage_types]
type
# TransitionStatus describes the status of the eth1/2 transition. This switch
# between modes is a one-way action which is triggered by the corresponding
# consensus-layer message.
TransitionStatus = object
# The flag is set when the first NewHead message is received
leftPoW : bool
# The flag is set when the first FinalisedBlock message is received
enteredPoS: bool
# MergeTrackerRef is an internal helper structure used to track the eth1/2
# transition status. It's a common structure that can be used in both the
# full node and the light client.
MergeTrackerRef* = ref object
db : CoreDbRef
status: TransitionStatus
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc writeStatus(db: CoreDbRef, status: TransitionStatus) =
db.kvt.put(transitionStatusKey().toOpenArray(), rlp.encode(status))
proc readStatus(db: CoreDbRef): TransitionStatus =
var bytes = db.kvt.get(transitionStatusKey().toOpenArray())
if bytes.len > 0:
try:
result = rlp.decode(bytes, typeof result)
except CatchableError:
error "Failed to decode POS transition status"
# ------------------------------------------------------------------------------
# Constructors
# ------------------------------------------------------------------------------
proc new*(_: type MergeTrackerRef, db: CoreDbRef): MergeTrackerRef =
MergeTrackerRef(
db: db,
status: db.readStatus()
)
# ------------------------------------------------------------------------------
# Public functions, setters
# ------------------------------------------------------------------------------
proc reachTTD*(m: MergeTrackerRef) =
## ReachTTD is called whenever the first NewHead message is received
## from the consensus layer.
if m.status.leftPoW:
return
m.status = TransitionStatus(leftPoW: true)
m.db.writeStatus(m.status)
info "Left PoW stage"
proc finalizePoS*(m: MergeTrackerRef) =
## FinalizePoS is called whenever the first FinalisedBlock message is received
## from the consensus layer.
if m.status.enteredPoS:
return
m.status = TransitionStatus(leftPoW: true, enteredPoS: true)
m.db.writeStatus(m.status)
info "Entered PoS stage"
# ------------------------------------------------------------------------------
# Public functions, getters
# ------------------------------------------------------------------------------
func ttdReached*(m: MergeTrackerRef): bool =
## TTDReached reports whether the chain has left the PoW stage.
m.status.leftPoW
func posFinalized*(m: MergeTrackerRef): bool =
## PoSFinalized reports whether the chain has entered the PoS stage.
m.status.enteredPoS
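Because both flags are written to the database, the transition status survives a restart. A short sketch (db is assumed to be the node's CoreDbRef; import paths assumed):

import
  ../db/core_db,      # path assumed
  ./merge_tracker     # path assumed

proc sketchMergeTracking(db: CoreDbRef) =
  let m = MergeTrackerRef.new(db)
  m.reachTTD()        # first NewHead seen from the consensus layer
  m.finalizePoS()     # first finalized block seen from the consensus layer
  # a fresh tracker over the same db reads the flags back from storage
  let again = MergeTrackerRef.new(db)
  doAssert again.ttdReached and again.posFinalized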

View File

@ -0,0 +1,124 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
./web3_eth_conv,
./execution_types,
../utils/utils,
eth/common
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
func wdRoot(list: openArray[WithdrawalV1]): common.Hash256
{.gcsafe, raises:[RlpError].} =
{.nosideEffect.}:
calcWithdrawalsRoot(ethWithdrawals list)
func wdRoot(x: Option[seq[WithdrawalV1]]): Option[common.Hash256]
{.gcsafe, raises:[RlpError].} =
{.nosideEffect.}:
if x.isNone: none(common.Hash256)
else: some(wdRoot x.get)
func txRoot(list: openArray[Web3Tx]): common.Hash256
{.gcsafe, raises:[RlpError].} =
{.nosideEffect.}:
calcTxRoot(ethTxs list)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
{.push gcsafe, raises:[].}
func executionPayload*(blk: EthBlock): ExecutionPayload =
ExecutionPayload(
parentHash : w3Hash blk.header.parentHash,
feeRecipient : w3Addr blk.header.coinbase,
stateRoot : w3Hash blk.header.stateRoot,
receiptsRoot : w3Hash blk.header.receiptRoot,
logsBloom : w3Bloom blk.header.bloom,
prevRandao : w3PrevRandao blk.header.prevRandao,
blockNumber : w3Qty blk.header.blockNumber,
gasLimit : w3Qty blk.header.gasLimit,
gasUsed : w3Qty blk.header.gasUsed,
timestamp : w3Qty blk.header.timestamp,
extraData : w3ExtraData blk.header.extraData,
baseFeePerGas: blk.header.fee.get(0.u256),
blockHash : w3Hash blk.header,
transactions : w3Txs blk.txs,
withdrawals : w3Withdrawals blk.withdrawals,
blobGasUsed : w3Qty blk.header.blobGasUsed,
excessBlobGas: w3Qty blk.header.excessBlobGas
)
func executionPayloadV1V2*(blk: EthBlock): ExecutionPayloadV1OrV2 =
ExecutionPayloadV1OrV2(
parentHash : w3Hash blk.header.parentHash,
feeRecipient : w3Addr blk.header.coinbase,
stateRoot : w3Hash blk.header.stateRoot,
receiptsRoot : w3Hash blk.header.receiptRoot,
logsBloom : w3Bloom blk.header.bloom,
prevRandao : w3PrevRandao blk.header.prevRandao,
blockNumber : w3Qty blk.header.blockNumber,
gasLimit : w3Qty blk.header.gasLimit,
gasUsed : w3Qty blk.header.gasUsed,
timestamp : w3Qty blk.header.timestamp,
extraData : w3ExtraData blk.header.extraData,
baseFeePerGas: blk.header.fee.get(0.u256),
blockHash : w3Hash blk.header,
transactions : w3Txs blk.txs,
withdrawals : w3Withdrawals blk.withdrawals,
)
func blockHeader*(p: ExecutionPayload,
beaconRoot: Option[common.Hash256]):
common.BlockHeader {.gcsafe, raises:[RlpError].} =
common.BlockHeader(
parentHash : ethHash p.parentHash,
ommersHash : EMPTY_UNCLE_HASH,
coinbase : ethAddr p.feeRecipient,
stateRoot : ethHash p.stateRoot,
txRoot : txRoot p.transactions,
receiptRoot : ethHash p.receiptsRoot,
bloom : ethBloom p.logsBloom,
difficulty : 0.u256,
blockNumber : u256 p.blockNumber,
gasLimit : ethGasInt p.gasLimit,
gasUsed : ethGasInt p.gasUsed,
timestamp : ethTime p.timestamp,
extraData : ethBlob p.extraData,
mixDigest : ethHash p.prevRandao,
nonce : default(BlockNonce),
fee : some(p.baseFeePerGas),
withdrawalsRoot: wdRoot p.withdrawals,
blobGasUsed : u64(p.blobGasUsed),
excessBlobGas : u64(p.excessBlobGas),
parentBeaconBlockRoot: beaconRoot
)
func blockBody*(p: ExecutionPayload):
common.BlockBody {.gcsafe, raises:[RlpError].} =
common.BlockBody(
uncles : @[],
transactions: ethTxs p.transactions,
withdrawals : ethWithdrawals p.withdrawals,
)
func ethBlock*(p: ExecutionPayload,
beaconRoot: Option[common.Hash256]):
common.EthBlock {.gcsafe, raises:[RlpError].} =
common.EthBlock(
header : blockHeader(p, beaconRoot),
uncles : @[],
txs : ethTxs p.transactions,
withdrawals: ethWithdrawals p.withdrawals,
)
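Since blockHeader reconstructs every header field that executionPayload serialises (difficulty pinned to 0, nonce zeroed, ommersHash set to the empty-uncle hash), converting a block to a payload and back must reproduce the original block hash. A small self-check sketch (hypothetical helper; import paths assumed):

import
  eth/common,
  ./payload_conv,     # path assumed
  ./web3_eth_conv     # path assumed

proc checkPayloadRoundTrip(blk: common.EthBlock) =
  let payload = executionPayload(blk)
  let header  = blockHeader(payload, blk.header.parentBeaconBlockRoot)
  # the hash carried by the payload must match the re-derived header hash
  doAssert w3Hash(header) == payload.blockHash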

View File

@ -8,28 +8,19 @@
# those terms. # those terms.
import import
eth/common,
web3/engine_api_types, web3/engine_api_types,
./merger, ./execution_types
../execution_types
import eth/common/eth_types except BlockHeader
export merger, eth_types
type
EthBlockHeader* = eth_types.BlockHeader
Hash256 = eth_types.Hash256
const const
# maxTrackedPayloads is the maximum number of prepared payloads the execution # maxTrackedPayloads is the maximum number of prepared payloads the execution
# engine tracks before evicting old ones. Ideally we should only ever track the # engine tracks before evicting old ones. Ideally we should only ever track
# latest one; but have a slight wiggle room for non-ideal conditions. # the latest one; but have a slight wiggle room for non-ideal conditions.
MaxTrackedPayloads = 10 MaxTrackedPayloads = 10
# maxTrackedHeaders is the maximum number of executed payloads the execution # maxTrackedHeaders is the maximum number of executed payloads the execution
# engine tracks before evicting old ones. Ideally we should only ever track the # engine tracks before evicting old ones. Ideally we should only ever track
# latest one; but have a slight wiggle room for non-ideal conditions. # the latest one; but have a slight wiggle room for non-ideal conditions.
MaxTrackedHeaders = 10 MaxTrackedHeaders = 10
type type
@ -46,14 +37,19 @@ type
blockValue: UInt256 blockValue: UInt256
HeaderItem = object HeaderItem = object
hash: Hash256 hash: common.Hash256
header: EthBlockHeader header: common.BlockHeader
EngineApiRef* = ref object PayloadQueue* = ref object
merger: MergerRef
payloadQueue: SimpleQueue[MaxTrackedPayloads, PayloadItem] payloadQueue: SimpleQueue[MaxTrackedPayloads, PayloadItem]
headerQueue: SimpleQueue[MaxTrackedHeaders, HeaderItem] headerQueue: SimpleQueue[MaxTrackedHeaders, HeaderItem]
{.push gcsafe, raises:[].}
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template shiftRight[M, T](x: var SimpleQueue[M, T]) = template shiftRight[M, T](x: var SimpleQueue[M, T]) =
x.list[1..^1] = x.list[0..^2] x.list[1..^1] = x.list[0..^2]
@ -66,34 +62,36 @@ iterator items[M, T](x: SimpleQueue[M, T]): T =
if z.used: if z.used:
yield z.data yield z.data
template new*(_: type EngineApiRef): EngineApiRef = # ------------------------------------------------------------------------------
{.error: "EngineApiRef should be created with merger param " & $instantiationInfo().} # Public functions, setters
# ------------------------------------------------------------------------------
proc new*(_: type EngineApiRef, merger: MergerRef): EngineApiRef = proc put*(api: var PayloadQueue,
EngineApiRef( hash: common.Hash256, header: common.BlockHeader) =
merger: merger
)
proc put*(api: EngineApiRef, hash: Hash256, header: EthBlockHeader) =
api.headerQueue.put(HeaderItem(hash: hash, header: header)) api.headerQueue.put(HeaderItem(hash: hash, header: header))
proc get*(api: EngineApiRef, hash: Hash256, header: var EthBlockHeader): bool = proc put*(api: var PayloadQueue, id: PayloadID,
blockValue: UInt256, payload: ExecutionPayload) =
api.payloadQueue.put(PayloadItem(id: id,
payload: payload, blockValue: blockValue))
proc put*(api: var PayloadQueue, id: PayloadID,
blockValue: UInt256, payload: SomeExecutionPayload) =
api.put(id, blockValue, payload.executionPayload)
# ------------------------------------------------------------------------------
# Public functions, getters
# ------------------------------------------------------------------------------
proc get*(api: PayloadQueue, hash: common.Hash256,
header: var common.BlockHeader): bool =
for x in api.headerQueue: for x in api.headerQueue:
if x.hash == hash: if x.hash == hash:
header = x.header header = x.header
return true return true
false false
proc put*(api: EngineApiRef, id: PayloadID, proc get*(api: PayloadQueue, id: PayloadID,
blockValue: UInt256, payload: ExecutionPayload) =
api.payloadQueue.put(PayloadItem(id: id,
payload: payload, blockValue: blockValue))
proc put*(api: EngineApiRef, id: PayloadID,
blockValue: UInt256, payload: SomeExecutionPayload) =
api.put(id, blockValue, payload.executionPayload)
proc get*(api: EngineApiRef, id: PayloadID,
blockValue: var UInt256, blockValue: var UInt256,
payload: var ExecutionPayload): bool = payload: var ExecutionPayload): bool =
for x in api.payloadQueue: for x in api.payloadQueue:
@ -103,7 +101,7 @@ proc get*(api: EngineApiRef, id: PayloadID,
return true return true
false false
proc get*(api: EngineApiRef, id: PayloadID, proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256, blockValue: var UInt256,
payload: var ExecutionPayloadV1): bool = payload: var ExecutionPayloadV1): bool =
var p: ExecutionPayload var p: ExecutionPayload
@ -112,7 +110,7 @@ proc get*(api: EngineApiRef, id: PayloadID,
payload = p.V1 payload = p.V1
return found return found
proc get*(api: EngineApiRef, id: PayloadID, proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256, blockValue: var UInt256,
payload: var ExecutionPayloadV2): bool = payload: var ExecutionPayloadV2): bool =
var p: ExecutionPayload var p: ExecutionPayload
@ -121,7 +119,7 @@ proc get*(api: EngineApiRef, id: PayloadID,
payload = p.V2 payload = p.V2
return found return found
proc get*(api: EngineApiRef, id: PayloadID, proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256, blockValue: var UInt256,
payload: var ExecutionPayloadV3): bool = payload: var ExecutionPayloadV3): bool =
var p: ExecutionPayload var p: ExecutionPayload
@ -130,7 +128,7 @@ proc get*(api: EngineApiRef, id: PayloadID,
payload = p.V3 payload = p.V3
return found return found
proc get*(api: EngineApiRef, id: PayloadID, proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256, blockValue: var UInt256,
payload: var ExecutionPayloadV1OrV2): bool = payload: var ExecutionPayloadV1OrV2): bool =
var p: ExecutionPayload var p: ExecutionPayload
@ -138,6 +136,3 @@ proc get*(api: EngineApiRef, id: PayloadID,
doAssert(p.version in {Version.V1, Version.V2}) doAssert(p.version in {Version.V1, Version.V2})
payload = p.V1V2 payload = p.V1V2
return found return found
proc merger*(api: EngineApiRef): MergerRef =
api.merger
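The renamed PayloadQueue keeps its old eviction semantics: only the last MaxTrackedHeaders/MaxTrackedPayloads entries are retained. A minimal header put/get sketch (hypothetical values; import path assumed):

import
  eth/common,
  ./payload_queue   # path assumed

proc sketchHeaderQueue() =
  var q = PayloadQueue()
  var hash: common.Hash256
  hash.data[0] = 0xab
  q.put(hash, common.BlockHeader(blockNumber: 1.u256))
  var header: common.BlockHeader
  doAssert q.get(hash, header)
  doAssert header.blockNumber == 1.u256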

View File

@ -0,0 +1,197 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
std/[options, times, typetraits],
web3/ethtypes,
web3/engine_api_types,
eth/common/eth_types_rlp,
stew/byteutils
from web3/ethtypes as web3types import nil
import eth/common/eth_types as common
type
Web3Hash* = web3types.Hash256
Web3Address* = web3types.Address
Web3Bloom* = web3types.FixedBytes[256]
Web3Quantity* = web3types.Quantity
Web3PrevRandao* = web3types.FixedBytes[32]
Web3ExtraData* = web3types.DynamicBytes[0, 32]
Web3Tx* = web3types.TypedTransaction
Web3Blob* = web3types.Blob
Web3KZGProof* = web3types.KZGProof
Web3KZGCommitment* = web3types.KZGCommitment
{.push gcsafe, raises:[].}
# ------------------------------------------------------------------------------
# Pretty printers
# ------------------------------------------------------------------------------
proc `$`*(x: Option[common.Hash256]): string =
if x.isNone: "none"
else: x.get().data.toHex
proc `$`*(x: Option[Web3Hash]): string =
if x.isNone: "none"
else: x.get().toHex
proc `$`*(x: Option[PayloadID]): string =
if x.isNone: "none"
else: x.get().toHex
proc `$`*[N](x: FixedBytes[N]): string =
x.toHex
proc `$`*(x: Web3Quantity): string =
$distinctBase(x)
proc `$`*(x: Web3Address): string =
distinctBase(x).toHex
# ------------------------------------------------------------------------------
# Web3 defaults
# ------------------------------------------------------------------------------
func w3PrevRandao*(): Web3PrevRandao =
discard
func w3Address*(): Web3Address =
discard
# ------------------------------------------------------------------------------
# Web3 types to Eth types
# ------------------------------------------------------------------------------
template unsafeQuantityToInt64*(q: Web3Quantity): int64 =
int64 q
func u64*(x: Option[Web3Quantity]): Option[uint64] =
if x.isNone: none(uint64)
else: some(uint64 x.get)
func u256*(x: Web3Quantity): UInt256 =
u256(x.uint64)
func ethTime*(x: Web3Quantity): common.EthTime =
fromUnix(x.unsafeQuantityToInt64)
func ethHash*(x: Web3PrevRandao): common.Hash256 =
common.Hash256(data: distinctBase x)
func ethHash*(x: Option[Web3Hash]): Option[common.Hash256] =
if x.isNone: none(common.Hash256)
else: some(ethHash x.get)
func ethAddr*(x: Web3Address): common.EthAddress =
EthAddress x
func ethBloom*(x: Web3Bloom): common.BloomFilter =
common.BloomFilter distinctBase x
func ethGasInt*(x: Web3Quantity): common.GasInt =
common.GasInt x
func ethBlob*(x: Web3ExtraData): common.Blob =
common.Blob distinctBase x
func ethWithdrawal*(x: WithdrawalV1): common.Withdrawal =
result.index = x.index.uint64
result.validatorIndex = x.validatorIndex.uint64
result.address = x.address.EthAddress
result.amount = x.amount.uint64
func ethWithdrawals*(list: openArray[WithdrawalV1]):
seq[common.Withdrawal] =
result = newSeqOfCap[common.Withdrawal](list.len)
for x in list:
result.add ethWithdrawal(x)
func ethWithdrawals*(x: Option[seq[WithdrawalV1]]):
Option[seq[common.Withdrawal]] =
if x.isNone: none(seq[common.Withdrawal])
else: some(ethWithdrawals x.get)
func ethTx*(x: Web3Tx): common.Transaction {.gcsafe, raises:[RlpError].} =
result = rlp.decode(distinctBase x, common.Transaction)
func ethTxs*(list: openArray[Web3Tx]):
seq[common.Transaction] {.gcsafe, raises:[RlpError].} =
result = newSeqOfCap[common.Transaction](list.len)
for x in list:
result.add ethTx(x)
# ------------------------------------------------------------------------------
# Eth types to Web3 types
# ------------------------------------------------------------------------------
func w3Hash*(x: common.Hash256): Web3Hash =
Web3Hash x.data
func w3Hash*(x: Option[common.Hash256]): Option[BlockHash] =
if x.isNone: none(BlockHash)
else: some(BlockHash x.get.data)
func w3Hash*(x: common.BlockHeader): BlockHash =
BlockHash rlpHash(x).data
func w3Addr*(x: common.EthAddress): Web3Address =
Web3Address x
func w3Bloom*(x: common.BloomFilter): Web3Bloom =
Web3Bloom x
func w3PrevRandao*(x: common.Hash256): Web3PrevRandao =
Web3PrevRandao x.data
func w3Qty*(x: UInt256): Web3Quantity =
Web3Quantity x.truncate(uint64)
func w3Qty*(x: common.GasInt): Web3Quantity =
Web3Quantity x.uint64
func w3Qty*(x: common.EthTime): Web3Quantity =
Web3Quantity x.toUnix
func w3Qty*(x: common.EthTime, y: int): Web3Quantity =
Web3Quantity(x.toUnix + y.int64)
func w3Qty*(x: Option[uint64]): Option[Web3Quantity] =
if x.isNone: none(Web3Quantity)
else: some(Web3Quantity x.get)
func w3ExtraData*(x: common.Blob): Web3ExtraData =
Web3ExtraData x
func w3Withdrawal*(w: common.Withdrawal): WithdrawalV1 =
WithdrawalV1(
index : Web3Quantity w.index,
validatorIndex: Web3Quantity w.validatorIndex,
address : Web3Address w.address,
amount : Web3Quantity w.amount
)
func w3Withdrawals*(list: openArray[common.Withdrawal]): seq[WithdrawalV1] =
result = newSeqOfCap[WithdrawalV1](list.len)
for x in list:
result.add w3Withdrawal(x)
func w3Withdrawals*(x: Option[seq[common.Withdrawal]]):
Option[seq[WithdrawalV1]] =
if x.isNone: none(seq[WithdrawalV1])
else: some(w3Withdrawals x.get)
func w3Tx*(tx: common.Transaction): Web3Tx =
Web3Tx rlp.encode(tx.removeNetworkPayload)
func w3Txs*(list: openArray[common.Transaction]): seq[Web3Tx] =
result = newSeqOfCap[Web3Tx](list.len)
for tx in list:
result.add w3Tx(tx)
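These helpers are intended to be lossless in both directions. A tiny round-trip sketch for withdrawals (hypothetical values; import path assumed):

import
  eth/common,
  ./web3_eth_conv   # path assumed

proc sketchWithdrawalRoundTrip() =
  let wd = common.Withdrawal(
    index: 7'u64,
    validatorIndex: 1234'u64,
    address: default(common.EthAddress),
    amount: 32_000_000'u64)
  let back = ethWithdrawal(w3Withdrawal(wd))
  doAssert back.index == wd.index and
           back.validatorIndex == wd.validatorIndex and
           back.address == wd.address and
           back.amount == wd.amount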

View File

@ -102,7 +102,7 @@ proc prepare*(c: Clique; parent: BlockHeader, header: var BlockHeader): CliqueOk
header.coinbase.reset header.coinbase.reset
let modEpoch = (parent.blockNumber+1) mod c.cfg.epoch let modEpoch = (parent.blockNumber+1) mod c.cfg.epoch
if modEpoch != 0: if modEpoch.isZero.not:
# Gather all the proposals that make sense voting on # Gather all the proposals that make sense voting on
var addresses: seq[EthAddress] var addresses: seq[EthAddress]
for (address,authorize) in c.proposals.pairs: for (address,authorize) in c.proposals.pairs:
@ -120,7 +120,7 @@ proc prepare*(c: Clique; parent: BlockHeader, header: var BlockHeader): CliqueOk
# Ensure the extra data has all its components # Ensure the extra data has all its components
header.extraData.setLen(EXTRA_VANITY) header.extraData.setLen(EXTRA_VANITY)
if modEpoch == 0: if modEpoch.isZero:
header.extraData.add c.snapshot.ballot.authSigners.mapIt(toSeq(it)).concat header.extraData.add c.snapshot.ballot.authSigners.mapIt(toSeq(it)).concat
header.extraData.add 0.byte.repeat(EXTRA_SEAL) header.extraData.add 0.byte.repeat(EXTRA_SEAL)

View File

@ -128,10 +128,10 @@ proc maxCheckPointLe(d: var LocalSnaps; number: BlockNumber): BlockNumber =
0.u256 0.u256
proc isCheckPoint(d: var LocalSnaps; number: BlockNumber): bool = proc isCheckPoint(d: var LocalSnaps; number: BlockNumber): bool =
(number mod d.c.cfg.ckpInterval) == 0 (number mod d.c.cfg.ckpInterval).isZero
proc isEpoch(d: var LocalSnaps; number: BlockNumber): bool = proc isEpoch(d: var LocalSnaps; number: BlockNumber): bool =
(number mod d.c.cfg.epoch) == 0 (number mod d.c.cfg.epoch).isZero
proc isSnapshotPosition(d: var LocalSnaps; number: BlockNumber): bool = proc isSnapshotPosition(d: var LocalSnaps; number: BlockNumber): bool =
# clique/clique.go(394): if number == 0 || (number%c.config.Epoch [..] # clique/clique.go(394): if number == 0 || (number%c.config.Epoch [..]

View File

@ -209,7 +209,7 @@ proc verifyHeaderFields(c: Clique; header: BlockHeader): CliqueOkResult =
return err((errFutureBlock,"")) return err((errFutureBlock,""))
# Checkpoint blocks need to enforce zero beneficiary # Checkpoint blocks need to enforce zero beneficiary
let isCheckPoint = (header.blockNumber mod c.cfg.epoch.u256) == 0 let isCheckPoint = (header.blockNumber mod c.cfg.epoch.u256).isZero
if isCheckPoint and not header.coinbase.isZero: if isCheckPoint and not header.coinbase.isZero:
return err((errInvalidCheckpointBeneficiary,"")) return err((errInvalidCheckpointBeneficiary,""))

View File

@ -93,7 +93,7 @@ proc snapshotApplySeq*(s: Snapshot; headers: var seq[BlockHeader],
number = header.blockNumber number = header.blockNumber
# Remove any votes on checkpoint blocks # Remove any votes on checkpoint blocks
if (number mod s.cfg.epoch) == 0: if (number mod s.cfg.epoch).isZero:
# Note that the correctness of the authorised accounts list is verified in # Note that the correctness of the authorised accounts list is verified in
# clique/clique.verifyCascadingFields(), # clique/clique.verifyCascadingFields(),
# see clique/clique.go(355): if number%c.config.Epoch == 0 { # see clique/clique.go(355): if number%c.config.Epoch == 0 {

View File

@ -1,42 +1,30 @@
# Nimbus # Nimbus
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# http://www.apache.org/licenses/LICENSE-2.0) # * MIT license ([LICENSE-MIT](LICENSE-MIT))
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or # at your option.
# http://opensource.org/licenses/MIT) # This file may not be copied, modified, or distributed except according to
# at your option. This file may not be copied, modified, or distributed except # those terms.
# according to those terms.
import import
std/[sequtils, times, typetraits], std/[times],
pkg/[chronos, pkg/[chronos,
stew/results, stew/results,
chronicles, chronicles,
eth/keys, eth/keys],
eth/rlp],
".."/[config, ".."/[config,
constants], constants],
"."/[ "."/[
chain, chain,
tx_pool, tx_pool,
casper,
validate], validate],
"."/clique/[ "."/clique/[
clique_desc, clique_desc,
clique_cfg, clique_cfg,
clique_sealer], clique_sealer],
../utils/utils, ../utils/utils,
../common/[common, context], ../common/[common, context]
../rpc/execution_types
from web3/ethtypes as web3types import nil, TypedTransaction, WithdrawalV1, ExecutionPayloadV1OrV2, toExecutionPayloadV1OrV2, toExecutionPayloadV1
from web3/engine_api_types import PayloadAttributesV1, ExecutionPayloadV1, PayloadAttributesV2, ExecutionPayloadV2
export
# generateExecutionPayload caller will need this
casper
type type
EngineState* = enum EngineState* = enum
@ -44,11 +32,6 @@ type
EngineRunning, EngineRunning,
EnginePostMerge EnginePostMerge
Web3BlockHash = web3types.BlockHash
Web3Address = web3types.Address
Web3Bloom = web3types.FixedBytes[256]
Web3Quantity = web3types.Quantity
SealingEngineRef* = ref SealingEngineObj SealingEngineRef* = ref SealingEngineObj
SealingEngineObj = object of RootObj SealingEngineObj = object of RootObj
state: EngineState state: EngineState
@ -141,102 +124,6 @@ proc sealingLoop(engine: SealingEngineRef): Future[void] {.async.} =
discard engine.txPool.smartHead(blk.header) # add transactions update jobs discard engine.txPool.smartHead(blk.header) # add transactions update jobs
info "block generated", number=blk.header.blockNumber info "block generated", number=blk.header.blockNumber
template unsafeQuantityToInt64(q: web3types.Quantity): int64 =
int64 q
proc toTypedTransaction(tx: Transaction): TypedTransaction =
web3types.TypedTransaction(rlp.encode(tx.removeNetworkPayload))
func toWithdrawal(x: WithdrawalV1): Withdrawal =
result.index = x.index.uint64
result.validatorIndex = x.validatorIndex.uint64
result.address = x.address.EthAddress
result.amount = x.amount.uint64
func toWithdrawals(list: openArray[WithdrawalV1]): seq[Withdrawal] =
result = newSeqOfCap[Withdrawal](list.len)
for x in list:
result.add toWithdrawal(x)
proc generateExecutionPayload*(engine: SealingEngineRef,
payloadAttrs: SomePayloadAttributes): Result[ExecutionPayload, string] =
let
headBlock = try: engine.chain.db.getCanonicalHead()
except CatchableError: return err "No head block in database"
pos = engine.chain.com.pos
pos.prevRandao = Hash256(data: distinctBase payloadAttrs.prevRandao)
pos.timestamp = fromUnix(payloadAttrs.timestamp.unsafeQuantityToInt64)
pos.feeRecipient = EthAddress payloadAttrs.suggestedFeeRecipient
when payloadAttrs is PayloadAttributesV2:
engine.txPool.withdrawals = payloadAttrs.withdrawals.toWithdrawals
elif payloadAttrs is PayloadAttributesV3:
engine.txPool.withdrawals = payloadAttrs.withdrawals.toWithdrawals
else:
engine.txPool.withdrawals = @[]
if headBlock.blockHash != engine.txPool.head.blockHash:
# reorg
discard engine.txPool.smartHead(headBlock)
var blk: EthBlock
let res = engine.generateBlock(blk)
if res.isErr:
error "sealing engine generateBlock error", msg = res.error
return err(res.error)
# make sure both generated block header and payloadRes(ExecutionPayloadV2)
# produce the same blockHash
blk.header.fee = some(blk.header.fee.get(UInt256.zero)) # force it with some(UInt256)
let blockHash = rlpHash(blk.header)
if blk.header.extraData.len > 32:
return err "extraData length should not exceed 32 bytes"
let transactions = blk.txs.map(toTypedTransaction)
let withdrawals =
when payloadAttrs is PayloadAttributesV2:
some(payloadAttrs.withdrawals)
else:
none[seq[WithdrawalV1]]()
let blobGasUsed = if blk.header.blobGasUsed.isSome:
some(blk.header.blobGasUsed.get.Quantity)
else:
none(Quantity)
let excessBlobGas = if blk.header.excessBlobGas.isSome:
some(blk.header.excessBlobGas.get.Quantity)
else:
none(Quantity)
return ok(ExecutionPayload(
parentHash: Web3BlockHash blk.header.parentHash.data,
feeRecipient: Web3Address blk.header.coinbase,
stateRoot: Web3BlockHash blk.header.stateRoot.data,
receiptsRoot: Web3BlockHash blk.header.receiptRoot.data,
logsBloom: Web3Bloom blk.header.bloom,
prevRandao: payloadAttrs.prevRandao,
blockNumber: Web3Quantity blk.header.blockNumber.truncate(uint64),
gasLimit: Web3Quantity blk.header.gasLimit,
gasUsed: Web3Quantity blk.header.gasUsed,
timestamp: payloadAttrs.timestamp,
extraData: web3types.DynamicBytes[0, 32] blk.header.extraData,
baseFeePerGas: blk.header.fee.get(UInt256.zero),
blockHash: Web3BlockHash blockHash.data,
transactions: transactions,
withdrawals: withdrawals,
blobGasUsed: blobGasUsed,
excessBlobGas: excessBlobGas
))
proc blockValue*(engine: SealingEngineRef): UInt256 =
# return sum of reward for feeRecipient for each
# tx included in a block
engine.txPool.blockValue
proc new*(_: type SealingEngineRef, proc new*(_: type SealingEngineRef,
chain: ChainRef, chain: ChainRef,
ctx: EthContext, ctx: EthContext,

View File

@ -593,6 +593,10 @@ proc triggerReorg*(xp: TxPoolRef)
# Public functions, getters # Public functions, getters
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc com*(xp: TxPoolRef): CommonRef =
## Getter
xp.chain.com
proc baseFee*(xp: TxPoolRef): GasPrice = proc baseFee*(xp: TxPoolRef): GasPrice =
## Getter, this parameter modifies/determines the expected gain when packing ## Getter, this parameter modifies/determines the expected gain when packing
xp.chain.baseFee xp.chain.baseFee
@ -602,7 +606,7 @@ proc dirtyBuckets*(xp: TxPoolRef): bool =
## flag is also set. ## flag is also set.
xp.pDirtyBuckets xp.pDirtyBuckets
proc ethBlock*(xp: TxPoolRef): EthBlock proc ethBlock*(xp: TxPoolRef, someBaseFee: bool = false): EthBlock
{.gcsafe,raises: [CatchableError].} = {.gcsafe,raises: [CatchableError].} =
## Getter, retrieves a packed block ready for mining and signing depending ## Getter, retrieves a packed block ready for mining and signing depending
## on the internally cached block chain head, the txs in the pool and some ## on the internally cached block chain head, the txs in the pool and some
@ -625,6 +629,10 @@ proc ethBlock*(xp: TxPoolRef): EthBlock
if com.forkGTE(Shanghai): if com.forkGTE(Shanghai):
result.withdrawals = some(xp.chain.withdrawals) result.withdrawals = some(xp.chain.withdrawals)
if someBaseFee:
# make sure baseFee always has something
result.header.fee = some(result.header.fee.get(0.u256))
proc gasCumulative*(xp: TxPoolRef): GasInt = proc gasCumulative*(xp: TxPoolRef): GasInt =
## Getter, retrieves the gas that will be burned in the block after ## Getter, retrieves the gas that will be burned in the block after
## retrieving it via `ethBlock`. ## retrieving it via `ethBlock`.

View File

@ -12,7 +12,10 @@
## ============================================ ## ============================================
## ##
{.push raises: [CatchableError].} # basically the annotation type of a `Vm2OpFn` when defined(evmc_enabled):
{.push raises: [CatchableError].} # basically the annotation type of a `Vm2OpFn`
else:
{.push raises: [].}
import import
../../../errors, ../../../errors,

View File

@ -262,7 +262,7 @@ const
jumpIOp: Vm2OpFn = proc (k: var Vm2Ctx) = jumpIOp: Vm2OpFn = proc (k: var Vm2Ctx) =
## 0x57, Conditionally alter the program counter. ## 0x57, Conditionally alter the program counter.
let (jumpTarget, testedValue) = k.cpt.stack.popInt(2) let (jumpTarget, testedValue) = k.cpt.stack.popInt(2)
if testedValue != 0: if testedValue.isZero.not:
jumpImpl(k.cpt, jumpTarget) jumpImpl(k.cpt, jumpTarget)
pcOp: Vm2OpFn = proc (k: var Vm2Ctx) = pcOp: Vm2OpFn = proc (k: var Vm2Ctx) =

View File

@ -26,7 +26,7 @@ import
./graphql/ethapi, ./graphql/ethapi,
./core/[chain, sealer, clique/clique_desc, ./core/[chain, sealer, clique/clique_desc,
clique/clique_sealer, tx_pool, block_import], clique/clique_sealer, tx_pool, block_import],
./rpc/merge/merger, ./beacon/beacon_engine,
./sync/[legacy, full, protocol, snap, stateless, ./sync/[legacy, full, protocol, snap, stateless,
protocol/les_protocol, handlers, peers], protocol/les_protocol, handlers, peers],
./evm/async/data_sources/json_rpc_data_source ./evm/async/data_sources/json_rpc_data_source
@ -61,7 +61,7 @@ type
snapSyncRef: SnapSyncRef snapSyncRef: SnapSyncRef
fullSyncRef: FullSyncRef fullSyncRef: FullSyncRef
statelessSyncRef: StatelessSyncRef statelessSyncRef: StatelessSyncRef
merger: MergerRef beaconEngine: BeaconEngineRef
proc importBlocks(conf: NimbusConf, com: CommonRef) = proc importBlocks(conf: NimbusConf, com: CommonRef) =
if string(conf.blocksFile).len > 0: if string(conf.blocksFile).len > 0:
@ -74,9 +74,6 @@ proc importBlocks(conf: NimbusConf, com: CommonRef) =
proc basicServices(nimbus: NimbusNode, proc basicServices(nimbus: NimbusNode,
conf: NimbusConf, conf: NimbusConf,
com: CommonRef) = com: CommonRef) =
# app wide TxPool singleton
# TODO: disable some of txPool internal mechanism if
# the engineSigner is zero.
nimbus.txPool = TxPoolRef.new(com, conf.engineSigner) nimbus.txPool = TxPoolRef.new(com, conf.engineSigner)
# txPool must be informed of active head # txPool must be informed of active head
@ -92,10 +89,7 @@ proc basicServices(nimbus: NimbusNode,
nimbus.chainRef.extraValidation = 0 < verifyFrom nimbus.chainRef.extraValidation = 0 < verifyFrom
nimbus.chainRef.verifyFrom = verifyFrom nimbus.chainRef.verifyFrom = verifyFrom
# this is temporary workaround to track POS transition nimbus.beaconEngine = BeaconEngineRef.new(nimbus.txPool, nimbus.chainRef)
# until we have proper chain config and hard fork module
# see issue #640
nimbus.merger = MergerRef.new(com.db)
proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) = proc manageAccounts(nimbus: NimbusNode, conf: NimbusConf) =
if string(conf.keyStore).len > 0: if string(conf.keyStore).len > 0:
@ -371,32 +365,32 @@ proc localServices(nimbus: NimbusNode, conf: NimbusConf,
nimbus.sealingEngine.start() nimbus.sealingEngine.start()
if conf.engineApiEnabled: if conf.engineApiEnabled:
let maybeAsyncDataSource = maybeStatelessAsyncDataSource(nimbus, conf) #let maybeAsyncDataSource = maybeStatelessAsyncDataSource(nimbus, conf)
if conf.engineApiPort != conf.rpcPort: if conf.engineApiPort != conf.rpcPort:
nimbus.engineApiServer = newRpcHttpServer( nimbus.engineApiServer = newRpcHttpServer(
[initTAddress(conf.engineApiAddress, conf.engineApiPort)], [initTAddress(conf.engineApiAddress, conf.engineApiPort)],
authHooks = @[httpJwtAuthHook, httpCorsHook] authHooks = @[httpJwtAuthHook, httpCorsHook]
) )
setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiServer, nimbus.merger, maybeAsyncDataSource) setupEngineAPI(nimbus.beaconEngine, nimbus.engineApiServer)
setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.engineApiServer) setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.engineApiServer)
nimbus.engineApiServer.start() nimbus.engineApiServer.start()
else: else:
setupEngineAPI(nimbus.sealingEngine, nimbus.rpcServer, nimbus.merger, maybeAsyncDataSource) setupEngineAPI(nimbus.beaconEngine, nimbus.rpcServer)
info "Starting engine API server", port = conf.engineApiPort info "Starting engine API server", port = conf.engineApiPort
if conf.engineApiWsEnabled: if conf.engineApiWsEnabled:
let maybeAsyncDataSource = maybeStatelessAsyncDataSource(nimbus, conf) #let maybeAsyncDataSource = maybeStatelessAsyncDataSource(nimbus, conf)
if conf.engineApiWsPort != conf.wsPort: if conf.engineApiWsPort != conf.wsPort:
nimbus.engineApiWsServer = newRpcWebSocketServer( nimbus.engineApiWsServer = newRpcWebSocketServer(
initTAddress(conf.engineApiWsAddress, conf.engineApiWsPort), initTAddress(conf.engineApiWsAddress, conf.engineApiWsPort),
authHooks = @[wsJwtAuthHook, wsCorsHook] authHooks = @[wsJwtAuthHook, wsCorsHook]
) )
setupEngineAPI(nimbus.sealingEngine, nimbus.engineApiWsServer, nimbus.merger, maybeAsyncDataSource) setupEngineAPI(nimbus.beaconEngine, nimbus.engineApiWsServer)
setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.engineApiWsServer) setupEthRpc(nimbus.ethNode, nimbus.ctx, com, nimbus.txPool, nimbus.engineApiWsServer)
nimbus.engineApiWsServer.start() nimbus.engineApiWsServer.start()
else: else:
setupEngineAPI(nimbus.sealingEngine, nimbus.wsRpcServer, nimbus.merger, maybeAsyncDataSource) setupEngineAPI(nimbus.beaconEngine, nimbus.wsRpcServer)
info "Starting WebSocket engine API server", port = conf.engineApiWsPort info "Starting WebSocket engine API server", port = conf.engineApiWsPort

View File

@ -1,5 +1,5 @@
# Nimbus # Nimbus
# Copyright (c) 2018 Status Research & Development GmbH # Copyright (c) 202-2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)) # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT)) # * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -8,533 +8,17 @@
# those terms. # those terms.
import import
std/[typetraits, times, strutils, sequtils, sets], std/[typetraits, sequtils, sets],
stew/[results, byteutils], stew/[byteutils],
json_rpc/rpcserver, json_rpc/rpcserver,
web3/[conversions], web3/[conversions],
web3/engine_api_types as web3types, ../beacon/api_handler,
eth/rlp, ../beacon/beacon_engine,
eth/common/eth_types, ../beacon/web3_eth_conv,
eth/common/eth_types_rlp, ../beacon/execution_types
../common/common,
".."/core/chain/[chain_desc, persist_blocks],
".."/stateless_runner,
../constants,
../core/[tx_pool, sealer],
../evm/async/data_sources,
./merge/[mergetypes, mergeutils],
./execution_types,
# put chronicles import last because Nim
# compiler resolve `$` for logging
# arguments differently on Windows vs posix
# if chronicles import is in the middle
chronicles
{.push raises: [].} {.push raises: [].}
type
BlockHeader = eth_types.BlockHeader
Hash256 = eth_types.Hash256
Web3Blob = web3types.Blob
Web3KZGProof = web3types.KZGProof
Web3KZGCommitment = web3types.KZGCommitment
proc latestValidHash(db: CoreDbRef, parent: EthBlockHeader, ttd: DifficultyInt): Hash256
{.gcsafe, raises: [RlpError].} =
let ptd = db.getScore(parent.parentHash)
if ptd >= ttd:
parent.blockHash
else:
# If the most recent valid ancestor is a PoW block,
# latestValidHash MUST be set to ZERO
Hash256()
proc invalidFCU(com: CommonRef, header: EthBlockHeader): ForkchoiceUpdatedResponse
{.gcsafe, raises: [RlpError].} =
var parent: EthBlockHeader
if not com.db.getBlockHeader(header.parentHash, parent):
return invalidFCU(Hash256())
let blockHash = latestValidHash(com.db, parent, com.ttd.get(high(common.BlockNumber)))
invalidFCU(blockHash)
template unsafeQuantityToInt64(q: Quantity): int64 =
int64 q
# I created these handle_whatever procs to eliminate duplicated code
# between the V1 and V2 RPC endpoint implementations. (I believe
# they're meant to be implementable in that way. e.g. The V2 specs
# explicitly say "here's what to do if the `withdrawals` field is
# null.") --Adam
# https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1
proc handle_newPayload(sealingEngine: SealingEngineRef,
api: EngineApiRef,
com: CommonRef, maybeAsyncDataSource: Option[AsyncDataSource],
payload: SomeExecutionPayload): PayloadStatusV1 {.raises: [CatchableError].} =
trace "Engine API request received",
meth = "newPayload", number = $(distinctBase payload.blockNumber), hash = payload.blockHash
if com.isShanghaiOrLater(fromUnix(payload.timestamp.unsafeQuantityToInt64)):
when not(payload is ExecutionPayloadV2):
raise invalidParams("if timestamp is Shanghai or later, " &
"payload must be ExecutionPayloadV2")
else:
when not(payload is ExecutionPayloadV1):
if com.syncReqRelaxV2:
trace "Relaxed mode, treating payload as V1"
discard
else:
raise invalidParams("if timestamp is earlier than Shanghai, " &
"payload must be ExecutionPayloadV1")
var header = toBlockHeader(payload)
let blockHash = payload.blockHash.asEthHash
var res = header.validateBlockHash(blockHash)
if res.isErr:
return res.error
let db = sealingEngine.chain.db
# If we already have the block locally, ignore the entire execution and just
# return a fake success.
if db.getBlockHeader(blockHash, header):
warn "Ignoring already known beacon payload",
number = header.blockNumber, hash = blockHash
return validStatus(blockHash)
# FIXME-Adam - I'm adding this here, but I don't actually think this is the right place.
# For one thing, it won't even persist the new block. But let's worry about persisting
# after I've gotten a block to come out actually correct. --Adam
if maybeAsyncDataSource.isSome:
let r = statelesslyRunBlock(maybeAsyncDataSource.get, com, header, toBlockBody(payload))
if r.isErr:
error "Stateless execution failed", error=r.error
return invalidStatus()
else:
return validStatus(r.get)
# If the parent is missing, we - in theory - could trigger a sync, but that
# would also entail a reorg. That is problematic if multiple sibling blocks
# are being fed to us, and even more so, if some semi-distant uncle shortens
# our live chain. As such, payload execution will not permit reorgs and thus
# will not trigger a sync cycle. That is fine though, if we get a fork choice
# update after legit payload executions.
var parent: EthBlockHeader
if not db.getBlockHeader(header.parentHash, parent):
# Stash the block away for a potential forced forkchoice update to it
# at a later time.
api.put(blockHash, header)
# Although we don't want to trigger a sync, if there is one already in
# progress, try to extend it with the current payload request to relieve
# some strain from the forkchoice update.
#if err := api.eth.Downloader().BeaconExtend(api.eth.SyncMode(), block.Header()); err == nil {
# log.Debug("Payload accepted for sync extension", "number", params.Number, "hash", params.BlockHash)
# return beacon.PayloadStatusV1{Status: beacon.SYNCING}, nil
# Either no beacon sync was started yet, or it rejected the delivered
# payload as non-integratable on top of the existing sync. We'll just
# have to rely on the beacon client to forcefully update the head with
# a forkchoice update request.
warn "Ignoring payload with missing parent",
number = header.blockNumber,
hash = blockHash,
parent = header.parentHash
return acceptedStatus()
# We have an existing parent, do some sanity checks to avoid the beacon client
# triggering too early
let ttd = com.ttd.get(high(common.BlockNumber))
when payload is ExecutionPayloadV1:
let td = db.getScore(header.parentHash)
if (not com.forkGTE(MergeFork)) and td < ttd:
warn "Ignoring pre-merge payload",
number = header.blockNumber, hash = blockHash, td, ttd
return invalidStatus()
if header.timestamp <= parent.timestamp:
warn "Invalid timestamp",
parent = parent.timestamp, header = header.timestamp
return invalidStatus(db.getHeadBlockHash(), "Invalid timestamp")
if not db.haveBlockAndState(header.parentHash):
api.put(blockHash, header)
warn "State not available, ignoring new payload",
hash = blockHash,
number = header.blockNumber
let blockHash = latestValidHash(db, parent, ttd)
return acceptedStatus(blockHash)
trace "Inserting block without sethead",
hash = blockHash, number = header.blockNumber
let body = toBlockBody(payload)
let vres = sealingEngine.chain.insertBlockWithoutSetHead(header, body)
if vres != ValidationResult.OK:
let blockHash = latestValidHash(db, parent, ttd)
return invalidStatus(blockHash, "Failed to insert block")
# We've accepted a valid payload from the beacon client. Mark the local
# chain transitions to notify other subsystems (e.g. downloader) of the
# behavioral change.
if not api.merger.ttdReached():
api.merger.reachTTD()
# TODO: cancel downloader
return validStatus(blockHash)
# https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_getpayloadv1
proc handle_getPayload(api: EngineApiRef, payloadId: PayloadID): GetPayloadV2Response {.raises: [CatchableError].} =
trace "Engine API request received",
meth = "GetPayload", id = payloadId.toHex
var payload: ExecutionPayloadV1OrV2
var blockValue: UInt256
if not api.get(payloadId, blockValue, payload):
raise unknownPayload("Unknown payload")
return GetPayloadV2Response(
executionPayload: payload,
blockValue: blockValue
)
proc handle_getPayloadV3(api: EngineApiRef, com: CommonRef, payloadId: PayloadID): GetPayloadV3Response {.raises: [CatchableError].} =
trace "Engine API request received",
meth = "GetPayload", id = payloadId.toHex
var payload: ExecutionPayloadV3
var blockValue: UInt256
if not api.get(payloadId, blockValue, payload):
raise unknownPayload("Unknown payload")
if not com.isCancunOrLater(fromUnix(payload.timestamp.unsafeQuantityToInt64)):
raise unsupportedFork("payload timestamp is less than Cancun activation")
var
blobsBundle: BlobsBundleV1
try:
for ttx in payload.transactions:
let tx = rlp.decode(distinctBase(ttx), Transaction)
if tx.networkPayload.isNil.not:
for blob in tx.networkPayload.blobs:
blobsBundle.blobs.add Web3Blob(blob)
for p in tx.networkPayload.proofs:
blobsBundle.proofs.add Web3KZGProof(p)
for k in tx.networkPayload.commitments:
blobsBundle.commitments.add Web3KZGCommitment(k)
except RlpError:
doAssert(false, "found TypedTransaction that RLP failed to decode")
return GetPayloadV3Response(
executionPayload: payload,
blockValue: blockValue,
blobsBundle: blobsBundle
)
# https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_exchangetransitionconfigurationv1
proc handle_exchangeTransitionConfiguration(sealingEngine: SealingEngineRef, com: CommonRef, conf: TransitionConfigurationV1): TransitionConfigurationV1 {.raises: [CatchableError].} =
trace "Engine API request received",
meth = "exchangeTransitionConfigurationV1",
ttd = conf.terminalTotalDifficulty,
number = uint64(conf.terminalBlockNumber),
blockHash = conf.terminalBlockHash
let db = sealingEngine.chain.db
let ttd = com.ttd
if ttd.isNone:
raise newException(ValueError, "invalid ttd: EL (none) CL ($1)" % [$conf.terminalTotalDifficulty])
if conf.terminalTotalDifficulty != ttd.get:
raise newException(ValueError, "invalid ttd: EL ($1) CL ($2)" % [$ttd.get, $conf.terminalTotalDifficulty])
let terminalBlockNumber = uint64(conf.terminalBlockNumber).toBlockNumber
let terminalBlockHash = conf.terminalBlockHash.asEthHash
if terminalBlockHash != Hash256():
var headerHash: Hash256
if not db.getBlockHash(terminalBlockNumber, headerHash):
raise newException(ValueError, "cannot get terminal block hash, number $1" %
[$terminalBlockNumber])
if terminalBlockHash != headerHash:
raise newException(ValueError, "invalid terminal block hash, got $1 want $2" %
[$terminalBlockHash, $headerHash])
var header: EthBlockHeader
if not db.getBlockHeader(headerHash, header):
raise newException(ValueError, "cannot get terminal block header, hash $1" %
[$terminalBlockHash])
return TransitionConfigurationV1(
terminalTotalDifficulty: ttd.get,
terminalBlockHash : BlockHash headerHash.data,
terminalBlockNumber : Quantity header.blockNumber.truncate(uint64)
)
if terminalBlockNumber != 0:
raise newException(ValueError, "invalid terminal block number: $1" % [$terminalBlockNumber])
if terminalBlockHash != Hash256():
raise newException(ValueError, "invalid terminal block hash, no terminal header set")
return TransitionConfigurationV1(terminalTotalDifficulty: ttd.get)
# ForkchoiceUpdated has several responsibilities:
# If the method is called with an empty head block:
# we return success, which can be used to check if the catalyst mode is enabled
# If the total difficulty was not reached:
# we return INVALID
# If the finalizedBlockHash is set:
# we check if we have the finalizedBlockHash in our db, if not we start a sync
# We try to set our blockchain to the headBlock
# If there are payloadAttributes:
# we try to assemble a block with the payloadAttributes and return its payloadID
# https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_forkchoiceupdatedv2
proc handle_forkchoiceUpdated(sealingEngine: SealingEngineRef,
com: CommonRef, api: EngineApiRef,
update: ForkchoiceStateV1,
payloadAttributes: SomeOptionalPayloadAttributes): ForkchoiceUpdatedResponse {.raises: [CatchableError].} =
if payloadAttributes.isSome:
let attr = payloadAttributes.get
if com.isCancunOrLater(fromUnix(attr.timestamp.unsafeQuantityToInt64)):
when not(payloadAttributes is Option[PayloadAttributesV3]):
raise invalidParams("if timestamp is Cancun or later, payloadAttributes must be PayloadAttributesV3")
elif com.isShanghaiOrLater(fromUnix(attr.timestamp.unsafeQuantityToInt64)):
when not(payloadAttributes is Option[PayloadAttributesV2]):
raise invalidParams("if timestamp is Shanghai or later, payloadAttributes must be PayloadAttributesV2")
else:
when not(payloadAttributes is Option[PayloadAttributesV1]):
raise invalidParams("if timestamp is earlier than Shanghai, payloadAttributes must be PayloadAttributesV1")
let
chain = sealingEngine.chain
db = chain.db
blockHash = update.headBlockHash.asEthHash
if blockHash == Hash256():
warn "Forkchoice requested update to zero hash"
return simpleFCU(PayloadExecutionStatus.invalid)
# Check whether we have the block yet in our database or not. If not, we'll
# need to either trigger a sync, or to reject this forkchoice update for a
# reason.
var header: EthBlockHeader
if not db.getBlockHeader(blockHash, header):
# If the head hash is unknown (was not given to us in a newPayload request),
# we cannot resolve the header, so not much to do. This could be extended in
# the future to resolve from the `eth` network, but it's an unexpected case
# that should be fixed, not papered over.
if not api.get(blockHash, header):
warn "Forkchoice requested unknown head",
hash = blockHash
return simpleFCU(PayloadExecutionStatus.syncing)
# Header advertised via a past newPayload request. Start syncing to it.
# Before we do however, make sure any legacy sync is switched off so we
# don't accidentally have 2 cycles running.
if not api.merger.ttdReached():
api.merger.reachTTD()
# TODO: cancel downloader
info "Forkchoice requested sync to new head",
number = header.blockNumber,
hash = blockHash
# Update sync header (if any)
com.syncReqNewHead(header)
return simpleFCU(PayloadExecutionStatus.syncing)
# Block is known locally, just sanity check that the beacon client does not
# attempt to push us back to before the merge.
let blockNumber = header.blockNumber.truncate(uint64)
if header.difficulty > 0.u256 or blockNumber == 0'u64:
var
td, ptd: DifficultyInt
ttd = com.ttd.get(high(common.BlockNumber))
if not db.getTd(blockHash, td) or (blockNumber > 0'u64 and not db.getTd(header.parentHash, ptd)):
error "TDs unavailable for TTD check",
number = blockNumber,
hash = blockHash,
td = td,
parent = header.parentHash,
ptd = ptd
return simpleFCU(PayloadExecutionStatus.invalid, "TDs unavailable for TTD check")
if td < ttd or (blockNumber > 0'u64 and ptd > ttd):
error "Refusing beacon update to pre-merge",
number = blockNumber,
hash = blockHash,
diff = header.difficulty,
ptd = ptd,
ttd = ttd
return invalidFCU()
# If the head block is already in our canonical chain, the beacon client is
# probably resyncing. Ignore the update.
var canonHash: Hash256
if db.getBlockHash(header.blockNumber, canonHash) and canonHash == blockHash:
# TODO should this be possible?
# If we allow these types of reorgs, we will do lots and lots of reorgs during sync
warn "Reorg to previous block"
if chain.setCanonical(header) != ValidationResult.OK:
return invalidFCU(com, header)
elif chain.setCanonical(header) != ValidationResult.OK:
return invalidFCU(com, header)
# If the beacon client also advertised a finalized block, mark the local
# chain final and completely in PoS mode.
let finalizedBlockHash = update.finalizedBlockHash.asEthHash
if finalizedBlockHash != Hash256():
if not api.merger.posFinalized:
api.merger.finalizePoS()
# TODO: If the finalized block is not in our canonical tree, something's wrong
var finalBlock: EthBlockHeader
if not db.getBlockHeader(finalizedBlockHash, finalBlock):
warn "Final block not available in database",
hash=finalizedBlockHash
raise invalidParams("finalized block header not available")
var finalHash: Hash256
if not db.getBlockHash(finalBlock.blockNumber, finalHash):
warn "Final block not in canonical chain",
number=finalBlock.blockNumber,
hash=finalizedBlockHash
raise invalidParams("finalized block hash not available")
if finalHash != finalizedBlockHash:
warn "Final block not in canonical chain",
number=finalBlock.blockNumber,
expect=finalizedBlockHash,
get=finalHash
raise invalidParams("finalized block not canonical")
db.finalizedHeaderHash(finalizedBlockHash)
let safeBlockHash = update.safeBlockHash.asEthHash
if safeBlockHash != Hash256():
var safeBlock: EthBlockHeader
if not db.getBlockHeader(safeBlockHash, safeBlock):
warn "Safe block not available in database",
hash = safeBlockHash
raise invalidParams("safe head not available")
var safeHash: Hash256
if not db.getBlockHash(safeBlock.blockNumber, safeHash):
warn "Safe block hash not available in database",
hash = safeHash
raise invalidParams("safe block hash not available")
if safeHash != safeBlockHash:
warn "Safe block not in canonical chain",
blockNumber=safeBlock.blockNumber,
expect=safeBlockHash,
get=safeHash
raise invalidParams("safe head not canonical")
db.safeHeaderHash(safeBlockHash)
# If payload generation was requested, create a new block to be potentially
# sealed by the beacon client. The payload will be requested later, and we
# might replace it arbitrarily many times in between.
if payloadAttributes.isSome:
let payloadAttrs = payloadAttributes.get()
let res = sealingEngine.generateExecutionPayload(payloadAttrs)
if res.isErr:
error "Failed to create sealing payload", err = res.error
raise invalidAttr(res.error)
let payload = res.get
let id = computePayloadId(blockHash, payloadAttrs)
api.put(id, sealingEngine.blockValue, payload)
info "Created payload for sealing",
id = id.toHex,
hash = payload.blockHash,
number = payload.blockNumber.uint64
return validFCU(some(id), blockHash)
return validFCU(none(PayloadID), blockHash)
func toHash(value: array[32, byte]): Hash256 =
result.data = value
const
maxBodyRequest = 32
proc getPayloadBodyByHeader(db: CoreDbRef,
header: BlockHeader,
output: var seq[Option[ExecutionPayloadBodyV1]]) {.raises: [CatchableError].} =
var body: BlockBody
if not db.getBlockBody(header, body):
output.add none(ExecutionPayloadBodyV1)
return
var typedTransactions: seq[TypedTransaction]
for tx in body.transactions:
typedTransactions.add(tx.toTypedTransaction)
var withdrawals: seq[WithdrawalV1]
if body.withdrawals.isSome:
for w in body.withdrawals.get:
withdrawals.add(w.toWithdrawalV1)
output.add(
some(ExecutionPayloadBodyV1(
transactions: typedTransactions,
# pre Shanghai block return null withdrawals
# post Shanghai block return at least empty slice
withdrawals: if header.withdrawalsRoot.isSome:
some(withdrawals)
else:
none(seq[WithdrawalV1])
))
)
proc handle_getPayloadBodiesByHash(com: CommonRef,
hashes: seq[BlockHash]):
seq[Option[ExecutionPayloadBodyV1]] {.raises: [CatchableError].} =
if hashes.len > maxBodyRequest:
raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest)
let db = com.db
var header: BlockHeader
for h in hashes:
if not db.getBlockHeader(toHash(distinctBase(h)), header):
result.add none(ExecutionPayloadBodyV1)
continue
db.getPayloadBodyByHeader(header, result)
proc handle_getPayloadBodiesByRange(com: CommonRef,
start: uint64, count: uint64):
seq[Option[ExecutionPayloadBodyV1]] {.raises: [CatchableError].} =
if start == 0:
raise invalidParams("start block should be greater than zero")
if count == 0:
raise invalidParams("block count should be greater than zero")
if count > maxBodyRequest:
raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest)
let db = com.db
var header: BlockHeader
var last = start+count-1
let current = com.syncCurrent.truncate(uint64)
if last > current:
last = current
for bn in start..last:
if not db.getBlockHeader(bn.toBlockNumber, header):
result.add none(ExecutionPayloadBodyV1)
continue
db.getPayloadBodyByHeader(header, result)
const supportedMethods: HashSet[string] = const supportedMethods: HashSet[string] =
toHashSet([ toHashSet([
"engine_newPayloadV1", "engine_newPayloadV1",
@ -554,98 +38,53 @@ const supportedMethods: HashSet[string] =
# I'm trying to keep the handlers below very thin, and move the # I'm trying to keep the handlers below very thin, and move the
# bodies up to the various procs above. Once we have multiple # bodies up to the various procs above. Once we have multiple
# versions, they'll need to be able to share code. # versions, they'll need to be able to share code.
proc setupEngineAPI*( proc setupEngineAPI*(engine: BeaconEngineRef, server: RpcServer) =
sealingEngine: SealingEngineRef,
server: RpcServer,
merger: MergerRef,
maybeAsyncDataSource: Option[AsyncDataSource] = none[AsyncDataSource]()) =
let
api = EngineApiRef.new(merger)
com = sealingEngine.chain.com
server.rpc("engine_exchangeCapabilities") do(methods: seq[string]) -> seq[string]: server.rpc("engine_exchangeCapabilities") do(methods: seq[string]) -> seq[string]:
return methods.filterIt(supportedMethods.contains(it)) return methods.filterIt(supportedMethods.contains(it))
# cannot use `params` as param name. see https://github.com/status-im/nim-json-rpc/issues/128
server.rpc("engine_newPayloadV1") do(payload: ExecutionPayloadV1) -> PayloadStatusV1: server.rpc("engine_newPayloadV1") do(payload: ExecutionPayloadV1) -> PayloadStatusV1:
return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, payload) return engine.newPayload(payload.executionPayload)
server.rpc("engine_newPayloadV2") do(payload: ExecutionPayload) -> PayloadStatusV1: server.rpc("engine_newPayloadV2") do(payload: ExecutionPayload) -> PayloadStatusV1:
if payload.version == Version.V1: return engine.newPayload(payload)
return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, payload.V1)
else:
return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, payload.V2)
server.rpc("engine_newPayloadV3") do(payload: ExecutionPayload, server.rpc("engine_newPayloadV3") do(payload: ExecutionPayload,
expectedBlobVersionedHashes: seq[FixedBytes[32]], expectedBlobVersionedHashes: seq[Web3Hash],
parentBeaconBlockRoot: FixedBytes[32]) -> PayloadStatusV1: parentBeaconBlockRoot: Web3Hash) -> PayloadStatusV1:
case payload.version: if not validateVersionedHashed(payload, expectedBlobVersionedHashes):
of Version.V1: return invalidStatus()
return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, payload.V1) return engine.newPayload(payload, some(parentBeaconBlockRoot))
of Version.V2:
return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, payload.V2)
of Version.V3:
if not com.isCancunOrLater(fromUnix(payload.timestamp.unsafeQuantityToInt64)):
raise unsupportedFork("payload timestamp is less than Cancun activation")
var versionedHashes: seq[Hash256]
for x in payload.transactions:
let tx = rlp.decode(distinctBase(x), Transaction)
versionedHashes.add tx.versionedHashes
for i, x in expectedBlobVersionedHashes:
if distinctBase(x) != versionedHashes[i].data:
return invalidStatus()
return handle_newPayload(sealingEngine, api, com, maybeAsyncDataSource, payload.V3)
server.rpc("engine_getPayloadV1") do(payloadId: PayloadID) -> ExecutionPayloadV1: server.rpc("engine_getPayloadV1") do(payloadId: PayloadID) -> ExecutionPayloadV1:
let r = handle_getPayload(api, payloadId) return engine.getPayload(payloadId).executionPayload.V1
return r.executionPayload.toExecutionPayloadV1
server.rpc("engine_getPayloadV2") do(payloadId: PayloadID) -> GetPayloadV2Response: server.rpc("engine_getPayloadV2") do(payloadId: PayloadID) -> GetPayloadV2Response:
return handle_getPayload(api, payloadId) return engine.getPayload(payloadId)
server.rpc("engine_getPayloadV3") do(payloadId: PayloadID) -> GetPayloadV3Response: server.rpc("engine_getPayloadV3") do(payloadId: PayloadID) -> GetPayloadV3Response:
return handle_getPayloadV3(api, com, payloadId) return engine.getPayloadV3(payloadId)
server.rpc("engine_exchangeTransitionConfigurationV1") do(conf: TransitionConfigurationV1) -> TransitionConfigurationV1: server.rpc("engine_exchangeTransitionConfigurationV1") do(
return handle_exchangeTransitionConfiguration(sealingEngine, com, conf) conf: TransitionConfigurationV1) -> TransitionConfigurationV1:
return engine.exchangeConf(conf)
server.rpc("engine_forkchoiceUpdatedV1") do( server.rpc("engine_forkchoiceUpdatedV1") do(update: ForkchoiceStateV1,
update: ForkchoiceStateV1, attrs: Option[PayloadAttributesV1]) -> ForkchoiceUpdatedResponse:
payloadAttributes: Option[PayloadAttributesV1]) -> ForkchoiceUpdatedResponse: return engine.forkchoiceUpdated(update, attrs.payloadAttributes)
return handle_forkchoiceUpdated(sealingEngine, com, api, update, payloadAttributes)
server.rpc("engine_forkchoiceUpdatedV2") do( server.rpc("engine_forkchoiceUpdatedV2") do(update: ForkchoiceStateV1,
update: ForkchoiceStateV1, attrs: Option[PayloadAttributes]) -> ForkchoiceUpdatedResponse:
payloadAttributes: Option[PayloadAttributes]) -> ForkchoiceUpdatedResponse: return engine.forkchoiceUpdated(update, attrs)
if payloadAttributes.isNone:
return handle_forkchoiceUpdated(sealingEngine, com, api, update, none[PayloadAttributesV2]())
else:
let attr = payloadAttributes.get
if attr.version == Version.V1:
return handle_forkchoiceUpdated(sealingEngine, com, api, update, some(attr.V1))
else:
return handle_forkchoiceUpdated(sealingEngine, com, api, update, some(attr.V2))
server.rpc("engine_forkchoiceUpdatedV3") do( server.rpc("engine_forkchoiceUpdatedV3") do(update: ForkchoiceStateV1,
update: ForkchoiceStateV1, attrs: Option[PayloadAttributes]) -> ForkchoiceUpdatedResponse:
payloadAttributes: Option[PayloadAttributes]) -> ForkchoiceUpdatedResponse: return engine.forkchoiceUpdated(update, attrs)
if payloadAttributes.isNone:
return handle_forkchoiceUpdated(sealingEngine, com, api, update, none[PayloadAttributesV3]())
else:
let attr = payloadAttributes.get
case attr.version
of Version.V1:
return handle_forkchoiceUpdated(sealingEngine, com, api, update, some(attr.V1))
of Version.V2:
return handle_forkchoiceUpdated(sealingEngine, com, api, update, some(attr.V2))
of Version.V3:
return handle_forkchoiceUpdated(sealingEngine, com, api, update, some(attr.V3))
server.rpc("engine_getPayloadBodiesByHashV1") do( server.rpc("engine_getPayloadBodiesByHashV1") do(hashes: seq[Web3Hash]) ->
hashes: seq[BlockHash]) -> seq[Option[ExecutionPayloadBodyV1]]: seq[Option[ExecutionPayloadBodyV1]]:
return handle_getPayloadBodiesByHash(com, hashes) return engine.getPayloadBodiesByHash(hashes)
server.rpc("engine_getPayloadBodiesByRangeV1") do( server.rpc("engine_getPayloadBodiesByRangeV1") do(
start: Quantity, count: Quantity) -> seq[Option[ExecutionPayloadBodyV1]]: start: Quantity, count: Quantity) -> seq[Option[ExecutionPayloadBodyV1]]:
return handle_getPayloadBodiesByRange(com, start.uint64, count.uint64) return engine.getPayloadBodiesByRange(start.uint64, count.uint64)
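As an illustration only, a hedged client-side probe of the capability handler registered above, using the same json_rpc call pattern as the test code later in this diff (client is an assumed, already connected RpcClient; the method list is just an example):

  let res = waitFor client.call("engine_exchangeCapabilities",
    %* [["engine_newPayloadV1", "engine_forkchoiceUpdatedV1"]])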

View File

@ -1,76 +0,0 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
{.push raises: [].}
import
chronicles,
eth/rlp,
../../db/[core_db, storage_types]
type
# TransitionStatus describes the status of the eth1/2 transition. The switch
# between modes is a one-way action which is triggered by the corresponding
# consensus-layer message.
TransitionStatus = object
leftPoW : bool # The flag is set when the first NewHead message is received
enteredPoS: bool # The flag is set when the first FinalisedBlock message is received
# Merger is an internal helper structure used to track the eth1/2 transition status.
# It's a common structure that can be used in both full nodes and light clients.
MergerRef* = ref object
db : CoreDbRef
status: TransitionStatus
proc writeStatus(db: CoreDbRef, status: TransitionStatus) {.gcsafe, raises: [].} =
db.kvt.put(transitionStatusKey().toOpenArray(), rlp.encode(status))
proc readStatus(db: CoreDbRef): TransitionStatus {.gcsafe, raises: [].} =
var bytes = db.kvt.get(transitionStatusKey().toOpenArray())
if bytes.len > 0:
try:
result = rlp.decode(bytes, typeof result)
except CatchableError:
error "Failed to decode POS transition status"
proc new*(_: type MergerRef, db: CoreDbRef): MergerRef {.gcsafe, raises: [].} =
MergerRef(
db: db,
status: db.readStatus()
)
# ReachTTD is called whenever the first NewHead message is received
# from the consensus layer.
proc reachTTD*(m: MergerRef) {.gcsafe, raises: [].} =
if m.status.leftPoW:
return
m.status = TransitionStatus(leftPoW: true)
m.db.writeStatus(m.status)
info "Left PoW stage"
# FinalizePoS is called whenever the first FinalisedBlock message is received
# from the consensus layer.
proc finalizePoS*(m: MergerRef) {.gcsafe, raises: [].} =
if m.status.enteredPoS:
return
m.status = TransitionStatus(leftPoW: true, enteredPoS: true)
m.db.writeStatus(m.status)
info "Entered PoS stage"
# TTDReached reports whether the chain has left the PoW stage.
proc ttdReached*(m: MergerRef): bool =
m.status.leftPoW
# PoSFinalized reports whether the chain has entered the PoS stage.
proc posFinalized*(m: MergerRef): bool =
m.status.enteredPoS
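A small usage sketch of the removed module above, assuming an existing CoreDbRef (com.db stands in for it); the transition procs are idempotent, so repeated consensus-layer messages are harmless:

  let merger = MergerRef.new(com.db)
  if not merger.ttdReached():
    merger.reachTTD()      # first NewHead from the consensus layer
  if not merger.posFinalized():
    merger.finalizePoS()   # first FinalisedBlock from the consensus layer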

View File

@ -1,253 +0,0 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
# at your option.
# This file may not be copied, modified, or distributed except according to
# those terms.
import
std/[typetraits, times, strutils],
eth/[rlp, common],
json_rpc/errors,
nimcrypto/[hash, sha2],
stew/[results, byteutils],
web3/engine_api_types,
../../constants,
../../db/core_db,
../../utils/utils,
../../rpc/execution_types,
./mergetypes
type Hash256 = eth_types.Hash256
proc computePayloadId*(headBlockHash: Hash256, params: SomePayloadAttributes): PayloadID =
var dest: Hash256
var ctx: sha256
ctx.init()
ctx.update(headBlockHash.data)
ctx.update(toBytesBE distinctBase params.timestamp)
ctx.update(distinctBase params.prevRandao)
ctx.update(distinctBase params.suggestedFeeRecipient)
# FIXME-Adam: Do we need to include the withdrawals in this calculation?
# https://github.com/ethereum/go-ethereum/pull/25838#discussion_r1024340383
# "The execution api specs define that this ID can be completely random. It
# used to be derived from payload attributes in the past, but maybe it's
# time to use a randomized ID to not break it with any changes to the
# attributes?"
ctx.finish dest.data
ctx.clear()
(distinctBase result)[0..7] = dest.data[0..7]
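Illustrative call of the helper above, mirroring how the forkchoice-updated handler stores a freshly built payload under its ID (the variable names are placeholders for values available at that point):

  let id = computePayloadId(headBlockHash, payloadAttrs)
  api.put(id, blockValue, payload)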
proc append*(w: var RlpWriter, q: Quantity) =
w.append(uint64(q))
proc append*(w: var RlpWriter, a: Address) =
w.append(distinctBase(a))
template unsafeQuantityToInt64(q: Quantity): int64 =
int64 q
template asEthHash*(hash: engine_api_types.BlockHash): Hash256 =
Hash256(data: distinctBase(hash))
proc calcRootHashRlp*(items: openArray[seq[byte]]): Hash256 =
var tr = newCoreDbRef(LegacyDbMemory).mptPrune
for i, t in items:
tr.put(rlp.encode(i), t)
return tr.rootHash()
proc toWithdrawal*(w: WithdrawalV1): Withdrawal =
Withdrawal(
index: uint64(w.index),
validatorIndex: uint64(w.validatorIndex),
address: distinctBase(w.address),
amount: uint64(w.amount) # AARDVARK: is this wei or gwei or what?
)
proc toWithdrawalV1*(w: Withdrawal): WithdrawalV1 =
WithdrawalV1(
index: Quantity(w.index),
validatorIndex: Quantity(w.validatorIndex),
address: Address(w.address),
amount: Quantity(w.amount) # AARDVARK: is this wei or gwei or what?
)
proc maybeWithdrawalsRoot(payload: SomeExecutionPayload): Option[Hash256] =
when payload is ExecutionPayloadV1:
none(Hash256)
else:
var wds = newSeqOfCap[Withdrawal](payload.withdrawals.len)
for wd in payload.withdrawals:
wds.add toWithdrawal(wd)
some(utils.calcWithdrawalsRoot(wds))
proc toWithdrawals(withdrawals: openArray[WithdrawalV1]): seq[Withdrawal] =
result = newSeqOfCap[Withdrawal](withdrawals.len)
for wd in withdrawals:
result.add toWithdrawal(wd)
proc maybeBlobGasUsed(payload: SomeExecutionPayload): Option[uint64] =
when payload is ExecutionPayloadV3:
some(payload.blobGasUsed.uint64)
else:
none(uint64)
proc maybeExcessBlobGas(payload: SomeExecutionPayload): Option[uint64] =
when payload is ExecutionPayloadV3:
some(payload.excessBlobGas.uint64)
else:
none(uint64)
proc toBlockHeader*(payload: SomeExecutionPayload): EthBlockHeader =
let transactions = seq[seq[byte]](payload.transactions)
let txRoot = calcRootHashRlp(transactions)
EthBlockHeader(
parentHash : payload.parentHash.asEthHash,
ommersHash : EMPTY_UNCLE_HASH,
coinbase : EthAddress payload.feeRecipient,
stateRoot : payload.stateRoot.asEthHash,
txRoot : txRoot,
receiptRoot : payload.receiptsRoot.asEthHash,
bloom : distinctBase(payload.logsBloom),
difficulty : default(DifficultyInt),
blockNumber : payload.blockNumber.distinctBase.u256,
gasLimit : payload.gasLimit.unsafeQuantityToInt64,
gasUsed : payload.gasUsed.unsafeQuantityToInt64,
timestamp : fromUnix payload.timestamp.unsafeQuantityToInt64,
extraData : bytes payload.extraData,
mixDigest : payload.prevRandao.asEthHash, # EIP-4399 redefine `mixDigest` -> `prevRandao`
nonce : default(BlockNonce),
fee : some payload.baseFeePerGas,
withdrawalsRoot: payload.maybeWithdrawalsRoot, # EIP-4895
blobGasUsed : payload.maybeBlobGasUsed, # EIP-4844
excessBlobGas : payload.maybeExcessBlobGas, # EIP-4844
)
proc toBlockHeader*(payload: ExecutionPayload): EthBlockHeader =
case payload.version
of Version.V1: toBlockHeader(payload.V1)
of Version.V2: toBlockHeader(payload.V2)
of Version.V3: toBlockHeader(payload.V3)
proc toTypedTransaction*(tx: Transaction): TypedTransaction =
TypedTransaction(rlp.encode(tx))
proc toBlockBody*(payload: SomeExecutionPayload): BlockBody =
result.transactions.setLen(payload.transactions.len)
for i, tx in payload.transactions:
result.transactions[i] = rlp.decode(distinctBase tx, Transaction)
when payload is ExecutionPayloadV2:
result.withdrawals = some(payload.withdrawals.toWithdrawals)
when payload is ExecutionPayloadV3:
result.withdrawals = some(payload.withdrawals.toWithdrawals)
proc `$`*(x: BlockHash): string =
toHex(x)
template toValidHash*(x: Hash256): Option[BlockHash] =
some(BlockHash(x.data))
proc validateBlockHash*(header: EthBlockHeader, gotHash: Hash256): Result[void, PayloadStatusV1] =
let wantHash = header.blockHash
if wantHash != gotHash:
let status = PayloadStatusV1(
# This used to say invalid_block_hash, but see here:
# https://github.com/ethereum/execution-apis/blob/main/src/engine/shanghai.md#engine_newpayloadv2
# "INVALID_BLOCK_HASH status value is supplanted by INVALID."
status: PayloadExecutionStatus.invalid,
validationError: some("blockhash mismatch, want $1, got $2" % [$wantHash, $gotHash])
)
return err(status)
return ok()
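A hedged sketch of how this check is consumed by handle_newPayload above: the payload is first converted to a header and the hash is recomputed before any execution is attempted:

  var header = toBlockHeader(payload)
  let res = header.validateBlockHash(payload.blockHash.asEthHash)
  if res.isErr:
    return res.error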
proc simpleFCU*(status: PayloadExecutionStatus): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(payloadStatus: PayloadStatusV1(status: status))
proc simpleFCU*(status: PayloadExecutionStatus, msg: string): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(
payloadStatus: PayloadStatusV1(
status: status,
validationError: some(msg)
)
)
proc invalidFCU*(hash: Hash256 = Hash256()): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(payloadStatus:
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(hash)
)
)
proc validFCU*(id: Option[PayloadID], validHash: Hash256): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(
payloadStatus: PayloadStatusV1(
status: PayloadExecutionStatus.valid,
latestValidHash: toValidHash(validHash)
),
payloadId: id
)
proc invalidStatus*(validHash: Hash256, msg: string): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(validHash),
validationError: some(msg)
)
proc invalidStatus*(validHash: Hash256 = Hash256()): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(validHash)
)
proc acceptedStatus*(validHash: Hash256): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.accepted,
latestValidHash: toValidHash(validHash)
)
proc acceptedStatus*(): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.accepted
)
proc validStatus*(validHash: Hash256): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.valid,
latestValidHash: toValidHash(validHash)
)
proc invalidParams*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiInvalidParams,
msg: msg
)
proc unknownPayload*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiUnknownPayload,
msg: msg
)
proc invalidAttr*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiInvalidPayloadAttributes,
msg: msg
)
proc unsupportedFork*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiUnsupportedFork,
msg: msg
)
proc tooLargeRequest*(msg: string): ref InvalidRequest =
(ref InvalidRequest)(
code: engineApiTooLargeRequest,
msg: msg
)
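For completeness, a short sketch of how the request-error helpers above are raised from the RPC handlers in this file's callers (maxBodyRequest is the limit defined alongside those handlers):

  if hashes.len > maxBodyRequest:
    raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest)
  if start == 0:
    raise invalidParams("start block should be greater than zero")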

View File

@ -17,7 +17,7 @@ import
../nimbus/common, ../nimbus/common,
../nimbus/config, ../nimbus/config,
../nimbus/core/[sealer, tx_pool, chain], ../nimbus/core/[sealer, tx_pool, chain],
../nimbus/rpc/merge/[mergetypes, merger], ../nimbus/beacon/[beacon_engine, payload_queue],
./test_helpers ./test_helpers
const const
@ -67,7 +67,7 @@ proc getPayload(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) =
try: try:
let res = waitFor client.call(step.meth, step.params) let res = waitFor client.call(step.meth, step.params)
check toLowerAscii($res) == toLowerAscii($step.expect) check toLowerAscii($res) == toLowerAscii($step.expect)
except: except CatchableError:
check step.error == true check step.error == true
proc newPayload(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) = proc newPayload(step: Step, client: RpcClient, testStatusIMPL: var TestStatus) =
@ -97,10 +97,10 @@ proc runTest(steps: Steps) =
chainRef, ctx, conf.engineSigner, chainRef, ctx, conf.engineSigner,
txPool, EnginePostMerge txPool, EnginePostMerge
) )
merger = MergerRef.new(com.db) beaconEngine = BeaconEngineRef.new(txPool, chainRef)
setupEthRpc(ethNode, ctx, com, txPool, rpcServer) setupEthRpc(ethNode, ctx, com, txPool, rpcServer)
setupEngineAPI(sealingEngine, rpcServer, merger) setupEngineAPI(beaconEngine, rpcServer)
sealingEngine.start() sealingEngine.start()
rpcServer.start() rpcServer.start()
@ -138,14 +138,14 @@ proc `==`(a, b: Quantity): bool =
uint64(a) == uint64(b) uint64(a) == uint64(b)
proc testEngineApiSupport() = proc testEngineApiSupport() =
var api = EngineAPIRef.new(nil) var api = PayloadQueue()
let let
id1 = toId(1) id1 = toId(1)
id2 = toId(2) id2 = toId(2)
ep1 = ExecutionPayloadV1(gasLimit: Quantity 100) ep1 = ExecutionPayloadV1(gasLimit: Quantity 100)
ep2 = ExecutionPayloadV1(gasLimit: Quantity 101) ep2 = ExecutionPayloadV1(gasLimit: Quantity 101)
hdr1 = EthBlockHeader(gasLimit: 100) hdr1 = common.BlockHeader(gasLimit: 100)
hdr2 = EthBlockHeader(gasLimit: 101) hdr2 = common.BlockHeader(gasLimit: 101)
hash1 = hdr1.blockHash hash1 = hdr1.blockHash
hash2 = hdr2.blockHash hash2 = hdr2.blockHash
@ -165,7 +165,7 @@ proc testEngineApiSupport() =
test "test header queue": test "test header queue":
api.put(hash1, hdr1) api.put(hash1, hdr1)
api.put(hash2, hdr2) api.put(hash2, hdr2)
var eh1, eh2: EthBlockHeader var eh1, eh2: common.BlockHeader
check api.get(hash1, eh1) check api.get(hash1, eh1)
check api.get(hash2, eh2) check api.get(hash2, eh2)
check eh1.gasLimit == hdr1.gasLimit check eh1.gasLimit == hdr1.gasLimit
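A compact usage sketch of the PayloadQueue that replaces EngineApiRef in this test (names as in the test above; hash1/hdr1 are the same header/hash pair):

  var queue = PayloadQueue()
  queue.put(hash1, hdr1)
  var hdr: common.BlockHeader
  doAssert queue.get(hash1, hdr)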