Bump nim-eth, nim-web3, nimbus-eth2 (#2344)

* Bump nim-eth, nim-web3, nimbus-eth2

- Replace std.Option with results.Opt
- Field name changes in eth_types (e.g. blockNumber -> number, receiptRoot -> receiptsRoot); see the migration sketch below

* More fixes

* Fix Portal stream async raises and portal testnet Opt usage

* Bump eth + nimbus-eth2 + more fixes related to eth_types changes

* Fix in utp test app and nimbus-eth2 bump

* Fix test_blockchain_json rebase conflict

* Fix EVMC block_timestamp conversion plus commentary

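For illustration only (not part of this commit): a minimal Nim sketch of the std/options -> results.Opt migration and the eth_types field renames listed above. It assumes the nim-results package and uses a hypothetical DemoHeader type instead of the real eth/common BlockHeader.

# Hypothetical sketch: DemoHeader stands in for the real BlockHeader to show
# the renamed fields and the Opt usage pattern that recurs in the diffs below.
import results

type
  DemoHeader = object
    number: uint64              # was: blockNumber (a UInt256, truncated on use)
    receiptsRoot: string        # was: receiptRoot
    baseFeePerGas: Opt[uint64]  # was: fee: Option[UInt256]

proc feeOrZero(h: DemoHeader): uint64 =
  # Opt.some/Opt.none replace options.some/options.none; valueOr/get work as before
  h.baseFeePerGas.valueOr:
    0'u64

when isMainModule:
  let h = DemoHeader(
    number: 123'u64,
    receiptsRoot: "0xabc",
    baseFeePerGas: Opt.some(7'u64),
  )
  doAssert feeOrZero(h) == 7
  doAssert Opt.none(uint64).isNone

Separately, the Portal stream procs in the diffs below gain explicit {.async: (raises: [CancelledError]).} annotations, which is the async-raises fix mentioned above.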
---------

Co-authored-by: kdeme <kim.demey@gmail.com>
andri lim 2024-06-14 14:31:08 +07:00 committed by GitHub
parent 3b5a56fd32
commit 5a18537450
199 changed files with 1716 additions and 1691 deletions


@ -137,6 +137,7 @@ GIT_SUBMODULE_UPDATE := git -c submodule."vendor/nimbus-eth2".update=none submod
git submodule update --init vendor/sepolia; \
git submodule update --init vendor/gnosis-chain-configs; \
git submodule update --init --recursive vendor/nim-kzg4844; \
git submodule update --init vendor/mainnet; \
cd ../..
.DEFAULT:


@ -20,7 +20,7 @@ import
../network/history/accumulator
from nimcrypto/hash import fromHex
from ../../nimbus/utils/utils import calcTxRoot, calcReceiptRoot
from ../../nimbus/utils/utils import calcTxRoot, calcReceiptsRoot
export e2store.readRecord
@ -475,7 +475,7 @@ proc verify*(f: Era1File): Result[Digest, string] =
if blockHeader.ommersHash != ommershHash:
return err("Invalid ommers hash")
if blockHeader.receiptRoot != calcReceiptRoot(receipts):
if blockHeader.receiptsRoot != calcReceiptsRoot(receipts):
return err("Invalid receipts root")
headerRecords.add(


@ -210,9 +210,8 @@ proc writeHeaderRecord*(
writer: var JsonWriter, header: BlockHeader
) {.raises: [IOError].} =
let
dataRecord = HeaderRecord(
header: rlp.encode(header).to0xHex(), number: header.blockNumber.truncate(uint64)
)
dataRecord =
HeaderRecord(header: rlp.encode(header).to0xHex(), number: header.number)
headerHash = to0xHex(rlpHash(header).data)
@ -226,7 +225,7 @@ proc writeBlockRecord*(
header: rlp.encode(header).to0xHex(),
body: encode(body).to0xHex(),
receipts: encode(receipts).to0xHex(),
number: header.blockNumber.truncate(uint64),
number: header.number,
)
headerHash = to0xHex(rlpHash(header).data)


@ -280,7 +280,7 @@ iterator headersWithProof*(
).encode()
headerWithProof = buildHeaderWithProof(blockHeader, epochAccumulator).valueOr:
raiseAssert "Failed to build header with proof: " & $blockHeader.blockNumber
raiseAssert "Failed to build header with proof: " & $blockHeader.number
contentValue = SSZ.encode(headerWithProof)


@ -82,8 +82,7 @@ func getEpochAccumulatorRoot*(headerRecords: openArray[HeaderRecord]): Digest =
func updateAccumulator*(a: var Accumulator, header: BlockHeader) =
doAssert(
header.blockNumber.truncate(uint64) < mergeBlockNumber,
"No post merge blocks for header accumulator",
header.number < mergeBlockNumber, "No post merge blocks for header accumulator"
)
let lastTotalDifficulty =
@ -126,9 +125,8 @@ func getEpochIndex*(blockNumber: uint64): uint64 =
blockNumber div epochSize
func getEpochIndex*(header: BlockHeader): uint64 =
let blockNumber = header.blockNumber.truncate(uint64)
## Get the index for the historical epochs
getEpochIndex(blockNumber)
getEpochIndex(header.number)
func getHeaderRecordIndex*(blockNumber: uint64, epochIndex: uint64): uint64 =
## Get the relative header index for the epoch accumulator
@ -136,13 +134,13 @@ func getHeaderRecordIndex*(blockNumber: uint64, epochIndex: uint64): uint64 =
func getHeaderRecordIndex*(header: BlockHeader, epochIndex: uint64): uint64 =
## Get the relative header index for the epoch accumulator
getHeaderRecordIndex(header.blockNumber.truncate(uint64), epochIndex)
getHeaderRecordIndex(header.number, epochIndex)
func isPreMerge*(blockNumber: uint64): bool =
blockNumber < mergeBlockNumber
func isPreMerge*(header: BlockHeader): bool =
isPreMerge(header.blockNumber.truncate(uint64))
isPreMerge(header.number)
func verifyProof(
a: FinishedAccumulator, header: BlockHeader, proof: openArray[Digest]


@ -98,7 +98,7 @@ func fromPortalBlockBody*(
BlockBody(
transactions: transactions,
uncles: @[], # Uncles must be empty: TODO where validation?
withdrawals: some(withdrawals),
withdrawals: Opt.some(withdrawals),
)
)
except RlpError as e:
@ -192,7 +192,7 @@ proc calcRootHash(items: Transactions | PortalReceipts | Withdrawals): Hash256 =
var tr = initHexaryTrie(newMemoryDB(), isPruning = false)
for i, item in items:
try:
tr.put(rlp.encode(i), item.asSeq())
tr.put(rlp.encode(i.uint), item.asSeq())
except CatchableError as e:
# tr.put now is a generic interface to whatever underlying db
# and it can raise exception if the backend db is something like aristo
@ -305,7 +305,7 @@ proc validateBlockBodyBytes*(
let body = ?decodeSsz(bytes, PortalBlockBodyShanghai)
?validateBlockBody(body, header)
BlockBody.fromPortalBlockBody(body)
elif isPoSBlock(chainConfig, header.blockNumber.truncate(uint64)):
elif isPoSBlock(chainConfig, header.number):
if header.withdrawalsRoot.isSome():
return err("Expected no withdrawalsRoot for pre Shanghai block")
elif header.ommersHash != EMPTY_UNCLE_HASH:
@ -374,7 +374,7 @@ proc get(
BlockBody.fromPortalBlockBodyOrRaise(
decodeSszOrRaise(encoded, PortalBlockBodyShanghai)
)
elif isPoSBlock(chainConfig, header.blockNumber.truncate(uint64)):
elif isPoSBlock(chainConfig, header.number):
BlockBody.fromPortalBlockBodyOrRaise(
decodeSszOrRaise(encoded, PortalBlockBodyLegacy)
)
@ -532,7 +532,7 @@ proc getBlock*(n: HistoryNetwork, hash: BlockHash): Future[Opt[Block]] {.async.}
proc getReceipts*(
n: HistoryNetwork, hash: BlockHash, header: BlockHeader
): Future[Opt[seq[Receipt]]] {.async.} =
if header.receiptRoot == EMPTY_ROOT_HASH:
if header.receiptsRoot == EMPTY_ROOT_HASH:
# Short path for empty receipts indicated by receipts root
return Opt.some(newSeq[Receipt]())
@ -554,7 +554,7 @@ proc getReceipts*(
receiptsContent = (await n.portalProtocol.contentLookup(contentKey, contentId)).valueOr:
warn "Failed fetching receipts from the network"
return Opt.none(seq[Receipt])
receipts = validateReceiptsBytes(receiptsContent.content, header.receiptRoot).valueOr:
receipts = validateReceiptsBytes(receiptsContent.content, header.receiptsRoot).valueOr:
warn "Validation of receipts failed", error
continue
@ -671,7 +671,7 @@ proc validateContent(
warn "Failed getting canonical header for receipts"
return false
let res = validateReceiptsBytes(content, header.receiptRoot)
let res = validateReceiptsBytes(content, header.receiptsRoot)
if res.isErr():
warn "Failed validating receipts", error = res.error
return false


@ -141,7 +141,7 @@ proc connectTo*(
): Future[Result[UtpSocket[NodeAddress], string]] {.async.} =
let connectRes = await stream.transport.connectTo(nodeAddress, connectionId)
if connectRes.isErr():
case connectRes.error.kind
case connectRes.error
of SocketAlreadyExists:
# This means that there is already a socket to this nodeAddress with given
# connection id. This means that a peer sent us a connection id which is
@ -161,14 +161,16 @@ proc connectTo*(
proc writeContentRequest(
socket: UtpSocket[NodeAddress], stream: PortalStream, request: ContentRequest
) {.async.} =
) {.async: (raises: [CancelledError]).} =
let dataWritten = await socket.write(request.content)
if dataWritten.isErr():
debug "Error writing requested data", error = dataWritten.error
await socket.closeWait()
proc readVarint(socket: UtpSocket[NodeAddress]): Future[Opt[uint32]] {.async.} =
proc readVarint(
socket: UtpSocket[NodeAddress]
): Future[Opt[uint32]] {.async: (raises: [CancelledError]).} =
var buffer: array[5, byte]
for i in 0 ..< len(buffer):
@ -186,7 +188,9 @@ proc readVarint(socket: UtpSocket[NodeAddress]): Future[Opt[uint32]] {.async.} =
else:
return err()
proc readContentItem(socket: UtpSocket[NodeAddress]): Future[Opt[seq[byte]]] {.async.} =
proc readContentItem(
socket: UtpSocket[NodeAddress]
): Future[Opt[seq[byte]]] {.async: (raises: [CancelledError]).} =
let len = await socket.readVarint()
if len.isOk():
@ -200,7 +204,7 @@ proc readContentItem(socket: UtpSocket[NodeAddress]): Future[Opt[seq[byte]]] {.a
proc readContentOffer(
socket: UtpSocket[NodeAddress], stream: PortalStream, offer: ContentOffer
) {.async.} =
) {.async: (raises: [CancelledError]).} =
# Read number of content items according to amount of ContentKeys accepted.
# This will either end with a FIN, or because the read action times out or
# because the number of expected items was read (if this happens and no FIN
@ -220,7 +224,7 @@ proc readContentOffer(
for i in 0 ..< amount:
let contentItemFut = socket.readContentItem()
if await contentItemFut.withTimeout(stream.contentReadTimeout):
let contentItem = contentItemFut.read
let contentItem = await contentItemFut
if contentItem.isOk():
contentItems.add(contentItem.get())
@ -291,7 +295,7 @@ proc allowedConnection(
proc handleIncomingConnection(
server: UtpRouter[NodeAddress], socket: UtpSocket[NodeAddress]
): Future[void] =
): Future[void] {.async: (raw: true, raises: []).} =
let manager = getUserData[NodeAddress, StreamManager](server)
for stream in manager.streams:
@ -303,14 +307,14 @@ proc handleIncomingConnection(
request.nodeId == socket.remoteAddress.nodeId:
let fut = socket.writeContentRequest(stream, request)
stream.contentRequests.del(i)
return fut
return noCancel(fut)
for i, offer in stream.contentOffers:
if offer.connectionId == socket.connectionId and
offer.nodeId == socket.remoteAddress.nodeId:
let fut = socket.readContentOffer(stream, offer)
stream.contentOffers.del(i)
return fut
return noCancel(fut)
# TODO: Is there a scenario where this can happen,
# considering `allowRegisteredIdCallback`? If not, doAssert?


@ -50,23 +50,23 @@ func init*(
txIndex: int,
): T {.raises: [ValidationError].} =
TransactionObject(
blockHash: some(w3Hash header.blockHash),
blockNumber: some(eth_api_types.BlockNumber(header.blockNumber.truncate(uint64))),
blockHash: Opt.some(w3Hash header.blockHash),
blockNumber: Opt.some(eth_api_types.BlockNumber(header.number)),
`from`: w3Addr tx.getSender(),
gas: Quantity(tx.gasLimit),
gasPrice: Quantity(tx.gasPrice),
hash: w3Hash tx.rlpHash,
input: tx.payload,
nonce: Quantity(tx.nonce),
to: some(w3Addr tx.destination),
transactionIndex: some(Quantity(txIndex)),
to: Opt.some(w3Addr tx.destination),
transactionIndex: Opt.some(Quantity(txIndex)),
value: tx.value,
v: Quantity(tx.V),
r: tx.R,
s: tx.S,
`type`: some(Quantity(tx.txType)),
maxFeePerGas: some(Quantity(tx.maxFee)),
maxPriorityFeePerGas: some(Quantity(tx.maxPriorityFee)),
`type`: Opt.some(Quantity(tx.txType)),
maxFeePerGas: Opt.some(Quantity(tx.maxFeePerGas)),
maxPriorityFeePerGas: Opt.some(Quantity(tx.maxPriorityFeePerGas)),
)
# Note: Similar as `populateBlockObject` from rpc_utils, but lacking the
@ -81,15 +81,15 @@ func init*(
let blockHash = header.blockHash
var blockObject = BlockObject(
number: eth_api_types.BlockNumber(header.blockNumber.truncate(uint64)),
number: eth_api_types.BlockNumber(header.number),
hash: w3Hash blockHash,
parentHash: w3Hash header.parentHash,
nonce: some(FixedBytes[8](header.nonce)),
nonce: Opt.some(FixedBytes[8](header.nonce)),
sha3Uncles: w3Hash header.ommersHash,
logsBloom: FixedBytes[256] header.bloom,
logsBloom: FixedBytes[256] header.logsBloom,
transactionsRoot: w3Hash header.txRoot,
stateRoot: w3Hash header.stateRoot,
receiptsRoot: w3Hash header.receiptRoot,
receiptsRoot: w3Hash header.receiptsRoot,
miner: w3Addr header.coinbase,
difficulty: header.difficulty,
extraData: HistoricExtraData header.extraData,
@ -97,9 +97,9 @@ func init*(
# https://playground.open-rpc.org/?schemaUrl=https://raw.githubusercontent.com/ethereum/eth1.0-apis/assembled-spec/openrpc.json
# So we should probably change `BlockObject`.
totalDifficulty: UInt256.low(),
gasLimit: Quantity(header.gasLimit.uint64),
gasUsed: Quantity(header.gasUsed.uint64),
timestamp: Quantity(header.timestamp.uint64),
gasLimit: Quantity(header.gasLimit),
gasUsed: Quantity(header.gasUsed),
timestamp: Quantity(header.timestamp),
)
let size = sizeof(BlockHeader) - sizeof(Blob) + header.extraData.len
@ -212,7 +212,7 @@ proc installEthApiHandlers*(
rpcServerWithProxy.rpc("eth_getBlockByHash") do(
data: eth_api_types.Hash256, fullTransactions: bool
) -> Option[BlockObject]:
) -> Opt[BlockObject]:
## Returns information about a block by hash.
##
## data: Hash of a block.
@ -223,13 +223,13 @@ proc installEthApiHandlers*(
let
blockHash = data.toHash()
(header, body) = (await historyNetwork.getBlock(blockHash)).valueOr:
return none(BlockObject)
return Opt.none(BlockObject)
return some(BlockObject.init(header, body, fullTransactions))
return Opt.some(BlockObject.init(header, body, fullTransactions))
rpcServerWithProxy.rpc("eth_getBlockByNumber") do(
quantityTag: RtBlockIdentifier, fullTransactions: bool
) -> Option[BlockObject]:
) -> Opt[BlockObject]:
if quantityTag.kind == bidAlias:
let tag = quantityTag.alias.toLowerAscii
case tag
@ -250,9 +250,9 @@ proc installEthApiHandlers*(
let
blockHash = forkyStore.optimistic_header.execution.block_hash
(header, body) = (await historyNetwork.getBlock(blockHash)).valueOr:
return none(BlockObject)
return Opt.none(BlockObject)
return some(BlockObject.init(header, body, fullTransactions))
return Opt.some(BlockObject.init(header, body, fullTransactions))
else:
raise newException(ValueError, "Not available before Capella - not synced?")
of "finalized":
@ -264,9 +264,9 @@ proc installEthApiHandlers*(
let
blockHash = forkyStore.finalized_header.execution.block_hash
(header, body) = (await historyNetwork.getBlock(blockHash)).valueOr:
return none(BlockObject)
return Opt.none(BlockObject)
return some(BlockObject.init(header, body, fullTransactions))
return Opt.some(BlockObject.init(header, body, fullTransactions))
else:
raise newException(ValueError, "Not available before Capella - not synced?")
of "pending":
@ -275,15 +275,15 @@ proc installEthApiHandlers*(
raise newException(ValueError, "Unsupported block tag " & tag)
else:
let
blockNumber = quantityTag.number.uint64.toBlockNumber
blockNumber = quantityTag.number.uint64.u256
maybeBlock = (await historyNetwork.getBlock(blockNumber)).valueOr:
raise newException(ValueError, error)
if maybeBlock.isNone():
return none(BlockObject)
return Opt.none(BlockObject)
else:
let (header, body) = maybeBlock.get()
return some(BlockObject.init(header, body, fullTransactions))
return Opt.some(BlockObject.init(header, body, fullTransactions))
rpcServerWithProxy.rpc("eth_getBlockTransactionCountByHash") do(
data: eth_api_types.Hash256
@ -309,7 +309,7 @@ proc installEthApiHandlers*(
# from from the block with that block hash. The Canonical Indices Network
# would need to be implemented to get this information.
# rpcServerWithProxy.rpc("eth_getTransactionReceipt") do(
# data: EthHashStr) -> Option[ReceiptObject]:
# data: EthHashStr) -> Opt[ReceiptObject]:
rpcServerWithProxy.rpc("eth_getLogs") do(
filterOptions: FilterOptions


@ -305,7 +305,7 @@ procSuite "Portal testnet tests":
doAssert(tx.kind == tohTx)
check tx.tx.blockHash.get == w3Hash hash
let filterOptions = FilterOptions(blockHash: some(w3Hash hash))
let filterOptions = FilterOptions(blockHash: Opt.some(w3Hash hash))
let logs = await retryUntil(
proc(): Future[seq[LogObject]] {.async.} =
@ -326,7 +326,7 @@ procSuite "Portal testnet tests":
for l in logs:
check:
l.blockHash == some(w3Hash hash)
l.blockHash == Opt.some(w3Hash hash)
# TODO: Check ommersHash, need the headers and not just the hashes
# for uncle in blockObj.uncles:


@ -43,7 +43,7 @@ suite "Header Accumulator Root":
let res = v.readBlockHeader()
check res.isOk()
let header = res.get()
headers[header.blockNumber.truncate(int)] = header
headers[header.number] = header
var accumulator: Accumulator


@ -48,10 +48,9 @@ suite "History Content Encodings":
# Go over all content keys and headers with generated proofs and compare
# them with the ones from the test vectors.
let
blockNumber = blockHeaders[i].blockNumber
contentKeyEncoded = content[blockNumber.toString()].content_key.hexToSeqByte()
contentValueEncoded =
content[blockNumber.toString()].content_value.hexToSeqByte()
blockNumber = blockHeaders[i].number
contentKeyEncoded = content[$blockNumber].content_key.hexToSeqByte()
contentValueEncoded = content[$blockNumber].content_value.hexToSeqByte()
check:
contentKeyEncoded == headerContentKey
@ -210,7 +209,7 @@ suite "History Content Encodings":
check contentKey.isOk()
# Decode (SSZ + RLP decode step) and validate receipts
let contentValue = validateReceiptsBytes(contentValueEncoded, header.receiptRoot)
let contentValue = validateReceiptsBytes(contentValueEncoded, header.receiptsRoot)
check contentValue.isOk()
# Encode content


@ -46,7 +46,7 @@ suite "History Network Content Validation":
blockBody = validateBlockBodyBytes(blockBodyBytes, blockHeader).expect(
"Should be Valid decoded block body"
)
receipts = validateReceiptsBytes(receiptsBytes, blockHeader.receiptRoot).expect(
receipts = validateReceiptsBytes(receiptsBytes, blockHeader.receiptsRoot).expect(
"Should be Valid decoded receipts"
)
@ -98,16 +98,16 @@ suite "History Network Content Validation":
check validateBlockBodyBytes(modifiedBodyBytes, blockHeader).isErr()
test "Valid Receipts":
check validateReceiptsBytes(receiptsBytes, blockHeader.receiptRoot).isOk()
check validateReceiptsBytes(receiptsBytes, blockHeader.receiptsRoot).isOk()
test "Malformed Receipts":
let malformedBytes = receiptsBytes[10 .. receiptsBytes.high]
check validateReceiptsBytes(malformedBytes, blockHeader.receiptRoot).isErr()
check validateReceiptsBytes(malformedBytes, blockHeader.receiptsRoot).isErr()
test "Invalid Receipts - Modified Receipts List":
var modifiedReceipts = receipts[1 .. receipts.high]
let modifiedReceiptsBytes = encode(modifiedReceipts)
check validateReceiptsBytes(modifiedReceiptsBytes, blockHeader.receiptRoot).isErr()
check validateReceiptsBytes(modifiedReceiptsBytes, blockHeader.receiptsRoot).isErr()


@ -41,7 +41,7 @@ suite "Header Accumulator":
# Note: These test headers will not be a blockchain, as the parent hashes
# are not properly filled in. That's fine however for this test, as that
# is not the way the headers are verified with the accumulator.
headers.add(BlockHeader(blockNumber: i.stuint(256), difficulty: 1.stuint(256)))
headers.add(BlockHeader(number: i, difficulty: 1.stuint(256)))
let accumulatorRes = buildAccumulatorData(headers)
check accumulatorRes.isOk()
@ -58,7 +58,7 @@ suite "Header Accumulator":
block: # Test invalid headers
# Post merge block number must fail (> than latest header in accumulator)
var proof: AccumulatorProof
let header = BlockHeader(blockNumber: mergeBlockNumber.stuint(256))
let header = BlockHeader(number: mergeBlockNumber)
check verifyAccumulatorProof(accumulator, header, proof).isErr()
# Test altered block headers by altering the difficulty
@ -67,7 +67,7 @@ suite "Header Accumulator":
check:
proof.isOk()
# Alter the block header so the proof no longer matches
let header = BlockHeader(blockNumber: i.stuint(256), difficulty: 2.stuint(256))
let header = BlockHeader(number: i.uint64, difficulty: 2.stuint(256))
check verifyAccumulatorProof(accumulator, header, proof.get()).isErr()
@ -83,7 +83,7 @@ suite "Header Accumulator":
var headers: seq[BlockHeader]
for i in 0 ..< amount:
headers.add(BlockHeader(blockNumber: i.stuint(256), difficulty: 1.stuint(256)))
headers.add(BlockHeader(number: i, difficulty: 1.stuint(256)))
let accumulatorRes = buildAccumulator(headers)
@ -98,7 +98,7 @@ suite "Header Accumulator":
headers: seq[BlockHeader]
for i in 0 ..< amount:
let header = BlockHeader(blockNumber: u256(i), difficulty: u256(1))
let header = BlockHeader(number: i, difficulty: u256(1))
headers.add(header)
headerHashes.add(header.blockHash())


@ -57,7 +57,7 @@ func buildAccumulator*(headers: seq[BlockHeader]): Result[FinishedAccumulator, s
for header in headers:
updateAccumulator(accumulator, header)
if header.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
if header.number == mergeBlockNumber - 1:
return ok(finishAccumulator(accumulator))
err("Not enough headers provided to finish the accumulator")
@ -73,7 +73,7 @@ func buildAccumulatorData*(
if accumulator.currentEpoch.len() == epochSize:
epochAccumulators.add(accumulator.currentEpoch)
if header.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
if header.number == mergeBlockNumber - 1:
epochAccumulators.add(accumulator.currentEpoch)
return ok((finishAccumulator(accumulator), epochAccumulators))


@ -52,7 +52,7 @@ proc createEmptyHeaders(fromNum: int, toNum: int): seq[BlockHeader] =
var headers: seq[BlockHeader]
for i in fromNum .. toNum:
var bh = BlockHeader()
bh.blockNumber = u256(i)
bh.number = BlockNumber(i)
bh.difficulty = u256(i)
# empty so that we won't care about creating fake block bodies
bh.ommersHash = EMPTY_UNCLE_HASH


@ -114,7 +114,6 @@ proc asPortalBlockData*(
): (common_types.BlockHash, BlockHeaderWithProof, PortalBlockBodyLegacy) =
let
txRoot = calculateTransactionData(payload.transactions)
withdrawalsRoot = options.none(Hash256)
header = etypes.BlockHeader(
parentHash: payload.parentHash.asEthHash,
@ -122,20 +121,20 @@ proc asPortalBlockData*(
coinbase: EthAddress payload.feeRecipient,
stateRoot: payload.stateRoot.asEthHash,
txRoot: txRoot,
receiptRoot: payload.receiptsRoot.asEthHash,
bloom: distinctBase(payload.logsBloom),
receiptsRoot: payload.receiptsRoot.asEthHash,
logsBloom: distinctBase(payload.logsBloom),
difficulty: default(DifficultyInt),
blockNumber: payload.blockNumber.distinctBase.u256,
number: payload.blockNumber.distinctBase,
gasLimit: payload.gasLimit.unsafeQuantityToInt64,
gasUsed: payload.gasUsed.unsafeQuantityToInt64,
timestamp: payload.timestamp.EthTime,
extraData: bytes payload.extraData,
mixDigest: payload.prevRandao.asEthHash,
mixHash: payload.prevRandao.asEthHash,
nonce: default(BlockNonce),
fee: some(payload.baseFeePerGas),
withdrawalsRoot: withdrawalsRoot,
blobGasUsed: options.none(uint64),
excessBlobGas: options.none(uint64),
baseFeePerGas: Opt.some(payload.baseFeePerGas),
withdrawalsRoot: Opt.none(Hash256),
blobGasUsed: Opt.none(uint64),
excessBlobGas: Opt.none(uint64),
)
headerWithProof = BlockHeaderWithProof(
@ -158,7 +157,7 @@ proc asPortalBlockData*(
): (common_types.BlockHash, BlockHeaderWithProof, PortalBlockBodyShanghai) =
let
txRoot = calculateTransactionData(payload.transactions)
withdrawalsRoot = some(calculateWithdrawalsRoot(payload.withdrawals))
withdrawalsRoot = Opt.some(calculateWithdrawalsRoot(payload.withdrawals))
# TODO: adjust blobGasUsed & excessBlobGas according to deneb fork!
header = etypes.BlockHeader(
@ -167,20 +166,20 @@ proc asPortalBlockData*(
coinbase: EthAddress payload.feeRecipient,
stateRoot: payload.stateRoot.asEthHash,
txRoot: txRoot,
receiptRoot: payload.receiptsRoot.asEthHash,
bloom: distinctBase(payload.logsBloom),
receiptsRoot: payload.receiptsRoot.asEthHash,
logsBloom: distinctBase(payload.logsBloom),
difficulty: default(DifficultyInt),
blockNumber: payload.blockNumber.distinctBase.u256,
number: payload.blockNumber.distinctBase,
gasLimit: payload.gasLimit.unsafeQuantityToInt64,
gasUsed: payload.gasUsed.unsafeQuantityToInt64,
timestamp: payload.timestamp.EthTime,
extraData: bytes payload.extraData,
mixDigest: payload.prevRandao.asEthHash,
mixHash: payload.prevRandao.asEthHash,
nonce: default(BlockNonce),
fee: some(payload.baseFeePerGas),
baseFeePerGas: Opt.some(payload.baseFeePerGas),
withdrawalsRoot: withdrawalsRoot,
blobGasUsed: options.none(uint64),
excessBlobGas: options.none(uint64),
blobGasUsed: Opt.none(uint64),
excessBlobGas: Opt.none(uint64),
)
headerWithProof = BlockHeaderWithProof(


@ -57,24 +57,22 @@ import
# Need to be selective due to the `Block` type conflict from downloader
from ../network/history/history_network import encode
from ../../nimbus/utils/utils import calcTxRoot, calcReceiptRoot
from ../../nimbus/utils/utils import calcTxRoot, calcReceiptsRoot
chronicles.formatIt(IoErrorCode):
$it
proc downloadHeader(client: RpcClient, i: uint64): BlockHeader =
let blockNumber = u256(i)
try:
let jsonHeader = requestHeader(blockNumber, some(client))
let jsonHeader = requestHeader(i, some(client))
parseBlockHeader(jsonHeader)
except CatchableError as e:
fatal "Error while requesting BlockHeader", error = e.msg, number = i
quit 1
proc downloadBlock(i: uint64, client: RpcClient): Block =
let num = u256(i)
try:
return requestBlock(num, flags = {DownloadReceipts}, client = some(client))
return requestBlock(i, flags = {DownloadReceipts}, client = some(client))
except CatchableError as e:
fatal "Error while requesting Block", error = e.msg, number = i
quit 1
@ -248,9 +246,7 @@ proc cmdExportEra1(config: ExporterConf) =
# TODO: Not sure about the errors that can occur here. But the whole
# block requests over json-rpc should be reworked here (and can be
# used in the bridge also then)
requestBlock(
blockNumber.u256, flags = {DownloadReceipts}, client = some(client)
)
requestBlock(blockNumber, flags = {DownloadReceipts}, client = some(client))
except CatchableError as e:
error "Failed retrieving block, skip creation of era1 file",
blockNumber, era, error = e.msg
@ -406,7 +402,7 @@ when isMainModule:
headerHash = to0xHex(rlpHash(blockHeader).data)
debug "Header decoded successfully",
hash = headerHash, blockNumber = blockHeader.blockNumber
hash = headerHash, blockNumber = blockHeader.number
else:
warn "Skipping record, not a block header", typ = toHex(header.typ)
@ -464,10 +460,10 @@ when isMainModule:
return err("Invalid block header in " & file & ": " & e.msg)
# Quick sanity check
if blockHeader.blockNumber.truncate(uint64) != i * epochSize + count:
if blockHeader.number != i * epochSize + count:
fatal "Incorrect block headers in file",
file = file,
blockNumber = blockHeader.blockNumber,
blockNumber = blockHeader.number,
expectedBlockNumber = i * epochSize + count
quit 1
@ -478,7 +474,7 @@ when isMainModule:
# a header for the next epoch (or on finishing the epoch).
if writeEpochAccumulators:
if accumulator.currentEpoch.len() == epochSize or
blockHeader.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
blockHeader.number == mergeBlockNumber - 1:
let file =
try:
dataDir / &"mainnet-epoch-accumulator-{i.uint64:05}.ssz"
@ -495,7 +491,7 @@ when isMainModule:
info "Updated an epoch", epoch = i
count.inc()
if blockHeader.blockNumber.truncate(uint64) == mergeBlockNumber - 1:
if blockHeader.number == mergeBlockNumber - 1:
let finishedAccumulator = finishAccumulator(accumulator)
info "Updated last epoch, finished building master accumulator",
epoch = i


@ -61,7 +61,7 @@ func asPortalBlock(
(headerWithProof, portalBody)
func asTxType(quantity: Option[Quantity]): Result[TxType, string] =
func asTxType(quantity: Opt[Quantity]): Result[TxType, string] =
let value = quantity.get(0.Quantity).uint8
var txType: TxType
if not checkedEnumAssign(txType, value):
@ -91,7 +91,7 @@ func asReceipt(receiptObject: ReceiptObject): Result[Receipt, string] =
isHash: false,
status: status == 1,
cumulativeGasUsed: cumulativeGasUsed,
bloom: BloomFilter(receiptObject.logsBloom),
logsBloom: BloomFilter(receiptObject.logsBloom),
logs: logs,
)
)
@ -102,7 +102,7 @@ func asReceipt(receiptObject: ReceiptObject): Result[Receipt, string] =
isHash: true,
hash: ethHash receiptObject.root.get(),
cumulativeGasUsed: cumulativeGasUsed,
bloom: BloomFilter(receiptObject.logsBloom),
logsBloom: BloomFilter(receiptObject.logsBloom),
logs: logs,
)
)
@ -260,7 +260,7 @@ proc runLatestLoop(
if validateBlockBody(body, ethBlock.header).isErr():
error "Block body is invalid"
continue
if validateReceipts(portalReceipts, ethBlock.header.receiptRoot).isErr():
if validateReceipts(portalReceipts, ethBlock.header.receiptsRoot).isErr():
error "Receipts root is invalid"
continue
@ -507,7 +507,7 @@ proc runBackfillLoopAuditMode(
error "Invalid hex for block receipts content", error = e.msg
break receiptsBlock
validateReceiptsBytes(content, header.receiptRoot).isOkOr:
validateReceiptsBytes(content, header.receiptsRoot).isOkOr:
error "Block receipts are invalid", error
break receiptsBlock


@ -116,8 +116,12 @@ proc buildAcceptConnection(
t: ref Table[SKey, UtpSocket[NodeAddress]]
): AcceptConnectionCallback[NodeAddress] =
return (
proc(server: UtpRouter[NodeAddress], client: UtpSocket[NodeAddress]): Future[void] =
let fut = newFuture[void]()
proc(
server: UtpRouter[NodeAddress], client: UtpSocket[NodeAddress]
): Future[void] {.async: (raw: true, raises: []).} =
let fut = noCancel Future[void].Raising([CancelledError]).init(
"utp_test_app.AcceptConnectionCallback"
)
let key = client.socketKey.toSKey()
t[key] = client
fut.complete()


@ -41,7 +41,7 @@ proc processChainData(cd: ChainData): TestStatus =
else:
trace "block hash not equal",
got=blockHash,
number=head.blockNumber,
number=head.number,
expected=cd.lastBlockHash
TestStatus.Failed


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -9,7 +9,6 @@
# according to those terms.
import
std/[options],
eth/common,
./clmock,
./types,
@ -31,7 +30,7 @@ proc configureCLMock*(s: BaseSpec, cl: CLMocker) =
if s.safeSlotsToImportOptimistically != 0:
cl.safeSlotsToImportOptimistically = s.safeSlotsToImportOptimistically
cl.blockTimestampIncrement = some(s.getBlockTimeIncrements())
cl.blockTimestampIncrement = Opt.some(s.getBlockTimeIncrements())
func getMainFork*(s: BaseSpec): EngineFork =
let mainFork = s.mainFork
@ -75,10 +74,10 @@ method getForkConfig*(s: BaseSpec): ChainConfig {.base.} =
# Cannot configure a fork before Shanghai
if previousForkTime != 0:
return nil
forkConfig.shanghaiTime = some(forkTime.EthTime)
forkConfig.shanghaiTime = Opt.some(forkTime.EthTime)
elif mainFork == ForkCancun:
forkConfig.shanghaiTime = some(previousForkTime.EthTime)
forkConfig.cancunTime = some(forkTime.EthTime)
forkConfig.shanghaiTime = Opt.some(previousForkTime.EthTime)
forkConfig.cancunTime = Opt.some(forkTime.EthTime)
else:
doAssert(false, "unknown fork: " & $mainFork)


@ -29,7 +29,7 @@ method setEngineAPIVersionResolver*(cust: EngineAPIVersionResolver, v: CommonRef
cust.com = v
method forkchoiceUpdatedVersion*(cust: EngineAPIVersionResolver,
headTimestamp: uint64, payloadAttributesTimestamp: Option[uint64] = none(uint64)): Version {.base, gcsafe.} =
headTimestamp: uint64, payloadAttributesTimestamp: Opt[uint64] = Opt.none(uint64)): Version {.base, gcsafe.} =
let ts = if payloadAttributesTimestamp.isNone: headTimestamp.EthTime
else: payloadAttributesTimestamp.get().EthTime
if cust.com.isCancunOrLater(ts):
@ -69,7 +69,7 @@ method getExpectedError*(cust: GetPayloadCustomizer): int {.base, gcsafe.} =
type
BaseGetPayloadCustomizer* = ref object of GetPayloadCustomizer
customPayloadID*: Option[PayloadID]
customPayloadID*: Opt[PayloadID]
expectedError* : int
method getPayloadID(cust: BaseGetPayloadCustomizer,
@ -105,12 +105,12 @@ method getPayloadAttributes*(cust: PayloadAttributesCustomizer, basePayloadAttri
type
BasePayloadAttributesCustomizer* = ref object of PayloadAttributesCustomizer
timestamp* : Option[uint64]
prevRandao* : Option[common.Hash256]
suggestedFeeRecipient* : Option[common.EthAddress]
withdrawals* : Option[seq[Withdrawal]]
timestamp* : Opt[uint64]
prevRandao* : Opt[common.Hash256]
suggestedFeeRecipient* : Opt[common.EthAddress]
withdrawals* : Opt[seq[Withdrawal]]
removeWithdrawals* : bool
beaconRoot* : Option[common.Hash256]
beaconRoot* : Opt[common.Hash256]
removeBeaconRoot* : bool
method getPayloadAttributes(cust: BasePayloadAttributesCustomizer, basePayloadAttributes: PayloadAttributes): PayloadAttributes =
@ -132,12 +132,12 @@ method getPayloadAttributes(cust: BasePayloadAttributesCustomizer, basePayloadAt
customPayloadAttributes.suggestedFeeRecipient = w3Addr cust.suggestedFeeRecipient.get
if cust.removeWithdrawals:
customPayloadAttributes.withdrawals = none(seq[WithdrawalV1])
customPayloadAttributes.withdrawals = Opt.none(seq[WithdrawalV1])
elif cust.withdrawals.isSome:
customPayloadAttributes.withdrawals = w3Withdrawals cust.withdrawals
if cust.removeBeaconRoot:
customPayloadAttributes.parentBeaconBlockRoot = none(Web3Hash)
customPayloadAttributes.parentBeaconBlockRoot = Opt.none(Web3Hash)
elif cust.beaconRoot.isSome:
customPayloadAttributes.parentBeaconBlockRoot = w3Hash cust.beaconRoot
@ -174,7 +174,7 @@ type
UpgradeForkchoiceUpdatedVersion* = ref object of BaseForkchoiceUpdatedCustomizer
method forkchoiceUpdatedVersion(cust: UpgradeForkchoiceUpdatedVersion, headTimestamp:
uint64, payloadAttributesTimestamp: Option[uint64] = none(uint64)): Version =
uint64, payloadAttributesTimestamp: Opt[uint64] = Opt.none(uint64)): Version =
let version = procCall forkchoiceUpdatedVersion(EngineAPIVersionResolver(cust), headTimestamp, payloadAttributesTimestamp)
doAssert(version != Version.high, "cannot upgrade version " & $Version.high)
version.succ
@ -184,7 +184,7 @@ type
DowngradeForkchoiceUpdatedVersion* = ref object of BaseForkchoiceUpdatedCustomizer
method forkchoiceUpdatedVersion(cust: DowngradeForkchoiceUpdatedVersion, headTimestamp: uint64,
payloadAttributesTimestamp: Option[uint64] = none(uint64)): Version =
payloadAttributesTimestamp: Opt[uint64] = Opt.none(uint64)): Version =
let version = procCall forkchoiceUpdatedVersion(EngineAPIVersionResolver(cust), headTimestamp, payloadAttributesTimestamp)
doAssert(version != Version.V1, "cannot downgrade version 1")
version.pred
@ -200,13 +200,13 @@ method getPayloadAttributes(cust: TimestampDeltaPayloadAttributesCustomizer, bas
type
VersionedHashesCustomizer* = ref object of RootRef
blobs*: Option[seq[BlobID]]
blobs*: Opt[seq[BlobID]]
hashVersions*: seq[byte]
method getVersionedHashes*(cust: VersionedHashesCustomizer,
baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] {.base, gcsafe.} =
baseVersionedHashes: openArray[common.Hash256]): Opt[seq[common.Hash256]] {.base, gcsafe.} =
if cust.blobs.isNone:
return none(seq[common.Hash256])
return Opt.none(seq[common.Hash256])
let blobs = cust.blobs.get
var v = newSeq[common.Hash256](blobs.len)
@ -216,7 +216,7 @@ method getVersionedHashes*(cust: VersionedHashesCustomizer,
if cust.hashVersions.len > i:
version = cust.hashVersions[i]
v[i] = blobID.getVersionedHash(version)
some(v)
Opt.some(v)
method description*(cust: VersionedHashesCustomizer): string {.base, gcsafe.} =
result = "VersionedHashes: "
@ -232,33 +232,33 @@ type
IncreaseVersionVersionedHashes* = ref object of VersionedHashesCustomizer
method getVersionedHashes(cust: IncreaseVersionVersionedHashes,
baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] =
baseVersionedHashes: openArray[common.Hash256]): Opt[seq[common.Hash256]] =
doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")
var v = newSeq[common.Hash256](baseVersionedHashes.len)
for i, h in baseVersionedHashes:
v[i] = h
v[i].data[0] = v[i].data[0] + 1
some(v)
Opt.some(v)
type
CorruptVersionedHashes* = ref object of VersionedHashesCustomizer
method getVersionedHashes(cust: CorruptVersionedHashes,
baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] =
baseVersionedHashes: openArray[common.Hash256]): Opt[seq[common.Hash256]] =
doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")
var v = newSeq[common.Hash256](baseVersionedHashes.len)
for i, h in baseVersionedHashes:
v[i] = h
v[i].data[h.data.len-1] = v[i].data[h.data.len-1] + 1
some(v)
Opt.some(v)
type
RemoveVersionedHash* = ref object of VersionedHashesCustomizer
method getVersionedHashes(cust: RemoveVersionedHash,
baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] =
baseVersionedHashes: openArray[common.Hash256]): Opt[seq[common.Hash256]] =
doAssert(baseVersionedHashes.len > 0, "no versioned hashes available for modification")
var v = newSeq[common.Hash256](baseVersionedHashes.len - 1)
@ -266,13 +266,13 @@ method getVersionedHashes(cust: RemoveVersionedHash,
if i < baseVersionedHashes.len-1:
v[i] = h
v[i].data[h.data.len-1] = v[i].data[h.data.len-1] + 1
some(v)
Opt.some(v)
type
ExtraVersionedHash* = ref object of VersionedHashesCustomizer
method getVersionedHashes(cust: ExtraVersionedHash,
baseVersionedHashes: openArray[common.Hash256]): Option[seq[common.Hash256]] =
baseVersionedHashes: openArray[common.Hash256]): Opt[seq[common.Hash256]] =
var v = newSeq[common.Hash256](baseVersionedHashes.len + 1)
for i, h in baseVersionedHashes:
v[i] = h
@ -280,7 +280,7 @@ method getVersionedHashes(cust: ExtraVersionedHash,
var extraHash = common.Hash256.randomBytes()
extraHash.data[0] = VERSIONED_HASH_VERSION_KZG
v[^1] = extraHash
some(v)
Opt.some(v)
type
PayloadCustomizer* = ref object of EngineAPIVersionResolver
@ -304,27 +304,27 @@ method getExpectInvalidStatus*(cust: NewPayloadCustomizer): bool {.base, gcsafe.
type
CustomPayloadData* = object
parentHash* : Option[common.Hash256]
feeRecipient* : Option[common.EthAddress]
stateRoot* : Option[common.Hash256]
receiptsRoot* : Option[common.Hash256]
logsBloom* : Option[BloomFilter]
prevRandao* : Option[common.Hash256]
number* : Option[uint64]
gasLimit* : Option[GasInt]
gasUsed* : Option[GasInt]
timestamp* : Option[uint64]
extraData* : Option[common.Blob]
baseFeePerGas* : Option[UInt256]
blockHash* : Option[common.Hash256]
transactions* : Option[seq[Transaction]]
withdrawals* : Option[seq[Withdrawal]]
parentHash* : Opt[common.Hash256]
feeRecipient* : Opt[common.EthAddress]
stateRoot* : Opt[common.Hash256]
receiptsRoot* : Opt[common.Hash256]
logsBloom* : Opt[BloomFilter]
prevRandao* : Opt[common.Hash256]
number* : Opt[uint64]
gasLimit* : Opt[GasInt]
gasUsed* : Opt[GasInt]
timestamp* : Opt[uint64]
extraData* : Opt[common.Blob]
baseFeePerGas* : Opt[UInt256]
blockHash* : Opt[common.Hash256]
transactions* : Opt[seq[Transaction]]
withdrawals* : Opt[seq[Withdrawal]]
removeWithdrawals* : bool
blobGasUsed* : Option[uint64]
blobGasUsed* : Opt[uint64]
removeBlobGasUsed* : bool
excessBlobGas* : Option[uint64]
excessBlobGas* : Opt[uint64]
removeExcessBlobGas* : bool
parentBeaconRoot* : Option[common.Hash256]
parentBeaconRoot* : Opt[common.Hash256]
removeParentBeaconRoot* : bool
versionedHashesCustomizer*: VersionedHashesCustomizer
@ -351,16 +351,16 @@ proc customizePayload*(cust: CustomPayloadData, data: ExecutableData): Executabl
customHeader.stateRoot = cust.stateRoot.get
if cust.receiptsRoot.isSome:
customHeader.receiptRoot = cust.receiptsRoot.get
customHeader.receiptsRoot = cust.receiptsRoot.get
if cust.logsBloom.isSome:
customHeader.bloom = cust.logsBloom.get
customHeader.logsBloom = cust.logsBloom.get
if cust.prevRandao.isSome:
customHeader.mixDigest = cust.prevRandao.get
customHeader.mixHash = cust.prevRandao.get
if cust.number.isSome:
customHeader.blockNumber = cust.number.get.u256
customHeader.number = cust.number.get
if cust.gasLimit.isSome:
customHeader.gasLimit = cust.gasLimit.get
@ -375,26 +375,26 @@ proc customizePayload*(cust: CustomPayloadData, data: ExecutableData): Executabl
customHeader.extraData = cust.extraData.get
if cust.baseFeePerGas.isSome:
customHeader.fee = cust.baseFeePerGas
customHeader.baseFeePerGas = cust.baseFeePerGas
if cust.removeWithdrawals:
customHeader.withdrawalsRoot = none(common.Hash256)
customHeader.withdrawalsRoot = Opt.none(common.Hash256)
elif cust.withdrawals.isSome:
let h = calcWithdrawalsRoot(cust.withdrawals.get)
customHeader.withdrawalsRoot = some(h)
customHeader.withdrawalsRoot = Opt.some(h)
if cust.removeBlobGasUsed:
customHeader.blobGasUsed = none(uint64)
customHeader.blobGasUsed = Opt.none(uint64)
elif cust.blobGasUsed.isSome:
customHeader.blobGasUsed = cust.blobGasUsed
if cust.removeExcessBlobGas:
customHeader.excessBlobGas = none(uint64)
customHeader.excessBlobGas = Opt.none(uint64)
elif cust.excessBlobGas.isSome:
customHeader.excessBlobGas = cust.excessBlobGas
if cust.removeParentBeaconRoot:
customHeader.parentBeaconBlockRoot = none(common.Hash256)
customHeader.parentBeaconBlockRoot = Opt.none(common.Hash256)
elif cust.parentBeaconRoot.isSome:
customHeader.parentBeaconBlockRoot = cust.parentBeaconRoot
@ -408,7 +408,7 @@ proc customizePayload*(cust: CustomPayloadData, data: ExecutableData): Executabl
)
if cust.removeWithdrawals:
blk.withdrawals = none(seq[Withdrawal])
blk.withdrawals = Opt.none(seq[Withdrawal])
elif cust.withdrawals.isSome:
blk.withdrawals = cust.withdrawals
elif data.basePayload.withdrawals.isSome:
@ -454,7 +454,7 @@ method newPayloadVersion(cust: DowngradeNewPayloadVersion, timestamp: uint64): V
proc customizePayloadTransactions*(data: ExecutableData, customTransactions: openArray[Transaction]): ExecutableData =
let cpd = CustomPayloadData(
transactions: some(@customTransactions),
transactions: Opt.some(@customTransactions),
)
customizePayload(cpd, data)
@ -533,15 +533,15 @@ type
ExtraVersionedHashes
InvalidWithdrawals
func scramble(data: Web3Hash): Option[common.Hash256] =
func scramble(data: Web3Hash): Opt[common.Hash256] =
var h = ethHash data
h.data[^1] = byte(255 - h.data[^1])
some(h)
Opt.some(h)
func scramble(data: common.Hash256): Option[common.Hash256] =
func scramble(data: common.Hash256): Opt[common.Hash256] =
var h = data
h.data[0] = byte(255 - h.data[0])
some(h)
Opt.some(h)
# This function generates an invalid payload by taking a base payload and modifying the specified field such that it ends up being invalid.
# One small consideration is that the payload needs to contain transactions and specially transactions using the PREVRANDAO opcode for all the fields to be compatible with this function.
@ -565,29 +565,29 @@ proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadFiel
of InvalidNumber:
let modNumber = basePayload.blockNumber.uint64 - 1
customPayloadMod = CustomPayloadData(
number: some(modNumber),
number: Opt.some(modNumber),
)
of InvalidGasLimit:
let modGasLimit = basePayload.gasLimit.GasInt * 2
customPayloadMod = CustomPayloadData(
gasLimit: some(modGasLimit),
gasLimit: Opt.some(modGasLimit),
)
of InvalidGasUsed:
let modGasUsed = basePayload.gasUsed.GasInt - 1
customPayloadMod = CustomPayloadData(
gasUsed: some(modGasUsed),
gasUsed: Opt.some(modGasUsed),
)
of InvalidTimestamp:
let modTimestamp = basePayload.timestamp.uint64 - 1
customPayloadMod = CustomPayloadData(
timestamp: some(modTimestamp),
timestamp: Opt.some(modTimestamp),
)
of InvalidPrevRandao:
# This option potentially requires a transaction that uses the PREVRANDAO opcode.
# Otherwise the payload will still be valid.
let randomHash = common.Hash256.randomBytes()
customPayloadMod = CustomPayloadData(
prevRandao: some(randomHash),
prevRandao: Opt.some(randomHash),
)
of InvalidParentBeaconBlockRoot:
doAssert(data.beaconRoot.isSome,
@ -599,19 +599,19 @@ proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadFiel
doAssert(basePayload.blobGasUsed.isSome, "no blob gas used available for modification")
let modBlobGasUsed = basePayload.blobGasUsed.get.uint64 + 1
customPayloadMod = CustomPayloadData(
blobGasUsed: some(modBlobGasUsed),
blobGasUsed: Opt.some(modBlobGasUsed),
)
of InvalidBlobCountGasUsed:
doAssert(basePayload.blobGasUsed.isSome, "no blob gas used available for modification")
let modBlobGasUsed = basePayload.blobGasUsed.get.uint64 + GAS_PER_BLOB
customPayloadMod = CustomPayloadData(
blobGasUsed: some(modBlobGasUsed),
blobGasUsed: Opt.some(modBlobGasUsed),
)
of InvalidExcessBlobGas:
doAssert(basePayload.excessBlobGas.isSome, "no excess blob gas available for modification")
let modExcessBlobGas = basePayload.excessBlobGas.get.uint64 + 1
customPayloadMod = CustomPayloadData(
excessBlobGas: some(modExcessBlobGas),
excessBlobGas: Opt.some(modExcessBlobGas),
)
of InvalidVersionedHashesVersion:
doAssert(data.versionedHashes.isSome, "no versioned hashes available for modification")
@ -640,7 +640,7 @@ proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadFiel
of RemoveTransaction:
let emptyTxs = newSeq[Transaction]()
customPayloadMod = CustomPayloadData(
transactions: some(emptyTxs),
transactions: Opt.some(emptyTxs),
)
of InvalidTransactionSignature,
InvalidTransactionNonce,
@ -657,26 +657,26 @@ proc generateInvalidPayload*(sender: TxSender, data: ExecutableData, payloadFiel
case payloadField
of InvalidTransactionSignature:
var sig = CustSig(R: baseTx.R - 1.u256)
custTx.signature = some(sig)
custTx.signature = Opt.some(sig)
of InvalidTransactionNonce:
custTx.nonce = some(baseTx.nonce - 1)
custTx.nonce = Opt.some(baseTx.nonce - 1)
of InvalidTransactionGas:
custTx.gas = some(0.GasInt)
custTx.gas = Opt.some(0.GasInt)
of InvalidTransactionGasPrice:
custTx.gasPriceOrGasFeeCap = some(0.GasInt)
custTx.gasPriceOrGasFeeCap = Opt.some(0.GasInt)
of InvalidTransactionGasTipPrice:
custTx.gasTipCap = some(gasTipPrice.GasInt * 2.GasInt)
custTx.gasTipCap = Opt.some(gasTipPrice.GasInt * 2.GasInt)
of InvalidTransactionValue:
# Vault account initially has 0x123450000000000000000, so this value should overflow
custTx.value = some(UInt256.fromHex("0x123450000000000000001"))
custTx.value = Opt.some(UInt256.fromHex("0x123450000000000000001"))
of InvalidTransactionChainID:
custTx.chainId = some(ChainId(baseTx.chainId.uint64 + 1))
custTx.chainId = Opt.some(ChainId(baseTx.chainId.uint64 + 1))
else: discard
let acc = sender.getNextAccount()
let modifiedTx = sender.customizeTransaction(acc, baseTx, custTx)
customPayloadMod = CustomPayloadData(
transactions: some(@[modifiedTx]),
transactions: Opt.some(@[modifiedTx]),
)
customPayloadMod.customizePayload(data)


@ -14,7 +14,7 @@ import
eth/[common, rlp],
eth/common/eth_types_rlp,
chronicles,
stew/[results, byteutils],
stew/byteutils,
kzg4844/kzg_ex as kzg,
../types,
../engine_client,
@ -112,11 +112,11 @@ proc verifyTransactionFromNode*(client: RpcClient, tx: Transaction): Result[void
if returnedTx.chainId.get.uint64 != tx.chainId.uint64:
return err("chain id mismatch: $1 != $2" % [$returnedTx.chainId.get.uint64, $tx.chainId.uint64])
if returnedTx.maxFeePerGas != tx.maxFee:
return err("max fee per gas mismatch: $1 != $2" % [$returnedTx.maxFeePerGas, $tx.maxFee])
if returnedTx.maxFeePerGas != tx.maxFeePerGas:
return err("max fee per gas mismatch: $1 != $2" % [$returnedTx.maxFeePerGas, $tx.maxFeePerGas])
if returnedTx.maxPriorityFeePerGas != tx.maxPriorityFee:
return err("max priority fee per gas mismatch: $1 != $2" % [$returnedTx.maxPriorityFeePerGas, $tx.maxPriorityFee])
if returnedTx.maxPriorityFeePerGas != tx.maxPriorityFeePerGas:
return err("max priority fee per gas mismatch: $1 != $2" % [$returnedTx.maxPriorityFeePerGas, $tx.maxPriorityFeePerGas])
if returnedTx.maxFeePerBlobGas.isNone:
return err("expect maxFeePerBlobGas is some")
@ -198,7 +198,7 @@ proc verifyBeaconRootStorage*(client: RpcClient, payload: ExecutionPayload): boo
# Read the storage keys from the stateful precompile that stores the beacon roots and verify
# that the beacon root is the same as the one in the payload
let
blockNumber = u256 payload.blockNumber
blockNumber = payload.blockNumber.uint64
precompileAddress = BEACON_ROOTS_ADDRESS
(timestampKey, beaconRootKey) = beaconRootStorageIndexes(payload.timestamp.uint64)
@ -210,7 +210,7 @@ proc verifyBeaconRootStorage*(client: RpcClient, payload: ExecutionPayload): boo
if r.get.u256 != payload.timestamp.uint64.u256:
error "verifyBeaconRootStorage storage 1",
expect=payload.timestamp.uint64.u256,
expect=payload.timestamp.uint64,
get=r.get.u256
return false


@ -52,9 +52,9 @@ proc verifyPayload(step: NewPayloads,
com: CommonRef,
client: RpcClient,
blobTxsInPayload: openArray[Transaction],
shouldOverrideBuilder: Option[bool],
shouldOverrideBuilder: Opt[bool],
payload: ExecutionPayload,
previousPayload = none(ExecutionPayload)): bool =
previousPayload = Opt.none(ExecutionPayload)): bool =
var
parentExcessBlobGas = 0'u64
@ -70,8 +70,8 @@ proc verifyPayload(step: NewPayloads,
let
parent = common.BlockHeader(
excessBlobGas: some(parentExcessBlobGas),
blobGasUsed: some(parentBlobGasUsed)
excessBlobGas: Opt.some(parentExcessBlobGas),
blobGasUsed: Opt.some(parentBlobGasUsed)
)
expectedExcessBlobGas = calcExcessBlobGas(parent)
@ -223,12 +223,12 @@ method execute*(step: NewPayloads, ctx: CancunTestContext): bool =
timestamp = env.clMock.latestHeader.timestamp.uint64
payloadAttributes = step.fcUOnPayloadRequest.getPayloadAttributes(payloadAttributes)
let version = step.fcUOnPayloadRequest.forkchoiceUpdatedVersion(timestamp, some(payloadAttributes.timestamp.uint64))
let version = step.fcUOnPayloadRequest.forkchoiceUpdatedVersion(timestamp, Opt.some(payloadAttributes.timestamp.uint64))
if step.fcUOnPayloadRequest.getExpectInvalidStatus():
expectedStatus = PayloadExecutionStatus.invalid
let r = env.engine.client.forkchoiceUpdated(version, forkchoiceState, some(payloadAttributes))
let r = env.engine.client.forkchoiceUpdated(version, forkchoiceState, Opt.some(payloadAttributes))
if expectedError != 0:
r.expectErrorCode(expectedError, step.expectationDescription)
else:
@ -353,7 +353,7 @@ method execute*(step: NewPayloads, ctx: CancunTestContext): bool =
let blobData = res.get
if not step.verifyPayload(env.engine.com, env.engine.client,
blobData.txs, env.clMock.latestShouldOverrideBuilder,
payload, some(shadow.prevPayload)):
payload, Opt.some(shadow.prevPayload)):
fatal "Error verifying payload", payload=shadow.p+1, count=shadow.payloadCount
return false


@ -60,7 +60,7 @@ method execute*(step: SendBlobTransactions, ctx: CancunTestContext): bool =
# Send the blob transactions
for _ in 0..<step.transactionCount:
let tc = BlobTx(
recipient: some(DATAHASH_START_ADDRESS),
recipient: Opt.some(DATAHASH_START_ADDRESS),
gasLimit: 100000.GasInt,
gasTip: step.blobTransactionGasTipCap,
gasFee: step.blobTransactionGasFeeCap,


@ -527,7 +527,7 @@ let cancunTestListA* = [
testSequence: @[
NewPayloads(
fcUOnPayloadRequest: UpgradeForkchoiceUpdatedVersion(
beaconRoot: some(common.Hash256()),
beaconRoot: Opt.some(common.Hash256()),
expectedError: engineApiUnsupportedFork,
),
expectationDescription: """
@ -555,7 +555,7 @@ let cancunTestListA* = [
testSequence: @[
NewPayloads(
fcUOnPayloadRequest: BaseForkchoiceUpdatedCustomizer(
beaconRoot: some(common.Hash256()),
beaconRoot: Opt.some(common.Hash256()),
expectedError: engineApiInvalidPayloadAttributes,
),
expectationDescription: """
@ -583,7 +583,7 @@ let cancunTestListA* = [
testSequence: @[
NewPayloads(
fcUOnPayloadRequest: DowngradeForkchoiceUpdatedVersion(
beaconRoot: some(common.Hash256()),
beaconRoot: Opt.some(common.Hash256()),
expectedError: engineApiInvalidPayloadAttributes,
),
expectationDescription: """
@ -641,7 +641,7 @@ let cancunTestListA* = [
NewPayloads(
expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK,
fcUOnPayloadRequest: BaseForkchoiceUpdatedCustomizer(
beaconRoot: some(common.Hash256()),
beaconRoot: Opt.some(common.Hash256()),
),
),
SendBlobTransactions(
@ -652,7 +652,7 @@ let cancunTestListA* = [
NewPayloads(
expectedIncludedBlobCount: MAX_BLOBS_PER_BLOCK,
fcUOnPayloadRequest: BaseForkchoiceUpdatedCustomizer(
beaconRoot: some(toHash(1.u256)),
beaconRoot: Opt.some(toHash(1.u256)),
),
),
]
@ -756,7 +756,7 @@ let cancunTestListA* = [
NewPayloads(
newPayloadCustomizer: UpgradeNewPayloadVersion(
payloadCustomizer: CustomPayloadData(
blobGasUsed: some(0'u64),
blobGasUsed: Opt.some(0'u64),
),
expectedError: engineApiInvalidParams,
),
@ -786,7 +786,7 @@ let cancunTestListA* = [
newPayloadCustomizer: UpgradeNewPayloadVersion(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(newSeq[BlobID]()),
blobs: Opt.some(newSeq[BlobID]()),
),
),
expectedError: engineApiInvalidParams,
@ -816,11 +816,11 @@ let cancunTestListA* = [
NewPayloads(
newPayloadCustomizer: UpgradeNewPayloadVersion(
payloadCustomizer: CustomPayloadData(
excessBlobGas: some(0'u64),
blobGasUsed: some(0'u64),
parentBeaconRoot: some(common.Hash256()),
excessBlobGas: Opt.some(0'u64),
blobGasUsed: Opt.some(0'u64),
parentBeaconRoot: Opt.some(common.Hash256()),
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(newSeq[BlobID]()),
blobs: Opt.some(newSeq[BlobID]()),
),
),
expectedError: engineApiUnsupportedFork,
@ -920,7 +920,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1)),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1)),
),
),
expectInvalidStatus: true,
@ -955,7 +955,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK+1)),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK+1)),
),
),
expectInvalidStatus: true,
@ -988,7 +988,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobListByIndex(BlobID(TARGET_BLOBS_PER_BLOCK-1), 0)),
blobs: Opt.some(getBlobListByIndex(BlobID(TARGET_BLOBS_PER_BLOCK-1), 0)),
),
),
expectInvalidStatus: true,
@ -1021,7 +1021,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK, BlobID(TARGET_BLOBS_PER_BLOCK-1))),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK, BlobID(TARGET_BLOBS_PER_BLOCK-1))),
),
),
expectInvalidStatus: true,
@ -1054,7 +1054,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1, BlobID(TARGET_BLOBS_PER_BLOCK))),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1, BlobID(TARGET_BLOBS_PER_BLOCK))),
),
),
expectInvalidStatus: true,
@ -1087,7 +1087,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK)),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK)),
hashVersions: @[VERSIONED_HASH_VERSION_KZG.byte, (VERSIONED_HASH_VERSION_KZG + 1).byte],
),
),
@ -1121,7 +1121,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: none(seq[BlobID]),
blobs: Opt.none(seq[BlobID]),
),
),
expectedError: engineApiInvalidParams,
@ -1154,7 +1154,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(newSeq[BlobID]()),
blobs: Opt.some(newSeq[BlobID]()),
),
),
expectInvalidStatus: true,
@ -1182,7 +1182,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(@[BlobID(0)]),
blobs: Opt.some(@[BlobID(0)]),
),
),
expectInvalidStatus: true,
@ -1226,7 +1226,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1)),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1)),
),
),
expectInvalidStatus: true,
@ -1268,7 +1268,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK+1)),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK+1)),
),
),
expectInvalidStatus: true,
@ -1308,7 +1308,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobListByIndex(BlobID(TARGET_BLOBS_PER_BLOCK-1), 0)),
blobs: Opt.some(getBlobListByIndex(BlobID(TARGET_BLOBS_PER_BLOCK-1), 0)),
),
),
expectInvalidStatus: true,
@ -1348,7 +1348,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK, BlobID(TARGET_BLOBS_PER_BLOCK-1))),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK, BlobID(TARGET_BLOBS_PER_BLOCK-1))),
),
),
expectInvalidStatus: true,
@ -1388,7 +1388,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1, BlobID(TARGET_BLOBS_PER_BLOCK))),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK-1, BlobID(TARGET_BLOBS_PER_BLOCK))),
),
),
expectInvalidStatus: true,
@ -1428,7 +1428,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(getBlobList(0, TARGET_BLOBS_PER_BLOCK)),
blobs: Opt.some(getBlobList(0, TARGET_BLOBS_PER_BLOCK)),
hashVersions: @[VERSIONED_HASH_VERSION_KZG.byte, (VERSIONED_HASH_VERSION_KZG + 1).byte],
),
),
@ -1469,7 +1469,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: none(seq[BlobID]),
blobs: Opt.none(seq[BlobID]),
),
),
expectedError: engineApiInvalidParams,
@ -1509,7 +1509,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(newSeq[BlobID]()),
blobs: Opt.some(newSeq[BlobID]()),
),
),
expectInvalidStatus: true,
@ -1543,7 +1543,7 @@ let cancunTestListA* = [
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
versionedHashesCustomizer: VersionedHashesCustomizer(
blobs: some(@[BlobID(0)]),
blobs: Opt.some(@[BlobID(0)]),
),
),
expectInvalidStatus: true,
@ -1568,7 +1568,7 @@ let cancunTestListA* = [
NewPayloads(
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
blobGasUsed: some(1'u64),
blobGasUsed: Opt.some(1'u64),
),
expectInvalidStatus: true,
),
@ -1589,7 +1589,7 @@ let cancunTestListA* = [
NewPayloads(
newPayloadCustomizer: BaseNewPayloadVersionCustomizer(
payloadCustomizer: CustomPayloadData(
blobGasUsed: some(GAS_PER_BLOB.uint64),
blobGasUsed: Opt.some(GAS_PER_BLOB.uint64),
),
expectInvalidStatus: true,
),
@ -1905,7 +1905,7 @@ proc makeCancunTest(): seq[EngineSpec] =
result.add InvalidPayloadTestCase(
mainFork : ForkCancun,
txType : some(TxEIP4844),
txType : Opt.some(TxEIP4844),
invalidField : invalidField,
syncing : syncing,
invalidDetectedOnSync: invalidDetectedOnSync,
@ -1915,26 +1915,26 @@ proc makeCancunTest(): seq[EngineSpec] =
# Invalid Transaction ChainID Tests
result.add InvalidTxChainIDTest(
mainFork: ForkCancun,
txType : some(TxEIP4844),
txType : Opt.some(TxEIP4844),
)
result.add PayloadBuildAfterInvalidPayloadTest(
mainFork: ForkCancun,
txType : some(TxEIP4844),
txType : Opt.some(TxEIP4844),
invalidField: InvalidParentBeaconBlockRoot,
)
# Suggested Fee Recipient Tests (New Transaction Type)
result.add SuggestedFeeRecipientTest(
mainFork: ForkCancun,
txType : some(TxEIP4844),
txType : Opt.some(TxEIP4844),
transactionCount: 1, # Only one blob tx gets through due to blob gas limit
)
# Prev Randao Tests (New Transaction Type)
result.add PrevRandaoTransactionTest(
mainFork: ForkCancun,
txType : some(TxEIP4844),
txType : Opt.some(TxEIP4844),
)
proc getGenesisProc(cs: BaseSpec, param: NetworkParams) =
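For reference, the some/none call sites above migrate to the Opt type from nim-results; a minimal self-contained sketch of that API (illustrative only, using a plain int instead of the test types):

import results

var maybeGas = Opt.none(int)          # empty value, replaces none(int)
doAssert maybeGas.isNone
maybeGas = Opt.some(75000)            # wrapped value, replaces some(75000)
doAssert maybeGas.isSome and maybeGas.get == 75000
doAssert Opt.none(int).get(0) == 0    # get() also accepts a fallback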


@ -40,7 +40,7 @@ type
payloadProductionClientDelay*: int
# Block production related
blockTimestampIncrement*: Option[int]
blockTimestampIncrement*: Opt[int]
# Block Production State
clients : ClientPool
@ -64,21 +64,21 @@ type
latestHeadNumber* : uint64
latestHeader* : common.BlockHeader
latestPayloadBuilt* : ExecutionPayload
latestBlockValue* : Option[UInt256]
latestBlobsBundle* : Option[BlobsBundleV1]
latestShouldOverrideBuilder*: Option[bool]
latestBlockValue* : Opt[UInt256]
latestBlobsBundle* : Opt[BlobsBundleV1]
latestShouldOverrideBuilder*: Opt[bool]
latestPayloadAttributes*: PayloadAttributes
latestExecutedPayload* : ExecutableData
latestForkchoice* : ForkchoiceStateV1
# Merge related
firstPoSBlockNumber* : Option[uint64]
firstPoSBlockNumber* : Opt[uint64]
ttdReached* : bool
transitionPayloadTimestamp: Option[int]
transitionPayloadTimestamp: Opt[int]
chainTotalDifficulty : UInt256
# Shanghai related
nextWithdrawals* : Option[seq[WithdrawalV1]]
nextWithdrawals* : Opt[seq[WithdrawalV1]]
BlockProcessCallbacks* = object
onPayloadProducerSelected* : proc(): bool {.gcsafe.}
@ -102,7 +102,7 @@ func latestExecutableData*(cl: CLMocker): ExecutableData =
basePayload: cl.latestPayloadBuilt,
beaconRoot : ethHash cl.latestPayloadAttributes.parentBeaconBlockRoot,
attr : cl.latestPayloadAttributes,
versionedHashes: some(collectBlobHashes(cl.latestPayloadBuilt.transactions)),
versionedHashes: Opt.some(collectBlobHashes(cl.latestPayloadBuilt.transactions)),
)
func latestPayloadNumber*(h: Table[uint64, ExecutionPayload]): uint64 =
@ -153,10 +153,10 @@ proc waitForTTD*(cl: CLMocker): Future[bool] {.async.} =
error "CLMocker: timeout while waiting for TTD"
return false
echo "CLMocker: TTD has been reached at block ", header.blockNumber
echo "CLMocker: TTD has been reached at block ", header.number
cl.latestHeader = header
cl.headerHistory[header.blockNumber.truncate(uint64)] = header
cl.headerHistory[header.number] = header
cl.ttdReached = true
let headerHash = BlockHash(common.blockHash(cl.latestHeader).data)
@ -167,9 +167,9 @@ proc waitForTTD*(cl: CLMocker): Future[bool] {.async.} =
cl.latestForkchoice.finalizedBlockHash = headerHash
# Reset transition values
cl.latestHeadNumber = cl.latestHeader.blockNumber.truncate(uint64)
cl.latestHeadNumber = cl.latestHeader.number
cl.headHashHistory = @[]
cl.firstPoSBlockNumber = none(uint64)
cl.firstPoSBlockNumber = Opt.none(uint64)
# Prepare initial forkchoice, to be sent to the transition payload producer
cl.latestForkchoice = ForkchoiceStateV1()
@ -195,7 +195,6 @@ proc isBlockPoS*(cl: CLMocker, bn: common.BlockNumber): bool =
return false
let number = cl.firstPoSBlockNumber.get()
let bn = bn.truncate(uint64)
if number > bn:
return false
@ -226,7 +225,7 @@ func getNextBlockTimestamp(cl: CLMocker): EthTime =
return EthTime cl.transitionPayloadTimestamp.get
return cl.latestHeader.timestamp + cl.getTimestampIncrement()
func setNextWithdrawals(cl: CLMocker, nextWithdrawals: Option[seq[WithdrawalV1]]) =
func setNextWithdrawals(cl: CLMocker, nextWithdrawals: Opt[seq[WithdrawalV1]]) =
cl.nextWithdrawals = nextWithdrawals
func isShanghai(cl: CLMocker, timestamp: Quantity): bool =
@ -259,7 +258,7 @@ proc pickNextPayloadProducer(cl: CLMocker): bool =
let latestHeader = res.get
let lastBlockHash = latestHeader.blockHash
if cl.latestHeader.blockHash != lastBlockHash or
cl.latestHeadNumber != latestHeader.blockNumber.truncate(uint64):
cl.latestHeadNumber != latestHeader.number:
# Selected client latest block hash does not match canonical chain, try again
cl.nextBlockProducer = nil
continue
@ -285,16 +284,16 @@ proc generatePayloadAttributes(cl: CLMocker) =
if cl.isCancun(timestamp):
# Write a deterministic hash based on the block number
let beaconRoot = timestampToBeaconRoot(timestamp)
cl.latestPayloadAttributes.parentBeaconBlockRoot = some(beaconRoot)
cl.latestPayloadAttributes.parentBeaconBlockRoot = Opt.some(beaconRoot)
# Save random value
let number = cl.latestHeader.blockNumber.truncate(uint64) + 1
let number = cl.latestHeader.number + 1
cl.prevRandaoHistory[number] = nextPrevRandao
proc requestNextPayload(cl: CLMocker): bool =
let version = cl.latestPayloadAttributes.version
let client = cl.nextBlockProducer.client
let res = client.forkchoiceUpdated(version, cl.latestForkchoice, some(cl.latestPayloadAttributes))
let res = client.forkchoiceUpdated(version, cl.latestForkchoice, Opt.some(cl.latestPayloadAttributes))
if res.isErr:
error "CLMocker: Could not send forkchoiceUpdated", version=version, msg=res.error
return false
@ -371,10 +370,10 @@ proc getNextPayload(cl: CLMocker): bool =
get=cl.latestHeader.blockHash
return false
if cl.latestPayloadBuilt.blockNumber.uint64.toBlockNumber != cl.latestHeader.blockNumber + 1.toBlockNumber:
if cl.latestPayloadBuilt.blockNumber.uint64 != cl.latestHeader.number + 1'u64:
error "CLMocker: Incorrect Number on payload built",
expect=cl.latestPayloadBuilt.blockNumber.uint64,
get=cl.latestHeader.blockNumber+1.toBlockNumber
get=cl.latestHeader.number+1'u64
return false
return true
@ -459,7 +458,7 @@ proc broadcastForkchoiceUpdated(cl: CLMocker,
version: Version,
update: ForkchoiceStateV1):
Result[ForkchoiceUpdatedResponse, string] =
eng.client.forkchoiceUpdated(version, update, none(PayloadAttributes))
eng.client.forkchoiceUpdated(version, update, Opt.none(PayloadAttributes))
proc broadcastForkchoiceUpdated*(cl: CLMocker,
version: Version,
@ -522,7 +521,7 @@ proc makeNextWithdrawals(cl: CLMocker): seq[WithdrawalV1] =
withdrawalIndex += 1
withdrawals[i] = WithdrawalV1(
index: w3Qty withdrawalIndex,
validatorIndex: w3Qty i,
validatorIndex: Quantity i,
address: w3Address i,
amount: w3Qty 100'u64,
)
@ -532,7 +531,7 @@ proc makeNextWithdrawals(cl: CLMocker): seq[WithdrawalV1] =
proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe.} =
doAssert(cl.ttdReached)
cl.currentPayloadNumber = cl.latestHeader.blockNumber.truncate(uint64) + 1'u64
cl.currentPayloadNumber = cl.latestHeader.number + 1'u64
if not cl.pickNextPayloadProducer():
return false
@ -540,7 +539,7 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
# `OnPayloadProducerSelected` callback
if cl.nextWithdrawals.isNone:
let nw = cl.makeNextWithdrawals()
cl.setNextWithdrawals(some(nw))
cl.setNextWithdrawals(Opt.some(nw))
if cb.onPayloadProducerSelected != nil:
if not cb.onPayloadProducerSelected():
@ -557,7 +556,7 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
if not cl.requestNextPayload():
return false
cl.setNextWithdrawals(none(seq[WithdrawalV1]))
cl.setNextWithdrawals(Opt.none(seq[WithdrawalV1]))
if cb.onRequestNextPayload != nil:
if not cb.onRequestNextPayload():
@ -624,8 +623,8 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
# Broadcast forkchoice updated with new FinalizedBlock to all clients
# Save the number of the first PoS block
if cl.firstPoSBlockNumber.isNone:
let number = cl.latestHeader.blockNumber.truncate(uint64) + 1
cl.firstPoSBlockNumber = some(number)
let number = cl.latestHeader.number + 1
cl.firstPoSBlockNumber = Opt.some(number)
# Save the header of the latest block in the PoS chain
cl.latestHeadNumber = cl.latestHeadNumber + 1
@ -655,9 +654,9 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
return false
# mixHash == prevRandao
if newHeader.mixDigest != cl.prevRandaoHistory[cl.latestHeadNumber]:
if newHeader.mixHash != cl.prevRandaoHistory[cl.latestHeadNumber]:
error "CLMocker: Client produced a new header with incorrect mixHash",
get = newHeader.mixDigest.data.toHex,
get = newHeader.mixHash.data.toHex,
expect = cl.prevRandaoHistory[cl.latestHeadNumber].data.toHex
return false
@ -675,7 +674,7 @@ proc produceSingleBlock*(cl: CLMocker, cb: BlockProcessCallbacks): bool {.gcsafe
cl.latestHeader = newHeader
cl.headerHistory[cl.latestHeadNumber] = cl.latestHeader
echo "CLMocker: New block produced: number=", newHeader.blockNumber,
echo "CLMocker: New block produced: number=", newHeader.number,
" hash=", newHeader.blockHash
return true
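The truncate(uint64) round-trips disappear above because BlockHeader.number (formerly blockNumber) behaves as a plain uint64 after the nim-eth bump; a small illustrative sketch (hypothetical header value, assuming the post-bump eth/common):

import eth/common

let header = BlockHeader(number: 41)   # hypothetical header
let next = header.number + 1'u64       # no .truncate(uint64) needed anymore
doAssert next == 42'u64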


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -128,7 +128,7 @@ method execute(cs: BadHashOnNewPayload, env: TestEnv): bool =
# Run test after the new payload has been obtained
onGetPayload: proc(): bool =
var customizer = CustomPayloadData(
parentHash: some(ethHash shadow.payload.blockHash),
parentHash: Opt.some(ethHash shadow.payload.blockHash),
)
shadow.payload = customizer.customizePayload(env.clMock.latestExecutableData)


@ -40,7 +40,7 @@ method getForkConfig*(cs: ForkIDSpec): ChainConfig =
# Merge fork happen at block 0
let mainFork = cs.getMainFork()
if mainFork == ForkParis:
forkConfig.mergeForkBlock = some(0.u256)
forkConfig.mergeForkBlock = Opt.some(0'u64)
return forkConfig
method execute(cs: ForkIDSpec, env: TestEnv): bool =


@ -49,11 +49,11 @@ method execute(cs: InconsistentForkchoiceTest, env: TestEnv): bool =
onGetPayload: proc(): bool =
# Generate and send an alternative side chain
var customData = CustomPayloadData(
extraData: some(@[0x01.byte])
extraData: Opt.some(@[0x01.byte])
)
if shadow.alt.len > 0:
customData.parentHash = some(ethHash shadow.alt[^1].blockHash)
customData.parentHash = Opt.some(ethHash shadow.alt[^1].blockHash)
let altPayload = customData.customizePayload(env.clMock.latestExecutableData)
shadow.alt.add altPayload
@ -140,9 +140,9 @@ method execute(cs: ForkchoiceUpdatedUnknownBlockHashTest, env: TestEnv): bool =
payloadAttributes.timestamp = w3Qty(payloadAttributes.timestamp, 1)
# Test again using PayloadAttributes, should also return SYNCING and no PayloadID
r = env.engine.client.forkchoiceUpdated(version, fcu, some(payloadAttributes))
r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(payloadAttributes))
r.expectPayloadStatus(PayloadExecutionStatus.syncing)
r.expectPayloadID(none(PayloadID))
r.expectPayloadID(Opt.none(PayloadID))
else:
let pbRes = env.clMock.produceSingleBlock(BlockProcessCallbacks(
# Run test after a new payload has been broadcast
@ -167,7 +167,7 @@ method execute(cs: ForkchoiceUpdatedUnknownBlockHashTest, env: TestEnv): bool =
payloadAttributes.suggestedFeeRecipient = w3Address()
# Test again using PayloadAttributes, should also return INVALID and no PayloadID
r = env.engine.client.forkchoiceUpdated(version, fcu, some(payloadAttributes))
r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(payloadAttributes))
r.expectError()
return true
))


@ -73,7 +73,7 @@ method execute(cs: InvalidMissingAncestorReOrgTest, env: TestEnv): bool =
# Send the transaction to the globals.PrevRandaoContractAddr
let eng = env.clMock.nextBlockProducer
let ok = env.sendNextTx(eng, BaseTx(
recipient: some(prevRandaoContractAddr),
recipient: Opt.some(prevRandaoContractAddr),
amount: 1.u256,
txType: cs.txType,
gasLimit: 75000,
@ -86,8 +86,8 @@ method execute(cs: InvalidMissingAncestorReOrgTest, env: TestEnv): bool =
onGetPayload: proc(): bool =
# Insert extraData to ensure we deviate from the main payload, which contains empty extradata
let customizer = CustomPayloadData(
parentHash: some(ethHash shadow.payloads[^1].blockHash),
extraData: some(@[0x01.byte]),
parentHash: Opt.some(ethHash shadow.payloads[^1].blockHash),
extraData: Opt.some(@[0x01.byte]),
)
var sidePayload = customizer.customizePayload(env.clMock.latestExecutableData)
@ -244,7 +244,7 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
if not cs.emptyTransactions:
# Send the transaction to the globals.PrevRandaoContractAddr
let tc = BaseTx(
recipient: some(prevRandaoContractAddr),
recipient: Opt.some(prevRandaoContractAddr),
amount: 1.u256,
txType: cs.txType,
gasLimit: 75000,
@ -259,8 +259,8 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
# Insert extraData to ensure we deviate from the main payload, which contains empty extradata
pHash = shadow.payloads[^1].blockHash
customizer = CustomPayloadData(
parentHash: some(ethHash pHash),
extraData: some(@[0x01.byte]),
parentHash: Opt.some(ethHash pHash),
extraData: Opt.some(@[0x01.byte]),
)
sidePayload = customizer.customizePayload(env.clMock.latestExecutableData)
@ -273,7 +273,7 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
#if shadow.payloads.len == cs.invalidIndex:
# var uncle *types.Block
# if cs.invalidField == InvalidOmmers:
# let number = sideBlock.blockNumber.uint64-1
# let number = sideBlock.number.uint64-1
# doAssert(env.clMock.executedPayloadHistory.hasKey(number), "FAIL: Unable to get uncle block")
# let unclePayload = env.clMock.executedPayloadHistory[number]
# # Uncle is a PoS payload
@ -340,7 +340,7 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
fatal "TEST ISSUE - Secondary Node has invalid blockHash",
got=head.blockHash.short,
want=shadow.payloads[shadow.n-1].blockHash.short,
gotNum=head.blockNumber,
gotNum=head.number,
wantNum=shadow.payloads[shadow.n].blockNumber
info "Secondary Node has correct block"
@ -356,7 +356,7 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
let head = res.get
info "Latest block on main client before sync",
hash=head.blockHash.short,
number=head.blockNumber
number=head.number
# If we are syncing through p2p, we need to keep polling until the client syncs the missing payloads
let period = chronos.milliseconds(500)
@ -390,8 +390,8 @@ method execute(cs: InvalidMissingAncestorReOrgSyncTest, env: TestEnv): bool =
fatal "Unable to get latest block: ", msg=res.error
# Print last shadow.n blocks, for debugging
let latestNumber = res.get.blockNumber.truncate(int64)
var k = latestNumber - int64(shadow.n)
let latestNumber = res.get.number
var k = latestNumber - uint64(shadow.n)
if k < 0: k = 0
while k <= latestNumber:


@ -74,7 +74,7 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool =
let ok = env.sendNextTx(
eng,
BaseTx(
recipient: some(prevRandaoContractAddr),
recipient: Opt.some(prevRandaoContractAddr),
amount: 1.u256,
txType: cs.txType,
gasLimit: 75000.GasInt,
@ -173,7 +173,7 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool =
# (payloadStatus: (status: INVALID, latestValidHash: null, validationError: errorMessage | null), payloadId: null)
# obtained from the Payload validation process if the payload is deemed INVALID
version = env.engine.version(shadow.alteredPayload.timestamp)
let s = env.engine.client.forkchoiceUpdated(version, fcState, some(attr))
let s = env.engine.client.forkchoiceUpdated(version, fcState, Opt.some(attr))
if not cs.syncing:
# Execution specification:
# (payloadStatus: (status: INVALID, latestValidHash: null, validationError: errorMessage | null), payloadId: null)
@ -261,7 +261,7 @@ method execute(cs: InvalidPayloadTestCase, env: TestEnv): bool =
return true
let customizer = CustomPayloadData(
parentHash: some(ethHash shadow.alteredPayload.blockHash),
parentHash: Opt.some(ethHash shadow.alteredPayload.blockHash),
)
let followUpAlteredPayload = customizer.customizePayload(env.clMock.latestExecutableData)
@ -332,12 +332,12 @@ method execute(cs: PayloadBuildAfterInvalidPayloadTest, env: TestEnv): bool =
# Get a payload from the invalid payload producer and invalidate it
let
customizer = BasePayloadAttributesCustomizer(
prevRandao: some(common.Hash256()),
suggestedFeerecipient: some(ZeroAddr),
prevRandao: Opt.some(common.Hash256()),
suggestedFeerecipient: Opt.some(ZeroAddr),
)
payloadAttributes = customizer.getPayloadAttributes(env.clMock.latestPayloadAttributes)
version = env.engine.version(env.clMock.latestHeader.timestamp)
r = invalidPayloadProducer.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, some(payloadAttributes))
r = invalidPayloadProducer.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(payloadAttributes))
r.expectPayloadStatus(PayloadExecutionStatus.valid)
# Wait for the payload to be produced by the EL
@ -352,8 +352,8 @@ method execute(cs: PayloadBuildAfterInvalidPayloadTest, env: TestEnv): bool =
let basePayload = s.get.executionPayload
var src = ExecutableData(basePayload: basePayload)
if versione == Version.V3:
src.beaconRoot = some(common.Hash256())
src.versionedHashes = some(collectBlobHashes(basePayload.transactions))
src.beaconRoot = Opt.some(common.Hash256())
src.versionedHashes = Opt.some(collectBlobHashes(basePayload.transactions))
inv_p = env.generateInvalidPayload(src, InvalidStateRoot)
@ -407,7 +407,7 @@ method execute(cs: InvalidTxChainIDTest, env: TestEnv): bool =
# Run test after a new payload has been broadcast
onPayloadAttributesGenerated: proc(): bool =
let txCreator = BaseTx(
recipient: some(prevRandaoContractAddr),
recipient: Opt.some(prevRandaoContractAddr),
amount: 1.u256,
txType: cs.txType,
gasLimit: 75000,
@ -427,7 +427,7 @@ method execute(cs: InvalidTxChainIDTest, env: TestEnv): bool =
chainId = eng.com.chainId
let txCustomizerData = CustomTransactionData(
chainID: some((chainId.uint64 + 1'u64).ChainId)
chainID: Opt.some((chainId.uint64 + 1'u64).ChainId)
)
shadow.invalidTx = tx


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -28,10 +28,10 @@ method getForkConfig*(cs: NonZeroPreMergeFork): ChainConfig =
let forkConfig = procCall getForkConfig(BaseSpec(cs))
if forkConfig.isNil:
return nil
# Merge fork & pre-merge happen at block 1
forkConfig.londonBlock = some(1.u256)
forkConfig.mergeForkBlock = some(1.u256)
forkConfig.londonBlock = Opt.some(1'u64)
forkConfig.mergeForkBlock = Opt.some(1'u64)
# Post-merge fork happens at block 2
let mainFork = BaseSpec(cs).getMainFork()


@ -66,11 +66,11 @@ method execute(cs: InvalidPayloadAttributesTest, env: TestEnv): bool =
let version = env.engine.version(env.clMock.latestPayloadBuilt.timestamp)
if cs.syncing:
# If we are SYNCING, the outcome should be SYNCING regardless of the validity of the payload attributes
let r = env.engine.client.forkchoiceUpdated(version, fcu, some(attr))
let r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(attr))
r.expectPayloadStatus(PayloadExecutionStatus.syncing)
r.expectPayloadID(none(PayloadID))
r.expectPayloadID(Opt.none(PayloadID))
else:
let r = env.engine.client.forkchoiceUpdated(version, fcu, some(attr))
let r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(attr))
r.expectErrorCode(engineApiInvalidPayloadAttributes)
# Check that the forkchoice was applied, regardless of the error


@ -117,7 +117,7 @@ method execute(cs: InOrderPayloadExecutionTest, env: TestEnv): bool =
# We send the transactions after we got the Payload ID, before the CLMocker gets the prepared Payload
onPayloadProducerSelected: proc(): bool =
let tc = BaseTx(
recipient: some(shadow.recipient),
recipient: Opt.some(shadow.recipient),
amount: shadow.amountPerTx,
txType: cs.txType,
gasLimit: 75000,
@ -220,7 +220,7 @@ method execute(cs: MultiplePayloadsExtendingCanonicalChainTest, env: TestEnv): b
onPayloadProducerSelected: proc(): bool =
let recipient = EthAddress.randomBytes()
let tc = BaseTx(
recipient: some(recipient),
recipient: Opt.some(recipient),
txType: cs.txType,
gasLimit: 75000,
)
@ -245,7 +245,7 @@ method execute(cs: MultiplePayloadsExtendingCanonicalChainTest, env: TestEnv): b
for i in 0..<payloadCount:
let newPrevRandao = common.Hash256.randomBytes()
let customizer = CustomPayloadData(
prevRandao: some(newPrevRandao),
prevRandao: Opt.some(newPrevRandao),
)
let newPayload = customizer.customizePayload(basePayload)
let version = env.engine.version(newPayload.timestamp)
@ -304,7 +304,7 @@ method execute(cs: NewPayloadOnSyncingClientTest, env: TestEnv): bool =
onPayloadProducerSelected: proc(): bool =
# Send at least one transaction per payload
let tc = BaseTx(
recipient: some(shadow.recipient),
recipient: Opt.some(shadow.recipient),
txType: cs.txType,
gasLimit: 75000,
)
@ -331,7 +331,7 @@ method execute(cs: NewPayloadOnSyncingClientTest, env: TestEnv): bool =
onPayloadProducerSelected: proc(): bool =
# Send at least one transaction per payload
let tc = BaseTx(
recipient: some(shadow.recipient),
recipient: Opt.some(shadow.recipient),
txType: cs.txType,
gasLimit: 75000,
)
@ -353,8 +353,8 @@ method execute(cs: NewPayloadOnSyncingClientTest, env: TestEnv): bool =
suggestedFeeRecipient = w3Address()
let customizer = BasePayloadAttributesCustomizer(
prevRandao: some(ethHash random),
suggestedFeerecipient: some(ethAddr suggestedFeeRecipient),
prevRandao: Opt.some(ethHash random),
suggestedFeerecipient: Opt.some(ethAddr suggestedFeeRecipient),
)
let newAttr = customizer.getPayloadAttributes(env.clMock.latestPayloadAttributes)
@ -365,7 +365,7 @@ method execute(cs: NewPayloadOnSyncingClientTest, env: TestEnv): bool =
)
var version = env.engine.version(env.clMock.latestPayloadBuilt.timestamp)
var s = env.engine.client.forkchoiceUpdated(version, fcu, some(newAttr))
var s = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(newAttr))
s.expectPayloadStatus(PayloadExecutionStatus.syncing)
# Send the previous payload to be able to continue
@ -420,7 +420,7 @@ method execute(cs: NewPayloadWithMissingFcUTest, env: TestEnv): bool =
onPayloadProducerSelected: proc(): bool =
let recipient = common.EthAddress.randomBytes()
let tc = BaseTx(
recipient: some(recipient),
recipient: Opt.some(recipient),
txType: cs.txType,
gasLimit: 75000,
)


@ -68,11 +68,11 @@ method execute(cs: UniquePayloadIDTest, env: TestEnv): bool =
let newWithdrawal = WithdrawalV1()
var wd = attr.withdrawals.get
wd.add newWithdrawal
attr.withdrawals = some(wd)
attr.withdrawals = Opt.some(wd)
of PayloadAttributesRemoveWithdrawal:
var wd = attr.withdrawals.get
wd.delete(0)
attr.withdrawals = some(wd)
attr.withdrawals = Opt.some(wd)
of PayloadAttributesModifyWithdrawalAmount,
PayloadAttributesModifyWithdrawalIndex,
PayloadAttributesModifyWithdrawalValidator,
@ -98,16 +98,16 @@ method execute(cs: UniquePayloadIDTest, env: TestEnv): bool =
return false
wds[0] = wd
attr.withdrawals = some(wds)
attr.withdrawals = Opt.some(wds)
of PayloadAttributesParentBeaconRoot:
testCond attr.parentBeaconBlockRoot.isSome:
fatal "Cannot modify parent beacon root when there is no parent beacon root"
let newBeaconRoot = attr.parentBeaconBlockRoot.get.plusOne
attr.parentBeaconBlockRoot = some(newBeaconRoot)
attr.parentBeaconBlockRoot = Opt.some(newBeaconRoot)
# Request the payload with the modified attributes and add the payload ID to the list of known IDs
let version = env.engine.version(env.clMock.latestHeader.timestamp)
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, some(attr))
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(attr))
r.expectNoError()
testCond env.clMock.addPayloadID(env.engine, r.get.payloadID.get)
return true


@ -41,7 +41,7 @@ method execute(cs: PrevRandaoTransactionTest, env: TestEnv): bool =
testCond env.clMock.produceSingleBlock(BlockProcessCallbacks())
var shadow = Shadow(
startBlockNumber: env.clMock.latestHeader.blockNumber.truncate(uint64) + 1,
startBlockNumber: env.clMock.latestHeader.number + 1,
# Send transactions in PoS, the value of the storage in these blocks must match the prevRandao value
blockCount: 10,
currentTxIndex: 0,
@ -53,7 +53,7 @@ method execute(cs: PrevRandaoTransactionTest, env: TestEnv): bool =
let pbRes = env.clMock.produceBlocks(shadow.blockCount, BlockProcessCallbacks(
onPayloadProducerSelected: proc(): bool =
let tc = BaseTx(
recipient: some(prevRandaoContractAddr),
recipient: Opt.some(prevRandaoContractAddr),
amount: 0.u256,
txType: cs.txType,
gasLimit: 75000,
@ -69,7 +69,7 @@ method execute(cs: PrevRandaoTransactionTest, env: TestEnv): bool =
,
onForkchoiceBroadcast: proc(): bool =
# Check the transaction tracing, which is client specific
let expectedPrevRandao = env.clMock.prevRandaoHistory[env.clMock.latestHeader.blockNumber.truncate(uint64)+1]
let expectedPrevRandao = env.clMock.prevRandaoHistory[env.clMock.latestHeader.number+1]
let res = debugPrevRandaoTransaction(env.engine.client, shadow.txs[shadow.currentTxIndex-1], expectedPrevRandao)
testCond res.isOk:
fatal "Error during transaction tracing", msg=res.error


@ -40,7 +40,7 @@ method execute(cs: SidechainReOrgTest, env: TestEnv): bool =
# This single transaction will change its outcome based on the payload
let tc = BaseTx(
recipient: some(prevRandaoContractAddr),
recipient: Opt.some(prevRandaoContractAddr),
txType: cs.txType,
gasLimit: 75000,
)
@ -57,14 +57,14 @@ method execute(cs: SidechainReOrgTest, env: TestEnv): bool =
let alternativePrevRandao = common.Hash256.randomBytes()
let timestamp = w3Qty(env.clMock.latestPayloadBuilt.timestamp, 1)
let customizer = BasePayloadAttributesCustomizer(
timestamp: some(timestamp.uint64),
prevRandao: some(alternativePrevRandao),
timestamp: Opt.some(timestamp.uint64),
prevRandao: Opt.some(alternativePrevRandao),
)
let attr = customizer.getPayloadAttributes(env.clMock.latestPayloadAttributes)
var version = env.engine.version(env.clMock.latestPayloadBuilt.timestamp)
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, some(attr))
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(attr))
r.expectNoError()
let period = chronos.seconds(env.clMock.payloadProductionClientDelay)
@ -120,7 +120,7 @@ type
ShadowTx = ref object
payload: ExecutionPayload
nextTx: PooledTransaction
tx: Option[PooledTransaction]
tx: Opt[PooledTransaction]
sendTransaction: proc(i: int): PooledTransaction {.gcsafe.}
method withMainFork(cs: TransactionReOrgTest, fork: EngineFork): BaseSpec =
@ -159,7 +159,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool =
data[^1] = i.byte
info "transactionReorg", idx=i
let tc = BaseTx(
recipient: some(sstoreContractAddr),
recipient: Opt.some(sstoreContractAddr),
amount: 0.u256,
payload: @data,
txType: cs.txType,
@ -185,7 +185,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool =
attr.prevRandao = Web3Hash.randomBytes()
var version = env.engine.version(env.clMock.latestHeader.timestamp)
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, some(attr))
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(attr))
r.expectNoError()
testCond r.get.payloadID.isSome:
fatal "No payload ID returned by forkchoiceUpdated"
@ -201,7 +201,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool =
if cs.scenario != TransactionReOrgScenarioReOrgBackIn:
# At this point we can broadcast the transaction and it will be included in the next payload
# Data is the key where a `1` will be stored
shadow.tx = some(shadow.sendTransaction(i))
shadow.tx = Opt.some(shadow.sendTransaction(i))
# Get the receipt
let receipt = env.engine.client.txReceipt(shadow.txHash)
@ -219,7 +219,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool =
if cs.scenario in [TransactionReOrgScenarioReOrgDifferentBlock, TransactionReOrgScenarioNewPayloadOnRevert]:
# Create side payload with different hash
let customizer = CustomPayloadData(
extraData: some(@[0x01.byte])
extraData: Opt.some(@[0x01.byte])
)
shadow.payload = customizer.customizePayload(env.clMock.latestExecutableData).basePayload
@ -244,7 +244,7 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool =
payloadAttributes.suggestedFeeRecipient = w3Addr EthAddress.randomBytes()
var version = env.engine.version(env.clMock.latestHeader.timestamp)
let f = env.engine.client.forkchoiceUpdated(version, forkchoiceUpdated, some(payloadAttributes))
let f = env.engine.client.forkchoiceUpdated(version, forkchoiceUpdated, Opt.some(payloadAttributes))
f.expectPayloadStatus(PayloadExecutionStatus.valid)
# Wait a second for the client to prepare the payload with the included transaction
@ -332,13 +332,13 @@ method execute(cs: TransactionReOrgTest, env: TestEnv): bool =
txt.expectBlockHash(ethHash env.clMock.latestForkchoice.headBlockHash)
if cs.scenario != TransactionReOrgScenarioReOrgBackIn:
shadow.tx = none(PooledTransaction)
shadow.tx = Opt.none(PooledTransaction)
if cs.scenario == TransactionReOrgScenarioReOrgBackIn and i > 0:
# Reasoning: Most of the clients do not re-add blob transactions to the pool
# after a re-org, so we need to wait until the next tx is sent to actually
# verify.
shadow.tx = some(shadow.nextTx)
shadow.tx = Opt.some(shadow.nextTx)
return true
))
testCond pbRes
@ -426,7 +426,7 @@ method execute(cs: ReOrgBackToCanonicalTest, env: TestEnv): bool =
attr.prevRandao = Web3Hash.randomBytes()
var version = env.engine.version(env.clMock.latestHeader.timestamp)
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, some(attr))
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(attr))
r.expectNoError()
testCond r.get.payloadID.isSome:
fatal "No payload ID returned by forkchoiceUpdated"
@ -447,7 +447,7 @@ method execute(cs: ReOrgBackToCanonicalTest, env: TestEnv): bool =
onPayloadProducerSelected: proc(): bool =
# Send a transaction on each payload of the canonical chain
let tc = BaseTx(
recipient: some(ZeroAddr),
recipient: Opt.some(ZeroAddr),
amount: 1.u256,
txType: cs.txType,
gasLimit: 75000,
@ -533,7 +533,7 @@ method execute(cs: ReOrgBackFromSyncingTest, env: TestEnv): bool =
onPayloadProducerSelected: proc(): bool =
# Send a transaction on each payload of the canonical chain
let tc = BaseTx(
recipient: some(ZeroAddr),
recipient: Opt.some(ZeroAddr),
amount: 1.u256,
txType: cs.txType,
gasLimit: 75000,
@ -554,8 +554,8 @@ method execute(cs: ReOrgBackFromSyncingTest, env: TestEnv): bool =
altParentHash = shadow.payloads[^1].blockHash
let customizer = CustomPayloadData(
parentHash: some(ethHash altParentHash),
extraData: some(@[0x01.byte]),
parentHash: Opt.some(ethHash altParentHash),
extraData: Opt.some(@[0x01.byte]),
)
let payload = customizer.customizePayload(env.clMock.latestExecutableData)
@ -629,7 +629,7 @@ method execute(cs: ReOrgPrevValidatedPayloadOnSideChainTest, env: TestEnv): bool
onPayloadProducerSelected: proc(): bool =
# Send a transaction on each payload of the canonical chain
let tc = BaseTx(
recipient: some(ZeroAddr),
recipient: Opt.some(ZeroAddr),
amount: 1.u256,
txType: cs.txType,
gasLimit: 75000,
@ -646,11 +646,11 @@ method execute(cs: ReOrgPrevValidatedPayloadOnSideChainTest, env: TestEnv): bool
# The side chain will consist simply of the same payloads with extra data appended
var customData = CustomPayloadData(
extraData: some(toSeq("side")),
extraData: Opt.some(toSeq("side")),
)
if len(shadow.payloads) > 0:
customData.parentHash = some(ethHash shadow.payloads[^1].blockHash)
customData.parentHash = Opt.some(ethHash shadow.payloads[^1].blockHash)
let payload = customData.customizePayload(env.clMock.latestExecutableData)
shadow.payloads.add payload
@ -673,8 +673,8 @@ method execute(cs: ReOrgPrevValidatedPayloadOnSideChainTest, env: TestEnv): bool
suggestedFeeRecipient = ethAddress(0x12, 0x34)
let payloadAttributesCustomizer = BasePayloadAttributesCustomizer(
prevRandao: some(prevRandao),
suggestedFeerecipient: some(suggestedFeeRecipient),
prevRandao: Opt.some(prevRandao),
suggestedFeerecipient: Opt.some(suggestedFeeRecipient),
)
let reOrgPayload = shadow.payloads[^2]
@ -687,7 +687,7 @@ method execute(cs: ReOrgPrevValidatedPayloadOnSideChainTest, env: TestEnv): bool
)
var version = env.engine.version(reOrgPayload.timestamp)
let r = env.engine.client.forkchoiceUpdated(version, fcu, some(newPayloadAttributes))
let r = env.engine.client.forkchoiceUpdated(version, fcu, Opt.some(newPayloadAttributes))
r.expectPayloadStatus(PayloadExecutionStatus.valid)
r.expectLatestValidHash(reOrgPayload.blockHash)
@ -745,8 +745,8 @@ method execute(cs: SafeReOrgToSideChainTest, env: TestEnv): bool =
altParentHash = shadow.payloads[^1].blockHash
let customizer = CustomPayloadData(
parentHash: some(ethHash altParentHash),
extraData: some(@[0x01.byte]),
parentHash: Opt.some(ethHash altParentHash),
extraData: Opt.some(@[0x01.byte]),
)
let payload = customizer.customizePayload(env.clMock.latestExecutableData)


@ -57,7 +57,7 @@ method execute(cs: BlockStatus, env: TestEnv): bool =
var callbacks = BlockProcessCallbacks(
onPayloadProducerSelected: proc(): bool =
let tc = BaseTx(
recipient: some(ZeroAddr),
recipient: Opt.some(ZeroAddr),
amount: 1.u256,
txType: cs.txType,
gasLimit: 75000,


@ -42,7 +42,7 @@ method execute(cs: SuggestedFeeRecipientTest, env: TestEnv): bool =
# Send multiple transactions
for i in 0..<cs.transactionCount:
let tc = BaseTx(
recipient: some(txRecipient),
recipient: Opt.some(txRecipient),
amount: 0.u256,
txType: cs.txType,
gasLimit: 75000,
@ -72,7 +72,7 @@ method execute(cs: SuggestedFeeRecipientTest, env: TestEnv): bool =
var feeRecipientFees = 0.u256
for tx in blockIncluded.txs:
let effGasTip = tx.effectiveGasTip(blockIncluded.header.fee)
let effGasTip = tx.effectiveGasTip(blockIncluded.header.baseFeePerGas)
let r = env.engine.client.txReceipt(tx.rlpHash)
testCond r.isOk:


@ -59,7 +59,7 @@ method execute(cs: ForkchoiceUpdatedOnPayloadRequestTest, env: TestEnv): bool =
cs.forkchoiceUpdatedCustomizer.setEngineAPIVersionResolver(env.engine.com)
let version = cs.forkchoiceUpdatedCustomizer.forkchoiceUpdatedVersion(env.clMock.latestHeader.timestamp.uint64)
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, some(attr))
let r = env.engine.client.forkchoiceUpdated(version, env.clMock.latestForkchoice, Opt.some(attr))
#r.ExpectationDescription = cs.Expectation
if expectedError != 0:
r.expectErrorCode(expectedError)


@ -46,21 +46,21 @@ template wrapTrySimpleRes(body: untyped) =
proc forkchoiceUpdatedV1*(client: RpcClient,
update: ForkchoiceStateV1,
payloadAttributes = none(PayloadAttributesV1)):
payloadAttributes = Opt.none(PayloadAttributesV1)):
Result[ForkchoiceUpdatedResponse, string] =
wrapTrySimpleRes:
client.engine_forkchoiceUpdatedV1(update, payloadAttributes)
proc forkchoiceUpdatedV2*(client: RpcClient,
update: ForkchoiceStateV1,
payloadAttributes = none(PayloadAttributes)):
payloadAttributes = Opt.none(PayloadAttributes)):
Result[ForkchoiceUpdatedResponse, string] =
wrapTrySimpleRes:
client.engine_forkchoiceUpdatedV2(update, payloadAttributes)
proc forkchoiceUpdatedV3*(client: RpcClient,
update: ForkchoiceStateV1,
payloadAttributes = none(PayloadAttributes)):
payloadAttributes = Opt.none(PayloadAttributes)):
Result[ForkchoiceUpdatedResponse, string] =
wrapTrySimpleRes:
client.engine_forkchoiceUpdatedV3(update, payloadAttributes)
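The helpers above now default their payloadAttributes parameter to Opt.none(...); the same default-parameter pattern in a self-contained form (illustrative names only, not engine API code):

import results

proc greet(name = Opt.none(string)): string =
  if name.isSome: "hello " & name.get
  else: "hello"

doAssert greet() == "hello"
doAssert greet(Opt.some("engine")) == "hello engine"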
@ -89,25 +89,25 @@ proc getPayload*(client: RpcClient,
return err(error)
ok(GetPayloadResponse(
executionPayload: executionPayload(x.executionPayload),
blockValue: some(x.blockValue),
blobsBundle: some(x.blobsBundle),
shouldOverrideBuilder: some(x.shouldOverrideBuilder),
blockValue: Opt.some(x.blockValue),
blobsBundle: Opt.some(x.blobsBundle),
shouldOverrideBuilder: Opt.some(x.shouldOverrideBuilder),
))
elif version == Version.V3:
let x = client.getPayloadV3(payloadId).valueOr:
return err(error)
ok(GetPayloadResponse(
executionPayload: executionPayload(x.executionPayload),
blockValue: some(x.blockValue),
blobsBundle: some(x.blobsBundle),
shouldOverrideBuilder: some(x.shouldOverrideBuilder),
blockValue: Opt.some(x.blockValue),
blobsBundle: Opt.some(x.blobsBundle),
shouldOverrideBuilder: Opt.some(x.shouldOverrideBuilder),
))
elif version == Version.V2:
let x = client.getPayloadV2(payloadId).valueOr:
return err(error)
ok(GetPayloadResponse(
executionPayload: executionPayload(x.executionPayload),
blockValue: some(x.blockValue)
blockValue: Opt.some(x.blockValue)
))
else:
let x = client.getPayloadV1(payloadId).valueOr:
@ -121,15 +121,15 @@ proc forkchoiceUpdated*(client: RpcClient,
attr: PayloadAttributes):
Result[ForkchoiceUpdatedResponse, string] =
case attr.version
of Version.V1: return client.forkchoiceUpdatedV1(update, some attr.V1)
of Version.V2: return client.forkchoiceUpdatedV2(update, some attr)
of Version.V3: return client.forkchoiceUpdatedV3(update, some attr)
of Version.V1: return client.forkchoiceUpdatedV1(update, Opt.some attr.V1)
of Version.V2: return client.forkchoiceUpdatedV2(update, Opt.some attr)
of Version.V3: return client.forkchoiceUpdatedV3(update, Opt.some attr)
of Version.V4: discard
proc forkchoiceUpdated*(client: RpcClient,
version: Version,
update: ForkchoiceStateV1,
attr = none(PayloadAttributes)):
attr = Opt.none(PayloadAttributes)):
Result[ForkchoiceUpdatedResponse, string] =
case version
of Version.V1: return client.forkchoiceUpdatedV1(update, attr.V1)
@ -187,8 +187,8 @@ proc newPayloadV2*(client: RpcClient,
proc newPayloadV3*(client: RpcClient,
payload: ExecutionPayload,
versionedHashes: Option[seq[VersionedHash]],
parentBeaconBlockRoot: Option[FixedBytes[32]]
versionedHashes: Opt[seq[VersionedHash]],
parentBeaconBlockRoot: Opt[FixedBytes[32]]
):
Result[PayloadStatusV1, string] =
wrapTrySimpleRes:
@ -196,8 +196,8 @@ proc newPayloadV3*(client: RpcClient,
proc newPayloadV4*(client: RpcClient,
payload: ExecutionPayload,
versionedHashes: Option[seq[VersionedHash]],
parentBeaconBlockRoot: Option[FixedBytes[32]]
versionedHashes: Opt[seq[VersionedHash]],
parentBeaconBlockRoot: Opt[FixedBytes[32]]
):
Result[PayloadStatusV1, string] =
wrapTrySimpleRes:
@ -211,7 +211,7 @@ proc collectBlobHashes(list: openArray[Web3Tx]): seq[Web3Hash] =
proc newPayload*(client: RpcClient,
payload: ExecutionPayload,
beaconRoot = none(common.Hash256)): Result[PayloadStatusV1, string] =
beaconRoot = Opt.none(common.Hash256)): Result[PayloadStatusV1, string] =
case payload.version
of Version.V1: return client.newPayloadV1(payload.V1)
of Version.V2: return client.newPayloadV2(payload.V2)
@ -232,19 +232,19 @@ proc newPayload*(client: RpcClient,
proc newPayload*(client: RpcClient,
version: Version,
payload: ExecutionPayload,
beaconRoot = none(common.Hash256)): Result[PayloadStatusV1, string] =
beaconRoot = Opt.none(common.Hash256)): Result[PayloadStatusV1, string] =
case version
of Version.V1: return client.newPayloadV1(payload)
of Version.V2: return client.newPayloadV2(payload)
of Version.V3:
let versionedHashes = collectBlobHashes(payload.transactions)
return client.newPayloadV3(payload,
some(versionedHashes),
Opt.some(versionedHashes),
w3Hash beaconRoot)
of Version.V4:
let versionedHashes = collectBlobHashes(payload.transactions)
return client.newPayloadV4(payload,
some(versionedHashes),
Opt.some(versionedHashes),
w3Hash beaconRoot)
proc newPayload*(client: RpcClient,
@ -268,61 +268,61 @@ proc exchangeCapabilities*(client: RpcClient,
wrapTrySimpleRes:
client.engine_exchangeCapabilities(methods)
proc toBlockNonce(n: Option[FixedBytes[8]]): common.BlockNonce =
proc toBlockNonce(n: Opt[FixedBytes[8]]): common.BlockNonce =
if n.isNone:
return default(BlockNonce)
n.get.bytes
proc maybeU64(n: Option[Quantity]): Option[uint64] =
proc maybeU64(n: Opt[Quantity]): Opt[uint64] =
if n.isNone:
return none(uint64)
some(n.get.uint64)
return Opt.none(uint64)
Opt.some(n.get.uint64)
proc maybeU64(n: Option[Web3BlockNumber]): Option[uint64] =
proc maybeU64(n: Opt[Web3BlockNumber]): Opt[uint64] =
if n.isNone:
return none(uint64)
some(n.get.uint64)
return Opt.none(uint64)
Opt.some(n.get.uint64)
proc maybeBool(n: Option[Quantity]): Option[bool] =
proc maybeBool(n: Opt[Quantity]): Opt[bool] =
if n.isNone:
return none(bool)
some(n.get.bool)
return Opt.none(bool)
Opt.some(n.get.bool)
proc maybeChainId(n: Option[Quantity]): Option[ChainId] =
proc maybeChainId(n: Opt[Quantity]): Opt[ChainId] =
if n.isNone:
return none(ChainId)
some(n.get.ChainId)
return Opt.none(ChainId)
Opt.some(n.get.ChainId)
proc maybeInt(n: Option[Quantity]): Option[int] =
proc maybeInt(n: Opt[Quantity]): Opt[int] =
if n.isNone:
return none(int)
some(n.get.int)
return Opt.none(int)
Opt.some(n.get.int)
proc toBlockHeader*(bc: BlockObject): common.BlockHeader =
common.BlockHeader(
blockNumber : bc.number.u256,
number : common.BlockNumber bc.number,
parentHash : ethHash bc.parentHash,
nonce : toBlockNonce(bc.nonce),
ommersHash : ethHash bc.sha3Uncles,
bloom : BloomFilter bc.logsBloom,
logsBloom : BloomFilter bc.logsBloom,
txRoot : ethHash bc.transactionsRoot,
stateRoot : ethHash bc.stateRoot,
receiptRoot : ethHash bc.receiptsRoot,
receiptsRoot : ethHash bc.receiptsRoot,
coinbase : ethAddr bc.miner,
difficulty : bc.difficulty,
extraData : bc.extraData.bytes,
mixDigest : ethHash bc.mixHash,
mixHash : ethHash bc.mixHash,
gasLimit : bc.gasLimit.GasInt,
gasUsed : bc.gasUsed.GasInt,
timestamp : EthTime bc.timestamp,
fee : bc.baseFeePerGas,
baseFeePerGas : bc.baseFeePerGas,
withdrawalsRoot: ethHash bc.withdrawalsRoot,
blobGasUsed : maybeU64(bc.blobGasUsed),
excessBlobGas : maybeU64(bc.excessBlobGas),
parentBeaconBlockRoot: ethHash bc.parentBeaconBlockRoot,
)
func vHashes(x: Option[seq[Web3Hash]]): seq[common.Hash256] =
func vHashes(x: Opt[seq[Web3Hash]]): seq[common.Hash256] =
if x.isNone: return
else: ethHashes(x.get)
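For quick reference, the eth_types header field renames applied in the toBlockHeader hunk above (and used throughout this commit), summarized as comments:

# old eth/common field   -> new eth/common field
# blockNumber            -> number
# bloom                  -> logsBloom
# receiptRoot            -> receiptsRoot
# mixDigest              -> mixHash
# fee                    -> baseFeePerGas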
@ -332,8 +332,8 @@ proc toTransaction(tx: TransactionObject): Transaction =
chainId : tx.chainId.get(0.Web3Quantity).ChainId,
nonce : tx.nonce.AccountNonce,
gasPrice : tx.gasPrice.GasInt,
maxPriorityFee : tx.maxPriorityFeePerGas.get(0.Web3Quantity).GasInt,
maxFee : tx.maxFeePerGas.get(0.Web3Quantity).GasInt,
maxPriorityFeePerGas: tx.maxPriorityFeePerGas.get(0.Web3Quantity).GasInt,
maxFeePerGas : tx.maxFeePerGas.get(0.Web3Quantity).GasInt,
gasLimit : tx.gas.GasInt,
to : ethAddr tx.to,
value : tx.value,
@ -341,7 +341,7 @@ proc toTransaction(tx: TransactionObject): Transaction =
accessList : ethAccessList(tx.accessList),
maxFeePerBlobGas: tx.maxFeePerBlobGas.get(0.u256),
versionedHashes : vHashes(tx.blobVersionedHashes),
V : tx.v.int64,
V : tx.v.uint64,
R : tx.r,
S : tx.s,
)
@ -364,10 +364,10 @@ proc toWithdrawals(list: seq[WithdrawalObject]): seq[Withdrawal] =
for wd in list:
result.add toWithdrawal(wd)
proc toWithdrawals*(list: Option[seq[WithdrawalObject]]): Option[seq[Withdrawal]] =
proc toWithdrawals*(list: Opt[seq[WithdrawalObject]]): Opt[seq[Withdrawal]] =
if list.isNone:
return none(seq[Withdrawal])
some(toWithdrawals(list.get))
return Opt.none(seq[Withdrawal])
Opt.some(toWithdrawals(list.get))
type
RPCReceipt* = object
@ -376,23 +376,23 @@ type
blockHash*: Hash256
blockNumber*: uint64
sender*: EthAddress
to*: Option[EthAddress]
to*: Opt[EthAddress]
cumulativeGasUsed*: GasInt
gasUsed*: GasInt
contractAddress*: Option[EthAddress]
contractAddress*: Opt[EthAddress]
logs*: seq[LogObject]
logsBloom*: FixedBytes[256]
recType*: ReceiptType
stateRoot*: Option[Hash256]
status*: Option[bool]
stateRoot*: Opt[Hash256]
status*: Opt[bool]
effectiveGasPrice*: GasInt
blobGasUsed*: Option[uint64]
blobGasPrice*: Option[UInt256]
blobGasUsed*: Opt[uint64]
blobGasPrice*: Opt[UInt256]
RPCTx* = object
txType*: TxType
blockHash*: Option[Hash256] # none if pending
blockNumber*: Option[uint64]
blockHash*: Opt[Hash256] # none if pending
blockNumber*: Opt[uint64]
sender*: EthAddress
gasLimit*: GasInt
gasPrice*: GasInt
@ -401,16 +401,16 @@ type
hash*: Hash256
payload*: seq[byte]
nonce*: AccountNonce
to*: Option[EthAddress]
txIndex*: Option[int]
to*: Opt[EthAddress]
txIndex*: Opt[int]
value*: UInt256
v*: int64
v*: uint64
r*: UInt256
s*: UInt256
chainId*: Option[ChainId]
accessList*: Option[seq[AccessTuple]]
maxFeePerBlobGas*: Option[UInt256]
versionedHashes*: Option[VersionedHashes]
chainId*: Opt[ChainId]
accessList*: Opt[seq[AccessTuple]]
maxFeePerBlobGas*: Opt[UInt256]
versionedHashes*: Opt[VersionedHashes]
proc toRPCReceipt(rec: ReceiptObject): RPCReceipt =
RPCReceipt(
@ -449,7 +449,7 @@ proc toRPCTx(tx: eth_api.TransactionObject): RPCTx =
to: ethAddr tx.to,
txIndex: maybeInt(tx.transactionIndex),
value: tx.value,
v: tx.v.int64,
v: tx.v.uint64,
r: tx.r,
s: tx.s,
chainId: maybeChainId(tx.chainId),
@ -546,9 +546,9 @@ proc balanceAt*(client: RpcClient, address: EthAddress): Result[UInt256, string]
let res = waitFor client.eth_getBalance(w3Addr(address), blockId("latest"))
return ok(res)
proc balanceAt*(client: RpcClient, address: EthAddress, number: UInt256): Result[UInt256, string] =
proc balanceAt*(client: RpcClient, address: EthAddress, number: common.BlockNumber): Result[UInt256, string] =
wrapTry:
let res = waitFor client.eth_getBalance(w3Addr(address), blockId(number.truncate(uint64)))
let res = waitFor client.eth_getBalance(w3Addr(address), blockId(number))
return ok(res)
proc nonceAt*(client: RpcClient, address: EthAddress): Result[AccountNonce, string] =
@ -577,7 +577,7 @@ proc storageAt*(client: RpcClient, address: EthAddress, slot: UInt256): Result[F
proc storageAt*(client: RpcClient, address: EthAddress, slot: UInt256, number: common.BlockNumber): Result[FixedBytes[32], string] =
wrapTry:
let res = waitFor client.eth_getStorageAt(w3Addr(address), slot, blockId(number.truncate(uint64)))
let res = waitFor client.eth_getStorageAt(w3Addr(address), slot, blockId(number))
return ok(res)
proc verifyPoWProgress*(client: RpcClient, lastBlockHash: Hash256): Future[Result[void, string]] {.async.} =


@ -162,7 +162,7 @@ proc close*(env: EngineEnv) =
proc setRealTTD*(env: EngineEnv) =
let genesis = env.com.genesisHeader
let realTTD = genesis.difficulty
env.com.setTTD some(realTTD)
env.com.setTTD Opt.some(realTTD)
env.ttd = realTTD
func httpPort*(env: EngineEnv): Port =


@ -36,9 +36,9 @@ import
proc getGenesis(cs: EngineSpec, param: NetworkParams) =
# Set the terminal total difficulty
let realTTD = param.genesis.difficulty + cs.ttd.u256
param.config.terminalTotalDifficulty = some(realTTD)
param.config.terminalTotalDifficulty = Opt.some(realTTD)
if param.genesis.difficulty <= realTTD:
param.config.terminalTotalDifficultyPassed = some(true)
param.config.terminalTotalDifficultyPassed = Opt.some(true)
# Set the genesis timestamp if provided
if cs.genesisTimestamp != 0:
@ -82,7 +82,7 @@ proc makeEngineTest*(): seq[EngineSpec] =
InvalidPayloadAttributesTest(
description: "Zero timestamp",
customizer: BasePayloadAttributesCustomizer(
timestamp: some(0'u64),
timestamp: Opt.some(0'u64),
),
),
InvalidPayloadAttributesTest(
@ -101,11 +101,11 @@ proc makeEngineTest*(): seq[EngineSpec] =
# Invalid Transaction ChainID Tests
result.add InvalidTxChainIDTest(
txType: some(TxLegacy),
txType: Opt.some(TxLegacy),
)
result.add InvalidTxChainIDTest(
txType: some(TxEip1559),
txType: Opt.some(TxEip1559),
)
# Invalid Ancestor Re-Org Tests (Reveal Via NewPayload)
@ -178,21 +178,21 @@ proc makeEngineTest*(): seq[EngineSpec] =
# PrevRandao opcode tests
result.add PrevRandaoTransactionTest(
txType: some(TxLegacy)
txType: Opt.some(TxLegacy)
)
result.add PrevRandaoTransactionTest(
txType: some(TxEip1559),
txType: Opt.some(TxEip1559),
)
# Suggested Fee Recipient Tests
result.add SuggestedFeeRecipientTest(
txType: some(TxLegacy),
txType: Opt.some(TxLegacy),
transactionCount: 20,
)
result.add SuggestedFeeRecipientTest(
txType: some(TxEip1559),
txType: Opt.some(TxEip1559),
transactionCount: 20,
)
@ -227,14 +227,14 @@ proc makeEngineTest*(): seq[EngineSpec] =
if invalidField != InvalidTransactionGasTipPrice:
for testTxType in [TxLegacy, TxEip1559]:
result.add InvalidPayloadTestCase(
txType: some(testTxType),
txType: Opt.some(testTxType),
invalidField: invalidField,
syncing: syncing,
invalidDetectedOnSync: invalidDetectedOnSync,
)
else:
result.add InvalidPayloadTestCase(
txType: some(TxEip1559),
txType: Opt.some(TxEip1559),
invalidField: invalidField,
syncing: syncing,
invalidDetectedOnSync: invalidDetectedOnSync,


@ -62,11 +62,11 @@ proc ecCancun(env: TestEnv): bool =
proc getCCShanghai(timestamp: int): ChainConfig =
result = getChainConfig("Shanghai")
result.shanghaiTime = some(EthTime(timestamp))
result.shanghaiTime = Opt.some(EthTime(timestamp))
proc getCCCancun(timestamp: int): ChainConfig =
result = getChainConfig("Cancun")
result.cancunTime = some(EthTime(timestamp))
result.cancunTime = Opt.some(EthTime(timestamp))
proc specExecute(ws: BaseSpec): bool =
let ws = ECSpec(ws)


@ -25,7 +25,6 @@ import
core/executor/process_block
],
chronicles,
stint,
results
{.push raises: [].}
@ -47,7 +46,7 @@ proc processBlock(
defer: dbTx.dispose()
if vmState.com.daoForkSupport and
vmState.com.daoForkBlock.get == header.blockNumber:
vmState.com.daoForkBlock.get == header.number:
vmState.mutateStateDB:
db.applyDAOHardFork()
@ -83,7 +82,7 @@ proc getVmState(c: ChainRef, header: BlockHeader):
let vmState = BaseVMState()
if not vmState.init(header, c.com):
debug "Cannot initialise VmState",
number = header.blockNumber
number = header.number
return err()
return ok(vmState)
@ -109,7 +108,7 @@ proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
return err("Could not persist header")
try:
c.db.persistTransactions(header.blockNumber, blk.transactions)
c.db.persistTransactions(header.number, blk.transactions)
c.db.persistReceipts(vmState.receipts)
if blk.withdrawals.isSome:
@ -120,7 +119,7 @@ proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
# update currentBlock *after* we persist it
# so the rpc return consistent result
# between eth_blockNumber and eth_syncing
c.com.syncCurrent = header.blockNumber
c.com.syncCurrent = header.number
dbTx.commit()
@ -131,7 +130,7 @@ proc setBlock*(c: ChainRef; blk: EthBlock): Result[void, string] =
# the parent state of the first block (as registered in `headers[0]`) was
# the canonical state before updating. So this state will be saved with
# `persistent()` together with the respective block number.
c.db.persistent(header.blockNumber - 1)
c.db.persistent(header.number - 1)
ok()


@ -22,11 +22,11 @@ import
type
BaseTx* = object of RootObj
recipient* : Option[EthAddress]
recipient* : Opt[EthAddress]
gasLimit* : GasInt
amount* : UInt256
payload* : seq[byte]
txType* : Option[TxType]
txType* : Opt[TxType]
gasTip* : GasInt
gasFee* : GasInt
blobGasFee*: UInt256
@ -58,20 +58,20 @@ type
nonce* : AccountNonce
CustSig* = object
V*: int64
V*: uint64
R*: UInt256
S*: UInt256
CustomTransactionData* = object
nonce* : Option[uint64]
gasPriceOrGasFeeCap*: Option[GasInt]
gasTipCap* : Option[GasInt]
gas* : Option[GasInt]
to* : Option[common.EthAddress]
value* : Option[UInt256]
data* : Option[seq[byte]]
chainId* : Option[ChainId]
signature* : Option[CustSig]
nonce* : Opt[uint64]
gasPriceOrGasFeeCap*: Opt[GasInt]
gasTipCap* : Opt[GasInt]
gas* : Opt[GasInt]
to* : Opt[common.EthAddress]
value* : Opt[UInt256]
data* : Opt[seq[byte]]
chainId* : Opt[ChainId]
signature* : Opt[CustSig]
const
TestAccountCount = 1000
@ -157,8 +157,8 @@ proc makeTxOfType(params: MakeTxParams, tc: BaseTx): PooledTransaction =
txType : TxEIP1559,
nonce : params.nonce,
gasLimit: tc.gasLimit,
maxFee : gasFeeCap,
maxPriorityFee: gasTipCap,
maxFeePerGas: gasFeeCap,
maxPriorityFeePerGas: gasTipCap,
to : tc.recipient,
value : tc.amount,
payload : tc.payload,
@ -182,8 +182,8 @@ proc makeTxOfType(params: MakeTxParams, tc: BaseTx): PooledTransaction =
txType : TxEIP4844,
nonce : params.nonce,
chainId : params.chainId,
maxFee : gasFeeCap,
maxPriorityFee: gasTipCap,
maxFeePerGas: gasFeeCap,
maxPriorityFeePerGas: gasTipCap,
gasLimit: tc.gasLimit,
to : tc.recipient,
value : tc.amount,
@ -324,8 +324,8 @@ proc makeTx*(params: MakeTxParams, tc: BlobTx): PooledTransaction =
txType : TxEip4844,
chainId : params.chainId,
nonce : params.nonce,
maxPriorityFee: gasTipCap,
maxFee : gasFeeCap,
maxPriorityFeePerGas: gasTipCap,
maxFeePerGas: gasFeeCap,
gasLimit : tc.gasLimit,
to : tc.recipient,
value : tc.amount,
@ -436,15 +436,15 @@ proc customizeTransaction*(sender: TxSender,
modTx.chainId = custTx.chainId.get
if custTx.gasPriceOrGasFeeCap.isSome:
modTx.maxFee = custTx.gasPriceOrGasFeeCap.get.GasInt
modTx.maxFeePerGas = custTx.gasPriceOrGasFeeCap.get.GasInt
if custTx.gasTipCap.isSome:
modTx.maxPriorityFee = custTx.gasTipCap.get.GasInt
modTx.maxPriorityFeePerGas = custTx.gasTipCap.get.GasInt
if baseTx.txType == TxEip4844:
if modTx.to.isNone:
var address: EthAddress
modTx.to = some(address)
modTx.to = Opt.some(address)
if custTx.signature.isNone:
return signTransaction(modTx, acc.key, modTx.chainId, eip155 = true)
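The makeTxOfType and customizeTransaction hunks above switch to the renamed fee fields; a minimal hypothetical EIP-1559 construction using them (assuming the post-bump eth/common Transaction type; all values are placeholders):

import eth/common, results, stint

let tx = Transaction(
  txType              : TxEip1559,
  chainId             : ChainId(1),
  nonce               : 0.AccountNonce,
  maxPriorityFeePerGas: 1.GasInt,        # was maxPriorityFee
  maxFeePerGas        : 2.GasInt,        # was maxFee
  gasLimit            : 21_000.GasInt,
  to                  : Opt.some(default(EthAddress)),
  value               : 0.u256,
)
doAssert tx.maxFeePerGas >= tx.maxPriorityFeePerGas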


@ -9,7 +9,8 @@
# according to those terms.
import
std/[options, typetraits, strutils],
std/[typetraits, strutils],
chronicles,
eth/common,
nimcrypto/[sysrand, sha2],
stew/[byteutils, endians2],
@ -33,7 +34,7 @@ type
ForkCancun = "Cancun"
BaseSpec* = ref object of RootObj
txType*: Option[TxType]
txType*: Opt[TxType]
# CL Mocker configuration for slots to `safe` and `finalized` respectively
slotsToSafe*: int
@ -56,9 +57,9 @@ type
ExecutableData* = object
basePayload*: ExecutionPayload
beaconRoot* : Option[common.Hash256]
beaconRoot* : Opt[common.Hash256]
attr* : PayloadAttributes
versionedHashes*: Option[seq[common.Hash256]]
versionedHashes*: Opt[seq[common.Hash256]]
const
DefaultTimeout* = 60 # seconds
@ -113,7 +114,7 @@ template testCond*(expr, body: untyped) =
body
return false
proc `==`*(a: Option[BlockHash], b: Option[common.Hash256]): bool =
proc `==`*(a: Opt[BlockHash], b: Opt[common.Hash256]): bool =
if a.isNone and b.isNone:
return true
if a.isSome and b.isSome:
@ -142,7 +143,7 @@ template expectPayload*(res: untyped, payload: ExecutionPayload) =
testCond x.executionPayload == payload.V3:
error "getPayloadV3 return mismatch payload"
template expectWithdrawalsRoot*(res: untyped, wdRoot: Option[common.Hash256]) =
template expectWithdrawalsRoot*(res: untyped, wdRoot: Opt[common.Hash256]) =
testCond res.isOk:
error "Unexpected error", msg=res.error
let h = res.get
@ -235,7 +236,7 @@ template expectStatus*(res: untyped, cond: PayloadExecutionStatus) =
testCond s.status == cond:
error "Unexpected newPayload status", expect=cond, get=s.status
template expectPayloadID*(res: untyped, id: Option[PayloadID]) =
template expectPayloadID*(res: untyped, id: Opt[PayloadID]) =
testCond res.isOk:
error "Unexpected expectPayloadID Error", msg=res.error
let s = res.get()
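
The hunk above is truncated by the diff view. For context, a mixed-type Opt
comparison of this shape is typically completed along the following lines; the
BlockHash and Hash256 definitions below are local stand-ins, not the real types:

# Hedged sketch of how such a comparison is usually finished.
import results

type
  BlockHash = distinct array[32, byte]
  Hash256 = object
    data: array[32, byte]

proc `==`(a: Opt[BlockHash], b: Opt[Hash256]): bool =
  if a.isNone and b.isNone:
    return true
  if a.isSome and b.isSome:
    return array[32, byte](a.get) == b.get.data
  false

doAssert Opt.none(BlockHash) == Opt.none(Hash256)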

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -196,7 +196,7 @@ let wdTestList* = [
wdBlockCount: 2,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: 1,
txPerBlock: some(0),
txPerBlock: Opt.some(0),
syncSteps: 1,
)),
TestDesc(
@ -241,7 +241,7 @@ let wdTestList* = [
wdBlockCount: 2,
wdPerBlock: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
wdAbleAccountCount: MAINNET_MAX_WITHDRAWAL_COUNT_PER_BLOCK,
txPerBlock: some(0),
txPerBlock: Opt.some(0),
syncSteps: 1,
)),
TestDesc(

View File

@ -9,7 +9,6 @@
# according to those terms.
import
std/[options],
stint,
chronicles,
chronos,
@ -34,7 +33,7 @@ type
wdAbleAccountCount*: int # Number of accounts to withdraw to (round-robin)
wdHistory*: WDHistory # Internal withdrawals history that keeps track of all withdrawals
wdAmounts*: seq[uint64] # Amounts of withdrawn wei on each withdrawal (round-robin)
txPerBlock*: Option[int] # Amount of test transactions to include in withdrawal blocks
txPerBlock*: Opt[int] # Amount of test transactions to include in withdrawal blocks
testCorrupedHashPayloads*: bool # Send a valid payload with corrupted hash
skipBaseVerifications*: bool # For code reuse of the base spec procedure
@ -90,7 +89,7 @@ func getWithdrawableAccountCount*(ws: WDBaseSpec):int =
func getGenesis*(ws: WDBaseSpec, param: NetworkParams) =
# Remove PoW altogether
param.genesis.difficulty = 0.u256
param.config.terminalTotalDifficulty = some(0.u256)
param.config.terminalTotalDifficulty = Opt.some(0.u256)
param.genesis.extraData = @[]
# Add some accounts to withdraw to with unconditional SSTOREs
@ -155,14 +154,14 @@ proc verifyContractsStorage(ws: WDBaseSpec, env: TestEnv): Result[void, string]
# Assume that forkchoice updated has been already sent
let
latestPayloadNumber = env.clMock.latestExecutedPayload.blockNumber.uint64.u256
r = env.client.storageAt(WARM_COINBASE_ADDRESS, latestPayloadNumber, latestPayloadNumber)
latestPayloadNumber = env.clMock.latestExecutedPayload.blockNumber.uint64
r = env.client.storageAt(WARM_COINBASE_ADDRESS, latestPayloadNumber.u256, latestPayloadNumber)
p = env.client.storageAt(PUSH0_ADDRESS, 0.u256, latestPayloadNumber)
if latestPayloadNumber.truncate(int) >= ws.forkHeight:
if latestPayloadNumber >= ws.forkHeight.uint64:
# Shanghai
r.expectStorageEqual(WARM_COINBASE_ADDRESS, 100.u256.w3FixedBytes) # WARM_STORAGE_READ_COST
p.expectStorageEqual(PUSH0_ADDRESS, latestPayloadNumber.w3FixedBytes) # tx succeeded
p.expectStorageEqual(PUSH0_ADDRESS, latestPayloadNumber.u256.w3FixedBytes) # tx succeeded
else:
# Pre-Shanghai
r.expectStorageEqual(WARM_COINBASE_ADDRESS, 2600.u256.w3FixedBytes) # COLD_ACCOUNT_ACCESS_COST
@ -218,11 +217,11 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
# Genesis should not contain `withdrawalsRoot` either
let r = env.client.latestHeader()
r.expectWithdrawalsRoot(none(common.Hash256))
r.expectWithdrawalsRoot(Opt.none(common.Hash256))
else:
# Genesis is post shanghai, it should contain EmptyWithdrawalsRoot
let r = env.client.latestHeader()
r.expectWithdrawalsRoot(some(EMPTY_ROOT_HASH))
r.expectWithdrawalsRoot(Opt.some(EMPTY_ROOT_HASH))
# Produce any blocks necessary to reach withdrawals fork
var pbRes = env.clMock.produceBlocks(ws.getPreWithdrawalsBlockCount, BlockProcessCallbacks(
@ -234,7 +233,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
let ok = env.sendNextTx(
env.clMock.nextBlockProducer,
BaseTx(
recipient: some(destAddr),
recipient: Opt.some(destAddr),
amount: 1.u256,
txType: ws.txType,
gasLimit: 75000.GasInt,
@ -250,11 +249,11 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
ForkchoiceStateV1(
headBlockHash: w3Hash env.clMock.latestHeader,
),
some(PayloadAttributes(
Opt.some(PayloadAttributes(
timestamp: w3Qty(env.clMock.latestHeader.timestamp, ws.getBlockTimeIncrements()),
prevRandao: w3PrevRandao(),
suggestedFeeRecipient: w3Address(),
withdrawals: some(newSeq[WithdrawalV1]()),
withdrawals: Opt.some(newSeq[WithdrawalV1]()),
))
)
let expectationDescription = "Sent pre-shanghai Forkchoice using ForkchoiceUpdatedV2 + Withdrawals, error is expected"
@ -266,11 +265,11 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
ForkchoiceStateV1(
headBlockHash: w3Hash env.clMock.latestHeader,
),
some(PayloadAttributes(
Opt.some(PayloadAttributes(
timestamp: w3Qty(env.clMock.latestHeader.timestamp, ws.getBlockTimeIncrements()),
prevRandao: w3PrevRandao(),
suggestedFeeRecipient: w3Address(),
withdrawals: none(seq[WithdrawalV1]),
withdrawals: Opt.none(seq[WithdrawalV1]),
))
)
let expectationDescription2 = "Sent pre-shanghai Forkchoice ForkchoiceUpdatedV2 + null withdrawals, no error is expected"
@ -289,7 +288,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
# `withdrawals`, it should fail.
let emptyWithdrawalsList = newSeq[Withdrawal]()
let customizer = CustomPayloadData(
withdrawals: some(emptyWithdrawalsList),
withdrawals: Opt.some(emptyWithdrawalsList),
parentBeaconRoot: ethHash env.clMock.latestPayloadAttributes.parentBeaconBlockRoot
)
let payloadPlusWithdrawals = customizer.customizePayload(env.clMock.latestExecutableData).basePayload
@ -310,7 +309,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
let r = env.client.latestHeader()
#r.ExpectationDescription = "Requested "latest" block expecting block to contain
#" withdrawalRoot=nil, because (block %d).timestamp < shanghaiTime
r.expectWithdrawalsRoot(none(common.Hash256))
r.expectWithdrawalsRoot(Opt.none(common.Hash256))
return true
,
onForkchoiceBroadcast: proc(): bool =
@ -338,11 +337,11 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
ForkchoiceStateV1(
headBlockHash: w3Hash env.clMock.latestHeader,
),
some(PayloadAttributes(
Opt.some(PayloadAttributes(
timestamp: w3Qty(env.clMock.latestHeader.timestamp, ws.getBlockTimeIncrements()),
prevRandao: w3PrevRandao(),
suggestedFeeRecipient: w3Address(),
withdrawals: none(seq[WithdrawalV1]),
withdrawals: Opt.none(seq[WithdrawalV1]),
))
)
let expectationDescription = "Sent shanghai fcu using PayloadAttributesV1, error is expected"
@ -350,7 +349,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
# Send some withdrawals
let wfb = ws.generateWithdrawalsForBlock(nextIndex, startAccount)
env.clMock.nextWithdrawals = some(w3Withdrawals wfb.wds)
env.clMock.nextWithdrawals = Opt.some(w3Withdrawals wfb.wds)
ws.wdHistory.put(env.clMock.currentPayloadNumber, wfb.wds)
# Send some transactions
@ -361,7 +360,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
let ok = env.sendNextTx(
env.clMock.nextBlockProducer,
BaseTx(
recipient: some(destAddr),
recipient: Opt.some(destAddr),
amount: 1.u256,
txType: ws.txType,
gasLimit: 75000.GasInt,
@ -468,7 +467,7 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
r.expectBalanceEqual(expectedAccountBalance)
let wds = ws.wdHistory.getWithdrawals(env.clMock.latestExecutedPayload.blockNumber.uint64)
let expectedWithdrawalsRoot = some(calcWithdrawalsRoot(wds.list))
let expectedWithdrawalsRoot = Opt.some(calcWithdrawalsRoot(wds.list))
# Check the correct withdrawal root on `latest` block
let r = env.client.latestHeader()
@ -492,16 +491,16 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
if not ws.skipBaseVerifications:
let maxBlock = env.clMock.latestExecutedPayload.blockNumber.uint64
for bn in 0..maxBlock:
let res = ws.wdHistory.verifyWithdrawals(bn, some(bn.u256), env.client)
let res = ws.wdHistory.verifyWithdrawals(bn, Opt.some(bn), env.client)
testCond res.isOk:
error "verify wd error", msg=res.error
# Check the correct withdrawal root on past blocks
let r = env.client.headerByNumber(bn)
var expectedWithdrawalsRoot: Option[common.Hash256]
var expectedWithdrawalsRoot: Opt[common.Hash256]
if bn >= ws.forkHeight.uint64:
let wds = ws.wdHistory.getWithdrawals(bn)
expectedWithdrawalsRoot = some(calcWithdrawalsRoot(wds.list))
expectedWithdrawalsRoot = Opt.some(calcWithdrawalsRoot(wds.list))
#r.ExpectationDescription = fmt.Sprintf(`
# Requested block %d to verify withdrawalsRoot with the
@ -511,6 +510,6 @@ proc execute*(ws: WDBaseSpec, env: TestEnv): bool =
# Verify on `latest`
let bnu = env.clMock.latestExecutedPayload.blockNumber.uint64
let res = ws.wdHistory.verifyWithdrawals(bnu, none(UInt256), env.client)
let res = ws.wdHistory.verifyWithdrawals(bnu, Opt.none(uint64), env.client)
testCond res.isOk:
error "verify wd error", msg=res.error

View File

@ -41,7 +41,7 @@ proc execute*(ws: BlockValueSpec, env: TestEnv): bool =
let
rec = r.get
txTip = tx.effectiveGasTip(blk.header.baseFee)
txTip = tx.effectiveGasTip(blk.header.baseFeePerGas)
totalValue += txTip.uint64.u256 * rec.gasUsed.u256
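
The rename from header.baseFee to header.baseFeePerGas feeds the usual EIP-1559
effective-tip arithmetic. A rough self-contained sketch of that arithmetic,
assuming the conventional formula rather than quoting the project's actual
helper:

# Sketch: effective tip = min(maxPriorityFeePerGas, maxFeePerGas - baseFee).
import results

proc effectiveGasTipSketch(maxFeePerGas, maxPriorityFeePerGas: int64,
                           baseFeePerGas: Opt[int64]): int64 =
  let baseFee = baseFeePerGas.get(0'i64)
  min(maxPriorityFeePerGas, maxFeePerGas - baseFee)

doAssert effectiveGasTipSketch(100, 2, Opt.some(95'i64)) == 2
doAssert effectiveGasTipSketch(100, 10, Opt.some(95'i64)) == 5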

View File

@ -10,9 +10,10 @@
import
std/[tables, sets, strutils],
eth/common/eth_types,
eth/common/eth_types as common,
json_rpc/[rpcclient],
stew/[byteutils, results],
stew/byteutils,
results,
../engine_client,
../../../../nimbus/utils/utils,
../../../../nimbus/beacon/web3_eth_conv
@ -78,7 +79,9 @@ func getWithdrawnAccounts*(wh: WDHistory, blockHeight: uint64): Table[EthAddress
result[wd.address] = wd.weiAmount
# Verify all withdrawals on a client at a given height
proc verifyWithdrawals*(wh: WDHistory, blockNumber: uint64, rpcBlock: Option[UInt256], client: RpcClient): Result[void, string] =
proc verifyWithdrawals*(wh: WDHistory, blockNumber: uint64,
rpcBlock: Opt[common.BlockNumber],
client: RpcClient): Result[void, string] =
let accounts = wh.getWithdrawnAccounts(blockNumber)
for account, expectedBalance in accounts:
let res = if rpcBlock.isSome:

View File

@ -112,7 +112,7 @@ proc execute*(ws: MaxInitcodeSizeSpec, env: TestEnv): bool =
# Customize the payload to include a tx with an invalid initcode
let customizer = CustomPayloadData(
parentBeaconRoot: ethHash env.clMock.latestPayloadAttributes.parentBeaconBlockRoot,
transactions: some( @[invalidTx.tx] ),
transactions: Opt.some( @[invalidTx.tx] ),
)
let customPayload = customizer.customizePayload(env.clMock.latestExecutableData).basePayload

View File

@ -41,7 +41,7 @@ type
sidechain : Table[uint64, ExecutionPayload]
payloadId : PayloadID
height : uint64
attr : Option[PayloadAttributes]
attr : Opt[PayloadAttributes]
Canonical = ref object
startAccount: UInt256
@ -100,12 +100,12 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
let numBlocks = ws.getPreWithdrawalsBlockCount()+ws.wdBlockCount
let pbRes = env.clMock.produceBlocks(numBlocks, BlockProcessCallbacks(
onPayloadProducerSelected: proc(): bool =
env.clMock.nextWithdrawals = none(seq[WithdrawalV1])
env.clMock.nextWithdrawals = Opt.none(seq[WithdrawalV1])
if env.clMock.currentPayloadNumber >= ws.forkHeight.uint64:
# Prepare some withdrawals
let wfb = ws.generateWithdrawalsForBlock(canonical.nextIndex, canonical.startAccount)
env.clMock.nextWithdrawals = some(w3Withdrawals wfb.wds)
env.clMock.nextWithdrawals = Opt.some(w3Withdrawals wfb.wds)
canonical.nextIndex = wfb.nextIndex
ws.wdHistory.put(env.clMock.currentPayloadNumber, wfb.wds)
@ -128,7 +128,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
# Send transactions to be included in the payload
let txs = env.makeTxs(
BaseTx(
recipient: some(prevRandaoContractAddr),
recipient: Opt.some(prevRandaoContractAddr),
amount: 1.u256,
txType: ws.txType,
gasLimit: 75000.GasInt,
@ -170,12 +170,12 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
testCond rr.isOk:
error "sidechain wd", msg=rr.error
attr.withdrawals = some(w3Withdrawals rr.get)
attr.withdrawals = Opt.some(w3Withdrawals rr.get)
info "Requesting sidechain payload",
number=env.clMock.currentPayloadNumber
sidechain.attr = some(attr)
sidechain.attr = Opt.some(attr)
let r = sec.client.forkchoiceUpdated(fcState, attr)
r.expectNoError()
r.expectPayloadStatus(PayloadExecutionStatus.valid)
@ -237,13 +237,13 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
timestamp: w3Qty(sidechain.sidechain[sidechain.height].timestamp, ws.getSidechainBlockTimeIncrements()),
prevRandao: env.clMock.latestPayloadAttributes.prevRandao,
suggestedFeeRecipient: env.clMock.latestPayloadAttributes.suggestedFeeRecipient,
withdrawals: some(w3Withdrawals wds),
withdrawals: Opt.some(w3Withdrawals wds),
)
fcState = ForkchoiceStateV1(
headBlockHash: sidechain.sidechain[sidechain.height].blockHash,
)
let r = sec.client.forkchoiceUpdatedV2(fcState, some(attr))
let r = sec.client.forkchoiceUpdatedV2(fcState, Opt.some(attr))
r.expectPayloadStatus(PayloadExecutionStatus.valid)
let p = sec.client.getPayloadV2(r.get().payloadID.get)
@ -262,7 +262,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
sidechain.sidechain[sidechain.height] = executionPayload(z.executionPayload)
# Check the withdrawals on the latest
let res = ws.wdHistory.verifyWithdrawals(sidechain.height, none(UInt256), env.client)
let res = ws.wdHistory.verifyWithdrawals(sidechain.height, Opt.none(uint64), env.client)
testCond res.isOk
if ws.reOrgViaSync:
@ -322,7 +322,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
# Verify withdrawals changed
let r2 = sidechain.wdHistory.verifyWithdrawals(sidechain.height, none(UInt256), env.client)
let r2 = sidechain.wdHistory.verifyWithdrawals(sidechain.height, Opt.none(uint64), env.client)
testCond r2.isOk
# Verify all balances of accounts in the original chain didn't increase
@ -330,7 +330,7 @@ proc execute*(ws: ReorgSpec, env: TestEnv): bool =
# We are using different accounts credited between the canonical chain
# and the fork.
# We check on `latest`.
let r3 = ws.wdHistory.verifyWithdrawals(uint64(ws.forkHeight-1), none(UInt256), env.client)
let r3 = ws.wdHistory.verifyWithdrawals(uint64(ws.forkHeight-1), Opt.none(uint64), env.client)
testCond r3.isOk
# Re-Org back to the canonical chain

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -72,8 +72,8 @@ proc execute*(ws: SyncSpec, env: TestEnv): bool =
if not ok:
return false
let bn = env.clMock.latestHeader.blockNumber.truncate(uint64)
let res = ws.wdHistory.verifyWithdrawals(bn, none(UInt256), sec.client)
let bn = env.clMock.latestHeader.number
let res = ws.wdHistory.verifyWithdrawals(bn, Opt.none(uint64), sec.client)
if res.isErr:
error "wd history error", msg=res.error
return false

View File

@ -8,8 +8,8 @@
# those terms.
import
std/[os, json, strutils, times, typetraits, options],
stew/[byteutils, results],
std/[os, json, strutils, times, typetraits],
stew/byteutils,
eth/common,
json_rpc/rpcclient,
web3/execution_types,
@ -37,7 +37,7 @@ type
Payload = object
badBlock: bool
payload: ExecutionPayload
beaconRoot: Option[common.Hash256]
beaconRoot: Opt[common.Hash256]
proc getPayload(node: JsonNode): Payload =
try:

View File

@ -67,7 +67,7 @@ proc txReceipt*(client: RpcClient, txHash: common.Hash256): Future[Option[Receip
status : rc.status.isSome,
hash : ethHash rc.root.get(w3Hash()),
cumulativeGasUsed: rc.cumulativeGasUsed.GasInt,
bloom : BloomFilter(rc.logsBloom),
logsBloom : BloomFilter(rc.logsBloom),
logs : toLogs(rc.logs)
)
result = some(rec)

View File

@ -88,7 +88,7 @@ proc makeFundingTx*(
nonce : v.nextNonce(),
gasPrice: v.gasPrice,
gasLimit: GasInt(75000),
to : some(predeployedVaultAddr),
to : Opt.some(predeployedVaultAddr),
value : 0.u256,
payload : sendSome(recipient, amount)
)
@ -111,7 +111,7 @@ proc signTx*(v: Vault,
nonce : nonce,
gasPrice: gasPrice,
gasLimit: gasLimit,
to : some(recipient),
to : Opt.some(recipient),
value : amount,
payload : payload
)

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -40,7 +40,7 @@ proc exchangeConf*(ben: BeaconEngineRef,
$ttd.get, $conf.terminalTotalDifficulty])
let
terminalBlockNumber = u256 conf.terminalBlockNumber
terminalBlockNumber = common.BlockNumber conf.terminalBlockNumber
terminalBlockHash = ethHash conf.terminalBlockHash
if terminalBlockHash != common.Hash256():
@ -62,10 +62,10 @@ proc exchangeConf*(ben: BeaconEngineRef,
return TransitionConfigurationV1(
terminalTotalDifficulty: ttd.get,
terminalBlockHash : w3Hash headerHash,
terminalBlockNumber : w3Qty header.blockNumber
terminalBlockNumber : w3Qty header.number
)
if terminalBlockNumber.isZero.not:
if terminalBlockNumber != 0'u64:
raise newException(ValueError, "invalid terminal block number: $1" % [
$terminalBlockNumber])
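
The zero test above changes because common.BlockNumber is now a plain unsigned
integer rather than a UInt256, so `.isZero` gives way to a direct comparison.
A tiny sketch under that assumption:

# Assumption: BlockNumber is an alias for uint64 after the bump.
type BlockNumber = uint64

let terminalBlockNumber = BlockNumber(0)
doAssert not (terminalBlockNumber != 0'u64)  # replaces the old `.isZero.not`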

View File

@ -72,7 +72,7 @@ template validateHeaderTimestamp(header, com, apiVersion) =
proc forkchoiceUpdated*(ben: BeaconEngineRef,
apiVersion: Version,
update: ForkchoiceStateV1,
attrsOpt: Option[PayloadAttributes]):
attrsOpt: Opt[PayloadAttributes]):
ForkchoiceUpdatedResponse =
let
com = ben.com
@ -111,7 +111,7 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
# TODO: cancel downloader
info "Forkchoice requested sync to new head",
number = header.blockNumber,
number = header.number,
hash = blockHash.short
# Update sync header (if any)
@ -126,11 +126,11 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
# Disable terminal PoW block conditions validation for fCUV2 and later.
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/shanghai.md#specification-1
if apiVersion == Version.V1:
let blockNumber = header.blockNumber.truncate(uint64)
let blockNumber = header.number
if header.difficulty > 0.u256 or blockNumber == 0'u64:
var
td, ptd: DifficultyInt
ttd = com.ttd.get(high(common.BlockNumber))
ttd = com.ttd.get(high(UInt256))
if not db.getTd(blockHash, td) or (blockNumber > 0'u64 and not db.getTd(header.parentHash, ptd)):
error "TDs unavailable for TTD check",
@ -156,11 +156,11 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
# See point 2 of fCUV1 specification
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.4/src/engine/paris.md#specification-1
var canonHash: common.Hash256
if db.getBlockHash(header.blockNumber, canonHash) and canonHash == blockHash:
if db.getBlockHash(header.number, canonHash) and canonHash == blockHash:
notice "Ignoring beacon update to old head",
blockHash=blockHash.short,
blockNumber=header.blockNumber
return validFCU(none(PayloadID), blockHash)
blockNumber=header.number
return validFCU(Opt.none(PayloadID), blockHash)
chain.setCanonical(header).isOkOr:
return invalidFCU(error, com, header)
@ -179,14 +179,14 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
hash=finalizedBlockHash.short
raise invalidForkChoiceState("finalized block header not available")
var finalHash: common.Hash256
if not db.getBlockHash(finalBlock.blockNumber, finalHash):
if not db.getBlockHash(finalBlock.number, finalHash):
warn "Final block not in canonical chain",
number=finalBlock.blockNumber,
number=finalBlock.number,
hash=finalizedBlockHash.short
raise invalidForkChoiceState("finalized block hash not available")
if finalHash != finalizedBlockHash:
warn "Final block not in canonical chain",
number=finalBlock.blockNumber,
number=finalBlock.number,
expect=finalizedBlockHash.short,
get=finalHash.short
raise invalidForkChoiceState("finalized block not canonical")
@ -200,13 +200,13 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
hash = safeBlockHash.short
raise invalidForkChoiceState("safe head not available")
var safeHash: common.Hash256
if not db.getBlockHash(safeBlock.blockNumber, safeHash):
if not db.getBlockHash(safeBlock.number, safeHash):
warn "Safe block hash not available in database",
hash = safeHash.short
raise invalidForkChoiceState("safe block hash not available")
if safeHash != safeBlockHash:
warn "Safe block not in canonical chain",
blockNumber=safeBlock.blockNumber,
blockNumber=safeBlock.number,
expect=safeBlockHash.short,
get=safeHash.short
raise invalidForkChoiceState("safe head not canonical")
@ -231,6 +231,6 @@ proc forkchoiceUpdated*(ben: BeaconEngineRef,
hash = bundle.executionPayload.blockHash.short,
number = bundle.executionPayload.blockNumber
return validFCU(some(id), blockHash)
return validFCU(Opt.some(id), blockHash)
return validFCU(none(PayloadID), blockHash)
return validFCU(Opt.none(PayloadID), blockHash)

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -23,11 +23,11 @@ const
proc getPayloadBodyByHeader(db: CoreDbRef,
header: common.BlockHeader,
output: var seq[Option[ExecutionPayloadBodyV1]]) =
output: var seq[Opt[ExecutionPayloadBodyV1]]) =
var body: common.BlockBody
if not db.getBlockBody(header, body):
output.add none(ExecutionPayloadBodyV1)
output.add Opt.none(ExecutionPayloadBodyV1)
return
let txs = w3Txs body.transactions
@ -37,20 +37,20 @@ proc getPayloadBodyByHeader(db: CoreDbRef,
wds.add w3Withdrawal(w)
output.add(
some(ExecutionPayloadBodyV1(
Opt.some(ExecutionPayloadBodyV1(
transactions: txs,
# pre Shanghai block return null withdrawals
# post Shanghai block return at least empty slice
withdrawals: if header.withdrawalsRoot.isSome:
some(wds)
Opt.some(wds)
else:
none(seq[WithdrawalV1])
Opt.none(seq[WithdrawalV1])
))
)
proc getPayloadBodiesByHash*(ben: BeaconEngineRef,
hashes: seq[Web3Hash]):
seq[Option[ExecutionPayloadBodyV1]] =
seq[Opt[ExecutionPayloadBodyV1]] =
if hashes.len > maxBodyRequest:
raise tooLargeRequest("request exceeds max allowed " & $maxBodyRequest)
@ -58,13 +58,13 @@ proc getPayloadBodiesByHash*(ben: BeaconEngineRef,
var header: common.BlockHeader
for h in hashes:
if not db.getBlockHeader(ethHash h, header):
result.add none(ExecutionPayloadBodyV1)
result.add Opt.none(ExecutionPayloadBodyV1)
continue
db.getPayloadBodyByHeader(header, result)
proc getPayloadBodiesByRange*(ben: BeaconEngineRef,
start: uint64, count: uint64):
seq[Option[ExecutionPayloadBodyV1]] =
seq[Opt[ExecutionPayloadBodyV1]] =
if start == 0:
raise invalidParams("start block should be greater than zero")
@ -77,7 +77,7 @@ proc getPayloadBodiesByRange*(ben: BeaconEngineRef,
let
com = ben.com
db = com.db
current = com.syncCurrent.truncate(uint64)
current = com.syncCurrent
var
header: common.BlockHeader
@ -87,7 +87,7 @@ proc getPayloadBodiesByRange*(ben: BeaconEngineRef,
last = current
for bn in start..last:
if not db.getBlockHeader(bn.toBlockNumber, header):
result.add none(ExecutionPayloadBodyV1)
if not db.getBlockHeader(bn, header):
result.add Opt.none(ExecutionPayloadBodyV1)
continue
db.getPayloadBodyByHeader(header, result)
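
Both procs above return seq[Opt[ExecutionPayloadBodyV1]], where a body that
cannot be found becomes a null (Opt.none) entry rather than being skipped. A
self-contained sketch of that shape with a stand-in payload type:

# Stand-in PayloadBody; the real type is engine_api_types.ExecutionPayloadBodyV1.
import results

type PayloadBody = object
  txCount: int

proc lookupBody(n: uint64): Opt[PayloadBody] =
  # pretend only even block numbers are known locally
  if n mod 2 == 0: Opt.some(PayloadBody(txCount: int(n)))
  else: Opt.none(PayloadBody)

var output: seq[Opt[PayloadBody]]
for bn in 1'u64 .. 4'u64:
  output.add lookupBody(bn)

doAssert output[0].isNone and output[1].isSome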

View File

@ -26,7 +26,7 @@ proc getPayload*(ben: BeaconEngineRef,
var payloadGeneric: ExecutionPayload
var blockValue: UInt256
var blobsBundle: Option[BlobsBundleV1]
var blobsBundle: Opt[BlobsBundleV1]
if not ben.get(id, blockValue, payloadGeneric, blobsBundle):
raise unknownPayload("Unknown payload")
@ -50,7 +50,7 @@ proc getPayloadV3*(ben: BeaconEngineRef, id: PayloadID): GetPayloadV3Response =
var payloadGeneric: ExecutionPayload
var blockValue: UInt256
var blobsBundle: Option[BlobsBundleV1]
var blobsBundle: Opt[BlobsBundleV1]
if not ben.get(id, blockValue, payloadGeneric, blobsBundle):
raise unknownPayload("Unknown payload")
@ -78,7 +78,7 @@ proc getPayloadV4*(ben: BeaconEngineRef, id: PayloadID): GetPayloadV4Response =
var payloadGeneric: ExecutionPayload
var blockValue: UInt256
var blobsBundle: Option[BlobsBundleV1]
var blobsBundle: Opt[BlobsBundleV1]
if not ben.get(id, blockValue, payloadGeneric, blobsBundle):
raise unknownPayload("Unknown payload")

View File

@ -94,8 +94,8 @@ template validatePayload(apiVersion, version, payload) =
proc newPayload*(ben: BeaconEngineRef,
apiVersion: Version,
payload: ExecutionPayload,
versionedHashes = none(seq[Web3Hash]),
beaconRoot = none(Web3Hash)): PayloadStatusV1 =
versionedHashes = Opt.none(seq[Web3Hash]),
beaconRoot = Opt.none(Web3Hash)): PayloadStatusV1 =
trace "Engine API request received",
meth = "newPayload",
@ -133,7 +133,7 @@ proc newPayload*(ben: BeaconEngineRef,
# return a fake success.
if db.getBlockHeader(blockHash, header):
warn "Ignoring already known beacon payload",
number = header.blockNumber, hash = blockHash.short
number = header.number, hash = blockHash.short
return validStatus(blockHash)
# If this block was rejected previously, keep rejecting it
@ -153,19 +153,19 @@ proc newPayload*(ben: BeaconEngineRef,
# We have an existing parent, do some sanity checks to avoid the beacon client
# triggering too early
let ttd = com.ttd.get(high(common.BlockNumber))
let ttd = com.ttd.get(high(UInt256))
if version == Version.V1:
let td = db.getScore(header.parentHash).valueOr:
0.u256
if (not com.forkGTE(MergeFork)) and td < ttd:
warn "Ignoring pre-merge payload",
number = header.blockNumber, hash = blockHash, td, ttd
number = header.number, hash = blockHash, td, ttd
return invalidStatus()
if header.timestamp <= parent.timestamp:
warn "Invalid timestamp",
number = header.blockNumber, parentNumber = parent.blockNumber,
number = header.number, parentNumber = parent.number,
parent = parent.timestamp, header = header.timestamp
return invalidStatus(parent.blockHash, "Invalid timestamp")
@ -181,12 +181,12 @@ proc newPayload*(ben: BeaconEngineRef,
ben.put(blockHash, header)
warn "State not available, ignoring new payload",
hash = blockHash,
number = header.blockNumber
number = header.number
let blockHash = latestValidHash(db, parent, ttd)
return acceptedStatus(blockHash)
trace "Inserting block without sethead",
hash = blockHash, number = header.blockNumber
hash = blockHash, number = header.number
let vres = ben.chain.insertBlockWithoutSetHead(blk)
if vres.isErr:
ben.setInvalidAncestor(header, blockHash)

View File

@ -60,15 +60,15 @@ proc validateBlockHash*(header: common.BlockHeader,
let res = PayloadStatusV1(
status: status,
validationError: some("blockhash mismatch, want $1, got $2" % [
validationError: Opt.some("blockhash mismatch, want $1, got $2" % [
$wantHash, $gotHash])
)
return err(res)
return ok()
template toValidHash*(x: common.Hash256): Option[Web3Hash] =
some(w3Hash x)
template toValidHash*(x: common.Hash256): Opt[Web3Hash] =
Opt.some(w3Hash x)
proc simpleFCU*(status: PayloadStatusV1): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(payloadStatus: status)
@ -81,7 +81,7 @@ proc simpleFCU*(status: PayloadExecutionStatus,
ForkchoiceUpdatedResponse(
payloadStatus: PayloadStatusV1(
status: status,
validationError: some(msg)
validationError: Opt.some(msg)
)
)
@ -92,11 +92,11 @@ proc invalidFCU*(
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(hash),
validationError: some validationError
validationError: Opt.some validationError
)
)
proc validFCU*(id: Option[PayloadID],
proc validFCU*(id: Opt[PayloadID],
validHash: common.Hash256): ForkchoiceUpdatedResponse =
ForkchoiceUpdatedResponse(
payloadStatus: PayloadStatusV1(
@ -110,7 +110,7 @@ proc invalidStatus*(validHash: common.Hash256, msg: string): PayloadStatusV1 =
PayloadStatusV1(
status: PayloadExecutionStatus.invalid,
latestValidHash: toValidHash(validHash),
validationError: some(msg)
validationError: Opt.some(msg)
)
proc invalidStatus*(validHash = common.Hash256()): PayloadStatusV1 =
@ -193,7 +193,7 @@ proc invalidFCU*(validationError: string,
return invalidFCU(validationError)
let blockHash = try:
latestValidHash(com.db, parent, com.ttd.get(high(common.BlockNumber)))
latestValidHash(com.db, parent, com.ttd.get(high(UInt256)))
except RlpError:
default(common.Hash256)

View File

@ -132,12 +132,12 @@ proc put*(ben: BeaconEngineRef,
proc put*(ben: BeaconEngineRef, id: PayloadID,
blockValue: UInt256, payload: ExecutionPayload,
blobsBundle: Option[BlobsBundleV1]) =
blobsBundle: Opt[BlobsBundleV1]) =
ben.queue.put(id, blockValue, payload, blobsBundle)
proc put*(ben: BeaconEngineRef, id: PayloadID,
blockValue: UInt256, payload: SomeExecutionPayload,
blobsBundle: Option[BlobsBundleV1]) =
blobsBundle: Opt[BlobsBundleV1]) =
doAssert blobsBundle.isNone == (payload is
ExecutionPayloadV1 | ExecutionPayloadV2)
ben.queue.put(id, blockValue, payload, blobsBundle)
@ -146,7 +146,7 @@ proc put*(ben: BeaconEngineRef, id: PayloadID,
blockValue: UInt256,
payload: ExecutionPayloadV1 | ExecutionPayloadV2) =
ben.queue.put(
id, blockValue, payload, blobsBundle = options.none(BlobsBundleV1))
id, blockValue, payload, blobsBundle = Opt.none(BlobsBundleV1))
# ------------------------------------------------------------------------------
# Public functions, getters
@ -177,7 +177,7 @@ proc get*(ben: BeaconEngineRef, hash: common.Hash256,
proc get*(ben: BeaconEngineRef, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayload,
blobsBundle: var Option[BlobsBundleV1]): bool =
blobsBundle: var Opt[BlobsBundleV1]): bool =
ben.queue.get(id, blockValue, payload, blobsBundle)
proc get*(ben: BeaconEngineRef, id: PayloadID,
@ -207,7 +207,7 @@ proc get*(ben: BeaconEngineRef, id: PayloadID,
type ExecutionPayloadAndBlobsBundle* = object
executionPayload*: ExecutionPayload
blobsBundle*: Option[BlobsBundleV1]
blobsBundle*: Opt[BlobsBundleV1]
proc generatePayload*(ben: BeaconEngineRef,
attrs: PayloadAttributes):
@ -243,10 +243,10 @@ proc generatePayload*(ben: BeaconEngineRef,
if bundle.blk.header.extraData.len > 32:
return err "extraData length should not exceed 32 bytes"
var blobsBundle: Option[BlobsBundleV1]
var blobsBundle: Opt[BlobsBundleV1]
if bundle.blobsBundle.isSome:
template blobData: untyped = bundle.blobsBundle.get
blobsBundle = options.some BlobsBundleV1(
blobsBundle = Opt.some BlobsBundleV1(
commitments: blobData.commitments.mapIt it.Web3KZGCommitment,
proofs: blobData.proofs.mapIt it.Web3KZGProof,
blobs: blobData.blobs.mapIt it.Web3Blob)
@ -272,7 +272,7 @@ proc checkInvalidAncestor*(ben: BeaconEngineRef,
inc ben.invalidBlocksHits.mgetOrPut(badHash, 0)
if ben.invalidBlocksHits.getOrDefault(badHash) >= invalidBlockHitEviction:
warn "Too many bad block import attempt, trying",
number=invalid.blockNumber, hash=badHash.short
number=invalid.number, hash=badHash.short
ben.invalidBlocksHits.del(badHash)
@ -289,7 +289,7 @@ proc checkInvalidAncestor*(ben: BeaconEngineRef,
# Not too many failures yet, mark the head of the invalid chain as invalid
if check != head:
warn "Marked new chain head as invalid",
hash=head, badnumber=invalid.blockNumber, badhash=badHash
hash=head, badnumber=invalid.number, badhash=badHash
if ben.invalidTipsets.len >= invalidTipsetsCap:
let size = invalidTipsetsCap - ben.invalidTipsets.len

View File

@ -24,11 +24,11 @@ func wdRoot(list: openArray[WithdrawalV1]): common.Hash256
{.noSideEffect.}:
calcWithdrawalsRoot(ethWithdrawals list)
func wdRoot(x: Option[seq[WithdrawalV1]]): Option[common.Hash256]
func wdRoot(x: Opt[seq[WithdrawalV1]]): Opt[common.Hash256]
{.gcsafe, raises:[].} =
{.noSideEffect.}:
if x.isNone: none(common.Hash256)
else: some(wdRoot x.get)
if x.isNone: Opt.none(common.Hash256)
else: Opt.some(wdRoot x.get)
func txRoot(list: openArray[Web3Tx]): common.Hash256
{.gcsafe, raises:[RlpError].} =
@ -46,15 +46,15 @@ func executionPayload*(blk: EthBlock): ExecutionPayload =
parentHash : w3Hash blk.header.parentHash,
feeRecipient : w3Addr blk.header.coinbase,
stateRoot : w3Hash blk.header.stateRoot,
receiptsRoot : w3Hash blk.header.receiptRoot,
logsBloom : w3Bloom blk.header.bloom,
receiptsRoot : w3Hash blk.header.receiptsRoot,
logsBloom : w3Bloom blk.header.logsBloom,
prevRandao : w3PrevRandao blk.header.prevRandao,
blockNumber : w3Qty blk.header.blockNumber,
blockNumber : w3Qty blk.header.number,
gasLimit : w3Qty blk.header.gasLimit,
gasUsed : w3Qty blk.header.gasUsed,
timestamp : w3Qty blk.header.timestamp,
extraData : w3ExtraData blk.header.extraData,
baseFeePerGas: blk.header.fee.get(0.u256),
baseFeePerGas: blk.header.baseFeePerGas.get(0.u256),
blockHash : w3Hash blk.header,
transactions : w3Txs blk.txs,
withdrawals : w3Withdrawals blk.withdrawals,
@ -67,22 +67,22 @@ func executionPayloadV1V2*(blk: EthBlock): ExecutionPayloadV1OrV2 =
parentHash : w3Hash blk.header.parentHash,
feeRecipient : w3Addr blk.header.coinbase,
stateRoot : w3Hash blk.header.stateRoot,
receiptsRoot : w3Hash blk.header.receiptRoot,
logsBloom : w3Bloom blk.header.bloom,
receiptsRoot : w3Hash blk.header.receiptsRoot,
logsBloom : w3Bloom blk.header.logsBloom,
prevRandao : w3PrevRandao blk.header.prevRandao,
blockNumber : w3Qty blk.header.blockNumber,
blockNumber : w3Qty blk.header.number,
gasLimit : w3Qty blk.header.gasLimit,
gasUsed : w3Qty blk.header.gasUsed,
timestamp : w3Qty blk.header.timestamp,
extraData : w3ExtraData blk.header.extraData,
baseFeePerGas: blk.header.fee.get(0.u256),
baseFeePerGas: blk.header.baseFeePerGas.get(0.u256),
blockHash : w3Hash blk.header,
transactions : w3Txs blk.txs,
withdrawals : w3Withdrawals blk.withdrawals,
)
func blockHeader*(p: ExecutionPayload,
beaconRoot: Option[common.Hash256]):
beaconRoot: Opt[common.Hash256]):
common.BlockHeader {.gcsafe, raises:[RlpError].} =
common.BlockHeader(
parentHash : ethHash p.parentHash,
@ -90,17 +90,17 @@ func blockHeader*(p: ExecutionPayload,
coinbase : ethAddr p.feeRecipient,
stateRoot : ethHash p.stateRoot,
txRoot : txRoot p.transactions,
receiptRoot : ethHash p.receiptsRoot,
bloom : ethBloom p.logsBloom,
receiptsRoot : ethHash p.receiptsRoot,
logsBloom : ethBloom p.logsBloom,
difficulty : 0.u256,
blockNumber : u256 p.blockNumber,
number : common.BlockNumber(p.blockNumber),
gasLimit : ethGasInt p.gasLimit,
gasUsed : ethGasInt p.gasUsed,
timestamp : ethTime p.timestamp,
extraData : ethBlob p.extraData,
mixDigest : ethHash p.prevRandao,
mixHash : ethHash p.prevRandao,
nonce : default(BlockNonce),
fee : some(p.baseFeePerGas),
baseFeePerGas : Opt.some(p.baseFeePerGas),
withdrawalsRoot: wdRoot p.withdrawals,
blobGasUsed : u64(p.blobGasUsed),
excessBlobGas : u64(p.excessBlobGas),
@ -116,7 +116,7 @@ func blockBody*(p: ExecutionPayload):
)
func ethBlock*(p: ExecutionPayload,
beaconRoot: Option[common.Hash256]):
beaconRoot: Opt[common.Hash256]):
common.EthBlock {.gcsafe, raises:[RlpError].} =
common.EthBlock(
header : blockHeader(p, beaconRoot),
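
The converters above rely on the renamed eth_types header fields: number,
receiptsRoot, logsBloom, mixHash, and baseFeePerGas carried as an Opt. A
minimal construction sketch; unspecified fields keep their defaults, and the
exact exports are assumed from how other files in this diff import eth/common:

# Minimal sketch of the post-bump field names; not part of the diff.
import results, stint, eth/common

let h = BlockHeader(
  number       : BlockNumber(19_000_000),  # was blockNumber (UInt256)
  receiptsRoot : Hash256(),                # was receiptRoot
  mixHash      : Hash256(),                # was mixDigest
  baseFeePerGas: Opt.some(7.u256))         # was fee: Option[UInt256]
doAssert h.baseFeePerGas.isSome and h.number == 19_000_000'u64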

View File

@ -35,7 +35,7 @@ type
id: PayloadID
payload: ExecutionPayload
blockValue: UInt256
blobsBundle: Option[BlobsBundleV1]
blobsBundle: Opt[BlobsBundleV1]
HeaderItem = object
hash: common.Hash256
@ -73,13 +73,13 @@ proc put*(api: var PayloadQueue,
proc put*(api: var PayloadQueue, id: PayloadID,
blockValue: UInt256, payload: ExecutionPayload,
blobsBundle: Option[BlobsBundleV1]) =
blobsBundle: Opt[BlobsBundleV1]) =
api.payloadQueue.put(PayloadItem(id: id,
payload: payload, blockValue: blockValue, blobsBundle: blobsBundle))
proc put*(api: var PayloadQueue, id: PayloadID,
blockValue: UInt256, payload: SomeExecutionPayload,
blobsBundle: Option[BlobsBundleV1]) =
blobsBundle: Opt[BlobsBundleV1]) =
doAssert blobsBundle.isNone == (payload is
ExecutionPayloadV1 | ExecutionPayloadV2)
api.put(id, blockValue, payload.executionPayload, blobsBundle = blobsBundle)
@ -87,7 +87,7 @@ proc put*(api: var PayloadQueue, id: PayloadID,
proc put*(api: var PayloadQueue, id: PayloadID,
blockValue: UInt256,
payload: ExecutionPayloadV1 | ExecutionPayloadV2) =
api.put(id, blockValue, payload, blobsBundle = options.none(BlobsBundleV1))
api.put(id, blockValue, payload, blobsBundle = Opt.none(BlobsBundleV1))
# ------------------------------------------------------------------------------
# Public functions, getters
@ -104,7 +104,7 @@ proc get*(api: PayloadQueue, hash: common.Hash256,
proc get*(api: PayloadQueue, id: PayloadID,
blockValue: var UInt256,
payload: var ExecutionPayload,
blobsBundle: var Option[BlobsBundleV1]): bool =
blobsBundle: var Opt[BlobsBundleV1]): bool =
for x in api.payloadQueue:
if x.id == id:
payload = x.payload
@ -118,7 +118,7 @@ proc get*(api: PayloadQueue, id: PayloadID,
payload: var ExecutionPayloadV1): bool =
var
p: ExecutionPayload
blobsBundleOpt: Option[BlobsBundleV1]
blobsBundleOpt: Opt[BlobsBundleV1]
let found = api.get(id, blockValue, p, blobsBundleOpt)
if found:
doAssert(p.version == Version.V1)
@ -131,7 +131,7 @@ proc get*(api: PayloadQueue, id: PayloadID,
payload: var ExecutionPayloadV2): bool =
var
p: ExecutionPayload
blobsBundleOpt: Option[BlobsBundleV1]
blobsBundleOpt: Opt[BlobsBundleV1]
let found = api.get(id, blockValue, p, blobsBundleOpt)
if found:
doAssert(p.version == Version.V2)
@ -145,7 +145,7 @@ proc get*(api: PayloadQueue, id: PayloadID,
blobsBundle: var BlobsBundleV1): bool =
var
p: ExecutionPayload
blobsBundleOpt: Option[BlobsBundleV1]
blobsBundleOpt: Opt[BlobsBundleV1]
let found = api.get(id, blockValue, p, blobsBundleOpt)
if found:
doAssert(p.version == Version.V3)
@ -159,7 +159,7 @@ proc get*(api: PayloadQueue, id: PayloadID,
payload: var ExecutionPayloadV1OrV2): bool =
var
p: ExecutionPayload
blobsBundleOpt: Option[BlobsBundleV1]
blobsBundleOpt: Opt[BlobsBundleV1]
let found = api.get(id, blockValue, p, blobsBundleOpt)
if found:
doAssert(p.version in {Version.V1, Version.V2})

View File

@ -8,7 +8,7 @@
# those terms.
import
std/[options, typetraits],
std/[typetraits],
web3/primitives as web3types,
web3/eth_api_types,
web3/engine_api_types,
@ -41,15 +41,15 @@ type
# Pretty printers
# ------------------------------------------------------------------------------
proc `$`*(x: Option[common.Hash256]): string =
proc `$`*(x: Opt[common.Hash256]): string =
if x.isNone: "none"
else: x.get().data.toHex
proc `$`*(x: Option[Web3Hash]): string =
proc `$`*(x: Opt[Web3Hash]): string =
if x.isNone: "none"
else: x.get().toHex
proc `$`*(x: Option[PayloadID]): string =
proc `$`*(x: Opt[PayloadID]): string =
if x.isNone: "none"
else: x.get().toHex
@ -80,9 +80,9 @@ func w3Hash*(): Web3Hash =
template unsafeQuantityToInt64*(q: Web3Quantity): int64 =
int64 q
func u64*(x: Option[Web3Quantity]): Option[uint64] =
if x.isNone: none(uint64)
else: some(uint64 x.get)
func u64*(x: Opt[Web3Quantity]): Opt[uint64] =
if x.isNone: Opt.none(uint64)
else: Opt.some(uint64 x.get)
func u256*(x: Web3Quantity): UInt256 =
u256(x.uint64)
@ -99,24 +99,24 @@ func ethTime*(x: Web3Quantity): common.EthTime =
func ethHash*(x: Web3PrevRandao): common.Hash256 =
common.Hash256(data: distinctBase x)
func ethHash*(x: Option[Web3Hash]): Option[common.Hash256] =
if x.isNone: none(common.Hash256)
else: some(ethHash x.get)
func ethHash*(x: Opt[Web3Hash]): Opt[common.Hash256] =
if x.isNone: Opt.none(common.Hash256)
else: Opt.some(ethHash x.get)
func ethHashes*(list: openArray[Web3Hash]): seq[common.Hash256] =
for x in list:
result.add ethHash(x)
func ethHashes*(list: Option[seq[Web3Hash]]): Option[seq[common.Hash256]] =
if list.isNone: none(seq[common.Hash256])
else: some ethHashes(list.get)
func ethHashes*(list: Opt[seq[Web3Hash]]): Opt[seq[common.Hash256]] =
if list.isNone: Opt.none(seq[common.Hash256])
else: Opt.some ethHashes(list.get)
func ethAddr*(x: Web3Address): common.EthAddress =
EthAddress x
func ethAddr*(x: Option[Web3Address]): Option[common.EthAddress] =
if x.isNone: none(common.EthAddress)
else: some(EthAddress x.get)
func ethAddr*(x: Opt[Web3Address]): Opt[common.EthAddress] =
if x.isNone: Opt.none(common.EthAddress)
else: Opt.some(EthAddress x.get)
func ethAddrs*(list: openArray[Web3Address]): seq[common.EthAddress] =
for x in list:
@ -143,10 +143,10 @@ func ethWithdrawals*(list: openArray[WithdrawalV1]):
for x in list:
result.add ethWithdrawal(x)
func ethWithdrawals*(x: Option[seq[WithdrawalV1]]):
Option[seq[common.Withdrawal]] =
if x.isNone: none(seq[common.Withdrawal])
else: some(ethWithdrawals x.get)
func ethWithdrawals*(x: Opt[seq[WithdrawalV1]]):
Opt[seq[common.Withdrawal]] =
if x.isNone: Opt.none(seq[common.Withdrawal])
else: Opt.some(ethWithdrawals x.get)
func ethTx*(x: Web3Tx): common.Transaction {.gcsafe, raises:[RlpError].} =
result = rlp.decode(distinctBase x, common.Transaction)
@ -168,7 +168,7 @@ func ethAccessList*(list: openArray[AccessTuple]): common.AccessList =
storageKeys: storageKeys x.storageKeys,
)
func ethAccessList*(x: Option[seq[AccessTuple]]): common.AccessList =
func ethAccessList*(x: Opt[seq[AccessTuple]]): common.AccessList =
if x.isSome:
return ethAccessList(x.get)
@ -183,18 +183,18 @@ func w3Hashes*(list: openArray[common.Hash256]): seq[Web3Hash] =
for x in list:
result.add Web3Hash x.data
func w3Hashes*(z: Option[seq[common.Hash256]]): Option[seq[Web3Hash]] =
if z.isNone: none(seq[Web3Hash])
func w3Hashes*(z: Opt[seq[common.Hash256]]): Opt[seq[Web3Hash]] =
if z.isNone: Opt.none(seq[Web3Hash])
else:
let list = z.get
var v = newSeqOfCap[Web3Hash](list.len)
for x in list:
v.add Web3Hash x.data
some(v)
Opt.some(v)
func w3Hash*(x: Option[common.Hash256]): Option[BlockHash] =
if x.isNone: none(BlockHash)
else: some(BlockHash x.get.data)
func w3Hash*(x: Opt[common.Hash256]): Opt[BlockHash] =
if x.isNone: Opt.none(BlockHash)
else: Opt.some(BlockHash x.get.data)
func w3Hash*(x: common.BlockHeader): BlockHash =
BlockHash rlpHash(x).data
@ -216,9 +216,6 @@ func w3PrevRandao*(x: common.Hash256): Web3PrevRandao =
func w3Qty*(x: UInt256): Web3Quantity =
Web3Quantity x.truncate(uint64)
func w3Qty*(x: common.GasInt): Web3Quantity =
Web3Quantity x.uint64
func w3Qty*(x: common.EthTime): Web3Quantity =
Web3Quantity x.uint64
@ -234,16 +231,19 @@ func w3Qty*(x: Web3Quantity, y: EthTime): Web3Quantity =
func w3Qty*(x: Web3Quantity, y: uint64): Web3Quantity =
Web3Quantity(x.uint64 + y)
func w3Qty*(x: Option[uint64]): Option[Web3Quantity] =
if x.isNone: none(Web3Quantity)
else: some(Web3Quantity x.get)
func w3Qty*(x: Opt[uint64]): Opt[Web3Quantity] =
if x.isNone: Opt.none(Web3Quantity)
else: Opt.some(Web3Quantity x.get)
func w3Qty*(x: uint64): Web3Quantity =
Web3Quantity(x)
func w3BlockNumber*(x: Option[uint64]): Option[Web3BlockNumber] =
if x.isNone: none(Web3BlockNumber)
else: some(Web3BlockNumber x.get)
func w3Qty*(x: int64): Web3Quantity =
Web3Quantity(x)
func w3BlockNumber*(x: Opt[uint64]): Opt[Web3BlockNumber] =
if x.isNone: Opt.none(Web3BlockNumber)
else: Opt.some(Web3BlockNumber x.get)
func w3BlockNumber*(x: uint64): Web3BlockNumber =
Web3BlockNumber(x)
@ -270,10 +270,10 @@ func w3Withdrawals*(list: openArray[common.Withdrawal]): seq[WithdrawalV1] =
for x in list:
result.add w3Withdrawal(x)
func w3Withdrawals*(x: Option[seq[common.Withdrawal]]):
Option[seq[WithdrawalV1]] =
if x.isNone: none(seq[WithdrawalV1])
else: some(w3Withdrawals x.get)
func w3Withdrawals*(x: Opt[seq[common.Withdrawal]]):
Opt[seq[WithdrawalV1]] =
if x.isNone: Opt.none(seq[WithdrawalV1])
else: Opt.some(w3Withdrawals x.get)
func w3Tx*(tx: common.Transaction): Web3Tx =
Web3Tx rlp.encode(tx)
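
Each of the Opt-returning converters above follows the same isNone/isSome
branch. results also ships a map helper that can express the same conversion
in one line, assuming the vendored results exposes map for Opt (sketch only):

# Sketch: map over Opt instead of an explicit isNone/isSome branch.
import results

func double(x: uint64): uint64 = x * 2

doAssert Opt.some(21'u64).map(double).get == 42'u64
doAssert Opt.none(uint64).map(double).isNone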

View File

@ -10,10 +10,10 @@
{.push raises: [].}
import
std/[tables, strutils, options, times, macros],
std/[tables, strutils, times, macros],
eth/[common, rlp, p2p], stint, stew/[byteutils],
json_serialization, chronicles,
json_serialization/std/options as jsoptions,
json_serialization/stew/results,
json_serialization/lexer,
"."/[genesis_alloc, hardforks]
@ -33,10 +33,10 @@ type
number* : BlockNumber
gasUser* : GasInt
parentHash* : Hash256
baseFeePerGas*: Option[UInt256] # EIP-1559
blobGasUsed* : Option[uint64] # EIP-4844
excessBlobGas*: Option[uint64] # EIP-4844
parentBeaconBlockRoot*: Option[Hash256] # EIP-4788
baseFeePerGas*: Opt[UInt256] # EIP-1559
blobGasUsed* : Opt[uint64] # EIP-4844
excessBlobGas*: Opt[uint64] # EIP-4844
parentBeaconBlockRoot*: Opt[Hash256] # EIP-4788
GenesisAlloc* = Table[EthAddress, GenesisAccount]
GenesisStorage* = Table[UInt256, UInt256]
@ -77,7 +77,7 @@ derefType(ChainConfig).useDefaultReaderIn JGenesis
# ------------------------------------------------------------------------------
# used by chronicles json writer
proc writeValue(writer: var JsonWriter, value: Option[EthTime])
proc writeValue(writer: var JsonWriter, value: Opt[EthTime])
{.gcsafe, raises: [IOError].} =
mixin writeValue
@ -225,10 +225,11 @@ proc readValue(reader: var JsonReader[JGenesis], value: var EthTime)
if data.len > 2 and data[1] == 'x':
value = fromHex[int64](data).EthTime
else:
# TODO: use safer uint64 parser
value = parseInt(data).EthTime
# but shanghaiTime and cancunTime in config are int literals
proc readValue(reader: var JsonReader[JGenesis], value: var Option[EthTime])
proc readValue(reader: var JsonReader[JGenesis], value: var Opt[EthTime])
{.gcsafe, raises: [IOError, JsonReaderError].} =
if reader.tokKind == JsonValueKind.Null:
reset value
@ -237,7 +238,7 @@ proc readValue(reader: var JsonReader[JGenesis], value: var Option[EthTime])
# both readValue(GasInt/AccountNonce) will be called if
# we use readValue(int64/uint64)
let val = EthTime reader.parseInt(uint64)
value = some val
value = Opt.some val
proc readValue(reader: var JsonReader[JGenesis], value: var seq[byte])
{.gcsafe, raises: [SerializationError, IOError].} =
@ -254,10 +255,18 @@ proc readValue(reader: var JsonReader[JGenesis], value: var EthAddress)
wrapError:
value = parseAddress(reader.readValue(string))
proc readValue(reader: var JsonReader[JGenesis], value: var AccountNonce)
proc readValue(reader: var JsonReader[JGenesis], value: var uint64)
{.gcsafe, raises: [SerializationError, IOError].} =
wrapError:
value = fromHex[uint64](reader.readValue(string))
if reader.tokKind == JsonValueKind.Number:
value = reader.parseInt(uint64)
else:
let data = reader.readValue(string)
if data.len > 2 and data[1] == 'x':
value = fromHex[uint64](data)
else:
# TODO: use safer uint64 parser
value = parseInt(data).uint64
proc readValue(reader: var JsonReader[JGenesis], value: var GenesisStorage)
{.gcsafe, raises: [SerializationError, IOError].} =
@ -447,69 +456,69 @@ func chainConfigForNetwork*(id: NetworkId): ChainConfig =
consensusType: ConsensusType.POW,
chainId: MainNet.ChainId,
# Genesis (Frontier): # 2015-07-30 15:26:13 UTC
# Frontier Thawing: 200_000.toBlockNumber, # 2015-09-07 21:33:09 UTC
homesteadBlock: some(1_150_000.toBlockNumber), # 2016-03-14 18:49:53 UTC
daoForkBlock: some(1_920_000.toBlockNumber), # 2016-07-20 13:20:40 UTC
# Frontier Thawing: 200_000.BlockNumber, # 2015-09-07 21:33:09 UTC
homesteadBlock: Opt.some(1_150_000.BlockNumber), # 2016-03-14 18:49:53 UTC
daoForkBlock: Opt.some(1_920_000.BlockNumber), # 2016-07-20 13:20:40 UTC
daoForkSupport: true,
eip150Block: some(2_463_000.toBlockNumber), # 2016-10-18 13:19:31 UTC
eip150Block: Opt.some(2_463_000.BlockNumber), # 2016-10-18 13:19:31 UTC
eip150Hash: toDigest("2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"),
eip155Block: some(2_675_000.toBlockNumber), # Same as EIP-158
eip158Block: some(2_675_000.toBlockNumber), # 2016-11-22 16:15:44 UTC
byzantiumBlock: some(4_370_000.toBlockNumber), # 2017-10-16 05:22:11 UTC
constantinopleBlock: some(7_280_000.toBlockNumber), # Skipped on Mainnet
petersburgBlock: some(7_280_000.toBlockNumber), # 2019-02-28 19:52:04 UTC
istanbulBlock: some(9_069_000.toBlockNumber), # 2019-12-08 00:25:09 UTC
muirGlacierBlock: some(9_200_000.toBlockNumber), # 2020-01-02 08:30:49 UTC
berlinBlock: some(12_244_000.toBlockNumber), # 2021-04-15 10:07:03 UTC
londonBlock: some(12_965_000.toBlockNumber), # 2021-08-05 12:33:42 UTC
arrowGlacierBlock: some(13_773_000.toBlockNumber), # 2021-12-09 19:55:23 UTC
grayGlacierBlock: some(15_050_000.toBlockNumber), # 2022-06-30 10:54:04 UTC
terminalTotalDifficulty: some(mainNetTTD),
shanghaiTime: some(1_681_338_455.EthTime), # 2023-04-12 10:27:35 UTC
cancunTime: some(1_710_338_135.EthTime) # 2024-03-13 13:55:35 UTC
eip155Block: Opt.some(2_675_000.BlockNumber), # Same as EIP-158
eip158Block: Opt.some(2_675_000.BlockNumber), # 2016-11-22 16:15:44 UTC
byzantiumBlock: Opt.some(4_370_000.BlockNumber), # 2017-10-16 05:22:11 UTC
constantinopleBlock: Opt.some(7_280_000.BlockNumber), # Skipped on Mainnet
petersburgBlock: Opt.some(7_280_000.BlockNumber), # 2019-02-28 19:52:04 UTC
istanbulBlock: Opt.some(9_069_000.BlockNumber), # 2019-12-08 00:25:09 UTC
muirGlacierBlock: Opt.some(9_200_000.BlockNumber), # 2020-01-02 08:30:49 UTC
berlinBlock: Opt.some(12_244_000.BlockNumber), # 2021-04-15 10:07:03 UTC
londonBlock: Opt.some(12_965_000.BlockNumber), # 2021-08-05 12:33:42 UTC
arrowGlacierBlock: Opt.some(13_773_000.BlockNumber), # 2021-12-09 19:55:23 UTC
grayGlacierBlock: Opt.some(15_050_000.BlockNumber), # 2022-06-30 10:54:04 UTC
terminalTotalDifficulty: Opt.some(mainNetTTD),
shanghaiTime: Opt.some(1_681_338_455.EthTime), # 2023-04-12 10:27:35 UTC
cancunTime: Opt.some(1_710_338_135.EthTime), # 2024-03-13 13:55:35 UTC
)
of SepoliaNet:
const sepoliaTTD = parse("17000000000000000",UInt256)
ChainConfig(
consensusType: ConsensusType.POW,
chainId: SepoliaNet.ChainId,
homesteadBlock: some(0.toBlockNumber),
homesteadBlock: Opt.some(0.BlockNumber),
daoForkSupport: false,
eip150Block: some(0.toBlockNumber),
eip150Block: Opt.some(0.BlockNumber),
eip150Hash: toDigest("0000000000000000000000000000000000000000000000000000000000000000"),
eip155Block: some(0.toBlockNumber),
eip158Block: some(0.toBlockNumber),
byzantiumBlock: some(0.toBlockNumber),
constantinopleBlock: some(0.toBlockNumber),
petersburgBlock: some(0.toBlockNumber),
istanbulBlock: some(0.toBlockNumber),
muirGlacierBlock: some(0.toBlockNumber),
berlinBlock: some(0.toBlockNumber),
londonBlock: some(0.toBlockNumber),
mergeForkBlock: some(1735371.toBlockNumber),
terminalTotalDifficulty: some(sepoliaTTD),
shanghaiTime: some(1_677_557_088.EthTime),
cancunTime: some(1_706_655_072.EthTime), # 2024-01-30 22:51:12
eip155Block: Opt.some(0.BlockNumber),
eip158Block: Opt.some(0.BlockNumber),
byzantiumBlock: Opt.some(0.BlockNumber),
constantinopleBlock: Opt.some(0.BlockNumber),
petersburgBlock: Opt.some(0.BlockNumber),
istanbulBlock: Opt.some(0.BlockNumber),
muirGlacierBlock: Opt.some(0.BlockNumber),
berlinBlock: Opt.some(0.BlockNumber),
londonBlock: Opt.some(0.BlockNumber),
mergeForkBlock: Opt.some(1735371.BlockNumber),
terminalTotalDifficulty: Opt.some(sepoliaTTD),
shanghaiTime: Opt.some(1_677_557_088.EthTime),
cancunTime: Opt.some(1_706_655_072.EthTime), # 2024-01-30 22:51:12
)
of HoleskyNet:
ChainConfig(
consensusType: ConsensusType.POS,
chainId: HoleskyNet.ChainId,
homesteadBlock: some(0.toBlockNumber),
eip150Block: some(0.toBlockNumber),
eip155Block: some(0.toBlockNumber),
eip158Block: some(0.toBlockNumber),
byzantiumBlock: some(0.toBlockNumber),
constantinopleBlock: some(0.toBlockNumber),
petersburgBlock: some(0.toBlockNumber),
istanbulBlock: some(0.toBlockNumber),
berlinBlock: some(0.toBlockNumber),
londonBlock: some(0.toBlockNumber),
mergeForkBlock: some(0.toBlockNumber),
terminalTotalDifficulty: some(0.u256),
terminalTotalDifficultyPassed: some(true),
shanghaiTime: some(1_696_000_704.EthTime),
cancunTime: some(1_707_305_664.EthTime), # 2024-02-07 11:34:24
homesteadBlock: Opt.some(0.BlockNumber),
eip150Block: Opt.some(0.BlockNumber),
eip155Block: Opt.some(0.BlockNumber),
eip158Block: Opt.some(0.BlockNumber),
byzantiumBlock: Opt.some(0.BlockNumber),
constantinopleBlock: Opt.some(0.BlockNumber),
petersburgBlock: Opt.some(0.BlockNumber),
istanbulBlock: Opt.some(0.BlockNumber),
berlinBlock: Opt.some(0.BlockNumber),
londonBlock: Opt.some(0.BlockNumber),
mergeForkBlock: Opt.some(0.BlockNumber),
terminalTotalDifficulty: Opt.some(0.u256),
terminalTotalDifficultyPassed: Opt.some(true),
shanghaiTime: Opt.some(1_696_000_704.EthTime),
cancunTime: Opt.some(1_707_305_664.EthTime), # 2024-02-07 11:34:24
)
else:
ChainConfig()
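
All the fork thresholds above use the `.BlockNumber` literal style; with
BlockNumber now a plain unsigned integer, that is simply a type conversion
wrapped in Opt.some. A tiny sketch under that assumption, with a local
stand-in alias:

# Assumption: BlockNumber == uint64 after the bump.
import results

type BlockNumber = uint64

let londonBlock = Opt.some(12_965_000.BlockNumber)
doAssert londonBlock.get == 12_965_000'u64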

View File

@ -10,7 +10,6 @@
{.push raises: [].}
import
std/[options],
chronicles,
eth/trie/trie_defs,
../core/[pow, casper],
@ -24,7 +23,6 @@ export
core_db,
constants,
errors,
options,
evmforks,
hardforks,
genesis,
@ -159,13 +157,13 @@ proc init(com : CommonRef,
# by setForkId
if genesis.isNil.not:
com.hardForkTransition(ForkDeterminationInfo(
blockNumber: 0.toBlockNumber,
number: 0.BlockNumber,
td: Opt.some(0.u256),
time: Opt.some(genesis.timestamp)
))
# Must not overwrite the global state on the single state DB
if not db.getBlockHeader(0.toBlockNumber, com.genesisHeader):
if not db.getBlockHeader(0.BlockNumber, com.genesisHeader):
com.genesisHeader = toGenesisHeader(genesis,
com.currentFork, com.db)
@ -173,7 +171,7 @@ proc init(com : CommonRef,
com.pos.timestamp = genesis.timestamp
else:
com.hardForkTransition(ForkDeterminationInfo(
blockNumber: 0.toBlockNumber,
number: 0.BlockNumber,
td: Opt.some(0.u256),
time: Opt.some(TimeZero)
))
@ -191,7 +189,7 @@ proc getTd(com: CommonRef, blockHash: Hash256): Opt[DifficultyInt] =
func needTdForHardForkDetermination(com: CommonRef): bool =
let t = com.forkTransitionTable.mergeForkTransitionThreshold
t.ttdPassed.isNone and t.blockNumber.isNone and t.ttd.isSome
t.ttdPassed.isNone and t.number.isNone and t.ttd.isSome
proc getTdIfNecessary(com: CommonRef, blockHash: Hash256): Opt[DifficultyInt] =
if needTdForHardForkDetermination(com):
@ -288,7 +286,7 @@ func hardForkTransition*(
td: Opt[DifficultyInt],
time: Opt[EthTime]) =
com.hardForkTransition(ForkDeterminationInfo(
blockNumber: number, time: time, td: td))
number: number, time: time, td: td))
proc hardForkTransition*(
com: CommonRef,
@ -301,7 +299,7 @@ proc hardForkTransition*(
com: CommonRef, header: BlockHeader)
{.gcsafe, raises: [].} =
com.hardForkTransition(
header.parentHash, header.blockNumber, Opt.some(header.timestamp))
header.parentHash, header.number, Opt.some(header.timestamp))
func toEVMFork*(com: CommonRef, forkDeterminer: ForkDeterminationInfo): EVMFork =
## similar to toFork, but produce EVMFork
@ -333,7 +331,7 @@ func forkId*(com: CommonRef, head, time: uint64): ForkID {.gcsafe.} =
func forkId*(com: CommonRef, head: BlockNumber, time: EthTime): ForkID {.gcsafe.} =
## EIP 2364/2124
com.forkIdCalculator.newID(head.truncate(uint64), time.uint64)
com.forkIdCalculator.newID(head, time.uint64)
func isEIP155*(com: CommonRef, number: BlockNumber): bool =
com.config.eip155Block.isSome and number >= com.config.eip155Block.get
@ -370,7 +368,7 @@ proc initializeEmptyDb*(com: CommonRef) =
kvt.hasKey(key).expect "valid bool"
if canonicalHeadHashKey().toOpenArray notin kvt:
info "Writing genesis to DB"
doAssert(com.genesisHeader.blockNumber.isZero,
doAssert(com.genesisHeader.number == 0.BlockNumber,
"can't commit genesis block with number > 0")
doAssert(com.db.persistHeader(com.genesisHeader,
com.consensusType == ConsensusType.POS,
@ -412,19 +410,19 @@ func db*(com: CommonRef): CoreDbRef =
func consensus*(com: CommonRef): ConsensusType =
com.consensusType
func eip150Block*(com: CommonRef): Option[BlockNumber] =
func eip150Block*(com: CommonRef): Opt[BlockNumber] =
com.config.eip150Block
func eip150Hash*(com: CommonRef): Hash256 =
com.config.eip150Hash
func daoForkBlock*(com: CommonRef): Option[BlockNumber] =
func daoForkBlock*(com: CommonRef): Opt[BlockNumber] =
com.config.daoForkBlock
func daoForkSupport*(com: CommonRef): bool =
com.config.daoForkSupport
func ttd*(com: CommonRef): Option[DifficultyInt] =
func ttd*(com: CommonRef): Opt[DifficultyInt] =
com.config.terminalTotalDifficulty
func ttdPassed*(com: CommonRef): bool =
@ -483,7 +481,7 @@ func `startOfHistory=`*(com: CommonRef, val: Hash256) =
## Setter
com.startOfHistory = val
func setTTD*(com: CommonRef, ttd: Option[DifficultyInt]) =
func setTTD*(com: CommonRef, ttd: Opt[DifficultyInt]) =
## useful for testing
com.config.terminalTotalDifficulty = ttd
# rebuild the MergeFork piece of the forkTransitionTable

View File

@ -111,19 +111,19 @@ proc toGenesisHeader*(
extraData: g.extraData,
gasLimit: g.gasLimit,
difficulty: g.difficulty,
mixDigest: g.mixHash,
mixHash: g.mixHash,
coinbase: g.coinbase,
stateRoot: sdb.rootHash(),
parentHash: GENESIS_PARENT_HASH,
txRoot: EMPTY_ROOT_HASH,
receiptRoot: EMPTY_ROOT_HASH,
receiptsRoot: EMPTY_ROOT_HASH,
ommersHash: EMPTY_UNCLE_HASH
)
if g.baseFeePerGas.isSome:
result.baseFee = g.baseFeePerGas.get()
result.baseFeePerGas = Opt.some(g.baseFeePerGas.get)
elif fork >= London:
result.baseFee = EIP1559_INITIAL_BASE_FEE.u256
result.baseFeePerGas = Opt.some(EIP1559_INITIAL_BASE_FEE)
if g.gasLimit == 0:
result.gasLimit = GENESIS_GAS_LIMIT
@ -132,12 +132,12 @@ proc toGenesisHeader*(
result.difficulty = GENESIS_DIFFICULTY
if fork >= Shanghai:
result.withdrawalsRoot = some(EMPTY_ROOT_HASH)
result.withdrawalsRoot = Opt.some(EMPTY_ROOT_HASH)
if fork >= Cancun:
result.blobGasUsed = g.blobGasUsed.get(0'u64).some
result.excessBlobGas = g.excessBlobGas.get(0'u64).some
result.parentBeaconBlockRoot = g.parentBeaconBlockRoot.get(Hash256()).some
result.blobGasUsed = Opt.some g.blobGasUsed.get(0'u64)
result.excessBlobGas = Opt.some g.excessBlobGas.get(0'u64)
result.parentBeaconBlockRoot = Opt.some g.parentBeaconBlockRoot.get(Hash256())
proc toGenesisHeader*(
genesis: Genesis;
@ -159,7 +159,7 @@ proc toGenesisHeader*(
## Generate the genesis block header from the `genesis` and `config`
## argument value.
let map = toForkTransitionTable(params.config)
let fork = map.toHardFork(forkDeterminationInfo(0.toBlockNumber, params.genesis.timestamp))
let fork = map.toHardFork(forkDeterminationInfo(0.BlockNumber, params.genesis.timestamp))
toGenesisHeader(params.genesis, fork, db)
# ------------------------------------------------------------------------------


@ -8,9 +8,8 @@
# those terms.
import
std/[options, strutils],
std/[strutils],
eth/common,
results,
stew/endians2,
json_serialization,
../utils/utils,
@ -55,14 +54,14 @@ const firstTimeBasedFork* = Shanghai
type
MergeForkTransitionThreshold* = object
blockNumber*: Option[BlockNumber]
ttd*: Option[DifficultyInt]
ttdPassed*: Option[bool]
number*: Opt[BlockNumber]
ttd*: Opt[DifficultyInt]
ttdPassed*: Opt[bool]
ForkTransitionTable* = object
blockNumberThresholds*: array[Frontier..GrayGlacier, Option[BlockNumber]]
blockNumberThresholds*: array[Frontier..GrayGlacier, Opt[BlockNumber]]
mergeForkTransitionThreshold*: MergeForkTransitionThreshold
timeThresholds*: array[Shanghai..Prague, Option[EthTime]]
timeThresholds*: array[Shanghai..Prague, Opt[EthTime]]
# Starting with Shanghai, forking is based on timestamp
# rather than block number.
@ -80,7 +79,7 @@ type
# it makes sense to allow time to be optional. See the
# comment below on forkDeterminationInfo.
ForkDeterminationInfo* = object
blockNumber*: BlockNumber
number*: BlockNumber
time*: Opt[EthTime]
td*: Opt[DifficultyInt]
@ -90,15 +89,15 @@ func forkDeterminationInfo*(n: BlockNumber): ForkDeterminationInfo =
# like various tests, where we only have block number and the tests are
# meant for pre-Merge forks, so maybe those are okay.
ForkDeterminationInfo(
blockNumber: n, time: Opt.none(EthTime), td: Opt.none(DifficultyInt))
number: n, time: Opt.none(EthTime), td: Opt.none(DifficultyInt))
func forkDeterminationInfo*(n: BlockNumber, t: EthTime): ForkDeterminationInfo =
ForkDeterminationInfo(
blockNumber: n, time: Opt.some(t), td: Opt.none(DifficultyInt))
number: n, time: Opt.some(t), td: Opt.none(DifficultyInt))
func forkDeterminationInfo*(header: BlockHeader): ForkDeterminationInfo =
# FIXME-Adam-mightAlsoNeedTTD?
forkDeterminationInfo(header.blockNumber, header.timestamp)
forkDeterminationInfo(header.number, header.timestamp)
func adjustForNextBlock*(n: BlockNumber): BlockNumber =
n + 1
@ -114,7 +113,7 @@ func adjustForNextBlock*(t: EthTime): EthTime =
func adjustForNextBlock*(f: ForkDeterminationInfo): ForkDeterminationInfo =
ForkDeterminationInfo(
blockNumber: adjustForNextBlock(f.blockNumber),
number: adjustForNextBlock(f.number),
time: f.time.map(adjustForNextBlock),
td: f.td
)
@ -127,15 +126,15 @@ func adjustForNextBlock*(f: ForkDeterminationInfo): ForkDeterminationInfo =
# Shanghai.
func isGTETransitionThreshold*(map: ForkTransitionTable, forkDeterminer: ForkDeterminationInfo, fork: HardFork): bool =
if fork <= lastPurelyBlockNumberBasedFork:
map.blockNumberThresholds[fork].isSome and forkDeterminer.blockNumber >= map.blockNumberThresholds[fork].get
map.blockNumberThresholds[fork].isSome and forkDeterminer.number >= map.blockNumberThresholds[fork].get
elif fork == MergeFork:
# MergeFork is a special case that can use either block number or ttd;
# ttdPassed > block number > ttd takes precedence.
let t = map.mergeForkTransitionThreshold
if t.ttdPassed.isSome:
t.ttdPassed.get
elif t.blockNumber.isSome:
forkDeterminer.blockNumber >= t.blockNumber.get
elif t.number.isSome:
forkDeterminer.number >= t.number.get
elif t.ttd.isSome and forkDeterminer.td.isSome:
forkDeterminer.td.get >= t.ttd.get
else:
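For illustration, a minimal sketch of how the three MergeFork knobs interact under the precedence above (the values are hypothetical, not taken from any real chain config; assumes the types declared in this module):

  let t = MergeForkTransitionThreshold(
    number: Opt.some(100.BlockNumber),
    ttd: Opt.some(5.u256),
    ttdPassed: Opt.none(bool))
  let f = ForkDeterminationInfo(
    number: 99.BlockNumber,
    time: Opt.none(EthTime),
    td: Opt.some(9.u256))
  # ttdPassed is unset and a block number is configured, so only the
  # number is consulted: 99 < 100 means "not yet MergeFork", even
  # though f.td (9) already exceeds t.ttd (5).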
@ -150,34 +149,34 @@ type
# please update forkBlockField constant too
ChainConfig* = ref object
chainId* : ChainId
homesteadBlock* : Option[BlockNumber]
daoForkBlock* : Option[BlockNumber]
homesteadBlock* : Opt[BlockNumber]
daoForkBlock* : Opt[BlockNumber]
daoForkSupport* : bool
eip150Block* : Option[BlockNumber]
eip150Block* : Opt[BlockNumber]
eip150Hash* : Hash256
eip155Block* : Option[BlockNumber]
eip158Block* : Option[BlockNumber]
byzantiumBlock* : Option[BlockNumber]
constantinopleBlock*: Option[BlockNumber]
petersburgBlock* : Option[BlockNumber]
istanbulBlock* : Option[BlockNumber]
muirGlacierBlock* : Option[BlockNumber]
berlinBlock* : Option[BlockNumber]
londonBlock* : Option[BlockNumber]
arrowGlacierBlock* : Option[BlockNumber]
grayGlacierBlock* : Option[BlockNumber]
eip155Block* : Opt[BlockNumber]
eip158Block* : Opt[BlockNumber]
byzantiumBlock* : Opt[BlockNumber]
constantinopleBlock*: Opt[BlockNumber]
petersburgBlock* : Opt[BlockNumber]
istanbulBlock* : Opt[BlockNumber]
muirGlacierBlock* : Opt[BlockNumber]
berlinBlock* : Opt[BlockNumber]
londonBlock* : Opt[BlockNumber]
arrowGlacierBlock* : Opt[BlockNumber]
grayGlacierBlock* : Opt[BlockNumber]
# mergeNetsplitBlock is an alias to mergeForkBlock
# and is used for geth compatibility layer
mergeNetsplitBlock* : Option[BlockNumber]
mergeNetsplitBlock* : Opt[BlockNumber]
mergeForkBlock* : Option[BlockNumber]
shanghaiTime* : Option[EthTime]
cancunTime* : Option[EthTime]
pragueTime* : Option[EthTime]
mergeForkBlock* : Opt[BlockNumber]
shanghaiTime* : Opt[EthTime]
cancunTime* : Opt[EthTime]
pragueTime* : Opt[EthTime]
terminalTotalDifficulty*: Option[UInt256]
terminalTotalDifficultyPassed*: Option[bool]
terminalTotalDifficulty*: Opt[UInt256]
terminalTotalDifficultyPassed*: Opt[bool]
consensusType*
{.dontSerialize.} : ConsensusType
@ -185,10 +184,10 @@ type
# are in a valid order.
BlockNumberBasedForkOptional* = object
name*: string
number*: Option[BlockNumber]
number*: Opt[BlockNumber]
TimeBasedForkOptional* = object
name*: string
time*: Option[EthTime]
time*: Opt[EthTime]
func countTimeFields(): int {.compileTime.} =
var z = ChainConfig()
@ -240,7 +239,7 @@ const
func mergeForkTransitionThreshold*(conf: ChainConfig): MergeForkTransitionThreshold =
MergeForkTransitionThreshold(
blockNumber: conf.mergeForkBlock,
number: conf.mergeForkBlock,
ttd: conf.terminalTotalDifficulty,
ttdPassed: conf.terminalTotalDifficultyPassed
)
@ -250,7 +249,7 @@ func toForkTransitionTable*(conf: ChainConfig): ForkTransitionTable =
# field names, but it doesn't seem worthwhile anymore
# (now that there's irregularity due to block-based vs
# timestamp-based forking).
result.blockNumberThresholds[Frontier ] = some(0.toBlockNumber)
result.blockNumberThresholds[Frontier ] = Opt.some(0.BlockNumber)
result.blockNumberThresholds[Homestead ] = conf.homesteadBlock
result.blockNumberThresholds[DAOFork ] = conf.daoForkBlock
result.blockNumberThresholds[Tangerine ] = conf.eip150Block
@ -285,7 +284,7 @@ func populateFromForkTransitionTable*(conf: ChainConfig, t: ForkTransitionTable)
conf.arrowGlacierBlock = t.blockNumberThresholds[HardFork.ArrowGlacier]
conf.grayGlacierBlock = t.blockNumberThresholds[HardFork.GrayGlacier]
conf.mergeForkBlock = t.mergeForkTransitionThreshold.blockNumber
conf.mergeForkBlock = t.mergeForkTransitionThreshold.number
conf.terminalTotalDifficulty = t.mergeForkTransitionThreshold.ttd
conf.terminalTotalDifficultyPassed = t.mergeForkTransitionThreshold.ttdPassed
@ -388,15 +387,15 @@ func initForkIdCalculator*(map: ForkTransitionTable,
var forksByBlock: seq[uint64]
for fork, val in map.blockNumberThresholds:
if val.isNone: continue
let val64 = val.get.truncate(uint64)
let val64 = val.get
if forksByBlock.len == 0:
forksByBlock.add val64
elif forksByBlock[^1] != val64:
# Deduplicate fork identifiers applying multiple forks
forksByBlock.add val64
if map.mergeForkTransitionThreshold.blockNumber.isSome:
let val64 = map.mergeForkTransitionThreshold.blockNumber.get.truncate(uint64)
if map.mergeForkTransitionThreshold.number.isSome:
let val64 = map.mergeForkTransitionThreshold.number.get
if forksByBlock.len == 0:
forksByBlock.add val64
elif forksByBlock[^1] != val64:
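As a concrete instance of the deduplication above, using the well-known mainnet thresholds where Constantinople and Petersburg share a block (the numbers here only illustrate the loop, they are not read from any config):

  let thresholds = [4_370_000'u64, 7_280_000'u64, 7_280_000'u64, 9_069_000'u64]
  var forksByBlock: seq[uint64]
  for val64 in thresholds:
    if forksByBlock.len == 0 or forksByBlock[^1] != val64:
      forksByBlock.add val64
  # forksByBlock == @[4_370_000'u64, 7_280_000'u64, 9_069_000'u64]
  # The duplicate 7_280_000 contributes a single fork-id checkpoint,
  # as EIP-2124 requires.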


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -36,10 +36,10 @@ const
UNCLE_DEPTH_PENALTY_FACTOR* = 8.u256
MAX_UNCLE_DEPTH* = 6.u256
MAX_UNCLE_DEPTH* = 6
MAX_UNCLES* = 2
GENESIS_BLOCK_NUMBER* = 0.toBlockNumber
GENESIS_BLOCK_NUMBER* = 0.BlockNumber
GENESIS_DIFFICULTY* = 131_072.u256
GENESIS_GAS_LIMIT* = 3_141_592
GENESIS_PARENT_HASH* = ZERO_HASH256
@ -55,7 +55,7 @@ const
GAS_MOD_EXP_QUADRATIC_DENOMINATOR* = 20.u256
MAX_PREV_HEADER_DEPTH* = 256.toBlockNumber
MAX_PREV_HEADER_DEPTH* = 256'u64
MaxCallDepth* = 1024
SECPK1_N* = UInt256.fromHex("fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141")


@ -111,10 +111,6 @@ proc `verifyFrom=`*(c: ChainRef; verifyFrom: BlockNumber) =
## `true`.
c.verifyFrom = verifyFrom
proc `verifyFrom=`*(c: ChainRef; verifyFrom: uint64) =
## Variant of `verifyFrom=`
c.verifyFrom = verifyFrom.u256
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------


@ -39,10 +39,11 @@ type
PersistStats = tuple[blocks: int, txs: int, gas: GasInt]
const CleanUpEpoch = 30_000.toBlockNumber
## Regular checks for history clean up (applies to single state DB). This
## is mainly a debugging/testing feature so that the database can be held
## a bit smaller. It is not applicable to a full node.
const
CleanUpEpoch = 30_000.BlockNumber
## Regular checks for history clean up (applies to single state DB). This
## is mainly a debugging/testing feature so that the database can be held
## a bit smaller. It is not applicable to a full node.
# ------------------------------------------------------------------------------
# Private
@ -82,8 +83,8 @@ proc persistBlocksImpl(
let vmState = ?c.getVmState(blocks[0].header)
let
fromBlock = blocks[0].header.blockNumber
toBlock = blocks[blocks.high()].header.blockNumber
fromBlock = blocks[0].header.number
toBlock = blocks[blocks.high()].header.number
trace "Persisting blocks", fromBlock, toBlock
var
@ -96,10 +97,10 @@ proc persistBlocksImpl(
c.com.hardForkTransition(header)
if not vmState.reinit(header):
debug "Cannot update VmState", blockNumber = header.blockNumber
return err("Cannot update VmState to block " & $header.blockNumber)
debug "Cannot update VmState", blockNumber = header.number
return err("Cannot update VmState to block " & $header.number)
if c.extraValidation and c.verifyFrom <= header.blockNumber:
if c.extraValidation and c.verifyFrom <= header.number:
# TODO: how to checkseal from here
?c.com.validateHeaderAndKinship(blk, checkSealOK = false)
@ -118,7 +119,7 @@ proc persistBlocksImpl(
return err("Could not persist header")
if NoSaveTxs notin flags:
c.db.persistTransactions(header.blockNumber, blk.transactions)
c.db.persistTransactions(header.number, blk.transactions)
if NoSaveReceipts notin flags:
c.db.persistReceipts(vmState.receipts)
@ -129,7 +130,7 @@ proc persistBlocksImpl(
# update currentBlock *after* we persist it
# so the rpc return consistent result
# between eth_blockNumber and eth_syncing
c.com.syncCurrent = header.blockNumber
c.com.syncCurrent = header.number
txs += blk.transactions.len
gas += blk.header.gasUsed


@ -22,8 +22,8 @@ proc calculateReward*(vmState: BaseVMState; account: EthAddress;
var mainReward = blockReward
for uncle in uncles:
var uncleReward = uncle.blockNumber.u256 + 8.u256
uncleReward -= number
var uncleReward = uncle.number.u256 + 8.u256
uncleReward -= number.u256
uncleReward = uncleReward * blockReward
uncleReward = uncleReward div 8.u256
vmState.mutateStateDB:
@ -36,6 +36,6 @@ proc calculateReward*(vmState: BaseVMState; account: EthAddress;
proc calculateReward*(vmState: BaseVMState;
header: BlockHeader; uncles: openArray[BlockHeader]) =
vmState.calculateReward(header.coinbase, header.blockNumber, uncles)
vmState.calculateReward(header.coinbase, header.number, uncles)
# End
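A quick numeric check of the uncle-reward arithmetic above (illustrative numbers): for an including block at height 1000 and an uncle at height 998,

  # uncleReward = (998 + 8 - 1000) * blockReward div 8
  #             = 6 * blockReward div 8
  # i.e. an uncle two blocks behind its includer earns 6/8 of the full
  # block reward, credited to the uncle's coinbase.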


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -61,7 +61,7 @@ proc makeReceipt*(vmState: BaseVMState; txType: TxType): Receipt =
rec.receiptType = txType
rec.cumulativeGasUsed = vmState.cumulativeGasUsed
rec.logs = vmState.getAndClearLogEntries()
rec.bloom = logsBloom(rec.logs).value.toBytesBE
rec.logsBloom = logsBloom(rec.logs).value.toBytesBE
rec
# ------------------------------------------------------------------------------


@ -47,7 +47,7 @@ proc procBlkPreamble(vmState: BaseVMState, blk: EthBlock): Result[void, string]
template header(): BlockHeader =
blk.header
if vmState.com.daoForkSupport and vmState.com.daoForkBlock.get == header.blockNumber:
if vmState.com.daoForkSupport and vmState.com.daoForkBlock.get == header.number:
vmState.mutateStateDB:
db.applyDAOHardFork()
@ -112,23 +112,23 @@ proc procBlkEpilogue(vmState: BaseVMState, header: BlockHeader): Result[void, st
if header.stateRoot != stateDB.rootHash:
# TODO replace logging with better error
debug "wrong state root in block",
blockNumber = header.blockNumber,
blockNumber = header.number,
expected = header.stateRoot,
actual = stateDB.rootHash,
arrivedFrom = vmState.com.db.getCanonicalHead().stateRoot
return err("stateRoot mismatch")
let bloom = createBloom(vmState.receipts)
if header.bloom != bloom:
if header.logsBloom != bloom:
return err("bloom mismatch")
let receiptRoot = calcReceiptRoot(vmState.receipts)
if header.receiptRoot != receiptRoot:
let receiptsRoot = calcReceiptsRoot(vmState.receipts)
if header.receiptsRoot != receiptsRoot:
# TODO replace logging with better error
debug "wrong receiptRoot in block",
blockNumber = header.blockNumber,
actual = receiptRoot,
expected = header.receiptRoot
blockNumber = header.number,
actual = receiptsRoot,
expected = header.receiptsRoot
return err("receiptRoot mismatch")
ok()


@ -32,7 +32,7 @@ proc eip1559BaseFee(header: BlockHeader; fork: EVMFork): UInt256 =
## function just plays safe. In particular, the `test_general_state_json.nim`
## module modifies this block header `baseFee` field unconditionally :(.
if FkLondon <= fork:
result = header.baseFee
result = header.baseFeePerGas.get(0.u256)
proc commitOrRollbackDependingOnGasUsed(
vmState: BaseVMState;
@ -79,7 +79,7 @@ proc processTransactionImpl(
baseFee256 = header.eip1559BaseFee(fork)
baseFee = baseFee256.truncate(GasInt)
tx = eip1559TxNormalization(tx, baseFee)
priorityFee = min(tx.maxPriorityFee, tx.maxFee - baseFee)
priorityFee = min(tx.maxPriorityFeePerGas, tx.maxFeePerGas - baseFee)
excessBlobGas = header.excessBlobGas.get(0'u64)
# Return failure unless explicitely set `ok()`


@ -58,8 +58,8 @@ proc calcEip1599BaseFee*(com: CommonRef; parent: BlockHeader): UInt256 =
# If the current block is the first EIP-1559 block, return the
# initial base fee.
if com.isLondon(parent.blockNumber):
eip1559.calcEip1599BaseFee(parent.gasLimit, parent.gasUsed, parent.baseFee)
if com.isLondon(parent.number):
eip1559.calcEip1599BaseFee(parent.gasLimit, parent.gasUsed, parent.baseFeePerGas.get(0.u256))
else:
EIP1559_INITIAL_BASE_FEE
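For orientation, the update rule behind calcEip1599BaseFee is the standard EIP-1559 one (restated here from the EIP; the exact implementation lives in the eip1559 module and may differ in edge-case handling):

  # target = parent.gasLimit div EIP1559_ELASTICITY_MULTIPLIER
  # gasUsed == target:  next = parent.baseFeePerGas
  # gasUsed >  target:  next = parent.baseFeePerGas +
  #   max(1, parent.baseFeePerGas * (gasUsed - target) div target div 8)
  # gasUsed <  target:  next = parent.baseFeePerGas -
  #   parent.baseFeePerGas * (target - gasUsed) div target div 8
  #
  # Worked example: gasLimit 30_000_000, gasUsed 22_500_000, parent base
  # fee 100 gwei -> target 15_000_000, so the fee rises by
  # 100 gwei * 7_500_000 div 15_000_000 div 8 = 6.25 gwei.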
@ -68,7 +68,7 @@ proc verifyEip1559Header(com: CommonRef;
parent, header: BlockHeader): Result[void, string]
{.raises: [].} =
## Verify that the gas limit remains within allowed bounds
let limit = if com.isLondon(parent.blockNumber):
let limit = if com.isLondon(parent.number):
parent.gasLimit
else:
parent.gasLimit * EIP1559_ELASTICITY_MULTIPLIER
@ -76,7 +76,7 @@ proc verifyEip1559Header(com: CommonRef;
if rc.isErr:
return rc
let headerBaseFee = header.baseFee
let headerBaseFee = header.baseFeePerGas.get(0.u256)
# Verify the header is not malformed
if headerBaseFee.isZero:
return err("Post EIP-1559 header expected to have base fee")
@ -86,8 +86,8 @@ proc verifyEip1559Header(com: CommonRef;
if headerBaseFee != expectedBaseFee:
try:
return err(&"invalid baseFee: have {expectedBaseFee}, "&
&"want {header.baseFee}, " &
&"parent.baseFee {parent.baseFee}, "&
&"want {header.baseFeePerGas}, " &
&"parent.baseFee {parent.baseFeePerGas}, "&
&"parent.gasUsed {parent.gasUsed}")
except ValueError:
# TODO deprecate-strformat
@ -98,10 +98,11 @@ proc verifyEip1559Header(com: CommonRef;
proc validateGasLimitOrBaseFee*(com: CommonRef;
header, parent: BlockHeader): Result[void, string] =
if not com.isLondon(header.blockNumber):
if not com.isLondon(header.number):
# Verify BaseFee not present before EIP-1559 fork.
if not header.baseFee.isZero:
return err("invalid baseFee before London fork: have " & $header.baseFee & ", want <0>")
let baseFeePerGas = header.baseFeePerGas.get(0.u256)
if not baseFeePerGas.isZero:
return err("invalid baseFee before London fork: have " & $baseFeePerGas & ", want <0>")
let rc = com.validateGasLimit(header)
if rc.isErr:
return rc


@ -27,16 +27,16 @@ type
PowDigest = tuple ##\
## Return value from the `hashimotoLight()` function
mixDigest: Hash256
value: Hash256
value : Hash256
PowSpecs* = object ##\
## Relevant block header parts for PoW mining & verifying. This object
## might be more useful for testing and debugging than for production.
blockNumber*: BlockNumber
number* : BlockNumber
miningHash*: Hash256
nonce: BlockNonce
mixDigest*: Hash256
difficulty: DifficultyInt
nonce : BlockNonce
mixHash* : Hash256
difficulty : DifficultyInt
PowHeader = object ##\
## Stolen from `p2p/validate.MiningHeader`
@ -45,10 +45,10 @@ type
coinbase : EthAddress
stateRoot : Hash256
txRoot : Hash256
receiptRoot : Hash256
bloom : common.BloomFilter
receiptsRoot: Hash256
logsBloom : common.BloomFilter
difficulty : DifficultyInt
blockNumber : BlockNumber
number : BlockNumber
gasLimit : GasInt
gasUsed : GasInt
timestamp : EthTime
@ -69,24 +69,24 @@ type
func append(w: var RlpWriter; specs: PowSpecs) =
## RLP support
w.startList(5)
w.append(HashOrNum(isHash: false, number: specs.blockNumber))
w.append(HashOrNum(isHash: false, number: specs.number))
w.append(HashOrNum(isHash: true, hash: specs.miningHash))
w.append(specs.nonce.toUint)
w.append(HashOrNum(isHash: true, hash: specs.mixDigest))
w.append(HashOrNum(isHash: true, hash: specs.mixHash))
w.append(specs.difficulty)
func read(rlp: var Rlp; Q: type PowSpecs): Q
{.raises: [RlpError].} =
## RLP support
rlp.tryEnterList()
result.blockNumber = rlp.read(HashOrNum).number
result.number = rlp.read(HashOrNum).number
result.miningHash = rlp.read(HashOrNum).hash
result.nonce = rlp.read(uint64).toBlockNonce
result.mixDigest = rlp.read(HashOrNum).hash
result.mixHash = rlp.read(HashOrNum).hash
result.difficulty = rlp.read(DifficultyInt)
func rlpTextEncode(specs: PowSpecs): string =
"specs #" & $specs.blockNumber & " " & rlp.encode(specs).toHex
"specs #" & $specs.number & " " & rlp.encode(specs).toHex
func decodeRlpText(data: string): PowSpecs
{.raises: [CatchableError].} =
@ -108,10 +108,10 @@ func miningHash(header: BlockHeader): Hash256 =
coinbase: header.coinbase,
stateRoot: header.stateRoot,
txRoot: header.txRoot,
receiptRoot: header.receiptRoot,
bloom: header.bloom,
receiptsRoot:header.receiptsRoot,
logsBloom: header.logsBloom,
difficulty: header.difficulty,
blockNumber: header.blockNumber,
number: header.number,
gasLimit: header.gasLimit,
gasUsed: header.gasUsed,
timestamp: header.timestamp,
@ -148,10 +148,10 @@ func getPowSpecs*(header: BlockHeader): PowSpecs =
## for mining or pow verification. This function might be more useful for
## testing and debugging than for production.
PowSpecs(
blockNumber: header.blockNumber,
number: header.number,
miningHash: header.miningHash,
nonce: header.nonce,
mixDigest: header.mixDigest,
mixHash: header.mixHash,
difficulty: header.difficulty)
func getPowCacheLookup*(tm: PowRef;
@ -178,7 +178,7 @@ func getPowCacheLookup*(tm: PowRef;
func getPowDigest(tm: PowRef; blockNumber: BlockNumber;
powHeaderDigest: Hash256; nonce: BlockNonce): PowDigest =
## Calculate the expected value of `header.mixDigest` using the
## Calculate the expected value of `header.mixHash` using the
## `hashimotoLight()` library method.
let
ds = tm.lightByEpoch.get(blockNumber)
@ -187,11 +187,11 @@ func getPowDigest(tm: PowRef; blockNumber: BlockNumber;
func getPowDigest*(tm: PowRef; header: BlockHeader): PowDigest =
## Variant of `getPowDigest()`
tm.getPowDigest(header.blockNumber, header.miningHash, header.nonce)
tm.getPowDigest(header.number, header.miningHash, header.nonce)
func getPowDigest*(tm: PowRef; specs: PowSpecs): PowDigest =
## Variant of `getPowDigest()`
tm.getPowDigest(specs.blockNumber, specs.miningHash, specs.nonce)
tm.getPowDigest(specs.number, specs.miningHash, specs.nonce)
# ------------------------------------------------------------------------------
# Public functions, debugging & testing


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -56,7 +56,7 @@ func calcDifficultyFrontier*(timeStamp: EthTime, parent: BlockHeader): Difficult
diff = max(diff, MinimumDifficultyU)
var periodCount = parent.blockNumber + bigOne
var periodCount = parent.number.u256 + bigOne
difficultyBomb(periodCount)
result = diff
@ -91,7 +91,7 @@ func calcDifficultyHomestead*(timeStamp: EthTime, parent: BlockHeader): Difficul
var diff = cast[UInt256](max(x, MinimumDifficultyI))
# for the exponential factor
var periodCount = parent.blockNumber + bigOne
var periodCount = parent.number.u256 + bigOne
difficultyBomb(periodCount)
result = diff
@ -138,8 +138,8 @@ func makeDifficultyCalculator(bombDelay: static[int], timeStamp: EthTime, parent
# calculate a fake block number for the ice-age delay
# Specification: https:#eips.ethereum.org/EIPS/eip-1234
var periodCount: UInt256
if parent.blockNumber >= bombDelayFromParent:
periodCount = parent.blockNumber - bombDelayFromParent
if parent.number.u256 >= bombDelayFromParent:
periodCount = parent.number.u256 - bombDelayFromParent
difficultyBomb(periodCount)


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
@ -15,7 +15,7 @@ export BlockHeader
proc hasUncles*(header: BlockHeader): bool = header.ommersHash != EMPTY_UNCLE_HASH
proc `$`*(header: BlockHeader): string =
result = &"BlockHeader(timestamp: {header.timestamp} difficulty: {header.difficulty} blockNumber: {header.blockNumber} gasLimit: {header.gasLimit})"
result = &"BlockHeader(timestamp: {header.timestamp} difficulty: {header.difficulty} blockNumber: {header.number} gasLimit: {header.gasLimit})"
# CalcGasLimit computes the gas limit of the next block after parent. It aims
# to keep the baseline gas above the provided floor, and increase it towards the


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -45,7 +45,7 @@ const
# ------------------------------------------------------------------------------
proc toKey(bn: BlockNumber): uint64 =
bn.truncate(uint64) div EPOCH_LENGTH
bn div EPOCH_LENGTH
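For example, assuming the usual ethash epoch length of 30_000 blocks:

  # toKey(0)      == 0   # blocks 0 .. 29_999 share one cache
  # toKey(29_999) == 0
  # toKey(30_000) == 1   # next epoch, next cache

The truncate call above disappears simply because BlockNumber is now a plain uint64.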
# ------------------------------------------------------------------------------
# Public functions, constructor


@ -423,7 +423,7 @@
##
import
std/[options, sequtils, tables],
std/[sequtils, tables],
./tx_pool/[tx_chain, tx_desc, tx_info, tx_item],
./tx_pool/tx_tabs,
./tx_pool/tx_tasks/[
@ -609,7 +609,7 @@ func dirtyBuckets*(xp: TxPoolRef): bool =
type EthBlockAndBlobsBundle* = object
blk*: EthBlock
blobsBundle*: Option[BlobsBundle]
blobsBundle*: Opt[BlobsBundle]
proc assembleBlock*(
xp: TxPoolRef,
@ -621,7 +621,7 @@ proc assembleBlock*(
## uninitialised:
##
## * *extraData*: Blob
## * *mixDigest*: Hash256
## * *mixHash*: Hash256
## * *nonce*: BlockNonce
##
## Note that this getter runs *ad hoc* all the txs through the VM in
@ -649,7 +649,7 @@ proc assembleBlock*(
let com = xp.chain.com
if com.forkGTE(Shanghai):
blk.withdrawals = some(com.pos.withdrawals)
blk.withdrawals = Opt.some(com.pos.withdrawals)
if not com.forkGTE(Cancun) and blobsBundle.commitments.len > 0:
return err("PooledTransaction contains blobs prior to Cancun")
@ -657,13 +657,13 @@ proc assembleBlock*(
if com.forkGTE(Cancun):
doAssert blobsBundle.commitments.len == blobsBundle.blobs.len
doAssert blobsBundle.proofs.len == blobsBundle.blobs.len
options.some blobsBundle
Opt.some blobsBundle
else:
options.none BlobsBundle
Opt.none BlobsBundle
if someBaseFee:
# make sure baseFee always has something
blk.header.fee = some(blk.header.fee.get(0.u256))
blk.header.baseFeePerGas = Opt.some(blk.header.baseFeePerGas.get(0.u256))
ok EthBlockAndBlobsBundle(
blk: blk,


@ -53,9 +53,9 @@ type
txRoot: Hash256 ## `rootHash` after packing
stateRoot: Hash256 ## `stateRoot` after packing
blobGasUsed:
Option[uint64] ## EIP-4844 block blobGasUsed
Opt[uint64] ## EIP-4844 block blobGasUsed
excessBlobGas:
Option[uint64] ## EIP-4844 block excessBlobGas
Opt[uint64] ## EIP-4844 block excessBlobGas
TxChainRef* = ref object ##\
## State cache of the transaction environment for creating a new\
@ -85,7 +85,7 @@ func getTimestamp(dh: TxChainRef, parent: BlockHeader): EthTime =
func feeRecipient*(dh: TxChainRef): EthAddress
proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; fee: Option[UInt256])
proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; baseFeePerGas: Opt[UInt256])
{.gcsafe,raises: [].} =
dh.txEnv.reset
@ -94,7 +94,7 @@ proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; fee: Option[UInt256])
let timestamp = dh.getTimestamp(parent)
dh.com.hardForkTransition(
parent.blockHash, parent.blockNumber+1, Opt.some(timestamp))
parent.blockHash, parent.number+1, Opt.some(timestamp))
dh.prepareHeader(parent, timestamp)
# we don't consider PoS difficulty here
@ -102,7 +102,7 @@ proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; fee: Option[UInt256])
let blockCtx = BlockContext(
timestamp : dh.prepHeader.timestamp,
gasLimit : (if dh.maxMode: dh.limits.maxLimit else: dh.limits.trgLimit),
fee : fee,
baseFeePerGas: baseFeePerGas,
prevRandao : dh.prepHeader.prevRandao,
difficulty : dh.prepHeader.difficulty,
coinbase : dh.feeRecipient,
@ -116,8 +116,8 @@ proc resetTxEnv(dh: TxChainRef; parent: BlockHeader; fee: Option[UInt256])
dh.txEnv.txRoot = EMPTY_ROOT_HASH
dh.txEnv.stateRoot = dh.txEnv.vmState.parent.stateRoot
dh.txEnv.blobGasUsed = none(uint64)
dh.txEnv.excessBlobGas = none(uint64)
dh.txEnv.blobGasUsed = Opt.none(uint64)
dh.txEnv.excessBlobGas = Opt.none(uint64)
proc update(dh: TxChainRef; parent: BlockHeader)
{.gcsafe,raises: [].} =
@ -126,10 +126,10 @@ proc update(dh: TxChainRef; parent: BlockHeader)
timestamp = dh.getTimestamp(parent)
db = dh.com.db
acc = LedgerRef.init(db, parent.stateRoot)
fee = if dh.com.isLondon(parent.blockNumber + 1, timestamp):
some(dh.com.baseFeeGet(parent).uint64.u256)
fee = if dh.com.isLondon(parent.number + 1, timestamp):
Opt.some(dh.com.baseFeeGet(parent).uint64.u256)
else:
UInt256.none()
Opt.none UInt256
# Keep a separate accounts descriptor positioned at the sync point
dh.roAcc = ReadOnlyStateDB(acc)
@ -179,37 +179,37 @@ proc getHeader*(dh: TxChainRef): BlockHeader
else: dh.txEnv.receipts[^1].cumulativeGasUsed
result = BlockHeader(
parentHash: dh.txEnv.vmState.parent.blockHash,
ommersHash: EMPTY_UNCLE_HASH,
coinbase: dh.prepHeader.coinbase,
stateRoot: dh.txEnv.stateRoot,
txRoot: dh.txEnv.txRoot,
receiptRoot: dh.txEnv.receipts.calcReceiptRoot,
bloom: dh.txEnv.receipts.createBloom,
difficulty: dh.prepHeader.difficulty,
blockNumber: dh.txEnv.vmState.blockNumber,
gasLimit: dh.txEnv.vmState.blockCtx.gasLimit,
gasUsed: gasUsed,
timestamp: dh.prepHeader.timestamp,
# extraData: Blob # signing data
# mixDigest: Hash256 # mining hash for given difficulty
# nonce: BlockNonce # mining free variable
fee: dh.txEnv.vmState.blockCtx.fee,
blobGasUsed: dh.txEnv.blobGasUsed,
parentHash: dh.txEnv.vmState.parent.blockHash,
ommersHash: EMPTY_UNCLE_HASH,
coinbase: dh.prepHeader.coinbase,
stateRoot: dh.txEnv.stateRoot,
txRoot: dh.txEnv.txRoot,
receiptsRoot: dh.txEnv.receipts.calcReceiptsRoot,
logsBloom: dh.txEnv.receipts.createBloom,
difficulty: dh.prepHeader.difficulty,
number: dh.txEnv.vmState.blockNumber,
gasLimit: dh.txEnv.vmState.blockCtx.gasLimit,
gasUsed: gasUsed,
timestamp: dh.prepHeader.timestamp,
# extraData: Blob # signing data
# mixHash: Hash256 # mining hash for given difficulty
# nonce: BlockNonce # mining free variable
baseFeePerGas: dh.txEnv.vmState.blockCtx.baseFeePerGas,
blobGasUsed: dh.txEnv.blobGasUsed,
excessBlobGas: dh.txEnv.excessBlobGas)
if dh.com.forkGTE(Shanghai):
result.withdrawalsRoot = some(calcWithdrawalsRoot(dh.com.pos.withdrawals))
result.withdrawalsRoot = Opt.some(calcWithdrawalsRoot(dh.com.pos.withdrawals))
if dh.com.forkGTE(Cancun):
result.parentBeaconBlockRoot = some(dh.com.pos.parentBeaconBlockRoot)
result.parentBeaconBlockRoot = Opt.some(dh.com.pos.parentBeaconBlockRoot)
dh.prepareForSeal(result)
proc clearAccounts*(dh: TxChainRef)
{.gcsafe,raises: [].} =
## Reset transaction environment, e.g. before packing a new block
dh.resetTxEnv(dh.txEnv.vmState.parent, dh.txEnv.vmState.blockCtx.fee)
dh.resetTxEnv(dh.txEnv.vmState.parent, dh.txEnv.vmState.blockCtx.baseFeePerGas)
# ------------------------------------------------------------------------------
# Public functions, getters
@ -242,8 +242,8 @@ func feeRecipient*(dh: TxChainRef): EthAddress =
func baseFee*(dh: TxChainRef): GasPrice =
## Getter, baseFee for the next bock header. This value is auto-generated
## when a new insertion point is set via `head=`.
if dh.txEnv.vmState.blockCtx.fee.isSome:
dh.txEnv.vmState.blockCtx.fee.get.truncate(uint64).GasPrice
if dh.txEnv.vmState.blockCtx.baseFeePerGas.isSome:
dh.txEnv.vmState.blockCtx.baseFeePerGas.get.truncate(uint64).GasPrice
else:
0.GasPrice
@ -294,9 +294,9 @@ func `baseFee=`*(dh: TxChainRef; val: GasPrice) =
## function would be called in exceptional cases only as this parameter is
## determined by the `head=` update.
if 0 < val or dh.com.isLondon(dh.txEnv.vmState.blockNumber):
dh.txEnv.vmState.blockCtx.fee = some(val.uint64.u256)
dh.txEnv.vmState.blockCtx.baseFeePerGas = Opt.some(val.uint64.u256)
else:
dh.txEnv.vmState.blockCtx.fee = UInt256.none()
dh.txEnv.vmState.blockCtx.baseFeePerGas = Opt.none UInt256
proc `head=`*(dh: TxChainRef; val: BlockHeader)
{.gcsafe,raises: [].} =
@ -342,11 +342,11 @@ func `txRoot=`*(dh: TxChainRef; val: Hash256) =
## Setter
dh.txEnv.txRoot = val
func `excessBlobGas=`*(dh: TxChainRef; val: Option[uint64]) =
func `excessBlobGas=`*(dh: TxChainRef; val: Opt[uint64]) =
## Setter
dh.txEnv.excessBlobGas = val
func `blobGasUsed=`*(dh: TxChainRef; val: Option[uint64]) =
func `blobGasUsed=`*(dh: TxChainRef; val: Opt[uint64]) =
## Setter
dh.txEnv.blobGasUsed = val


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -51,7 +51,7 @@ proc baseFeeGet*(com: CommonRef; parent: BlockHeader): GasPrice =
# or truncate the result?
calcEip1599BaseFee(parent.gasLimit,
parent.gasUsed,
parent.baseFee).truncate(uint64).GasPrice
parent.baseFeePerGas.get(0.u256)).truncate(uint64).GasPrice
# ------------------------------------------------------------------------------
# End


@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -95,7 +95,7 @@ proc gasLimitsGet*(com: CommonRef; parent: BlockHeader; parentLimit: GasInt;
## Calculate gas limits for the next block header.
result.gasLimit = parentLimit
if com.isLondon(parent.blockNumber+1):
if com.isLondon(parent.number+1):
result.setPostLondonLimits
else:
result.setPreLondonLimits
@ -108,9 +108,9 @@ proc gasLimitsGet*(com: CommonRef; parent: BlockHeader; parentLimit: GasInt;
result.trgLimit, (result.maxLimit * pc.hwmMax + 50) div 100)
# override trgLimit, see https://github.com/status-im/nimbus-eth1/issues/1032
if com.isLondon(parent.blockNumber+1):
if com.isLondon(parent.number+1):
var parentGasLimit = parent.gasLimit
if not com.isLondon(parent.blockNumber):
if not com.isLondon(parent.number):
# Bump by 2x
parentGasLimit = parent.gasLimit * EIP1559_ELASTICITY_MULTIPLIER
result.trgLimit = calcGasLimit1559(parentGasLimit, desiredLimit = pc.gasCeil)


@ -168,7 +168,7 @@ proc effectiveGasTip*(tx: Transaction; baseFee: GasPrice): GasPriceEx =
(tx.gasPrice - baseFee.int64).GasPriceEx
else:
# London, EIP1559
min(tx.maxPriorityFee, tx.maxFee - baseFee.int64).GasPriceEx
min(tx.maxPriorityFeePerGas, tx.maxFeePerGas - baseFee.int64).GasPriceEx
proc effectiveGasTip*(tx: Transaction; baseFee: UInt256): GasPriceEx =
## Variant of `effectiveGasTip()`
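Illustrative numbers for the post-London branch above, with maxFeePerGas = 50 gwei and maxPriorityFeePerGas = 2 gwei:

  # baseFee = 40 gwei -> tip = min(2, 50 - 40) = 2 gwei  (priority fee caps it)
  # baseFee = 49 gwei -> tip = min(2, 50 - 49) = 1 gwei  (fee-cap headroom caps it)

The tip is thus bounded both by the declared priority fee and by whatever headroom remains under maxFeePerGas once the base fee is paid; legacy transactions simply fall back to gasPrice - baseFee.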


@ -15,7 +15,6 @@
##
import
std/[math],
../tx_info,
../tx_item,
eth/[common],


@ -107,9 +107,9 @@ proc txFeesCovered(xp: TxPoolRef; item: TxItemRef): bool =
## Ensure that the user was willing to at least pay the base fee
## And to at least pay the current data gasprice
if item.tx.txType >= TxEip1559:
if item.tx.maxFee.GasPriceEx < xp.chain.baseFee:
if item.tx.maxFeePerGas.GasPriceEx < xp.chain.baseFee:
debug "invalid tx: maxFee is smaller than baseFee",
maxFee = item.tx.maxFee,
maxFee = item.tx.maxFeePerGas,
baseFee = xp.chain.baseFee
return false
@ -168,7 +168,7 @@ proc txPostLondonAcceptableTipAndFees(xp: TxPoolRef; item: TxItemRef): bool =
return false
if stageItems1559MinFee in xp.pFlags:
if item.tx.maxFee.GasPriceEx < xp.pMinFeePrice:
if item.tx.maxFeePerGas.GasPriceEx < xp.pMinFeePrice:
return false
true


@ -133,22 +133,22 @@ proc headDiff*(xp: TxPoolRef;
# sanity check
warn "Tx-pool head forward for non-existing header",
newHead = newHash,
newNumber = newHead.blockNumber
newNumber = newHead.number
return err(txInfoErrForwardHeadMissing)
if not db.getBlockHeader(curHash, ignHeader):
# This can happen if a `setHead()` is performed, where we have discarded
# the old head from the chain.
if curHead.blockNumber <= newHead.blockNumber:
if curHead.number <= newHead.number:
warn "Tx-pool head forward from detached current header",
curHead = curHash,
curNumber = curHead.blockNumber
curNumber = curHead.number
return err(txInfoErrAncestorMissing)
debug "Tx-pool reset with detached current head",
curHeader = curHash,
curNumber = curHead.blockNumber,
curNumber = curHead.number,
newHeader = newHash,
newNumber = newHead.blockNumber
newNumber = newHead.number
return err(txInfoErrChainHeadMissing)
# Equalise block numbers between branches (typically, these branches
@ -161,7 +161,7 @@ proc headDiff*(xp: TxPoolRef;
newBranchHead = newHead
newBranchHash = newHash
if newHead.blockNumber < curHead.blockNumber:
if newHead.number < curHead.number:
#
# new head block number smaller than the current head one
#
@ -178,7 +178,7 @@ proc headDiff*(xp: TxPoolRef;
# + txs of blocks with numbers between #new..#current need to be
# re-inserted into the pool
#
while newHead.blockNumber < curBranchHead.blockNumber:
while newHead.number < curBranchHead.number:
xp.insert(txDiffs, curBranchHash)
let
tmpHead = curBranchHead # cache value for error logging
@ -187,7 +187,7 @@ proc headDiff*(xp: TxPoolRef;
if not db.getBlockHeader(curBranchHash, curBranchHead):
error "Unrooted old chain seen by tx-pool",
curBranchHead = tmpHash,
curBranchNumber = tmpHead.blockNumber
curBranchNumber = tmpHead.number
return err(txInfoErrUnrootedCurChain)
else:
#
@ -206,7 +206,7 @@ proc headDiff*(xp: TxPoolRef;
# + txs of blocks with numbers between #current..#new need to be
# deleted from the pool (as they are on the block chain, now)
#
while curHead.blockNumber < newBranchHead.blockNumber:
while curHead.number < newBranchHead.number:
xp.remove(txDiffs, newBranchHash)
let
tmpHead = newBranchHead # cache value for error logging
@ -215,7 +215,7 @@ proc headDiff*(xp: TxPoolRef;
if not db.getBlockHeader(newBranchHash, newBranchHead):
error "Unrooted new chain seen by tx-pool",
newBranchHead = tmpHash,
newBranchNumber = tmpHead.blockNumber
newBranchNumber = tmpHead.number
return err(txInfoErrUnrootedNewChain)
# simultaneously step back until junction-head (aka common ancestor) while
@ -231,7 +231,7 @@ proc headDiff*(xp: TxPoolRef;
if not db.getBlockHeader(curBranchHash, curBranchHead):
error "Unrooted old chain seen by tx-pool",
curBranchHead = tmpHash,
curBranchNumber = tmpHead.blockNumber
curBranchNumber = tmpHead.number
return err(txInfoErrUnrootedCurChain)
block:
xp.remove(txDiffs, newBranchHash)
@ -242,7 +242,7 @@ proc headDiff*(xp: TxPoolRef;
if not db.getBlockHeader(newBranchHash, newBranchHead):
error "Unrooted new chain seen by tx-pool",
newBranchHead = tmpHash,
newBranchNumber = tmpHead.blockNumber
newBranchNumber = tmpHead.number
return err(txInfoErrUnrootedNewChain)
# figure out difference sets

View File

@ -159,7 +159,7 @@ proc vmExecInit(xp: TxPoolRef): Result[TxPackerStateRef, string]
xp.chain.maxMode = (packItemsMaxGasLimit in xp.pFlags)
if xp.chain.com.daoForkSupport and
xp.chain.com.daoForkBlock.get == xp.chain.head.blockNumber + 1:
xp.chain.com.daoForkBlock.get == xp.chain.head.number + 1:
xp.chain.vmState.mutateStateDB:
db.applyDAOHardFork()
@ -261,8 +261,8 @@ proc vmExecCommit(pst: TxPackerStateRef)
if vmState.com.forkGTE(Cancun):
# EIP-4844
xp.chain.excessBlobGas = some(vmState.blockCtx.excessBlobGas)
xp.chain.blobGasUsed = some(pst.blobGasUsed)
xp.chain.excessBlobGas = Opt.some(vmState.blockCtx.excessBlobGas)
xp.chain.blobGasUsed = Opt.some(pst.blobGasUsed)
proc balanceDelta: UInt256 =
let postBalance = vmState.readOnlyStateDB.getBalance(xp.chain.feeRecipient)


@ -39,18 +39,18 @@ const
proc validateSeal(pow: PowRef; header: BlockHeader): Result[void,string] =
try:
let (expMixDigest, miningValue) = pow.getPowDigest(header)
let (expmixHash, miningValue) = pow.getPowDigest(header)
if expMixDigest != header.mixDigest:
if expmixHash != header.mixHash:
let
miningHash = header.getPowSpecs.miningHash
(size, cachedHash) = try: pow.getPowCacheLookup(header.blockNumber)
(size, cachedHash) = try: pow.getPowCacheLookup(header.number)
except KeyError: return err("Unknown block")
except CatchableError as e: return err(e.msg)
return err("mixHash mismatch. actual=$1, expected=$2," &
" blockNumber=$3, miningHash=$4, nonce=$5, difficulty=$6," &
" size=$7, cachedHash=$8" % [
$header.mixDigest, $expMixDigest, $header.blockNumber,
$header.mixHash, $expmixHash, $header.number,
$miningHash, header.nonce.toHex, $header.difficulty,
$size, $cachedHash])
@ -77,7 +77,7 @@ proc validateHeader(
# Blocks with block numbers in the range [1_920_000, 1_920_009]
# MUST have DAOForkBlockExtra
let daoForkBlock = com.daoForkBlock.get
let DAOHigh = daoForkBlock + DAOForkExtraRange.u256
let DAOHigh = daoForkBlock + DAOForkExtraRange
daoForkBlock <= blockNumber and
blockNumber < DAOHigh
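Concretely, with the mainnet DAO settings this pins the extra-data requirement to the ten blocks named in the comment above:

  # daoForkBlock = 1_920_000 and DAOForkExtraRange = 10 (per the comment),
  # so DAOHigh = 1_920_010 and the check covers exactly headers
  # 1_920_000 .. 1_920_009, which must carry daoForkBlockExtraData when
  # daoForkSupport is enabled.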
@ -90,19 +90,19 @@ proc validateHeader(
if header.gasUsed < 0 or header.gasUsed > header.gasLimit:
return err("gasUsed should be non negative and smaller or equal gasLimit")
if header.blockNumber != parentHeader.blockNumber + 1:
if header.number != parentHeader.number + 1:
return err("Blocks must be numbered consecutively")
if header.timestamp <= parentHeader.timestamp:
return err("timestamp must be strictly later than parent")
if com.daoForkSupport and inDAOExtraRange(header.blockNumber):
if com.daoForkSupport and inDAOExtraRange(header.number):
if header.extraData != daoForkBlockExtraData:
return err("header extra data should be marked DAO")
if com.consensus == ConsensusType.POS:
# EIP-4399 and EIP-3675
# no need to check mixDigest because EIP-4399 override this field
# no need to check mixHash because EIP-4399 override this field
# checking rule
if not header.difficulty.isZero:
@ -185,7 +185,7 @@ proc validateUncles(com: CommonRef; header: BlockHeader;
(uncle.parentHash == header.parentHash):
return err("Uncle's parent is not an ancestor")
if uncle.blockNumber >= header.blockNumber:
if uncle.number >= header.number:
return err("uncle block number larger than current block number")
# check uncle against own parent
@ -215,9 +215,9 @@ proc validateUncles(com: CommonRef; header: BlockHeader;
func gasCost*(tx: Transaction): UInt256 =
if tx.txType >= TxEip4844:
tx.gasLimit.u256 * tx.maxFee.u256 + tx.getTotalBlobGas.u256 * tx.maxFeePerBlobGas.u256
tx.gasLimit.u256 * tx.maxFeePerGas.u256 + tx.getTotalBlobGas.u256 * tx.maxFeePerBlobGas
elif tx.txType >= TxEip1559:
tx.gasLimit.u256 * tx.maxFee.u256
tx.gasLimit.u256 * tx.maxFeePerGas.u256
else:
tx.gasLimit.u256 * tx.gasPrice.u256
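Illustrative numbers for the EIP-4844 branch above: a blob transaction with gasLimit = 21_000, maxFeePerGas = 50 gwei, two blobs (2 * 131_072 = 262_144 blob gas, assuming the standard GAS_PER_BLOB of 2^17) and maxFeePerBlobGas = 1 gwei yields

  # gasCost = 21_000 * 50 gwei + 262_144 * 1 gwei
  #         = 1_312_144 gwei, roughly 0.0013 ETH

which is the gas-side amount checked against the sender's balance; the transferred value, if any, comes on top.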
@ -241,9 +241,9 @@ proc validateTxBasic*(
try:
# The total must be the larger of the two
if tx.maxFee < tx.maxPriorityFee:
if tx.maxFeePerGas < tx.maxPriorityFeePerGas:
return err("invalid tx: maxFee is smaller than maxPriorityFee. maxFee=$1, maxPriorityFee=$2" % [
$tx.maxFee, $tx.maxPriorityFee])
$tx.maxFeePerGas, $tx.maxPriorityFeePerGas])
if tx.gasLimit < tx.intrinsicGas(fork):
return err("invalid tx: not enough gas to perform calculation. avail=$1, require=$2" % [
@ -322,9 +322,9 @@ proc validateTransaction*(
$maxLimit, $tx.gasLimit])
# ensure that the user was willing to at least pay the base fee
if tx.maxFee < baseFee.truncate(int64):
if tx.maxFeePerGas < baseFee.truncate(GasInt):
return err("invalid tx: maxFee is smaller than baseFee. maxFee=$1, baseFee=$2" % [
$tx.maxFee, $baseFee])
$tx.maxFeePerGas, $baseFee])
# the signer must be able to fully afford the transaction
let gasCost = tx.gasCost()


@ -14,7 +14,7 @@ import results, ../common/common
# https://eips.ethereum.org/EIPS/eip-4895
proc validateWithdrawals*(
com: CommonRef, header: BlockHeader, withdrawals: Option[seq[Withdrawal]]
com: CommonRef, header: BlockHeader, withdrawals: Opt[seq[Withdrawal]]
): Result[void, string] =
if com.forkGTE(Shanghai):
if header.withdrawalsRoot.isNone:
@ -24,7 +24,7 @@ proc validateWithdrawals*(
else:
try:
if withdrawals.get.calcWithdrawalsRoot != header.withdrawalsRoot.get:
return err("Mismatched withdrawalsRoot blockNumber =" & $header.blockNumber)
return err("Mismatched withdrawalsRoot blockNumber =" & $header.number)
except RlpError as ex:
return err(ex.msg)
else:


@ -300,10 +300,14 @@ func to*(key: Hash256; T: type HashKey): T =
else:
T(len: 32, buf: key.data)
func to*(n: SomeUnsignedInt|UInt256; T: type PathID): T =
func to*(n: SomeUnsignedInt; T: type PathID): T =
## Representation of a scalar as `PathID` (preserving full information)
T(pfx: n.u256, length: 64)
func to*(n: UInt256; T: type PathID): T =
## Representation of a scalar as `PathID` (preserving full information)
T(pfx: n, length: 64)
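A small usage sketch of the two overloads above (illustrative values):

  let a = 0x1234'u64.to(PathID)            # pfx = 0x1234.u256, length = 64
  let b = UInt256.fromHex("12").to(PathID) # pfx = 0x12.u256,   length = 64
  # Both scalar widths map to a full-length (64 nibble) path.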
# ------------------------------------------------------------------------------
# Public helpers: Miscellaneous mappings
# ------------------------------------------------------------------------------


@ -13,7 +13,6 @@
import
std/tables,
eth/common,
results,
../../aristo as use_ari,
../../aristo/aristo_walk,
../../kvt as use_kvt,
@ -134,11 +133,11 @@ proc baseMethods(db: AristoCoreDbRef): CoreDbBaseFns =
db.tracer.push(flags)
CoreDxCaptRef(methods: db.tracer.cptMethods)
proc persistent(bn: Option[BlockNumber]): CoreDbRc[void] =
proc persistent(bn: Opt[BlockNumber]): CoreDbRc[void] =
const info = "persistentFn()"
let sid =
if bn.isNone: 0u64
else: bn.unsafeGet.truncate(uint64)
else: bn.unsafeGet
? kBase.persistent info
? aBase.persistent(sid, info)
ok()
@ -183,7 +182,7 @@ proc baseMethods(db: AristoCoreDbRef): CoreDbBaseFns =
newCaptureFn: proc(flags: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
ok(db.bless flags.tracerSetup()),
persistentFn: proc(bn: Option[BlockNumber]): CoreDbRc[void] =
persistentFn: proc(bn: Opt[BlockNumber]): CoreDbRc[void] =
persistent(bn))
# ------------------------------------------------------------------------------
@ -247,8 +246,8 @@ proc toAristoSavedStateBlockNumber*(
if not mBe.isNil and mBe.parent.isAristo:
let rc = mBe.parent.AristoCoreDbRef.adbBase.getSavedState()
if rc.isOk:
return (rc.value.src.to(Hash256), rc.value.serial.toBlockNumber)
(EMPTY_ROOT_HASH, 0.toBlockNumber)
return (rc.value.src.to(Hash256), rc.value.serial.BlockNumber)
(EMPTY_ROOT_HASH, 0.BlockNumber)
# ------------------------------------------------------------------------------
# Public aristo iterators


@ -11,11 +11,10 @@
{.push raises: [].}
import
std/[options, strutils, typetraits],
std/[strutils, typetraits],
chronicles,
eth/[common, trie/nibbles],
stew/byteutils,
results,
../../../aristo,
../../../aristo/aristo_desc,
../../base,
@ -433,7 +432,7 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
proc ctxNewCol(
colType: CoreDbColType;
colState: Hash256;
address: Option[EthAddress];
address: Opt[EthAddress];
): CoreDbRc[CoreDbColRef] =
const info = "ctx/newColFn()"
@ -533,7 +532,7 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
newColFn: proc(
col: CoreDbColType;
colState: Hash256;
address: Option[EthAddress];
address: Opt[EthAddress];
): CoreDbRc[CoreDbColRef] =
ctxNewCol(col, colState, address),


@ -11,10 +11,8 @@
{.push raises: [].}
import
std/options,
chronicles,
eth/common,
results,
"../.."/[constants, errors],
./base/[api_tracking, base_desc]
@ -418,7 +416,7 @@ proc newColumn*(
ctx: CoreDbCtxRef;
colType: CoreDbColType;
colState: Hash256;
address = none(EthAddress);
address = Opt.none(EthAddress);
): CoreDbRc[CoreDbColRef] =
## Retrieve a new column descriptor.
##
@ -463,7 +461,7 @@ proc newColumn*(
## Shortcut for `ctx.newColumn(CtStorage,colState,some(address))`.
##
ctx.setTrackNewApi CtxNewColFn
result = ctx.methods.newColFn(CtStorage, colState, some(address))
result = ctx.methods.newColFn(CtStorage, colState, Opt.some(address))
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, colState, address, result
proc newColumn*(
@ -476,7 +474,7 @@ proc newColumn*(
##
ctx.setTrackNewApi CtxNewColFn
result = ctx.methods.newColFn(
CtStorage, EMPTY_ROOT_HASH, some(address)).valueOr:
CtStorage, EMPTY_ROOT_HASH, Opt.some(address)).valueOr:
raiseAssert error.prettyText()
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
@ -533,7 +531,7 @@ proc getMpt*(
proc getMpt*(
ctx: CoreDbCtxRef;
colType: CoreDbColType;
address = none(EthAddress);
address = Opt.none(EthAddress);
): CoreDxMptRef =
## Shortcut for `getMpt(col)` where the `col` argument is
## `db.getColumn(colType,EMPTY_ROOT_HASH).value`. This function will always
@ -830,7 +828,7 @@ proc persistent*(
## is no transaction pending.
##
db.setTrackNewApi BasePersistentFn
result = db.methods.persistentFn none(BlockNumber)
result = db.methods.persistentFn Opt.none(BlockNumber)
db.ifTrackNewApi: debug newApiTxt, api, elapsed, result
proc persistent*(
@ -859,7 +857,7 @@ proc persistent*(
## db.persistent(stateBlockNumber)
##
db.setTrackNewApi BasePersistentFn
result = db.methods.persistentFn some(blockNumber)
result = db.methods.persistentFn Opt.some(blockNumber)
db.ifTrackNewApi: debug newApiTxt, api, elapsed, blockNumber, result
proc newTransaction*(db: CoreDbRef): CoreDxTxRef =
@ -1013,7 +1011,7 @@ when ProvideLegacyAPI:
db.setTrackLegaApi LegaNewMptFn
let
trie = db.ctx.methods.newColFn(
CtGeneric, root, none(EthAddress)).valueOr:
CtGeneric, root, Opt.none(EthAddress)).valueOr:
raiseAssert error.prettyText() & ": " & $api
mpt = db.ctx.getMpt(trie).valueOr:
raiseAssert error.prettyText() & ": " & $api
@ -1022,7 +1020,7 @@ when ProvideLegacyAPI:
proc mptPrune*(db: CoreDbRef): CoreDbMptRef =
db.setTrackLegaApi LegaNewMptFn
result = db.ctx.getMpt(CtGeneric, none(EthAddress)).CoreDbMptRef
result = db.ctx.getMpt(CtGeneric, Opt.none(EthAddress)).CoreDbMptRef
db.ifTrackLegaApi: debug legaApiTxt, api, elapsed
# ----------------
@ -1036,7 +1034,7 @@ when ProvideLegacyAPI:
db.setTrackLegaApi LegaNewPhkFn
let
trie = db.ctx.methods.newColFn(
CtGeneric, root, none(EthAddress)).valueOr:
CtGeneric, root, Opt.none(EthAddress)).valueOr:
raiseAssert error.prettyText() & ": " & $api
phk = db.ctx.getMpt(trie).valueOr:
raiseAssert error.prettyText() & ": " & $api
@ -1046,7 +1044,7 @@ when ProvideLegacyAPI:
proc phkPrune*(db: CoreDbRef): CoreDbPhkRef =
db.setTrackLegaApi LegaNewPhkFn
result = db.ctx.getMpt(
CtGeneric, none(EthAddress)).toCoreDxPhkRef.CoreDbPhkRef
CtGeneric, Opt.none(EthAddress)).toCoreDxPhkRef.CoreDbPhkRef
db.ifTrackLegaApi: debug legaApiTxt, api, elapsed
# ----------------


@ -13,7 +13,6 @@
import
std/tables,
eth/common,
results,
../../aristo/aristo_profile
from ../../aristo
@ -113,7 +112,7 @@ type
proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] {.noRaise.}
CoreDbBaseGetCaptFn* = proc(): CoreDbRc[CoreDxCaptRef] {.noRaise.}
CoreDbBasePersistentFn* =
proc(bn: Option[BlockNumber]): CoreDbRc[void] {.noRaise.}
proc(bn: Opt[BlockNumber]): CoreDbRc[void] {.noRaise.}
CoreDbBaseFns* = object
destroyFn*: CoreDbBaseDestroyFn
@ -166,7 +165,7 @@ type
CoreDbCtxFromTxFn* =
proc(root: Hash256; kind: CoreDbColType): CoreDbRc[CoreDbCtxRef] {.noRaise.}
CoreDbCtxNewColFn* = proc(
colType: CoreDbColType; colState: Hash256; address: Option[EthAddress];
colType: CoreDbColType; colState: Hash256; address: Opt[EthAddress];
): CoreDbRc[CoreDbColRef] {.noRaise.}
CoreDbCtxGetMptFn* = proc(
root: CoreDbColRef): CoreDbRc[CoreDxMptRef] {.noRaise.}
