Compare commits
9 Commits
0a20b9e5c6
...
bae151d343
Author | SHA1 | Date |
---|---|---|
|
bae151d343 | |
|
6a10dfd0fe | |
|
9521582005 | |
|
99ff8dc876 | |
|
73a0f7caeb | |
|
7c679a8ea9 | |
|
d40529ebef | |
|
682d0ff575 | |
|
b004f9e2ff |
|
@ -22,17 +22,17 @@ OK: 15/15 Fail: 0/15 Skip: 0/15
|
|||
## bcArrowGlacierToParis
|
||||
```diff
|
||||
+ difficultyFormula.json OK
|
||||
powToPosBlockRejection.json Skip
|
||||
+ powToPosBlockRejection.json OK
|
||||
+ powToPosTest.json OK
|
||||
```
|
||||
OK: 2/3 Fail: 0/3 Skip: 1/3
|
||||
OK: 3/3 Fail: 0/3 Skip: 0/3
|
||||
## bcBerlinToLondon
|
||||
```diff
|
||||
+ BerlinToLondonTransition.json OK
|
||||
initialVal.json Skip
|
||||
+ initialVal.json OK
|
||||
+ londonUncles.json OK
|
||||
```
|
||||
OK: 2/3 Fail: 0/3 Skip: 1/3
|
||||
OK: 3/3 Fail: 0/3 Skip: 0/3
|
||||
## bcBlockGasLimitTest
|
||||
```diff
|
||||
+ BlockGasLimit2p63m1.json OK
|
||||
|
@ -115,35 +115,35 @@ OK: 3/4 Fail: 0/4 Skip: 1/4
|
|||
## bcForkStressTest
|
||||
```diff
|
||||
+ AmIOnEIP150.json OK
|
||||
ForkStressTest.json Skip
|
||||
+ ForkStressTest.json OK
|
||||
```
|
||||
OK: 1/2 Fail: 0/2 Skip: 1/2
|
||||
OK: 2/2 Fail: 0/2 Skip: 0/2
|
||||
## bcFrontierToHomestead
|
||||
```diff
|
||||
+ CallContractThatCreateContractBeforeAndAfterSwitchover.json OK
|
||||
+ ContractCreationFailsOnHomestead.json OK
|
||||
HomesteadOverrideFrontier.json Skip
|
||||
+ HomesteadOverrideFrontier.json OK
|
||||
+ UncleFromFrontierInHomestead.json OK
|
||||
+ UnclePopulation.json OK
|
||||
blockChainFrontierWithLargerTDvsHomesteadBlockchain.json Skip
|
||||
blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json Skip
|
||||
+ blockChainFrontierWithLargerTDvsHomesteadBlockchain.json OK
|
||||
+ blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json OK
|
||||
```
|
||||
OK: 4/7 Fail: 0/7 Skip: 3/7
|
||||
OK: 7/7 Fail: 0/7 Skip: 0/7
|
||||
## bcGasPricerTest
|
||||
```diff
|
||||
RPC_API_Test.json Skip
|
||||
+ RPC_API_Test.json OK
|
||||
+ highGasUsage.json OK
|
||||
+ notxs.json OK
|
||||
```
|
||||
OK: 2/3 Fail: 0/3 Skip: 1/3
|
||||
OK: 3/3 Fail: 0/3 Skip: 0/3
|
||||
## bcHomesteadToDao
|
||||
```diff
|
||||
DaoTransactions.json Skip
|
||||
+ DaoTransactions.json OK
|
||||
+ DaoTransactions_EmptyTransactionAndForkBlocksAhead.json OK
|
||||
+ DaoTransactions_UncleExtradata.json OK
|
||||
+ DaoTransactions_XBlockm1.json OK
|
||||
```
|
||||
OK: 3/4 Fail: 0/4 Skip: 1/4
|
||||
OK: 4/4 Fail: 0/4 Skip: 0/4
|
||||
## bcHomesteadToEIP150
|
||||
```diff
|
||||
+ EIP150Transition.json OK
|
||||
|
@ -182,17 +182,17 @@ OK: 22/22 Fail: 0/22 Skip: 0/22
|
|||
OK: 1/1 Fail: 0/1 Skip: 0/1
|
||||
## bcMultiChainTest
|
||||
```diff
|
||||
CallContractFromNotBestBlock.json Skip
|
||||
ChainAtoChainB.json Skip
|
||||
ChainAtoChainBCallContractFormA.json Skip
|
||||
ChainAtoChainB_BlockHash.json Skip
|
||||
ChainAtoChainB_difficultyB.json Skip
|
||||
ChainAtoChainBtoChainA.json Skip
|
||||
ChainAtoChainBtoChainAtoChainB.json Skip
|
||||
UncleFromSideChain.json Skip
|
||||
lotsOfLeafs.json Skip
|
||||
+ CallContractFromNotBestBlock.json OK
|
||||
+ ChainAtoChainB.json OK
|
||||
+ ChainAtoChainBCallContractFormA.json OK
|
||||
+ ChainAtoChainB_BlockHash.json OK
|
||||
+ ChainAtoChainB_difficultyB.json OK
|
||||
+ ChainAtoChainBtoChainA.json OK
|
||||
+ ChainAtoChainBtoChainAtoChainB.json OK
|
||||
+ UncleFromSideChain.json OK
|
||||
+ lotsOfLeafs.json OK
|
||||
```
|
||||
OK: 0/9 Fail: 0/9 Skip: 9/9
|
||||
OK: 9/9 Fail: 0/9 Skip: 0/9
|
||||
## bcRandomBlockhashTest
|
||||
```diff
|
||||
+ 201503110226PYTHON_DUP6BC.json OK
|
||||
|
@ -408,18 +408,18 @@ OK: 105/105 Fail: 0/105 Skip: 0/105
|
|||
OK: 99/100 Fail: 0/100 Skip: 1/100
|
||||
## bcTotalDifficultyTest
|
||||
```diff
|
||||
lotsOfBranchesOverrideAtTheEnd.json Skip
|
||||
lotsOfBranchesOverrideAtTheMiddle.json Skip
|
||||
newChainFrom4Block.json Skip
|
||||
newChainFrom5Block.json Skip
|
||||
newChainFrom6Block.json Skip
|
||||
sideChainWithMoreTransactions.json Skip
|
||||
sideChainWithMoreTransactions2.json Skip
|
||||
sideChainWithNewMaxDifficultyStartingFromBlock3AfterBlock4.json Skip
|
||||
uncleBlockAtBlock3AfterBlock3.json Skip
|
||||
uncleBlockAtBlock3afterBlock4.json Skip
|
||||
+ lotsOfBranchesOverrideAtTheEnd.json OK
|
||||
+ lotsOfBranchesOverrideAtTheMiddle.json OK
|
||||
+ newChainFrom4Block.json OK
|
||||
+ newChainFrom5Block.json OK
|
||||
+ newChainFrom6Block.json OK
|
||||
+ sideChainWithMoreTransactions.json OK
|
||||
+ sideChainWithMoreTransactions2.json OK
|
||||
+ sideChainWithNewMaxDifficultyStartingFromBlock3AfterBlock4.json OK
|
||||
+ uncleBlockAtBlock3AfterBlock3.json OK
|
||||
+ uncleBlockAtBlock3afterBlock4.json OK
|
||||
```
|
||||
OK: 0/10 Fail: 0/10 Skip: 10/10
|
||||
OK: 10/10 Fail: 0/10 Skip: 0/10
|
||||
## bcUncleHeaderValidity
|
||||
```diff
|
||||
+ correct.json OK
|
||||
|
@ -3726,4 +3726,4 @@ OK: 11/11 Fail: 0/11 Skip: 0/11
|
|||
OK: 1/1 Fail: 0/1 Skip: 0/1
|
||||
|
||||
---TOTAL---
|
||||
OK: 3140/3272 Fail: 0/3272 Skip: 132/3272
|
||||
OK: 3167/3272 Fail: 0/3272 Skip: 105/3272
|
||||
|
|
|
@ -0,0 +1,255 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
import
|
||||
std/tables,
|
||||
../../common,
|
||||
../../db/core_db,
|
||||
../../evm/types,
|
||||
../../evm/state,
|
||||
../validate,
|
||||
../executor/process_block
|
||||
|
||||
type
  HeadDesc = object
    ## Bookkeeping entry for one chain-branch tip.
    number: BlockNumber   # block number of the tip
    hash: Hash256         # block hash of the tip

  BlockDesc = object
    ## A cached, already-executed block together with its receipts.
    blk: EthBlock
    receipts: seq[Receipt]

  ForkedChain* = object
    ## In-memory view of the chain between a persisted `base` block and
    ## one or more candidate heads, staged on a db transaction.
    stagingTx: CoreDbTxRef          # open tx holding the not-yet-persistent state
    db: CoreDbRef
    com: CommonRef
    blocks: Table[Hash256, BlockDesc] # all cached blocks above the base, by hash
    headHash: Hash256               # hash of the currently active head
    baseHash: Hash256               # hash of the persisted base block
    baseHeader: BlockHeader
    headHeader: BlockHeader
    heads: seq[HeadDesc]            # tips of all known branches above the base
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc processBlock(c: ForkedChain,
                  parent: BlockHeader,
                  blk: EthBlock): Result[seq[Receipt], string] =
  ## Validate and execute `blk` on top of `parent`, returning the
  ## execution receipts on success or a textual error on failure.
  ## Also persists the block header (required for later uncle checks).
  template header(): BlockHeader =
    blk.header

  let vmState = BaseVMState()
  vmState.init(parent, header, c.com)
  # Fork configuration must be switched before validating/executing.
  c.com.hardForkTransition(header)

  # Header/kinship validation runs first; `?` propagates any error.
  ?c.com.validateHeaderAndKinship(blk, vmState.parent, checkSealOK = false)

  ?vmState.processBlock(
    blk,
    skipValidation = false,
    skipReceipts = false,
    skipUncles = true,
  )

  # We still need to write header to database
  # because validateUncles still need it
  let blockHash = header.blockHash()
  if not c.db.persistHeader(
      blockHash,
      header, c.com.consensus == ConsensusType.POS,
      c.com.startOfHistory):
    return err("Could not persist header")

  ok(move(vmState.receipts))
|
||||
|
||||
proc updateHeads(c: var ForkedChain,
                 hash: Hash256,
                 header: BlockHeader) =
  ## Advance the branch whose tip is `header.parentHash` to the new tip
  ## `hash`; if no such branch exists, register a brand-new one.
  let newTip = HeadDesc(hash: hash, number: header.number)
  for entry in c.heads.mitems:
    if entry.hash == header.parentHash:
      entry = newTip
      return
  c.heads.add newTip
|
||||
|
||||
proc updateHead(c: var ForkedChain,
                blk: EthBlock,
                receipts: sink seq[Receipt]) =
  ## Record `blk` as the new chain head, cache it with its receipts and
  ## update the branch-tip bookkeeping.
  let hash = blk.header.blockHash
  c.headHeader = blk.header
  c.headHash = hash
  c.blocks[hash] = BlockDesc(blk: blk, receipts: move(receipts))
  c.updateHeads(hash, blk.header)
|
||||
|
||||
proc validatePotentialHead(c: var ForkedChain,
                           parent: BlockHeader,
                           blk: EthBlock,
                           updateHead: bool = true) =
  ## Execute `blk` on top of `parent` inside a nested db transaction.
  ## On success the transaction is committed and — unless `updateHead`
  ## is false — the block becomes the new head; on failure the
  ## transaction is rolled back and the block is dropped.
  ## NOTE(review): validation errors are swallowed here, so callers get
  ## no signal that a block was rejected — confirm this is intended.
  let dbTx = c.db.newTransaction()
  defer:
    dbTx.dispose()

  var res = c.processBlock(parent, blk)
  if res.isErr:
    dbTx.rollback()
    return

  dbTx.commit()
  if updateHead:
    c.updateHead(blk, move(res.value))
|
||||
|
||||
proc replaySegment(c: var ForkedChain,
                   head: Hash256) =
  ## Re-execute the cached chain segment from the base block up to and
  ## including `head` on a fresh staging transaction.
  # Walk tip-to-base collecting the segment, then replay base-to-tip.
  var
    prevHash = head
    chain = newSeq[EthBlock]()

  while prevHash != c.baseHash:
    chain.add c.blocks[prevHash].blk
    prevHash = chain[^1].header.parentHash

  # Discard whatever was staged before and restart from the base state.
  c.stagingTx.rollback()
  c.stagingTx = c.db.newTransaction()
  c.headHeader = c.baseHeader
  for i in countdown(chain.high, chain.low):
    # updateHead = false: head bookkeeping was already done when these
    # blocks were first added.
    c.validatePotentialHead(c.headHeader, chain[i], updateHead = false)
    c.headHeader = chain[i].header
|
||||
|
||||
proc writeBaggage(c: var ForkedChain, blockHash: Hash256) =
  ## Persist transactions, receipts, uncles and withdrawals for every
  ## cached block between `blockHash` and the base (base excluded).
  var cursor = blockHash
  while cursor != c.baseHash:
    let desc = c.blocks[cursor]
    template body: EthBlock = desc.blk
    c.db.persistTransactions(body.header.number, body.transactions)
    c.db.persistReceipts(desc.receipts)
    discard c.db.persistUncles(body.uncles)
    if body.withdrawals.isSome:
      c.db.persistWithdrawals(body.withdrawals.get)
    cursor = body.header.parentHash
|
||||
|
||||
proc updateBase(c: var ForkedChain,
                newBaseHash: Hash256, newBaseHeader: BlockHeader) =
  ## Move the chain base forward to `newBaseHash`, pruning cached
  ## branches whose tip is at or below the old base block number.
  # remove obsolete chains
  #
  # Iterate downward: `seq.del(i)` swaps the last element into slot `i`
  # and shrinks the seq, while `0..<c.heads.len` evaluates its bound
  # only once — a forward loop would both skip the swapped-in element
  # and eventually index past the shrunken end. Counting down makes
  # `del(i)` safe.
  for i in countdown(c.heads.len - 1, 0):
    if c.heads[i].number <= c.baseHeader.number:
      # Drop this obsolete branch's cached blocks, tip to base.
      var prevHash = c.heads[i].hash
      while prevHash != c.baseHash:
        c.blocks.withValue(prevHash, val) do:
          let rmHash = prevHash
          prevHash = val.blk.header.parentHash
          c.blocks.del(rmHash)
        do:
          # older chain segment have been deleted
          # by previous head
          break
      c.heads.del(i)

  c.baseHeader = newBaseHeader
  c.baseHash = newBaseHash
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc initForkedChain*(com: CommonRef): ForkedChain =
  ## Build a `ForkedChain` whose base and head both start at the
  ## current canonical head of `com`'s database.
  let db = com.db
  # Open the staging transaction before reading the canonical head,
  # matching the required initialisation order.
  let tx = db.newTransaction()
  let base = db.getCanonicalHead()
  let baseHash = base.blockHash
  result = ForkedChain(
    com: com,
    db: db,
    stagingTx: tx,
    baseHeader: base,
    headHeader: base,
    baseHash: baseHash,
    headHash: baseHash)
|
||||
|
||||
proc addBlock*(c: var ForkedChain, blk: EthBlock) =
  ## Import `blk`, validating it on top of whichever known block is its
  ## parent: the current head, the base, or a cached side-chain block.
  let parentHash = blk.header.parentHash

  if parentHash == c.headHash:
    # Extends the current head: validate directly on the staged state.
    c.validatePotentialHead(c.headHeader, blk)
  elif parentHash == c.baseHash:
    # Forks off the base: restart staging from the base state first.
    c.stagingTx.rollback()
    c.stagingTx = c.db.newTransaction()
    c.validatePotentialHead(c.baseHeader, blk)
  elif parentHash in c.blocks:
    # Forks off a cached side-chain block: replay that segment, then
    # validate on top of it.
    c.replaySegment(parentHash)
    c.validatePotentialHead(c.headHeader, blk)
  # else: if its parent is an invalid (unknown) block there is no hope
  # the descendant is valid — drop the block.
|
||||
|
||||
proc finalizeSegment*(c: var ForkedChain,
                      finalizedHash: Hash256): Result[void, string] =
  ## Make the segment ending at `finalizedHash` canonical, persist it
  ## to the database, and advance the in-memory base accordingly.
  if finalizedHash == c.headHash:
    # the current segment is canonical chain — persist its payload
    # and commit the staged state as-is.
    c.writeBaggage(finalizedHash)
    c.stagingTx.commit()

    # Save and record the block number before the last saved block state.
    c.db.persistent(c.headHeader.number).isOkOr:
      return err("Failed to save state: " & $$error)

    c.stagingTx = c.db.newTransaction()
    c.updateBase(finalizedHash, c.headHeader)
    return ok()

  var
    newBaseHash: Hash256
    newBaseHeader: BlockHeader

  c.blocks.withValue(finalizedHash, val) do:
    # Pick the new base: move it to the finalized block only when doing
    # so keeps recent history cached (the 128-block window below);
    # otherwise the base follows the head.
    if c.headHeader.number <= 128:
      if val.blk.header.number < c.headHeader.number:
        newBaseHash = finalizedHash
        newBaseHeader = val.blk.header
      else:
        newBaseHash = c.headHash
        newBaseHeader = c.headHeader
    elif val.blk.header.number < c.headHeader.number - 128:
      newBaseHash = finalizedHash
      newBaseHeader = val.blk.header
    else:
      newBaseHash = c.headHash
      newBaseHeader = c.headHeader
  do:
    return err("Finalized head not in segments list")

  # Re-execute the chosen segment on a fresh transaction, persist its
  # payload, commit, then advance the base.
  c.stagingTx.rollback()
  c.stagingTx = c.db.newTransaction()
  c.replaySegment(newBaseHash)
  c.writeBaggage(newBaseHash)

  c.stagingTx.commit()
  c.db.persistent(newBaseHeader.number).isOkOr:
    return err("Failed to save state: " & $$error)

  c.stagingTx = c.db.newTransaction()
  c.updateBase(newBaseHash, newBaseHeader)

  ok()
|
|
@ -117,7 +117,7 @@ proc procBlkPreamble(
|
|||
ok()
|
||||
|
||||
proc procBlkEpilogue(
|
||||
vmState: BaseVMState, header: BlockHeader, skipValidation: bool
|
||||
vmState: BaseVMState, header: BlockHeader, skipValidation: bool, skipReceipts: bool
|
||||
): Result[void, string] =
|
||||
# Reward beneficiary
|
||||
vmState.mutateStateDB:
|
||||
|
@ -137,19 +137,20 @@ proc procBlkEpilogue(
|
|||
arrivedFrom = vmState.com.db.getCanonicalHead().stateRoot
|
||||
return err("stateRoot mismatch")
|
||||
|
||||
let bloom = createBloom(vmState.receipts)
|
||||
|
||||
if header.logsBloom != bloom:
|
||||
return err("bloom mismatch")
|
||||
|
||||
let receiptsRoot = calcReceiptsRoot(vmState.receipts)
|
||||
if header.receiptsRoot != receiptsRoot:
|
||||
# TODO replace logging with better error
|
||||
debug "wrong receiptRoot in block",
|
||||
blockNumber = header.number,
|
||||
actual = receiptsRoot,
|
||||
expected = header.receiptsRoot
|
||||
return err("receiptRoot mismatch")
|
||||
if not skipReceipts:
|
||||
let bloom = createBloom(vmState.receipts)
|
||||
|
||||
if header.logsBloom != bloom:
|
||||
return err("bloom mismatch")
|
||||
|
||||
let receiptsRoot = calcReceiptsRoot(vmState.receipts)
|
||||
if header.receiptsRoot != receiptsRoot:
|
||||
# TODO replace logging with better error
|
||||
debug "wrong receiptRoot in block",
|
||||
blockNumber = header.number,
|
||||
actual = receiptsRoot,
|
||||
expected = header.receiptsRoot
|
||||
return err("receiptRoot mismatch")
|
||||
|
||||
ok()
|
||||
|
||||
|
@ -171,7 +172,7 @@ proc processBlock*(
|
|||
if vmState.com.consensus == ConsensusType.POW:
|
||||
vmState.calculateReward(blk.header, blk.uncles)
|
||||
|
||||
?vmState.procBlkEpilogue(blk.header, skipValidation)
|
||||
?vmState.procBlkEpilogue(blk.header, skipValidation, skipReceipts)
|
||||
|
||||
ok()
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@ import
|
|||
../../evm/state,
|
||||
../../evm/types,
|
||||
../../constants,
|
||||
../eip4844,
|
||||
../validate
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -92,6 +93,12 @@ proc processTransactionImpl(
|
|||
|
||||
vmState.gasPool -= tx.gasLimit
|
||||
|
||||
let blobGasUsed = tx.getTotalBlobGas
|
||||
if vmState.blobGasUsed + blobGasUsed > MAX_BLOB_GAS_PER_BLOCK:
|
||||
return err("blobGasUsed " & $blobGasUsed &
|
||||
" exceeds maximum allowance " & $MAX_BLOB_GAS_PER_BLOCK)
|
||||
vmState.blobGasUsed += blobGasUsed
|
||||
|
||||
# Actually, the eip-1559 reference does not mention an early exit.
|
||||
#
|
||||
# Even though database was not changed yet but, a `persist()` directive
|
||||
|
|
|
@ -22,7 +22,7 @@ import
|
|||
../../../common/common,
|
||||
../../../utils/utils,
|
||||
../../../constants,
|
||||
"../.."/[dao, executor, validate, eip4844, casper],
|
||||
"../.."/[dao, executor, validate, casper],
|
||||
../../../transaction/call_evm,
|
||||
../../../transaction,
|
||||
../../../evm/state,
|
||||
|
@ -39,7 +39,6 @@ type
|
|||
tr: CoreDbMptRef
|
||||
cleanState: bool
|
||||
balance: UInt256
|
||||
blobGasUsed: uint64
|
||||
numBlobPerBlock: int
|
||||
|
||||
const
|
||||
|
@ -131,10 +130,6 @@ proc runTxCommit(pst: TxPackerStateRef; item: TxItemRef; gasBurned: GasInt)
|
|||
vmState.cumulativeGasUsed += gasBurned
|
||||
vmState.receipts[inx] = vmState.makeReceipt(item.tx.txType)
|
||||
|
||||
# EIP-4844, count blobGasUsed
|
||||
if item.tx.txType >= TxEip4844:
|
||||
pst.blobGasUsed += item.tx.getTotalBlobGas
|
||||
|
||||
# Update txRoot
|
||||
pst.tr.merge(rlp.encode(inx), rlp.encode(item.tx)).isOkOr:
|
||||
raiseAssert "runTxCommit(): merge failed, " & $$error
|
||||
|
@ -262,7 +257,7 @@ proc vmExecCommit(pst: TxPackerStateRef)
|
|||
if vmState.com.forkGTE(Cancun):
|
||||
# EIP-4844
|
||||
xp.chain.excessBlobGas = Opt.some(vmState.blockCtx.excessBlobGas)
|
||||
xp.chain.blobGasUsed = Opt.some(pst.blobGasUsed)
|
||||
xp.chain.blobGasUsed = Opt.some(vmState.blobGasUsed)
|
||||
|
||||
proc balanceDelta: UInt256 =
|
||||
let postBalance = vmState.readOnlyStateDB.getBalance(xp.chain.feeRecipient)
|
||||
|
|
|
@ -183,20 +183,19 @@ func toVoidRc[T](
|
|||
# ------------------------------------------------------------------------------
|
||||
# Private `MPT` call back functions
|
||||
# ------------------------------------------------------------------------------
|
||||
proc mptMethods(): CoreDbMptFns =
|
||||
# These templates are a hack to remove a closure environment that was using
|
||||
# hundreds of mb of memory to have this syntactic convenience
|
||||
# TODO remove methods / abstraction entirely - it is no longer needed
|
||||
template base: untyped = cMpt.base
|
||||
template db: untyped = base.parent # Ditto
|
||||
template api: untyped = base.api # Ditto
|
||||
template mpt: untyped = base.ctx.mpt # Ditto
|
||||
|
||||
proc mptMethods(cMpt: AristoCoreDbMptRef): CoreDbMptFns =
|
||||
## Generic columns database handlers
|
||||
let
|
||||
cMpt = cMpt # So it can savely be captured
|
||||
base = cMpt.base # Will not change and can be captured
|
||||
db = base.parent # Ditto
|
||||
api = base.api # Ditto
|
||||
mpt = base.ctx.mpt # Ditto
|
||||
|
||||
proc mptBackend(): CoreDbMptBackendRef =
|
||||
proc mptBackend(cMpt: AristoCoreDbMptRef): CoreDbMptBackendRef =
|
||||
db.bless AristoCoreDbMptBE(adb: mpt)
|
||||
|
||||
proc mptColFn(): CoreDbColRef =
|
||||
proc mptColFn(cMpt: AristoCoreDbMptRef): CoreDbColRef =
|
||||
if cMpt.mptRoot.distinctBase < LEAST_FREE_VID:
|
||||
return db.bless(AristoColRef(
|
||||
base: base,
|
||||
|
@ -219,7 +218,7 @@ proc mptMethods(cMpt: AristoCoreDbMptRef): CoreDbMptFns =
|
|||
stoRoot: cMpt.mptRoot,
|
||||
stoAddr: cMpt.address)
|
||||
|
||||
proc mptFetch(key: openArray[byte]): CoreDbRc[Blob] =
|
||||
proc mptFetch(cMpt: AristoCoreDbMptRef, key: openArray[byte]): CoreDbRc[Blob] =
|
||||
const info = "fetchFn()"
|
||||
|
||||
let rc = block:
|
||||
|
@ -241,7 +240,7 @@ proc mptMethods(cMpt: AristoCoreDbMptRef): CoreDbMptFns =
|
|||
else:
|
||||
err(rc.error.toError(base, info, MptNotFound))
|
||||
|
||||
proc mptMerge(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
|
||||
proc mptMerge(cMpt: AristoCoreDbMptRef, k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
|
||||
const info = "mergeFn()"
|
||||
|
||||
if cMpt.accPath.isValid:
|
||||
|
@ -257,7 +256,7 @@ proc mptMethods(cMpt: AristoCoreDbMptRef): CoreDbMptFns =
|
|||
|
||||
ok()
|
||||
|
||||
proc mptDelete(key: openArray[byte]): CoreDbRc[void] =
|
||||
proc mptDelete(cMpt: AristoCoreDbMptRef, key: openArray[byte]): CoreDbRc[void] =
|
||||
const info = "deleteFn()"
|
||||
|
||||
let rc = block:
|
||||
|
@ -281,7 +280,7 @@ proc mptMethods(cMpt: AristoCoreDbMptRef): CoreDbMptFns =
|
|||
|
||||
ok()
|
||||
|
||||
proc mptHasPath(key: openArray[byte]): CoreDbRc[bool] =
|
||||
proc mptHasPath(cMpt: AristoCoreDbMptRef, key: openArray[byte]): CoreDbRc[bool] =
|
||||
const info = "hasPathFn()"
|
||||
|
||||
let rc = block:
|
||||
|
@ -295,52 +294,50 @@ proc mptMethods(cMpt: AristoCoreDbMptRef): CoreDbMptFns =
|
|||
return err(rc.error.toError(base, info))
|
||||
ok(rc.value)
|
||||
|
||||
|
||||
## Generic columns database handlers
|
||||
CoreDbMptFns(
|
||||
backendFn: proc(): CoreDbMptBackendRef =
|
||||
mptBackend(),
|
||||
backendFn: proc(cMpt: CoreDbMptRef): CoreDbMptBackendRef =
|
||||
mptBackend(AristoCoreDbMptRef(cMpt)),
|
||||
|
||||
fetchFn: proc(k: openArray[byte]): CoreDbRc[Blob] =
|
||||
mptFetch(k),
|
||||
fetchFn: proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[Blob] =
|
||||
mptFetch(AristoCoreDbMptRef(cMpt), k),
|
||||
|
||||
deleteFn: proc(k: openArray[byte]): CoreDbRc[void] =
|
||||
mptDelete(k),
|
||||
deleteFn: proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[void] =
|
||||
mptDelete(AristoCoreDbMptRef(cMpt), k),
|
||||
|
||||
mergeFn: proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
|
||||
mptMerge(k, v),
|
||||
mergeFn: proc(cMpt: CoreDbMptRef, k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
|
||||
mptMerge(AristoCoreDbMptRef(cMpt), k, v),
|
||||
|
||||
hasPathFn: proc(k: openArray[byte]): CoreDbRc[bool] =
|
||||
mptHasPath(k),
|
||||
hasPathFn: proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[bool] =
|
||||
mptHasPath(AristoCoreDbMptRef(cMpt), k),
|
||||
|
||||
getColFn: proc(): CoreDbColRef =
|
||||
mptColFn())
|
||||
getColFn: proc(cMpt: CoreDbMptRef): CoreDbColRef =
|
||||
mptColFn(AristoCoreDbMptRef(cMpt)))
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private account call back functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc accMethods(cAcc: AristoCoreDbAccRef): CoreDbAccFns =
|
||||
proc accMethods(): CoreDbAccFns =
|
||||
## Account columns database handlers
|
||||
let
|
||||
cAcc = cAcc # So it can savely be captured
|
||||
base = cAcc.base # Will not change and can be captured
|
||||
db = base.parent # Ditto
|
||||
api = base.api # Ditto
|
||||
mpt = base.ctx.mpt # Ditto
|
||||
template base: untyped = cAcc.base
|
||||
template db: untyped = base.parent
|
||||
template api: untyped = base.api
|
||||
template mpt: untyped = base.ctx.mpt
|
||||
|
||||
proc getColFn(): CoreDbColRef =
|
||||
proc getColFn(cAcc: AristoCoreDbAccRef): CoreDbColRef =
|
||||
db.bless AristoColRef(
|
||||
base: base,
|
||||
colType: CtAccounts)
|
||||
|
||||
proc accCloneMpt(): CoreDbRc[CoreDbMptRef] =
|
||||
proc accCloneMpt(cAcc: AristoCoreDbAccRef): CoreDbRc[CoreDbMptRef] =
|
||||
var xpt = AristoCoreDbMptRef(
|
||||
base: base,
|
||||
mptRoot: AccountsVID)
|
||||
xpt.methods = xpt.mptMethods
|
||||
xpt.methods = mptMethods()
|
||||
ok(db.bless xpt)
|
||||
|
||||
proc accFetch(address: EthAddress): CoreDbRc[CoreDbAccount] =
|
||||
proc accFetch(cAcc: AristoCoreDbAccRef, address: EthAddress): CoreDbRc[CoreDbAccount] =
|
||||
const info = "acc/fetchFn()"
|
||||
|
||||
let
|
||||
|
@ -352,7 +349,7 @@ proc accMethods(cAcc: AristoCoreDbAccRef): CoreDbAccFns =
|
|||
|
||||
ok cAcc.toCoreDbAccount(acc, address)
|
||||
|
||||
proc accMerge(account: CoreDbAccount): CoreDbRc[void] =
|
||||
proc accMerge(cAcc: AristoCoreDbAccRef, account: CoreDbAccount): CoreDbRc[void] =
|
||||
const info = "acc/mergeFn()"
|
||||
|
||||
let
|
||||
|
@ -363,7 +360,7 @@ proc accMethods(cAcc: AristoCoreDbAccRef): CoreDbAccFns =
|
|||
return err(rc.error.toError(base, info))
|
||||
ok()
|
||||
|
||||
proc accDelete(address: EthAddress): CoreDbRc[void] =
|
||||
proc accDelete(cAcc: AristoCoreDbAccRef, address: EthAddress): CoreDbRc[void] =
|
||||
const info = "acc/deleteFn()"
|
||||
|
||||
let key = address.keccakHash.data
|
||||
|
@ -374,7 +371,7 @@ proc accMethods(cAcc: AristoCoreDbAccRef): CoreDbAccFns =
|
|||
|
||||
ok()
|
||||
|
||||
proc accStoDelete(address: EthAddress): CoreDbRc[void] =
|
||||
proc accStoDelete(cAcc: AristoCoreDbAccRef, address: EthAddress): CoreDbRc[void] =
|
||||
const info = "stoDeleteFn()"
|
||||
|
||||
let rc = api.deleteStorageTree(mpt, address.to(PathID))
|
||||
|
@ -383,7 +380,7 @@ proc accMethods(cAcc: AristoCoreDbAccRef): CoreDbAccFns =
|
|||
|
||||
ok()
|
||||
|
||||
proc accHasPath(address: EthAddress): CoreDbRc[bool] =
|
||||
proc accHasPath(cAcc: AristoCoreDbAccRef, address: EthAddress): CoreDbRc[bool] =
|
||||
const info = "hasPathFn()"
|
||||
|
||||
let
|
||||
|
@ -394,40 +391,39 @@ proc accMethods(cAcc: AristoCoreDbAccRef): CoreDbAccFns =
|
|||
|
||||
|
||||
CoreDbAccFns(
|
||||
getMptFn: proc(): CoreDbRc[CoreDbMptRef] =
|
||||
accCloneMpt(),
|
||||
getMptFn: proc(cAcc: CoreDbAccRef): CoreDbRc[CoreDbMptRef] =
|
||||
accCloneMpt(AristoCoreDbAccRef(cAcc)),
|
||||
|
||||
fetchFn: proc(address: EthAddress): CoreDbRc[CoreDbAccount] =
|
||||
accFetch(address),
|
||||
fetchFn: proc(cAcc: CoreDbAccRef, address: EthAddress): CoreDbRc[CoreDbAccount] =
|
||||
accFetch(AristoCoreDbAccRef(cAcc), address),
|
||||
|
||||
deleteFn: proc(address: EthAddress): CoreDbRc[void] =
|
||||
accDelete(address),
|
||||
deleteFn: proc(cAcc: CoreDbAccRef, address: EthAddress): CoreDbRc[void] =
|
||||
accDelete(AristoCoreDbAccRef(cAcc), address),
|
||||
|
||||
stoDeleteFn: proc(address: EthAddress): CoreDbRc[void] =
|
||||
accStoDelete(address),
|
||||
stoDeleteFn: proc(cAcc: CoreDbAccRef, address: EthAddress): CoreDbRc[void] =
|
||||
accStoDelete(AristoCoreDbAccRef(cAcc), address),
|
||||
|
||||
mergeFn: proc(acc: CoreDbAccount): CoreDbRc[void] =
|
||||
accMerge(acc),
|
||||
mergeFn: proc(cAcc: CoreDbAccRef, acc: CoreDbAccount): CoreDbRc[void] =
|
||||
accMerge(AristoCoreDbAccRef(cAcc), acc),
|
||||
|
||||
hasPathFn: proc(address: EthAddress): CoreDbRc[bool] =
|
||||
accHasPath(address),
|
||||
hasPathFn: proc(cAcc: CoreDbAccRef, address: EthAddress): CoreDbRc[bool] =
|
||||
accHasPath(AristoCoreDbAccRef(cAcc), address),
|
||||
|
||||
getColFn: proc(): CoreDbColRef =
|
||||
getColFn())
|
||||
getColFn: proc(cAcc: CoreDbAccRef): CoreDbColRef =
|
||||
getColFn(AristoCoreDbAccRef(cAcc)))
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private context call back functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
|
||||
let
|
||||
cCtx = cCtx # So it can savely be captured
|
||||
base = cCtx.base # Will not change and can be captured
|
||||
db = base.parent # Ditto
|
||||
api = base.api # Ditto
|
||||
mpt = cCtx.mpt # Ditto
|
||||
template base: untyped = cCtx.base
|
||||
template db: untyped = base.parent
|
||||
template api: untyped = base.api
|
||||
template mpt: untyped = cCtx.mpt
|
||||
|
||||
proc ctxNewCol(
|
||||
cCtx: AristoCoreDbCtxRef,
|
||||
colType: CoreDbColType;
|
||||
colState: Hash256;
|
||||
address: Opt[EthAddress];
|
||||
|
@ -463,7 +459,7 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
|
|||
err(aristo.GenericError.toError(base, info, RootNotFound))
|
||||
|
||||
|
||||
proc ctxGetMpt(col: CoreDbColRef): CoreDbRc[CoreDbMptRef] =
|
||||
proc ctxGetMpt(cCtx: AristoCoreDbCtxRef, col: CoreDbColRef): CoreDbRc[CoreDbMptRef] =
|
||||
const
|
||||
info = "ctx/getMptFn()"
|
||||
let
|
||||
|
@ -505,10 +501,10 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
|
|||
col.reset = false
|
||||
|
||||
newMpt.base = base
|
||||
newMpt.methods = newMpt.mptMethods()
|
||||
newMpt.methods = mptMethods()
|
||||
ok(db.bless newMpt)
|
||||
|
||||
proc ctxGetAcc(col: CoreDbColRef): CoreDbRc[CoreDbAccRef] =
|
||||
proc ctxGetAcc(cCtx: AristoCoreDbCtxRef, col: CoreDbColRef): CoreDbRc[CoreDbAccRef] =
|
||||
const info = "getAccFn()"
|
||||
|
||||
let col = AristoColRef(col)
|
||||
|
@ -517,31 +513,32 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
|
|||
return err(error.toError(base, info, RootUnacceptable))
|
||||
|
||||
let acc = AristoCoreDbAccRef(base: base)
|
||||
acc.methods = acc.accMethods()
|
||||
acc.methods = accMethods()
|
||||
|
||||
ok(db.bless acc)
|
||||
|
||||
proc ctxForget() =
|
||||
proc ctxForget(cCtx: AristoCoreDbCtxRef) =
|
||||
api.forget(mpt).isOkOr:
|
||||
raiseAssert "forgetFn(): " & $error
|
||||
|
||||
|
||||
CoreDbCtxFns(
|
||||
newColFn: proc(
|
||||
cCtx: CoreDbCtxRef;
|
||||
col: CoreDbColType;
|
||||
colState: Hash256;
|
||||
address: Opt[EthAddress];
|
||||
): CoreDbRc[CoreDbColRef] =
|
||||
ctxNewCol(col, colState, address),
|
||||
ctxNewCol(AristoCoreDbCtxRef(cCtx), col, colState, address),
|
||||
|
||||
getMptFn: proc(col: CoreDbColRef): CoreDbRc[CoreDbMptRef] =
|
||||
ctxGetMpt(col),
|
||||
getMptFn: proc(cCtx: CoreDbCtxRef, col: CoreDbColRef): CoreDbRc[CoreDbMptRef] =
|
||||
ctxGetMpt(AristoCoreDbCtxRef(cCtx), col),
|
||||
|
||||
getAccFn: proc(col: CoreDbColRef): CoreDbRc[CoreDbAccRef] =
|
||||
ctxGetAcc(col),
|
||||
getAccFn: proc(cCtx: CoreDbCtxRef, col: CoreDbColRef): CoreDbRc[CoreDbAccRef] =
|
||||
ctxGetAcc(AristoCoreDbCtxRef(cCtx), col),
|
||||
|
||||
forgetFn: proc() =
|
||||
ctxForget())
|
||||
forgetFn: proc(cCtx: CoreDbCtxRef) =
|
||||
ctxForget(AristoCoreDbCtxRef(cCtx)))
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public handlers and helpers
|
||||
|
|
|
@ -11,7 +11,6 @@
|
|||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
eth/common,
|
||||
"../.."/[constants, errors],
|
||||
./base/[api_tracking, base_desc]
|
||||
|
@ -205,13 +204,20 @@ proc parent*[T: CoreDbKvtRef |
|
|||
##
|
||||
result = child.parent
|
||||
|
||||
proc backend*(dsc: CoreDbKvtRef | CoreDbMptRef): auto =
|
||||
proc backend*(dsc: CoreDbKvtRef): auto =
|
||||
## Getter, retrieves the *raw* backend object for special/localised support.
|
||||
##
|
||||
dsc.setTrackNewApi AnyBackendFn
|
||||
result = dsc.methods.backendFn()
|
||||
dsc.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
|
||||
proc backend*(mpt: CoreDbMptRef): auto =
|
||||
## Getter, retrieves the *raw* backend object for special/localised support.
|
||||
##
|
||||
mpt.setTrackNewApi AnyBackendFn
|
||||
result = mpt.methods.backendFn(mpt)
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
|
||||
proc finish*(db: CoreDbRef; eradicate = false) =
|
||||
## Database destructor. If the argument `eradicate` is set `false`, the
|
||||
## database is left as-is and only the in-memory handlers are cleaned up.
|
||||
|
@ -337,7 +343,7 @@ proc forget*(ctx: CoreDbCtxRef) =
|
|||
## context. This function fails if `ctx` is the default context.
|
||||
##
|
||||
ctx.setTrackNewApi CtxForgetFn
|
||||
ctx.methods.forgetFn()
|
||||
ctx.methods.forgetFn(ctx)
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -381,7 +387,7 @@ proc newColumn*(
|
|||
## db.getAcc col
|
||||
##
|
||||
ctx.setTrackNewApi CtxNewColFn
|
||||
result = ctx.methods.newColFn(colType, colState, address)
|
||||
result = ctx.methods.newColFn(ctx, colType, colState, address)
|
||||
ctx.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, colType, colState, address, result
|
||||
|
||||
|
@ -393,7 +399,7 @@ proc newColumn*(
|
|||
## Shortcut for `ctx.newColumn(CtStorage,colState,some(address))`.
|
||||
##
|
||||
ctx.setTrackNewApi CtxNewColFn
|
||||
result = ctx.methods.newColFn(CtStorage, colState, Opt.some(address))
|
||||
result = ctx.methods.newColFn(ctx, CtStorage, colState, Opt.some(address))
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, colState, address, result
|
||||
|
||||
proc newColumn*(
|
||||
|
@ -406,7 +412,7 @@ proc newColumn*(
|
|||
##
|
||||
ctx.setTrackNewApi CtxNewColFn
|
||||
result = ctx.methods.newColFn(
|
||||
CtStorage, EMPTY_ROOT_HASH, Opt.some(address)).valueOr:
|
||||
ctx, CtStorage, EMPTY_ROOT_HASH, Opt.some(address)).valueOr:
|
||||
raiseAssert error.prettyText()
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
|
||||
|
||||
|
@ -474,7 +480,7 @@ proc getMpt*(
|
|||
## function `getColumn()`.
|
||||
##
|
||||
ctx.setTrackNewApi CtxGetMptFn
|
||||
result = ctx.methods.getMptFn col
|
||||
result = ctx.methods.getMptFn(ctx, col)
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, col, result
|
||||
|
||||
proc getMpt*(
|
||||
|
@ -487,8 +493,8 @@ proc getMpt*(
|
|||
## return a non-nil descriptor or throw an exception.
|
||||
##
|
||||
ctx.setTrackNewApi CtxGetMptFn
|
||||
let col = ctx.methods.newColFn(colType, EMPTY_ROOT_HASH, address).value
|
||||
result = ctx.methods.getMptFn(col).valueOr:
|
||||
let col = ctx.methods.newColFn(ctx, colType, EMPTY_ROOT_HASH, address).value
|
||||
result = ctx.methods.getMptFn(ctx, col).valueOr:
|
||||
raiseAssert error.prettyText()
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, colType, elapsed
|
||||
|
||||
|
@ -500,7 +506,7 @@ proc getMpt*(acc: CoreDbAccRef): CoreDbMptRef =
|
|||
## argument.
|
||||
##
|
||||
acc.setTrackNewApi AccToMptFn
|
||||
result = acc.methods.getMptFn().valueOr:
|
||||
result = acc.methods.getMptFn(acc).valueOr:
|
||||
raiseAssert error.prettyText()
|
||||
acc.ifTrackNewApi:
|
||||
let colState = result.methods.getColFn()
|
||||
|
@ -526,7 +532,7 @@ proc getAcc*(
|
|||
## This function works similar to `getMpt()` for handling accounts.
|
||||
##
|
||||
ctx.setTrackNewApi CtxGetAccFn
|
||||
result = ctx.methods.getAccFn col
|
||||
result = ctx.methods.getAccFn(ctx, col)
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, col, result
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -537,14 +543,14 @@ proc getColumn*(acc: CoreDbAccRef): CoreDbColRef =
|
|||
## Getter, result is not `nil`
|
||||
##
|
||||
acc.setTrackNewApi AccGetColFn
|
||||
result = acc.methods.getColFn()
|
||||
result = acc.methods.getColFn(acc)
|
||||
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, result
|
||||
|
||||
proc getColumn*(mpt: CoreDbMptRef): CoreDbColRef =
|
||||
## Variant of `getColumn()`
|
||||
##
|
||||
mpt.setTrackNewApi MptGetColFn
|
||||
result = mpt.methods.getColFn()
|
||||
result = mpt.methods.getColFn(mpt)
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, result
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -556,9 +562,9 @@ proc fetch*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
|||
## non-empty `Blob` or an error code.
|
||||
##
|
||||
mpt.setTrackNewApi MptFetchFn
|
||||
result = mpt.methods.fetchFn key
|
||||
result = mpt.methods.fetchFn(mpt, key)
|
||||
mpt.ifTrackNewApi:
|
||||
let col = mpt.methods.getColFn()
|
||||
let col = mpt.methods.getColFn(mpt)
|
||||
debug newApiTxt, api, elapsed, col, key=key.toStr, result
|
||||
|
||||
proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
||||
|
@ -566,16 +572,16 @@ proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
|||
## on the database.
|
||||
##
|
||||
mpt.setTrackNewApi MptFetchOrEmptyFn
|
||||
result = mpt.methods.fetchFn key
|
||||
result = mpt.methods.fetchFn(mpt, key)
|
||||
if result.isErr and result.error.error == MptNotFound:
|
||||
result = CoreDbRc[Blob].ok(EmptyBlob)
|
||||
mpt.ifTrackNewApi:
|
||||
let col = mpt.methods.getColFn()
|
||||
let col = mpt.methods.getColFn(mpt)
|
||||
debug newApiTxt, api, elapsed, col, key=key.toStr, result
|
||||
|
||||
proc delete*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[void] =
|
||||
mpt.setTrackNewApi MptDeleteFn
|
||||
result = mpt.methods.deleteFn key
|
||||
result = mpt.methods.deleteFn(mpt, key)
|
||||
mpt.ifTrackNewApi:
|
||||
let col = mpt.methods.getColFn()
|
||||
debug newApiTxt, api, elapsed, col, key=key.toStr, result
|
||||
|
@ -586,9 +592,9 @@ proc merge*(
|
|||
val: openArray[byte];
|
||||
): CoreDbRc[void] =
|
||||
mpt.setTrackNewApi MptMergeFn
|
||||
result = mpt.methods.mergeFn(key, val)
|
||||
result = mpt.methods.mergeFn(mpt, key, val)
|
||||
mpt.ifTrackNewApi:
|
||||
let col = mpt.methods.getColFn()
|
||||
let col = mpt.methods.getColFn(mpt)
|
||||
debug newApiTxt, api, elapsed, col, key=key.toStr, val=val.toLenStr, result
|
||||
|
||||
proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
|
||||
|
@ -596,9 +602,9 @@ proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
|
|||
## than a `Result[]`.
|
||||
##
|
||||
mpt.setTrackNewApi MptHasPathFn
|
||||
result = mpt.methods.hasPathFn key
|
||||
result = mpt.methods.hasPathFn(mpt, key)
|
||||
mpt.ifTrackNewApi:
|
||||
let col = mpt.methods.getColFn()
|
||||
let col = mpt.methods.getColFn(mpt)
|
||||
debug newApiTxt, api, elapsed, col, key=key.toStr, result
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -609,7 +615,7 @@ proc fetch*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[CoreDbAccount] =
|
|||
## Fetch data from the argument `acc`.
|
||||
##
|
||||
acc.setTrackNewApi AccFetchFn
|
||||
result = acc.methods.fetchFn address
|
||||
result = acc.methods.fetchFn(acc, address)
|
||||
acc.ifTrackNewApi:
|
||||
let storage = if result.isErr: "n/a" else: result.value.storage.prettyText()
|
||||
debug newApiTxt, api, elapsed, address, storage, result
|
||||
|
@ -617,7 +623,7 @@ proc fetch*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[CoreDbAccount] =
|
|||
|
||||
proc delete*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[void] =
|
||||
acc.setTrackNewApi AccDeleteFn
|
||||
result = acc.methods.deleteFn address
|
||||
result = acc.methods.deleteFn(acc, address)
|
||||
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
|
||||
|
||||
proc stoDelete*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[void] =
|
||||
|
@ -632,7 +638,7 @@ proc stoDelete*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[void] =
|
|||
## backend.
|
||||
##
|
||||
acc.setTrackNewApi AccStoDeleteFn
|
||||
result = acc.methods.stoDeleteFn address
|
||||
result = acc.methods.stoDeleteFn(acc, address)
|
||||
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
|
||||
|
||||
|
||||
|
@ -641,7 +647,7 @@ proc merge*(
|
|||
account: CoreDbAccount;
|
||||
): CoreDbRc[void] =
|
||||
acc.setTrackNewApi AccMergeFn
|
||||
result = acc.methods.mergeFn account
|
||||
result = acc.methods.mergeFn(acc, account)
|
||||
acc.ifTrackNewApi:
|
||||
let address = account.address
|
||||
debug newApiTxt, api, elapsed, address, result
|
||||
|
@ -651,7 +657,7 @@ proc hasPath*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[bool] =
|
|||
## Would be named `contains` if it returned `bool` rather than `Result[]`.
|
||||
##
|
||||
acc.setTrackNewApi AccHasPathFn
|
||||
result = acc.methods.hasPathFn address
|
||||
result = acc.methods.hasPathFn(acc, address)
|
||||
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
|
||||
|
||||
|
||||
|
|
|
@ -160,16 +160,14 @@ type
|
|||
# --------------------------------------------------
|
||||
# Sub-descriptor: MPT context methods
|
||||
# --------------------------------------------------
|
||||
CoreDbCtxFromTxFn* =
|
||||
proc(root: Hash256; kind: CoreDbColType): CoreDbRc[CoreDbCtxRef] {.noRaise.}
|
||||
CoreDbCtxNewColFn* = proc(
|
||||
colType: CoreDbColType; colState: Hash256; address: Opt[EthAddress];
|
||||
cCtx: CoreDbCtxRef; colType: CoreDbColType; colState: Hash256; address: Opt[EthAddress];
|
||||
): CoreDbRc[CoreDbColRef] {.noRaise.}
|
||||
CoreDbCtxGetMptFn* = proc(
|
||||
root: CoreDbColRef): CoreDbRc[CoreDbMptRef] {.noRaise.}
|
||||
cCtx: CoreDbCtxRef; root: CoreDbColRef): CoreDbRc[CoreDbMptRef] {.noRaise.}
|
||||
CoreDbCtxGetAccFn* = proc(
|
||||
root: CoreDbColRef): CoreDbRc[CoreDbAccRef] {.noRaise.}
|
||||
CoreDbCtxForgetFn* = proc() {.noRaise.}
|
||||
cCtx: CoreDbCtxRef; root: CoreDbColRef): CoreDbRc[CoreDbAccRef] {.noRaise.}
|
||||
CoreDbCtxForgetFn* = proc(cCtx: CoreDbCtxRef) {.noRaise.}
|
||||
|
||||
CoreDbCtxFns* = object
|
||||
## Methods for context maniulation
|
||||
|
@ -181,20 +179,20 @@ type
|
|||
# --------------------------------------------------
|
||||
# Sub-descriptor: generic Mpt/hexary trie methods
|
||||
# --------------------------------------------------
|
||||
CoreDbMptBackendFn* = proc(): CoreDbMptBackendRef {.noRaise.}
|
||||
CoreDbMptBackendFn* = proc(cMpt: CoreDbMptRef): CoreDbMptBackendRef {.noRaise.}
|
||||
CoreDbMptFetchFn* =
|
||||
proc(k: openArray[byte]): CoreDbRc[Blob] {.noRaise.}
|
||||
proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[Blob] {.noRaise.}
|
||||
CoreDbMptFetchAccountFn* =
|
||||
proc(k: openArray[byte]): CoreDbRc[CoreDbAccount] {.noRaise.}
|
||||
proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[CoreDbAccount] {.noRaise.}
|
||||
CoreDbMptDeleteFn* =
|
||||
proc(k: openArray[byte]): CoreDbRc[void] {.noRaise.}
|
||||
proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbMptMergeFn* =
|
||||
proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.}
|
||||
proc(cMpt: CoreDbMptRef, k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbMptMergeAccountFn* =
|
||||
proc(k: openArray[byte]; v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbMptHasPathFn* = proc(k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
|
||||
CoreDbMptGetColFn* = proc(): CoreDbColRef {.noRaise.}
|
||||
CoreDbMptForgetFn* = proc(): CoreDbRc[void] {.noRaise.}
|
||||
proc(cMpt: CoreDbMptRef, k: openArray[byte]; v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbMptHasPathFn* = proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
|
||||
CoreDbMptGetColFn* = proc(cMpt: CoreDbMptRef): CoreDbColRef {.noRaise.}
|
||||
CoreDbMptForgetFn* = proc(cMpt: CoreDbMptRef): CoreDbRc[void] {.noRaise.}
|
||||
|
||||
CoreDbMptFns* = object
|
||||
## Methods for trie objects
|
||||
|
@ -209,14 +207,13 @@ type
|
|||
# ----------------------------------------------------
|
||||
# Sub-descriptor: Mpt/hexary trie methods for accounts
|
||||
# ------------------------------------------------------
|
||||
CoreDbAccGetMptFn* = proc(): CoreDbRc[CoreDbMptRef] {.noRaise.}
|
||||
CoreDbAccFetchFn* = proc(k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
|
||||
CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbAccStoDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbAccMergeFn* = proc(v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
|
||||
CoreDbAccGetColFn* = proc(): CoreDbColRef {.noRaise.}
|
||||
CoreDbAccForgetFn* = proc(): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbAccGetMptFn* = proc(cAcc: CoreDbAccRef): CoreDbRc[CoreDbMptRef] {.noRaise.}
|
||||
CoreDbAccFetchFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
|
||||
CoreDbAccDeleteFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbAccStoDeleteFn* = proc(cAcc: CoreDbAccRef,k: EthAddress): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbAccMergeFn* = proc(cAcc: CoreDbAccRef, v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbAccHasPathFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[bool] {.noRaise.}
|
||||
CoreDbAccGetColFn* = proc(cAcc: CoreDbAccRef): CoreDbColRef {.noRaise.}
|
||||
|
||||
CoreDbAccFns* = object
|
||||
## Methods for trie objects
|
||||
|
|
|
@ -97,7 +97,7 @@ const
|
|||
#
|
||||
# to pick right function when <op> is a variable . Using
|
||||
#
|
||||
# VmOpHandlers[fork][op].exec.run
|
||||
# VmOpHandlers[fork][op].exec
|
||||
#
|
||||
# only works when <op> is a constant. There seems to be some optimisation
|
||||
# that garbles the <exec> sub-structures elements <prep>, <run>, and <post>.
|
||||
|
@ -113,7 +113,7 @@ const
|
|||
for op in Op:
|
||||
rc[fork][op].name = tab[op].name
|
||||
rc[fork][op].info = tab[op].info
|
||||
rc[fork][op].run = tab[op].exec.run
|
||||
rc[fork][op].run = tab[op].exec
|
||||
rc
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
|
|
@ -290,179 +290,157 @@ const
|
|||
forks: VmOpAllForks,
|
||||
name: "add",
|
||||
info: "Addition operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: VmOpFn addOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: VmOpFn addOp),
|
||||
|
||||
|
||||
(opCode: Mul, ## 0x02, Multiplication
|
||||
forks: VmOpAllForks,
|
||||
name: "mul",
|
||||
info: "Multiplication operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: mulOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: mulOp),
|
||||
|
||||
|
||||
(opCode: Sub, ## 0x03, Subtraction
|
||||
forks: VmOpAllForks,
|
||||
name: "sub",
|
||||
info: "Subtraction operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: subOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: subOp),
|
||||
|
||||
|
||||
(opCode: Div, ## 0x04, Division
|
||||
forks: VmOpAllForks,
|
||||
name: "divide",
|
||||
info: "Integer division operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: divideOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: divideOp),
|
||||
|
||||
|
||||
(opCode: Sdiv, ## 0x05, Signed division
|
||||
forks: VmOpAllForks,
|
||||
name: "sdiv",
|
||||
info: "Signed integer division operation (truncated)",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sdivOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: sdivOp),
|
||||
|
||||
|
||||
(opCode: Mod, ## 0x06, Modulo
|
||||
forks: VmOpAllForks,
|
||||
name: "modulo",
|
||||
info: "Modulo remainder operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: moduloOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: moduloOp),
|
||||
|
||||
|
||||
(opCode: Smod, ## 0x07, Signed modulo
|
||||
forks: VmOpAllForks,
|
||||
name: "smod",
|
||||
info: "Signed modulo remainder operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: smodOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: smodOp),
|
||||
|
||||
|
||||
(opCode: Addmod, ## 0x08, Modulo addition, Intermediate
|
||||
## computations do not roll over at 2^256
|
||||
forks: VmOpAllForks,
|
||||
name: "addmod",
|
||||
info: "Modulo addition operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: addmodOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: addmodOp),
|
||||
|
||||
|
||||
(opCode: Mulmod, ## 0x09, Modulo multiplication, Intermediate
|
||||
## computations do not roll over at 2^256
|
||||
forks: VmOpAllForks,
|
||||
name: "mulmod",
|
||||
info: "Modulo multiplication operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: mulmodOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: mulmodOp),
|
||||
|
||||
|
||||
(opCode: Exp, ## 0x0a, Exponentiation
|
||||
forks: VmOpAllForks,
|
||||
name: "exp",
|
||||
info: "Exponentiation operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: expOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: expOp),
|
||||
|
||||
|
||||
(opCode: SignExtend, ## 0x0b, Extend 2's complemet length
|
||||
forks: VmOpAllForks,
|
||||
name: "signExtend",
|
||||
info: "Extend length of two’s complement signed integer",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: signExtendOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: signExtendOp),
|
||||
|
||||
|
||||
(opCode: Lt, ## 0x10, Less-than
|
||||
forks: VmOpAllForks,
|
||||
name: "lt",
|
||||
info: "Less-than comparison",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: ltOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: ltOp),
|
||||
|
||||
|
||||
(opCode: Gt, ## 0x11, Greater-than
|
||||
forks: VmOpAllForks,
|
||||
name: "gt",
|
||||
info: "Greater-than comparison",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: gtOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: gtOp),
|
||||
|
||||
|
||||
(opCode: Slt, ## 0x12, Signed less-than
|
||||
forks: VmOpAllForks,
|
||||
name: "slt",
|
||||
info: "Signed less-than comparison",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sltOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: sltOp),
|
||||
|
||||
|
||||
(opCode: Sgt, ## 0x13, Signed greater-than
|
||||
forks: VmOpAllForks,
|
||||
name: "sgt",
|
||||
info: "Signed greater-than comparison",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sgtOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: sgtOp),
|
||||
|
||||
|
||||
(opCode: Eq, ## 0x14, Equality
|
||||
forks: VmOpAllForks,
|
||||
name: "eq",
|
||||
info: "Equality comparison",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: eqOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: eqOp),
|
||||
|
||||
|
||||
(opCode: IsZero, ## 0x15, Not operator
|
||||
forks: VmOpAllForks,
|
||||
name: "isZero",
|
||||
info: "Simple not operator (Note: real Yellow Paper description)",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: isZeroOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: isZeroOp),
|
||||
|
||||
|
||||
(opCode: And, ## 0x16, AND
|
||||
forks: VmOpAllForks,
|
||||
name: "andOp",
|
||||
info: "Bitwise AND operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: andOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: andOp),
|
||||
|
||||
|
||||
(opCode: Or, ## 0x17, OR
|
||||
forks: VmOpAllForks,
|
||||
name: "orOp",
|
||||
info: "Bitwise OR operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: orOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: orOp),
|
||||
|
||||
|
||||
(opCode: Xor, ## 0x18, XOR
|
||||
forks: VmOpAllForks,
|
||||
name: "xorOp",
|
||||
info: "Bitwise XOR operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: xorOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: xorOp),
|
||||
|
||||
|
||||
(opCode: Not, ## 0x19, NOT
|
||||
forks: VmOpAllForks,
|
||||
name: "notOp",
|
||||
info: "Bitwise NOT operation",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: notOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: notOp),
|
||||
|
||||
|
||||
(opCode: Byte, ## 0x1a, Retrieve byte
|
||||
forks: VmOpAllForks,
|
||||
name: "byteOp",
|
||||
info: "Retrieve single byte from word",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: byteOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: byteOp),
|
||||
|
||||
|
||||
# Constantinople's new opcodes
|
||||
|
||||
|
@ -470,25 +448,21 @@ const
|
|||
forks: VmOpConstantinopleAndLater,
|
||||
name: "shlOp",
|
||||
info: "Shift left",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: shlOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: shlOp),
|
||||
|
||||
|
||||
(opCode: Shr, ## 0x1c, Shift right logical
|
||||
forks: VmOpConstantinopleAndLater,
|
||||
name: "shrOp",
|
||||
info: "Logical shift right",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: shrOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: shrOp),
|
||||
|
||||
|
||||
(opCode: Sar, ## 0x1d, Shift right arithmetic
|
||||
forks: VmOpConstantinopleAndLater,
|
||||
name: "sarOp",
|
||||
info: "Arithmetic shift right",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sarOp,
|
||||
post: VmOpIgnore))]
|
||||
exec: sarOp)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -100,89 +100,77 @@ const
|
|||
forks: VmOpAllForks,
|
||||
name: "blockhash",
|
||||
info: "Get the hash of one of the 256 most recent complete blocks",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: blockhashOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: blockhashOp),
|
||||
|
||||
|
||||
(opCode: Coinbase, ## 0x41, Beneficiary address
|
||||
forks: VmOpAllForks,
|
||||
name: "coinbase",
|
||||
info: "Get the block's beneficiary address",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: coinBaseOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: coinBaseOp),
|
||||
|
||||
|
||||
(opCode: Timestamp, ## 0x42, Block timestamp.
|
||||
forks: VmOpAllForks,
|
||||
name: "timestamp",
|
||||
info: "Get the block's timestamp",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: timestampOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: timestampOp),
|
||||
|
||||
|
||||
(opCode: Number, ## 0x43, Block number
|
||||
forks: VmOpAllForks,
|
||||
name: "blockNumber",
|
||||
info: "Get the block's number",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: blocknumberOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: blocknumberOp),
|
||||
|
||||
|
||||
(opCode: Difficulty, ## 0x44, Block difficulty
|
||||
forks: VmOpAllForks,
|
||||
name: "difficulty",
|
||||
info: "Get the block's difficulty",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: difficultyOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: difficultyOp),
|
||||
|
||||
|
||||
(opCode: GasLimit, ## 0x45, Block gas limit
|
||||
forks: VmOpAllForks,
|
||||
name: "gasLimit",
|
||||
info: "Get the block's gas limit",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: gasLimitOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: gasLimitOp),
|
||||
|
||||
|
||||
(opCode: ChainIdOp, ## 0x46, EIP-155 chain identifier
|
||||
forks: VmOpIstanbulAndLater,
|
||||
name: "chainId",
|
||||
info: "Get current chain’s EIP-155 unique identifier",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: chainIdOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: chainIdOp),
|
||||
|
||||
|
||||
(opCode: SelfBalance, ## 0x47, Contract balance.
|
||||
forks: VmOpIstanbulAndLater,
|
||||
name: "selfBalance",
|
||||
info: "Get current contract's balance",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: selfBalanceOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: selfBalanceOp),
|
||||
|
||||
|
||||
(opCode: BaseFee, ## 0x48, EIP-1559 Block base fee.
|
||||
forks: VmOpLondonAndLater,
|
||||
name: "baseFee",
|
||||
info: "Get current block's EIP-1559 base fee",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: baseFeeOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: baseFeeOp),
|
||||
|
||||
|
||||
(opCode: BlobHash, ## 0x49, EIP-4844 Transaction versioned hash
|
||||
forks: VmOpCancunAndLater,
|
||||
name: "blobHash",
|
||||
info: "Get current transaction's EIP-4844 versioned hash",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: blobHashOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: blobHashOp),
|
||||
|
||||
|
||||
(opCode: BlobBaseFee, ## 0x4a, EIP-7516 Returns the current data-blob base-fee
|
||||
forks: VmOpCancunAndLater,
|
||||
name: "blobBaseFee",
|
||||
info: "Returns the current data-blob base-fee",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: blobBaseFeeOp,
|
||||
post: VmOpIgnore))]
|
||||
exec: blobBaseFeeOp)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -514,34 +514,29 @@ const
|
|||
forks: VmOpAllForks,
|
||||
name: "call",
|
||||
info: "Message-Call into an account",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: callOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: callOp),
|
||||
|
||||
|
||||
(opCode: CallCode, ## 0xf2, Message-Call with alternative code
|
||||
forks: VmOpAllForks,
|
||||
name: "callCode",
|
||||
info: "Message-call into this account with alternative account's code",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: callCodeOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: callCodeOp),
|
||||
|
||||
|
||||
(opCode: DelegateCall, ## 0xf4, CallCode with persisting sender and value
|
||||
forks: VmOpHomesteadAndLater,
|
||||
name: "delegateCall",
|
||||
info: "Message-call into this account with an alternative account's " &
|
||||
"code but persisting the current values for sender and value.",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: delegateCallOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: delegateCallOp),
|
||||
|
||||
|
||||
(opCode: StaticCall, ## 0xfa, Static message-call into an account
|
||||
forks: VmOpByzantiumAndLater,
|
||||
name: "staticCall",
|
||||
info: "Static message-call into an account",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: staticCallOp,
|
||||
post: VmOpIgnore))]
|
||||
exec: staticCallOp)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -255,17 +255,14 @@ const
|
|||
forks: VmOpAllForks,
|
||||
name: "create",
|
||||
info: "Create a new account with associated code",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: createOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: createOp),
|
||||
|
||||
|
||||
(opCode: Create2, ## 0xf5, Create using keccak256
|
||||
forks: VmOpConstantinopleAndLater,
|
||||
name: "create2",
|
||||
info: "Behaves identically to CREATE, except using keccak256",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: create2Op,
|
||||
post: VmOpIgnore))]
|
||||
exec: create2Op)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -28,20 +28,12 @@ type
|
|||
## back via argument descriptor ``k``
|
||||
proc(k: var VmCtx): EvmResultVoid {.nimcall, gcsafe, raises:[].}
|
||||
|
||||
|
||||
VmOpHanders* = tuple ## three step op code execution, typically
|
||||
## only the ``run`` entry is activated
|
||||
prep: VmOpFn
|
||||
run: VmOpFn
|
||||
post: VmOpFn
|
||||
|
||||
|
||||
VmOpExec* = tuple ## op code handler entry
|
||||
opCode: Op ## index back-reference
|
||||
forks: set[EVMFork] ## forks applicable for this operation
|
||||
name: string ## handler name
|
||||
info: string ## handter info, explainer
|
||||
exec: VmOpHanders
|
||||
exec: VmOpFn
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public
|
||||
|
|
|
@ -253,163 +253,142 @@ const
|
|||
forks: VmOpAllForks,
|
||||
name: "address",
|
||||
info: "Get address of currently executing account",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: VmOpFn addressOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: VmOpFn addressOp),
|
||||
|
||||
|
||||
(opCode: Balance, ## 0x31, Balance
|
||||
forks: VmOpAllForks - VmOpBerlinAndLater,
|
||||
name: "balance",
|
||||
info: "Get balance of the given account",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: balanceOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: balanceOp),
|
||||
|
||||
|
||||
(opCode: Balance, ## 0x31, Balance for Berlin and later
|
||||
forks: VmOpBerlinAndLater,
|
||||
name: "balanceEIP2929",
|
||||
info: "EIP2929: Get balance of the given account",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: balanceEIP2929Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: balanceEIP2929Op),
|
||||
|
||||
|
||||
(opCode: Origin, ## 0x32, Origination address
|
||||
forks: VmOpAllForks,
|
||||
name: "origin",
|
||||
info: "Get execution origination address",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: originOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: originOp),
|
||||
|
||||
|
||||
(opCode: Caller, ## 0x33, Caller address
|
||||
forks: VmOpAllForks,
|
||||
name: "caller",
|
||||
info: "Get caller address",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: callerOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: callerOp),
|
||||
|
||||
|
||||
(opCode: CallValue, ## 0x34, Execution deposited value
|
||||
forks: VmOpAllForks,
|
||||
name: "callValue",
|
||||
info: "Get deposited value by the instruction/transaction " &
|
||||
"responsible for this execution",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: callValueOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: callValueOp),
|
||||
|
||||
|
||||
(opCode: CallDataLoad, ## 0x35, Input data
|
||||
forks: VmOpAllForks,
|
||||
name: "callDataLoad",
|
||||
info: "Get input data of current environment",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: callDataLoadOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: callDataLoadOp),
|
||||
|
||||
|
||||
(opCode: CallDataSize, ## 0x36, Size of input data
|
||||
forks: VmOpAllForks,
|
||||
name: "callDataSize",
|
||||
info: "Get size of input data in current environment",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: callDataSizeOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: callDataSizeOp),
|
||||
|
||||
|
||||
(opCode: CallDataCopy, ## 0x37, Copy input data to memory.
|
||||
forks: VmOpAllForks,
|
||||
name: "callDataCopy",
|
||||
info: "Copy input data in current environment to memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: callDataCopyOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: callDataCopyOp),
|
||||
|
||||
|
||||
(opCode: CodeSize, ## 0x38, Size of code
|
||||
forks: VmOpAllForks,
|
||||
name: "codeSize",
|
||||
info: "Get size of code running in current environment",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: codeSizeOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: codeSizeOp),
|
||||
|
||||
|
||||
(opCode: CodeCopy, ## 0x39, Copy code to memory.
|
||||
forks: VmOpAllForks,
|
||||
name: "codeCopy",
|
||||
info: "Copy code running in current environment to memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: codeCopyOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: codeCopyOp),
|
||||
|
||||
|
||||
(opCode: GasPrice, ## 0x3a, Gas price
|
||||
forks: VmOpAllForks,
|
||||
name: "gasPrice",
|
||||
info: "Get price of gas in current environment",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: gasPriceOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: gasPriceOp),
|
||||
|
||||
|
||||
(opCode: ExtCodeSize, ## 0x3b, Account code size
|
||||
forks: VmOpAllForks - VmOpBerlinAndLater,
|
||||
name: "extCodeSize",
|
||||
info: "Get size of an account's code",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: extCodeSizeOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: extCodeSizeOp),
|
||||
|
||||
|
||||
(opCode: ExtCodeSize, ## 0x3b, Account code size for Berlin and later
|
||||
forks: VmOpBerlinAndLater,
|
||||
name: "extCodeSizeEIP2929",
|
||||
info: "EIP2929: Get size of an account's code",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: extCodeSizeEIP2929Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: extCodeSizeEIP2929Op),
|
||||
|
||||
|
||||
(opCode: ExtCodeCopy, ## 0x3c, Account code copy to memory.
|
||||
forks: VmOpAllForks - VmOpBerlinAndLater,
|
||||
name: "extCodeCopy",
|
||||
info: "Copy an account's code to memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: extCodeCopyOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: extCodeCopyOp),
|
||||
|
||||
|
||||
(opCode: ExtCodeCopy, ## 0x3c, Account Code-copy for Berlin and later
|
||||
forks: VmOpBerlinAndLater,
|
||||
name: "extCodeCopyEIP2929",
|
||||
info: "EIP2929: Copy an account's code to memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: extCodeCopyEIP2929Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: extCodeCopyEIP2929Op),
|
||||
|
||||
|
||||
(opCode: ReturnDataSize, ## 0x3d, Previous call output data size
|
||||
forks: VmOpByzantiumAndLater,
|
||||
name: "returnDataSize",
|
||||
info: "Get size of output data from the previous call " &
|
||||
"from the current environment",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: returnDataSizeOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: returnDataSizeOp),
|
||||
|
||||
|
||||
(opCode: ReturnDataCopy, ## 0x3e, Previous call output data copy to memory
|
||||
forks: VmOpByzantiumAndLater,
|
||||
name: "returnDataCopy",
|
||||
info: "Copy output data from the previous call to memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: returnDataCopyOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: returnDataCopyOp),
|
||||
|
||||
|
||||
(opCode: ExtCodeHash, ## 0x3f, Contract hash
|
||||
forks: VmOpConstantinopleAndLater - VmOpBerlinAndLater,
|
||||
name: "extCodeHash",
|
||||
info: "Returns the keccak256 hash of a contract’s code",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: extCodeHashOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: extCodeHashOp),
|
||||
|
||||
|
||||
(opCode: ExtCodeHash, ## 0x3f, Contract hash for berlin and later
|
||||
forks: VmOpBerlinAndLater,
|
||||
name: "extCodeHashEIP2929",
|
||||
info: "EIP2929: Returns the keccak256 hash of a contract’s code",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: extCodeHashEIP2929Op,
|
||||
post: VmOpIgnore))]
|
||||
exec: extCodeHashEIP2929Op)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -99,10 +99,7 @@ macro genOphList*(runHandler: static[OphNumToTextFn];
|
|||
"info".asText(n.handlerInfo),
|
||||
nnkExprColonExpr.newTree(
|
||||
newIdentNode("exec"),
|
||||
nnkPar.newTree(
|
||||
"prep".asIdent("VmOpIgnore"),
|
||||
"run".asIdent(n.runHandler),
|
||||
"post".asIdent("VmOpIgnore"))))
|
||||
newIdentNode(n.runHandler)))
|
||||
|
||||
# => const <varName>*: seq[VmOpExec] = @[ <records> ]
|
||||
result = nnkStmtList.newTree(
|
||||
|
|
|
@ -62,9 +62,7 @@ const
|
|||
forks: VmOpAllForks,
|
||||
name: "sha3",
|
||||
info: "Compute Keccak-256 hash",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sha3Op,
|
||||
post: VmOpIgnore))]
|
||||
exec: sha3Op)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -324,164 +324,143 @@ const
|
|||
forks: VmOpAllForks,
|
||||
name: "pop",
|
||||
info: "Remove item from stack",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: VmOpFn popOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: VmOpFn popOp),
|
||||
|
||||
|
||||
(opCode: Mload, ## 0x51, Load word from memory
|
||||
forks: VmOpAllForks,
|
||||
name: "mload",
|
||||
info: "Load word from memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: mloadOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: mloadOp),
|
||||
|
||||
|
||||
(opCode: Mstore, ## 0x52, Save word to memory
|
||||
forks: VmOpAllForks,
|
||||
name: "mstore",
|
||||
info: "Save word to memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: mstoreOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: mstoreOp),
|
||||
|
||||
|
||||
(opCode: Mstore8, ## 0x53, Save byte to memory
|
||||
forks: VmOpAllForks,
|
||||
name: "mstore8",
|
||||
info: "Save byte to memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: mstore8Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: mstore8Op),
|
||||
|
||||
|
||||
(opCode: Sload, ## 0x54, Load word from storage
|
||||
forks: VmOpAllForks - VmOpBerlinAndLater,
|
||||
name: "sload",
|
||||
info: "Load word from storage",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sloadOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: sloadOp),
|
||||
|
||||
|
||||
(opCode: Sload, ## 0x54, sload for Berlin and later
|
||||
forks: VmOpBerlinAndLater,
|
||||
name: "sloadEIP2929",
|
||||
info: "EIP2929: sload for Berlin and later",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sloadEIP2929Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: sloadEIP2929Op),
|
||||
|
||||
|
||||
(opCode: Sstore, ## 0x55, Save word
|
||||
forks: VmOpAllForks - VmOpConstantinopleAndLater,
|
||||
name: "sstore",
|
||||
info: "Save word to storage",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sstoreOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: sstoreOp),
|
||||
|
||||
|
||||
(opCode: Sstore, ## 0x55, sstore for Constantinople and later
|
||||
forks: VmOpConstantinopleAndLater - VmOpPetersburgAndLater,
|
||||
name: "sstoreEIP1283",
|
||||
info: "EIP1283: sstore for Constantinople and later",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sstoreEIP1283Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: sstoreEIP1283Op),
|
||||
|
||||
|
||||
(opCode: Sstore, ## 0x55, sstore for Petersburg and later
|
||||
forks: VmOpPetersburgAndLater - VmOpIstanbulAndLater,
|
||||
name: "sstore",
|
||||
info: "sstore for Constantinople and later",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sstoreOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: sstoreOp),
|
||||
|
||||
|
||||
(opCode: Sstore, ## 0x55, sstore for Istanbul and later
|
||||
forks: VmOpIstanbulAndLater - VmOpBerlinAndLater,
|
||||
name: "sstoreEIP2200",
|
||||
info: "EIP2200: sstore for Istanbul and later",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sstoreEIP2200Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: sstoreEIP2200Op),
|
||||
|
||||
|
||||
(opCode: Sstore, ## 0x55, sstore for Berlin and later
|
||||
forks: VmOpBerlinAndLater,
|
||||
name: "sstoreEIP2929",
|
||||
info: "EIP2929: sstore for Istanbul and later",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: sstoreEIP2929Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: sstoreEIP2929Op),
|
||||
|
||||
|
||||
(opCode: Jump, ## 0x56, Jump
|
||||
forks: VmOpAllForks,
|
||||
name: "jump",
|
||||
info: "Alter the program counter",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: jumpOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: jumpOp),
|
||||
|
||||
|
||||
(opCode: JumpI, ## 0x57, Conditional jump
|
||||
forks: VmOpAllForks,
|
||||
name: "jumpI",
|
||||
info: "Conditionally alter the program counter",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: jumpIOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: jumpIOp),
|
||||
|
||||
|
||||
(opCode: Pc, ## 0x58, Program counter prior to instruction
|
||||
forks: VmOpAllForks,
|
||||
name: "pc",
|
||||
info: "Get the value of the program counter prior to the increment "&
|
||||
"corresponding to this instruction",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: pcOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: pcOp),
|
||||
|
||||
|
||||
(opCode: Msize, ## 0x59, Memory size
|
||||
forks: VmOpAllForks,
|
||||
name: "msize",
|
||||
info: "Get the size of active memory in bytes",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: msizeOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: msizeOp),
|
||||
|
||||
|
||||
(opCode: Gas, ## 0x5a, Get available gas
|
||||
forks: VmOpAllForks,
|
||||
name: "gas",
|
||||
info: "Get the amount of available gas, including the corresponding "&
|
||||
"reduction for the cost of this instruction",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: gasOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: gasOp),
|
||||
|
||||
|
||||
(opCode: JumpDest, ## 0x5b, Mark jump target. This operation has no effect
|
||||
## on machine state during execution
|
||||
forks: VmOpAllForks,
|
||||
name: "jumpDest",
|
||||
info: "Mark a valid destination for jumps",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: jumpDestOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: jumpDestOp),
|
||||
|
||||
|
||||
(opCode: Tload, ## 0x5c, Load word from transient storage.
|
||||
forks: VmOpCancunAndLater,
|
||||
name: "tLoad",
|
||||
info: "Load word from transient storage",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: tloadOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: tloadOp),
|
||||
|
||||
|
||||
(opCode: Tstore, ## 0x5d, Save word to transient storage.
|
||||
forks: VmOpCancunAndLater,
|
||||
name: "tStore",
|
||||
info: "Save word to transient storage",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: tstoreOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: tstoreOp),
|
||||
|
||||
|
||||
(opCode: Mcopy, ## 0x5e, Copy memory
|
||||
forks: VmOpCancunAndLater,
|
||||
name: "MCopy",
|
||||
info: "Copy memory",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: mCopyOp,
|
||||
post: VmOpIgnore))]
|
||||
exec: mCopyOp)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -76,9 +76,7 @@ const
|
|||
forks: VmOpShanghaiAndLater,
|
||||
name: "Push0",
|
||||
info: "Push 0 on the stack",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: push0Op,
|
||||
post: VmOpIgnore))]
|
||||
exec: push0Op)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -164,58 +164,50 @@ const
|
|||
forks: VmOpAllForks,
|
||||
name: "returnOp",
|
||||
info: "Halt execution returning output data",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: returnOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: returnOp),
|
||||
|
||||
|
||||
(opCode: Revert, ## 0xfd, Halt and revert state changes
|
||||
forks: VmOpByzantiumAndLater,
|
||||
name: "revert",
|
||||
info: "Halt execution reverting state changes but returning data " &
|
||||
"and remaining gas",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: revertOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: revertOp),
|
||||
|
||||
|
||||
(opCode: Invalid, ## 0xfe, invalid instruction.
|
||||
forks: VmOpAllForks,
|
||||
name: "invalidInstruction",
|
||||
info: "Designated invalid instruction",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: invalidOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: invalidOp),
|
||||
|
||||
|
||||
(opCode: SelfDestruct, ## 0xff, Halt execution, prep for later deletion
|
||||
forks: VmOpAllForks - VmOpTangerineAndLater,
|
||||
name: "selfDestruct",
|
||||
info: "Halt execution and register account for later deletion",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: selfDestructOp,
|
||||
post: VmOpIgnore)),
|
||||
exec: selfDestructOp),
|
||||
|
||||
|
||||
(opCode: SelfDestruct, ## 0xff, EIP150: self destruct, Tangerine
|
||||
forks: VmOpTangerineAndLater - VmOpSpuriousAndLater,
|
||||
name: "selfDestructEIP150",
|
||||
info: "EIP150: Halt execution and register account for later deletion",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: selfDestructEIP150Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: selfDestructEIP150Op),
|
||||
|
||||
|
||||
(opCode: SelfDestruct, ## 0xff, EIP161: self destruct, Spurious and later
|
||||
forks: VmOpSpuriousAndLater - VmOpBerlinAndLater,
|
||||
name: "selfDestructEIP161",
|
||||
info: "EIP161: Halt execution and register account for later deletion",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: selfDestructEIP161Op,
|
||||
post: VmOpIgnore)),
|
||||
exec: selfDestructEIP161Op),
|
||||
|
||||
|
||||
(opCode: SelfDestruct, ## 0xff, EIP2929: self destruct, Berlin and later
|
||||
forks: VmOpBerlinAndLater,
|
||||
name: "selfDestructEIP2929",
|
||||
info: "EIP2929: Halt execution and register account for later deletion",
|
||||
exec: (prep: VmOpIgnore,
|
||||
run: selfDestructEIP2929Op,
|
||||
post: VmOpIgnore))]
|
||||
exec: selfDestructEIP2929Op)]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -36,6 +36,7 @@ proc init(
|
|||
self.tracer = tracer
|
||||
self.stateDB = ac
|
||||
self.flags = flags
|
||||
self.blobGasUsed = 0'u64
|
||||
|
||||
func blockCtx(com: CommonRef, header: BlockHeader):
|
||||
BlockContext =
|
||||
|
|
|
@ -66,6 +66,7 @@ type
|
|||
receipts* : seq[Receipt]
|
||||
cumulativeGasUsed*: GasInt
|
||||
gasCosts* : GasCosts
|
||||
blobGasUsed* : uint64
|
||||
|
||||
Computation* = ref object
|
||||
# The execution computation
|
||||
|
|
|
@ -21,6 +21,9 @@ import
|
|||
./db/era1_db,
|
||||
beacon_chain/era_db
|
||||
|
||||
declareGauge nec_import_block_number,
|
||||
"Latest imported block number"
|
||||
|
||||
declareCounter nec_imported_blocks,
|
||||
"Blocks processed during import"
|
||||
|
||||
|
@ -105,6 +108,8 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
|
|||
if csv != nil:
|
||||
close(csv)
|
||||
|
||||
nec_import_block_number.set(start.int64)
|
||||
|
||||
template blockNumber(): uint64 =
|
||||
start + imported
|
||||
|
||||
|
@ -155,6 +160,7 @@ proc importBlocks*(conf: NimbusConf, com: CommonRef) =
|
|||
avgMGps = f(gas.float / 1000000 / diff0),
|
||||
elapsed = shortLog(time2 - time0, 3)
|
||||
|
||||
metrics.set(nec_import_block_number, int64(blockNumber))
|
||||
nec_imported_blocks.inc(blocks.len)
|
||||
nec_imported_transactions.inc(statsRes[].txs)
|
||||
nec_imported_gas.inc(statsRes[].gas)
|
||||
|
|
|
@ -115,47 +115,10 @@ func skipBCTests*(folder: string, name: string): bool =
|
|||
"DelegateCallSpam.json",
|
||||
]
|
||||
|
||||
# skip failing cases
|
||||
# TODO: see issue #2260
|
||||
const
|
||||
problematicCases = [
|
||||
"powToPosBlockRejection.json",
|
||||
"initialVal.json",
|
||||
"ForkStressTest.json",
|
||||
"HomesteadOverrideFrontier.json",
|
||||
"blockChainFrontierWithLargerTDvsHomesteadBlockchain.json",
|
||||
"blockChainFrontierWithLargerTDvsHomesteadBlockchain2.json",
|
||||
"RPC_API_Test.json",
|
||||
"DaoTransactions.json",
|
||||
"CallContractFromNotBestBlock.json",
|
||||
"ChainAtoChainB.json",
|
||||
"ChainAtoChainBCallContractFormA.json",
|
||||
"ChainAtoChainB_BlockHash.json",
|
||||
"ChainAtoChainB_difficultyB.json",
|
||||
"ChainAtoChainBtoChainA.json",
|
||||
"ChainAtoChainBtoChainAtoChainB.json",
|
||||
"UncleFromSideChain.json",
|
||||
"lotsOfLeafs.json",
|
||||
"lotsOfBranchesOverrideAtTheEnd.json",
|
||||
"lotsOfBranchesOverrideAtTheMiddle.json",
|
||||
"newChainFrom4Block.json",
|
||||
"newChainFrom5Block.json",
|
||||
"newChainFrom6Block.json",
|
||||
"sideChainWithMoreTransactions.json",
|
||||
"sideChainWithMoreTransactions2.json",
|
||||
"sideChainWithNewMaxDifficultyStartingFromBlock3AfterBlock4.json",
|
||||
"uncleBlockAtBlock3AfterBlock3.json",
|
||||
"uncleBlockAtBlock3afterBlock4.json",
|
||||
]
|
||||
|
||||
func skipNewBCTests*(folder: string, name: string): bool =
|
||||
if folder in ["vmPerformance"]:
|
||||
return true
|
||||
|
||||
# TODO: fix this
|
||||
if name in problematicCases:
|
||||
return true
|
||||
|
||||
|
||||
# the new BC tests also contains these slow tests
|
||||
# for Istanbul fork
|
||||
if slowGSTTests(folder, name):
|
||||
|
@ -166,7 +129,7 @@ func skipNewBCTests*(folder: string, name: string): bool =
|
|||
"randomStatetest94.json",
|
||||
"DelegateCallSpam.json",
|
||||
]
|
||||
|
||||
|
||||
func skipPrecompilesTests*(folder: string, name: string): bool =
|
||||
# EIP2565: modExp gas cost
|
||||
# reason: included in berlin
|
||||
|
|
|
@ -9,521 +9,95 @@
|
|||
# according to those terms.
|
||||
|
||||
import
|
||||
std/[json, os, tables, strutils, options, streams],
|
||||
std/json,
|
||||
unittest2,
|
||||
eth/rlp, eth/trie/trie_defs, eth/common/eth_types_rlp,
|
||||
stew/byteutils,
|
||||
./test_helpers, ./test_allowed_to_fail,
|
||||
../premix/parser, test_config,
|
||||
../nimbus/[evm/state, evm/types, errors, constants],
|
||||
./test_helpers,
|
||||
./test_allowed_to_fail,
|
||||
../nimbus/db/ledger,
|
||||
../nimbus/utils/[utils, debug],
|
||||
../nimbus/evm/tracer/legacy_tracer,
|
||||
../nimbus/evm/tracer/json_tracer,
|
||||
../nimbus/core/[validate, chain, pow/header],
|
||||
../nimbus/core/chain/forked_chain,
|
||||
../tools/common/helpers as chp,
|
||||
../tools/evmstate/helpers,
|
||||
../nimbus/common/common,
|
||||
../nimbus/core/eip4844,
|
||||
../nimbus/rpc/experimental
|
||||
../nimbus/core/eip4844
|
||||
|
||||
type
|
||||
SealEngine = enum
|
||||
NoProof
|
||||
Ethash
|
||||
|
||||
TestBlock = object
|
||||
goodBlock: bool
|
||||
blockRLP : Blob
|
||||
header : BlockHeader
|
||||
body : BlockBody
|
||||
hasException: bool
|
||||
withdrawals: Option[seq[Withdrawal]]
|
||||
|
||||
TestCtx = object
|
||||
lastBlockHash: Hash256
|
||||
TestEnv = object
|
||||
blocks: seq[EthBlock]
|
||||
genesisHeader: BlockHeader
|
||||
blocks : seq[TestBlock]
|
||||
sealEngine : Option[SealEngine]
|
||||
debugMode : bool
|
||||
trace : bool
|
||||
vmState : BaseVMState
|
||||
debugData : JsonNode
|
||||
network : string
|
||||
postStateHash: Hash256
|
||||
json : bool
|
||||
lastBlockHash: Hash256
|
||||
network: string
|
||||
pre: JsonNode
|
||||
|
||||
proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus, debugMode = false, trace = false)
|
||||
|
||||
func normalizeNumber(n: JsonNode): JsonNode =
|
||||
let str = n.getStr
|
||||
if str == "0x":
|
||||
result = newJString("0x0")
|
||||
elif str == "0x0":
|
||||
result = n
|
||||
elif str == "0x00":
|
||||
result = newJString("0x0")
|
||||
elif str[2] == '0':
|
||||
var i = 2
|
||||
while str[i] == '0':
|
||||
inc i
|
||||
result = newJString("0x" & str.substr(i))
|
||||
else:
|
||||
result = n
|
||||
|
||||
func normalizeData(n: JsonNode): JsonNode =
|
||||
if n.getStr() == "":
|
||||
result = newJString("0x")
|
||||
else:
|
||||
result = n
|
||||
|
||||
func normalizeBlockHeader(node: JsonNode): JsonNode =
|
||||
for k, v in node:
|
||||
case k
|
||||
of "bloom": node["logsBloom"] = v
|
||||
of "coinbase": node["miner"] = v
|
||||
of "uncleHash": node["sha3Uncles"] = v
|
||||
of "receiptTrie": node["receiptsRoot"] = v
|
||||
of "transactionsTrie": node["transactionsRoot"] = v
|
||||
of "number", "difficulty", "gasUsed",
|
||||
"gasLimit", "timestamp", "baseFeePerGas":
|
||||
node[k] = normalizeNumber(v)
|
||||
of "extraData":
|
||||
node[k] = normalizeData(v)
|
||||
else: discard
|
||||
result = node
|
||||
|
||||
func normalizeWithdrawal(node: JsonNode): JsonNode =
|
||||
for k, v in node:
|
||||
case k
|
||||
of "amount", "index", "validatorIndex":
|
||||
node[k] = normalizeNumber(v)
|
||||
else: discard
|
||||
result = node
|
||||
|
||||
proc parseHeader(blockHeader: JsonNode, testStatusIMPL: var TestStatus): BlockHeader =
|
||||
result = normalizeBlockHeader(blockHeader).parseBlockHeader
|
||||
var blockHash: Hash256
|
||||
blockHeader.fromJson "hash", blockHash
|
||||
check blockHash == rlpHash(result)
|
||||
|
||||
proc parseWithdrawals(withdrawals: JsonNode): Option[seq[Withdrawal]] =
|
||||
case withdrawals.kind
|
||||
of JArray:
|
||||
var ws: seq[Withdrawal]
|
||||
for v in withdrawals:
|
||||
ws.add(parseWithdrawal(normalizeWithdrawal(v)))
|
||||
some(ws)
|
||||
else:
|
||||
none[seq[Withdrawal]]()
|
||||
|
||||
proc parseBlocks(blocks: JsonNode): seq[TestBlock] =
|
||||
for fixture in blocks:
|
||||
var t: TestBlock
|
||||
t.withdrawals = none[seq[Withdrawal]]()
|
||||
for key, value in fixture:
|
||||
case key
|
||||
of "blockHeader":
|
||||
# header is absent in bad block
|
||||
t.goodBlock = true
|
||||
of "rlp":
|
||||
fixture.fromJson "rlp", t.blockRLP
|
||||
of "transactions", "uncleHeaders", "hasBigInt",
|
||||
"blocknumber", "chainname", "chainnetwork":
|
||||
discard
|
||||
of "transactionSequence":
|
||||
var noError = true
|
||||
for tx in value:
|
||||
let valid = tx["valid"].getStr == "true"
|
||||
noError = noError and valid
|
||||
doAssert(noError == false, "NOT A VALID TEST CASE")
|
||||
of "withdrawals":
|
||||
t.withdrawals = parseWithdrawals(value)
|
||||
of "rlp_decoded":
|
||||
# this field is intended for client who
|
||||
# doesn't support rlp encoding(e.g. evmone)
|
||||
discard
|
||||
else:
|
||||
doAssert("expectException" in key, key)
|
||||
t.hasException = true
|
||||
|
||||
result.add t
|
||||
|
||||
proc parseTestCtx(fixture: JsonNode, testStatusIMPL: var TestStatus): TestCtx =
|
||||
result.blocks = parseBlocks(fixture["blocks"])
|
||||
|
||||
fixture.fromJson "lastblockhash", result.lastBlockHash
|
||||
|
||||
if "genesisRLP" in fixture:
|
||||
var genesisRLP: Blob
|
||||
fixture.fromJson "genesisRLP", genesisRLP
|
||||
result.genesisHeader = rlp.decode(genesisRLP, EthBlock).header
|
||||
else:
|
||||
result.genesisHeader = parseHeader(fixture["genesisBlockHeader"], testStatusIMPL)
|
||||
var goodBlock = true
|
||||
for h in result.blocks:
|
||||
goodBlock = goodBlock and h.goodBlock
|
||||
check goodBlock == false
|
||||
|
||||
if "sealEngine" in fixture:
|
||||
result.sealEngine = some(parseEnum[SealEngine](fixture["sealEngine"].getStr))
|
||||
|
||||
if "postStateHash" in fixture:
|
||||
result.postStateHash.data = hexToByteArray[32](fixture["postStateHash"].getStr)
|
||||
|
||||
result.network = fixture["network"].getStr
|
||||
|
||||
proc testGetMultiKeys(chain: ChainRef, parentHeader, currentHeader: BlockHeader) =
|
||||
# check that current state matches current header
|
||||
let currentStateRoot = chain.vmState.stateDB.rootHash
|
||||
if currentStateRoot != currentHeader.stateRoot:
|
||||
raise newException(ValidationError, "Expected currentStateRoot == currentHeader.stateRoot")
|
||||
|
||||
let mkeys = getMultiKeys(chain.com, currentHeader, false)
|
||||
|
||||
# check that the vmstate hasn't changed after call to getMultiKeys
|
||||
if chain.vmState.stateDB.rootHash != currentHeader.stateRoot:
|
||||
raise newException(ValidationError, "Expected chain.vmstate.stateDB.rootHash == currentHeader.stateRoot")
|
||||
|
||||
# use the MultiKeysRef to build the block proofs
|
||||
let
|
||||
ac = LedgerRef.init(chain.com.db, currentHeader.stateRoot)
|
||||
blockProofs = getBlockProofs(ac, mkeys)
|
||||
if blockProofs.len() != 0:
|
||||
raise newException(ValidationError, "Expected blockProofs.len() == 0")
|
||||
|
||||
proc setupTracer(ctx: TestCtx): TracerRef =
|
||||
if ctx.trace:
|
||||
if ctx.json:
|
||||
var tracerFlags = {
|
||||
TracerFlags.DisableMemory,
|
||||
TracerFlags.DisableStorage,
|
||||
TracerFlags.DisableState,
|
||||
TracerFlags.DisableStateDiff,
|
||||
TracerFlags.DisableReturnData
|
||||
}
|
||||
let stream = newFileStream(stdout)
|
||||
newJsonTracer(stream, tracerFlags, false)
|
||||
else:
|
||||
newLegacyTracer({})
|
||||
else:
|
||||
TracerRef()
|
||||
|
||||
proc importBlock(ctx: var TestCtx, com: CommonRef,
|
||||
tb: TestBlock, checkSeal: bool) =
|
||||
if ctx.vmState.isNil or ctx.vmState.stateDB.isTopLevelClean.not:
|
||||
let
|
||||
parentHeader = com.db.getBlockHeader(tb.header.parentHash)
|
||||
tracerInst = ctx.setupTracer()
|
||||
ctx.vmState = BaseVMState.new(
|
||||
parentHeader,
|
||||
tb.header,
|
||||
com,
|
||||
tracerInst,
|
||||
)
|
||||
ctx.vmState.collectWitnessData = true # Enable saving witness data
|
||||
|
||||
let
|
||||
chain = newChain(com, extraValidation = true, ctx.vmState)
|
||||
res = chain.persistBlocks([EthBlock.init(tb.header, tb.body)])
|
||||
|
||||
if res.isErr():
|
||||
raise newException(ValidationError, res.error())
|
||||
# testGetMultiKeys fails with:
|
||||
# Unhandled defect: AccountLedger.init(): RootNotFound(Aristo, ctx=ctx/newColFn(), error=GenericError) [AssertionDefect]
|
||||
#else:
|
||||
# testGetMultiKeys(chain, chain.vmState.parent, tb.header)
|
||||
|
||||
proc applyFixtureBlockToChain(ctx: var TestCtx, tb: var TestBlock,
|
||||
com: CommonRef, checkSeal: bool) =
|
||||
decompose(tb.blockRLP, tb.header, tb.body)
|
||||
ctx.importBlock(com, tb, checkSeal)
|
||||
|
||||
func shouldCheckSeal(ctx: TestCtx): bool =
|
||||
if ctx.sealEngine.isSome:
|
||||
result = ctx.sealEngine.get() != NoProof
|
||||
|
||||
proc collectDebugData(ctx: var TestCtx) =
|
||||
if ctx.vmState.isNil:
|
||||
return
|
||||
|
||||
let vmState = ctx.vmState
|
||||
let tracerInst = LegacyTracer(vmState.tracer)
|
||||
let tracingResult = if ctx.trace: tracerInst.getTracingResult() else: %[]
|
||||
ctx.debugData.add %{
|
||||
"blockNumber": %($vmState.blockNumber),
|
||||
"structLogs": tracingResult,
|
||||
}
|
||||
|
||||
proc runTestCtx(ctx: var TestCtx, com: CommonRef, testStatusIMPL: var TestStatus) =
|
||||
doAssert com.db.persistHeader(ctx.genesisHeader,
|
||||
com.consensus == ConsensusType.POS)
|
||||
check com.db.getCanonicalHead().blockHash == ctx.genesisHeader.blockHash
|
||||
let checkSeal = ctx.shouldCheckSeal
|
||||
|
||||
if ctx.debugMode:
|
||||
ctx.debugData = newJArray()
|
||||
|
||||
for idx, tb in ctx.blocks:
|
||||
if tb.goodBlock:
|
||||
try:
|
||||
|
||||
ctx.applyFixtureBlockToChain(
|
||||
ctx.blocks[idx], com, checkSeal)
|
||||
|
||||
except CatchableError as ex:
|
||||
debugEcho "FATAL ERROR(WE HAVE BUG): ", ex.msg
|
||||
|
||||
else:
|
||||
var noError = true
|
||||
try:
|
||||
ctx.applyFixtureBlockToChain(ctx.blocks[idx],
|
||||
com, checkSeal)
|
||||
except ValueError, ValidationError, BlockNotFound, RlpError:
|
||||
# failure is expected on this bad block
|
||||
check (tb.hasException or (not tb.goodBlock))
|
||||
noError = false
|
||||
if ctx.debugMode:
|
||||
ctx.debugData.add %{
|
||||
"exception": %($getCurrentException().name),
|
||||
"msg": %getCurrentExceptionMsg()
|
||||
}
|
||||
|
||||
# Block should have caused a validation error
|
||||
check noError == false
|
||||
|
||||
if ctx.debugMode and not ctx.json:
|
||||
ctx.collectDebugData()
|
||||
|
||||
proc debugDataFromAccountList(ctx: TestCtx): JsonNode =
|
||||
let vmState = ctx.vmState
|
||||
result = %{"debugData": ctx.debugData}
|
||||
if not vmState.isNil:
|
||||
result["accounts"] = vmState.dumpAccounts()
|
||||
|
||||
proc debugDataFromPostStateHash(ctx: TestCtx): JsonNode =
|
||||
let vmState = ctx.vmState
|
||||
%{
|
||||
"debugData": ctx.debugData,
|
||||
"postStateHash": %($vmState.readOnlyStateDB.rootHash),
|
||||
"expectedStateHash": %($ctx.postStateHash),
|
||||
"accounts": vmState.dumpAccounts()
|
||||
}
|
||||
|
||||
proc dumpDebugData(ctx: TestCtx, fixtureName: string, fixtureIndex: int, success: bool) =
|
||||
let debugData = if ctx.postStateHash != Hash256():
|
||||
debugDataFromPostStateHash(ctx)
|
||||
else:
|
||||
debugDataFromAccountList(ctx)
|
||||
|
||||
let status = if success: "_success" else: "_failed"
|
||||
let name = fixtureName.replace('/', '-').replace(':', '-')
|
||||
writeFile("debug_" & name & "_" & $fixtureIndex & status & ".json", debugData.pretty())
|
||||
|
||||
proc testFixture(node: JsonNode, testStatusIMPL: var TestStatus, debugMode = false, trace = false) =
|
||||
# 1 - mine the genesis block
|
||||
# 2 - loop over blocks:
|
||||
# - apply transactions
|
||||
# - mine block
|
||||
# 3 - diff resulting state with expected state
|
||||
# 4 - check that all previous blocks were valid
|
||||
let specifyIndex = test_config.getConfiguration().index.get(0)
|
||||
var fixtureIndex = 0
|
||||
var fixtureTested = false
|
||||
|
||||
for fixtureName, fixture in node:
|
||||
inc fixtureIndex
|
||||
if specifyIndex > 0 and fixtureIndex != specifyIndex:
|
||||
continue
|
||||
|
||||
var ctx = parseTestCtx(fixture, testStatusIMPL)
|
||||
|
||||
let
|
||||
memDB = newCoreDbRef DefaultDbMemory
|
||||
stateDB = LedgerRef.init(memDB, emptyRlpHash)
|
||||
config = getChainConfig(ctx.network)
|
||||
com = CommonRef.new(memDB, config)
|
||||
|
||||
setupStateDB(fixture["pre"], stateDB)
|
||||
stateDB.persist()
|
||||
|
||||
check stateDB.rootHash == ctx.genesisHeader.stateRoot
|
||||
|
||||
ctx.debugMode = debugMode
|
||||
ctx.trace = trace
|
||||
ctx.json = test_config.getConfiguration().json
|
||||
|
||||
var success = true
|
||||
proc parseBlocks(node: JsonNode): seq[EthBlock] =
|
||||
for x in node:
|
||||
try:
|
||||
ctx.runTestCtx(com, testStatusIMPL)
|
||||
let header = com.db.getCanonicalHead()
|
||||
let lastBlockHash = header.blockHash
|
||||
check lastBlockHash == ctx.lastBlockHash
|
||||
success = lastBlockHash == ctx.lastBlockHash
|
||||
if ctx.postStateHash != Hash256():
|
||||
let rootHash = ctx.vmState.stateDB.rootHash
|
||||
if ctx.postStateHash != rootHash:
|
||||
raise newException(ValidationError, "incorrect postStateHash, expect=" &
|
||||
$rootHash & ", get=" &
|
||||
$ctx.postStateHash
|
||||
)
|
||||
elif lastBlockHash == ctx.lastBlockHash:
|
||||
# multiple chain, we are using the last valid canonical
|
||||
# state root to test against 'postState'
|
||||
let stateDB = LedgerRef.init(memDB, header.stateRoot)
|
||||
verifyStateDB(fixture["postState"], ledger.ReadOnlyStateDB(stateDB))
|
||||
let blockRLP = hexToSeqByte(x["rlp"].getStr)
|
||||
let blk = rlp.decode(blockRLP, EthBlock)
|
||||
result.add blk
|
||||
except RlpError:
|
||||
# invalid rlp will not participate in block validation
|
||||
# e.g. invalid rlp received from network
|
||||
discard
|
||||
|
||||
success = lastBlockHash == ctx.lastBlockHash
|
||||
except ValidationError as E:
|
||||
echo fixtureName, " ERROR: ", E.msg
|
||||
success = false
|
||||
proc parseEnv(node: JsonNode): TestEnv =
|
||||
result.blocks = parseBlocks(node["blocks"])
|
||||
let genesisRLP = hexToSeqByte(node["genesisRLP"].getStr)
|
||||
result.genesisHeader = rlp.decode(genesisRLP, EthBlock).header
|
||||
result.lastBlockHash = Hash256(data: hexToByteArray[32](node["lastblockhash"].getStr))
|
||||
result.network = node["network"].getStr
|
||||
result.pre = node["pre"]
|
||||
|
||||
if ctx.debugMode:
|
||||
ctx.dumpDebugData(fixtureName, fixtureIndex, success)
|
||||
proc executeCase(node: JsonNode): bool =
|
||||
let
|
||||
env = parseEnv(node)
|
||||
memDB = newCoreDbRef DefaultDbMemory
|
||||
stateDB = LedgerRef.init(memDB, EMPTY_ROOT_HASH)
|
||||
config = getChainConfig(env.network)
|
||||
com = CommonRef.new(memDB, config)
|
||||
|
||||
fixtureTested = true
|
||||
check success == true
|
||||
setupStateDB(env.pre, stateDB)
|
||||
stateDB.persist()
|
||||
|
||||
if not fixtureTested:
|
||||
echo test_config.getConfiguration().testSubject, " not tested at all, wrong index?"
|
||||
if specifyIndex <= 0 or specifyIndex > node.len:
|
||||
echo "Maximum subtest available: ", node.len
|
||||
if not com.db.persistHeader(env.genesisHeader,
|
||||
com.consensus == ConsensusType.POS):
|
||||
debugEcho "Failed to put genesis header into database"
|
||||
return false
|
||||
|
||||
proc blockchainJsonMain*(debugMode = false) =
|
||||
if com.db.getCanonicalHead().blockHash != env.genesisHeader.blockHash:
|
||||
debugEcho "Genesis block hash is database different with expected genesis block hash"
|
||||
return false
|
||||
|
||||
var c = initForkedChain(com)
|
||||
for blk in env.blocks:
|
||||
c.addBlock(blk)
|
||||
|
||||
c.finalizeSegment(env.lastBlockHash).isOkOr:
|
||||
debugEcho error
|
||||
return false
|
||||
|
||||
true
|
||||
|
||||
proc executeFile(node: JsonNode, testStatusIMPL: var TestStatus) =
|
||||
for name, bctCase in node:
|
||||
check executeCase(bctCase)
|
||||
|
||||
proc blockchainJsonMain*() =
|
||||
const
|
||||
legacyFolder = "eth_tests/LegacyTests/Constantinople/BlockchainTests"
|
||||
newFolder = "eth_tests/BlockchainTests"
|
||||
#newFolder = "eth_tests/EIPTests/BlockchainTests"
|
||||
#newFolder = "eth_tests/EIPTests/Pyspecs/cancun"
|
||||
|
||||
let res = loadKzgTrustedSetup()
|
||||
if res.isErr:
|
||||
echo "FATAL: ", res.error
|
||||
quit(QuitFailure)
|
||||
|
||||
let config = test_config.getConfiguration()
|
||||
if config.testSubject == "" or not debugMode:
|
||||
# run all test fixtures
|
||||
if config.legacy:
|
||||
suite "block chain json tests":
|
||||
jsonTest(legacyFolder, "BlockchainTests", testFixture, skipBCTests)
|
||||
else:
|
||||
suite "new block chain json tests":
|
||||
jsonTest(newFolder, "newBlockchainTests", testFixture, skipNewBCTests)
|
||||
if false:
|
||||
suite "block chain json tests":
|
||||
jsonTest(legacyFolder, "BlockchainTests", executeFile, skipBCTests)
|
||||
else:
|
||||
# execute single test in debug mode
|
||||
if config.testSubject.len == 0:
|
||||
echo "missing test subject"
|
||||
quit(QuitFailure)
|
||||
|
||||
let folder = if config.legacy: legacyFolder else: newFolder
|
||||
let path = "tests/fixtures/" & folder
|
||||
let n = json.parseFile(path / config.testSubject)
|
||||
var testStatusIMPL: TestStatus
|
||||
testFixture(n, testStatusIMPL, debugMode = true, config.trace)
|
||||
suite "new block chain json tests":
|
||||
jsonTest(newFolder, "newBlockchainTests", executeFile, skipNewBCTests)
|
||||
|
||||
when isMainModule:
|
||||
import std/times
|
||||
var message: string
|
||||
|
||||
let start = getTime()
|
||||
|
||||
## Processing command line arguments
|
||||
if test_config.processArguments(message) != test_config.Success:
|
||||
echo message
|
||||
quit(QuitFailure)
|
||||
else:
|
||||
if len(message) > 0:
|
||||
echo message
|
||||
quit(QuitSuccess)
|
||||
|
||||
blockchainJsonMain(true)
|
||||
let elpd = getTime() - start
|
||||
echo "TIME: ", elpd
|
||||
|
||||
# lastBlockHash -> every fixture has it, hash of a block header
|
||||
# genesisRLP -> NOT every fixture has it, rlp bytes of genesis block header
|
||||
# _info -> every fixture has it, can be omitted
|
||||
# pre, postState -> every fixture has it, prestate and post state
|
||||
# genesisHeader -> every fixture has it
|
||||
# network -> every fixture has it
|
||||
# # EIP150 247
|
||||
# # ConstantinopleFix 286
|
||||
# # Homestead 256
|
||||
# # Frontier 396
|
||||
# # Byzantium 263
|
||||
# # EIP158ToByzantiumAt5 1
|
||||
# # EIP158 233
|
||||
# # HomesteadToDaoAt5 4
|
||||
# # Constantinople 285
|
||||
# # HomesteadToEIP150At5 1
|
||||
# # FrontierToHomesteadAt5 7
|
||||
# # ByzantiumToConstantinopleFixAt5 1
|
||||
|
||||
# sealEngine -> NOT every fixture has it
|
||||
# # NoProof 1709
|
||||
# # Ethash 112
|
||||
|
||||
# blocks -> every fixture has it, an array of blocks ranging from 1 block to 303 blocks
|
||||
# # transactions 6230 can be empty
|
||||
# # # to 6089 -> "" if contractCreation
|
||||
# # # value 6089
|
||||
# # # gasLimit 6089 -> "gas"
|
||||
# # # s 6089
|
||||
# # # r 6089
|
||||
# # # gasPrice 6089
|
||||
# # # v 6089
|
||||
# # # data 6089 -> "input"
|
||||
# # # nonce 6089
|
||||
# # blockHeader 6230 can be not present, e.g. bad rlp
|
||||
# # uncleHeaders 6230 can be empty
|
||||
|
||||
# # rlp 6810 has rlp but no blockheader, usually has exception
|
||||
# # blocknumber 2733
|
||||
# # chainname 1821 -> 'A' to 'H', and 'AA' to 'DD'
|
||||
# # chainnetwork 21 -> all values are "Frontier"
|
||||
# # expectExceptionALL 420
|
||||
# # # UncleInChain 55
|
||||
# # # InvalidTimestamp 42
|
||||
# # # InvalidGasLimit 42
|
||||
# # # InvalidNumber 42
|
||||
# # # InvalidDifficulty 35
|
||||
# # # InvalidBlockNonce 28
|
||||
# # # InvalidUncleParentHash 26
|
||||
# # # ExtraDataTooBig 21
|
||||
# # # InvalidStateRoot 21
|
||||
# # # ExtraDataIncorrect 19
|
||||
# # # UnknownParent 16
|
||||
# # # TooMuchGasUsed 14
|
||||
# # # InvalidReceiptsStateRoot 9
|
||||
# # # InvalidUnclesHash 7
|
||||
# # # UncleIsBrother 7
|
||||
# # # UncleTooOld 7
|
||||
# # # InvalidTransactionsRoot 7
|
||||
# # # InvalidGasUsed 7
|
||||
# # # InvalidLogBloom 7
|
||||
# # # TooManyUncles 7
|
||||
# # # OutOfGasIntrinsic 1
|
||||
# # expectExceptionEIP150 17
|
||||
# # # TooMuchGasUsed 7
|
||||
# # # InvalidReceiptsStateRoot 7
|
||||
# # # InvalidStateRoot 3
|
||||
# # expectExceptionByzantium 17
|
||||
# # # InvalidStateRoot 10
|
||||
# # # TooMuchGasUsed 7
|
||||
# # expectExceptionHomestead 17
|
||||
# # # InvalidReceiptsStateRoot 7
|
||||
# # # BlockGasLimitReached 7
|
||||
# # # InvalidStateRoot 3
|
||||
# # expectExceptionConstantinople 14
|
||||
# # # InvalidStateRoot 7
|
||||
# # # TooMuchGasUsed 7
|
||||
# # expectExceptionEIP158 14
|
||||
# # # TooMuchGasUsed 7
|
||||
# # # InvalidReceiptsStateRoot 7
|
||||
# # expectExceptionFrontier 14
|
||||
# # # InvalidReceiptsStateRoot 7
|
||||
# # # BlockGasLimitReached 7
|
||||
# # expectExceptionConstantinopleFix 14
|
||||
# # # InvalidStateRoot 7
|
||||
# # # TooMuchGasUsed 7
|
||||
blockchainJsonMain()
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2022 Status Research & Development GmbH
|
||||
# Copyright (c) 2022-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
|
@ -569,6 +569,15 @@ const
|
|||
output: T8nOutput(trace: true, result: true),
|
||||
expOut: "istanbul.txt",
|
||||
),
|
||||
TestSpec(
|
||||
name : "Blob gas used exceeds max allowance",
|
||||
base : "testdata/00-523",
|
||||
input : t8nInput(
|
||||
"alloc.json", "txs.rlp", "env.json", "Cancun", "0",
|
||||
),
|
||||
output: T8nOutput(result: true),
|
||||
expOut: "exp.json",
|
||||
),
|
||||
]
|
||||
|
||||
proc main() =
|
||||
|
|
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"0x000f3df6d732807ef1319fb7b8bb8522d0beac02": {
|
||||
"balance": "0x00",
|
||||
"code": "0x3373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500",
|
||||
"nonce": "0x01",
|
||||
"storage": {
|
||||
}
|
||||
},
|
||||
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": {
|
||||
"balance": "0x1db38f",
|
||||
"code": "0x",
|
||||
"nonce": "0x00",
|
||||
"storage": {
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,26 @@
|
|||
{
|
||||
"blockHashes": {
|
||||
"0": "0x142abad1cb1f9c8a277d59f52cc29560472cf7bf4c46e12bfca8cf6b728acee2",
|
||||
"1": "0x13af3033e1f55060b7d587ab559289599c74454c74403f3d8f05c6e237bb619e"
|
||||
},
|
||||
"currentBaseFee": "0x7",
|
||||
"currentBlobGasUsed": "0xe0000",
|
||||
"currentCoinbase": "0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba",
|
||||
"currentDifficulty": "0x0",
|
||||
"currentExcessBlobGas": "0xe0000",
|
||||
"currentGasLimit": "0x16345785d8a0000",
|
||||
"currentNumber": "0x1",
|
||||
"currentRandom": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"currentTimestamp": "0xc",
|
||||
"parentBaseFee": "0x7",
|
||||
"parentBeaconBlockRoot": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"parentBlobGasUsed": "0x0",
|
||||
"parentDifficulty": "0x0",
|
||||
"parentExcessBlobGas": "0x140000",
|
||||
"parentGasLimit": "0x16345785d8a0000",
|
||||
"parentGasUsed": "0x0",
|
||||
"parentTimestamp": "0x0",
|
||||
"parentUncleHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"withdrawals": [
|
||||
]
|
||||
}
|
|
@ -0,0 +1,101 @@
|
|||
{
|
||||
"result": {
|
||||
"stateRoot": "0x2d5a3738dc0d76c5d1625b96d1597549c4cd218934167a672be4cc364646bdfc",
|
||||
"txRoot": "0x3836ad4f15ec36789c84c94fb8342a0e5765d80446986c417b22954d1c9a5e8b",
|
||||
"receiptsRoot": "0xc88bbb6ffab5658b295a44086ed7e77d4526e07e4025496e68a55042b24c81be",
|
||||
"logsHash": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"receipts": [
|
||||
{
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0x5208",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0x2f68a5bb6b843147e9ef8628047b6c5d5a0df834dc572007af7d4fce8e644c20",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x0",
|
||||
"type": "0x3"
|
||||
},
|
||||
{
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0xa410",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0xfd836af5a833b60c4b07612a7d77f4fc9d9412841c03f94c6eef90ab2e716bf6",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x1",
|
||||
"type": "0x3"
|
||||
},
|
||||
{
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0xf618",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0xa15a612ac2c6c92a62da1c8e8431a0335ad67066f078ea0434ee6bd48243caa5",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x2",
|
||||
"type": "0x3"
|
||||
},
|
||||
{
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0x14820",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0x53402f0a35345a4a4b6d47eb19fedfcaa21ba2239ed3997a080e317377f1b777",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x3",
|
||||
"type": "0x3"
|
||||
},
|
||||
{
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0x19a28",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0x5bd89296bd9454785bed316caeba5e6381552ed1f24f8386ee4774e390d6823e",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x4",
|
||||
"type": "0x3"
|
||||
},
|
||||
{
|
||||
"root": "0x",
|
||||
"status": "0x1",
|
||||
"cumulativeGasUsed": "0x1ec30",
|
||||
"logsBloom": "0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
|
||||
"logs": null,
|
||||
"transactionHash": "0xdbb3a1b212d44a97f43b0b9f0db7e47d91c3d8baf3745accffe16b607901eba7",
|
||||
"contractAddress": "0x0000000000000000000000000000000000000000",
|
||||
"gasUsed": "0x5208",
|
||||
"blockHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
|
||||
"transactionIndex": "0x5",
|
||||
"type": "0x3"
|
||||
}
|
||||
],
|
||||
"currentDifficulty": null,
|
||||
"gasUsed": "0x1ec30",
|
||||
"rejected": [
|
||||
{
|
||||
"index": 6,
|
||||
"error": "blobGasUsed 917504 exceeds maximum allowance 786432"
|
||||
}
|
||||
],
|
||||
"currentBaseFee": "0x7",
|
||||
"withdrawalsRoot": "0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421",
|
||||
"currentExcessBlobGas": "0xe0000",
|
||||
"blobGasUsed": "0xe0000"
|
||||
}
|
||||
}
|
|
@ -0,0 +1 @@
|
|||
"0xf903c6b88803f885018080078252089400000000000000000000000000000000000001000180c001e1a0010000000000000000000000000000000000000000000000000000000000000001a0a8f4757869fbb831ba4ed3a7c8f868b0e2e0c1eda97937aab035560fffdedf3ca019d9b041540e3d6f5f56dc29deb8834a08171e92037cf567b922357e70f8e54ab88803f885010180078252089400000000000000000000000000000000000001000180c001e1a0010000000000000000000000000000000000000000000000000000000000000001a0ef4c93a2afbe03bc2f31334b5c42654f2b88f3d1526e2719454638d2c87f3eaaa06234b91bfba07b555f8e11d44486319ef599f61fdb70bd5ec02085a41ff8e2ccb88803f885010280078252089400000000000000000000000000000000000001000180c001e1a0010000000000000000000000000000000000000000000000000000000000000080a0fe46a6659784d1c49e66bfe79f53c9282521940f406d321a953600d3297498e1a011d6bd31ffcfc37bd89923bd565eca3df245ab923b95799811f227502a95a429b88803f885010380078252089400000000000000000000000000000000000001000180c001e1a0010000000000000000000000000000000000000000000000000000000000000001a05d87fd0644fda3b8ae7c840519b0a51c86e54097b63c394a8ebfb13f0212da78a07054fc9d2468c15c2d8257a54e42419e6a53fe0d4568ccf95ecd4414e3481cdeb88803f885010480078252089400000000000000000000000000000000000001000180c001e1a0010000000000000000000000000000000000000000000000000000000000000001a0903154f2ee69dbdc29f7369ac4270a31d32b8af6c28959d5c6b2b2ba696e9e7da06989cf772024d3efa30b4b99bc1e1dee27813964f39448d07377537a2681d139b88803f885010580078252089400000000000000000000000000000000000001000180c001e1a0010000000000000000000000000000000000000000000000000000000000000080a07efec980ef3b40c74b2de3dee9e9f081b9b4ae4ae1732d64ba0e9553aaf08dc4a0464e6720d2d74b4d68f37f339608278be3a16802b61a46dc9895b898a70939eab88803f885010680078252089400000000000000000000000000000000000001000180c001e1a0010000000000000000000000000000000000000000000000000000000000000001a02145ded5025c6144b8f5ae446db8b617c5ff760eb7c17fa439dedb576ada3ab3a03a15f5307cc6a12f853f6f3732a1d2598d117a387256ab0f8f49d9431caf43bf"
|
|
@ -234,7 +234,6 @@ proc exec(ctx: var TransContext,
|
|||
vmState.processBeaconBlockRoot(ctx.env.parentBeaconBlockRoot.get).isOkOr:
|
||||
raise newError(ErrorConfig, error)
|
||||
|
||||
var blobGasUsed = 0'u64
|
||||
for txIndex, txRes in txList:
|
||||
if txRes.isErr:
|
||||
rejected.add RejectedTx(
|
||||
|
@ -274,7 +273,6 @@ proc exec(ctx: var TransContext,
|
|||
rec, tx, sender, txIndex, gasUsed
|
||||
)
|
||||
includedTx.add tx
|
||||
blobGasUsed += tx.getTotalBlobGas
|
||||
|
||||
# Add mining reward? (-1 means rewards are disabled)
|
||||
if stateReward.isSome and stateReward.get >= 0:
|
||||
|
@ -323,7 +321,7 @@ proc exec(ctx: var TransContext,
|
|||
)
|
||||
|
||||
if fork >= FkCancun:
|
||||
result.result.blobGasUsed = Opt.some blobGasUsed
|
||||
result.result.blobGasUsed = Opt.some vmState.blobGasUsed
|
||||
if ctx.env.currentExcessBlobGas.isSome:
|
||||
result.result.currentExcessBlobGas = ctx.env.currentExcessBlobGas
|
||||
elif ctx.env.parentExcessBlobGas.isSome and ctx.env.parentBlobGasUsed.isSome:
|
||||
|
|
Loading…
Reference in New Issue