# nimbus-eth1/tests/test_rpc.nim

# Nimbus
# Copyright (c) 2018-2025 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
  chronicles,
  std/[json, typetraits, sequtils],
  asynctest,
  web3/eth_api,
  stew/byteutils,
  json_rpc/[rpcserver, rpcclient],
  eth/[p2p, rlp, trie/hexary_proof_verification],
  eth/common/[transaction_utils, addresses],
  ../hive_integration/nodocker/engine/engine_client,
  ../nimbus/[constants, transaction, config, version],
  ../nimbus/db/[ledger, storage_types],
  ../nimbus/sync/protocol,
  ../nimbus/core/[tx_pool, chain, pow/difficulty],
  ../nimbus/utils/utils,
  ../nimbus/[common, rpc],
  ../nimbus/rpc/rpc_types,
  ../nimbus/beacon/web3_eth_conv,
  ./test_helpers,
  ./macro_assembler,
  ./test_block_fixture

type
  Hash32 = common.Hash32

  TestEnv = object
    conf     : NimbusConf
    com      : CommonRef
    txPool   : TxPoolRef
    server   : RpcHttpServer
    client   : RpcHttpClient
    chain    : ForkedChainRef
    ctx      : EthContext
    node     : EthereumNode
    txHash   : Hash32
    blockHash: Hash32
    nonce    : uint64
    chainId  : ChainId

const
  zeroHash = hash32"0x0000000000000000000000000000000000000000000000000000000000000000"
  emptyCodeHash = hash32"0xc5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
  emptyStorageHash = hash32"0x56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421"
  genesisFile = "tests/customgenesis/cancun123.json"
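  # Minimal test contract: stores 0xDEADBEEF at memory offset 0 and returns
  # the 4 meaningful bytes (offset 28..31 of the 32-byte word)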
  contractCode = evmByteCode:
    Push4 "0xDEADBEEF" # PUSH
    Push1 "0x00"       # MSTORE AT 0x00
    Mstore
    Push1 "0x04"       # RETURN LEN
    Push1 "0x1C"       # RETURN OFFSET at 28
    Return
  keyStore = "tests/keystore"
  signer = address"0x0e69cde81b1aa07a45c32c6cd85d67229d36bb1b"
  contractAddress = address"0xa3b2222afa5c987da6ef773fde8d01b9f23d481f"
  extraAddress = address"0x597176e9a64aad0845d83afdaf698fbeff77703b"
  regularAcc = address"0x0000000000000000000000000000000000000001"
  contractAccWithStorage = address"0x0000000000000000000000000000000000000002"
  contractAccNoStorage = address"0x0000000000000000000000000000000000000003"
  feeRecipient = address"0000000000000000000000000000000000000212"
  prevRandao = Bytes32 EMPTY_UNCLE_HASH # it can be any valid hash
  oneETH = 1.u256 * 1_000_000_000.u256 * 1_000_000_000.u256
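
# Verifies the returned account proof against a trusted state root: the MPT
# key is keccak256(address) and the value is the RLP-encoded account record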
proc verifyAccountProof(trustedStateRoot: Hash32, res: ProofResponse): MptProofVerificationResult =
  let
    key = toSeq(keccak256(res.address.data).data)
    value = rlp.encode(Account(
      nonce: res.nonce.uint64,
      balance: res.balance,
      storageRoot: res.storageHash,
      codeHash: res.codeHash))

  verifyMptProof(
    seq[seq[byte]](res.accountProof),
    trustedStateRoot,
    key,
    value)
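
# Verifies a single storage-slot proof against the account's storage root:
# the MPT key is keccak256 of the big-endian slot key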
proc verifySlotProof(trustedStorageRoot: Hash32, slot: StorageProof): MptProofVerificationResult =
  let
    key = toSeq(keccak256(toBytesBE(slot.key)).data)
    value = rlp.encode(slot.value)

  verifyMptProof(
    seq[seq[byte]](slot.proof),
    trustedStorageRoot,
    key,
    value)
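
# Persists the fixture block directly (presumably mainnet block 4514995,
# judging by the helper names) so that log- and receipt-related tests have a
# realistic block to query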
proc persistFixtureBlock(chainDB: CoreDbTxRef) =
  let header = getBlockHeader4514995()
  # Manually inserting header to avoid any parent checks
  discard chainDB.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
  chainDB.addBlockNumberToHashLookup(header.number, header.blockHash)
  chainDB.persistTransactions(header.number, header.txRoot, getBlockBody4514995().transactions)
  chainDB.persistReceipts(header.receiptsRoot, getReceipts4514995())

proc setupConfig(): NimbusConf =
  makeConfig(@[
    "--custom-network:" & genesisFile
  ])

proc setupCom(conf: NimbusConf): CommonRef =
  CommonRef.new(
    newCoreDbRef DefaultDbMemory,
    nil,
    conf.networkId,
    conf.networkParams
  )

proc setupClient(port: Port): RpcHttpClient =
  let client = newRpcHttpClient()
  waitFor client.connect("127.0.0.1", port, false)
  return client

proc close(env: TestEnv) =
  waitFor env.client.close()
  waitFor env.server.closeWait()
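
# Builds and EIP-155-signs a legacy transaction, using (and advancing) the
# environment's locally tracked nonce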
func makeTx(
    env: var TestEnv,
    signerKey: PrivateKey,
    recipient: addresses.Address,
    amount: UInt256,
    gasPrice: GasInt,
    payload: openArray[byte] = []
): Transaction =
  const
    gasLimit = 70000.GasInt

  let tx = Transaction(
    txType: TxLegacy,
    chainId: env.chainId,
    nonce: AccountNonce(env.nonce),
    gasPrice: gasPrice,
    gasLimit: gasLimit,
    to: Opt.some(recipient),
    value: amount,
    payload: @payload,
  )

  inc env.nonce
  signTransaction(tx, signerKey, eip155 = true)
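
# Spins up a complete in-memory node: a genesis with pre-funded test
# accounts, a ForkedChainRef, a tx pool, and an RPC server/client pair bound
# to an ephemeral local port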
proc setupEnv(envFork: HardFork = MergeFork): TestEnv =
  doAssert(envFork >= MergeFork)

  let
    conf = setupConfig()

  conf.networkParams.genesis.alloc[contractAddress] = GenesisAccount(code: contractCode)
  conf.networkParams.genesis.alloc[signer] = GenesisAccount(balance: oneETH)

  # Test data created for eth_getProof tests
  conf.networkParams.genesis.alloc[regularAcc] = GenesisAccount(
    balance: 2_000_000_000.u256,
    nonce: 1.uint64)
  conf.networkParams.genesis.alloc[contractAccWithStorage] = GenesisAccount(
    balance: 1_000_000_000.u256,
    nonce: 2.uint64,
    code: contractCode,
    storage: {
      0.u256: 1234.u256,
      1.u256: 2345.u256,
    }.toTable)
  conf.networkParams.genesis.alloc[contractAccNoStorage] = GenesisAccount(code: contractCode)

  if envFork >= Shanghai:
    conf.networkParams.config.shanghaiTime = Opt.some(0.EthTime)
  if envFork >= Cancun:
    conf.networkParams.config.cancunTime = Opt.some(0.EthTime)
  if envFork >= Prague:
    conf.networkParams.config.pragueTime = Opt.some(0.EthTime)

  let
    com = setupCom(conf)
    chain = ForkedChainRef.init(com)
    txPool = TxPoolRef.new(chain)

  let
    server = newRpcHttpServerWithParams("127.0.0.1:0").valueOr:
      echo "Failed to create rpc server: ", error
      quit(QuitFailure)
    serverApi = newServerAPI(txPool)
    client = setupClient(server.localAddress[0].port)
    ctx = newEthContext()
    node = setupEthNode(conf, ctx, eth)

  ctx.am.loadKeystores(keyStore).isOkOr:
    debugEcho error
    quit(QuitFailure)

  let acc1 = ctx.am.getAccount(signer).tryGet()
  ctx.am.unlockAccount(signer, acc1.keystore["password"].getStr()).isOkOr:
    debugEcho error
    quit(QuitFailure)

  setupServerAPI(serverApi, server, ctx)
  setupCommonRpc(node, conf, server)
  server.start()

  TestEnv(
    conf   : conf,
    com    : com,
    txPool : txPool,
    server : server,
    client : client,
    chain  : chain,
    ctx    : ctx,
    node   : node,
    chainId: conf.networkParams.config.chainId,
  )
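
# Submits two transactions from the unlocked signer, assembles them into
# block 1 via the tx pool, imports that block into the forked chain, and
# persists the fixture block alongside it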
proc generateBlock(env: var TestEnv) =
  let
    com = env.com
    xp = env.txPool
    ctx = env.ctx
    txFrame = com.db.baseTxFrame()
    acc = ctx.am.getAccount(signer).tryGet()
    tx1 = env.makeTx(acc.privateKey, zeroAddress, 1.u256, 30_000_000_000'u64)
    tx2 = env.makeTx(acc.privateKey, zeroAddress, 2.u256, 30_000_000_100'u64)
    chain = env.chain

  doAssert xp.addTx(tx1).isOk
  doAssert xp.addTx(tx2).isOk
  doAssert(xp.len == 2)

  # generate block
  xp.prevRandao = prevRandao
  xp.feeRecipient = feeRecipient
  xp.timestamp = EthTime.now()

  let bundle = xp.assembleBlock().valueOr:
    debugEcho error
    quit(QuitFailure)

  let blk = bundle.blk
  doAssert(blk.transactions.len == 2)

  # import block
  chain.importBlock(blk).isOkOr:
    debugEcho error
    quit(QuitFailure)

  xp.removeNewBlockTxs(blk)

  txFrame.persistFixtureBlock()

  env.txHash = tx1.rlpHash
  env.blockHash = blk.header.blockHash
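
# Generates client-side async stubs for the signatures below; the eth_*
# calls used in the tests come from the web3/eth_api import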
createRpcSigsFromNim(RpcClient):
  proc web3_clientVersion(): string
  proc web3_sha3(data: seq[byte]): Hash32
  proc net_version(): string
  proc net_listening(): bool
  proc net_peerCount(): Quantity

proc rpcMain*() =
  suite "Remote Procedure Calls":
    var env = setupEnv()
    env.generateBlock()

    let
      client = env.client
      node = env.node
      com = env.com

    test "web3_clientVersion":
      let res = await client.web3_clientVersion()
      check res == ClientId

    test "web3_sha3":
      let data = @(NimbusName.toOpenArrayByte(0, NimbusName.len-1))
      let res = await client.web3_sha3(data)
      let hash = keccak256(data)
      check hash == res

    test "net_version":
      let res = await client.net_version()
      check res == $env.conf.networkId

    test "net_listening":
      let res = await client.net_listening()
      let listening = node.peerPool.connectedNodes.len < env.conf.maxPeers
      check res == listening

    test "net_peerCount":
      let res = await client.net_peerCount()
      let peerCount = node.peerPool.connectedNodes.len
      check res == w3Qty(peerCount)

    test "eth_chainId":
      let res = await client.eth_chainId()
      check res == w3Qty(distinctBase(com.chainId))

    test "eth_syncing":
      let res = await client.eth_syncing()
      if res.syncing == false:
        let syncing = node.peerPool.connectedNodes.len > 0
        check syncing == false
      else:
        check com.syncStart == res.syncObject.startingBlock.uint64
        check com.syncCurrent == res.syncObject.currentBlock.uint64
        check com.syncHighest == res.syncObject.highestBlock.uint64
test "eth_gasPrice":
let res = await client.eth_gasPrice()
check res == w3Qty(30_000_000_050) # Avg of `unsignedTx1` / `unsignedTx2`
test "eth_accounts":
let res = await client.eth_accounts()
check signer in res
check contractAddress in res
check extraAddress in res
test "eth_blockNumber":
let res = await client.eth_blockNumber()
check res == w3Qty(0x1'u64)
test "eth_getBalance":
let a = await client.eth_getBalance(signer, blockId(1'u64))
check a == 998739999997899997'u256
let b = await client.eth_getBalance(regularAcc, blockId(1'u64))
check b == 2_000_000_000.u256
let c = await client.eth_getBalance(contractAccWithStorage, blockId(1'u64))
check c == 1_000_000_000.u256
test "eth_getStorageAt":
let res = await client.eth_getStorageAt(contractAccWithStorage, 1.u256, blockId(1'u64))
check FixedBytes[32](2345.u256.toBytesBE) == res
test "eth_getTransactionCount":
let res = await client.eth_getTransactionCount(signer, blockId(1'u64))
check res == w3Qty(2'u64)
test "eth_getBlockTransactionCountByHash":
let res = await client.eth_getBlockTransactionCountByHash(env.blockHash)
check res == w3Qty(2'u64)
test "eth_getBlockTransactionCountByNumber":
let res = await client.eth_getBlockTransactionCountByNumber(blockId(1'u64))
check res == w3Qty(2'u64)
test "eth_getUncleCountByBlockHash":
let res = await client.eth_getUncleCountByBlockHash(env.blockHash)
check res == w3Qty(0'u64)
test "eth_getUncleCountByBlockNumber":
let res = await client.eth_getUncleCountByBlockNumber(blockId(0'u64))
check res == w3Qty(0'u64)
test "eth_getCode":
let res = await client.eth_getCode(contractAddress, blockId(1'u64))
check res.len == contractCode.len
test "eth_sign":
let msg = "hello world"
let msgBytes = @(msg.toOpenArrayByte(0, msg.len-1))
expect JsonRpcError:
discard await client.eth_sign(contractAddress, msgBytes)
let res = await client.eth_sign(signer, msgBytes)
let sig = Signature.fromRaw(res).tryGet()
# now let us try to verify signature
let msgData = "\x19Ethereum Signed Message:\n" & $msg.len & msg
let msgDataBytes = @(msgData.toOpenArrayByte(0, msgData.len-1))
let msgHash = await client.web3_sha3(msgDataBytes)
let pubkey = recover(sig, SkMessage(msgHash.data)).tryGet()
let recoveredAddr = pubkey.toCanonicalAddress()
check recoveredAddr == signer # verified
test "eth_signTransaction, eth_sendTransaction":
let unsignedTx = TransactionArgs(
`from`: Opt.some(signer),
to: Opt.some(contractAddress),
gas: Opt.some(w3Qty(100000'u)),
gasPrice: Opt.none(Quantity),
value: Opt.some(100.u256),
nonce: Opt.some(2.Quantity)
)
let signedTxBytes = await client.eth_signTransaction(unsignedTx)
let signedTx = rlp.decode(signedTxBytes, Transaction)
check signer == signedTx.recoverSender().expect("valid signature") # verified
let txHash = await client.eth_sendTransaction(unsignedTx)
const expHash = hash32"0x929d48788096f26cfff70296b16c9974e6b1bf693c0121742e8527bb92b6d074"
check txHash == expHash
test "eth_sendRawTransaction":
let unsignedTx = TransactionArgs(
`from`: Opt.some(signer),
to: Opt.some(contractAddress),
gas: Opt.some(w3Qty(100001'u)),
gasPrice: Opt.none(Quantity),
value: Opt.some(100.u256),
nonce: Opt.some(3.Quantity)
)
let signedTxBytes = await client.eth_signTransaction(unsignedTx)
let signedTx = rlp.decode(signedTxBytes, Transaction)
check signer == signedTx.recoverSender().expect("valid signature") # verified
let txHash = await client.eth_sendRawTransaction(signedTxBytes)
const expHash = hash32"0xeea79669dd904921d203fb720c7228f5c7854e5a768248f494f36fa68c83c191"
check txHash == expHash
test "eth_call":
let ec = TransactionArgs(
`from`: Opt.some(signer),
to: Opt.some(contractAddress),
gas: Opt.some(w3Qty(100000'u)),
gasPrice: Opt.none(Quantity),
value: Opt.some(100.u256)
)
let res = await client.eth_call(ec, "latest")
check res == hexToSeqByte("deadbeef")
test "eth_estimateGas":
let ec = TransactionArgs(
`from`: Opt.some(signer),
to: Opt.some(extraAddress),
gas: Opt.some(w3Qty(42000'u)),
gasPrice: Opt.some(w3Qty(100'u)),
value: Opt.some(100.u256)
)
let res = await client.eth_estimateGas(ec)
check res == w3Qty(21000'u64)
test "eth_getBlockByHash":
let res = await client.eth_getBlockByHash(env.blockHash, true)
check res.isNil.not
check res.hash == env.blockHash
let res2 = await client.eth_getBlockByHash(env.txHash, true)
check res2.isNil
test "eth_getBlockByNumber":
let res = await client.eth_getBlockByNumber("latest", true)
check res.isNil.not
check res.hash == env.blockHash
let res2 = await client.eth_getBlockByNumber($1, true)
check res2.isNil
test "eth_getTransactionByHash":
let res = await client.eth_getTransactionByHash(env.txHash)
check res.isNil.not
check res.blockNumber.get() == w3Qty(1'u64)
let res2 = await client.eth_getTransactionByHash(env.blockHash)
check res2.isNil
test "eth_getTransactionByBlockHashAndIndex":
let res = await client.eth_getTransactionByBlockHashAndIndex(env.blockHash, w3Qty(0'u64))
check res.isNil.not
check res.blockNumber.get() == w3Qty(1'u64)
let res2 = await client.eth_getTransactionByBlockHashAndIndex(env.blockHash, w3Qty(3'u64))
check res2.isNil
let res3 = await client.eth_getTransactionByBlockHashAndIndex(env.txHash, w3Qty(3'u64))
check res3.isNil
test "eth_getTransactionByBlockNumberAndIndex":
let res = await client.eth_getTransactionByBlockNumberAndIndex("latest", w3Qty(1'u64))
check res.isNil.not
check res.blockNumber.get() == w3Qty(1'u64)
let res2 = await client.eth_getTransactionByBlockNumberAndIndex("latest", w3Qty(3'u64))
check res2.isNil
test "eth_getBlockReceipts":
let recs = await client.eth_getBlockReceipts(blockId(1'u64))
check recs.isSome
if recs.isSome:
let receipts = recs.get
check receipts.len == 2
check receipts[0].transactionIndex == 0.Quantity
check receipts[1].transactionIndex == 1.Quantity
test "eth_getTransactionReceipt":
let res = await client.eth_getTransactionReceipt(env.txHash)
check res.isNil.not
check res.blockNumber == w3Qty(1'u64)
let res2 = await client.eth_getTransactionReceipt(env.blockHash)
check res2.isNil
test "eth_getUncleByBlockHashAndIndex":
let res = await client.eth_getUncleByBlockHashAndIndex(env.blockHash, w3Qty(0'u64))
check res.isNil
let res2 = await client.eth_getUncleByBlockHashAndIndex(env.blockHash, w3Qty(1'u64))
check res2.isNil
let res3 = await client.eth_getUncleByBlockHashAndIndex(env.txHash, w3Qty(0'u64))
check res3.isNil
test "eth_getUncleByBlockNumberAndIndex":
let res = await client.eth_getUncleByBlockNumberAndIndex("latest", w3Qty(0'u64))
check res.isNil
let res2 = await client.eth_getUncleByBlockNumberAndIndex("latest", w3Qty(1'u64))
check res2.isNil
test "eth_getLogs by blockhash, no filters":
let testHeader = getBlockHeader4514995()
let testHash = testHeader.blockHash
let filterOptions = FilterOptions(
blockHash: Opt.some(testHash),
topics: @[]
)
let logs = await client.eth_getLogs(filterOptions)
check:
len(logs) == 54
var i = 0
for l in logs:
check:
l.blockHash.isSome()
l.blockHash.get() == testHash
l.logIndex.get() == w3Qty(i.uint64)
inc i
test "eth_getLogs by blockhash, filter logs at specific positions":
let testHeader = getBlockHeader4514995()
let testHash = testHeader.blockHash
let topic = bytes32"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"
let topic1 = bytes32"0x000000000000000000000000fdc183d01a793613736cd40a5a578f49add1772b"
let filterOptions = FilterOptions(
blockHash: Opt.some(testHash),
topics: @[
TopicOrList(kind: slkList, list: @[topic]),
TopicOrList(kind: slkNull),
TopicOrList(kind: slkList, list: @[topic1])
]
)
let logs = await client.eth_getLogs(filterOptions)
check:
len(logs) == 1
test "eth_getLogs by blockhash, filter logs at specific postions with or options":
let testHeader = getBlockHeader4514995()
let testHash = testHeader.blockHash
let topic = bytes32"0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef"
let topic1 = bytes32"0xa64da754fccf55aa65a1f0128a648633fade3884b236e879ee9f64c78df5d5d7"
let topic2 = bytes32"0x000000000000000000000000e16c02eac87920033ac72fc55ee1df3151c75786"
let topic3 = bytes32"0x000000000000000000000000b626a5facc4de1c813f5293ec3be31979f1d1c78"
let filterOptions = FilterOptions(
blockHash: Opt.some(testHash),
topics: @[
TopicOrList(kind: slkList, list: @[topic, topic1]),
TopicOrList(kind: slkList, list: @[topic2, topic3])
]
)
let logs = await client.eth_getLogs(filterOptions)
check:
len(logs) == 2
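
    # The eth_getProof responses below are checked both field-by-field and by
    # verifying the returned account/storage MPT proofs against the state
    # root of the queried block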
test "eth_getProof - Non existent account and storage slots":
let blockData = await client.eth_getBlockByNumber("latest", true)
block:
# account doesn't exist
let
address = address"0x0000000000000000000000000000000000000004"
proofResponse = await client.eth_getProof(address, @[], blockId(1'u64))
storageProof = proofResponse.storageProof
check:
proofResponse.address == address
verifyAccountProof(blockData.stateRoot, proofResponse).isMissing()
proofResponse.balance == 0.u256
proofResponse.codeHash == zeroHash
proofResponse.nonce == w3Qty(0.uint64)
proofResponse.storageHash == zeroHash
storageProof.len() == 0
block:
# account exists but requested slots don't exist
let
address = regularAcc
slot1Key = 0.u256
slot2Key = 1.u256
proofResponse = await client.eth_getProof(address, @[slot1Key, slot2Key], blockId(1'u64))
storageProof = proofResponse.storageProof
check:
proofResponse.address == address
verifyAccountProof(blockData.stateRoot, proofResponse).isValid()
proofResponse.balance == 2_000_000_000.u256
proofResponse.codeHash == emptyCodeHash
proofResponse.nonce == w3Qty(1.uint64)
proofResponse.storageHash == emptyStorageHash
storageProof.len() == 2
storageProof[0].key == slot1Key
storageProof[0].proof.len() == 0
storageProof[0].value == 0.u256
storageProof[1].key == slot2Key
storageProof[1].proof.len() == 0
storageProof[1].value == 0.u256
block:
# contract account with no storage slots
let
address = contractAccNoStorage
slot1Key = 0.u256 # Doesn't exist
proofResponse = await client.eth_getProof(address, @[slot1Key], blockId(1'u64))
storageProof = proofResponse.storageProof
check:
proofResponse.address == address
verifyAccountProof(blockData.stateRoot, proofResponse).isValid()
proofResponse.balance == 0.u256
proofResponse.codeHash == hash32"0x09044b55d7aba83cb8ac3d2c9c8d8bcadbfc33f06f1be65e8cc1e4ddab5f3074"
proofResponse.nonce == w3Qty(0.uint64)
proofResponse.storageHash == emptyStorageHash
storageProof.len() == 1
storageProof[0].key == slot1Key
storageProof[0].proof.len() == 0
storageProof[0].value == 0.u256
test "eth_getProof - Existing accounts and storage slots":
let blockData = await client.eth_getBlockByNumber("latest", true)
block:
# contract account with storage slots
let
address = contractAccWithStorage
slot1Key = 0.u256
slot2Key = 1.u256
slot3Key = 2.u256 # Doesn't exist
proofResponse = await client.eth_getProof(address, @[slot1Key, slot2Key, slot3Key], blockId(1'u64))
storageProof = proofResponse.storageProof
check:
proofResponse.address == address
verifyAccountProof(blockData.stateRoot, proofResponse).isValid()
proofResponse.balance == 1_000_000_000.u256
proofResponse.codeHash == hash32"0x09044b55d7aba83cb8ac3d2c9c8d8bcadbfc33f06f1be65e8cc1e4ddab5f3074"
proofResponse.nonce == w3Qty(2.uint64)
proofResponse.storageHash == hash32"0x2ed06ec37dad4cd8c8fc1a1172d633a8973987fa6995b14a7c0a50c0e8d1a9c3"
storageProof.len() == 3
storageProof[0].key == slot1Key
storageProof[0].proof.len() > 0
storageProof[0].value == 1234.u256
storageProof[1].key == slot2Key
storageProof[1].proof.len() > 0
storageProof[1].value == 2345.u256
storageProof[2].key == slot3Key
storageProof[2].proof.len() > 0
storageProof[2].value == 0.u256
verifySlotProof(proofResponse.storageHash, storageProof[0]).isValid()
verifySlotProof(proofResponse.storageHash, storageProof[1]).isValid()
verifySlotProof(proofResponse.storageHash, storageProof[2]).isMissing()
block:
# externally owned account
let
address = regularAcc
proofResponse = await client.eth_getProof(address, @[], blockId(1'u64))
storageProof = proofResponse.storageProof
check:
proofResponse.address == address
verifyAccountProof(blockData.stateRoot, proofResponse).isValid()
proofResponse.balance == 2_000_000_000.u256
proofResponse.codeHash == emptyCodeHash
proofResponse.nonce == w3Qty(1.uint64)
proofResponse.storageHash == emptyStorageHash
storageProof.len() == 0
test "eth_getProof - Multiple blocks":
let blockData = await client.eth_getBlockByNumber("latest", true)
block:
# block 1 - account has balance, code and storage
let
address = contractAccWithStorage
slot2Key = 1.u256
proofResponse = await client.eth_getProof(address, @[slot2Key], blockId(1'u64))
storageProof = proofResponse.storageProof
check:
proofResponse.address == address
verifyAccountProof(blockData.stateRoot, proofResponse).isValid()
proofResponse.balance == 1_000_000_000.u256
proofResponse.codeHash == hash32"0x09044b55d7aba83cb8ac3d2c9c8d8bcadbfc33f06f1be65e8cc1e4ddab5f3074"
proofResponse.nonce == w3Qty(2.uint64)
proofResponse.storageHash == hash32"0x2ed06ec37dad4cd8c8fc1a1172d633a8973987fa6995b14a7c0a50c0e8d1a9c3"
storageProof.len() == 1
verifySlotProof(proofResponse.storageHash, storageProof[0]).isValid()
env.close()

when isMainModule:
  rpcMain()