nimbus-eth1/tests/test_tracer_json.nim

# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified,
# or distributed except according to those terms.

import
  std/[json, os, sets, tables, strutils],
  stew/byteutils,
  chronicles,
  unittest2,
  results,
  ./test_helpers,
  ../nimbus/sync/protocol/snap/snap_types,
  ../nimbus/db/aristo/aristo_merge,
  ../nimbus/db/kvt/kvt_utils,
  ../nimbus/db/aristo,
  ../nimbus/[tracer, evm/types],
  ../nimbus/common/common

proc setErrorLevel {.used.} =
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

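# Note: chronicles supports changing the log level at runtime only when it is
# compiled with `-d:chronicles_runtime_filtering`; otherwise the proc above
# compiles to a no-op.
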
proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
  ## Hack for `Aristo` pre-loading using the `snap` protocol proof-loader
  var
    proof: seq[SnapProof] # for pre-loading MPT
    predRoot: Hash256 # from predecessor header
    txRoot: Hash256 # header with block number `num`
    rcptRoot: Hash256 # ditto
  let
    adb = cdb.ctx.getColumn(CtGeneric).backend.toAristo
    kdb = cdb.newKvt.backend.toAristo

  # Fill KVT and collect `proof` data
  for (k,v) in jKvp.pairs:
    let
      key = hexToSeqByte(k)
      val = hexToSeqByte(v.getStr())
    if key.len == 32:
      doAssert key == val.keccakHash.data
      if val != @[0x80u8]: # Exclude empty item
        proof.add SnapProof(val)
    else:
      if key[0] == 0:
        try:
          # Pull our particular header fields (if possible)
          let header = rlp.decode(val, BlockHeader)
          if header.number == num:
            txRoot = header.txRoot
            rcptRoot = header.receiptsRoot
          elif header.number == num-1:
            predRoot = header.stateRoot
        except RlpError:
          discard
    check kdb.put(key, val).isOk

  # Install sub-trie roots onto production db
  if txRoot.isValid:
    doAssert adb.mergeProof(txRoot, VertexID(CtTxs)).isOk
  if rcptRoot.isValid:
    doAssert adb.mergeProof(rcptRoot, VertexID(CtReceipts)).isOk
  doAssert adb.mergeProof(predRoot, VertexID(CtAccounts)).isOk

  # Set up production MPT
  doAssert adb.mergeProof(proof).isOk

  # Remove locks so that hashify can re-assign changed nodes
  adb.top.final.pPrf.clear
  adb.top.final.fRpp.clear

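# For orientation, the `state` fixture is a flat hex-to-hex map; a sketch of
# the two key shapes the loader above distinguishes (hypothetical values, for
# illustration only):
#
#   "0x<32-byte keccak of the value>": "0x<rlp MPT node>"     -- proof node
#   "0x00<header lookup suffix>":      "0x<rlp block header>" -- header record
#
# 32-byte keys must hash to their value and are collected as `SnapProof`
# items; keys whose first byte is zero may RLP-decode to a `BlockHeader`,
# from which the roots for blocks `num` and `num-1` are captured.
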
# use tracerTestGen.nim to generate additional test data
proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) =
  setErrorLevel()

  var
    blockNumberHex = node["blockNumber"].getStr()
    blockNumber = parseHexInt(blockNumberHex).uint64
    com = CommonRef.new(memoryDB, chainConfigForNetwork(MainNet))
    state = node["state"]
    receipts = node["receipts"]

  # disable POS/post Merge feature
  com.setTTD Opt.none(DifficultyInt)

  # Import raw data into database
  # Some hack for `Aristo` using the `snap` protocol proof-loader
  memoryDB.preLoadAristoDb(state, blockNumber)
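  # Without the preload the in-memory Aristo DB could not resolve the
  # account, transaction and receipt tries that the tracer calls below
  # traverse.
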
  var blk = com.db.getEthBlock(blockNumber)

  let txTraces = traceTransactions(com, blk.header, blk.transactions)
  let stateDump = dumpBlockState(com, blk)
  let blockTrace = traceBlock(com, blk, {DisableState})

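  # Three artifacts are checked against the fixture: per-transaction EVM
  # traces, a dump of the post-block state, and a whole-block trace with
  # per-step state capture disabled via the `DisableState` flag.
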
check node["txTraces"] == txTraces
check node["stateDump"] == stateDump
check node["blockTrace"] == blockTrace
  for i in 0 ..< receipts.len:
    let receipt = receipts[i]
    let stateDiff = txTraces[i]["stateDiff"]
    check receipt["root"].getStr().toLowerAscii() == stateDiff["afterRoot"].getStr().toLowerAscii()

proc testFixtureAristo(node: JsonNode, testStatusIMPL: var TestStatus) =
  node.testFixtureImpl(testStatusIMPL, newCoreDbRef AristoDbMemory)

proc tracerJsonMain*() =
  suite "tracer json tests for Aristo DB":
    jsonTest("TracerTests", testFixtureAristo)

when isMainModule:
  tracerJsonMain()
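
# To run this suite stand-alone (a sketch; the project's nimble config may
# add required compiler flags):
#
#   nim c -r tests/test_tracer_json.nim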