Mirror of https://github.com/status-im/nimbus-eth1.git, commit 01ca415721:
Currently, computed hash keys are stored in a column family separate from the MPT data they're generated from - this has several disadvantages:

* A lot of space is wasted because the lookup key (`RootedVertexID`) is repeated in both tables - this is 30% of the `AriKey` content!
* rocksdb must maintain in-memory bloom filters and LRU caches for said keys, doubling its "minimal efficient cache size"
* An extra disk traversal must be made to check for the existence of a cached hash key
* The number of files on disk doubles, since each column family is its own set of files

Here, the two CFs are joined such that both key and data are stored in `AriVtx` (a sketch of the joined layout follows below). This means:

* we save ~30% disk space on repeated lookup keys
* we save ~2gb of memory overhead that can be used to cache data instead of indices
* we can skip storing hash keys for MPT leaf nodes - these are trivial to compute and waste a lot of space - previously they had to be present in the `AriKey` CF to avoid having to look in two tables on the happy path
* there is a small increase in write amplification because when a hash value is updated for a branch node, we must write both key and branch data - previously we would write only the key
* there's a small shift in CPU usage - instead of performing lookups in the database, hashes for leaf nodes are (re)computed on the fly
* we can return to slightly smaller on-disk SST files since there are fewer of them, which should reduce disk traffic a bit

Internally, there are also other advantages:

* when clearing keys, we no longer have to store a zero hash in memory - instead, we deduce staleness of the cached key from the presence of an updated VertexRef - this saves ~1gb of memory overhead during import
* the hash key cache becomes dedicated to branch keys, since leaf keys are no longer stored in memory, reducing churn
* key computation is a lot faster thanks to the skipped second disk traversal - a key computation for mainnet can be completed in 11 hours instead of ~2 days (!) thanks to better cache usage and less read amplification - with additional improvements to the on-disk format, we can probably get rid of the initial full-traversal method of seeding the key cache on first start after import

All in all, this PR reduces the size of a mainnet database from 160gb to 110gb and reduces the peak memory footprint during import by ~1-2gb.
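The following is a minimal sketch of the joined-record idea described above, not the actual Aristo encoding: the names `Record`, `store`, `lookupKey` and `keccak256Stub` are invented here for illustration. It shows a single table whose value carries both the optional cached hash key and the vertex data, with leaf keys recomputed on the fly instead of being stored.

```nim
# Minimal sketch of the joined layout described above. All names here
# (`Record`, `store`, `lookupKey`, `keccak256Stub`) are hypothetical
# and do NOT come from the Aristo sources.
import std/[tables, options]

type
  VertexKind = enum Leaf, Branch
  Vertex = object
    kind: VertexKind
    data: seq[byte]                # serialized MPT node

  Record = object
    key: Option[array[32, byte]]   # cached hash key; omitted for leaves
    vtx: Vertex

var ariVtx: Table[uint64, Record]  # one table: vertex id -> key + data

proc keccak256Stub(data: seq[byte]): array[32, byte] =
  ## Stand-in for the real keccak256; returns zeros for illustration.
  discard

proc store(vid: uint64, vtx: Vertex, key = none(array[32, byte])) =
  # One write covers both the vertex and, for branches, its hash key,
  # instead of one write per column family.
  ariVtx[vid] = Record(key: key, vtx: vtx)

proc lookupKey(vid: uint64): array[32, byte] =
  # Single table traversal: the record carries the cached key for
  # branches; leaf keys are trivially recomputed from the payload.
  let rec = ariVtx[vid]
  if rec.key.isSome:
    rec.key.get
  else:
    keccak256Stub(rec.vtx.data)

when isMainModule:
  store(1, Vertex(kind: Leaf, data: @[0x80u8]))  # no key stored for leaves
  discard lookupKey(1)  # falls back to recomputing the leaf hash
```

In this toy model, invalidating a cached branch key amounts to rewriting the record with the updated vertex and `key` unset, which mirrors the staleness-from-updated-VertexRef argument above: no zero hash ever needs to be stored.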
178 lines | 6.0 KiB | Nim
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified,
# or distributed except according to those terms.

import
  std/[json, os, tables, strutils],
  stew/byteutils,
  chronicles,
  unittest2,
  results,
  ./test_helpers,
  ../nimbus/db/aristo,
  ../nimbus/db/aristo/[aristo_desc, aristo_layers, aristo_part],
  ../nimbus/db/aristo/aristo_part/part_debug,
  ../nimbus/db/kvt/kvt_utils,
  ../nimbus/[tracer, evm/types],
  ../nimbus/common/common

proc setErrorLevel {.used.} =
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
  ## Hack for `Aristo` pre-loading using the `snap` protocol proof-loader
  const
    info = "preLoadAristoDb"
  var
    proof: seq[seq[byte]]      # for pre-loading MPT
    predRoot: Hash32           # from predecessor header
    txRoot: Hash32             # header with block number `num`
    rcptRoot: Hash32           # ditto
  let
    adb = cdb.ctx.mpt          # `Aristo` db
    kdb = cdb.ctx.kvt          # `Kvt` db
    ps = PartStateRef.init adb # Partial DB descriptor

  # Fill KVT and collect `proof` data
  for (k,v) in jKvp.pairs:
    let
      key = hexToSeqByte(k)
      val = hexToSeqByte(v.getStr())
    if key.len == 32:
      doAssert key == val.keccak256.data
      if val != @[0x80u8]: # Exclude empty item
        proof.add val
    else:
      if key[0] == 0:
        try:
          # Pull out our particular header fields (if possible)
          let header = rlp.decode(val, Header)
          if header.number == num:
            txRoot = header.txRoot
            rcptRoot = header.receiptsRoot
          elif header.number == num-1:
            predRoot = header.stateRoot
        except RlpError:
          discard
      check kdb.put(key, val).isOk

  # Set up production MPT
  ps.partPut(proof, AutomaticPayload).isOkOr:
    raiseAssert info & ": partPut => " & $error

  # TODO code needs updating after removal of generic payloads
  # # Handle transaction sub-tree
  # if txRoot.isValid:
  #   var txs: seq[Transaction]
  #   for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree txRoot):
  #     let
  #       inx = key.path.to(UInt256).truncate(uint)
  #       tx = rlp.decode(pyl.rawBlob, Transaction)
  #     #
  #     # FIXME: Might this be a bug in the test data?
  #     #
  #     # The single item test key is always `128`. For non-single test
  #     # lists, the keys are `1`,`2`, ..,`N`, `128` (some single digit
  #     # number `N`.)
  #     #
  #     # Unless the `128` item value is put at the start of the argument
  #     # list `txs[]` for `persistTransactions()`, the `tracer` module
  #     # will throw an exception at
  #     # `doAssert(transactions.calcTxRoot == header.txRoot)` in the
  #     # function `traceTransactionImpl()`.
  #     #
  #     if (inx and 0x80) != 0:
  #       txs = @[tx] & txs
  #     else:
  #       txs.add tx
  #   cdb.persistTransactions(num, txRoot, txs)

  # # Handle receipts sub-tree
  # if rcptRoot.isValid:
  #   var rcpts: seq[Receipt]
  #   for (key,pyl) in adb.rightPairs LeafTie(root: ps.partGetSubTree rcptRoot):
  #     let
  #       inx = key.path.to(UInt256).truncate(uint)
  #       rcpt = rlp.decode(pyl.rawBlob, Receipt)
  #     # FIXME: See comment at `txRoot` section.
  #     if (inx and 0x80) != 0:
  #       rcpts = @[rcpt] & rcpts
  #     else:
  #       rcpts.add rcpt
  #   cdb.persistReceipts(rcptRoot, rcpts)

  # # Save keys to database
  # for (rvid,key) in ps.vkPairs:
  #   adb.layersPutKey(rvid, key)

  ps.check().isOkOr:
    raiseAssert info & ": check => " & $error

  #echo ">>> preLoadAristoDb (9)",
  #  "\n    ps\n    ", ps.pp(byKeyOk=false,byVidOk=false),
  #  ""
  # -----------
  #if true: quit()

# use tracerTestGen.nim to generate additional test data
proc testFixtureImpl(node: JsonNode, testStatusIMPL: var TestStatus, memoryDB: CoreDbRef) {.deprecated: "needs fixing for non-generic payloads".} =
  # Test currently disabled (see the `deprecated` pragma above): bail out
  # before doing any work.
  block:
    return
  setErrorLevel()

  var
    blockNumberHex = node["blockNumber"].getStr()
    blockNumber = parseHexInt(blockNumberHex).uint64
    com = CommonRef.new(memoryDB, chainConfigForNetwork(MainNet))
    state = node["state"]
    receipts = node["receipts"]

  # disable PoS/post-Merge feature
  com.setTTD Opt.none(DifficultyInt)

  # Import raw data into database
  # Some hack for `Aristo` using the `snap` protocol proof-loader
  memoryDB.preLoadAristoDb(state, blockNumber)

  var blk = com.db.getEthBlock(blockNumber).expect("eth block exists")

  let txTraces = traceTransactions(com, blk.header, blk.transactions)
  let stateDump = dumpBlockState(com, blk)
  let blockTrace = traceBlock(com, blk, {DisableState})

  # Fix hex representation
  for inx in 0 ..< node["txTraces"].len:
    for key in ["beforeRoot", "afterRoot"]:
      # Here, `node["txTraces"]` stores a string while `txTraces` uses a
      # `Hash32` which might expand to a different upper/lower case.
      var strHash = txTraces[inx]["stateDiff"][key].getStr.toUpperAscii
      if strHash.len < 64:
        strHash = '0'.repeat(64 - strHash.len) & strHash
      txTraces[inx]["stateDiff"][key] = %(strHash)

  check node["txTraces"] == txTraces
  check node["stateDump"] == stateDump
  check node["blockTrace"] == blockTrace

  for i in 0 ..< receipts.len:
    let receipt = receipts[i]
    let stateDiff = txTraces[i]["stateDiff"]
    check receipt["root"].getStr().toLowerAscii() == stateDiff["afterRoot"].getStr().toLowerAscii()

proc testFixtureAristo(node: JsonNode, testStatusIMPL: var TestStatus) =
  node.testFixtureImpl(testStatusIMPL, newCoreDbRef AristoDbMemory)

proc tracerJsonMain*() =
  suite "tracer json tests for Aristo DB":
    jsonTest("TracerTests", testFixtureAristo)

when isMainModule:
  tracerJsonMain()