Jacek Sieka 01ca415721
Store keys together with node data (#2849)
Currently, computed hash keys are stored in a column family separate
from the MPT data they're generated from - this has several
disadvantages:

* A lot of space is wasted because the lookup key (`RootedVertexID`) is
repeated in both tables - it accounts for ~30% of the `AriKey` content
(a back-of-the-envelope estimate follows this list)!
* rocksdb must maintain in-memory bloom filters and LRU caches for said
keys, doubling its "minimal efficient cache size"
* An extra disk traversal must be made to check for the existence of a
cached hash key
* The number of files on disk doubles, since each column family
maintains its own set of files
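
As a rough back-of-the-envelope, assuming a `RootedVertexID` is a pair
of 64-bit vertex IDs and a hash key occupies up to 32 bytes (the types
below are simplified stand-ins, not the real Aristo declarations):

```nim
type
  VertexID = uint64
  RootedVertexID = tuple[root, vid: VertexID]
  HashKey = array[32, byte]

let
  keyLen = sizeof(RootedVertexID) # 16 bytes, repeated in each column family
  valLen = sizeof(HashKey)        # 32 bytes of actual hash payload

# Share of each `AriKey` entry taken up by the lookup key: ~33%
echo keyLen * 100 div (keyLen + valLen), "%"
```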

Here, the two CFs are joined such that both key and data are stored in
`AriVtx` (a lookup sketch follows the list below). This means:

* we save ~30% disk space on repeated lookup keys
* we save ~2gb of memory overhead that can be used to cache data instead
of indices
* we can skip storing hash keys for MPT leaf nodes - these are trivial
to compute and waste a lot of space - previously they had to be present
in the `AriKey` CF to avoid having to look in two tables on the happy
path
* there is a small increase in write amplification: when the hash value
of a branch node is updated, we must write both key and branch data,
where previously we would write only the key
* there's a small shift in CPU usage - instead of being looked up in
the database, hashes for leaf nodes are (re)computed on the fly
* we can switch back to slightly smaller on-disk SST files since there
are fewer of them, which should reduce disk traffic a bit
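
To illustrate the lookup-path difference, here is a minimal sketch with
in-memory tables standing in for the column families (names and types
are simplified for illustration, not the actual Aristo code):

```nim
import std/tables

type
  RootedVertexID = tuple[root, vid: uint64]
  HashKey = array[32, byte]
  Vertex = object
    data: seq[byte]

# Before: two CFs, hence two point lookups (plus two sets of bloom
# filters and caches) to fetch a branch and its cached hash key.
var ariKeyCF: Table[RootedVertexID, HashKey]
var ariVtxCF: Table[RootedVertexID, Vertex]

proc fetchOld(rvid: RootedVertexID): (Vertex, HashKey) =
  (ariVtxCF[rvid], ariKeyCF[rvid]) # two traversals, lookup key stored twice

# After: one CF stores key and vertex side by side - a single read
# serves the happy path and the lookup key appears only once.
var ariVtx: Table[RootedVertexID, (Vertex, HashKey)]

proc fetchNew(rvid: RootedVertexID): (Vertex, HashKey) =
  ariVtx[rvid] # one traversal
```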

Internally, there are also other advantages:

* when clearing keys, we no longer have to store a zero hash in memory -
instead, we deduce staleness of the cached key from the presence of an
updated VertexRef (sketched after this list) - this saves ~1gb of
memory overhead during import
* the hash key cache becomes dedicated to branch keys since leaf keys
are no longer stored in memory, reducing churn
* key computation is a lot faster thanks to skipping the second disk
traversal - a key computation for mainnet can be completed in 11 hours
instead of ~2 days (!) thanks to better cache usage and less read
amplification. With additional improvements to the on-disk format, we
can probably get rid of the initial full-traversal method of seeding
the key cache on first start after import
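
The staleness rule from the first point could look roughly like this
(simplified stand-in types again; `layerVtx` and `cachedKey` are
hypothetical names for the in-memory layer and the key cache):

```nim
import std/[options, tables]

type
  VertexID = uint64
  HashKey = array[32, byte]
  VertexRef = ref object

var
  layerVtx: Table[VertexID, VertexRef]  # vertices updated since last hashing
  cachedKey: Table[VertexID, HashKey]   # cached hash keys (branches only)

proc getKey(vid: VertexID): Option[HashKey] =
  if vid in layerVtx:
    # An updated vertex shadows the cache: the key is stale and must be
    # recomputed - no zero-hash placeholder needs to be kept in memory.
    none(HashKey)
  elif vid in cachedKey:
    some(cachedKey[vid])
  else:
    none(HashKey)
```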

All in all, this PR reduces the size of a mainnet database from 160gb to
110gb and the peak memory footprint during import by ~1-2gb.
2024-11-20 09:56:27 +01:00


# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Rocksdb constructor/destructor for Aristo DB
## ============================================

{.push raises: [].}

import
  std/[exitprocs, sets, sequtils, strformat, os],
  rocksdb,
  results,
  ../../aristo_desc,
  ./rdb_desc,
  ../../../opts

# ------------------------------------------------------------------------------
# Private constructor
# ------------------------------------------------------------------------------

const
  lruOverhead = 20
    # Approximate LRU cache overhead per entry based on minilru sizes

proc dumpCacheStats(keySize, vtxSize: int) =
  block vtx:
    var misses, hits: uint64
    echo "vtxLru(", vtxSize, ")"
    echo "   state    vtype       miss        hit      total hitrate"
    for state in RdbStateType:
      for vtype in VertexType:
        let
          (miss, hit) = (
            rdbVtxLruStats[state][vtype].get(false),
            rdbVtxLruStats[state][vtype].get(true),
          )
          hitRate = float64(hit * 100) / (float64(hit + miss))
        misses += miss
        hits += hit
        echo &"{state:>8} {vtype:>8} {miss:>10} {hit:>10} {miss+hit:>10} {hitRate:>6.2f}%"
    let hitRate = float64(hits * 100) / (float64(hits + misses))
    echo &"     all      all {misses:>10} {hits:>10} {misses+hits:>10} {hitRate:>6.2f}%"

  block key:
    var misses, hits: uint64
    echo "keyLru(", keySize, ") "
    echo "   state       miss        hit      total hitrate"
    for state in RdbStateType:
      let
        (miss, hit) =
          (rdbKeyLruStats[state].get(false), rdbKeyLruStats[state].get(true))
        hitRate = float64(hit * 100) / (float64(hit + miss))
      misses += miss
      hits += hit
      echo &"{state:>8} {miss:>10} {hit:>10} {miss+hit:>10} {hitRate:>5.2f}%"
    let hitRate = float64(hits * 100) / (float64(hits + misses))
    echo &"     all {misses:>10} {hits:>10} {misses+hits:>10} {hitRate:>5.2f}%"

proc initImpl(
    rdb: var RdbInst;
    basePath: string;
    opts: DbOptions;
    dbOpts: DbOptionsRef;
    cfOpts: ColFamilyOptionsRef;
    guestCFs: openArray[ColFamilyDescriptor] = [];
      ): Result[seq[ColFamilyReadWrite],(AristoError,string)] =
  ## Database backend constructor
  const initFailed = "RocksDB/init() failed"

  rdb.basePath = basePath

  # bytes -> entries based on overhead estimates
  rdb.rdKeySize =
    opts.rdbKeyCacheSize div (sizeof(VertexID) + sizeof(HashKey) + lruOverhead)
  rdb.rdVtxSize =
    opts.rdbVtxCacheSize div (sizeof(VertexID) + sizeof(default(VertexRef)[]) + lruOverhead)

  rdb.rdKeyLru = typeof(rdb.rdKeyLru).init(rdb.rdKeySize)
  rdb.rdVtxLru = typeof(rdb.rdVtxLru).init(rdb.rdVtxSize)

  if opts.rdbPrintStats:
    let
      ks = rdb.rdKeySize
      vs = rdb.rdVtxSize
    # TODO instead of dumping at exit, these stats could be logged or written
    #      to a file for better tracking over time - that said, this is mainly
    #      a debug utility at this point
    addExitProc(proc() =
      dumpCacheStats(ks, vs))

  let
    dataDir = rdb.dataDir
  try:
    dataDir.createDir
  except OSError, IOError:
    return err((RdbBeCantCreateDataDir, ""))

  # Column family names to allocate when opening the database. This list
  # might be extended below.
  var useCFs = AristoCFs.mapIt($it).toHashSet

  # The `guestCFs` list must not overwrite `AristoCFs` options
  let guestCFs = guestCFs.filterIt(it.name notin useCFs)

  # If the database exists already, check for missing column families and
  # allocate them for opening. Otherwise rocksdb might reject the persistent
  # database.
  if (dataDir / "CURRENT").fileExists:
    let hdCFs = dataDir.listColumnFamilies.valueOr:
      raiseAssert initFailed & " cannot read existing CFs: " & error
    # Update list of column families for opener.
    useCFs = useCFs + hdCFs.toHashSet

  # The `guestCFs` list might come with a different set of options. So it is
  # temporarily removed from `useCFs` and will be re-added with appropriate
  # options.
  let guestCFq = @guestCFs
  useCFs = useCFs - guestCFs.mapIt(it.name).toHashSet

  # Finalise list of column families
  let cfs = useCFs.toSeq.mapIt(it.initColFamilyDescriptor cfOpts) & guestCFq

  # Open database for the extended family :)
  let baseDb = openRocksDb(dataDir, dbOpts, columnFamilies=cfs).valueOr:
    raiseAssert initFailed & " cannot create base descriptor: " & error

  # Initialise column handlers (this stores `baseDb` implicitly)
  rdb.admCol = baseDb.getColFamily($AdmCF).valueOr:
    raiseAssert initFailed & " cannot initialise AdmCF descriptor: " & error
  rdb.vtxCol = baseDb.getColFamily($VtxCF).valueOr:
    raiseAssert initFailed & " cannot initialise VtxCF descriptor: " & error

  ok(guestCFs.mapIt(baseDb.getColFamily(it.name).expect("loaded cf")))

# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------

proc init*(
    rdb: var RdbInst;
    basePath: string;
    opts: DbOptions;
    dbOpts: DbOptionsRef;
    cfOpts: ColFamilyOptionsRef;
    guestCFs: openArray[ColFamilyDescriptor];
      ): Result[seq[ColFamilyReadWrite],(AristoError,string)] =
  ## Temporarily define a guest CF list here.
  rdb.initImpl(basePath, opts, dbOpts, cfOpts, guestCFs)

proc destroy*(rdb: var RdbInst; eradicate: bool) =
  ## Destructor
  rdb.baseDb.close()

  if eradicate:
    try:
      rdb.dataDir.removeDir

      # Remove the base folder if it is empty
      block done:
        for w in rdb.baseDir.walkDirRec:
          # Ignore backup files
          if 0 < w.len and w[^1] != '~':
            break done
        rdb.baseDir.removeDir

    except CatchableError:
      discard

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------