# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Rocks DB fetch data record
## ==========================

{.push raises: [].}

import
  eth/common,
  rocksdb,
  results,
  ../../[aristo_blobify, aristo_desc],
  ../init_common,
  ./rdb_desc,
  metrics,
  std/concurrency/atomics

const
  extraTraceMessages = false
    ## Enable additional logging noise

when extraTraceMessages:
  import chronicles

  logScope:
    topics = "aristo-rocksdb"

type
  RdbVtxLruCounter = ref object of Counter
  RdbKeyLruCounter = ref object of Counter

var
  rdbVtxLruStatsMetric {.used.} = RdbVtxLruCounter.newCollector(
    "aristo_rdb_vtx_lru_total",
    "Vertex LRU lookup (hit/miss, world/account, branch/leaf)",
    labels = ["state", "vtype", "hit"],
  )
  rdbKeyLruStatsMetric {.used.} = RdbKeyLruCounter.newCollector(
    "aristo_rdb_key_lru_total", "HashKey LRU lookup", labels = ["state", "hit"]
  )
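
# The hit/miss stats tables are bumped from the lookup procs below; the custom
# `collect` overrides export those counters to the metrics backend, one
# labelled time series per (state, vertex type, hit) combination.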

method collect*(collector: RdbVtxLruCounter, output: MetricHandler) =
  let timestamp = collector.now()

  # We don't care about synchronization between each type of metric or between
  # the metrics thread and others since small differences like this don't matter
  for state in RdbStateType:
    for vtype in VertexType:
      for hit in [false, true]:
        output(
          name = "aristo_rdb_vtx_lru_total",
          value = float64(rdbVtxLruStats[state][vtype].get(hit)),
          labels = ["state", "vtype", "hit"],
          labelValues = [$state, $vtype, $ord(hit)],
          timestamp = timestamp,
        )

method collect*(collector: RdbKeyLruCounter, output: MetricHandler) =
  let timestamp = collector.now()

  for state in RdbStateType:
    for hit in [false, true]:
      output(
        name = "aristo_rdb_key_lru_total",
        value = float64(rdbKeyLruStats[state].get(hit)),
        labels = ["state", "hit"],
        labelValues = [$state, $ord(hit)],
        timestamp = timestamp,
      )

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc getAdm*(rdb: RdbInst; xid: AdminTabID): Result[seq[byte],(AristoError,string)] =
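  ## Fetch a data record from the admin column family, addressed by the
  ## argument `xid`. An empty blob is returned if no data are stored
  ## under that ID.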
  var res: seq[byte]
  let onData = proc(data: openArray[byte]) =
    res = @data

  let gotData = rdb.admCol.get(xid.toOpenArray, onData).valueOr:
    const errSym = RdbBeDriverGetAdmError
    when extraTraceMessages:
      trace logTxt "getAdm", xid, error=errSym, info=error
    return err((errSym,error))

  # Correct result if needed
  if not gotData:
    res = EmptyBlob
  ok move(res)

proc getKey*(
    rdb: var RdbInst;
    rvid: RootedVertexID;
): Result[HashKey,(AristoError,string)] =
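  ## Fetch the hash key of the vertex addressed by `rvid`, trying the
  ## in-memory LRU first and falling back to the vertex column family,
  ## where keys are stored alongside the vertex data. A void key is
  ## returned if nothing usable is found.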
  # Try LRU cache first
  var rc = rdb.rdKeyLru.get(rvid.vid)
  if rc.isOk:
    rdbKeyLruStats[rvid.to(RdbStateType)].inc(true)
    return ok(move(rc.value))

  rdbKeyLruStats[rvid.to(RdbStateType)].inc(false)

  # Otherwise fetch from backend database
  # A threadvar is used to avoid allocating an environment for onData
  var res {.threadvar.}: Opt[HashKey]
  let onData = proc(data: openArray[byte]) =
    res = data.deblobify(HashKey)

  let gotData = rdb.vtxCol.get(rvid.blobify().data(), onData).valueOr:
    const errSym = RdbBeDriverGetKeyError
    when extraTraceMessages:
      trace logTxt "getKey", rvid, error=errSym, info=error
    return err((errSym,error))

  # Correct result if needed
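  # Note that `res` is a threadvar and may still hold the value of a previous
  # call when `onData` was not invoked, so `gotData` must be checked before
  # `res` is consulted.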
  if not gotData or res.isNone():
    res.ok(VOID_HASH_KEY)

  # Update cache and return
  rdb.rdKeyLru.put(rvid.vid, res.value())

  ok res.value()

proc getVtx*(
    rdb: var RdbInst;
    rvid: RootedVertexID;
    flags: set[GetVtxFlag];
): Result[VertexRef,(AristoError,string)] =
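  ## Fetch the vertex record addressed by `rvid`, trying the in-memory LRU
  ## first and the vertex column family otherwise. A `nil` vertex reference
  ## is returned as success if no vertex is stored under `rvid`.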
  # Try LRU cache first
  var rc =
    if GetVtxFlag.PeekCache in flags:
      rdb.rdVtxLru.peek(rvid.vid)
    else:
      rdb.rdVtxLru.get(rvid.vid)

  if rc.isOk:
    rdbVtxLruStats[rvid.to(RdbStateType)][rc.value().vType].inc(true)
    return ok(move(rc.value))

  # Otherwise fetch from backend database
  # A threadvar is used to avoid allocating an environment for onData
  var res {.threadvar.}: Result[VertexRef,AristoError]
  let onData = proc(data: openArray[byte]) =
    res = data.deblobify(VertexRef)

  let gotData = rdb.vtxCol.get(rvid.blobify().data(), onData).valueOr:
    const errSym = RdbBeDriverGetVtxError
    when extraTraceMessages:
      trace logTxt "getVtx", rvid, error=errSym, info=error
    return err((errSym,error))

  if not gotData:
    # As a hack, we count missing data as leaf nodes
    rdbVtxLruStats[rvid.to(RdbStateType)][VertexType.Leaf].inc(false)
    return ok(VertexRef(nil))

  if res.isErr():
    return err((res.error(), "Parsing failed"))

  rdbVtxLruStats[rvid.to(RdbStateType)][res.value().vType].inc(false)

  # Update cache and return - in peek mode, avoid evicting cache items
  if GetVtxFlag.PeekCache notin flags or rdb.rdVtxLru.len < rdb.rdVtxLru.capacity:
    rdb.rdVtxLru.put(rvid.vid, res.value())

  ok res.value()
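
# Example (sketch only): a caller-side lookup, assuming `rdb` and `rvid` come
# from an already initialised Aristo RocksDB backend. Passing `PeekCache`
# avoids reshuffling the LRU for one-off reads.
#
#   let vtx = rdb.getVtx(rvid, {GetVtxFlag.PeekCache}).valueOr:
#     echo "getVtx failed: ", error
#     return
#   if not vtx.isNil:
#     echo "loaded vertex of type ", vtx.vType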

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------