Jacek Sieka d39c589ec3
lru cache updates (#2590)
* replace rocksdb row cache with larger rdb lru caches - these serve the
  same purpose but are more efficient because they skip serialization,
  locking and rocksdb layering
* don't append fresh items to the cache - appending has the effect of
  evicting the existing items and replacing them with low-value entries
  that might never be read; during write-heavy periods of processing,
  the newly-added entries were evicted again during the store loop (see
  the sketch below)
* allow tuning the rdb lru size at runtime
* add a (hidden) option to print lru stats at exit (replacing the
  compile-time flag)
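
To illustrate the second point, here's a minimal, hypothetical LRU sketch
(not the actual nimbus cache; the `admit`/`putIfCached` split and all names
are made up): reads insert and promote entries, while writes only refresh
entries that are already cached, so a write burst can no longer evict
read-hot entries:

```
import std/[lists, options, tables]

type
  LruCache[K, V] = object
    capacity: int
    order: DoublyLinkedList[(K, V)]               # head is most recently used
    index: Table[K, DoublyLinkedListNode[(K, V)]]

proc init[K, V](T: type LruCache[K, V]; capacity: int): T =
  T(capacity: capacity)

proc touch[K, V](c: var LruCache[K, V]; node: DoublyLinkedListNode[(K, V)]) =
  # move an existing node to the most-recently-used position
  c.order.remove(node)
  c.order.prepend(node)

proc get[K, V](c: var LruCache[K, V]; k: K): Option[V] =
  # reads promote: a hit refreshes the entry's recency
  c.index.withValue(k, node):
    c.touch(node[])
    return some(node[].value[1])
  none(V)

proc admit[K, V](c: var LruCache[K, V]; k: K; v: V) =
  # called after a read miss: fresh entries enter the cache here,
  # evicting the least recently used entry when the cache is full
  if c.index.len >= c.capacity:
    let lru = c.order.tail
    c.order.remove(lru)
    c.index.del(lru.value[0])
  let node = newDoublyLinkedNode((k, v))
  c.order.prepend(node)
  c.index[k] = node

proc putIfCached[K, V](c: var LruCache[K, V]; k: K; v: V) =
  # writes refresh existing entries only - fresh items are not admitted,
  # so a write-heavy burst cannot flush read-hot entries from the cache
  c.index.withValue(k, node):
    node[].value = (k, v)
    c.touch(node[])
```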

pre:
```
INF 2024-09-03 15:07:01.136+02:00 Imported blocks
blockNumber=20012001 blocks=12000 importedSlot=9216851 txs=1837042
mgas=181911.265 bps=11.675 tps=1870.397 mgps=176.819 avgBps=10.288
avgTps=1574.889 avgMGps=155.952 elapsed=19m26s458ms
```

post:
```
INF 2024-09-03 13:54:26.730+02:00 Imported blocks
blockNumber=20012001 blocks=12000 importedSlot=9216851 txs=1837042
mgas=181911.265 bps=11.637 tps=1864.384 mgps=176.250 avgBps=11.202
avgTps=1714.920 avgMGps=169.818 elapsed=17m51s211ms
```

9%-ish import perf improvement at similar mem usage :)
2024-09-05 11:18:32 +02:00

# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Persistent constructor for Aristo DB
## ====================================
##
## This module automatically pulls in the persistent backend library at the
## linking stage (e.g. `rocksdb`) which can be avoided for pure memory DB
## applications by importing `./aristo_init/memory_only` (rather than
## `./aristo_init/persistent`.)
##
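## A sketch of the two import choices (relative paths as given above):
## ::
##   import ./aristo_init/persistent     # links in the rocksdb backend
##   # or, for a pure in-memory database:
##   import ./aristo_init/memory_only    # no rocksdb linkage
##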
{.push raises: [].}

import
  results,
  rocksdb,
  ../../opts,
  ../aristo_desc,
  ./rocks_db/rdb_desc,
  "."/[rocks_db, memory_only]

export
  AristoDbRef,
  RdbBackendRef,
  RdbWriteEventCb,
  memory_only

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc newAristoRdbDbRef(
    basePath: string;
    opts: DbOptions;
    dbOpts: DbOptionsRef;
    cfOpts: ColFamilyOptionsRef;
    guestCFs: openArray[ColFamilyDescriptor];
      ): Result[(AristoDbRef, seq[ColFamilyReadWrite]), AristoError] =
  let
    (be, oCfs) = ? rocksDbBackend(basePath, opts, dbOpts, cfOpts, guestCFs)
    vTop = block:
      let rc = be.getTuvFn()
      if rc.isErr:
        be.closeFn(eradicate = false)
        return err(rc.error)
      rc.value
  ok((AristoDbRef(
    top: LayerRef(vTop: vTop),
    backend: be), oCfs))

# ------------------------------------------------------------------------------
# Public database constructors, destructor
# ------------------------------------------------------------------------------

proc init*(
    T: type AristoDbRef;
    B: type RdbBackendRef;
    basePath: string;
    opts: DbOptions;
    dbOpts: DbOptionsRef;
    cfOpts: ColFamilyOptionsRef;
    guestCFs: openArray[ColFamilyDescriptor];
      ): Result[(T, seq[ColFamilyReadWrite]), AristoError] =
  ## Generic constructor, `basePath` argument is ignored for memory backend
  ## databases (which also unconditionally succeed initialising.)
  ##
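  ## A hypothetical invocation (`myOpts`, `myDbOpts` and `myCfOpts` stand in
  ## for option objects prepared elsewhere):
  ## ::
  ##   let (adb, oCfs) = AristoDbRef.init(
  ##     RdbBackendRef, "/tmp/aristo", myOpts, myDbOpts, myCfOpts,
  ##     guestCFs = []).expect "working aristo database"
  ##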
  basePath.newAristoRdbDbRef opts, dbOpts, cfOpts, guestCFs

proc activateWrTrigger*(
    db: AristoDbRef;
    hdl: RdbWriteEventCb;
      ): Result[void,AristoError] =
  ## This function allows an application to hook into the `Aristo` storage
  ## event for the `RocksDb` backend via the callback argument function `hdl`.
  ##
  ## The argument handler `hdl` of type
  ## ::
  ##   proc(session: WriteBatchRef): bool
  ##
  ## will be invoked when a write batch for the `Aristo` database is opened in
  ## order to save current changes to the backend. The `session` argument
  ## passed to the handler, in conjunction with a list of `ColFamilyReadWrite`
  ## items (as returned from `reinit()`), might be used to store additional
  ## items to the database with the same write batch.
  ##
  ## If the handler returns `true`, the write batch will proceed with saving.
  ## Otherwise it is aborted and no data are saved at all.
  ##
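  ## A registration sketch (the handler body is hypothetical; returning
  ## `true` lets the batch proceed):
  ## ::
  ##   let rc = db.activateWrTrigger(
  ##     proc(session: WriteBatchRef): bool =
  ##       # add application records to the same write batch here
  ##       true)
  ##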
  case db.backend.kind:
  of BackendRocksDB:
    db.backend.rocksDbSetEventTrigger hdl
  of BackendRdbHosting:
    err(RdbBeWrTriggerActiveAlready)
  else:
    err(RdbBeTypeUnsupported)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------