Add some basic rocksdb options to command line (#2286)

These options exist mainly to drive experiments and are therefore hidden.

This PR also brings in an initial set of caches and buffers for RocksDB - the set I've been using during various performance tests to reach a viable baseline performance level.
Jacek Sieka 2024-06-05 17:08:29 +02:00 committed by GitHub
parent 95a4adc1e8
commit c876729c4d
17 changed files with 148 additions and 33 deletions
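Not part of the diff: a minimal sketch of how the new hidden flags are meant to reach the database layer, using only names this commit introduces (`DbOptions.init`, `conf.dbOptions()`, the `debug-rocksdb-*` flags). The import path and the concrete values are illustrative assumptions.

```nim
import nimbus/db/opts  # assumed import path; adjust to the local module layout

# Each field corresponds to one hidden command-line flag; unspecified fields
# keep the defaults from nimbus/db/opts.nim.
let dbOpts = DbOptions.init(
  maxOpenFiles    = 2048,               # --debug-rocksdb-max-open-files
  writeBufferSize = 128 * 1024 * 1024,  # --debug-rocksdb-write-buffer-size
  rowCacheSize    = 512 * 1024 * 1024,  # --debug-rocksdb-row-cache-size
  blockCacheSize  = 1024 * 1024 * 1024) # --debug-rocksdb-block-cache-size

# The options object is then handed to the persistent CoreDb constructor,
# mirroring `AristoDbRocks.newCoreDbRef(string conf.dataDir, conf.dbOptions())`
# in the nimbus.nim change below.
```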


@ -26,7 +26,8 @@ import
],
eth/[common, net/utils, net/nat, p2p/bootnodes, p2p/enode, p2p/discoveryv5/enr],
"."/[constants, vm_compile_info, version],
common/chain_config
common/chain_config,
db/opts
export net
@ -380,6 +381,30 @@ type
defaultValueDesc: $ProtocolFlag.Eth
name: "protocols" .}: seq[string]
rocksdbMaxOpenFiles {.
hidden
defaultValue: defaultMaxOpenFiles
defaultValueDesc: $defaultMaxOpenFiles
name: "debug-rocksdb-max-open-files".}: int
rocksdbWriteBufferSize {.
hidden
defaultValue: defaultWriteBufferSize
defaultValueDesc: $defaultWriteBufferSize
name: "debug-rocksdb-write-buffer-size".}: int
rocksdbRowCacheSize {.
hidden
defaultValue: defaultRowCacheSize
defaultValueDesc: $defaultRowCacheSize
name: "debug-rocksdb-row-cache-size".}: int
rocksdbBlockCacheSize {.
hidden
defaultValue: defaultBlockCacheSize
defaultValueDesc: $defaultBlockCacheSize
name: "debug-rocksdb-block-cache-size".}: int
case cmd* {.
command
defaultValue: NimbusCmd.noCommand }: NimbusCmd
@ -751,6 +776,14 @@ func httpServerEnabled*(conf: NimbusConf): bool =
func era1Dir*(conf: NimbusConf): OutDir =
conf.era1DirOpt.get(OutDir(conf.dataDir.string & "/era1"))
func dbOptions*(conf: NimbusConf): DbOptions =
DbOptions.init(
maxOpenFiles = conf.rocksdbMaxOpenFiles,
writeBufferSize = conf.rocksdbWriteBufferSize,
rowCacheSize = conf.rocksdbRowCacheSize,
blockCacheSize = conf.rocksdbBlockCacheSize,
)
# KLUDGE: The `load()` template does currently not work within any exception
# annotated environment.
{.pop.}


@ -23,7 +23,8 @@ import
rocksdb,
../aristo_desc,
./rocks_db/rdb_desc,
"."/[rocks_db, memory_only]
"."/[rocks_db, memory_only],
../../opts
export
RdbBackendRef,
@ -35,9 +36,10 @@ export
proc newAristoRdbDbRef(
basePath: string;
opts: DbOptions;
): Result[AristoDbRef, AristoError]=
let
be = ? rocksDbAristoBackend(basePath)
be = ? rocksDbBackend(basePath, opts)
vTop = block:
let rc = be.getTuvFn()
if rc.isErr:
@ -58,12 +60,13 @@ proc init*[W: RdbBackendRef](
T: type AristoDbRef;
B: type W;
basePath: string;
opts: DbOptions
): Result[T, AristoError] =
## Generic constructor, `basePath` argument is ignored for memory backend
## databases (which also unconditionally succeed initialising.)
##
when B is RdbBackendRef:
basePath.newAristoRdbDbRef()
basePath.newAristoRdbDbRef opts
proc getRocksDbFamily*(
gdb: GuestDbRef;


@ -35,11 +35,10 @@ import
../aristo_desc/desc_backend,
../aristo_blobify,
./init_common,
./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk]
./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk],
../../opts
const
maxOpenFiles = 512 ## Rocks DB setup, open files limit
extraTraceMessages = false
## Enabled additional logging noise
@ -265,13 +264,16 @@ proc closeFn(db: RdbBackendRef): CloseFn =
# Public functions
# ------------------------------------------------------------------------------
proc rocksDbAristoBackend*(path: string): Result[BackendRef,AristoError] =
proc rocksDbBackend*(
path: string;
opts: DbOptions
): Result[BackendRef,AristoError] =
let db = RdbBackendRef(
beKind: BackendRocksDB)
# Initialise RocksDB
block:
let rc = db.rdb.init(path, maxOpenFiles)
let rc = db.rdb.init(path, opts)
if rc.isErr:
when extraTraceMessages:
trace logTxt "constructor failed",


@ -18,7 +18,8 @@ import
rocksdb,
results,
../../aristo_desc,
./rdb_desc
./rdb_desc,
../../../opts
const
extraTraceMessages = false
@ -38,7 +39,7 @@ when extraTraceMessages:
proc init*(
rdb: var RdbInst;
basePath: string;
openMax: int;
opts: DbOptions;
): Result[void,(AristoError,string)] =
## Constructor code inspired by `RocksStoreRef.init()` from
## kvstore_rocksdb.nim
@ -53,13 +54,29 @@ proc init*(
return err((RdbBeCantCreateDataDir, ""))
let
cfs = @[initColFamilyDescriptor AristoFamily] &
RdbGuest.mapIt(initColFamilyDescriptor $it)
opts = defaultDbOptions()
opts.setMaxOpenFiles(openMax)
cfOpts = defaultColFamilyOptions()
if opts.writeBufferSize > 0:
cfOpts.setWriteBufferSize(opts.writeBufferSize)
let
cfs = @[initColFamilyDescriptor(AristoFamily, cfOpts)] &
RdbGuest.mapIt(initColFamilyDescriptor($it, cfOpts))
dbOpts = defaultDbOptions()
dbOpts.setMaxOpenFiles(opts.maxOpenFiles)
dbOpts.setMaxBytesForLevelBase(opts.writeBufferSize)
if opts.rowCacheSize > 0:
dbOpts.setRowCache(cacheCreateLRU(opts.rowCacheSize))
if opts.blockCacheSize > 0:
let tableOpts = defaultTableOptions()
tableOpts.setBlockCache(cacheCreateLRU(opts.blockCacheSize))
dbOpts.setBlockBasedTableFactory(tableOpts)
# Reserve a family corner for `Aristo` on the database
let baseDb = openRocksDb(dataDir, opts, columnFamilies=cfs).valueOr:
let baseDb = openRocksDb(dataDir, dbOpts, columnFamilies=cfs).valueOr:
let errSym = RdbBeDriverInitError
when extraTraceMessages:
trace logTxt "init failed", dataDir, openMax, error=errSym, info=error


@ -20,7 +20,8 @@ import
../../kvt/kvt_persistent as use_kvt,
../base,
./aristo_db,
./aristo_db/[common_desc, handlers_aristo]
./aristo_db/[common_desc, handlers_aristo],
../../opts
include
./aristo_db/aristo_replicate
@ -37,9 +38,9 @@ const
# Public constructor
# ------------------------------------------------------------------------------
proc newAristoRocksDbCoreDbRef*(path: string): CoreDbRef =
proc newAristoRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef =
let
adb = AristoDbRef.init(use_ari.RdbBackendRef, path).expect aristoFail
adb = AristoDbRef.init(use_ari.RdbBackendRef, path, opts).expect aristoFail
gdb = adb.guestDb().valueOr: GuestDbRef(nil)
kdb = KvtDbRef.init(use_kvt.RdbBackendRef, path, gdb).expect kvtFail
AristoDbRocks.create(kdb, adb)


@ -25,7 +25,8 @@ import
../aristo,
./memory_only,
base_iterators_persistent,
./backend/aristo_rocksdb
./backend/aristo_rocksdb,
../opts
export
memory_only,
@ -34,13 +35,14 @@ export
proc newCoreDbRef*(
dbType: static[CoreDbType]; # Database type symbol
path: string; # Storage path for database
opts: DbOptions;
): CoreDbRef =
## Constructor for persistent type DB
##
## Note: Using legacy notation `newCoreDbRef()` rather than
## `CoreDbRef.init()` because of compiler coughing.
when dbType == AristoDbRocks:
newAristoRocksDbCoreDbRef path
newAristoRocksDbCoreDbRef path, opts
else:
{.error: "Unsupported dbType for persistent newCoreDbRef()".}

nimbus/db/opts.nim (new file, 42 lines)

@ -0,0 +1,42 @@
# Nimbus
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import results
export results
const
# https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
defaultMaxOpenFiles* = 512
defaultWriteBufferSize* = 64 * 1024 * 1024
defaultRowCacheSize* = 512 * 1024 * 1024
defaultBlockCacheSize* = 256 * 1024 * 1024
type DbOptions* = object # Options that are transported to the database layer
maxOpenFiles*: int
writeBufferSize*: int
rowCacheSize*: int
blockCacheSize*: int
func init*(
T: type DbOptions,
maxOpenFiles = defaultMaxOpenFiles,
writeBufferSize = defaultWriteBufferSize,
rowCacheSize = defaultRowCacheSize,
blockCacheSize = defaultBlockCacheSize,
): T =
T(
maxOpenFiles: maxOpenFiles,
writeBufferSize: writeBufferSize,
rowCacheSize: rowCacheSize,
blockCacheSize: blockCacheSize,
)
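Callers that don't need tuning simply take the defaults, which is what the test updates below do. A small usage sketch, assuming the module is imported as in the tests:

```nim
import ../nimbus/db/opts  # relative path as used from the tests in this commit

# All fields fall back to the defaults declared above.
let defaults = DbOptions.init()
doAssert defaults.maxOpenFiles == defaultMaxOpenFiles

# Individual knobs can be overridden by name, e.g. a 1 GiB block cache:
let tuned = DbOptions.init(blockCacheSize = 1024 * 1024 * 1024)
doAssert tuned.writeBufferSize == defaultWriteBufferSize
```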


@ -239,7 +239,8 @@ proc run(nimbus: NimbusNode, conf: NimbusConf) =
# Resolve statically for database type
case conf.chainDbMode:
of Aristo,AriPrune:
AristoDbRocks.newCoreDbRef(string conf.dataDir)
AristoDbRocks.newCoreDbRef(string conf.dataDir, conf.dbOptions())
let com = CommonRef.new(
db = coreDB,
pruneHistory = (conf.chainDbMode == AriPrune),


@ -16,6 +16,7 @@
import
stint,
../nimbus/common/common,
../nimbus/db/opts,
../nimbus/db/core_db/persistent,
../nimbus/core/executor,
../nimbus/[vm_state, vm_types],
@ -47,7 +48,8 @@ proc dumpDebug(com: CommonRef, blockNumber: UInt256) =
proc main() {.used.} =
let conf = getConfiguration()
let com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, conf.dataDir))
let com = CommonRef.new(
newCoreDbRef(DefaultDbPersistent, conf.dataDir, DbOptions.init()))
if conf.head != 0.u256:
dumpDebug(com, conf.head)


@ -16,6 +16,7 @@ import
../nimbus/errors,
../nimbus/core/chain,
../nimbus/common,
../nimbus/db/opts,
../nimbus/db/[core_db/persistent, storage_types],
configuration # must be late (compilation annoyance)
@ -54,7 +55,7 @@ proc main() {.used.} =
let conf = configuration.getConfiguration()
let com = CommonRef.new(
newCoreDbRef(DefaultDbPersistent, conf.dataDir),
newCoreDbRef(DefaultDbPersistent, conf.dataDir, DbOptions.init()),
conf.netId, networkParams(conf.netId))
# move head to block number ...


@ -13,6 +13,7 @@ import
../nimbus/[vm_state, vm_types],
../nimbus/core/executor,
../nimbus/common/common,
../nimbus/db/opts,
../nimbus/db/core_db/persistent,
configuration # must be late (compilation annoyance)
@ -52,7 +53,8 @@ proc validateBlock(com: CommonRef, blockNumber: BlockNumber): BlockNumber =
proc main() {.used.} =
let
conf = getConfiguration()
com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, conf.dataDir))
com = CommonRef.new(newCoreDbRef(
DefaultDbPersistent, conf.dataDir, DbOptions.init()))
# move head to block number ...
if conf.head == 0.u256:


@ -13,6 +13,7 @@ import
../nimbus/[tracer, config],
../nimbus/core/chain,
../nimbus/common/common,
../nimbus/db/opts,
../nimbus/db/core_db/persistent
proc dumpTest(com: CommonRef, blockNumber: int) =
@ -58,7 +59,8 @@ proc main() {.used.} =
# nimbus --rpcapi: eth, debug --prune: archive
var conf = makeConfig()
let db = newCoreDbRef(DefaultDbPersistent, string conf.dataDir)
let db = newCoreDbRef(
DefaultDbPersistent, string conf.dataDir, DbOptions.init())
let com = CommonRef.new(db)
com.dumpTest(97)


@ -16,6 +16,7 @@ import
eth/common,
results,
unittest2,
../../nimbus/db/opts,
../../nimbus/db/aristo/[
aristo_check,
aristo_debug,
@ -100,7 +101,7 @@ iterator quadripartite(td: openArray[ProofTrieData]): LeafQuartet =
proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
let db = block:
if 0 < rdbPath.len:
let rc = AristoDbRef.init(RdbBackendRef, rdbPath)
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
xCheckRc rc.error == 0
rc.value
else:


@ -16,6 +16,7 @@ import
results,
unittest2,
stew/endians2,
../../nimbus/db/opts,
../../nimbus/db/aristo/[
aristo_check,
aristo_debug,
@ -345,7 +346,7 @@ proc testTxMergeAndDeleteOneByOne*(
# Start with brand new persistent database.
db = block:
if 0 < rdbPath.len:
let rc = AristoDbRef.init(RdbBackendRef, rdbPath)
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
xCheckRc rc.error == 0
rc.value
else:
@ -453,7 +454,7 @@ proc testTxMergeAndDeleteSubTree*(
# Start with brand new persistent database.
db = block:
if 0 < rdbPath.len:
let rc = AristoDbRef.init(RdbBackendRef, rdbPath)
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
xCheckRc rc.error == 0
rc.value
else:
@ -555,7 +556,7 @@ proc testTxMergeProofAndKvpList*(
db = block:
# New DB with disabled filter slots management
if 0 < rdbPath.len:
let rc = AristoDbRef.init(RdbBackendRef, rdbPath)
let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
xCheckRc rc.error == 0
rc.value
else:


@ -16,6 +16,7 @@ import
eth/common,
results,
unittest2,
../nimbus/db/opts,
../nimbus/db/core_db/persistent,
../nimbus/core/chain,
./replay/pp,
@ -157,7 +158,7 @@ proc initRunnerDB(
# Resolve for static `dbType`
case dbType:
of AristoDbMemory: AristoDbMemory.newCoreDbRef()
of AristoDbRocks: AristoDbRocks.newCoreDbRef path
of AristoDbRocks: AristoDbRocks.newCoreDbRef(path, DbOptions.init())
of AristoDbVoid: AristoDbVoid.newCoreDbRef()
else: raiseAssert "Oops"


@ -31,6 +31,7 @@ import
../nimbus/core/chain,
../nimbus/common/common,
../nimbus/rpc,
../nimbus/db/opts,
../nimbus/db/core_db,
../nimbus/db/core_db/persistent,
../nimbus/db/state_db/base,
@ -145,7 +146,8 @@ proc rpcGetProofsTrackStateChangesMain*() =
test "Test tracking the changes introduced in every block":
let com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, DATABASE_PATH))
let com = CommonRef.new(newCoreDbRef(
DefaultDbPersistent, DATABASE_PATH, DbOptions.init()))
com.initializeEmptyDb()
let


@ -11,6 +11,7 @@
import
json,
../nimbus/common/common, # must be early (compilation annoyance)
../nimbus/db/opts,
../nimbus/db/core_db/persistent,
../nimbus/[config, tracer, vm_types]
@ -57,7 +58,8 @@ proc main() {.used.} =
# nimbus --rpc-api: eth, debug --prune: archive
var conf = makeConfig()
let db = newCoreDbRef(DefaultDbPersistent, string conf.dataDir)
let db = newCoreDbRef(
DefaultDbPersistent, string conf.dataDir, DbOptions.init())
let com = CommonRef.new(db)
com.dumpTest(97)