Add some basic rocksdb options to command line (#2286)

These options are there mainly to drive experiments, and are therefore
hidden.

One thing that this PR brings in is an initial set of caches and buffers for rocksdb - the set that I've been using during various performance tests to get to a viable baseline performance level.
This commit is contained in:
Jacek Sieka 2024-06-05 17:08:29 +02:00 committed by GitHub
parent 95a4adc1e8
commit c876729c4d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 148 additions and 33 deletions

View File

@ -26,7 +26,8 @@ import
], ],
eth/[common, net/utils, net/nat, p2p/bootnodes, p2p/enode, p2p/discoveryv5/enr], eth/[common, net/utils, net/nat, p2p/bootnodes, p2p/enode, p2p/discoveryv5/enr],
"."/[constants, vm_compile_info, version], "."/[constants, vm_compile_info, version],
common/chain_config common/chain_config,
db/opts
export net export net
@ -380,6 +381,30 @@ type
defaultValueDesc: $ProtocolFlag.Eth defaultValueDesc: $ProtocolFlag.Eth
name: "protocols" .}: seq[string] name: "protocols" .}: seq[string]
rocksdbMaxOpenFiles {.
hidden
defaultValue: defaultMaxOpenFiles
defaultValueDesc: $defaultMaxOpenFiles
name: "debug-rocksdb-max-open-files".}: int
rocksdbWriteBufferSize {.
hidden
defaultValue: defaultWriteBufferSize
defaultValueDesc: $defaultWriteBufferSize
name: "debug-rocksdb-write-buffer-size".}: int
rocksdbRowCacheSize {.
hidden
defaultValue: defaultRowCacheSize
defaultValueDesc: $defaultRowCacheSize
name: "debug-rocksdb-row-cache-size".}: int
rocksdbBlockCacheSize {.
hidden
defaultValue: defaultBlockCacheSize
defaultValueDesc: $defaultBlockCacheSize
name: "debug-rocksdb-block-cache-size".}: int
case cmd* {. case cmd* {.
command command
defaultValue: NimbusCmd.noCommand }: NimbusCmd defaultValue: NimbusCmd.noCommand }: NimbusCmd
@ -751,6 +776,14 @@ func httpServerEnabled*(conf: NimbusConf): bool =
func era1Dir*(conf: NimbusConf): OutDir = func era1Dir*(conf: NimbusConf): OutDir =
conf.era1DirOpt.get(OutDir(conf.dataDir.string & "/era1")) conf.era1DirOpt.get(OutDir(conf.dataDir.string & "/era1"))
func dbOptions*(conf: NimbusConf): DbOptions =
  ## Bundle the hidden `--debug-rocksdb-*` command line flags into a
  ## `DbOptions` value for handing over to the database layer.
  result = DbOptions.init(
    maxOpenFiles = conf.rocksdbMaxOpenFiles,
    writeBufferSize = conf.rocksdbWriteBufferSize,
    rowCacheSize = conf.rocksdbRowCacheSize,
    blockCacheSize = conf.rocksdbBlockCacheSize,
  )
# KLUDGE: The `load()` template does currently not work within any exception # KLUDGE: The `load()` template does currently not work within any exception
# annotated environment. # annotated environment.
{.pop.} {.pop.}

View File

@ -23,7 +23,8 @@ import
rocksdb, rocksdb,
../aristo_desc, ../aristo_desc,
./rocks_db/rdb_desc, ./rocks_db/rdb_desc,
"."/[rocks_db, memory_only] "."/[rocks_db, memory_only],
../../opts
export export
RdbBackendRef, RdbBackendRef,
@ -35,9 +36,10 @@ export
proc newAristoRdbDbRef( proc newAristoRdbDbRef(
basePath: string; basePath: string;
opts: DbOptions;
): Result[AristoDbRef, AristoError]= ): Result[AristoDbRef, AristoError]=
let let
be = ? rocksDbAristoBackend(basePath) be = ? rocksDbBackend(basePath, opts)
vTop = block: vTop = block:
let rc = be.getTuvFn() let rc = be.getTuvFn()
if rc.isErr: if rc.isErr:
@ -58,12 +60,13 @@ proc init*[W: RdbBackendRef](
T: type AristoDbRef; T: type AristoDbRef;
B: type W; B: type W;
basePath: string; basePath: string;
opts: DbOptions
): Result[T, AristoError] = ): Result[T, AristoError] =
## Generic constructor, `basePath` argument is ignored for memory backend ## Generic constructor, `basePath` argument is ignored for memory backend
## databases (which also unconditionally succeed initialising.) ## databases (which also unconditionally succeed initialising.)
## ##
when B is RdbBackendRef: when B is RdbBackendRef:
basePath.newAristoRdbDbRef() basePath.newAristoRdbDbRef opts
proc getRocksDbFamily*( proc getRocksDbFamily*(
gdb: GuestDbRef; gdb: GuestDbRef;

View File

@ -35,11 +35,10 @@ import
../aristo_desc/desc_backend, ../aristo_desc/desc_backend,
../aristo_blobify, ../aristo_blobify,
./init_common, ./init_common,
./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk] ./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk],
../../opts
const const
maxOpenFiles = 512 ## Rocks DB setup, open files limit
extraTraceMessages = false extraTraceMessages = false
## Enabled additional logging noise ## Enabled additional logging noise
@ -265,13 +264,16 @@ proc closeFn(db: RdbBackendRef): CloseFn =
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc rocksDbAristoBackend*(path: string): Result[BackendRef,AristoError] = proc rocksDbBackend*(
path: string;
opts: DbOptions
): Result[BackendRef,AristoError] =
let db = RdbBackendRef( let db = RdbBackendRef(
beKind: BackendRocksDB) beKind: BackendRocksDB)
# Initialise RocksDB # Initialise RocksDB
block: block:
let rc = db.rdb.init(path, maxOpenFiles) let rc = db.rdb.init(path, opts)
if rc.isErr: if rc.isErr:
when extraTraceMessages: when extraTraceMessages:
trace logTxt "constructor failed", trace logTxt "constructor failed",

View File

@ -18,7 +18,8 @@ import
rocksdb, rocksdb,
results, results,
../../aristo_desc, ../../aristo_desc,
./rdb_desc ./rdb_desc,
../../../opts
const const
extraTraceMessages = false extraTraceMessages = false
@ -38,7 +39,7 @@ when extraTraceMessages:
proc init*( proc init*(
rdb: var RdbInst; rdb: var RdbInst;
basePath: string; basePath: string;
openMax: int; opts: DbOptions;
): Result[void,(AristoError,string)] = ): Result[void,(AristoError,string)] =
## Constructor code inspired by `RocksStoreRef.init()` from ## Constructor code inspired by `RocksStoreRef.init()` from
## kvstore_rocksdb.nim ## kvstore_rocksdb.nim
@ -53,13 +54,29 @@ proc init*(
return err((RdbBeCantCreateDataDir, "")) return err((RdbBeCantCreateDataDir, ""))
let let
cfs = @[initColFamilyDescriptor AristoFamily] & cfOpts = defaultColFamilyOptions()
RdbGuest.mapIt(initColFamilyDescriptor $it)
opts = defaultDbOptions() if opts.writeBufferSize > 0:
opts.setMaxOpenFiles(openMax) cfOpts.setWriteBufferSize(opts.writeBufferSize)
let
cfs = @[initColFamilyDescriptor(AristoFamily, cfOpts)] &
RdbGuest.mapIt(initColFamilyDescriptor($it, cfOpts))
dbOpts = defaultDbOptions()
dbOpts.setMaxOpenFiles(opts.maxOpenFiles)
dbOpts.setMaxBytesForLevelBase(opts.writeBufferSize)
if opts.rowCacheSize > 0:
dbOpts.setRowCache(cacheCreateLRU(opts.rowCacheSize))
if opts.blockCacheSize > 0:
let tableOpts = defaultTableOptions()
tableOpts.setBlockCache(cacheCreateLRU(opts.blockCacheSize))
dbOpts.setBlockBasedTableFactory(tableOpts)
# Reserve a family corner for `Aristo` on the database # Reserve a family corner for `Aristo` on the database
let baseDb = openRocksDb(dataDir, opts, columnFamilies=cfs).valueOr: let baseDb = openRocksDb(dataDir, dbOpts, columnFamilies=cfs).valueOr:
let errSym = RdbBeDriverInitError let errSym = RdbBeDriverInitError
when extraTraceMessages: when extraTraceMessages:
trace logTxt "init failed", dataDir, openMax, error=errSym, info=error trace logTxt "init failed", dataDir, openMax, error=errSym, info=error

View File

@ -20,7 +20,8 @@ import
../../kvt/kvt_persistent as use_kvt, ../../kvt/kvt_persistent as use_kvt,
../base, ../base,
./aristo_db, ./aristo_db,
./aristo_db/[common_desc, handlers_aristo] ./aristo_db/[common_desc, handlers_aristo],
../../opts
include include
./aristo_db/aristo_replicate ./aristo_db/aristo_replicate
@ -37,9 +38,9 @@ const
# Public constructor # Public constructor
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc newAristoRocksDbCoreDbRef*(path: string): CoreDbRef = proc newAristoRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef =
let let
adb = AristoDbRef.init(use_ari.RdbBackendRef, path).expect aristoFail adb = AristoDbRef.init(use_ari.RdbBackendRef, path, opts).expect aristoFail
gdb = adb.guestDb().valueOr: GuestDbRef(nil) gdb = adb.guestDb().valueOr: GuestDbRef(nil)
kdb = KvtDbRef.init(use_kvt.RdbBackendRef, path, gdb).expect kvtFail kdb = KvtDbRef.init(use_kvt.RdbBackendRef, path, gdb).expect kvtFail
AristoDbRocks.create(kdb, adb) AristoDbRocks.create(kdb, adb)

View File

@ -25,7 +25,8 @@ import
../aristo, ../aristo,
./memory_only, ./memory_only,
base_iterators_persistent, base_iterators_persistent,
./backend/aristo_rocksdb ./backend/aristo_rocksdb,
../opts
export export
memory_only, memory_only,
@ -34,13 +35,14 @@ export
proc newCoreDbRef*( proc newCoreDbRef*(
dbType: static[CoreDbType]; # Database type symbol dbType: static[CoreDbType]; # Database type symbol
path: string; # Storage path for database path: string; # Storage path for database
opts: DbOptions;
): CoreDbRef = ): CoreDbRef =
## Constructor for persistent type DB ## Constructor for persistent type DB
## ##
## Note: Using legacy notation `newCoreDbRef()` rather than ## Note: Using legacy notation `newCoreDbRef()` rather than
## `CoreDbRef.init()` because of compiler coughing. ## `CoreDbRef.init()` because of compiler coughing.
when dbType == AristoDbRocks: when dbType == AristoDbRocks:
newAristoRocksDbCoreDbRef path newAristoRocksDbCoreDbRef path, opts
else: else:
{.error: "Unsupported dbType for persistent newCoreDbRef()".} {.error: "Unsupported dbType for persistent newCoreDbRef()".}

42
nimbus/db/opts.nim Normal file
View File

@ -0,0 +1,42 @@
# Nimbus
# Copyright (c) 2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}

import results
export results

const
  # Default sizes chosen following the RocksDB tuning guide:
  # https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
  defaultMaxOpenFiles* = 512
  defaultWriteBufferSize* = 64 * 1024 * 1024
  defaultRowCacheSize* = 512 * 1024 * 1024
  defaultBlockCacheSize* = 256 * 1024 * 1024

type DbOptions* = object
  ## Tuning knobs that are transported from the command line down to the
  ## database layer.
  maxOpenFiles*: int     # open-file budget for the backend
  writeBufferSize*: int  # bytes buffered in memory before flushing
  rowCacheSize*: int     # bytes for the row cache; 0 leaves it disabled
  blockCacheSize*: int   # bytes for the block cache; 0 leaves it disabled

func init*(
    T: type DbOptions,
    maxOpenFiles = defaultMaxOpenFiles,
    writeBufferSize = defaultWriteBufferSize,
    rowCacheSize = defaultRowCacheSize,
    blockCacheSize = defaultBlockCacheSize,
): T =
  ## Construct a `DbOptions`, substituting the tuned defaults for every
  ## field that is not explicitly overridden.
  T(
    maxOpenFiles: maxOpenFiles,
    writeBufferSize: writeBufferSize,
    rowCacheSize: rowCacheSize,
    blockCacheSize: blockCacheSize,
  )

View File

@ -239,7 +239,8 @@ proc run(nimbus: NimbusNode, conf: NimbusConf) =
# Resolve statically for database type # Resolve statically for database type
case conf.chainDbMode: case conf.chainDbMode:
of Aristo,AriPrune: of Aristo,AriPrune:
AristoDbRocks.newCoreDbRef(string conf.dataDir) AristoDbRocks.newCoreDbRef(string conf.dataDir, conf.dbOptions())
let com = CommonRef.new( let com = CommonRef.new(
db = coreDB, db = coreDB,
pruneHistory = (conf.chainDbMode == AriPrune), pruneHistory = (conf.chainDbMode == AriPrune),

View File

@ -16,6 +16,7 @@
import import
stint, stint,
../nimbus/common/common, ../nimbus/common/common,
../nimbus/db/opts,
../nimbus/db/core_db/persistent, ../nimbus/db/core_db/persistent,
../nimbus/core/executor, ../nimbus/core/executor,
../nimbus/[vm_state, vm_types], ../nimbus/[vm_state, vm_types],
@ -47,7 +48,8 @@ proc dumpDebug(com: CommonRef, blockNumber: UInt256) =
proc main() {.used.} = proc main() {.used.} =
let conf = getConfiguration() let conf = getConfiguration()
let com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, conf.dataDir)) let com = CommonRef.new(
newCoreDbRef(DefaultDbPersistent, conf.dataDir, DbOptions.init()))
if conf.head != 0.u256: if conf.head != 0.u256:
dumpDebug(com, conf.head) dumpDebug(com, conf.head)

View File

@ -16,6 +16,7 @@ import
../nimbus/errors, ../nimbus/errors,
../nimbus/core/chain, ../nimbus/core/chain,
../nimbus/common, ../nimbus/common,
../nimbus/db/opts,
../nimbus/db/[core_db/persistent, storage_types], ../nimbus/db/[core_db/persistent, storage_types],
configuration # must be late (compilation annoyance) configuration # must be late (compilation annoyance)
@ -54,7 +55,7 @@ proc main() {.used.} =
let conf = configuration.getConfiguration() let conf = configuration.getConfiguration()
let com = CommonRef.new( let com = CommonRef.new(
newCoreDbRef(DefaultDbPersistent, conf.dataDir), newCoreDbRef(DefaultDbPersistent, conf.dataDir, DbOptions.init()),
conf.netId, networkParams(conf.netId)) conf.netId, networkParams(conf.netId))
# move head to block number ... # move head to block number ...

View File

@ -13,6 +13,7 @@ import
../nimbus/[vm_state, vm_types], ../nimbus/[vm_state, vm_types],
../nimbus/core/executor, ../nimbus/core/executor,
../nimbus/common/common, ../nimbus/common/common,
../nimbus/db/opts,
../nimbus/db/core_db/persistent, ../nimbus/db/core_db/persistent,
configuration # must be late (compilation annoyance) configuration # must be late (compilation annoyance)
@ -52,7 +53,8 @@ proc validateBlock(com: CommonRef, blockNumber: BlockNumber): BlockNumber =
proc main() {.used.} = proc main() {.used.} =
let let
conf = getConfiguration() conf = getConfiguration()
com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, conf.dataDir)) com = CommonRef.new(newCoreDbRef(
DefaultDbPersistent, conf.dataDir, DbOptions.init()))
# move head to block number ... # move head to block number ...
if conf.head == 0.u256: if conf.head == 0.u256:

View File

@ -13,6 +13,7 @@ import
../nimbus/[tracer, config], ../nimbus/[tracer, config],
../nimbus/core/chain, ../nimbus/core/chain,
../nimbus/common/common, ../nimbus/common/common,
../nimbus/db/opts,
../nimbus/db/core_db/persistent ../nimbus/db/core_db/persistent
proc dumpTest(com: CommonRef, blockNumber: int) = proc dumpTest(com: CommonRef, blockNumber: int) =
@ -58,7 +59,8 @@ proc main() {.used.} =
# nimbus --rpcapi: eth, debug --prune: archive # nimbus --rpcapi: eth, debug --prune: archive
var conf = makeConfig() var conf = makeConfig()
let db = newCoreDbRef(DefaultDbPersistent, string conf.dataDir) let db = newCoreDbRef(
DefaultDbPersistent, string conf.dataDir, DbOptions.init())
let com = CommonRef.new(db) let com = CommonRef.new(db)
com.dumpTest(97) com.dumpTest(97)

View File

@ -16,6 +16,7 @@ import
eth/common, eth/common,
results, results,
unittest2, unittest2,
../../nimbus/db/opts,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_check, aristo_check,
aristo_debug, aristo_debug,
@ -100,7 +101,7 @@ iterator quadripartite(td: openArray[ProofTrieData]): LeafQuartet =
proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] = proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
let db = block: let db = block:
if 0 < rdbPath.len: if 0 < rdbPath.len:
let rc = AristoDbRef.init(RdbBackendRef, rdbPath) let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
xCheckRc rc.error == 0 xCheckRc rc.error == 0
rc.value rc.value
else: else:

View File

@ -16,6 +16,7 @@ import
results, results,
unittest2, unittest2,
stew/endians2, stew/endians2,
../../nimbus/db/opts,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_check, aristo_check,
aristo_debug, aristo_debug,
@ -345,7 +346,7 @@ proc testTxMergeAndDeleteOneByOne*(
# Start with brand new persistent database. # Start with brand new persistent database.
db = block: db = block:
if 0 < rdbPath.len: if 0 < rdbPath.len:
let rc = AristoDbRef.init(RdbBackendRef, rdbPath) let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
xCheckRc rc.error == 0 xCheckRc rc.error == 0
rc.value rc.value
else: else:
@ -453,7 +454,7 @@ proc testTxMergeAndDeleteSubTree*(
# Start with brand new persistent database. # Start with brand new persistent database.
db = block: db = block:
if 0 < rdbPath.len: if 0 < rdbPath.len:
let rc = AristoDbRef.init(RdbBackendRef, rdbPath) let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
xCheckRc rc.error == 0 xCheckRc rc.error == 0
rc.value rc.value
else: else:
@ -555,7 +556,7 @@ proc testTxMergeProofAndKvpList*(
db = block: db = block:
# New DB with disabled filter slots management # New DB with disabled filter slots management
if 0 < rdbPath.len: if 0 < rdbPath.len:
let rc = AristoDbRef.init(RdbBackendRef, rdbPath) let rc = AristoDbRef.init(RdbBackendRef, rdbPath, DbOptions.init())
xCheckRc rc.error == 0 xCheckRc rc.error == 0
rc.value rc.value
else: else:

View File

@ -16,6 +16,7 @@ import
eth/common, eth/common,
results, results,
unittest2, unittest2,
../nimbus/db/opts,
../nimbus/db/core_db/persistent, ../nimbus/db/core_db/persistent,
../nimbus/core/chain, ../nimbus/core/chain,
./replay/pp, ./replay/pp,
@ -157,7 +158,7 @@ proc initRunnerDB(
# Resolve for static `dbType` # Resolve for static `dbType`
case dbType: case dbType:
of AristoDbMemory: AristoDbMemory.newCoreDbRef() of AristoDbMemory: AristoDbMemory.newCoreDbRef()
of AristoDbRocks: AristoDbRocks.newCoreDbRef path of AristoDbRocks: AristoDbRocks.newCoreDbRef(path, DbOptions.init())
of AristoDbVoid: AristoDbVoid.newCoreDbRef() of AristoDbVoid: AristoDbVoid.newCoreDbRef()
else: raiseAssert "Oops" else: raiseAssert "Oops"

View File

@ -31,6 +31,7 @@ import
../nimbus/core/chain, ../nimbus/core/chain,
../nimbus/common/common, ../nimbus/common/common,
../nimbus/rpc, ../nimbus/rpc,
../nimbus/db/opts,
../nimbus/db/core_db, ../nimbus/db/core_db,
../nimbus/db/core_db/persistent, ../nimbus/db/core_db/persistent,
../nimbus/db/state_db/base, ../nimbus/db/state_db/base,
@ -145,7 +146,8 @@ proc rpcGetProofsTrackStateChangesMain*() =
test "Test tracking the changes introduced in every block": test "Test tracking the changes introduced in every block":
let com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, DATABASE_PATH)) let com = CommonRef.new(newCoreDbRef(
DefaultDbPersistent, DATABASE_PATH, DbOptions.init()))
com.initializeEmptyDb() com.initializeEmptyDb()
let let

View File

@ -11,6 +11,7 @@
import import
json, json,
../nimbus/common/common, # must be early (compilation annoyance) ../nimbus/common/common, # must be early (compilation annoyance)
../nimbus/db/opts,
../nimbus/db/core_db/persistent, ../nimbus/db/core_db/persistent,
../nimbus/[config, tracer, vm_types] ../nimbus/[config, tracer, vm_types]
@ -57,7 +58,8 @@ proc main() {.used.} =
# nimbus --rpc-api: eth, debug --prune: archive # nimbus --rpc-api: eth, debug --prune: archive
var conf = makeConfig() var conf = makeConfig()
let db = newCoreDbRef(DefaultDbPersistent, string conf.dataDir) let db = newCoreDbRef(
DefaultDbPersistent, string conf.dataDir, DbOptions.init())
let com = CommonRef.new(db) let com = CommonRef.new(db)
com.dumpTest(97) com.dumpTest(97)