CoreDb related cleanup and maintenance fixes (#2360)

* Fix initialiser

why:
  Possible crash otherwise (app profiling, tracer, etc.)

* Update column family options processing

why:
  The same options processing now applies to `kvt` as to `aristo`

* Move `AristoDbDualRocks` backend type to the test suite

why:
  So it is not available for production

* Fix typos in API jump table

why:
  The table is used for tracing and app profiling only and needed updating

* Purged CoreDb legacy API

why:
  Not needed anymore; it was transitional and already disabled.

* Rename `flush` argument to `eradicate` in a DB close context

why:
  The word `eradicate` leaves no doubt about what is meant (see the usage
  sketch after this list)

* Rename `stoFlush()` -> `stoDelete()`

* Rename `core_apps_newapi` -> `core_apps` (not so new anymore)
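
Roughly, the renames surface at call sites as in the fragment below (a minimal
sketch based on the signatures in the diffs that follow; `db`, `acc` and
`address` are assumed to be in scope, error handling elided):

    # Close the database and delete its files on disk (formerly `flush = true`).
    db.finish(eradicate = true)

    # Recursively delete the storage trie of an account (formerly `stoFlush()`).
    discard acc.stoDelete(address)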
Jordan Hrycaj 2024-06-14 11:19:48 +00:00 committed by GitHub
parent 8a0772ac10
commit debba5a620
31 changed files with 203 additions and 485 deletions

View File

@@ -126,12 +126,12 @@ type
   AristoApiFinishFn* =
     proc(db: AristoDbRef;
-         flush = false;
+         eradicate = false;
         ) {.noRaise.}
-      ## Backend destructor. The argument `flush` indicates that a full
+      ## Backend destructor. The argument `eradicate` indicates that a full
       ## database deletion is requested. If set `false` the outcome might
       ## differ depending on the type of backend (e.g. the `BackendMemory`
-      ## backend will always flush on close.)
+      ## backend will always eradicate on close.)
       ##
       ## In case of distributed descriptors accessing the same backend, all
       ## distributed descriptors will be destroyed.

@@ -439,11 +439,11 @@ type
     AristoApiProfBeGetVtxFn = "be/getVtx"
     AristoApiProfBeGetKeyFn = "be/getKey"
-    AristoApiProfBeGetIdgFn = "be/getIfg"
+    AristoApiProfBeGetTuvFn = "be/getTuv"
     AristoApiProfBeGetLstFn = "be/getLst"
     AristoApiProfBePutVtxFn = "be/putVtx"
     AristoApiProfBePutKeyFn = "be/putKey"
-    AristoApiProfBePutIdgFn = "be/putIdg"
+    AristoApiProfBePutTuvFn = "be/putTuv"
     AristoApiProfBePutLstFn = "be/putLst"
     AristoApiProfBePutEndFn = "be/putEnd"

@@ -757,11 +757,11 @@ func init*(
         result = be.getKeyFn(a)
   data.list[AristoApiProfBeGetKeyFn.ord].masked = true

-  beDup.getIdgFn =
+  beDup.getTuvFn =
     proc(): auto =
-      AristoApiProfBeGetIdgFn.profileRunner:
-        result = be.getIdgFn()
-  data.list[AristoApiProfBeGetIdgFn.ord].masked = true
+      AristoApiProfBeGetTuvFn.profileRunner:
+        result = be.getTuvFn()
+  data.list[AristoApiProfBeGetTuvFn.ord].masked = true

   beDup.getLstFn =
     proc(): auto =

@@ -781,11 +781,11 @@ func init*(
         be.putKeyFn(a,b)
   data.list[AristoApiProfBePutKeyFn.ord].masked = true

-  beDup.putIdgFn =
-    proc(a: PutHdlRef; b: openArray[VertexID]) =
-      AristoApiProfBePutIdgFn.profileRunner:
-        be.putIdgFn(a,b)
-  data.list[AristoApiProfBePutIdgFn.ord].masked = true
+  beDup.putTuvFn =
+    proc(a: PutHdlRef; b: VertexID) =
+      AristoApiProfBePutTuvFn.profileRunner:
+        be.putTuvFn(a,b)
+  data.list[AristoApiProfBePutTuvFn.ord].masked = true

   beDup.putLstFn =
     proc(a: PutHdlRef; b: SavedState) =
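
In isolation, the jump-table profiling pattern above works roughly like the
schematic sketch below (hypothetical stand-in names; the real code wraps each
backend closure via `aristo_profile.profileRunner`, keyed by the
`AristoApiProfBe...Fn` enum tags):

    import std/[tables, times]

    var profTab: Table[string, Duration]

    template profiled(tag: string; body: untyped) =
      # Time `body` and accumulate the elapsed time under `tag`.
      let t0 = getTime()
      body
      profTab.mgetOrPut(tag, DurationZero) += getTime() - t0

    proc getTuvFn(): int = 42      # stand-in for the wrapped backend closure

    var res: int
    profiled "be/getTuv":
      res = getTuvFn()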

View File

@@ -82,11 +82,11 @@ type
   # -------------
   CloseFn* =
-    proc(flush: bool) {.gcsafe, raises: [].}
-      ## Generic destructor for the `Aristo DB` backend. The argument `flush`
-      ## indicates that a full database deletion is requested. If passed
-      ## `false` the outcome might differ depending on the type of backend
-      ## (e.g. in-memory backends would flush on close.)
+    proc(eradicate: bool) {.gcsafe, raises: [].}
+      ## Generic destructor for the `Aristo DB` backend. The argument
+      ## `eradicate` indicates that a full database deletion is requested. If
+      ## passed `false` the outcome might differ depending on the type of
+      ## backend (e.g. in-memory backends will always eradicate on close.)
   # -------------

View File

@@ -85,11 +85,11 @@ proc init*(
   AristoDbRef.init VoidBackendRef

-proc finish*(db: AristoDbRef; flush = false) =
-  ## Backend destructor. The argument `flush` indicates that a full database
-  ## deletion is requested. If set `false` the outcome might differ depending
-  ## on the type of backend (e.g. the `BackendMemory` backend will always
-  ## flush on close.)
+proc finish*(db: AristoDbRef; eradicate = false) =
+  ## Backend destructor. The argument `eradicate` indicates that a full
+  ## database deletion is requested. If set `false` the outcome might differ
+  ## depending on the type of backend (e.g. the `BackendMemory` backend will
+  ## always eradicate on close.)
   ##
   ## In case of distributed descriptors accessing the same backend, all
   ## distributed descriptors will be destroyed.

@@ -97,7 +97,7 @@ proc finish*(db: AristoDbRef; flush = false) =
   ## This distructor may be used on already *destructed* descriptors.
   ##
   if not db.backend.isNil:
-    db.backend.closeFn flush
+    db.backend.closeFn eradicate
   discard db.getCentre.forgetOthers()

 # ------------------------------------------------------------------------------

View File

@@ -45,7 +45,7 @@ proc newAristoRdbDbRef(
     vTop = block:
       let rc = be.getTuvFn()
       if rc.isErr:
-        be.closeFn(flush = false)
+        be.closeFn(eradicate = false)
         return err(rc.error)
       rc.value
   ok AristoDbRef(

View File

@@ -225,8 +225,8 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =
 proc closeFn(db: RdbBackendRef): CloseFn =
   result =
-    proc(flush: bool) =
-      db.rdb.destroy(flush)
+    proc(eradicate: bool) =
+      db.rdb.destroy(eradicate)

 # ------------------------------------------------------------------------------
 # Private functions: hosting interface changes

View File

@@ -231,11 +231,11 @@ proc reinit*(
   ok guestCols

-proc destroy*(rdb: var RdbInst; flush: bool) =
+proc destroy*(rdb: var RdbInst; eradicate: bool) =
   ## Destructor
   rdb.baseDb.close()

-  if flush:
+  if eradicate:
     try:
       rdb.dataDir.removeDir

View File

@@ -143,9 +143,9 @@ proc baseMethods(db: AristoCoreDbRef): CoreDbBaseFns =
     ok()

   CoreDbBaseFns(
-    destroyFn: proc(flush: bool) =
-      aBase.destroy(flush)
-      kBase.destroy(flush),
+    destroyFn: proc(eradicate: bool) =
+      aBase.destroy(eradicate)
+      kBase.destroy(eradicate),

     levelFn: proc(): int =
       aBase.getLevel,

View File

@@ -367,8 +367,8 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =
       return err(rc.error.toError(base, info))
     ok()

-  proc accStoFlush(address: EthAddress): CoreDbRc[void] =
-    const info = "stoFlushFn()"
+  proc accStoDelete(address: EthAddress): CoreDbRc[void] =
+    const info = "stoDeleteFn()"
     let
       key = address.keccakHash.data

@@ -405,8 +405,8 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =
     deleteFn: proc(address: EthAddress): CoreDbRc[void] =
       accDelete(address),

-    stoFlushFn: proc(address: EthAddress): CoreDbRc[void] =
-      accStoFlush(address),
+    stoDeleteFn: proc(address: EthAddress): CoreDbRc[void] =
+      accStoDelete(address),

     mergeFn: proc(acc: CoreDbAccount): CoreDbRc[void] =
       accMerge(acc),

@@ -702,8 +702,8 @@ proc persistent*(
 # Public constructors and related
 # ------------------------------------------------------------------------------

-proc destroy*(base: AristoBaseRef; flush: bool) =
-  base.api.finish(base.ctx.mpt, flush)
+proc destroy*(base: AristoBaseRef; eradicate: bool) =
+  base.api.finish(base.ctx.mpt, eradicate)

 func init*(T: type AristoBaseRef; db: CoreDbRef; adb: AristoDbRef): T =

View File

@@ -223,8 +223,8 @@ proc newKvtHandler*(
       ok(base.cache)

-proc destroy*(base: KvtBaseRef; flush: bool) =
-  base.api.finish(base.kdb, flush)       # Close descriptor
+proc destroy*(base: KvtBaseRef; eradicate: bool) =
+  base.api.finish(base.kdb, eradicate)   # Close descriptor

 func init*(T: type KvtBaseRef; db: CoreDbRef; kdb: KvtDbRef): T =

View File

@@ -719,7 +719,7 @@ proc traceRecorder(
       # Delete from DB
       api.delTree(mpt, root, accPath).isOkOr:
         when EnableDebugLog:
-          debug logTxt, level, flags, key, error
+          debug logTxt, level, flags, error
         return err(error)

       # Update journal

View File

@@ -48,7 +48,7 @@ proc newAristoRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef =
   AristoDbRocks.create(kdb, adb)

 proc newAristoDualRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef =
-  ## This is mainly for debugging. The KVT is run on a completely separate
+  ## This is only for debugging. The KVT is run on a completely separate
   ## database backend.
   let
     adb = AristoDbRef.init(use_ari.RdbBackendRef, path, opts).valueOr:

View File

@@ -20,9 +20,6 @@ from ../aristo
   import EmptyBlob, PayloadRef, isValid

 const
-  ProvideLegacyAPI = false
-    ## Enable legacy API. For now everybody would want this enabled.
-
   EnableApiTracking = false
     ## When enabled, functions using this tracking facility need to import
     ## `chronicles`, as well. Tracking is enabled by setting `true` the flags

@@ -61,19 +58,9 @@ export
   PayloadRef

 const
-  CoreDbProvideLegacyAPI* = ProvideLegacyAPI
   CoreDbEnableApiTracking* = EnableApiTracking
   CoreDbEnableApiProfiling* = EnableApiTracking and EnableApiProfiling

-when ProvideLegacyAPI:
-  import
-    std/typetraits
-
-  type
-    TxWrapperApiError* = object of CoreDbApiError
-      ## For re-routing exception on tx/action template
-
-  export
-    CoreDbKvtRef, CoreDbMptRef, CoreDbPhkRef, CoreDbTxRef, CoreDbCaptRef
-
 when AutoValidateDescriptors:
   import ./base/validate

@@ -106,35 +93,6 @@ when EnableApiTracking:
   proc `$`(v: CoreDbColRef): string = v.toStr
   proc `$`(h: Hash256): string = h.toStr

-when ProvideLegacyAPI:
-  when EnableApiTracking:
-    proc `$`(k: CoreDbKvtRef): string = k.toStr
-
-  template setTrackLegaApi(
-      w: CoreDbApiTrackRef;
-      s: static[CoreDbFnInx];
-      code: untyped;
-        ) =
-    ## Template with code section that will be discarded if logging is
-    ## disabled at compile time when `EnableApiTracking` is `false`.
-    when EnableApiTracking:
-      w.beginLegaApi(s)
-      code
-    const api {.inject,used.} = s
-
-  template setTrackLegaApi*(
-      w: CoreDbApiTrackRef;
-      s: static[CoreDbFnInx];
-        ) =
-    w.setTrackLegaApi(s):
-      discard
-
-  template ifTrackLegaApi*(w: CoreDbApiTrackRef; code: untyped) =
-    when EnableApiTracking:
-      w.endLegaApiIf:
-        code
-
 template setTrackNewApi(
     w: CoreDxApiTrackRef;
     s: static[CoreDbFnInx];

@@ -286,16 +244,16 @@ proc backend*(dsc: CoreDxKvtRef | CoreDxMptRef): auto =
   result = dsc.methods.backendFn()
   dsc.ifTrackNewApi: debug newApiTxt, api, elapsed

-proc finish*(db: CoreDbRef; flush = false) =
-  ## Database destructor. If the argument `flush` is set `false`, the database
-  ## is left as-is and only the in-memory handlers are cleaned up.
+proc finish*(db: CoreDbRef; eradicate = false) =
+  ## Database destructor. If the argument `eradicate` is set `false`, the
+  ## database is left as-is and only the in-memory handlers are cleaned up.
   ##
   ## Otherwise the destructor is allowed to remove the database. This feature
   ## depends on the backend database. Currently, only the `AristoDbRocks` type
   ## backend removes the database on `true`.
   ##
   db.setTrackNewApi BaseFinishFn
-  db.methods.destroyFn flush
+  db.methods.destroyFn eradicate
   db.ifTrackNewApi: debug newApiTxt, api, elapsed

 proc `$$`*(e: CoreDbErrorRef): string =

@@ -746,7 +704,7 @@ proc delete*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
   result = acc.methods.deleteFn address
   acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result

-proc stoFlush*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
+proc stoDelete*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
   ## Recursively delete all data elements from the storage trie associated to
   ## the account identified by the argument `address`. After successful run,
   ## the storage trie will be empty.

@@ -757,8 +715,8 @@ proc stoFlush*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
   ## shared by several accounts whereas they are unique on the `Aristo`
   ## backend.
   ##
-  acc.setTrackNewApi AccStoFlushFn
-  result = acc.methods.stoFlushFn address
+  acc.setTrackNewApi AccStoDeleteFn
+  result = acc.methods.stoDeleteFn address
   acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result

@@ -959,206 +917,6 @@ proc forget*(cp: CoreDxCaptRef) =
   cp.methods.forgetFn()
   cp.ifTrackNewApi: debug newApiTxt, api, elapsed

-# ------------------------------------------------------------------------------
-# Public methods, legacy API
-# ------------------------------------------------------------------------------
-
-when ProvideLegacyAPI:
-
-  proc parent*[T: CoreDbKvtRef | CoreDbMptRef | CoreDbPhkRef |
-               CoreDbTxRef | CoreDbCaptRef](
-      cld: T): CoreDbRef =
-    ## Getter, common method for all sub-modules
-    result = cld.distinctBase.parent
-
-  # ----------------
-
-  proc kvt*(db: CoreDbRef): CoreDbKvtRef =
-    ## Legacy pseudo constructor, see `toKvt()` for production constructor
-    db.setTrackLegaApi LegaNewKvtFn
-    result = db.newKvt().CoreDbKvtRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed, result
-
-  proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): Blob =
-    kvt.setTrackLegaApi LegaKvtGetFn
-    result = kvt.distinctBase.getOrEmpty(key).expect $api
-    kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): void =
-    kvt.setTrackLegaApi LegaKvtDelFn
-    kvt.distinctBase.del(key).expect $api
-    kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr
-
-  proc put*(kvt: CoreDbKvtRef; key: openArray[byte]; val: openArray[byte]) =
-    kvt.setTrackLegaApi LegaKvtPutFn
-    kvt.distinctBase.parent.newKvt().put(key, val).expect $api
-    kvt.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr
-
-  proc contains*(kvt: CoreDbKvtRef; key: openArray[byte]): bool =
-    kvt.setTrackLegaApi LegaKvtContainsFn
-    result = kvt.distinctBase.hasKey(key).expect $api
-    kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  # ----------------
-
-  proc toMpt*(phk: CoreDbPhkRef): CoreDbMptRef =
-    phk.setTrackLegaApi LegaToMptFn
-    result = phk.distinctBase.toMpt.CoreDbMptRef
-    phk.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc mptPrune*(db: CoreDbRef; root: Hash256): CoreDbMptRef =
-    db.setTrackLegaApi LegaNewMptFn
-    let
-      trie = db.ctx.methods.newColFn(
-        CtGeneric, root, Opt.none(EthAddress)).valueOr:
-          raiseAssert error.prettyText() & ": " & $api
-      mpt = db.ctx.getMpt(trie).valueOr:
-        raiseAssert error.prettyText() & ": " & $api
-    result = mpt.CoreDbMptRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed, root
-
-  proc mptPrune*(db: CoreDbRef): CoreDbMptRef =
-    db.setTrackLegaApi LegaNewMptFn
-    result = db.ctx.getMpt(CtGeneric, Opt.none(EthAddress)).CoreDbMptRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  # ----------------
-
-  proc toPhk*(mpt: CoreDbMptRef): CoreDbPhkRef =
-    mpt.setTrackLegaApi LegaToPhkFn
-    result = mpt.distinctBase.toPhk.CoreDbPhkRef
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc phkPrune*(db: CoreDbRef; root: Hash256): CoreDbPhkRef =
-    db.setTrackLegaApi LegaNewPhkFn
-    let
-      trie = db.ctx.methods.newColFn(
-        CtGeneric, root, Opt.none(EthAddress)).valueOr:
-          raiseAssert error.prettyText() & ": " & $api
-      phk = db.ctx.getMpt(trie).valueOr:
-        raiseAssert error.prettyText() & ": " & $api
-    result = phk.toCoreDxPhkRef.CoreDbPhkRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed, root
-
-  proc phkPrune*(db: CoreDbRef): CoreDbPhkRef =
-    db.setTrackLegaApi LegaNewPhkFn
-    result = db.ctx.getMpt(
-      CtGeneric, Opt.none(EthAddress)).toCoreDxPhkRef.CoreDbPhkRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  # ----------------
-
-  proc get*(mpt: CoreDbMptRef; key: openArray[byte]): Blob =
-    mpt.setTrackLegaApi LegaMptGetFn
-    result = mpt.distinctBase.fetchOrEmpty(key).expect $api
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc get*(phk: CoreDbPhkRef; key: openArray[byte]): Blob =
-    phk.setTrackLegaApi LegaPhkGetFn
-    result = phk.distinctBase.fetchOrEmpty(key).expect $api
-    phk.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc del*(mpt: CoreDbMptRef; key: openArray[byte]) =
-    mpt.setTrackLegaApi LegaMptDelFn
-    mpt.distinctBase.delete(key).expect $api
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr
-
-  proc del*(phk: CoreDbPhkRef; key: openArray[byte]) =
-    phk.setTrackLegaApi LegaPhkDelFn
-    phk.distinctBase.delete(key).expect $api
-    phk.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr
-
-  proc put*(mpt: CoreDbMptRef; key: openArray[byte]; val: openArray[byte]) =
-    mpt.setTrackLegaApi LegaMptPutFn
-    mpt.distinctBase.merge(key, val).expect $api
-    mpt.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr
-
-  proc put*(phk: CoreDbPhkRef; key: openArray[byte]; val: openArray[byte]) =
-    phk.setTrackLegaApi LegaPhkPutFn
-    phk.distinctBase.merge(key, val).expect $api
-    phk.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr
-
-  proc contains*(mpt: CoreDbMptRef; key: openArray[byte]): bool =
-    mpt.setTrackLegaApi LegaMptContainsFn
-    result = mpt.distinctBase.hasPath(key).expect $api
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc contains*(phk: CoreDbPhkRef; key: openArray[byte]): bool =
-    phk.setTrackLegaApi LegaPhkContainsFn
-    result = phk.distinctBase.hasPath(key).expect $api
-    phk.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc rootHash*(mpt: CoreDbMptRef): Hash256 =
-    mpt.setTrackLegaApi LegaMptRootHashFn
-    result = mpt.distinctBase.methods.getColFn().state.valueOr:
-      raiseAssert error.prettyText() & ": " & $api
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, result
-
-  proc rootHash*(phk: CoreDbPhkRef): Hash256 =
-    phk.setTrackLegaApi LegaPhkRootHashFn
-    result = phk.distinctBase.methods.getColFn().state.valueOr:
-      raiseAssert error.prettyText() & ": " & $api
-    phk.ifTrackLegaApi: debug legaApiTxt, api, elapsed, result
-
-  # ----------------
-
-  proc beginTransaction*(db: CoreDbRef): CoreDbTxRef =
-    db.setTrackLegaApi LegaBeginTxFn
-    result = db.distinctBase.methods.beginFn().CoreDbTxRef
-    db.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, newLevel=db.methods.levelFn()
-
-  proc commit*(tx: CoreDbTxRef, applyDeletes = true) =
-    tx.setTrackLegaApi LegaTxCommitFn:
-      let prvLevel {.used.} = tx.distinctBase.methods.levelFn()
-    tx.distinctBase.commit()
-    tx.ifTrackLegaApi: debug legaApiTxt, api, elapsed, prvLevel
-
-  proc rollback*(tx: CoreDbTxRef) =
-    tx.setTrackLegaApi LegaTxCommitFn:
-      let prvLevel {.used.} = tx.distinctBase.methods.levelFn()
-    tx.distinctBase.rollback()
-    tx.ifTrackLegaApi: debug legaApiTxt, api, elapsed, prvLevel
-
-  proc dispose*(tx: CoreDbTxRef) =
-    tx.setTrackLegaApi LegaTxDisposeFn:
-      let prvLevel {.used.} = tx.distinctBase.methods.levelFn()
-    tx.distinctBase.dispose()
-    tx.ifTrackLegaApi: debug legaApiTxt, api, elapsed, prvLevel
-
-  # ----------------
-
-  proc capture*(
-      db: CoreDbRef;
-      flags: set[CoreDbCaptFlags] = {};
-        ): CoreDbCaptRef =
-    db.setTrackLegaApi LegaCaptureFn
-    result = db.newCapture(flags).expect($api).CoreDbCaptRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc recorder*(cp: CoreDbCaptRef): CoreDbRef =
-    cp.setTrackLegaApi LegaCptRecorderFn
-    result = cp.distinctBase.recorder()
-    cp.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc logDb*(cp: CoreDbCaptRef): TableRef[Blob,Blob] =
-    cp.setTrackLegaApi LegaCptLogDbFn
-    result = cp.distinctBase.logDb()
-    cp.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc flags*(cp: CoreDbCaptRef): set[CoreDbCaptFlags] =
-    cp.setTrackLegaApi LegaCptFlagsFn
-    result = cp.distinctBase.flags()
-    cp.ifTrackLegaApi: debug legaApiTxt, api, elapsed, result
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
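
For reference, the removed wrappers map onto the surviving API roughly as in
this fragment (names taken from the diffs above; `db`, `key` and `val` are
assumed to be in scope, and `CoreDbRc` results are now handled explicitly
instead of via `.expect`):

    let kvt = db.newKvt()
    kvt.put(key, val).isOkOr:
      raiseAssert "put() failed: " & $$error
    let data = kvt.getOrEmpty(key).valueOr:
      raiseAssert "get() failed: " & $$error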

View File

@@ -19,10 +19,6 @@ import
   ./base_desc

 type
-  CoreDbApiTrackRef* =
-    CoreDbRef | CoreDbKvtRef | CoreDbMptRef | CoreDbPhkRef |
-    CoreDbTxRef | CoreDbCaptRef
-
   CoreDxApiTrackRef* =
     CoreDbRef | CoreDxKvtRef | CoreDbColRef |
     CoreDbCtxRef | CoreDxMptRef | CoreDxPhkRef | CoreDxAccRef |

@@ -39,7 +35,7 @@ type
     AccHasPathFn = "acc/hasPath"
     AccMergeFn = "acc/merge"
     AccGetMptFn = "acc/getMpt"
-    AccStoFlushFn = "acc/stoFlush"
+    AccStoDeleteFn = "acc/stoDelete"
     AccToMptFn = "acc/toMpt"

     AnyBackendFn = "any/backend"

@@ -78,44 +74,6 @@ type
     KvtPairsIt = "kvt/pairs"
     KvtPutFn = "kvt/put"

-    LegaBeginTxFn = "lega/beginTransaction"
-
-    LegaCaptureFn = "lega/cpt/capture"
-    LegaCptFlagsFn = "lega/cpt/flags"
-    LegaCptLogDbFn = "lega/cpt/logDb"
-    LegaCptRecorderFn = "lega/cpt/recorder"
-
-    LegaKvtContainsFn = "lega/kvt/contains"
-    LegaKvtDelFn = "lega/kvt/del"
-    LegaKvtGetFn = "lega/kvt/get"
-    LegaKvtPairsIt = "lega/kvt/pairs"
-    LegaKvtPutFn = "lega/kvt/put"
-
-    LegaMptContainsFn = "lega/mpt/contains"
-    LegaMptDelFn = "lega/mpt/del"
-    LegaMptGetFn = "lega/mpt/get"
-    LegaMptPutFn = "lega/mpt/put"
-    LegaMptRootHashFn = "lega/mpt/rootHash"
-    LegaMptPairsIt = "lega/mpt/pairs"
-    LegaMptReplicateIt = "lega/mpt/replicate"
-
-    LegaNewKvtFn = "lega/kvt"
-    LegaNewMptFn = "lega/mptPrune"
-    LegaNewPhkFn = "lega/phkPrune"
-
-    LegaPhkContainsFn = "lega/phk/contains"
-    LegaPhkDelFn = "lega/phk/del"
-    LegaPhkGetFn = "lega/phk/get"
-    LegaPhkPutFn = "lega/phk/put"
-    LegaPhkRootHashFn = "lega/phk/rootHash"
-
-    LegaToMptFn = "lega/phk/toMpt"
-    LegaToPhkFn = "lega/mpt/toPhk"
-
-    LegaTxCommitFn = "lega/commit"
-    LegaTxDisposeFn = "lega/dispose"
-    LegaTxRollbackFn = "lega/rollback"
-    LegaTxSaveDisposeFn = "lega/safeDispose"
-
     MptDeleteFn = "mpt/delete"
     MptFetchFn = "mpt/fetch"
     MptFetchOrEmptyFn = "mpt/fetchOrEmpty"

@@ -165,9 +123,6 @@ proc toStr*(p: CoreDbColRef): string =
     (a,b) = if 0 < w.len and w[0] == '(': ("","") else: ("(",")")
   "Col" & a & w & b

-func toStr*(w: CoreDbKvtRef): string =
-  if w.distinctBase.isNil: "kvt(nil)" else: "kvt"
-
 func toLenStr*(w: openArray[byte]): string =
   if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
   else: "openArray[" & $w.len & "]"

@@ -219,38 +174,6 @@ proc toStr*(rc: CoreDbRc[CoreDxAccRef]): string = rc.toStr "acc"
 func toStr*(ela: Duration): string =
   aristo_profile.toStr(ela)

-# ------------------------------------------------------------------------------
-# Public legacy API logging framework
-# ------------------------------------------------------------------------------
-
-template beginLegaApi*(w: CoreDbApiTrackRef; s: static[CoreDbFnInx]) =
-  when typeof(w) is CoreDbRef:
-    let db = w
-  else:
-    let db = w.distinctBase.parent
-  # Prevent from cascaded logging
-  let save = db.trackNewApi
-  db.trackNewApi = false
-  defer: db.trackNewApi = save
-  when CoreDbEnableApiProfiling:
-    const blaCtx {.inject.} = s        # Local use only
-  let blaStart {.inject.} = getTime()  # Local use only
-
-template endLegaApiIf*(w: CoreDbApiTrackRef; code: untyped) =
-  block:
-    when typeof(w) is CoreDbRef:
-      let db = w
-    else:
-      let db = w.distinctBase.parent
-    when CoreDbEnableApiProfiling:
-      let elapsed {.inject,used.} = getTime() - blaStart
-      aristo_profile.update(db.profTab, blaCtx.ord, elapsed)
-    if db.trackLegaApi:
-      when not CoreDbEnableApiProfiling: # otherwise use variable above
-        let elapsed {.inject,used.} = getTime() - blaStart
-      code
-
 # ------------------------------------------------------------------------------
 # Public new API logging framework
 # ------------------------------------------------------------------------------

View File

@@ -27,19 +27,12 @@ type
     Ooops
     AristoDbMemory      ## Memory backend emulator
     AristoDbRocks       ## RocksDB backend
-    AristoDbDualRocks   ## Dual RocksDB backends for `Kvt` and `Aristo`
     AristoDbVoid        ## No backend

 const
   CoreDbPersistentTypes* = {AristoDbRocks}

 type
-  CoreDbKvtRef*  = distinct CoreDxKvtRef  # Legacy descriptor
-  CoreDbMptRef*  = distinct CoreDxMptRef  # Legacy descriptor
-  CoreDbPhkRef*  = distinct CoreDxPhkRef  # Legacy descriptor
-  CoreDbTxRef*   = distinct CoreDxTxRef   # Legacy descriptor
-  CoreDbCaptRef* = distinct CoreDxCaptRef # Legacy descriptor
-
   CoreDbProfListRef* = AristoDbProfListRef
     ## Borrowed from `aristo_profile`, only used in profiling mode

@@ -96,7 +89,7 @@ type
   # --------------------------------------------------
   # Sub-descriptor: Misc methods for main descriptor
   # --------------------------------------------------
-  CoreDbBaseDestroyFn* = proc(flush = true) {.noRaise.}
+  CoreDbBaseDestroyFn* = proc(eradicate = true) {.noRaise.}
   CoreDbBaseColStateFn* = proc(
     col: CoreDbColRef): CoreDbRc[Hash256] {.noRaise.}
   CoreDbBaseColPrintFn* = proc(vid: CoreDbColRef): string {.noRaise.}

@@ -214,7 +207,7 @@ type
   CoreDbAccGetMptFn* = proc(): CoreDbRc[CoreDxMptRef] {.noRaise.}
   CoreDbAccFetchFn* = proc(k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
   CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
-  CoreDbAccStoFlushFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
+  CoreDbAccStoDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
   CoreDbAccMergeFn* = proc(v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
   CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
   CoreDbAccGetColFn* = proc(): CoreDbColRef {.noRaise.}

@@ -225,7 +218,7 @@ type
     getMptFn*: CoreDbAccGetMptFn
     fetchFn*: CoreDbAccFetchFn
     deleteFn*: CoreDbAccDeleteFn
-    stoFlushFn*: CoreDbAccStoFlushFn
+    stoDeleteFn*: CoreDbAccStoDeleteFn
     mergeFn*: CoreDbAccMergeFn
     hasPathFn*: CoreDbAccHasPathFn
     getColFn*: CoreDbAccGetColFn

View File

@@ -67,7 +67,7 @@ proc validateMethodsDesc(fns: CoreDbAccFns) =
   doAssert not fns.getMptFn.isNil
   doAssert not fns.fetchFn.isNil
   doAssert not fns.deleteFn.isNil
-  doAssert not fns.stoFlushFn.isNil
+  doAssert not fns.stoDeleteFn.isNil
   doAssert not fns.mergeFn.isNil
   doAssert not fns.hasPathFn.isNil
   doAssert not fns.getColFn.isNil

View File

@@ -20,13 +20,8 @@ import
 when CoreDbEnableApiTracking:
   import chronicles

-const
-  ProvideLegacyAPI = CoreDbProvideLegacyAPI
-
-when ProvideLegacyAPI and CoreDbEnableApiTracking:
   const
     logTxt = "CoreDb/it "
-    legaApiTxt = logTxt & "legacy API"
     newApiTxt = logTxt & "API"

 # Annotation helper(s)

@@ -82,25 +77,6 @@ iterator replicate*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
     let trie = mpt.methods.getColFn()
     debug newApiTxt, api, elapsed, trie

-when ProvideLegacyAPI:
-
-  iterator pairs*(kvt: CoreDbKvtRef): (Blob, Blob) {.apiRaise.} =
-    kvt.setTrackLegaApi LegaKvtPairsIt
-    for k,v in kvt.distinctBase.pairs(): yield (k,v)
-    kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
-    ## Trie traversal, not supported for `CoreDbPhkRef`
-    mpt.setTrackLegaApi LegaMptPairsIt
-    for k,v in mpt.distinctBase.pairs(): yield (k,v)
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  iterator replicate*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
-    ## Low level trie dump, not supported for `CoreDbPhkRef`
-    mpt.setTrackLegaApi LegaMptReplicateIt
-    for k,v in mpt.distinctBase.replicate(): yield (k,v)
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------

View File

@@ -20,13 +20,8 @@ import
 when CoreDbEnableApiTracking:
   import chronicles

-const
-  ProvideLegacyAPI = CoreDbProvideLegacyAPI
-
-when ProvideLegacyAPI and CoreDbEnableApiTracking:
   const
     logTxt = "CoreDb/itp "
-    legaApiTxt = logTxt & "legacy API"
     newApiTxt = logTxt & "API"

 # Annotation helper(s)

@@ -56,14 +51,6 @@ iterator replicatePersistent*(mpt: CoreDxMptRef): (Blob, Blob) {.rlpRaise.} =
     let trie = mpt.methods.getColFn()
     debug newApiTxt, api, elapsed, trie

-when ProvideLegacyAPI:
-
-  iterator replicatePersistent*(mpt: CoreDbMptRef): (Blob, Blob) {.rlpRaise.} =
-    ## Low level trie dump, not supported for `CoreDbPhkRef`
-    mpt.setTrackLegaApi LegaMptReplicateIt
-    for k,v in mpt.distinctBase.replicatePersistent(): yield (k,v)
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------

View File

@@ -13,14 +13,11 @@
 import
   eth/common,
   ../aristo,
-  ./backend/aristo_db
-
-import
-  ./core_apps_newapi as core_apps
+  ./backend/aristo_db,
+  "."/[base_iterators, core_apps]

 import
   ./base except bless

-import
-  ./base_iterators
-
 export
   EmptyBlob,
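
A side note on the `"."/[...]` import sugar used above: it is plain Nim path
notation, so the two forms below are equivalent.

    import "."/[base_iterators, core_apps]
    import ./base_iterators, ./core_apps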

View File

@@ -42,17 +42,9 @@ proc newCoreDbRef*(
   ## The production database type is `AristoDbRocks` which uses a single
   ## `RocksDb` backend for both, `Aristo` and `KVT`.
   ##
-  ## For debugging, there is the `AristoDbDualRocks` database with split
-  ## backends for `Aristo` and `KVT`. This database is not compatible with
-  ## `AristoDbRocks` so it cannot be reliably switched between both versions
-  ## with consecutive sessions.
-  ##
   when dbType == AristoDbRocks:
     newAristoRocksDbCoreDbRef path, opts

-  elif dbType == AristoDbDualRocks:
-    newAristoDualRocksDbCoreDbRef path, opts
-
   else:
     {.error: "Unsupported dbType for persistent newCoreDbRef()".}

View File

@@ -44,7 +44,7 @@ type
   KvtApiCommitFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
   KvtApiDelFn* = proc(db: KvtDbRef,
     key: openArray[byte]): Result[void,KvtError] {.noRaise.}
-  KvtApiFinishFn* = proc(db: KvtDbRef, flush = false) {.noRaise.}
+  KvtApiFinishFn* = proc(db: KvtDbRef, eradicate = false) {.noRaise.}
   KvtApiForgetFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
   KvtApiForkTxFn* = proc(db: KvtDbRef,
     backLevel: int): Result[KvtDbRef,KvtError] {.noRaise.}

View File

@@ -47,11 +47,11 @@ type
   # -------------
   CloseFn* =
-    proc(flush: bool) {.gcsafe, raises: [].}
-      ## Generic destructor for the `Kvt DB` backend. The argument `flush`
+    proc(eradicate: bool) {.gcsafe, raises: [].}
+      ## Generic destructor for the `Kvt DB` backend. The argument `eradicate`
       ## indicates that a full database deletion is requested. If passed
       ## `false` the outcome might differ depending on the type of backend
-      ## (e.g. in-memory backends would flush on close.)
+      ## (e.g. in-memory backends would eradicate on close.)

   CanModFn* =
     proc(): Result[void,KvtError] {.gcsafe, raises: [].}

@@ -64,9 +64,10 @@ type
       ## This function stores a request function for the piggiback mode
       ## writing to the `Aristo` set of column families.
       ##
-      ## If used at all, this function would run `rocks_db.setWrReqTriggeredFn()()`
-      ## with a `KvtDbRef` type argument for `db`. This allows to run the `Kvt`
-      ## without linking to the rocksdb interface unless it is really needed.
+      ## If used at all, this function would run thee function closure
+      ## `rocks_db.setWrReqTriggeredFn()()` with a `KvtDbRef` type argument
+      ## for `db`. This allows to run the `Kvt` without linking to the
+      ## rocksdb interface unless it is really needed.

   # -------------

@@ -92,6 +93,7 @@ proc init*(trg: var BackendObj; src: BackendObj) =
   trg.putEndFn = src.putEndFn
   trg.closeFn = src.closeFn
   trg.canModFn = src.canModFn
+  trg.setWrReqFn = src.setWrReqFn

 # ------------------------------------------------------------------------------
 # End

View File

@@ -65,16 +65,14 @@ proc init*(
   KvtDbRef.init VoidBackendRef

-proc finish*(db: KvtDbRef; flush = false) =
-  ## Backend destructor. The argument `flush` indicates that a full database
-  ## deletion is requested. If set `false` the outcome might differ depending
-  ## on the type of backend (e.g. the `BackendMemory` backend will always
-  ## flush on close.)
-  ##
-  ## This distructor may be used on already *destructed* descriptors.
+proc finish*(db: KvtDbRef; eradicate = false) =
+  ## Backend destructor. The argument `eradicate` indicates that a full
+  ## database deletion is requested. If set `false` the outcome might differ
+  ## depending on the type of backend (e.g. the `BackendMemory` backend will
+  ## always eradicate on close.)
   ##
   if not db.backend.isNil:
-    db.backend.closeFn flush
+    db.backend.closeFn eradicate
   discard db.getCentre.forgetOthers()

 # ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@@ -134,8 +134,8 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =
 proc closeFn(db: RdbBackendRef): CloseFn =
   result =
-    proc(flush: bool) =
-      db.rdb.destroy(flush)
+    proc(eradicate: bool) =
+      db.rdb.destroy(eradicate)

 proc canModFn(db: RdbBackendRef): CanModFn =
   result =

@@ -186,7 +186,7 @@ proc putEndTriggeredFn(db: RdbBackendRef): PutEndFn =
 proc closeTriggeredFn(db: RdbBackendRef): CloseFn =
   ## Variant of `closeFn()` for piggyback write batch
   result =
-    proc(flush: bool) =
+    proc(eradicate: bool) =
       # Nothing to do here as we do not own the backend
       discard

View File

@@ -28,9 +28,50 @@ import
 # ------------------------------------------------------------------------------

 proc getCFInitOptions(opts: DbOptions): ColFamilyOptionsRef =
-  result = defaultColFamilyOptions()
+  # TODO the configuration options below have not been tuned but are rather
+  #      based on gut feeling, guesses and by looking at other clients - it
+  #      would make sense to test different settings and combinations once the
+  #      data model itself has settled down as their optimal values will depend
+  #      on the shape of the data - it'll also be different per column family..
+  let cfOpts = defaultColFamilyOptions()
+
   if opts.writeBufferSize > 0:
-    result.setWriteBufferSize(opts.writeBufferSize)
+    cfOpts.setWriteBufferSize(opts.writeBufferSize)
+
+  # Without this option, the WAL might never get flushed since a small column
+  # family (like the admin CF) with only tiny writes might keep it open - this
+  # negatively affects startup times since the WAL is replayed on every startup.
+  # https://github.com/facebook/rocksdb/blob/af50823069818fc127438e39fef91d2486d6e76c/include/rocksdb/options.h#L719
+  # Flushing the oldest
+  let writeBufferSize =
+    if opts.writeBufferSize > 0:
+      opts.writeBufferSize
+    else:
+      64 * 1024 * 1024 # TODO read from rocksdb?
+
+  cfOpts.setMaxTotalWalSize(2 * writeBufferSize)
+
+  # When data is written to rocksdb, it is first put in an in-memory table
+  # whose index is a skip list. Since the mem table holds the most recent data,
+  # all reads must go through this skiplist which results in slow lookups for
+  # already-written data.
+  # We enable a bloom filter on the mem table to avoid this lookup in the cases
+  # where the data is actually on disk already (ie wasn't updated recently).
+  # TODO there's also a hashskiplist that has both a hash index and a skip list
+  #      which maybe could be used - uses more memory, requires a key prefix
+  #      extractor
+  cfOpts.setMemtableWholeKeyFiltering(true)
+  cfOpts.setMemtablePrefixBloomSizeRatio(0.1)
+
+  # LZ4 seems to cut database size to 2/3 roughly, at the time of writing
+  # Using it for the bottom-most level means it applies to 90% of data but
+  # delays compression until data has settled a bit, which seems like a
+  # reasonable tradeoff.
+  # TODO evaluate zstd compression with a trained dictionary
+  #      https://github.com/facebook/rocksdb/wiki/Compression
+  cfOpts.setBottommostCompression(Compression.lz4Compression)
+
+  cfOpts

 proc getDbInitOptions(opts: DbOptions): DbOptionsRef =

@@ -107,12 +148,12 @@ proc init*(
   ok()

-proc destroy*(rdb: var RdbInst; flush: bool) =
+proc destroy*(rdb: var RdbInst; eradicate: bool) =
   ## Destructor (no need to do anything if piggybacked)
   if 0 < rdb.basePath.len:
     rdb.baseDb.close()

-    if flush:
+    if eradicate:
       try:
         rdb.dataDir.removeDir
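
The WAL sizing rule above, in isolation (a self-contained sketch of the
arithmetic; `walCap` is a hypothetical helper, not part of the module):

    func walCap(writeBufferSize: int): int =
      ## Cap the WAL at twice the effective write buffer size, falling
      ## back to the 64 MiB default used above.
      let effSize =
        if writeBufferSize > 0: writeBufferSize
        else: 64 * 1024 * 1024
      2 * effSize

    doAssert walCap(0) == 128 * 1024 * 1024   # default: a 128 MiB WAL cap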

View File

@@ -152,13 +152,13 @@ proc merge*(al: AccountLedger; account: CoreDbAccount) =
 proc freeStorage*(al: AccountLedger, eAddr: EthAddress) =
   const info = "AccountLedger/freeStorage()"
   # Flush associated storage trie
-  al.distinctBase.stoFlush(eAddr).isOkOr:
+  al.distinctBase.stoDelete(eAddr).isOkOr:
     raiseAssert info & $$error

 proc delete*(al: AccountLedger, eAddr: EthAddress) =
   const info = "AccountLedger/delete()"
   # Flush associated storage trie
-  al.distinctBase.stoFlush(eAddr).isOkOr:
+  al.distinctBase.stoDelete(eAddr).isOkOr:
     raiseAssert info & $$error
   # Clear account
   al.distinctBase.delete(eAddr).isOkOr:

View File

@@ -144,8 +144,8 @@ proc subBalance*(db: AccountStateDB, eAddr: EthAddress, delta: UInt256) =
 proc clearStorage*(db: AccountStateDB, eAddr: EthAddress) =
   # Flush associated storage trie (will update account record on disk)
-  db.trie.distinctBase.stoFlush(eAddr).isOkOr:
-    raiseAssert "clearStorage(): stoFlush() failed, " & $$error
+  db.trie.distinctBase.stoDelete(eAddr).isOkOr:
+    raiseAssert "clearStorage(): stoDelete() failed, " & $$error
   # Reset storage info locally so that `Aristo` would not complain when
   # updating the account record on disk
   var account = db.getAccount(eAddr)

View File

@@ -110,7 +110,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
   block:
     let report = db.mergeList w[0]
     if report.error != 0:
-      db.finish(flush=true)
+      db.finish(eradicate=true)
       check report.error == 0
       return err(report.error)
   let rc = db.persist()

@@ -130,7 +130,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
   for n in 0 ..< dx.len:
     let report = dx[n].mergeList w[n+1]
     if report.error != 0:
-      db.finish(flush=true)
+      db.finish(eradicate=true)
       check (n, report.error) == (n,0)
       return err(report.error)

@@ -140,7 +140,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
 proc cleanUp(dx: var DbTriplet) =
   if not dx[0].isNil:
-    dx[0].finish(flush=true)
+    dx[0].finish(eradicate=true)
     dx.reset

 proc isDbEq(a, b: LayerDeltaRef; db: AristoDbRef; noisy = true): bool =

View File

@@ -112,7 +112,7 @@ proc innerCleanUp(db: var AristoDbRef): bool {.discardable.} =
   if rx.isOk:
     let rc = rx.value.collapse(commit=false)
     xCheckRc rc.error == 0
-  db.finish(flush=true)
+  db.finish(eradicate=true)
   db = AristoDbRef(nil)

 proc schedStow(

@@ -340,7 +340,7 @@ proc testTxMergeAndDeleteOneByOne*(
     fwdRevVfyToggle = true
   defer:
     if not db.isNil:
-      db.finish(flush=true)
+      db.finish(eradicate=true)

   for n,w in list:
     # Start with brand new persistent database.

@@ -448,7 +448,7 @@ proc testTxMergeAndDeleteSubTree*(
     db = AristoDbRef(nil)
   defer:
     if not db.isNil:
-      db.finish(flush=true)
+      db.finish(eradicate=true)

   for n,w in list:
     # Start with brand new persistent database.

@@ -546,7 +546,7 @@ proc testTxMergeProofAndKvpList*(
     count = 0
   defer:
     if not db.isNil:
-      db.finish(flush=true)
+      db.finish(eradicate=true)

   for n,w in list:

View File

@@ -20,7 +20,8 @@ import
   ../nimbus/db/core_db/persistent,
   ../nimbus/core/chain,
   ./replay/pp,
-  ./test_coredb/[coredb_test_xx, test_chainsync, test_helpers]
+  ./test_coredb/[
+    coredb_test_xx, test_chainsync, test_coredb_helpers, test_helpers]

 const
   # If `true`, this compile time option set up `unittest2` for manual parsing

@@ -151,17 +152,17 @@ proc setErrorLevel {.used.} =
 proc initRunnerDB(
     path: string;
     specs: CaptureSpecs;
-    dbType: CoreDbType;
+    dbType: CdbTypeEx;
     pruneHistory: bool;
       ): CommonRef =
   let coreDB =
     # Resolve for static `dbType`
     case dbType:
-    of AristoDbMemory: AristoDbMemory.newCoreDbRef()
-    of AristoDbRocks: AristoDbRocks.newCoreDbRef(path, DbOptions.init())
-    of AristoDbDualRocks: AristoDbDualRocks.newCoreDbRef(path, DbOptions.init())
-    of AristoDbVoid: AristoDbVoid.newCoreDbRef()
-    of Ooops: raiseAssert "Ooops"
+    of CdbAristoMemory: AristoDbMemory.newCoreDbRef()
+    of CdbAristoRocks: AristoDbRocks.newCoreDbRef(path, DbOptions.init())
+    of CdbAristoDualRocks: newCdbAriAristoDualRocks(path, DbOptions.init())
+    of CdbAristoVoid: AristoDbVoid.newCoreDbRef()
+    of CdbOoops: raiseAssert "Ooops"

   when false: # or true:
     setDebugLevel()

@@ -198,7 +199,7 @@ proc initRunnerDB(
 proc chainSyncRunner(
     noisy = true;
     capture = memorySampleDefault;
-    dbType = CoreDbType(0);
+    dbType = CdbTypeEx(0);
     pruneHistory = false;
     profilingOk = false;
     finalDiskCleanUpOk = true;

@@ -220,14 +221,14 @@ proc chainSyncRunner(
     dbType = block:
       # Decreasing priority: dbType, capture.dbType, dbTypeDefault
-      var effDbType = dbTypeDefault
-      if dbType != CoreDbType(0):
+      var effDbType = dbTypeDefault.to(CdbTypeEx)
+      if dbType != CdbTypeEx(0):
         effDbType = dbType
       elif capture.dbType != CoreDbType(0):
-        effDbType = capture.dbType
+        effDbType = capture.dbType.to(CdbTypeEx)
       effDbType

-    persistent = dbType in CoreDbPersistentTypes
+    persistent = dbType in CdbTypeExPersistent

   defer:
     if persistent: baseDir.flushDbDir

@@ -238,7 +239,7 @@ proc chainSyncRunner(
     let
       com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
     defer:
-      com.db.finish(flush = finalDiskCleanUpOk)
+      com.db.finish(eradicate = finalDiskCleanUpOk)
       if profilingOk: noisy.test_chainSyncProfilingPrint numBlocks
       if persistent and finalDiskCleanUpOk: dbDir.flushDbDir

@@ -255,7 +256,7 @@ proc chainSyncRunner(
 proc persistentSyncPreLoadAndResumeRunner(
     noisy = true;
     capture = persistentSampleDefault;
-    dbType = CoreDbType(0);
+    dbType = CdbTypeEx(0);
     profilingOk = false;
     pruneHistory = false;
     finalDiskCleanUpOk = true;

@@ -271,14 +272,14 @@ proc persistentSyncPreLoadAndResumeRunner(
     dbType = block:
       # Decreasing priority: dbType, capture.dbType, dbTypeDefault
-      var effDbType = dbTypeDefault
-      if dbType != CoreDbType(0):
+      var effDbType = dbTypeDefault.to(CdbTypeEx)
+      if dbType != CdbTypeEx(0):
         effDbType = dbType
       elif capture.dbType != CoreDbType(0):
-        effDbType = capture.dbType
+        effDbType = capture.dbType.to(CdbTypeEx)
       effDbType

-  doAssert dbType in CoreDbPersistentTypes
+  doAssert dbType in CdbTypeExPersistent
   defer: baseDir.flushDbDir

   let

@@ -292,7 +293,7 @@ proc persistentSyncPreLoadAndResumeRunner(
     let
       com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
     defer:
-      com.db.finish(flush = finalDiskCleanUpOk)
+      com.db.finish(eradicate = finalDiskCleanUpOk)
       if profilingOk: noisy.test_chainSyncProfilingPrint firstPart
       if noisy:

@@ -308,7 +309,7 @@ proc persistentSyncPreLoadAndResumeRunner(
     let
       com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
     defer:
-      com.db.finish(flush = finalDiskCleanUpOk)
+      com.db.finish(eradicate = finalDiskCleanUpOk)
       if profilingOk: noisy.test_chainSyncProfilingPrint secndPart
       if finalDiskCleanUpOk: dbDir.flushDbDir

@@ -354,7 +355,7 @@ when isMainModule:
     for n,capture in sampleList:
       noisy.profileSection("@sample #" & $n, state):
         noisy.chainSyncRunner(
-          #dbType = AristoDbDualRocks,
+          #dbType = CdbAristoDualRocks,
           capture = capture,
           pruneHistory = true,
           #profilingOk = true,

View File

@@ -0,0 +1,50 @@
+# Nimbus
+# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or
+# distributed except according to those terms.
+
+## Some extended rocksdb backend modes for testing
+
+import
+  std/sequtils,
+  ../../nimbus/db/core_db/backend/aristo_rocksdb,
+  ../../nimbus/db/[core_db, opts]
+
+type
+  CdbTypeEx* = enum
+    CdbOoops
+    CdbAristoMemory = AristoDbMemory ## Memory backend emulator
+    CdbAristoRocks = AristoDbRocks   ## RocksDB backend
+    CdbAristoVoid = AristoDbVoid     ## No backend
+    CdbAristoDualRocks               ## Dual RocksDB backends for Kvt & Aristo
+
+func to*(cdb: CoreDbType; T: type CdbTypeEx): T =
+  case cdb:
+  # Let the compiler find out whether the enum is complete
+  of Ooops, AristoDbMemory, AristoDbRocks, AristoDbVoid:
+    return CdbTypeEx(cdb.ord)
+
+const
+  CdbTypeExPersistent* =
+    CoreDbPersistentTypes.mapIt(it.to(CdbTypeEx)) & @[CdbAristoDualRocks]
+
+func `$`*(w: CdbTypeEx): string =
+  case w:
+  of CdbOoops, CdbAristoMemory, CdbAristoRocks, CdbAristoVoid:
+    $CoreDbType(w.ord)
+  of CdbAristoDualRocks:
+    "CdbAristoDualRocks"
+
+proc newCdbAriAristoDualRocks*(path: string, opts: DbOptions): CoreDbRef =
+  ## For debugging, there is the `AristoDbDualRocks` database with split
+  ## backends for `Aristo` and `KVT`. This database is not compatible with
+  ## `AristoDbRocks` so it cannot be reliably switched between both versions
+  ## with consecutive sessions.
+  newAristoDualRocksDbCoreDbRef path, opts
+
+# End
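
A quick sanity check of the mapping (hypothetical snippet, assuming the helper
module above is imported alongside `core_db`):

    doAssert AristoDbRocks.to(CdbTypeEx) == CdbAristoRocks
    doAssert $CdbAristoDualRocks == "CdbAristoDualRocks"
    doAssert CdbAristoDualRocks in CdbTypeExPersistent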