CoreDb related clean up and maintenance fixes (#2360)
* Fix initialiser
  why: Possible crash (app profiling, tracer etc.)

* Update column family options processing
  why: Same for kvt as for aristo

* Move `AristoDbDualRocks` backend type to the test suite
  why: So it is not available for production

* Fix typos in API jump table
  why: Used for tracing and app profiling only. Needed some update

* Purged CoreDb legacy API
  why: Not needed anymore, was transitionary and disabled.

* Rename `flush` argument to `eradicate` in a DB close context
  why: The word `eradicate` leaves no doubt what is meant

* Rename `stoFlush()` -> `stoDelete()`

* Rename `core_apps_newapi` -> `core_apps` (not so new anymore)
This commit is contained in:
parent 8a0772ac10
commit debba5a620
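Before the diff itself, a minimal before/after sketch of the two renamed call sites. This is illustrative only and not part of the commit: `db`, `acc` and `address` are assumed to be an existing CoreDbRef, CoreDxAccRef and EthAddress; only the proc names and argument names shown here are taken from the change set.

  # Illustrative only. Assumes `db: CoreDbRef`, `acc: CoreDxAccRef` and
  # `address: EthAddress` are already set up.
  #
  # Before this commit:
  #   discard acc.stoFlush(address)   # old name
  #   db.finish(flush = true)         # `flush` actually meant "delete everything"
  #
  # After this commit:
  discard acc.stoDelete(address)      # recursively empties the account's storage trie
  db.finish(eradicate = true)         # unambiguous: close and delete the database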
@@ -126,12 +126,12 @@ type
   AristoApiFinishFn* =
     proc(db: AristoDbRef;
-         flush = false;
+         eradicate = false;
         ) {.noRaise.}
-      ## Backend destructor. The argument `flush` indicates that a full
+      ## Backend destructor. The argument `eradicate` indicates that a full
       ## database deletion is requested. If set `false` the outcome might
       ## differ depending on the type of backend (e.g. the `BackendMemory`
-      ## backend will always flush on close.)
+      ## backend will always eradicate on close.)
       ##
       ## In case of distributed descriptors accessing the same backend, all
       ## distributed descriptors will be destroyed.

@@ -439,11 +439,11 @@ type
     AristoApiProfBeGetVtxFn = "be/getVtx"
     AristoApiProfBeGetKeyFn = "be/getKey"
-    AristoApiProfBeGetIdgFn = "be/getIfg"
+    AristoApiProfBeGetTuvFn = "be/getTuv"
     AristoApiProfBeGetLstFn = "be/getLst"
     AristoApiProfBePutVtxFn = "be/putVtx"
     AristoApiProfBePutKeyFn = "be/putKey"
-    AristoApiProfBePutIdgFn = "be/putIdg"
+    AristoApiProfBePutTuvFn = "be/putTuv"
     AristoApiProfBePutLstFn = "be/putLst"
     AristoApiProfBePutEndFn = "be/putEnd"

@@ -757,11 +757,11 @@ func init*(
           result = be.getKeyFn(a)
         data.list[AristoApiProfBeGetKeyFn.ord].masked = true

-    beDup.getIdgFn =
+    beDup.getTuvFn =
       proc(): auto =
-        AristoApiProfBeGetIdgFn.profileRunner:
-          result = be.getIdgFn()
-        data.list[AristoApiProfBeGetIdgFn.ord].masked = true
+        AristoApiProfBeGetTuvFn.profileRunner:
+          result = be.getTuvFn()
+        data.list[AristoApiProfBeGetTuvFn.ord].masked = true

     beDup.getLstFn =
       proc(): auto =

@@ -781,11 +781,11 @@ func init*(
           be.putKeyFn(a,b)
         data.list[AristoApiProfBePutKeyFn.ord].masked = true

-    beDup.putIdgFn =
-      proc(a: PutHdlRef; b: openArray[VertexID]) =
-        AristoApiProfBePutIdgFn.profileRunner:
-          be.putIdgFn(a,b)
-        data.list[AristoApiProfBePutIdgFn.ord].masked = true
+    beDup.putTuvFn =
+      proc(a: PutHdlRef; b: VertexID) =
+        AristoApiProfBePutTuvFn.profileRunner:
+          be.putTuvFn(a,b)
+        data.list[AristoApiProfBePutTuvFn.ord].masked = true

     beDup.putLstFn =
       proc(a: PutHdlRef; b: SavedState) =
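The jump-table labels above exist for tracing and profiling: when profiling is enabled, each backend function in the duplicated function table is re-routed through a timer that books the elapsed time under the matching label. A stripped-down sketch of that wrapping pattern follows. It is illustrative only: `timedAs`, `Backend` and `wrapForProfiling` are stand-ins invented for this example; only the `"be/getTuv"` label and the overall shape are taken from the diff above.

  # Illustrative sketch of the "duplicate the function table and time each
  # entry" pattern. `timedAs` plays the role of the real `profileRunner`.
  import std/[monotimes, times, tables]

  var profTab = initTable[string, Duration]()

  template timedAs(label: string; body: untyped) =
    let t0 = getMonoTime()
    body
    profTab.mgetOrPut(label, DurationZero) += getMonoTime() - t0

  type
    Backend = object
      getTuvFn: proc(): int

  proc wrapForProfiling(be: Backend): Backend =
    # Duplicate the function table, re-routing each entry through the timer.
    result.getTuvFn = proc(): int =
      "be/getTuv".timedAs:
        result = be.getTuvFn()

  when isMainModule:
    let raw = Backend(getTuvFn: proc(): int = 42)
    let prof = wrapForProfiling(raw)
    echo prof.getTuvFn(), " ", profTab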
@@ -82,11 +82,11 @@ type
   # -------------

   CloseFn* =
-    proc(flush: bool) {.gcsafe, raises: [].}
-      ## Generic destructor for the `Aristo DB` backend. The argument `flush`
-      ## indicates that a full database deletion is requested. If passed
-      ## `false` the outcome might differ depending on the type of backend
-      ## (e.g. in-memory backends would flush on close.)
+    proc(eradicate: bool) {.gcsafe, raises: [].}
+      ## Generic destructor for the `Aristo DB` backend. The argument
+      ## `eradicate` indicates that a full database deletion is requested. If
+      ## passed `false` the outcome might differ depending on the type of
+      ## backend (e.g. in-memory backends will always eradicate on close.)

   # -------------
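For a purely in-memory backend the `eradicate` flag makes no practical difference, since closing discards the data either way. A hedged sketch of how a backend could populate such a `CloseFn` closure; `MemBackendRef` and its `tab` field are assumptions for this example, not the repository's actual type layout (the real RocksDB variant appears further down in the diff).

  # Illustrative sketch only.
  import std/tables

  type
    CloseFn = proc(eradicate: bool) {.gcsafe, raises: [].}
    MemBackendRef = ref object
      tab: Table[seq[byte], seq[byte]]

  proc closeFn(db: MemBackendRef): CloseFn =
    result =
      proc(eradicate: bool) =
        # An in-memory table vanishes with the descriptor regardless of the
        # flag, i.e. this backend always "eradicates" on close.
        db.tab.clear()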
@@ -85,11 +85,11 @@ proc init*(
  AristoDbRef.init VoidBackendRef


-proc finish*(db: AristoDbRef; flush = false) =
-  ## Backend destructor. The argument `flush` indicates that a full database
-  ## deletion is requested. If set `false` the outcome might differ depending
-  ## on the type of backend (e.g. the `BackendMemory` backend will always
-  ## flush on close.)
+proc finish*(db: AristoDbRef; eradicate = false) =
+  ## Backend destructor. The argument `eradicate` indicates that a full
+  ## database deletion is requested. If set `false` the outcome might differ
+  ## depending on the type of backend (e.g. the `BackendMemory` backend will
+  ## always eradicate on close.)
  ##
  ## In case of distributed descriptors accessing the same backend, all
  ## distributed descriptors will be destroyed.

@@ -97,7 +97,7 @@ proc finish*(db: AristoDbRef; flush = false) =
  ## This distructor may be used on already *destructed* descriptors.
  ##
  if not db.backend.isNil:
-    db.backend.closeFn flush
+    db.backend.closeFn eradicate
  discard db.getCentre.forgetOthers()

# ------------------------------------------------------------------------------
@@ -45,7 +45,7 @@ proc newAristoRdbDbRef(
    vTop = block:
      let rc = be.getTuvFn()
      if rc.isErr:
-        be.closeFn(flush = false)
+        be.closeFn(eradicate = false)
        return err(rc.error)
      rc.value
  ok AristoDbRef(

@@ -225,8 +225,8 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =

proc closeFn(db: RdbBackendRef): CloseFn =
  result =
-    proc(flush: bool) =
-      db.rdb.destroy(flush)
+    proc(eradicate: bool) =
+      db.rdb.destroy(eradicate)

# ------------------------------------------------------------------------------
# Private functions: hosting interface changes

@@ -231,11 +231,11 @@ proc reinit*(
  ok guestCols


-proc destroy*(rdb: var RdbInst; flush: bool) =
+proc destroy*(rdb: var RdbInst; eradicate: bool) =
  ## Destructor
  rdb.baseDb.close()

-  if flush:
+  if eradicate:
    try:
      rdb.dataDir.removeDir
@@ -143,9 +143,9 @@ proc baseMethods(db: AristoCoreDbRef): CoreDbBaseFns =
    ok()

  CoreDbBaseFns(
-    destroyFn: proc(flush: bool) =
-      aBase.destroy(flush)
-      kBase.destroy(flush),
+    destroyFn: proc(eradicate: bool) =
+      aBase.destroy(eradicate)
+      kBase.destroy(eradicate),

    levelFn: proc(): int =
      aBase.getLevel,

@@ -367,8 +367,8 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =
      return err(rc.error.toError(base, info))
    ok()

-  proc accStoFlush(address: EthAddress): CoreDbRc[void] =
-    const info = "stoFlushFn()"
+  proc accStoDelete(address: EthAddress): CoreDbRc[void] =
+    const info = "stoDeleteFn()"

    let
      key = address.keccakHash.data

@@ -405,8 +405,8 @@ proc accMethods(cAcc: AristoCoreDxAccRef): CoreDbAccFns =
    deleteFn: proc(address: EthAddress): CoreDbRc[void] =
      accDelete(address),

-    stoFlushFn: proc(address: EthAddress): CoreDbRc[void] =
-      accStoFlush(address),
+    stoDeleteFn: proc(address: EthAddress): CoreDbRc[void] =
+      accStoDelete(address),

    mergeFn: proc(acc: CoreDbAccount): CoreDbRc[void] =
      accMerge(acc),

@@ -702,8 +702,8 @@ proc persistent*(
# Public constructors and related
# ------------------------------------------------------------------------------

-proc destroy*(base: AristoBaseRef; flush: bool) =
-  base.api.finish(base.ctx.mpt, flush)
+proc destroy*(base: AristoBaseRef; eradicate: bool) =
+  base.api.finish(base.ctx.mpt, eradicate)


func init*(T: type AristoBaseRef; db: CoreDbRef; adb: AristoDbRef): T =

@@ -223,8 +223,8 @@ proc newKvtHandler*(
    ok(base.cache)


-proc destroy*(base: KvtBaseRef; flush: bool) =
-  base.api.finish(base.kdb, flush)        # Close descriptor
+proc destroy*(base: KvtBaseRef; eradicate: bool) =
+  base.api.finish(base.kdb, eradicate)    # Close descriptor


func init*(T: type KvtBaseRef; db: CoreDbRef; kdb: KvtDbRef): T =

@@ -719,7 +719,7 @@ proc traceRecorder(
        # Delete from DB
        api.delTree(mpt, root, accPath).isOkOr:
          when EnableDebugLog:
-            debug logTxt, level, flags, key, error
+            debug logTxt, level, flags, error
          return err(error)

        # Update journal

@@ -48,7 +48,7 @@ proc newAristoRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef =
  AristoDbRocks.create(kdb, adb)

proc newAristoDualRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef =
-  ## This is mainly for debugging. The KVT is run on a completely separate
+  ## This is only for debugging. The KVT is run on a completely separate
  ## database backend.
  let
    adb = AristoDbRef.init(use_ari.RdbBackendRef, path, opts).valueOr:
@@ -20,9 +20,6 @@ from ../aristo
  import EmptyBlob, PayloadRef, isValid

const
-  ProvideLegacyAPI = false
-    ## Enable legacy API. For now everybody would want this enabled.
-
  EnableApiTracking = false
    ## When enabled, functions using this tracking facility need to import
    ## `chronicles`, as well. Tracking is enabled by setting `true` the flags

@@ -61,19 +58,9 @@ export
  PayloadRef

const
-  CoreDbProvideLegacyAPI* = ProvideLegacyAPI
  CoreDbEnableApiTracking* = EnableApiTracking
  CoreDbEnableApiProfiling* = EnableApiTracking and EnableApiProfiling

-when ProvideLegacyAPI:
-  import
-    std/typetraits
-  type
-    TxWrapperApiError* = object of CoreDbApiError
-      ## For re-routing exception on tx/action template
-  export
-    CoreDbKvtRef, CoreDbMptRef, CoreDbPhkRef, CoreDbTxRef, CoreDbCaptRef
-
when AutoValidateDescriptors:
  import ./base/validate

@@ -106,35 +93,6 @@ when EnableApiTracking:
  proc `$`(v: CoreDbColRef): string = v.toStr
  proc `$`(h: Hash256): string = h.toStr

-when ProvideLegacyAPI:
-  when EnableApiTracking:
-    proc `$`(k: CoreDbKvtRef): string = k.toStr
-
-  template setTrackLegaApi(
-      w: CoreDbApiTrackRef;
-      s: static[CoreDbFnInx];
-      code: untyped;
-        ) =
-    ## Template with code section that will be discarded if logging is
-    ## disabled at compile time when `EnableApiTracking` is `false`.
-    when EnableApiTracking:
-      w.beginLegaApi(s)
-      code
-    const api {.inject,used.} = s
-
-  template setTrackLegaApi*(
-      w: CoreDbApiTrackRef;
-      s: static[CoreDbFnInx];
-        ) =
-    w.setTrackLegaApi(s):
-      discard
-
-  template ifTrackLegaApi*(w: CoreDbApiTrackRef; code: untyped) =
-    when EnableApiTracking:
-      w.endLegaApiIf:
-        code
-

template setTrackNewApi(
    w: CoreDxApiTrackRef;
    s: static[CoreDbFnInx];

@@ -286,16 +244,16 @@ proc backend*(dsc: CoreDxKvtRef | CoreDxMptRef): auto =
  result = dsc.methods.backendFn()
  dsc.ifTrackNewApi: debug newApiTxt, api, elapsed

-proc finish*(db: CoreDbRef; flush = false) =
-  ## Database destructor. If the argument `flush` is set `false`, the database
-  ## is left as-is and only the in-memory handlers are cleaned up.
+proc finish*(db: CoreDbRef; eradicate = false) =
+  ## Database destructor. If the argument `eradicate` is set `false`, the
+  ## database is left as-is and only the in-memory handlers are cleaned up.
  ##
  ## Otherwise the destructor is allowed to remove the database. This feature
  ## depends on the backend database. Currently, only the `AristoDbRocks` type
  ## backend removes the database on `true`.
  ##
  db.setTrackNewApi BaseFinishFn
-  db.methods.destroyFn flush
+  db.methods.destroyFn eradicate
  db.ifTrackNewApi: debug newApiTxt, api, elapsed

proc `$$`*(e: CoreDbErrorRef): string =

@@ -746,7 +704,7 @@ proc delete*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
  result = acc.methods.deleteFn address
  acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result

-proc stoFlush*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
+proc stoDelete*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
  ## Recursively delete all data elements from the storage trie associated to
  ## the account identified by the argument `address`. After successful run,
  ## the storage trie will be empty.

@@ -757,8 +715,8 @@ proc stoFlush*(acc: CoreDxAccRef; address: EthAddress): CoreDbRc[void] =
  ## shared by several accounts whereas they are unique on the `Aristo`
  ## backend.
  ##
-  acc.setTrackNewApi AccStoFlushFn
-  result = acc.methods.stoFlushFn address
+  acc.setTrackNewApi AccStoDeleteFn
+  result = acc.methods.stoDeleteFn address
  acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
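A minimal call-site sketch for the renamed `stoDelete()`, following the `isOkOr:` error-handling style that the `AccountLedger` and state-db wrappers use later in this diff. The wrapper proc, its name and the import paths are assumptions for illustration; `stoDelete`, `CoreDbRc` and the `$$` error formatter are the names from this commit.

  # Illustrative only; import paths assume a module living next to core_db.
  import eth/common, ../nimbus/db/core_db

  proc clearAccountStorage(acc: CoreDxAccRef; address: EthAddress) =
    acc.stoDelete(address).isOkOr:
      raiseAssert "stoDelete() failed: " & $$error
    # Afterwards the account's storage trie is empty; the account entry itself
    # is left in place.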
@@ -959,206 +917,6 @@ proc forget*(cp: CoreDxCaptRef) =
  cp.methods.forgetFn()
  cp.ifTrackNewApi: debug newApiTxt, api, elapsed

-# ------------------------------------------------------------------------------
-# Public methods, legacy API
-# ------------------------------------------------------------------------------
-
-when ProvideLegacyAPI:
-
-  proc parent*[T: CoreDbKvtRef | CoreDbMptRef | CoreDbPhkRef |
-               CoreDbTxRef | CoreDbCaptRef](
-      cld: T): CoreDbRef =
-    ## Getter, common method for all sub-modules
-    result = cld.distinctBase.parent
-
-  # ----------------
-
-  proc kvt*(db: CoreDbRef): CoreDbKvtRef =
-    ## Legacy pseudo constructor, see `toKvt()` for production constructor
-    db.setTrackLegaApi LegaNewKvtFn
-    result = db.newKvt().CoreDbKvtRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed, result
-
-  proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): Blob =
-    kvt.setTrackLegaApi LegaKvtGetFn
-    result = kvt.distinctBase.getOrEmpty(key).expect $api
-    kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): void =
-    kvt.setTrackLegaApi LegaKvtDelFn
-    kvt.distinctBase.del(key).expect $api
-    kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr
-
-  proc put*(kvt: CoreDbKvtRef; key: openArray[byte]; val: openArray[byte]) =
-    kvt.setTrackLegaApi LegaKvtPutFn
-    kvt.distinctBase.parent.newKvt().put(key, val).expect $api
-    kvt.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr
-
-  proc contains*(kvt: CoreDbKvtRef; key: openArray[byte]): bool =
-    kvt.setTrackLegaApi LegaKvtContainsFn
-    result = kvt.distinctBase.hasKey(key).expect $api
-    kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  # ----------------
-
-  proc toMpt*(phk: CoreDbPhkRef): CoreDbMptRef =
-    phk.setTrackLegaApi LegaToMptFn
-    result = phk.distinctBase.toMpt.CoreDbMptRef
-    phk.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc mptPrune*(db: CoreDbRef; root: Hash256): CoreDbMptRef =
-    db.setTrackLegaApi LegaNewMptFn
-    let
-      trie = db.ctx.methods.newColFn(
-        CtGeneric, root, Opt.none(EthAddress)).valueOr:
-          raiseAssert error.prettyText() & ": " & $api
-      mpt = db.ctx.getMpt(trie).valueOr:
-        raiseAssert error.prettyText() & ": " & $api
-    result = mpt.CoreDbMptRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed, root
-
-  proc mptPrune*(db: CoreDbRef): CoreDbMptRef =
-    db.setTrackLegaApi LegaNewMptFn
-    result = db.ctx.getMpt(CtGeneric, Opt.none(EthAddress)).CoreDbMptRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  # ----------------
-
-  proc toPhk*(mpt: CoreDbMptRef): CoreDbPhkRef =
-    mpt.setTrackLegaApi LegaToPhkFn
-    result = mpt.distinctBase.toPhk.CoreDbPhkRef
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc phkPrune*(db: CoreDbRef; root: Hash256): CoreDbPhkRef =
-    db.setTrackLegaApi LegaNewPhkFn
-    let
-      trie = db.ctx.methods.newColFn(
-        CtGeneric, root, Opt.none(EthAddress)).valueOr:
-          raiseAssert error.prettyText() & ": " & $api
-      phk = db.ctx.getMpt(trie).valueOr:
-        raiseAssert error.prettyText() & ": " & $api
-    result = phk.toCoreDxPhkRef.CoreDbPhkRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed, root
-
-  proc phkPrune*(db: CoreDbRef): CoreDbPhkRef =
-    db.setTrackLegaApi LegaNewPhkFn
-    result = db.ctx.getMpt(
-      CtGeneric, Opt.none(EthAddress)).toCoreDxPhkRef.CoreDbPhkRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  # ----------------
-
-  proc get*(mpt: CoreDbMptRef; key: openArray[byte]): Blob =
-    mpt.setTrackLegaApi LegaMptGetFn
-    result = mpt.distinctBase.fetchOrEmpty(key).expect $api
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc get*(phk: CoreDbPhkRef; key: openArray[byte]): Blob =
-    phk.setTrackLegaApi LegaPhkGetFn
-    result = phk.distinctBase.fetchOrEmpty(key).expect $api
-    phk.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc del*(mpt: CoreDbMptRef; key: openArray[byte]) =
-    mpt.setTrackLegaApi LegaMptDelFn
-    mpt.distinctBase.delete(key).expect $api
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr
-
-  proc del*(phk: CoreDbPhkRef; key: openArray[byte]) =
-    phk.setTrackLegaApi LegaPhkDelFn
-    phk.distinctBase.delete(key).expect $api
-    phk.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr
-
-  proc put*(mpt: CoreDbMptRef; key: openArray[byte]; val: openArray[byte]) =
-    mpt.setTrackLegaApi LegaMptPutFn
-    mpt.distinctBase.merge(key, val).expect $api
-    mpt.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr
-
-  proc put*(phk: CoreDbPhkRef; key: openArray[byte]; val: openArray[byte]) =
-    phk.setTrackLegaApi LegaPhkPutFn
-    phk.distinctBase.merge(key, val).expect $api
-    phk.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr
-
-  proc contains*(mpt: CoreDbMptRef; key: openArray[byte]): bool =
-    mpt.setTrackLegaApi LegaMptContainsFn
-    result = mpt.distinctBase.hasPath(key).expect $api
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc contains*(phk: CoreDbPhkRef; key: openArray[byte]): bool =
-    phk.setTrackLegaApi LegaPhkContainsFn
-    result = phk.distinctBase.hasPath(key).expect $api
-    phk.ifTrackLegaApi: debug legaApiTxt, api, elapsed, key=key.toStr, result
-
-  proc rootHash*(mpt: CoreDbMptRef): Hash256 =
-    mpt.setTrackLegaApi LegaMptRootHashFn
-    result = mpt.distinctBase.methods.getColFn().state.valueOr:
-      raiseAssert error.prettyText() & ": " & $api
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed, result
-
-  proc rootHash*(phk: CoreDbPhkRef): Hash256 =
-    phk.setTrackLegaApi LegaPhkRootHashFn
-    result = phk.distinctBase.methods.getColFn().state.valueOr:
-      raiseAssert error.prettyText() & ": " & $api
-    phk.ifTrackLegaApi: debug legaApiTxt, api, elapsed, result
-
-  # ----------------
-
-  proc beginTransaction*(db: CoreDbRef): CoreDbTxRef =
-    db.setTrackLegaApi LegaBeginTxFn
-    result = db.distinctBase.methods.beginFn().CoreDbTxRef
-    db.ifTrackLegaApi:
-      debug legaApiTxt, api, elapsed, newLevel=db.methods.levelFn()
-
-  proc commit*(tx: CoreDbTxRef, applyDeletes = true) =
-    tx.setTrackLegaApi LegaTxCommitFn:
-      let prvLevel {.used.} = tx.distinctBase.methods.levelFn()
-    tx.distinctBase.commit()
-    tx.ifTrackLegaApi: debug legaApiTxt, api, elapsed, prvLevel
-
-  proc rollback*(tx: CoreDbTxRef) =
-    tx.setTrackLegaApi LegaTxCommitFn:
-      let prvLevel {.used.} = tx.distinctBase.methods.levelFn()
-    tx.distinctBase.rollback()
-    tx.ifTrackLegaApi: debug legaApiTxt, api, elapsed, prvLevel
-
-  proc dispose*(tx: CoreDbTxRef) =
-    tx.setTrackLegaApi LegaTxDisposeFn:
-      let prvLevel {.used.} = tx.distinctBase.methods.levelFn()
-    tx.distinctBase.dispose()
-    tx.ifTrackLegaApi: debug legaApiTxt, api, elapsed, prvLevel
-
-  # ----------------
-
-  proc capture*(
-      db: CoreDbRef;
-      flags: set[CoreDbCaptFlags] = {};
-        ): CoreDbCaptRef =
-    db.setTrackLegaApi LegaCaptureFn
-    result = db.newCapture(flags).expect($api).CoreDbCaptRef
-    db.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc recorder*(cp: CoreDbCaptRef): CoreDbRef =
-    cp.setTrackLegaApi LegaCptRecorderFn
-    result = cp.distinctBase.recorder()
-    cp.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc logDb*(cp: CoreDbCaptRef): TableRef[Blob,Blob] =
-    cp.setTrackLegaApi LegaCptLogDbFn
-    result = cp.distinctBase.logDb()
-    cp.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  proc flags*(cp: CoreDbCaptRef): set[CoreDbCaptFlags] =
-    cp.setTrackLegaApi LegaCptFlagsFn
-    result = cp.distinctBase.flags()
-    cp.ifTrackLegaApi: debug legaApiTxt, api, elapsed, result
-
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@@ -19,10 +19,6 @@ import
  ./base_desc

type
-  CoreDbApiTrackRef* =
-    CoreDbRef | CoreDbKvtRef | CoreDbMptRef | CoreDbPhkRef |
-    CoreDbTxRef | CoreDbCaptRef
-
  CoreDxApiTrackRef* =
    CoreDbRef | CoreDxKvtRef | CoreDbColRef |
    CoreDbCtxRef | CoreDxMptRef | CoreDxPhkRef | CoreDxAccRef |

@@ -39,7 +35,7 @@ type
    AccHasPathFn = "acc/hasPath"
    AccMergeFn = "acc/merge"
    AccGetMptFn = "acc/getMpt"
-    AccStoFlushFn = "acc/stoFlush"
+    AccStoDeleteFn = "acc/stoDelete"
    AccToMptFn = "acc/toMpt"

    AnyBackendFn = "any/backend"

@@ -78,44 +74,6 @@ type
    KvtPairsIt = "kvt/pairs"
    KvtPutFn = "kvt/put"

-    LegaBeginTxFn = "lega/beginTransaction"
-    LegaCaptureFn = "lega/cpt/capture"
-    LegaCptFlagsFn = "lega/cpt/flags"
-    LegaCptLogDbFn = "lega/cpt/logDb"
-    LegaCptRecorderFn = "lega/cpt/recorder"
-
-    LegaKvtContainsFn = "lega/kvt/contains"
-    LegaKvtDelFn = "lega/kvt/del"
-    LegaKvtGetFn = "lega/kvt/get"
-    LegaKvtPairsIt = "lega/kvt/pairs"
-    LegaKvtPutFn = "lega/kvt/put"
-
-    LegaMptContainsFn = "lega/mpt/contains"
-    LegaMptDelFn = "lega/mpt/del"
-    LegaMptGetFn = "lega/mpt/get"
-    LegaMptPutFn = "lega/mpt/put"
-    LegaMptRootHashFn = "lega/mpt/rootHash"
-    LegaMptPairsIt = "lega/mpt/pairs"
-    LegaMptReplicateIt = "lega/mpt/replicate"
-
-    LegaNewKvtFn = "lega/kvt"
-    LegaNewMptFn = "lega/mptPrune"
-    LegaNewPhkFn = "lega/phkPrune"
-
-    LegaPhkContainsFn = "lega/phk/contains"
-    LegaPhkDelFn = "lega/phk/del"
-    LegaPhkGetFn = "lega/phk/get"
-    LegaPhkPutFn = "lega/phk/put"
-    LegaPhkRootHashFn = "lega/phk/rootHash"
-
-    LegaToMptFn = "lega/phk/toMpt"
-    LegaToPhkFn = "lega/mpt/toPhk"
-
-    LegaTxCommitFn = "lega/commit"
-    LegaTxDisposeFn = "lega/dispose"
-    LegaTxRollbackFn = "lega/rollback"
-    LegaTxSaveDisposeFn = "lega/safeDispose"
-
    MptDeleteFn = "mpt/delete"
    MptFetchFn = "mpt/fetch"
    MptFetchOrEmptyFn = "mpt/fetchOrEmpty"

@@ -165,9 +123,6 @@ proc toStr*(p: CoreDbColRef): string =
    (a,b) = if 0 < w.len and w[0] == '(': ("","") else: ("(",")")
  "Col" & a & w & b

-func toStr*(w: CoreDbKvtRef): string =
-  if w.distinctBase.isNil: "kvt(nil)" else: "kvt"
-
func toLenStr*(w: openArray[byte]): string =
  if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
  else: "openArray[" & $w.len & "]"

@@ -219,38 +174,6 @@ proc toStr*(rc: CoreDbRc[CoreDxAccRef]): string = rc.toStr "acc"
func toStr*(ela: Duration): string =
  aristo_profile.toStr(ela)

-# ------------------------------------------------------------------------------
-# Public legacy API logging framework
-# ------------------------------------------------------------------------------
-
-template beginLegaApi*(w: CoreDbApiTrackRef; s: static[CoreDbFnInx]) =
-  when typeof(w) is CoreDbRef:
-    let db = w
-  else:
-    let db = w.distinctBase.parent
-  # Prevent from cascaded logging
-  let save = db.trackNewApi
-  db.trackNewApi = false
-  defer: db.trackNewApi = save
-
-  when CoreDbEnableApiProfiling:
-    const blaCtx {.inject.} = s          # Local use only
-  let blaStart {.inject.} = getTime()    # Local use only
-
-template endLegaApiIf*(w: CoreDbApiTrackRef; code: untyped) =
-  block:
-    when typeof(w) is CoreDbRef:
-      let db = w
-    else:
-      let db = w.distinctBase.parent
-    when CoreDbEnableApiProfiling:
-      let elapsed {.inject,used.} = getTime() - blaStart
-      aristo_profile.update(db.profTab, blaCtx.ord, elapsed)
-    if db.trackLegaApi:
-      when not CoreDbEnableApiProfiling: # otherwise use variable above
-        let elapsed {.inject,used.} = getTime() - blaStart
-      code
-
# ------------------------------------------------------------------------------
# Public new API logging framework
# ------------------------------------------------------------------------------
@@ -27,19 +27,12 @@ type
    Ooops
    AristoDbMemory                    ## Memory backend emulator
    AristoDbRocks                     ## RocksDB backend
-    AristoDbDualRocks                 ## Dual RocksDB backends for `Kvt` and `Aristo`
    AristoDbVoid                      ## No backend

  const
    CoreDbPersistentTypes* = {AristoDbRocks}

  type
-    CoreDbKvtRef*  = distinct CoreDxKvtRef  # Legacy descriptor
-    CoreDbMptRef*  = distinct CoreDxMptRef  # Legacy descriptor
-    CoreDbPhkRef*  = distinct CoreDxPhkRef  # Legacy descriptor
-    CoreDbTxRef*   = distinct CoreDxTxRef   # Legacy descriptor
-    CoreDbCaptRef* = distinct CoreDxCaptRef # Legacy descriptor
-
    CoreDbProfListRef* = AristoDbProfListRef
      ## Borrowed from `aristo_profile`, only used in profiling mode

@@ -96,7 +89,7 @@ type
  # --------------------------------------------------
  # Sub-descriptor: Misc methods for main descriptor
  # --------------------------------------------------
-  CoreDbBaseDestroyFn* = proc(flush = true) {.noRaise.}
+  CoreDbBaseDestroyFn* = proc(eradicate = true) {.noRaise.}
  CoreDbBaseColStateFn* = proc(
    col: CoreDbColRef): CoreDbRc[Hash256] {.noRaise.}
  CoreDbBaseColPrintFn* = proc(vid: CoreDbColRef): string {.noRaise.}

@@ -214,7 +207,7 @@ type
  CoreDbAccGetMptFn* = proc(): CoreDbRc[CoreDxMptRef] {.noRaise.}
  CoreDbAccFetchFn* = proc(k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
  CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
-  CoreDbAccStoFlushFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
+  CoreDbAccStoDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
  CoreDbAccMergeFn* = proc(v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
  CoreDbAccHasPathFn* = proc(k: EthAddress): CoreDbRc[bool] {.noRaise.}
  CoreDbAccGetColFn* = proc(): CoreDbColRef {.noRaise.}

@@ -225,7 +218,7 @@ type
    getMptFn*: CoreDbAccGetMptFn
    fetchFn*: CoreDbAccFetchFn
    deleteFn*: CoreDbAccDeleteFn
-    stoFlushFn*: CoreDbAccStoFlushFn
+    stoDeleteFn*: CoreDbAccStoDeleteFn
    mergeFn*: CoreDbAccMergeFn
    hasPathFn*: CoreDbAccHasPathFn
    getColFn*: CoreDbAccGetColFn

@@ -67,7 +67,7 @@ proc validateMethodsDesc(fns: CoreDbAccFns) =
  doAssert not fns.getMptFn.isNil
  doAssert not fns.fetchFn.isNil
  doAssert not fns.deleteFn.isNil
-  doAssert not fns.stoFlushFn.isNil
+  doAssert not fns.stoDeleteFn.isNil
  doAssert not fns.mergeFn.isNil
  doAssert not fns.hasPathFn.isNil
  doAssert not fns.getColFn.isNil

@@ -20,13 +20,8 @@ import
when CoreDbEnableApiTracking:
  import chronicles

-const
-  ProvideLegacyAPI = CoreDbProvideLegacyAPI
-
-when ProvideLegacyAPI and CoreDbEnableApiTracking:
  const
    logTxt = "CoreDb/it "
-    legaApiTxt = logTxt & "legacy API"
    newApiTxt = logTxt & "API"

# Annotation helper(s)

@@ -82,25 +77,6 @@ iterator replicate*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
      let trie = mpt.methods.getColFn()
      debug newApiTxt, api, elapsed, trie

-when ProvideLegacyAPI:
-
-  iterator pairs*(kvt: CoreDbKvtRef): (Blob, Blob) {.apiRaise.} =
-    kvt.setTrackLegaApi LegaKvtPairsIt
-    for k,v in kvt.distinctBase.pairs(): yield (k,v)
-    kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
-    ## Trie traversal, not supported for `CoreDbPhkRef`
-    mpt.setTrackLegaApi LegaMptPairsIt
-    for k,v in mpt.distinctBase.pairs(): yield (k,v)
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
-  iterator replicate*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
-    ## Low level trie dump, not supported for `CoreDbPhkRef`
-    mpt.setTrackLegaApi LegaMptReplicateIt
-    for k,v in mpt.distinctBase.replicate(): yield (k,v)
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

@@ -20,13 +20,8 @@ import
when CoreDbEnableApiTracking:
  import chronicles

-const
-  ProvideLegacyAPI = CoreDbProvideLegacyAPI
-
-when ProvideLegacyAPI and CoreDbEnableApiTracking:
  const
    logTxt = "CoreDb/itp "
-    legaApiTxt = logTxt & "legacy API"
    newApiTxt = logTxt & "API"

# Annotation helper(s)

@@ -56,14 +51,6 @@ iterator replicatePersistent*(mpt: CoreDxMptRef): (Blob, Blob) {.rlpRaise.} =
      let trie = mpt.methods.getColFn()
      debug newApiTxt, api, elapsed, trie

-when ProvideLegacyAPI:
-
-  iterator replicatePersistent*(mpt: CoreDbMptRef): (Blob, Blob) {.rlpRaise.} =
-    ## Low level trie dump, not supported for `CoreDbPhkRef`
-    mpt.setTrackLegaApi LegaMptReplicateIt
-    for k,v in mpt.distinctBase.replicatePersistent(): yield (k,v)
-    mpt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
-
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

@@ -13,14 +13,11 @@
import
  eth/common,
  ../aristo,
-  ./backend/aristo_db
+  ./backend/aristo_db,
+  "."/[base_iterators, core_apps]

-import
-  ./core_apps_newapi as core_apps
import
  ./base except bless
-import
-  ./base_iterators

export
  EmptyBlob,
@@ -42,17 +42,9 @@ proc newCoreDbRef*(
  ## The production database type is `AristoDbRocks` which uses a single
  ## `RocksDb` backend for both, `Aristo` and `KVT`.
  ##
-  ## For debugging, there is the `AristoDbDualRocks` database with split
-  ## backends for `Aristo` and `KVT`. This database is not compatible with
-  ## `AristoDbRocks` so it cannot be reliably switched between both versions
-  ## with consecutive sessions.
-  ##
  when dbType == AristoDbRocks:
    newAristoRocksDbCoreDbRef path, opts

-  elif dbType == AristoDbDualRocks:
-    newAristoDualRocksDbCoreDbRef path, opts
-
  else:
    {.error: "Unsupported dbType for persistent newCoreDbRef()".}
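With `AristoDbDualRocks` moved out of production code, `AristoDbRocks` is the only persistent constructor path left here. A small sketch of how a caller sets one up; the `/tmp/nimbus-demo` location and the import paths (written as if from the `tests` directory) are assumptions, while `DbOptions.init()` and the constructor call match the test-runner code later in this diff.

  # Illustrative only.
  import ../nimbus/db/core_db/persistent, ../nimbus/db/opts

  let db = AristoDbRocks.newCoreDbRef("/tmp/nimbus-demo", DbOptions.init())
  # ... use the descriptor ...
  db.finish(eradicate = true)   # close and, for this backend, delete the files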
@@ -44,7 +44,7 @@ type
  KvtApiCommitFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
  KvtApiDelFn* = proc(db: KvtDbRef,
    key: openArray[byte]): Result[void,KvtError] {.noRaise.}
-  KvtApiFinishFn* = proc(db: KvtDbRef, flush = false) {.noRaise.}
+  KvtApiFinishFn* = proc(db: KvtDbRef, eradicate = false) {.noRaise.}
  KvtApiForgetFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
  KvtApiForkTxFn* = proc(db: KvtDbRef,
    backLevel: int): Result[KvtDbRef,KvtError] {.noRaise.}

@@ -47,11 +47,11 @@ type
  # -------------

  CloseFn* =
-    proc(flush: bool) {.gcsafe, raises: [].}
-      ## Generic destructor for the `Kvt DB` backend. The argument `flush`
+    proc(eradicate: bool) {.gcsafe, raises: [].}
+      ## Generic destructor for the `Kvt DB` backend. The argument `eradicate`
      ## indicates that a full database deletion is requested. If passed
      ## `false` the outcome might differ depending on the type of backend
-      ## (e.g. in-memory backends would flush on close.)
+      ## (e.g. in-memory backends would eradicate on close.)

  CanModFn* =
    proc(): Result[void,KvtError] {.gcsafe, raises: [].}

@@ -64,9 +64,10 @@ type
      ## This function stores a request function for the piggiback mode
      ## writing to the `Aristo` set of column families.
      ##
-      ## If used at all, this function would run `rocks_db.setWrReqTriggeredFn()()`
-      ## with a `KvtDbRef` type argument for `db`. This allows to run the `Kvt`
-      ## without linking to the rocksdb interface unless it is really needed.
+      ## If used at all, this function would run thee function closure
+      ## `rocks_db.setWrReqTriggeredFn()()` with a `KvtDbRef` type argument
+      ## for `db`. This allows to run the `Kvt` without linking to the
+      ## rocksdb interface unless it is really needed.

  # -------------

@@ -92,6 +93,7 @@ proc init*(trg: var BackendObj; src: BackendObj) =
  trg.putEndFn = src.putEndFn
  trg.closeFn = src.closeFn
  trg.canModFn = src.canModFn
+  trg.setWrReqFn = src.setWrReqFn

# ------------------------------------------------------------------------------
# End

@@ -65,16 +65,14 @@ proc init*(
  KvtDbRef.init VoidBackendRef


-proc finish*(db: KvtDbRef; flush = false) =
-  ## Backend destructor. The argument `flush` indicates that a full database
-  ## deletion is requested. If set `false` the outcome might differ depending
-  ## on the type of backend (e.g. the `BackendMemory` backend will always
-  ## flush on close.)
-  ##
-  ## This distructor may be used on already *destructed* descriptors.
+proc finish*(db: KvtDbRef; eradicate = false) =
+  ## Backend destructor. The argument `eradicate` indicates that a full
+  ## database deletion is requested. If set `false` the outcome might differ
+  ## depending on the type of backend (e.g. the `BackendMemory` backend will
+  ## always eradicate on close.)
  ##
  if not db.backend.isNil:
-    db.backend.closeFn flush
+    db.backend.closeFn eradicate
  discard db.getCentre.forgetOthers()

# ------------------------------------------------------------------------------

@@ -134,8 +134,8 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =

proc closeFn(db: RdbBackendRef): CloseFn =
  result =
-    proc(flush: bool) =
-      db.rdb.destroy(flush)
+    proc(eradicate: bool) =
+      db.rdb.destroy(eradicate)

proc canModFn(db: RdbBackendRef): CanModFn =
  result =

@@ -186,7 +186,7 @@ proc putEndTriggeredFn(db: RdbBackendRef): PutEndFn =
proc closeTriggeredFn(db: RdbBackendRef): CloseFn =
  ## Variant of `closeFn()` for piggyback write batch
  result =
-    proc(flush: bool) =
+    proc(eradicate: bool) =
      # Nothing to do here as we do not own the backend
      discard
@@ -28,9 +28,50 @@ import
# ------------------------------------------------------------------------------

proc getCFInitOptions(opts: DbOptions): ColFamilyOptionsRef =
-  result = defaultColFamilyOptions()
+  # TODO the configuration options below have not been tuned but are rather
+  #      based on gut feeling, guesses and by looking at other clients - it
+  #      would make sense to test different settings and combinations once the
+  #      data model itself has settled down as their optimal values will depend
+  #      on the shape of the data - it'll also be different per column family..
+  let cfOpts = defaultColFamilyOptions()
+
  if opts.writeBufferSize > 0:
-    result.setWriteBufferSize(opts.writeBufferSize)
+    cfOpts.setWriteBufferSize(opts.writeBufferSize)
+
+  # Without this option, the WAL might never get flushed since a small column
+  # family (like the admin CF) with only tiny writes might keep it open - this
+  # negatively affects startup times since the WAL is replayed on every startup.
+  # https://github.com/facebook/rocksdb/blob/af50823069818fc127438e39fef91d2486d6e76c/include/rocksdb/options.h#L719
+  # Flushing the oldest
+  let writeBufferSize =
+    if opts.writeBufferSize > 0:
+      opts.writeBufferSize
+    else:
+      64 * 1024 * 1024 # TODO read from rocksdb?
+
+  cfOpts.setMaxTotalWalSize(2 * writeBufferSize)
+
+  # When data is written to rocksdb, it is first put in an in-memory table
+  # whose index is a skip list. Since the mem table holds the most recent data,
+  # all reads must go through this skiplist which results in slow lookups for
+  # already-written data.
+  # We enable a bloom filter on the mem table to avoid this lookup in the cases
+  # where the data is actually on disk already (ie wasn't updated recently).
+  # TODO there's also a hashskiplist that has both a hash index and a skip list
+  #      which maybe could be used - uses more memory, requires a key prefix
+  #      extractor
+  cfOpts.setMemtableWholeKeyFiltering(true)
+  cfOpts.setMemtablePrefixBloomSizeRatio(0.1)
+
+  # LZ4 seems to cut database size to 2/3 roughly, at the time of writing
+  # Using it for the bottom-most level means it applies to 90% of data but
+  # delays compression until data has settled a bit, which seems like a
+  # reasonable tradeoff.
+  # TODO evaluate zstd compression with a trained dictionary
+  #      https://github.com/facebook/rocksdb/wiki/Compression
+  cfOpts.setBottommostCompression(Compression.lz4Compression)
+
+  cfOpts


proc getDbInitOptions(opts: DbOptions): DbOptionsRef =

@@ -107,12 +148,12 @@ proc init*(
  ok()


-proc destroy*(rdb: var RdbInst; flush: bool) =
+proc destroy*(rdb: var RdbInst; eradicate: bool) =
  ## Destructor (no need to do anything if piggybacked)
  if 0 < rdb.basePath.len:
    rdb.baseDb.close()

-    if flush:
+    if eradicate:
      try:
        rdb.dataDir.removeDir
@@ -152,13 +152,13 @@ proc merge*(al: AccountLedger; account: CoreDbAccount) =
proc freeStorage*(al: AccountLedger, eAddr: EthAddress) =
  const info = "AccountLedger/freeStorage()"
  # Flush associated storage trie
-  al.distinctBase.stoFlush(eAddr).isOkOr:
+  al.distinctBase.stoDelete(eAddr).isOkOr:
    raiseAssert info & $$error

proc delete*(al: AccountLedger, eAddr: EthAddress) =
  const info = "AccountLedger/delete()"
  # Flush associated storage trie
-  al.distinctBase.stoFlush(eAddr).isOkOr:
+  al.distinctBase.stoDelete(eAddr).isOkOr:
    raiseAssert info & $$error
  # Clear account
  al.distinctBase.delete(eAddr).isOkOr:

@@ -144,8 +144,8 @@ proc subBalance*(db: AccountStateDB, eAddr: EthAddress, delta: UInt256) =

proc clearStorage*(db: AccountStateDB, eAddr: EthAddress) =
  # Flush associated storage trie (will update account record on disk)
-  db.trie.distinctBase.stoFlush(eAddr).isOkOr:
-    raiseAssert "clearStorage(): stoFlush() failed, " & $$error
+  db.trie.distinctBase.stoDelete(eAddr).isOkOr:
-    raiseAssert "clearStorage(): stoDelete() failed, " & $$error
  # Reset storage info locally so that `Aristo` would not complain when
  # updating the account record on disk
  var account = db.getAccount(eAddr)

@@ -110,7 +110,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
  block:
    let report = db.mergeList w[0]
    if report.error != 0:
-      db.finish(flush=true)
+      db.finish(eradicate=true)
      check report.error == 0
      return err(report.error)
    let rc = db.persist()

@@ -130,7 +130,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
    for n in 0 ..< dx.len:
      let report = dx[n].mergeList w[n+1]
      if report.error != 0:
-        db.finish(flush=true)
+        db.finish(eradicate=true)
        check (n, report.error) == (n,0)
        return err(report.error)

@@ -140,7 +140,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =

proc cleanUp(dx: var DbTriplet) =
  if not dx[0].isNil:
-    dx[0].finish(flush=true)
+    dx[0].finish(eradicate=true)
    dx.reset

proc isDbEq(a, b: LayerDeltaRef; db: AristoDbRef; noisy = true): bool =

@@ -112,7 +112,7 @@ proc innerCleanUp(db: var AristoDbRef): bool {.discardable.} =
  if rx.isOk:
    let rc = rx.value.collapse(commit=false)
    xCheckRc rc.error == 0
-  db.finish(flush=true)
+  db.finish(eradicate=true)
  db = AristoDbRef(nil)

proc schedStow(

@@ -340,7 +340,7 @@ proc testTxMergeAndDeleteOneByOne*(
    fwdRevVfyToggle = true
  defer:
    if not db.isNil:
-      db.finish(flush=true)
+      db.finish(eradicate=true)

  for n,w in list:
    # Start with brand new persistent database.

@@ -448,7 +448,7 @@ proc testTxMergeAndDeleteSubTree*(
    db = AristoDbRef(nil)
  defer:
    if not db.isNil:
-      db.finish(flush=true)
+      db.finish(eradicate=true)

  for n,w in list:
    # Start with brand new persistent database.

@@ -546,7 +546,7 @@ proc testTxMergeProofAndKvpList*(
    count = 0
  defer:
    if not db.isNil:
-      db.finish(flush=true)
+      db.finish(eradicate=true)

  for n,w in list:
@@ -20,7 +20,8 @@ import
  ../nimbus/db/core_db/persistent,
  ../nimbus/core/chain,
  ./replay/pp,
-  ./test_coredb/[coredb_test_xx, test_chainsync, test_helpers]
+  ./test_coredb/[
+    coredb_test_xx, test_chainsync, test_coredb_helpers, test_helpers]

const
  # If `true`, this compile time option set up `unittest2` for manual parsing

@@ -151,17 +152,17 @@ proc setErrorLevel {.used.} =
proc initRunnerDB(
    path: string;
    specs: CaptureSpecs;
-    dbType: CoreDbType;
+    dbType: CdbTypeEx;
    pruneHistory: bool;
      ): CommonRef =
  let coreDB =
    # Resolve for static `dbType`
    case dbType:
-    of AristoDbMemory: AristoDbMemory.newCoreDbRef()
-    of AristoDbRocks: AristoDbRocks.newCoreDbRef(path, DbOptions.init())
-    of AristoDbDualRocks: AristoDbDualRocks.newCoreDbRef(path, DbOptions.init())
-    of AristoDbVoid: AristoDbVoid.newCoreDbRef()
-    of Ooops: raiseAssert "Ooops"
+    of CdbAristoMemory: AristoDbMemory.newCoreDbRef()
+    of CdbAristoRocks: AristoDbRocks.newCoreDbRef(path, DbOptions.init())
+    of CdbAristoDualRocks: newCdbAriAristoDualRocks(path, DbOptions.init())
+    of CdbAristoVoid: AristoDbVoid.newCoreDbRef()
+    of CdbOoops: raiseAssert "Ooops"

  when false: # or true:
    setDebugLevel()

@@ -198,7 +199,7 @@ proc initRunnerDB(
proc chainSyncRunner(
    noisy = true;
    capture = memorySampleDefault;
-    dbType = CoreDbType(0);
+    dbType = CdbTypeEx(0);
    pruneHistory = false;
    profilingOk = false;
    finalDiskCleanUpOk = true;

@@ -220,14 +221,14 @@ proc chainSyncRunner(

    dbType = block:
      # Decreasing priority: dbType, capture.dbType, dbTypeDefault
-      var effDbType = dbTypeDefault
-      if dbType != CoreDbType(0):
+      var effDbType = dbTypeDefault.to(CdbTypeEx)
+      if dbType != CdbTypeEx(0):
        effDbType = dbType
      elif capture.dbType != CoreDbType(0):
-        effDbType = capture.dbType
+        effDbType = capture.dbType.to(CdbTypeEx)
      effDbType

-    persistent = dbType in CoreDbPersistentTypes
+    persistent = dbType in CdbTypeExPersistent

  defer:
    if persistent: baseDir.flushDbDir

@@ -238,7 +239,7 @@ proc chainSyncRunner(
  let
    com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
  defer:
-    com.db.finish(flush = finalDiskCleanUpOk)
+    com.db.finish(eradicate = finalDiskCleanUpOk)
    if profilingOk: noisy.test_chainSyncProfilingPrint numBlocks
    if persistent and finalDiskCleanUpOk: dbDir.flushDbDir

@@ -255,7 +256,7 @@ proc chainSyncRunner(
proc persistentSyncPreLoadAndResumeRunner(
    noisy = true;
    capture = persistentSampleDefault;
-    dbType = CoreDbType(0);
+    dbType = CdbTypeEx(0);
    profilingOk = false;
    pruneHistory = false;
    finalDiskCleanUpOk = true;

@@ -271,14 +272,14 @@ proc persistentSyncPreLoadAndResumeRunner(

    dbType = block:
      # Decreasing priority: dbType, capture.dbType, dbTypeDefault
-      var effDbType = dbTypeDefault
-      if dbType != CoreDbType(0):
+      var effDbType = dbTypeDefault.to(CdbTypeEx)
+      if dbType != CdbTypeEx(0):
        effDbType = dbType
      elif capture.dbType != CoreDbType(0):
-        effDbType = capture.dbType
+        effDbType = capture.dbType.to(CdbTypeEx)
      effDbType

-  doAssert dbType in CoreDbPersistentTypes
+  doAssert dbType in CdbTypeExPersistent
  defer: baseDir.flushDbDir

  let

@@ -292,7 +293,7 @@ proc persistentSyncPreLoadAndResumeRunner(
  let
    com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
  defer:
-    com.db.finish(flush = finalDiskCleanUpOk)
+    com.db.finish(eradicate = finalDiskCleanUpOk)
    if profilingOk: noisy.test_chainSyncProfilingPrint firstPart

    if noisy:

@@ -308,7 +309,7 @@ proc persistentSyncPreLoadAndResumeRunner(
  let
    com = initRunnerDB(dbDir, capture, dbType, pruneHistory)
  defer:
-    com.db.finish(flush = finalDiskCleanUpOk)
+    com.db.finish(eradicate = finalDiskCleanUpOk)
    if profilingOk: noisy.test_chainSyncProfilingPrint secndPart
    if finalDiskCleanUpOk: dbDir.flushDbDir

@@ -354,7 +355,7 @@ when isMainModule:
  for n,capture in sampleList:
    noisy.profileSection("@sample #" & $n, state):
      noisy.chainSyncRunner(
-        #dbType = AristoDbDualRocks,
+        #dbType = CdbAristoDualRocks,
        capture = capture,
        pruneHistory = true,
        #profilingOk = true,
@@ -0,0 +1,50 @@
+# Nimbus
+# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Licensed under either of
+#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+#    http://www.apache.org/licenses/LICENSE-2.0)
+#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+#    http://opensource.org/licenses/MIT)
+# at your option. This file may not be copied, modified, or
+# distributed except according to those terms.
+
+## Some extended rocksdb backend modes for testing
+
+import
+  std/sequtils,
+  ../../nimbus/db/core_db/backend/aristo_rocksdb,
+  ../../nimbus/db/[core_db, opts]
+
+type
+  CdbTypeEx* = enum
+    CdbOoops
+    CdbAristoMemory = AristoDbMemory ## Memory backend emulator
+    CdbAristoRocks = AristoDbRocks   ## RocksDB backend
+    CdbAristoVoid = AristoDbVoid     ## No backend
+    CdbAristoDualRocks               ## Dual RocksDB backends for Kvt & Aristo
+
+func to*(cdb: CoreDbType; T: type CdbTypeEx): T =
+  case cdb:
+  # Let the compiler find out whether the enum is complete
+  of Ooops, AristoDbMemory, AristoDbRocks, AristoDbVoid:
+    return CdbTypeEx(cdb.ord)
+
+const
+  CdbTypeExPersistent* =
+    CoreDbPersistentTypes.mapIt(it.to(CdbTypeEx)) & @[CdbAristoDualRocks]
+
+func `$`*(w: CdbTypeEx): string =
+  case w:
+  of CdbOoops, CdbAristoMemory, CdbAristoRocks, CdbAristoVoid:
+    $CoreDbType(w.ord)
+  of CdbAristoDualRocks:
+    "CdbAristoDualRocks"
+
+proc newCdbAriAristoDualRocks*(path: string, opts: DbOptions): CoreDbRef =
+  ## For debugging, there is the `AristoDbDualRocks` database with split
+  ## backends for `Aristo` and `KVT`. This database is not compatible with
+  ## `AristoDbRocks` so it cannot be reliably switched between both versions
+  ## with consecutive sessions.
+  newAristoDualRocksDbCoreDbRef path, opts
+
+# End
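The `CdbTypeEx` helper above lets the test runner keep exercising the dual-RocksDB mode without it being a production `CoreDbType`. A short usage sketch; the `doAssert` wrapper is illustrative, while `to(CdbTypeEx)`, `CdbTypeExPersistent` and the import paths come from this commit (written as if from the `tests` directory).

  # Illustrative only.
  import ../nimbus/db/core_db
  import ./test_coredb/test_coredb_helpers

  doAssert AristoDbRocks.to(CdbTypeEx) == CdbAristoRocks
  doAssert CdbAristoRocks in CdbTypeExPersistent         # needs an on-disk path
  doAssert CdbAristoDualRocks in CdbTypeExPersistent     # test-only dual backend
  doAssert AristoDbMemory.to(CdbTypeEx) notin CdbTypeExPersistent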