Separate config for core db and ledger (#2479)
* Updates and corrections

* Extract `CoreDb` configuration from `base.nim` into a separate module

  why:
    This makes it easier to avoid circular imports, in particular when the
    capture journal (aka tracer) is revived.

* Extract `Ledger` configuration from `base.nim` into a separate module

  why:
    This makes it easier to avoid circular imports (if any.)

  also:
    Move the `accounts_ledger.nim` file to the sub-folder `backend` so that
    the layout resembles that of `core_db`.
This commit (b924fdcaa7) is based on parent 01ab209497.
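The resulting module layout, as far as it can be inferred from the import changes in the hunks below (paths shown relative to the `db` directory, for orientation only):

    core_db/
      base.nim                  -- now imports ./base/base_config
      base/
        base_config.nim         -- new, compile-time CoreDb* settings (full listing below)
        base_desc.nim
        api_tracking.nim
    ledger.nim                  -- now imports ./ledger/base/base_config etc.
    ledger/
      base.nim
      base/
        base_config.nim         -- new, compile-time Ledger* settings
        base_desc.nim
        api_tracking.nim
      backend/
        accounts_ledger.nim     -- moved here from ledger/accounts_ledger.nim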
@ -301,12 +301,16 @@ type
      {.noRaise.}
    ## Merge the key-value-pair argument `(accKey,accRec)` as an account
    ## ledger value, i.e. the sub-tree starting at `VertexID(1)`.
    ##
    ## On success, the function returns `true` if the `accPath` argument was
    ## not on the database already or the value differed from `accRec`, and
    ## `false` otherwise.

  AristoApiMergeGenericDataFn* =
    proc( db: AristoDbRef;
          root: VertexID;
          path: openArray[byte];
          data: openArray[byte];
    proc(db: AristoDbRef;
         root: VertexID;
         path: openArray[byte];
         data: openArray[byte];
        ): Result[bool,AristoError]
      {.noRaise.}
    ## Variant of `mergeXXX()` for generic sub-trees, i.e. for arguments
@ -683,7 +687,7 @@ func dup*(api: AristoApiRef): AristoApiRef =
|
|||
txLevel: api.txLevel,
|
||||
txTop: api.txTop)
|
||||
when AutoValidateApiHooks:
|
||||
api.validate
|
||||
result.validate
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public profile API constuctor
|
||||
|
@ -718,9 +722,9 @@ func init*(
|
|||
result = api.commit(a)
|
||||
|
||||
profApi.deleteAccountRecord =
|
||||
proc(a: AristoDbRef; accPath: Hash256): auto =
|
||||
proc(a: AristoDbRef; b: Hash256): auto =
|
||||
AristoApiProfDeleteAccountRecordFn.profileRunner:
|
||||
result = api.deleteAccountRecord(a, accPath)
|
||||
result = api.deleteAccountRecord(a, b)
|
||||
|
||||
profApi.deleteGenericData =
|
||||
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
|
||||
|
@ -733,14 +737,14 @@ func init*(
|
|||
result = api.deleteGenericTree(a, b)
|
||||
|
||||
profApi.deleteStorageData =
|
||||
proc(a: AristoDbRef; accPath: Hash256, c: openArray[byte]): auto =
|
||||
proc(a: AristoDbRef; b: Hash256, c: Hash256): auto =
|
||||
AristoApiProfDeleteStorageDataFn.profileRunner:
|
||||
result = api.deleteStorageData(a, accPath, c)
|
||||
result = api.deleteStorageData(a, b, c)
|
||||
|
||||
profApi.deleteStorageTree =
|
||||
proc(a: AristoDbRef; accPath: Hash256): auto =
|
||||
proc(a: AristoDbRef; b: Hash256): auto =
|
||||
AristoApiProfDeleteStorageTreeFn.profileRunner:
|
||||
result = api.deleteStorageTree(a, accPath)
|
||||
result = api.deleteStorageTree(a, b)
|
||||
|
||||
profApi.fetchLastSavedState =
|
||||
proc(a: AristoDbRef): auto =
|
||||
|
@ -748,9 +752,9 @@ func init*(
|
|||
result = api.fetchLastSavedState(a)
|
||||
|
||||
profApi.fetchAccountRecord =
|
||||
proc(a: AristoDbRef; accPath: Hash256): auto =
|
||||
proc(a: AristoDbRef; b: Hash256): auto =
|
||||
AristoApiProfFetchAccountRecordFn.profileRunner:
|
||||
result = api.fetchAccountRecord(a, accPath)
|
||||
result = api.fetchAccountRecord(a, b)
|
||||
|
||||
profApi.fetchAccountState =
|
||||
proc(a: AristoDbRef; b: bool): auto =
|
||||
|
@ -768,14 +772,14 @@ func init*(
|
|||
result = api.fetchGenericState(a, b, c)
|
||||
|
||||
profApi.fetchStorageData =
|
||||
proc(a: AristoDbRef; accPath, stoPath: Hash256): auto =
|
||||
proc(a: AristoDbRef; b, stoPath: Hash256): auto =
|
||||
AristoApiProfFetchStorageDataFn.profileRunner:
|
||||
result = api.fetchStorageData(a, accPath, stoPath)
|
||||
result = api.fetchStorageData(a, b, stoPath)
|
||||
|
||||
profApi.fetchStorageState =
|
||||
proc(a: AristoDbRef; accPath: Hash256; c: bool): auto =
|
||||
proc(a: AristoDbRef; b: Hash256; c: bool): auto =
|
||||
AristoApiProfFetchStorageStateFn.profileRunner:
|
||||
result = api.fetchStorageState(a, accPath, c)
|
||||
result = api.fetchStorageState(a, b, c)
|
||||
|
||||
profApi.findTx =
|
||||
proc(a: AristoDbRef; b: RootedVertexID; c: HashKey): auto =
|
||||
|
@ -798,9 +802,9 @@ func init*(
|
|||
result = api.forkTx(a, b)
|
||||
|
||||
profApi.hasPathAccount =
|
||||
proc(a: AristoDbRef; accPath: Hash256): auto =
|
||||
proc(a: AristoDbRef; b: Hash256): auto =
|
||||
AristoApiProfHasPathAccountFn.profileRunner:
|
||||
result = api.hasPathAccount(a, accPath)
|
||||
result = api.hasPathAccount(a, b)
|
||||
|
||||
profApi.hasPathGeneric =
|
||||
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
|
||||
|
@ -808,14 +812,14 @@ func init*(
|
|||
result = api.hasPathGeneric(a, b, c)
|
||||
|
||||
profApi.hasPathStorage =
|
||||
proc(a: AristoDbRef; accPath: Hash256, c: openArray[byte]): auto =
|
||||
proc(a: AristoDbRef; b, c: Hash256): auto =
|
||||
AristoApiProfHasPathStorageFn.profileRunner:
|
||||
result = api.hasPathStorage(a, accPath, c)
|
||||
result = api.hasPathStorage(a, b, c)
|
||||
|
||||
profApi.hasStorageData =
|
||||
proc(a: AristoDbRef; accPath: Hash256): auto =
|
||||
proc(a: AristoDbRef; b: Hash256): auto =
|
||||
AristoApiProfHasStorageDataFn.profileRunner:
|
||||
result = api.hasStorageData(a, accPath)
|
||||
result = api.hasStorageData(a, b)
|
||||
|
||||
profApi.isTop =
|
||||
proc(a: AristoTxRef): auto =
|
||||
|
@ -833,9 +837,9 @@ func init*(
|
|||
result = api.nForked(a)
|
||||
|
||||
profApi.mergeAccountRecord =
|
||||
proc(a: AristoDbRef; accPath: Hash256; c: AristoAccount): auto =
|
||||
proc(a: AristoDbRef; b: Hash256; c: AristoAccount): auto =
|
||||
AristoApiProfMergeAccountRecordFn.profileRunner:
|
||||
result = api.mergeAccountRecord(a, accPath, c)
|
||||
result = api.mergeAccountRecord(a, b, c)
|
||||
|
||||
profApi.mergeGenericData =
|
||||
proc(a: AristoDbRef; b: VertexID, c, d: openArray[byte]): auto =
|
||||
|
@ -843,9 +847,9 @@ func init*(
|
|||
result = api.mergeGenericData(a, b, c, d)
|
||||
|
||||
profApi.mergeStorageData =
|
||||
proc(a: AristoDbRef; accPath: Hash256, c, d: openArray[byte]): auto =
|
||||
proc(a: AristoDbRef; b, c: Hash256, d: Uint256): auto =
|
||||
AristoApiProfMergeStorageDataFn.profileRunner:
|
||||
result = api.mergeStorageData(a, accPath, c, d)
|
||||
result = api.mergeStorageData(a, b, c, d)
|
||||
|
||||
profApi.pathAsBlob =
|
||||
proc(a: PathID): auto =
|
||||
|
|
|
@ -43,7 +43,7 @@ type
    ## Type of leaf data.
    RawData                        ## Generic data
    AccountData                    ## `Aristo account` with vertex IDs links
    StoData                        ## Slot storage data

  PayloadRef* = ref object of RootRef
    ## The payload type depends on the sub-tree used. The `VertexID(1)` rooted
@ -43,16 +43,12 @@ proc mergeAccountRecord*(
    accPath: Hash256;          # Even nibbled byte path
    accRec: AristoAccount;     # Account data
      ): Result[bool,AristoError] =
  ## Merge the key-value-pair argument `(accKey,accPayload)` as an account
  ## Merge the key-value-pair argument `(accKey,accRec)` as an account
  ## ledger value, i.e. the sub-tree starting at `VertexID(1)`.
  ##
  ## The payload argument `accPayload` must have the `storageID` field either
  ## unset/invalid or referring to an existing vertex which will be assumed
  ## to be a storage tree.
  ##
  ## On success, the function returns `true` if the `accPayload` argument was
  ## merged into the database or updated, and `false` if it was on the database
  ## already.
  ## On success, the function returns `true` if the `accRec` argument was
  ## not on the database already or different from `accRec`, and `false`
  ## otherwise.
  ##
  let
    pyl = PayloadRef(pType: AccountData, account: accRec)
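For orientation, a minimal usage sketch of the function documented above (not part of the commit; the import paths and the wrapper proc are assumptions, and `db`, `accPath`, `accRec` are placeholders supplied by the caller):

import
  eth/common,
  results,
  ./aristo/[aristo_desc, aristo_merge]   # assumed relative module paths

proc touchAccount(db: AristoDbRef; accPath: Hash256; accRec: AristoAccount) =
  ## Sketch only: merge an account record and inspect the documented `bool` payload.
  let rc = db.mergeAccountRecord(accPath, accRec)
  if rc.isErr:
    echo "merge failed: ", rc.error
  elif rc.value:
    echo "account was new or its value changed"   # `true` per the doc comment
  else:
    echo "account already held this value"        # `false` otherwise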
@ -31,7 +31,7 @@ proc aristoError(error: AristoError): NodeRef =
  ## Allows returning de
  NodeRef(vType: Leaf, error: error)

proc serialise*(
proc serialise(
    pyl: PayloadRef;
    getKey: ResolveVidFn;
      ): Result[Blob,(VertexID,AristoError)] =
@ -15,7 +15,8 @@ import
|
|||
../../aristo/[aristo_init/memory_only, aristo_walk],
|
||||
../../kvt as use_kvt,
|
||||
../../kvt/[kvt_init/memory_only, kvt_walk],
|
||||
".."/[base, base/base_desc]
|
||||
../base,
|
||||
../base/[base_config, base_desc]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor and helper
|
||||
|
@ -26,19 +27,19 @@ proc create*(dbType: CoreDbType; kvt: KvtDbRef; mpt: AristoDbRef): CoreDbRef =
|
|||
var db = CoreDbRef(dbType: dbType)
|
||||
db.defCtx = db.bless CoreDbCtxRef(mpt: mpt, kvt: kvt)
|
||||
|
||||
when CoreDbEnableApiTracking:
|
||||
when CoreDbEnableApiJumpTable:
|
||||
db.kvtApi = KvtApiRef.init()
|
||||
db.ariApi = AristoApiRef.init()
|
||||
|
||||
when CoreDbEnableApiProfiling:
|
||||
block:
|
||||
let profApi = KvtApiProfRef.init(db.kvtApi, kvt.backend)
|
||||
db.kvtApi = profApi
|
||||
kvt.backend = profApi.be
|
||||
block:
|
||||
let profApi = AristoApiProfRef.init(db.ariApi, mpt.backend)
|
||||
db.ariApi = profApi
|
||||
mpt.backend = profApi.be
|
||||
when CoreDbEnableProfiling:
|
||||
block:
|
||||
let profApi = KvtApiProfRef.init(db.kvtApi, kvt.backend)
|
||||
db.kvtApi = profApi
|
||||
kvt.backend = profApi.be
|
||||
block:
|
||||
let profApi = AristoApiProfRef.init(db.ariApi, mpt.backend)
|
||||
db.ariApi = profApi
|
||||
mpt.backend = profApi.be
|
||||
bless db
|
||||
|
||||
proc newAristoMemoryCoreDbRef*(): CoreDbRef =
|
||||
|
|
|
@ -29,9 +29,6 @@ const
|
|||
aristoFail = "Aristo/RocksDB init() failed"
|
||||
kvtFail = "Kvt/RocksDB init() failed"
|
||||
|
||||
# Annotation helper(s)
|
||||
{.pragma: rlpRaise, gcsafe, raises: [CoreDbApiError].}
|
||||
|
||||
proc toRocksDb*(
|
||||
opts: DbOptions
|
||||
): tuple[dbOpts: DbOptionsRef, cfOpts: ColFamilyOptionsRef] =
|
||||
|
|
|
@ -14,29 +14,8 @@ import
|
|||
std/typetraits,
|
||||
eth/common,
|
||||
"../.."/[constants, errors],
|
||||
../kvt,
|
||||
../aristo,
|
||||
./base/[api_tracking, base_desc]
|
||||
|
||||
const
|
||||
EnableApiTracking = false
|
||||
## When enabled, functions using this tracking facility need to import
|
||||
## `chronicles`, as well. Also, some `func` designators might need to
|
||||
## be changed to `proc` for possible side effects.
|
||||
##
|
||||
## Tracking noise is then enabled by setting the flag `trackNewApi` to
|
||||
## `true` in the `CoreDbRef` descriptor.
|
||||
|
||||
EnableApiProfiling = true
|
||||
## Enables functions profiling if `EnableApiTracking` is also set `true`.
|
||||
|
||||
EnableApiJumpTable* = false
|
||||
## This flag enables the functions jump table even if `EnableApiTracking`
|
||||
## and `EnableApiProfiling` is set `false`. This should be used for
|
||||
## debugging, only.
|
||||
|
||||
AutoValidateDescriptors = defined(release).not
|
||||
## No validatinon needed for production suite.
|
||||
".."/[kvt, aristo],
|
||||
./base/[api_tracking, base_config, base_desc]
|
||||
|
||||
export
|
||||
CoreDbAccRef,
|
||||
|
@ -47,23 +26,16 @@ export
|
|||
CoreDbCtxRef,
|
||||
CoreDbErrorCode,
|
||||
CoreDbErrorRef,
|
||||
CoreDbFnInx,
|
||||
CoreDbKvtRef,
|
||||
CoreDbMptRef,
|
||||
CoreDbPersistentTypes,
|
||||
CoreDbProfListRef,
|
||||
CoreDbRef,
|
||||
CoreDbTxRef,
|
||||
CoreDbType
|
||||
|
||||
const
|
||||
CoreDbEnableApiTracking* = EnableApiTracking
|
||||
CoreDbEnableApiProfiling* = EnableApiTracking and EnableApiProfiling
|
||||
CoreDbEnableApiJumpTable* =
|
||||
CoreDbEnableApiTracking or CoreDbEnableApiProfiling or EnableApiJumpTable
|
||||
|
||||
when AutoValidateDescriptors:
|
||||
import ./base/validate
|
||||
when CoreDbAutoValidateDescriptors:
|
||||
import
|
||||
./base/base_validate
|
||||
|
||||
when CoreDbEnableApiJumpTable:
|
||||
discard
|
||||
|
@ -73,58 +45,20 @@ else:
|
|||
aristo_delete, aristo_desc, aristo_fetch, aristo_merge, aristo_tx],
|
||||
../kvt/[kvt_desc, kvt_utils, kvt_tx]
|
||||
|
||||
# More settings
|
||||
const
|
||||
logTxt = "CoreDb "
|
||||
newApiTxt = logTxt & "API"
|
||||
|
||||
# Annotation helpers
|
||||
{.pragma: apiRaise, gcsafe, raises: [CoreDbApiError].}
|
||||
{.pragma: catchRaise, gcsafe, raises: [CatchableError].}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
when CoreDbEnableApiTracking:
|
||||
when CoreDbEnableApiProfiling:
|
||||
{.warning: "*** Provided API profiling for CoreDB (disabled by default)".}
|
||||
else:
|
||||
{.warning: "*** Provided API logging for CoreDB (disabled by default)".}
|
||||
|
||||
{.warning: "*** Provided API logging for CoreDB (disabled by default)".}
|
||||
import
|
||||
std/times,
|
||||
chronicles
|
||||
logScope:
|
||||
topics = "core_db"
|
||||
const
|
||||
logTxt = "API"
|
||||
|
||||
proc `$`[T](rc: CoreDbRc[T]): string = rc.toStr
|
||||
proc `$`(q: set[CoreDbCaptFlags]): string = q.toStr
|
||||
proc `$`(t: Duration): string = t.toStr
|
||||
proc `$`(e: EthAddress): string = e.toStr
|
||||
proc `$`(h: Hash256): string = h.toStr
|
||||
|
||||
template setTrackNewApi(
|
||||
w: CoreDbApiTrackRef;
|
||||
s: static[CoreDbFnInx];
|
||||
code: untyped;
|
||||
) =
|
||||
## Template with code section that will be discarded if logging is
|
||||
## disabled at compile time when `EnableApiTracking` is `false`.
|
||||
when CoreDbEnableApiTracking:
|
||||
w.beginNewApi(s)
|
||||
code
|
||||
const api {.inject,used.} = s
|
||||
|
||||
template setTrackNewApi*(
|
||||
w: CoreDbApiTrackRef;
|
||||
s: static[CoreDbFnInx];
|
||||
) =
|
||||
w.setTrackNewApi(s):
|
||||
discard
|
||||
|
||||
template ifTrackNewApi*(w: CoreDbApiTrackRef; code: untyped) =
|
||||
when EnableApiTracking:
|
||||
w.endNewApiIf:
|
||||
code
|
||||
when CoreDbEnableProfiling:
|
||||
{.warning: "*** Enabled API profiling for CoreDB".}
|
||||
export
|
||||
CoreDbFnInx,
|
||||
CoreDbProfListRef
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private KVT helpers
|
||||
|
@ -199,21 +133,21 @@ func toError(e: AristoError; s: string; error = Unspecified): CoreDbErrorRef =
|
|||
|
||||
proc bless*(db: CoreDbRef): CoreDbRef =
|
||||
## Verify descriptor
|
||||
when AutoValidateDescriptors:
|
||||
when CoreDbAutoValidateDescriptors:
|
||||
db.validate
|
||||
when CoreDbEnableApiProfiling:
|
||||
when CoreDbEnableProfiling:
|
||||
db.profTab = CoreDbProfListRef.init()
|
||||
db
|
||||
|
||||
proc bless*(db: CoreDbRef; ctx: CoreDbCtxRef): CoreDbCtxRef =
|
||||
ctx.parent = db
|
||||
when AutoValidateDescriptors:
|
||||
when CoreDbAutoValidateDescriptors:
|
||||
ctx.validate
|
||||
ctx
|
||||
|
||||
proc bless*(ctx: CoreDbCtxRef; dsc: CoreDbMptRef | CoreDbTxRef): auto =
|
||||
dsc.ctx = ctx
|
||||
when AutoValidateDescriptors:
|
||||
when CoreDbAutoValidateDescriptors:
|
||||
dsc.validate
|
||||
dsc
|
||||
|
||||
|
@ -249,7 +183,7 @@ proc swapCtx*(db: CoreDbRef; ctx: CoreDbCtxRef): CoreDbCtxRef =
|
|||
CoreDbKvtRef(ctx).call(reCentre, db.ctx.kvt).isOkOr:
|
||||
raiseAssert $api & " failed: " & $error
|
||||
db.defCtx = ctx
|
||||
db.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
db.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
proc forget*(ctx: CoreDbCtxRef) =
|
||||
## Dispose `ctx` argument context and related columns created with this
|
||||
|
@ -260,7 +194,7 @@ proc forget*(ctx: CoreDbCtxRef) =
|
|||
raiseAssert $api & ": " & $error
|
||||
CoreDbKvtRef(ctx).call(forget, ctx.kvt).isOkOr:
|
||||
raiseAssert $api & ": " & $error
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
ctx.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public main descriptor methods
|
||||
|
@ -277,7 +211,7 @@ proc finish*(db: CoreDbRef; eradicate = false) =
|
|||
db.setTrackNewApi BaseFinishFn
|
||||
CoreDbKvtRef(db.ctx).call(finish, db.ctx.kvt, eradicate)
|
||||
CoreDbAccRef(db.ctx).call(finish, db.ctx.mpt, eradicate)
|
||||
db.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
db.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
proc `$$`*(e: CoreDbErrorRef): string =
|
||||
## Pretty print error symbol, note that this directive may have side effects
|
||||
|
@ -320,7 +254,7 @@ proc persistent*(
|
|||
result = err(rc.error.toError $api)
|
||||
break body
|
||||
result = ok()
|
||||
db.ifTrackNewApi: debug newApiTxt, api, elapsed, blockNumber, result
|
||||
db.ifTrackNewApi: debug logTxt, api, elapsed, blockNumber, result
|
||||
|
||||
proc stateBlockNumber*(db: CoreDbRef): BlockNumber =
|
||||
## Rhis function returns the block number stored with the latest `persist()`
|
||||
|
@ -333,7 +267,7 @@ proc stateBlockNumber*(db: CoreDbRef): BlockNumber =
|
|||
rc.value.serial.BlockNumber
|
||||
else:
|
||||
0u64
|
||||
db.ifTrackNewApi: debug newApiTxt, api, elapsed, result
|
||||
db.ifTrackNewApi: debug logTxt, api, elapsed, result
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public key-value table methods
|
||||
|
@ -360,7 +294,7 @@ proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
|
|||
err(rc.error.toError($api, KvtNotFound))
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
proc getOrEmpty*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
|
||||
## Variant of `get()` returning an empty `Blob` if the key is not found
|
||||
|
@ -375,7 +309,7 @@ proc getOrEmpty*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
|
|||
CoreDbRc[Blob].ok(EmptyBlob)
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
proc len*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[int] =
|
||||
## This function returns the size of the value associated with `key`.
|
||||
|
@ -388,7 +322,7 @@ proc len*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[int] =
|
|||
err(rc.error.toError($api, KvtNotFound))
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[void] =
|
||||
kvt.setTrackNewApi KvtDelFn
|
||||
|
@ -398,7 +332,7 @@ proc del*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[void] =
|
|||
ok()
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
proc put*(
|
||||
kvt: CoreDbKvtRef;
|
||||
|
@ -413,7 +347,7 @@ proc put*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
kvt.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result
|
||||
debug logTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result
|
||||
|
||||
proc hasKey*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[bool] =
|
||||
## Would be named `contains` if it returned `bool` rather than `Result[]`.
|
||||
|
@ -425,7 +359,7 @@ proc hasKey*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[bool] =
|
|||
ok(rc.value)
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
kvt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions for generic columns
|
||||
|
@ -442,7 +376,7 @@ proc getGeneric*(
|
|||
if clearData:
|
||||
result.call(deleteGenericTree, ctx.mpt, CoreDbVidGeneric).isOkOr:
|
||||
raiseAssert $api & ": " & $error
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, clearData, elapsed
|
||||
ctx.ifTrackNewApi: debug logTxt, api, clearData, elapsed
|
||||
|
||||
# ----------- generic MPT ---------------
|
||||
|
||||
|
@ -459,7 +393,7 @@ proc fetch*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
|||
err(rc.error.toError($api, MptNotFound))
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
mpt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
||||
## This function returns an empty `Blob` if the argument `key` is not found
|
||||
|
@ -474,7 +408,7 @@ proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
|
|||
CoreDbRc[Blob].ok(EmptyBlob)
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
mpt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
proc delete*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[void] =
|
||||
mpt.setTrackNewApi MptDeleteFn
|
||||
|
@ -486,7 +420,7 @@ proc delete*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[void] =
|
|||
err(rc.error.toError($api, MptNotFound))
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
mpt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
proc merge*(
|
||||
mpt: CoreDbMptRef;
|
||||
|
@ -501,7 +435,7 @@ proc merge*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
mpt.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result
|
||||
debug logTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result
|
||||
|
||||
proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
|
||||
## This function would be named `contains()` if it returned `bool` rather
|
||||
|
@ -514,7 +448,7 @@ proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
|
|||
ok(rc.value)
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
|
||||
mpt.ifTrackNewApi: debug logTxt, api, elapsed, key=key.toStr, result
|
||||
|
||||
proc state*(mpt: CoreDbMptRef; updateOk = false): CoreDbRc[Hash256] =
|
||||
## This function retrieves the Merkle state hash of the argument
|
||||
|
@ -530,7 +464,7 @@ proc state*(mpt: CoreDbMptRef; updateOk = false): CoreDbRc[Hash256] =
|
|||
ok(rc.value)
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, updateOK, result
|
||||
mpt.ifTrackNewApi: debug logTxt, api, elapsed, updateOK, result
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public methods for accounts
|
||||
|
@ -541,7 +475,7 @@ proc getAccounts*(ctx: CoreDbCtxRef): CoreDbAccRef =
|
|||
##
|
||||
ctx.setTrackNewApi CtxGetAccountsFn
|
||||
result = CoreDbAccRef(ctx)
|
||||
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
ctx.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
# ----------- accounts ---------------
|
||||
|
||||
|
@ -561,8 +495,7 @@ proc fetch*(
|
|||
err(rc.error.toError($api, AccNotFound))
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
|
||||
acc.ifTrackNewApi: debug logTxt, api, elapsed, accPath=($$accPath), result
|
||||
|
||||
proc delete*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -582,7 +515,7 @@ proc delete*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath), result
|
||||
|
||||
proc clearStorage*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -599,7 +532,7 @@ proc clearStorage*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath), result
|
||||
|
||||
proc merge*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -617,7 +550,7 @@ proc merge*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath), result
|
||||
|
||||
proc hasPath*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -633,7 +566,7 @@ proc hasPath*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath), result
|
||||
|
||||
proc state*(acc: CoreDbAccRef; updateOk = false): CoreDbRc[Hash256] =
|
||||
## This function retrieves the Merkle state hash of the accounts
|
||||
|
@ -649,7 +582,7 @@ proc state*(acc: CoreDbAccRef; updateOk = false): CoreDbRc[Hash256] =
|
|||
ok(rc.value)
|
||||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, updateOK, result
|
||||
acc.ifTrackNewApi: debug logTxt, api, elapsed, updateOK, result
|
||||
|
||||
# ------------ storage ---------------
|
||||
|
||||
|
@ -669,8 +602,8 @@ proc slotFetch*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr,
|
||||
stoPath=stoPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath),
|
||||
stoPath=($$stoPath), result
|
||||
|
||||
proc slotDelete*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -690,8 +623,8 @@ proc slotDelete*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr,
|
||||
stoPath=stoPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath),
|
||||
stoPath=($$stoPath), result
|
||||
|
||||
proc slotHasPath*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -707,8 +640,8 @@ proc slotHasPath*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr,
|
||||
stoPath=stoPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath),
|
||||
stoPath=($$stoPath), result
|
||||
|
||||
proc slotMerge*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -725,8 +658,8 @@ proc slotMerge*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr,
|
||||
stoPath=stoPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath),
|
||||
stoPath=($$stoPath), stoData, result
|
||||
|
||||
proc slotState*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -748,7 +681,7 @@ proc slotState*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, updateOk, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath), updateOk, result
|
||||
|
||||
proc slotStateEmpty*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -765,7 +698,7 @@ proc slotStateEmpty*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath), result
|
||||
|
||||
proc slotStateEmptyOrVoid*(
|
||||
acc: CoreDbAccRef;
|
||||
|
@ -780,7 +713,7 @@ proc slotStateEmptyOrVoid*(
|
|||
else:
|
||||
true
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
|
||||
debug logTxt, api, elapsed, accPath=($$accPath), result
|
||||
|
||||
# ------------- other ----------------
|
||||
|
||||
|
@ -806,8 +739,8 @@ proc recast*(
|
|||
else:
|
||||
err(rc.error.toError $api)
|
||||
acc.ifTrackNewApi:
|
||||
let slotState = if rc.isOk: rc.value.toStr else: "n/a"
|
||||
debug newApiTxt, api, elapsed, accPath=accPath.toStr, slotState, result
|
||||
let slotState = if rc.isOk: $$(rc.value) else: "n/a"
|
||||
debug logTxt, api, elapsed, accPath=($$accPath), slotState, result
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public transaction related methods
|
||||
|
@ -818,7 +751,7 @@ proc level*(db: CoreDbRef): int =
|
|||
##
|
||||
db.setTrackNewApi BaseLevelFn
|
||||
result = CoreDbAccRef(db.ctx).call(level, db.ctx.mpt)
|
||||
db.ifTrackNewApi: debug newApiTxt, api, elapsed, result
|
||||
db.ifTrackNewApi: debug logTxt, api, elapsed, result
|
||||
|
||||
proc newTransaction*(ctx: CoreDbCtxRef): CoreDbTxRef =
|
||||
## Constructor
|
||||
|
@ -832,14 +765,14 @@ proc newTransaction*(ctx: CoreDbCtxRef): CoreDbTxRef =
|
|||
result = ctx.bless CoreDbTxRef(kTx: kTx, aTx: aTx)
|
||||
ctx.ifTrackNewApi:
|
||||
let newLevel = CoreDbAccRef(ctx).call(level, ctx.mpt)
|
||||
debug newApiTxt, api, elapsed, newLevel
|
||||
debug logTxt, api, elapsed, newLevel
|
||||
|
||||
proc level*(tx: CoreDbTxRef): int =
|
||||
## Print positive transaction level for argument `tx`
|
||||
##
|
||||
tx.setTrackNewApi TxLevelFn
|
||||
result = CoreDbAccRef(tx.ctx).call(txLevel, tx.aTx)
|
||||
tx.ifTrackNewApi: debug newApiTxt, api, elapsed, result
|
||||
tx.ifTrackNewApi: debug logTxt, api, elapsed, result
|
||||
|
||||
proc commit*(tx: CoreDbTxRef) =
|
||||
tx.setTrackNewApi TxCommitFn:
|
||||
|
@ -848,7 +781,7 @@ proc commit*(tx: CoreDbTxRef) =
|
|||
raiseAssert $api & ": " & $error
|
||||
CoreDbKvtRef(tx.ctx).call(commit, tx.kTx).isOkOr:
|
||||
raiseAssert $api & ": " & $error
|
||||
tx.ifTrackNewApi: debug newApiTxt, api, elapsed, prvLevel
|
||||
tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel
|
||||
|
||||
proc rollback*(tx: CoreDbTxRef) =
|
||||
tx.setTrackNewApi TxRollbackFn:
|
||||
|
@ -857,7 +790,7 @@ proc rollback*(tx: CoreDbTxRef) =
|
|||
raiseAssert $api & ": " & $error
|
||||
CoreDbKvtRef(tx.ctx).call(rollback, tx.kTx).isOkOr:
|
||||
raiseAssert $api & ": " & $error
|
||||
tx.ifTrackNewApi: debug newApiTxt, api, elapsed, prvLevel
|
||||
tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel
|
||||
|
||||
proc dispose*(tx: CoreDbTxRef) =
|
||||
tx.setTrackNewApi TxDisposeFn:
|
||||
|
@ -868,7 +801,7 @@ proc dispose*(tx: CoreDbTxRef) =
|
|||
if CoreDbKvtRef(tx.ctx).call(isTop, tx.kTx):
|
||||
CoreDbKvtRef(tx.ctx).call(rollback, tx.kTx).isOkOr:
|
||||
raiseAssert $api & ": " & $error
|
||||
tx.ifTrackNewApi: debug newApiTxt, api, elapsed, prvLevel
|
||||
tx.ifTrackNewApi: debug logTxt, api, elapsed, prvLevel
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public tracer methods
|
||||
|
@ -877,7 +810,6 @@ proc dispose*(tx: CoreDbTxRef) =
|
|||
when false: # currently disabled
|
||||
proc newCapture*(
|
||||
db: CoreDbRef;
|
||||
flags: set[CoreDbCaptFlags] = {};
|
||||
): CoreDbRc[CoreDbCaptRef] =
|
||||
## Trace constructor providing an overlay on top of the argument database
|
||||
## `db`. This overlay provides a replacement database handle that can be
|
||||
|
@ -892,7 +824,7 @@ when false: # currently disabled
|
|||
##
|
||||
db.setTrackNewApi BaseNewCaptureFn
|
||||
result = db.methods.newCaptureFn flags
|
||||
db.ifTrackNewApi: debug newApiTxt, api, elapsed, result
|
||||
db.ifTrackNewApi: debug logTxt, api, elapsed, result
|
||||
|
||||
proc recorder*(cpt: CoreDbCaptRef): CoreDbRef =
|
||||
## Getter, returns a tracer replacement handle to be used as new database.
|
||||
|
@ -907,7 +839,7 @@ when false: # currently disabled
|
|||
##
|
||||
cpt.setTrackNewApi CptRecorderFn
|
||||
result = cpt.methods.recorderFn()
|
||||
cpt.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
cpt.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
proc logDb*(cp: CoreDbCaptRef): TableRef[Blob,Blob] =
|
||||
## Getter, returns the logger table for the overlay tracer database.
|
||||
|
@ -919,14 +851,14 @@ when false: # currently disabled
|
|||
##
|
||||
cp.setTrackNewApi CptLogDbFn
|
||||
result = cp.methods.logDbFn()
|
||||
cp.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
cp.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
proc flags*(cp: CoreDbCaptRef):set[CoreDbCaptFlags] =
|
||||
## Getter
|
||||
##
|
||||
cp.setTrackNewApi CptFlagsFn
|
||||
result = cp.methods.getFlagsFn()
|
||||
cp.ifTrackNewApi: debug newApiTxt, api, elapsed, result
|
||||
cp.ifTrackNewApi: debug logTxt, api, elapsed, result
|
||||
|
||||
proc forget*(cp: CoreDbCaptRef) =
|
||||
## Explicitely stop recording the current tracer instance and reset to
|
||||
|
@ -934,7 +866,7 @@ when false: # currently disabled
|
|||
##
|
||||
cp.setTrackNewApi CptForgetFn
|
||||
cp.methods.forgetFn()
|
||||
cp.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
cp.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -16,9 +16,12 @@ import
|
|||
results,
|
||||
stew/byteutils,
|
||||
../../aristo/aristo_profile,
|
||||
./base_desc
|
||||
"."/[base_config, base_desc]
|
||||
|
||||
type
|
||||
Elapsed* = distinct Duration
|
||||
## Needed for local `$` as it would be ambiguous for `Duration`
|
||||
|
||||
CoreDbApiTrackRef* =
|
||||
# CoreDbCaptRef |
|
||||
CoreDbRef | CoreDbKvtRef | CoreDbCtxRef | CoreDbMptRef | CoreDbAccRef |
|
||||
|
@ -55,7 +58,6 @@ type
|
|||
BaseStateBlockNumberFn = "stateBlockNumber"
|
||||
BaseSwapCtxFn = "swapCtx"
|
||||
|
||||
CptFlagsFn = "cpt/flags"
|
||||
CptLogDbFn = "cpt/logDb"
|
||||
CptRecorderFn = "cpt/recorder"
|
||||
CptForgetFn = "cpt/forget"
|
||||
|
@ -88,6 +90,8 @@ type
|
|||
TxRollbackFn = "rollback"
|
||||
TxSaveDisposeFn = "safeDispose"
|
||||
|
||||
proc toStr*(e: CoreDbErrorRef): string {.gcsafe.}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -99,9 +103,53 @@ func oaToStr(w: openArray[byte]): string =
|
|||
# Public API logging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
func toStr*(w: Hash256): string =
|
||||
func toStr(w: Hash256): string =
|
||||
if w == EMPTY_ROOT_HASH: "EMPTY_ROOT_HASH" else: w.data.oaToStr
|
||||
|
||||
func toLenStr(w: Blob): string =
|
||||
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
|
||||
else: "Blob[" & $w.len & "]"
|
||||
|
||||
func toStr(ela: Duration): string =
|
||||
aristo_profile.toStr(ela)
|
||||
|
||||
proc toStr*(rc: CoreDbRc[int]|CoreDbRc[UInt256]): string =
|
||||
if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[bool]): string =
|
||||
if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[void]): string =
|
||||
if rc.isOk: "ok()" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[Blob]): string =
|
||||
if rc.isOk: "ok(Blob[" & $rc.value.len & "])"
|
||||
else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[Hash256]): string =
|
||||
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[Account]): string =
|
||||
if rc.isOk: "ok(Account)" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[CoreDbAccount]): string =
|
||||
if rc.isOk: "ok(AristoAccount)" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr[T](rc: CoreDbRc[T]; ifOk: static[string]): string =
|
||||
if rc.isOk: "ok(" & ifOk & ")" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr(rc: CoreDbRc[CoreDbRef]): string = rc.toStr "db"
|
||||
proc toStr(rc: CoreDbRc[CoreDbKvtRef]): string = rc.toStr "kvt"
|
||||
proc toStr(rc: CoreDbRc[CoreDbTxRef]): string = rc.toStr "tx"
|
||||
#proc toStr(rc: CoreDbRc[CoreDbCaptRef]): string = rc.toStr "capt"
|
||||
proc toStr(rc: CoreDbRc[CoreDbCtxRef]): string = rc.toStr "ctx"
|
||||
proc toStr(rc: CoreDbRc[CoreDbMptRef]): string = rc.toStr "mpt"
|
||||
proc toStr(rc: CoreDbRc[CoreDbAccRef]): string = rc.toStr "acc"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public API logging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc toStr*(e: CoreDbErrorRef): string =
|
||||
result = $e.error & "("
|
||||
result &= (if e.isAristo: "Aristo" else: "Kvt")
|
||||
|
@ -109,80 +157,64 @@ proc toStr*(e: CoreDbErrorRef): string =
|
|||
result &= (if e.isAristo: $e.aErr else: $e.kErr)
|
||||
result &= ")"
|
||||
|
||||
func toStr*(w: openArray[byte]): string =
|
||||
w.oaToStr
|
||||
|
||||
func toLenStr*(w: openArray[byte]): string =
|
||||
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
|
||||
else: "openArray[" & $w.len & "]"
|
||||
|
||||
func toLenStr*(w: Blob): string =
|
||||
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
|
||||
else: "Blob[" & $w.len & "]"
|
||||
|
||||
func toStr*(w: openArray[byte]): string =
|
||||
w.oaToStr
|
||||
|
||||
func toStr*(w: set[CoreDbCaptFlags]): string =
|
||||
"Flags[" & $w.len & "]"
|
||||
|
||||
proc toStr*(rc: CoreDbRc[int]|CoreDbRc[UInt256]): string =
|
||||
if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr*(rc: CoreDbRc[bool]): string =
|
||||
if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr*(rc: CoreDbRc[void]): string =
|
||||
if rc.isOk: "ok()" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr*(rc: CoreDbRc[Blob]): string =
|
||||
if rc.isOk: "ok(Blob[" & $rc.value.len & "])"
|
||||
else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr*[T](rc: CoreDbRc[T]): string =
|
||||
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr*(rc: CoreDbRc[Account]): string =
|
||||
if rc.isOk: "ok(Account)" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr[T](rc: CoreDbRc[T]; ifOk: static[string]): string =
|
||||
if rc.isOk: "ok(" & ifOk & ")" else: "err(" & rc.error.toStr & ")"
|
||||
|
||||
proc toStr*(rc: CoreDbRc[CoreDbRef]): string = rc.toStr "db"
|
||||
proc toStr*(rc: CoreDbRc[CoreDbAccount]): string = rc.toStr "acc"
|
||||
proc toStr*(rc: CoreDbRc[CoreDbKvtRef]): string = rc.toStr "kvt"
|
||||
proc toStr*(rc: CoreDbRc[CoreDbTxRef]): string = rc.toStr "tx"
|
||||
#proc toStr*(rc: CoreDbRc[CoreDbCaptRef]): string = rc.toStr "capt"
|
||||
proc toStr*(rc: CoreDbRc[CoreDbCtxRef]): string = rc.toStr "ctx"
|
||||
proc toStr*(rc: CoreDbRc[CoreDbMptRef]): string = rc.toStr "mpt"
|
||||
proc toStr*(rc: CoreDbRc[CoreDbAccRef]): string = rc.toStr "acc"
|
||||
|
||||
func toStr*(ela: Duration): string =
|
||||
aristo_profile.toStr(ela)
|
||||
proc `$`*[T](rc: CoreDbRc[T]): string = rc.toStr
|
||||
func `$`*(t: Elapsed): string = t.Duration.toStr
|
||||
func `$`*(e: EthAddress): string = e.toStr
|
||||
func `$$`*(h: Hash256): string = h.toStr # otherwise collision w/existing `$`
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public new API logging framework
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template beginNewApi*(w: CoreDbApiTrackRef; s: static[CoreDbFnInx]) =
|
||||
when CoreDbEnableApiProfiling:
|
||||
const bnaCtx {.inject.} = s # Local use only
|
||||
let bnaStart {.inject.} = getTime() # Local use only
|
||||
template setTrackNewApi*(
|
||||
w: CoreDbApiTrackRef;
|
||||
s: static[CoreDbFnInx];
|
||||
code: untyped;
|
||||
) =
|
||||
## Template with code section that will be discarded if logging is
|
||||
## disabled at compile time when `EnableApiTracking` is `false`.
|
||||
when CoreDbEnableApiTracking:
|
||||
#w.beginNewApi(s)
|
||||
when CoreDbEnableProfiling:
|
||||
const bnaCtx {.inject.} = s # Local use only
|
||||
let bnaStart {.inject.} = getTime() # Local use only
|
||||
code
|
||||
const api {.inject,used.} = s
|
||||
|
||||
template endNewApiIf*(w: CoreDbApiTrackRef; code: untyped) =
|
||||
block body:
|
||||
when typeof(w) is CoreDbRef:
|
||||
let db = w
|
||||
elif typeof(w) is CoreDbTxRef:
|
||||
let db = w.ctx.parent
|
||||
if w.isNil: break body
|
||||
else:
|
||||
let db = w.distinctBase.parent
|
||||
if w.distinctBase.isNil: break body
|
||||
when CoreDbEnableApiProfiling:
|
||||
let elapsed {.inject,used.} = getTime() - bnaStart
|
||||
aristo_profile.update(db.profTab, bnaCtx.ord, elapsed)
|
||||
if db.trackNewApi:
|
||||
when not CoreDbEnableApiProfiling: # otherwise use variable above
|
||||
let elapsed {.inject,used.} = getTime() - bnaStart
|
||||
code
|
||||
template setTrackNewApi*(
|
||||
w: CoreDbApiTrackRef;
|
||||
s: static[CoreDbFnInx];
|
||||
) =
|
||||
w.setTrackNewApi(s):
|
||||
discard
|
||||
|
||||
template ifTrackNewApi*(w: CoreDbApiTrackRef; code: untyped) =
|
||||
when CoreDbEnableApiTracking:
|
||||
#w.endNewApiIf:
|
||||
# code
|
||||
block body:
|
||||
when typeof(w) is CoreDbRef:
|
||||
let db = w
|
||||
elif typeof(w) is CoreDbTxRef:
|
||||
let db = w.ctx.parent
|
||||
if w.isNil: break body
|
||||
else:
|
||||
let db = w.distinctBase.parent
|
||||
if w.distinctBase.isNil: break body
|
||||
when CoreDbEnableProfiling:
|
||||
let elapsed {.inject,used.} = (getTime() - bnaStart).Elapsed
|
||||
aristo_profile.update(db.profTab, bnaCtx.ord, elapsed.Duration)
|
||||
if db.trackCoreDbApi:
|
||||
when not CoreDbEnableProfiling: # otherwise use variable above
|
||||
let elapsed {.inject,used.} = (getTime() - bnaStart).Elapsed
|
||||
code
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public helpers
|
||||
|
|
|
@ -0,0 +1,57 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

# Configuration section
const
  EnableApiTracking = false
    ## When enabled, functions using this tracking facility need to import
    ## `chronicles`, as well. Also, some `func` designators might need to
    ## be changed to `proc` for possible side effects.
    ##
    ## Tracking noise is then enabled by setting the flag `trackCoreDbApi` to
    ## `true` in the `CoreDbRef` descriptor.

  EnableProfiling = false
    ## Enables profiling of the backend. If the flag `EnableApiTracking` is
    ## also set, the API will also be subject to profiling.

  EnableCaptJournal = defined(release).not
    ## Enables the tracer facility. If set `true`, capture journal directives
    ## like `newCapture()` will be available.

  NoisyCaptJournal = true
    ## Provide extra logging with the tracer facility if available.

  EnableApiJumpTable = false
    ## This flag enables the functions jump table even if `EnableApiProfiling`
    ## and `EnableCaptJournal` are set `false` in release mode. This setting
    ## should be used for debugging only.

  AutoValidateDescriptors = defined(release).not
    ## No validation needed for the production suite.

# Exportable constants (leave this section alone)
const
  CoreDbEnableApiTracking* = EnableApiTracking

  CoreDbEnableProfiling* = EnableProfiling

  CoreDbEnableCaptJournal* = EnableCaptJournal

  CoreDbNoisyCaptJournal* = CoreDbEnableCaptJournal and NoisyCaptJournal

  CoreDbEnableApiJumpTable* =
    CoreDbEnableProfiling or CoreDbEnableCaptJournal or EnableApiJumpTable

  CoreDbAutoValidateDescriptors* = AutoValidateDescriptors

# End
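A minimal sketch of how the other `core_db` modules are expected to consume the new configuration module, mirroring the `base.nim` changes in this commit (the relative import path is an assumption):

import ./base/base_config

when CoreDbEnableApiTracking:
  import chronicles              # only pulled in when tracking is compiled in
  logScope:
    topics = "core_db"

when CoreDbEnableApiJumpTable:
  # the `ariApi`/`kvtApi` function jump tables are only set up in this mode
  discard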
@ -12,9 +12,8 @@
import
  results,
  ../../aristo,
  ../../kvt,
  ../../aristo/aristo_profile
  "../.."/[aristo, aristo/aristo_profile, kvt],
  ./base_config

# Annotation helpers
{.pragma: noRaise, gcsafe, raises: [].}

@ -72,18 +71,21 @@ type
  # --------------------------------------------------
  CoreDbRef* = ref object
    ## Database descriptor
    dbType*: CoreDbType              ## Type of database backend
    defCtx*: CoreDbCtxRef            ## Default context

    # Optional api interface (can be re-directed/intercepted)
    ariApi*: AristoApiRef            ## `Aristo` api
    kvtApi*: KvtApiRef               ## `KVT` api

    # Optional profiling and debugging stuff
    trackNewApi*: bool               ## Debugging, support
    trackLedgerApi*: bool            ## Debugging, suggestion for subsequent ledger
    profTab*: CoreDbProfListRef      ## Profiling data (if any)
    ledgerHook*: RootRef             ## Debugging/profiling, to be used by ledger
    when CoreDbEnableApiTracking:
      trackLedgerApi*: bool          ## Debugging, suggestion for ledger
      trackCoreDbApi*: bool          ## Debugging, support
    when CoreDbEnableApiJumpTable:
      profTab*: CoreDbProfListRef    ## Profiling data (if any)
      ledgerHook*: RootRef           ## Debugging/profiling, to be used by ledger
      tracerHook*: RootRef           ## Debugging/tracing

  CoreDbCtxRef* = ref object
    ## Shared context for `CoreDbMptRef`, `CoreDbAccRef`, `CoreDbKvtRef`
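Since the debugging fields of `CoreDbRef` are now compiled in conditionally, code touching them needs the same compile-time guard. A hedged sketch (hypothetical helper, assumed relative import paths):

import ./base/[base_config, base_desc]   # assumed import paths

proc enableNoisyTracking*(db: CoreDbRef) =
  ## Sketch only: flip the tracking flags when they exist at all.
  when CoreDbEnableApiTracking:
    db.trackCoreDbApi = true             # fields exist only with tracking enabled
    db.trackLedgerApi = true
  else:
    discard                              # compiled out otherwise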
@ -13,12 +13,12 @@
|
|||
import
|
||||
std/typetraits,
|
||||
eth/common,
|
||||
../../errors,
|
||||
../aristo as use_ari,
|
||||
../aristo/[aristo_walk, aristo_serialise],
|
||||
../kvt as use_kvt,
|
||||
../kvt/[kvt_init/memory_only, kvt_walk],
|
||||
./base/[api_tracking, base_desc],
|
||||
./base
|
||||
./base/[api_tracking, base_config, base_desc]
|
||||
|
||||
when CoreDbEnableApiJumpTable:
|
||||
discard
|
||||
|
@ -31,11 +31,12 @@ include
|
|||
./backend/aristo_replicate
|
||||
|
||||
when CoreDbEnableApiTracking:
|
||||
import chronicles
|
||||
|
||||
import
|
||||
chronicles
|
||||
logScope:
|
||||
topics = "core_db"
|
||||
const
|
||||
logTxt = "CoreDb/it "
|
||||
newApiTxt = logTxt & "API"
|
||||
logTxt = "API"
|
||||
|
||||
# Annotation helper(s)
|
||||
{.pragma: apiRaise, gcsafe, raises: [CoreDbApiError].}
|
||||
|
@ -61,7 +62,7 @@ iterator pairs*(kvt: CoreDbKvtRef): (Blob, Blob) {.apiRaise.} =
|
|||
yield (k,v)
|
||||
of Ooops, AristoDbRocks:
|
||||
raiseAssert: "Unsupported database type: " & $kvt.dbType
|
||||
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
kvt.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
|
||||
## Trie traversal, only supported for `CoreDbMptRef`
|
||||
|
@ -73,7 +74,7 @@ iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
|
|||
yield (mpt.call(pathAsBlob, path), data)
|
||||
of Ooops:
|
||||
raiseAssert: "Unsupported database type: " & $mpt.dbType
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
mpt.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
iterator slotPairs*(acc: CoreDbAccRef; accPath: Hash256): (Blob, UInt256) =
|
||||
## Trie traversal, only supported for `CoreDbMptRef`
|
||||
|
@ -86,7 +87,7 @@ iterator slotPairs*(acc: CoreDbAccRef; accPath: Hash256): (Blob, UInt256) =
|
|||
of Ooops:
|
||||
raiseAssert: "Unsupported database type: " & $acc.dbType
|
||||
acc.ifTrackNewApi:
|
||||
debug newApiTxt, api, elapsed
|
||||
debug logTxt, api, elapsed
|
||||
|
||||
iterator replicate*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
|
||||
## Low level trie dump, only supported for non persistent `CoreDbMptRef`
|
||||
|
@ -101,7 +102,7 @@ iterator replicate*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
|
|||
yield (k,v)
|
||||
of Ooops, AristoDbRocks:
|
||||
raiseAssert: "Unsupported database type: " & $mpt.dbType
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
mpt.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -13,22 +13,23 @@
|
|||
import
|
||||
std/typetraits,
|
||||
eth/common,
|
||||
../../errors,
|
||||
../aristo as use_ari,
|
||||
../aristo/aristo_init/rocks_db,
|
||||
../aristo/[aristo_desc, aristo_walk/persistent, aristo_tx],
|
||||
../aristo/[aristo_desc, aristo_walk/persistent, aristo_serialise, aristo_tx],
|
||||
../kvt, # needed for `aristo_replicate`
|
||||
./base/[api_tracking, base_desc],
|
||||
./base
|
||||
./base/[api_tracking, base_config, base_desc], ./base
|
||||
|
||||
include
|
||||
./backend/aristo_replicate
|
||||
|
||||
when CoreDbEnableApiTracking:
|
||||
import chronicles
|
||||
|
||||
import
|
||||
chronicles
|
||||
logScope:
|
||||
topics = "core_db"
|
||||
const
|
||||
logTxt = "CoreDb/itp "
|
||||
newApiTxt = logTxt & "API"
|
||||
logTxt = "API"
|
||||
|
||||
# Annotation helper(s)
|
||||
{.pragma: rlpRaise, gcsafe, raises: [CoreDbApiError].}
|
||||
|
@ -53,7 +54,7 @@ iterator replicatePersistent*(mpt: CoreDbMptRef): (Blob, Blob) {.rlpRaise.} =
|
|||
yield (k, v)
|
||||
else:
|
||||
raiseAssert: "Unsupported database type: " & $mpt.dbType
|
||||
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed
|
||||
mpt.ifTrackNewApi: debug logTxt, api, elapsed
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -23,7 +23,7 @@ import
|
|||
"."/base
|
||||
|
||||
logScope:
|
||||
topics = "core_db-apps"
|
||||
topics = "core_db"
|
||||
|
||||
type
|
||||
TransactionKey = tuple
|
||||
|
@ -74,7 +74,7 @@ proc getCanonicalHeaderHash*(db: CoreDbRef): Opt[Hash256] {.gcsafe.}
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Core apps " & info
|
||||
"Core app " & info
|
||||
|
||||
template discardRlpException(info: static[string]; code: untyped) =
|
||||
try:
|
||||
|
@ -104,7 +104,7 @@ iterator findNewAncestors(
|
|||
break
|
||||
else:
|
||||
if not db.getBlockHeader(h.parentHash, h):
|
||||
warn "Could not find parent while iterating", hash = h.parentHash
|
||||
warn logTxt "Could not find parent while iterating", hash = h.parentHash
|
||||
break
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -138,7 +138,7 @@ iterator getBlockTransactions*(
|
|||
try:
|
||||
yield rlp.decode(encodedTx, Transaction)
|
||||
except RlpError as exc:
|
||||
warn "Cannot decode database transaction", data = toHex(encodedTx), error = exc.msg
|
||||
warn logTxt "Cannot decode database transaction", data = toHex(encodedTx), error = exc.msg
|
||||
|
||||
iterator getBlockTransactionHashes*(
|
||||
db: CoreDbRef;
|
||||
|
@ -226,7 +226,7 @@ proc setAsCanonicalChainHead(
|
|||
db.removeTransactionFromCanonicalChain(txHash)
|
||||
# TODO re-add txn to internal pending pool (only if local sender)
|
||||
except BlockNotFound:
|
||||
warn "Could not load old header", oldHash
|
||||
warn logTxt "Could not load old header", oldHash
|
||||
|
||||
for h in newCanonicalHeaders:
|
||||
# TODO don't recompute block hash
|
||||
|
|
|
@ -14,6 +14,7 @@ import
|
|||
eth/common,
|
||||
../aristo,
|
||||
./backend/aristo_db,
|
||||
./base/base_config,
|
||||
"."/[base_iterators, core_apps]
|
||||
|
||||
import
|
||||
|
@ -22,6 +23,7 @@ import
|
|||
export
|
||||
EmptyBlob,
|
||||
base,
|
||||
base_config,
|
||||
base_iterators,
|
||||
common,
|
||||
core_apps
|
||||
|
|
|
@ -223,7 +223,7 @@ func dup*(api: KvtApiRef): KvtApiRef =
|
|||
txBegin: api.txBegin,
|
||||
txTop: api.txTop)
|
||||
when AutoValidateApiHooks:
|
||||
api.validate
|
||||
result.validate
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public profile API constuctor
|
||||
|
|
|
@ -123,8 +123,8 @@ proc len*(
    db: KvtDbRef;                  # Database
    key: openArray[byte];          # Key of database record
      ): Result[int,KvtError] =
  ## For the argument `key` return the associated value preferably from the
  ## top layer, or the database otherwise.
  ## For the argument `key` return the length of the associated value,
  ## preferably from the top layer, or the database otherwise.
  ##
  if key.len == 0:
    return err(KeyInvalid)
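The corrected doc comment reads most naturally against a call site. A sketch only (assumed `kvt` module paths; `db` and `key` are placeholders supplied by the caller):

import results, ./kvt/[kvt_desc, kvt_utils]   # assumed imports

proc reportValueLen(db: KvtDbRef; key: openArray[byte]) =
  let rc = db.len(key)
  if rc.isOk:
    echo "stored value is ", rc.value, " bytes long"
  else:
    echo "lookup failed: ", rc.error            # e.g. `KeyInvalid` for an empty key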
@ -18,16 +18,15 @@
import
  eth/common,
  ./core_db,
  ./ledger/[base_iterators, accounts_ledger]

import
  ./ledger/base except LedgerApiTxt, beginTrackApi, bless, ifTrackApi
  ./ledger/backend/accounts_ledger,
  ./ledger/base/[base_config, base_desc],
  ./ledger/[base, base_iterators]

export
  AccountsLedgerRef,
  base,
  base_iterators,
  init
  base_config,
  base_iterators

# ------------------------------------------------------------------------------
# Public constructor
@ -1,52 +0,0 @@
The file `accounts_cache.nim` has been relocated
================================================

Background
----------

The new *LedgerRef* module unifies different implementations of the
legacy *accounts_cache*. It is intended to be used as the new base method for
all of the *AccountsCache* implementations. The legacy *accounts_cache*
version has been removed.

This was needed to accommodate different *CoreDb* API paradigms. While the
overloaded legacy *AccountsCache* implementation is just a closure based
wrapper around the *accounts_cache* module, the overloaded *AccountsLedgerRef*
is a closure based wrapper around the *accounts_ledger* module with the new
*CoreDb* API returning *Result[]* values and separating the meaning of trie
root hash and trie root reference.

This allows using the legacy hexary database (with the new *CoreDb* API) as
well as the *Aristo* database (only supported on the new API.)

Instructions
------------

| **Legacy notation**     | **LedgerRef replacement**      | **Comment**
|:------------------------|:-------------------------------|----------------------
|                         |                                |
| import accounts_cache   | import ledger                  | preferred method,
| AccountsCache.init(..)  | AccountsCache.init(..)         | wraps *AccountsCache*
|                         |                                | methods
|                         | *or*                           |
|                         |                                |
|                         | import ledger/accounts_cache   | stay with legacy
|                         | AccountsCache.init(..)         | version of
|                         |                                | *AccountsCache*
| --                      |                                |
| fn(ac: AccountsCache)   | fn(ac: LedgerRef)              | function example for
|                         |                                | preferred wrapper
|                         | *or*                           | method
|                         |                                |
|                         | fn(ac: AccountsCache)          | with legacy version,
|                         |                                | no change here


### The constructor decides which *CoreDb* API is to be used

| **Legacy API constructor**      | **new API Constructor**             |
|:--------------------------------|:------------------------------------|
|                                 |                                     |
| import ledger                   | import ledger                       |
| let w = AccountsCache.init(..)  | let w = AccountsLedgerRef.init(..)  |
|                                 |                                     |
|
@ -16,14 +16,12 @@ import
|
|||
eth/common,
|
||||
results,
|
||||
stew/keyed_queue,
|
||||
../../stateless/multi_keys,
|
||||
"../.."/[constants, utils/utils],
|
||||
../access_list as ac_access_list,
|
||||
../../evm/code_bytes,
|
||||
".."/[core_db, storage_types, transient_storage],
|
||||
../aristo/aristo_blobify
|
||||
|
||||
export code_bytes
|
||||
../../../evm/code_bytes,
|
||||
../../../stateless/multi_keys,
|
||||
"../../.."/[constants, utils/utils],
|
||||
../../access_list as ac_access_list,
|
||||
"../.."/[core_db, storage_types, transient_storage],
|
||||
../../aristo/aristo_blobify
|
||||
|
||||
const
|
||||
debugAccountsLedgerRef = false
|
|
@ -8,7 +8,7 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Unify different ledger management APIs.
## Ledger management APIs.

{.push raises: [].}
|
||||
|
@ -17,67 +17,38 @@ import
../../evm/code_bytes,
../../stateless/multi_keys,
../core_db,
./base/[api_tracking, base_desc]

const
EnableApiTracking = false
## When enabled, API functions are logged. Tracking is enabled by setting
## the `trackApi` flag to `true`. This setting is typically inherited from
## the `CoreDb` descriptor flag `trackLedgerApi`.

EnableApiProfiling = true
## Enable functions profiling (only if `EnableApiTracking` is set `true`.)

apiTxt = "Ledger API"

./backend/accounts_ledger,
./base/[api_tracking, base_config, base_desc]

type
ReadOnlyStateDB* = distinct LedgerRef

export
code_bytes,
LedgerFnInx,
LedgerProfListRef,
LedgerRef,
LedgerSpRef

const
LedgerEnableApiTracking* = EnableApiTracking
LedgerEnableApiProfiling* = EnableApiTracking and EnableApiProfiling
LedgerApiTxt* = apiTxt

proc ldgProfData*(db: CoreDbRef): LedgerProfListRef {.gcsafe.}

# ------------------------------------------------------------------------------
# Logging/tracking helpers (some public)
# ------------------------------------------------------------------------------

when EnableApiTracking:
when EnableApiProfiling:
{.warning: "*** Provided API profiling for Ledger (disabled by default)".}
else:
{.warning: "*** Provided API logging for Ledger (disabled by default)".}

when LedgerEnableApiTracking:
{.warning: "*** Provided API logging for Ledger (disabled by default)".}
import
std/times,
chronicles
logScope:
topics = "ledger"
const
apiTxt = "API"

func `$`(w: CodeBytesRef): string {.used.} = w.toStr
func `$`(e: Duration): string {.used.} = e.toStr
# func `$`(c: CoreDbMptRef): string {.used.} = c.toStr
func `$`(l: seq[Log]): string {.used.} = l.toStr
func `$`(h: Hash256): string {.used.} = h.toStr
func `$`(a: EthAddress): string {.used.} = a.toStr

# Publicly available for API logging
template beginTrackApi*(ldg: LedgerRef; s: LedgerFnInx) =
when EnableApiTracking:
ldg.beginApi(s)

template ifTrackApi*(ldg: LedgerRef; code: untyped) =
when EnableApiTracking:
ldg.endApiIf:
code
when LedgerEnableApiProfiling:
{.warning: "*** Provided API profiling for Ledger (disabled by default)".}
export
LedgerFnInx,
LedgerProfListRef

# ------------------------------------------------------------------------------
# Public constructor helper

@ -85,7 +56,7 @@ template ifTrackApi*(ldg: LedgerRef; code: untyped) =

proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef =
ldg.beginTrackApi LdgBlessFn
when EnableApiTracking:
when LedgerEnableApiTracking:
ldg.trackApi = db.trackLedgerApi
when LedgerEnableApiProfiling:
ldg.profTab = db.ldgProfData()

@ -112,22 +83,22 @@ proc ldgProfData*(db: CoreDbRef): LedgerProfListRef =
proc accessList*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.beginTrackApi LdgAccessListFn
ldg.ac.accessList(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr)

proc accessList*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256) =
ldg.beginTrackApi LdgAccessListFn
ldg.ac.accessList(eAddr, slot)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, slot
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot

proc accountExists*(ldg: LedgerRef, eAddr: EthAddress): bool =
ldg.beginTrackApi LdgAccountExistsFn
result = ldg.ac.accountExists(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc addBalance*(ldg: LedgerRef, eAddr: EthAddress, delta: UInt256) =
ldg.beginTrackApi LdgAddBalanceFn
ldg.ac.addBalance(eAddr, delta)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, delta
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), delta

proc addLogEntry*(ldg: LedgerRef, log: Log) =
ldg.beginTrackApi LdgAddLogEntryFn

@ -142,7 +113,7 @@ proc beginSavepoint*(ldg: LedgerRef): LedgerSpRef =
proc clearStorage*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.beginTrackApi LdgClearStorageFn
ldg.ac.clearStorage(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr)

proc clearTransientStorage*(ldg: LedgerRef) =
ldg.beginTrackApi LdgClearTransientStorageFn

@ -162,7 +133,7 @@ proc commit*(ldg: LedgerRef, sp: LedgerSpRef) =
proc deleteAccount*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.beginTrackApi LdgDeleteAccountFn
ldg.ac.deleteAccount(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr)

proc dispose*(ldg: LedgerRef, sp: LedgerSpRef) =
ldg.beginTrackApi LdgDisposeFn

@ -177,22 +148,22 @@ proc getAndClearLogEntries*(ldg: LedgerRef): seq[Log] =
proc getBalance*(ldg: LedgerRef, eAddr: EthAddress): UInt256 =
ldg.beginTrackApi LdgGetBalanceFn
result = ldg.ac.getBalance(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc getCode*(ldg: LedgerRef, eAddr: EthAddress): CodeBytesRef =
ldg.beginTrackApi LdgGetCodeFn
result = ldg.ac.getCode(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc getCodeHash*(ldg: LedgerRef, eAddr: EthAddress): Hash256 =
ldg.beginTrackApi LdgGetCodeHashFn
result = ldg.ac.getCodeHash(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result=($$result)

proc getCodeSize*(ldg: LedgerRef, eAddr: EthAddress): int =
ldg.beginTrackApi LdgGetCodeSizeFn
result = ldg.ac.getCodeSize(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc getCommittedStorage*(
ldg: LedgerRef;

@ -201,22 +172,22 @@ proc getCommittedStorage*(
): UInt256 =
ldg.beginTrackApi LdgGetCommittedStorageFn
result = ldg.ac.getCommittedStorage(eAddr, slot)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, slot, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, result

proc getNonce*(ldg: LedgerRef, eAddr: EthAddress): AccountNonce =
ldg.beginTrackApi LdgGetNonceFn
result = ldg.ac.getNonce(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc getStorage*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): UInt256 =
ldg.beginTrackApi LdgGetStorageFn
result = ldg.ac.getStorage(eAddr, slot)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, slot, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, result

proc getStorageRoot*(ldg: LedgerRef, eAddr: EthAddress): Hash256 =
ldg.beginTrackApi LdgGetStorageRootFn
result = ldg.ac.getStorageRoot(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result=($$result)

proc getTransientStorage*(
ldg: LedgerRef;

@ -225,37 +196,37 @@ proc getTransientStorage*(
): UInt256 =
ldg.beginTrackApi LdgGetTransientStorageFn
result = ldg.ac.getTransientStorage(eAddr, slot)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, slot, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, result

proc contractCollision*(ldg: LedgerRef, eAddr: EthAddress): bool =
ldg.beginTrackApi LdgContractCollisionFn
result = ldg.ac.contractCollision(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc inAccessList*(ldg: LedgerRef, eAddr: EthAddress): bool =
ldg.beginTrackApi LdgInAccessListFn
result = ldg.ac.inAccessList(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc inAccessList*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): bool =
ldg.beginTrackApi LdgInAccessListFn
result = ldg.ac.inAccessList(eAddr, slot)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, slot, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, result

proc incNonce*(ldg: LedgerRef, eAddr: EthAddress) =
ldg.beginTrackApi LdgIncNonceFn
ldg.ac.incNonce(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr)

proc isDeadAccount*(ldg: LedgerRef, eAddr: EthAddress): bool =
ldg.beginTrackApi LdgIsDeadAccountFn
result = ldg.ac.isDeadAccount(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc isEmptyAccount*(ldg: LedgerRef, eAddr: EthAddress): bool =
ldg.beginTrackApi LdgIsEmptyAccountFn
result = ldg.ac.isEmptyAccount(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), result

proc isTopLevelClean*(ldg: LedgerRef): bool =
ldg.beginTrackApi LdgIsTopLevelCleanFn

@ -310,22 +281,22 @@ proc selfDestructLen*(ldg: LedgerRef): int =
proc setBalance*(ldg: LedgerRef, eAddr: EthAddress, balance: UInt256) =
ldg.beginTrackApi LdgSetBalanceFn
ldg.ac.setBalance(eAddr, balance)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, balance
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), balance

proc setCode*(ldg: LedgerRef, eAddr: EthAddress, code: Blob) =
ldg.beginTrackApi LdgSetCodeFn
ldg.ac.setCode(eAddr, code)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, code=code.toStr
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), code

proc setNonce*(ldg: LedgerRef, eAddr: EthAddress, nonce: AccountNonce) =
ldg.beginTrackApi LdgSetNonceFn
ldg.ac.setNonce(eAddr, nonce)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, nonce
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), nonce

proc setStorage*(ldg: LedgerRef, eAddr: EthAddress, slot, val: UInt256) =
ldg.beginTrackApi LdgSetStorageFn
ldg.ac.setStorage(eAddr, slot, val)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, slot, val
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, val

proc setTransientStorage*(
ldg: LedgerRef;

@ -335,7 +306,7 @@ proc setTransientStorage*(
) =
ldg.beginTrackApi LdgSetTransientStorageFn
ldg.ac.setTransientStorage(eAddr, slot, val)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, slot, val
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), slot, val

proc state*(ldg: LedgerRef): Hash256 =
ldg.beginTrackApi LdgStateFn

@ -345,7 +316,7 @@ proc state*(ldg: LedgerRef): Hash256 =
proc subBalance*(ldg: LedgerRef, eAddr: EthAddress, delta: UInt256) =
ldg.beginTrackApi LdgSubBalanceFn
ldg.ac.subBalance(eAddr, delta)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, delta
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr), delta

proc getAccessList*(ldg: LedgerRef): AccessList =
ldg.beginTrackApi LdgGetAccessListFn

@ -14,11 +14,15 @@ import
std/[strutils, times],
eth/common,
stew/byteutils,
../../../evm/code_bytes,
../../aristo/aristo_profile,
../../core_db,
"."/base_desc
"."/[base_config, base_desc]

type
Elapsed* = distinct Duration
## Needed for local `$` as it would be ambiguous for `Duration`

LedgerFnInx* = enum
## Profiling table index
SummaryItem = "total"

@ -58,7 +62,6 @@ type
LdgLogEntriesFn = "logEntries"
LdgMakeMultiKeysFn = "makeMultiKeys"
LdgPersistFn = "persist"
LdgRawRootHashFn = "rawRootHash"
LdgRipemdSpecialFn = "ripemdSpecial"
LdgRollbackFn = "rollback"
LdgRootHashFn = "rootHash"

@ -87,46 +90,55 @@ type
func oaToStr(w: openArray[byte]): string =
w.toHex.toLowerAscii

func toStr(w: EthAddress): string =
w.oaToStr

func toStr(w: Hash256): string =
w.data.oaToStr

func toStr(w: CodeBytesRef): string =
if w.isNil: "nil"
else: "[" & $w.bytes.len & "]"

func toStr(w: Blob): string =
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
else: "Blob[" & $w.len & "]"

func toStr(w: seq[Log]): string =
"Logs[" & $w.len & "]"

func toStr(ela: Duration): string =
aristo_profile.toStr(ela)

# ------------------------------------------------------------------------------
# Public API logging helpers
# ------------------------------------------------------------------------------

func toStr*(w: EthAddress): string =
w.oaToStr

func toStr*(w: Hash256): string =
w.data.oaToStr

func toStr*(w: CodeBytesRef): string =
if w.isNil: "nil"
else: "[" & $w.bytes.len & "]"

func toStr*(w: Blob): string =
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
else: "Blob[" & $w.len & "]"

func toStr*(w: seq[Log]): string =
"Logs[" & $w.len & "]"

func toStr*(ela: Duration): string =
aristo_profile.toStr(ela)
func `$`*(w: CodeBytesRef): string {.used.} = w.toStr
func `$`*(e: Elapsed): string = e.Duration.toStr
func `$`*(l: seq[Log]): string = l.toStr
func `$`*(b: Blob): string = b.toStr
func `$$`*(a: EthAddress): string = a.toStr # otherwise collision w/existing `$`
func `$$`*(h: Hash256): string = h.toStr # otherwise collision w/existing `$`

# ------------------------------------------------------------------------------
# Public API logging framework
# ------------------------------------------------------------------------------

template beginApi*(ldg: LedgerRef; s: static[LedgerFnInx]) =
const api {.inject,used.} = s # Generally available
let baStart {.inject.} = getTime() # Local use only
template beginTrackApi*(ldg: LedgerRef; s: LedgerFnInx) =
when LedgerEnableApiTracking:
const api {.inject,used.} = s # Generally available
let baStart {.inject.} = getTime() # Local use only

template endApiIf*(ldg: LedgerRef; code: untyped) =
when CoreDbEnableApiProfiling:
let elapsed {.inject,used.} = getTime() - baStart
aristo_profile.update(ldg.profTab, api.ord, elapsed)
if ldg.trackApi:
when not CoreDbEnableApiProfiling: # otherwise use variable above
let elapsed {.inject,used.} = getTime() - baStart
code
template ifTrackApi*(ldg: LedgerRef; code: untyped) =
when LedgerEnableApiTracking:
when LedgerEnableApiProfiling:
let elapsed {.inject,used.} = (getTime() - baStart).Elapsed
aristo_profile.update(ldg.profTab, api.ord, elapsed.Duration)
if ldg.trackApi:
when not LedgerEnableApiProfiling: # otherwise use variable above
let elapsed {.inject,used.} = (getTime() - baStart).Elapsed
code

# ------------------------------------------------------------------------------
# Public helpers

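As a point of reference, this is how the templates above are consumed by the wrappers in `base.nim` shown earlier in this diff; the body mirrors the `getBalance` wrapper and is illustrative only:

```nim
proc getBalance*(ldg: LedgerRef, eAddr: EthAddress): UInt256 =
  ldg.beginTrackApi LdgGetBalanceFn   # injects `api` and the start timestamp
  result = ldg.ac.getBalance(eAddr)   # forward to the backend accounts ledger
  ldg.ifTrackApi:                     # compiles to nothing unless tracking is enabled
    debug apiTxt, api, elapsed, eAddr=($$eAddr), result
```
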
@ -0,0 +1,33 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
../../core_db/base/base_config

# Configuration section
const
EnableApiTracking = false
## When enabled, API functions are logged. Tracking is enabled by setting
## the `trackApi` flag to `true`. This setting is typically inherited from
## the `CoreDb` descriptor flag `trackLedgerApi` (which is only available
## if the flag `CoreDbEnableApiTracking` is set `true`.)

EnableApiProfiling = false
## Enable API functions profiling. This setting is only effective if the
## flag `CoreDbEnableApiJumpTable` is set `true`.

# Exportable constants (leave alone this section)
const
LedgerEnableApiTracking* = EnableApiTracking and CoreDbEnableApiTracking
LedgerEnableApiProfiling* = EnableApiProfiling and CoreDbEnableApiJumpTable

# End

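A minimal sketch of how a ledger module keys its logging setup off the derived constants above (this mirrors the guarded import block in `base.nim` from this commit and is not itself part of the change):

```nim
import ./base/base_config

when LedgerEnableApiTracking:
  import std/times, chronicles
  logScope:
    topics = "ledger"
  const apiTxt = "API"
```
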
@ -12,10 +12,7 @@

import
../../aristo/aristo_profile,
../accounts_ledger

export
accounts_ledger
../backend/accounts_ledger

type
LedgerProfListRef* = AristoDbProfListRef

@ -13,18 +13,17 @@
import
eth/common,
../core_db,
./accounts_ledger,
./base/api_tracking,
./base
./backend/accounts_ledger,
./base/[api_tracking, base_config, base_desc]

when LedgerEnableApiTracking:
import
std/times,
chronicles
logScope:
topics = "ledger"
const
apiTxt = LedgerApiTxt

func `$`(a: EthAddress): string {.used.} = a.toStr
apiTxt = "API"

# ------------------------------------------------------------------------------
# Public iterators

@ -48,7 +47,7 @@ iterator cachedStorage*(ldg: LedgerRef, eAddr: EthAddress): (UInt256,UInt256) =
ldg.beginTrackApi LdgCachedStorageIt
for w in ldg.ac.cachedStorage(eAddr):
yield w
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr)


iterator pairs*(ldg: LedgerRef): (EthAddress,Account) =

@ -65,7 +64,7 @@ iterator storage*(
ldg.beginTrackApi LdgStorageIt
for w in ldg.ac.storage(eAddr):
yield w
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr=($$eAddr)

# ------------------------------------------------------------------------------
# End

@ -188,8 +188,9 @@ proc initRunnerDB(
result.initializeEmptyDb

setErrorLevel()
coreDB.trackNewApi = false
coreDB.trackLedgerApi =false
when CoreDbEnableApiTracking:
coreDB.trackCoreDbApi = false
coreDB.trackLedgerApi = false

# ------------------------------------------------------------------------------
# Test Runners: accounts and accounts storages

@ -242,10 +243,10 @@ proc chainSyncRunner(
if profilingOk: noisy.test_chainSyncProfilingPrint numBlocks
if persistent and finalDiskCleanUpOk: dbDir.flushDbDir

if noisy:
com.db.trackNewApi = true
com.db.trackNewApi = true
com.db.trackLedgerApi = true
when CoreDbEnableApiTracking:
if noisy:
com.db.trackCoreDbApi = true
com.db.trackLedgerApi = true

check noisy.test_chainSync(filePaths, com, numBlocks,
lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,

@ -295,10 +296,10 @@ proc persistentSyncPreLoadAndResumeRunner(
com.db.finish(eradicate = finalDiskCleanUpOk)
if profilingOk: noisy.test_chainSyncProfilingPrint firstPart

if noisy:
com.db.trackNewApi = true
com.db.trackNewApi = true
com.db.trackLedgerApi = true
when CoreDbEnableApiTracking:
if noisy:
com.db.trackCoreDbApi = true
com.db.trackLedgerApi = true

check noisy.test_chainSync(filePaths, com, firstPart,
lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,

@ -312,10 +313,10 @@ proc persistentSyncPreLoadAndResumeRunner(
if profilingOk: noisy.test_chainSyncProfilingPrint secndPart
if finalDiskCleanUpOk: dbDir.flushDbDir

if noisy:
com.db.trackNewApi = true
com.db.trackNewApi = true
com.db.trackLedgerApi = true
when CoreDbEnableApiTracking:
if noisy:
com.db.trackCoreDbApi = true
com.db.trackLedgerApi = true

check noisy.test_chainSync(filePaths, com, secndPart,
lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk,

@ -356,8 +357,8 @@ when isMainModule:
noisy.chainSyncRunner(
#dbType = CdbAristoDualRocks,
capture = capture,
pruneHistory = true,
profilingOk = true,
#pruneHistory = true,
#profilingOk = true,
#finalDiskCleanUpOk = false,
oldLogAlign = true
)

@ -19,7 +19,7 @@ import
../replay/[pp, undump_blocks, undump_blocks_era1, xcheck],
./test_helpers

when CoreDbEnableApiProfiling:
when CoreDbEnableProfiling:
import
std/sequtils,
../../nimbus/db/aristo/[aristo_api, aristo_profile],

@ -30,7 +30,7 @@ when CoreDbEnableApiProfiling:
cdbProfData: CoreDbProfListRef

when LedgerEnableApiProfiling:
when not CoreDbEnableApiProfiling:
when not CoreDbEnableProfiling:
import
std/sequtils
var

@ -66,15 +66,17 @@ template initLogging(noisy: bool, com: CommonRef) =
setDebugLevel()
debug "start undumping into persistent blocks"
logStartTime = Time()
logSavedEnv = (com.db.trackNewApi, com.db.trackLedgerApi)
setErrorLevel()
com.db.trackNewApi = true
com.db.trackLedgerApi = true
when CoreDbEnableApiTracking:
logSavedEnv = (com.db.trackCoreDbApi, com.db.trackLedgerApi)
com.db.trackCoreDbApi = true
com.db.trackLedgerApi = true

proc finishLogging(com: CommonRef) =
when EnableExtraLoggingControl:
setErrorLevel()
(com.db.trackNewApi, com.db.trackLedgerApi) = logSavedEnv
when CoreDbEnableApiTracking:
(com.db.trackCoreDbApi, com.db.trackLedgerApi) = logSavedEnv


template startLogging(noisy: bool; num: BlockNumber) =

@ -125,7 +127,7 @@ proc test_chainSyncProfilingPrint*(
names = LedgerFnInx.toSeq.mapIt($it),
header = "Ledger profiling results" & info,
indent)
when CoreDbEnableApiProfiling:
when CoreDbEnableProfiling:
blurb.add cdbProfData.profilingPrinter(
names = CoreDbFnInx.toSeq.mapIt($it),
header = "CoreDb profiling results" & info,

@ -183,7 +185,7 @@ proc test_chainSync*(
# Profile variables will be non-nil if profiling is available. The profiling
# API data need to be captured so it will be available after the services
# have terminated.
when CoreDbEnableApiProfiling:
when CoreDbEnableProfiling:
aristoProfData = com.db.ariApi.AristoApiProfRef.data
kvtProfData = com.db.kvtApi.KvtApiProfRef.data
cdbProfData = com.db.profTab

@ -235,8 +237,9 @@ proc test_chainSync*(
if noisy:
noisy.whisper "***", "Re-run with logging enabled...\n"
setTraceLevel()
com.db.trackNewApi = false
com.db.trackLedgerApi = false
when CoreDbEnableApiTracking:
com.db.trackCoreDbApi = false
com.db.trackLedgerApi = false
discard chain.persistBlocks(w)
blocks += w.len
continue

@ -22,7 +22,7 @@ import
../nimbus/core/casper,
../nimbus/transaction,
../nimbus/constants,
../nimbus/db/ledger/accounts_ledger {.all.}, # import all private symbols
../nimbus/db/ledger/backend/accounts_ledger {.all.}, # import all private symbols
unittest2

const