Only use pre hashed addresses as account keys (#2424)

* Normalised storage tree addressing in function prototypes

detail:
  Argument list is always `<db> <account-path> <slot-path> ..` with
  both path arguments as `openArray[]`

* Remove cruft

* CoreDb internally Use full account paths rather than addresses

* Update API logging

* Use hashed account address only in prototypes

why:
  This avoids unnecessary repeated hashing of the same account address.
  The burden of doing that is upon the application. In the case here,
  the ledger caches all kinds of stuff anyway so it is common sense to
  exploit that for account address hashes.

caveat:
  Using `openArray[byte]` argument types for hashed accounts is inherently
  fragile. In non-release mode, a length verification `doAssert` is
  enabled by default.

* No accPath in data record (use `AristoAccount` as `CoreDbAccount`)

* Remove now unused `eAddr` field from ledger `AccountRef` type

why:
  Is duplicate of lookup key

* Avoid merging the account record/statement in the ledger twice.
This commit is contained in:
Jordan Hrycaj 2024-06-27 19:21:01 +00:00 committed by GitHub
parent ae094692fb
commit 6dc2773957
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
24 changed files with 496 additions and 424 deletions

View File

@ -18,9 +18,8 @@ import
results,
./aristo_desc/desc_backend,
./aristo_init/memory_db,
"."/[aristo_delete, aristo_desc, aristo_fetch, aristo_get, aristo_hashify,
aristo_hike, aristo_init, aristo_merge, aristo_path, aristo_profile,
aristo_tx]
"."/[aristo_delete, aristo_desc, aristo_fetch, aristo_hashify,
aristo_init, aristo_merge, aristo_path, aristo_profile, aristo_tx]
export
AristoDbProfListRef
@ -51,7 +50,7 @@ type
AristoApiDeleteAccountRecordFn* =
proc(db: AristoDbRef;
path: openArray[byte];
accPath: openArray[byte];
): Result[void,AristoError]
{.noRaise.}
## Delete the account leaf entry addressed by the argument `path`. If
@ -81,20 +80,20 @@ type
AristoApiDeleteStorageDataFn* =
proc(db: AristoDbRef;
path: openArray[byte];
accPath: PathID;
accPath: openArray[byte];
stoPath: openArray[byte];
): Result[bool,AristoError]
{.noRaise.}
## For a given account argument `accPath`, this function deletes the
## argument `path` from the associated storage tree (if any, at all.) If
## the argument `path` deleted was the last one on the storage
## tree, account leaf referred to by `accPath` will be updated so that
## it will not refer to a storage tree anymore. In the latter case only
## the function will return `true`.
## argument `stoPath` from the associated storage tree (if any, at all.)
## If the argument `stoPath` deleted was the last one on the
## storage tree, account leaf referred to by `accPath` will be updated
## so that it will not refer to a storage tree anymore. In the latter
## case only the function will return `true`.
AristoApiDeleteStorageTreeFn* =
proc(db: AristoDbRef;
accPath: PathID;
accPath: openArray[byte];
): Result[void,AristoError]
{.noRaise.}
## Variant of `deleteStorageData()` for purging the whole storage tree
@ -110,10 +109,10 @@ type
AristoApiFetchAccountRecordFn* =
proc(db: AristoDbRef;
path: openArray[byte];
accPath: openArray[byte];
): Result[AristoAccount,AristoError]
{.noRaise.}
## Fetch an account record from the database indexed by `path`.
## Fetch an account record from the database indexed by `accPath`.
AristoApiFetchAccountStateFn* =
proc(db: AristoDbRef;
@ -139,16 +138,16 @@ type
AristoApiFetchStorageDataFn* =
proc(db: AristoDbRef;
path: openArray[byte];
accPath: PathID;
accPath: openArray[byte];
stoPath: openArray[byte];
): Result[Blob,AristoError]
{.noRaise.}
## For a storage tree related to account `accPath`, fetch the data
## record from the database indexed by `path`.
## record from the database indexed by `stoPath`.
AristoApiFetchStorageStateFn* =
proc(db: AristoDbRef;
accPath: PathID;
accPath: openArray[byte];
): Result[Hash256,AristoError]
{.noRaise.}
## Fetch the Merkle hash of the storage root related to `accPath`.
@ -225,14 +224,6 @@ type
##
## Use `aristo_desc.forget()` to clean up this descriptor.
AristoApiGetKeyRcFn* =
proc(db: AristoDbRef;
vid: VertexID;
): Result[HashKey,AristoError]
{.noRaise.}
## Cascaded attempt to fetch a Merkle hash from the cache layers or
## the backend (if available.)
AristoApiHashifyFn* =
proc(db: AristoDbRef;
): Result[void,(VertexID,AristoError)]
@ -242,10 +233,10 @@ type
AristoApiHasPathAccountFn* =
proc(db: AristoDbRef;
path: openArray[byte];
accPath: openArray[byte];
): Result[bool,AristoError]
{.noRaise.}
## For an account record indexed by `path` query whether this record
## For an account record indexed by `accPath` query whether this record
## exists on the database.
AristoApiHasPathGenericFn* =
@ -259,30 +250,21 @@ type
AristoApiHasPathStorageFn* =
proc(db: AristoDbRef;
path: openArray[byte];
accPath: PathID;
accPath: openArray[byte];
stoPath: openArray[byte];
): Result[bool,AristoError]
{.noRaise.}
## For a storage tree related to account `accPath`, query whether the
## data record indexed by `path` exists on the database.
## data record indexed by `stoPath` exists on the database.
AristoApiHasStorageDataFn* =
proc(db: AristoDbRef;
accPath: PathID;
accPath: openArray[byte];
): Result[bool,AristoError]
{.noRaise.}
## For a storage tree related to account `accPath`, query whether there
## is a non-empty data storage area at all.
AristoApiHikeUpFn* =
proc(path: NibblesBuf;
root: VertexID;
db: AristoDbRef;
): Result[Hike,(VertexID,AristoError,Hike)]
{.noRaise.}
## For the argument `path`, find and return the longest possible path
## in the argument database `db`.
AristoApiIsTopFn* =
proc(tx: AristoTxRef;
): bool
@ -326,20 +308,14 @@ type
AristoApiMergeStorageDataFn* =
proc(db: AristoDbRef;
stoKey: openArray[byte];
accPath: openArray[byte];
stoPath: openArray[byte];
stoData: openArray[byte];
accPath: PathID;
): Result[VertexID,AristoError]
): Result[void,AristoError]
{.noRaise.}
## Merge the key-value-pair argument `(stoKey,stoData)` as a storage
## value. This means, the root vertex will be derived from the `accPath`
## argument, the Patricia tree path for the storage tree is given by
## `stoKey` and the leaf value with the payload will be stored as a
## `PayloadRef` object of type `RawData`.
##
## If the storage tree does not exist yet it will be created and the
## payload leaf accessed by `accPath` will be updated with the storage
## tree vertex ID.
## Store the `stoData` data argument on the storage area addressed by
## `(accPath,stoPath)` where `accPath` is the account key (into the MPT)
## and `stoPath` is the slot path of the corresponding storage area.
AristoApiPathAsBlobFn* =
proc(tag: PathID;
@ -445,7 +421,6 @@ type
finish*: AristoApiFinishFn
forget*: AristoApiForgetFn
forkTx*: AristoApiForkTxFn
getKeyRc*: AristoApiGetKeyRcFn
hashify*: AristoApiHashifyFn
hasPathAccount*: AristoApiHasPathAccountFn
@ -453,7 +428,6 @@ type
hasPathStorage*: AristoApiHasPathStorageFn
hasStorageData*: AristoApiHasStorageDataFn
hikeUp*: AristoApiHikeUpFn
isTop*: AristoApiIsTopFn
level*: AristoApiLevelFn
nForked*: AristoApiNForkedFn
@ -494,7 +468,6 @@ type
AristoApiProfFinishFn = "finish"
AristoApiProfForgetFn = "forget"
AristoApiProfForkTxFn = "forkTx"
AristoApiProfGetKeyRcFn = "getKeyRc"
AristoApiProfHashifyFn = "hashify"
AristoApiProfHasPathAccountFn = "hasPathAccount"
@ -502,7 +475,6 @@ type
AristoApiProfHasPathStorageFn = "hasPathStorage"
AristoApiProfHasStorageDataFn = "hasStorageData"
AristoApiProfHikeUpFn = "hikeUp"
AristoApiProfIsTopFn = "isTop"
AristoApiProfLevelFn = "level"
AristoApiProfNForkedFn = "nForked"
@ -560,7 +532,6 @@ when AutoValidateApiHooks:
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.forkTx.isNil
doAssert not api.getKeyRc.isNil
doAssert not api.hashify.isNil
doAssert not api.hasPathAccount.isNil
@ -568,7 +539,6 @@ when AutoValidateApiHooks:
doAssert not api.hasPathStorage.isNil
doAssert not api.hasStorageData.isNil
doAssert not api.hikeUp.isNil
doAssert not api.isTop.isNil
doAssert not api.level.isNil
doAssert not api.nForked.isNil
@ -630,7 +600,6 @@ func init*(api: var AristoApiObj) =
api.finish = finish
api.forget = forget
api.forkTx = forkTx
api.getKeyRc = getKeyRc
api.hashify = hashify
api.hasPathAccount = hasPathAccount
@ -638,7 +607,6 @@ func init*(api: var AristoApiObj) =
api.hasPathStorage = hasPathStorage
api.hasStorageData = hasStorageData
api.hikeUp = hikeUp
api.isTop = isTop
api.level = level
api.nForked = nForked
@ -682,7 +650,6 @@ func dup*(api: AristoApiRef): AristoApiRef =
finish: api.finish,
forget: api.forget,
forkTx: api.forkTx,
getKeyRc: api.getKeyRc,
hashify: api.hashify,
hasPathAccount: api.hasPathAccount,
@ -690,7 +657,6 @@ func dup*(api: AristoApiRef): AristoApiRef =
hasPathStorage: api.hasPathStorage,
hasStorageData: api.hasStorageData,
hikeUp: api.hikeUp,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
@ -756,12 +722,12 @@ func init*(
result = api.deleteGenericTree(a, b)
profApi.deleteStorageData =
proc(a: AristoDbRef; b: openArray[byte]; c: PathID): auto =
proc(a: AristoDbRef; b, c: openArray[byte]): auto =
AristoApiProfDeleteStorageDataFn.profileRunner:
result = api.deleteStorageData(a, b, c)
profApi.deleteStorageTree =
proc(a: AristoDbRef; b: PathID): auto =
proc(a: AristoDbRef; b: openArray[byte]): auto =
AristoApiProfDeleteStorageTreeFn.profileRunner:
result = api.deleteStorageTree(a, b)
@ -791,12 +757,12 @@ func init*(
result = api.fetchGenericState(a, b)
profApi.fetchStorageData =
proc(a: AristoDbRef; b: openArray[byte]; c: PathID;): auto =
proc(a: AristoDbRef; b, c: openArray[byte]): auto =
AristoApiProfFetchStorageDataFn.profileRunner:
result = api.fetchStorageData(a, b, c)
profApi.fetchStorageState =
proc(a: AristoDbRef; b: PathID;): auto =
proc(a: AristoDbRef; b: openArray[byte]): auto =
AristoApiProfFetchStorageStateFn.profileRunner:
result = api.fetchStorageState(a, b)
@ -816,14 +782,9 @@ func init*(
result = api.forget(a)
profApi.forkTx =
proc(a: AristoDbRef; b: int; c = false): auto =
proc(a: AristoDbRef; b: int): auto =
AristoApiProfForkTxFn.profileRunner:
result = api.forkTx(a, b, c)
profApi.getKeyRc =
proc(a: AristoDbRef; b: VertexID): auto =
AristoApiProfGetKeyRcFn.profileRunner:
result = api.getKeyRc(a, b)
result = api.forkTx(a, b)
profApi.hashify =
proc(a: AristoDbRef): auto =
@ -841,20 +802,15 @@ func init*(
result = api.hasPathGeneric(a, b, c)
profApi.hasPathStorage =
proc(a: AristoDbRef; b: openArray[byte]; c: PathID): auto =
proc(a: AristoDbRef; b, c: openArray[byte]): auto =
AristoApiProfHasPathStorageFn.profileRunner:
result = api.hasPathStorage(a, b, c)
profApi.hasStorageData =
proc(a: AristoDbRef; b: PathID): auto =
proc(a: AristoDbRef; b: openArray[byte]): auto =
AristoApiProfHasStorageDataFn.profileRunner:
result = api.hasStorageData(a, b)
profApi.hikeUp =
proc(a: NibblesBuf; b: VertexID; c: AristoDbRef): auto =
AristoApiProfHikeUpFn.profileRunner:
result = api.hikeUp(a, b, c)
profApi.isTop =
proc(a: AristoTxRef): auto =
AristoApiProfIsTopFn.profileRunner:
@ -871,7 +827,7 @@ func init*(
result = api.nForked(a)
profApi.mergeAccountRecord =
proc(a: AristoDbRef; b, c: openArray[byte]): auto =
proc(a: AristoDbRef; b: openArray[byte]; c: AristoAccount): auto =
AristoApiProfMergeAccountRecordFn.profileRunner:
result = api.mergeAccountRecord(a, b, c)
@ -881,7 +837,7 @@ func init*(
result = api.mergeGenericData(a, b, c, d)
profApi.mergeStorageData =
proc(a: AristoDbRef; b, c: openArray[byte]; d: PathID): auto =
proc(a: AristoDbRef; b, c, d: openArray[byte]): auto =
AristoApiProfMergeStorageDataFn.profileRunner:
result = api.mergeStorageData(a, b, c, d)
@ -945,15 +901,15 @@ func init*(
data.list[AristoApiProfBeGetLstFn.ord].masked = true
beDup.putVtxFn =
proc(a: PutHdlRef; b: openArray[(VertexID,VertexRef)]) =
proc(a: PutHdlRef; b: VertexID, c: VertexRef) =
AristoApiProfBePutVtxFn.profileRunner:
be.putVtxFn(a,b)
be.putVtxFn(a, b, c)
data.list[AristoApiProfBePutVtxFn.ord].masked = true
beDup.putKeyFn =
proc(a: PutHdlRef; b: openArray[(VertexID,HashKey)]) =
proc(a: PutHdlRef; b: VertexID, c: HashKey) =
AristoApiProfBePutKeyFn.profileRunner:
be.putKeyFn(a,b)
be.putKeyFn(a, b, c)
data.list[AristoApiProfBePutKeyFn.ord].masked = true
beDup.putTuvFn =

View File

@ -331,13 +331,13 @@ proc deleteImpl(
proc deleteAccountRecord*(
db: AristoDbRef;
path: openArray[byte];
accPath: openArray[byte];
): Result[void,AristoError] =
## Delete the account leaf entry addressed by the argument `path`. If this
## leaf entry refers to a storage tree, this one will be deleted as well.
##
let
hike = NibblesBuf.fromBytes(path).hikeUp(VertexID(1), db).valueOr:
hike = accPath.hikeUp(VertexID(1), db).valueOr:
if error[1] in HikeAcceptableStopsNotFound:
return err(DelPathNotFound)
return err(error[1])
@ -374,7 +374,7 @@ proc deleteGenericData*(
elif LEAST_FREE_VID <= root.distinctBase:
return err(DelStoRootNotAccepted)
let hike = NibblesBuf.fromBytes(path).hikeUp(root, db).valueOr:
let hike = path.hikeUp(root, db).valueOr:
if error[1] in HikeAcceptableStopsNotFound:
return err(DelPathNotFound)
return err(error[1])
@ -403,12 +403,12 @@ proc deleteGenericTree*(
proc deleteStorageData*(
db: AristoDbRef;
path: openArray[byte];
accPath: PathID; # Needed for accounts payload
accPath: openArray[byte]; # Implies storage data tree
stoPath: openArray[byte];
): Result[bool,AristoError] =
## For a given account argument `accPath`, this function deletes the
## argument `path` from the associated storage tree (if any, at all.) If
## the argument `path` deleted was the last one on the storage tree,
## argument `stoPath` from the associated storage tree (if any, at all.) If
## the argument `stoPath` deleted was the last one on the storage tree,
## account leaf referred to by `accPath` will be updated so that it will
## not refer to a storage tree anymore. In the latter case only the function
## will return `true`.
@ -424,7 +424,7 @@ proc deleteStorageData*(
if not stoID.isValid:
return err(DelStoRootMissing)
let stoHike = NibblesBuf.fromBytes(path).hikeUp(stoID, db).valueOr:
let stoHike = stoPath.hikeUp(stoID, db).valueOr:
if error[1] in HikeAcceptableStopsNotFound:
return err(DelPathNotFound)
return err(error[1])
@ -448,7 +448,7 @@ proc deleteStorageData*(
proc deleteStorageTree*(
db: AristoDbRef; # Database, top layer
accPath: PathID; # Needed for accounts payload
accPath: openArray[byte]; # Implies storage data tree
): Result[void,AristoError] =
## Variant of `deleteStorageData()` for purging the whole storage tree
## associated to the account argument `accPath`.

View File

@ -15,14 +15,8 @@ type
# Miscellaneous/unclassified handy helpers
GenericError
AccRootUnacceptable
MptRootUnacceptable
MptRootMissing
NotImplemented
TrieInvalid
# Data record transcoders, `deblobify()` and `blobify()`
# Data record transcoders, `blobify()` from `blobify.nim`
BlobifyBranchMissingRefs
BlobifyExtMissingRefs
BlobifyExtPathOverflow

View File

@ -71,7 +71,7 @@ proc hasPayload(
if path.len == 0:
return err(FetchPathInvalid)
let hike = NibblesBuf.fromBytes(path).hikeUp(VertexID(1), db).valueOr:
let hike = path.hikeUp(VertexID(1), db).valueOr:
if error[1] in HikeAcceptableStopsNotFound:
return ok(false)
return err(error[1])
@ -83,7 +83,7 @@ proc hasPayload(
proc fetchAccountHike*(
db: AristoDbRef; # Database
accPath: PathID; # Implies a storage ID (if any)
accPath: openArray[byte]; # Implies a storage ID (if any)
): Result[Hike,AristoError] =
## Verify that the `accPath` argument properly refers to a storage root
## vertex ID. The function will reset the keys along the `accPath` for
@ -93,7 +93,7 @@ proc fetchAccountHike*(
## vertex and the vertex ID.
##
# Expand vertex path to account leaf
var hike = accPath.to(NibblesBuf).hikeUp(VertexID(1), db).valueOr:
var hike = accPath.hikeUp(VertexID(1), db).valueOr:
return err(FetchAccInaccessible)
# Extract the account payload from the leaf
@ -107,7 +107,7 @@ proc fetchAccountHike*(
proc fetchStorageID*(
db: AristoDbRef;
accPath: PathID;
accPath: openArray[byte];
): Result[VertexID,AristoError] =
## Public helper function for retrieving a storage (vertex) ID for a
## given account.
@ -138,11 +138,11 @@ proc fetchLastSavedState*(
proc fetchAccountRecord*(
db: AristoDbRef;
path: openArray[byte];
accPath: openArray[byte];
): Result[AristoAccount,AristoError] =
## Fetch an account record from the database indexed by `path`.
## Fetch an account record from the database indexed by `accPath`.
##
let pyl = ? db.retrievePayload(VertexID(1), path)
let pyl = ? db.retrievePayload(VertexID(1), accPath)
assert pyl.pType == AccountData # debugging only
ok pyl.account
@ -154,12 +154,12 @@ proc fetchAccountState*(
proc hasPathAccount*(
db: AristoDbRef;
path: openArray[byte];
accPath: openArray[byte];
): Result[bool,AristoError] =
## For an account record indexed by `path` query whether this record exists
## For an account record indexed by `accPath` query whether this record exists
## on the database.
##
db.hasPayload(VertexID(1), path)
db.hasPayload(VertexID(1), accPath)
proc fetchGenericData*(
@ -196,19 +196,19 @@ proc hasPathGeneric*(
proc fetchStorageData*(
db: AristoDbRef;
path: openArray[byte];
accPath: PathID;
accPath: openArray[byte];
stoPath: openArray[byte];
): Result[Blob,AristoError] =
## For a storage tree related to account `accPath`, fetch the data record
## from the database indexed by `path`.
##
let pyl = ? db.retrievePayload(? db.fetchStorageID accPath, path)
let pyl = ? db.retrievePayload(? db.fetchStorageID accPath, stoPath)
assert pyl.pType == RawData # debugging only
ok pyl.rawBlob
proc fetchStorageState*(
db: AristoDbRef;
accPath: PathID;
accPath: openArray[byte];
): Result[Hash256,AristoError] =
## Fetch the Merkle hash of the storage root related to `accPath`.
let stoID = db.fetchStorageID(accPath).valueOr:
@ -219,17 +219,17 @@ proc fetchStorageState*(
proc hasPathStorage*(
db: AristoDbRef;
path: openArray[byte];
accPath: PathID;
accPath: openArray[byte];
stoPath: openArray[byte];
): Result[bool,AristoError] =
## For a storage tree related to account `accPath`, query whether the data
## record indexed by `path` exists on the database.
##
db.hasPayload(? db.fetchStorageID accPath, path)
db.hasPayload(? db.fetchStorageID accPath, stoPath)
proc hasStorageData*(
db: AristoDbRef;
accPath: PathID;
accPath: openArray[byte];
): Result[bool,AristoError] =
## For a storage tree related to account `accPath`, query whether there
## is a non-empty data storage area at all.

View File

@ -165,6 +165,14 @@ proc hikeUp*(
## Variant of `hike()`
lty.path.to(NibblesBuf).hikeUp(lty.root, db)
proc hikeUp*(
path: openArray[byte];
root: VertexID;
db: AristoDbRef;
): Result[Hike,(VertexID,AristoError,Hike)] =
## Variant of `hike()`
NibblesBuf.fromBytes(path).hikeUp(root, db)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -43,7 +43,7 @@ const
proc mergeAccountRecord*(
db: AristoDbRef; # Database, top layer
accKey: openArray[byte]; # Even nibbled byte path
accPath: openArray[byte]; # Even nibbled byte path
accRec: AristoAccount; # Account data
): Result[bool,AristoError] =
## Merge the key-value-pair argument `(accKey,accPayload)` as an account
@ -59,7 +59,7 @@ proc mergeAccountRecord*(
##
let
pyl = PayloadRef(pType: AccountData, account: accRec)
rc = db.mergePayloadImpl(VertexID(1), accKey, pyl, VidVtxPair())
rc = db.mergePayloadImpl(VertexID(1), accPath, pyl)
if rc.isOk:
ok true
elif rc.error in MergeNoAction:
@ -91,7 +91,7 @@ proc mergeGenericData*(
let
pyl = PayloadRef(pType: RawData, rawBlob: @data)
rc = db.mergePayloadImpl(root, path, pyl, VidVtxPair())
rc = db.mergePayloadImpl(root, path, pyl)
if rc.isOk:
ok true
elif rc.error in MergeNoAction:
@ -102,22 +102,13 @@ proc mergeGenericData*(
proc mergeStorageData*(
db: AristoDbRef; # Database, top layer
stoKey: openArray[byte]; # Storage data path (aka key)
accPath: openArray[byte]; # Needed for accounts payload
stoPath: openArray[byte]; # Storage data path (aka key)
stoData: openArray[byte]; # Storage data payload value
accPath: PathID; # Needed for accounts payload
): Result[VertexID,AristoError] =
## Merge the key-value-pair argument `(stoKey,stoData)` as a storage value.
## This means, the root vertex will be derived from the `accPath` argument,
## the Patricia tree path for the storage tree is given by `stoKey` and the
## leaf value with the payload will be stored as a `PayloadRef` object of
## type `RawData`.
##
## If the storage tree does not exist yet it will be created and the
## payload leaf accessed by `accPath` will be updated with the storage
## tree vertex ID.
##
## The function returns the new vertex ID if a new storage tree was created,
## otherwise `VertexID(0)`.
): Result[void,AristoError] =
## Store the `stoData` data argument on the storage area addressed by
## `(accPath,stoPath)` where `accPath` is the account key (into the MPT)
## and `stoPath` is the slot path of the corresponding storage area.
##
let
accHike = db.fetchAccountHike(accPath).valueOr:
@ -132,14 +123,14 @@ proc mergeStorageData*(
# Call merge
pyl = PayloadRef(pType: RawData, rawBlob: @stoData)
rc = db.mergePayloadImpl(useID, stoKey, pyl, wpAcc)
rc = db.mergePayloadImpl(useID, stoPath, pyl)
if rc.isOk:
# Mark account path for update for `hashify()`
db.updateAccountForHasher accHike
if stoID.isValid:
return ok VertexID(0)
return ok()
else:
# Make sure that there is an account that refers to that storage trie
@ -147,11 +138,11 @@ proc mergeStorageData*(
leaf.lData.stoID = useID
db.layersPutVtx(VertexID(1), wpAcc.vid, leaf)
db.layersResKey(VertexID(1), wpAcc.vid)
return ok useID
return ok()
elif rc.error in MergeNoAction:
assert stoID.isValid # debugging only
return ok VertexID(0)
return ok()
# Error: mark account path for update for `hashify()`
db.updateAccountForHasher accHike

View File

@ -450,32 +450,12 @@ proc mergePayloadImpl*(
root: VertexID; # MPT state root
path: openArray[byte]; # Leaf item to add to the database
payload: PayloadRef; # Payload value
wpAcc: VidVtxPair; # Needed for storage tree
): Result[void,AristoError] =
## Merge the argument `(root,path)` key-value-pair into the top level vertex
## table of the database `db`. The `path` argument is used to address the
## leaf vertex with the payload. It is stored or updated on the database
## accordingly.
##
## If the `root` argument is `VertexID(1)` this function relies upon that the
## payload argument is of type `AccountData`. If the payload exists already
## on the database, the `storageID` field of the `payload` and on the database
## must be the same or an error is returned. The argument `wpAcc` will be
## ignored for accounts.
##
## Otherwise, if the `root` argument belongs to a well known sub trie (i.e.
## it does not exceed `LEAST_FREE_VID`) the entry will just be merged. The
## argument `wpAcc` will be ignored .
##
## Otherwise, a valid `wpAcc` must be given referring to an `AccountData`
## payload type leaf vertex. If the `storageID` field of that payload
## does not have a valid entry, a new sub-trie will be created. Otherwise
## this function expects that the `root` argument is the same as the
## `storageID` field.
##
## The function returns `true` iff a new sub-tree was linked to an account
## leaf record.
##
let
nibblesPath = NibblesBuf.fromBytes(path)
hike = nibblesPath.hikeUp(root, db).to(Hike)

View File

@ -472,7 +472,7 @@ iterator rightPairsGeneric*(
iterator rightPairsStorage*(
db: AristoDbRef; # Database layer
accPath: PathID; # Account the storage data belong to
accPath: openArray[byte]; # Account the storage data belong to
start = low(PathID); # Before or at first value
): (PathID,Blob) =
## Variant of `rightPairs()` for a storage tree

View File

@ -284,14 +284,13 @@ iterator aristoMptPairs*(dsc: CoreDbMptRef): (Blob,Blob) {.noRaise.} =
iterator aristoSlotPairs*(
dsc: CoreDbAccRef;
eAddr: EthAddress;
accPath: openArray[byte];
): (Blob,Blob)
{.noRaise.} =
let
api = dsc.to(AristoApiRef)
mpt = dsc.to(AristoDbRef)
accKey = HashKey.fromBytes(eAddr.keccakHash.data).value.to(PathID)
for (path,data) in mpt.rightPairsStorage accKey:
for (path,data) in mpt.rightPairsStorage accPath:
yield (api.pathAsBlob(path), data)
iterator aristoReplicateMem*(dsc: CoreDbMptRef): (Blob,Blob) {.rlpRaise.} =

View File

@ -12,7 +12,6 @@
import
std/typetraits,
chronicles,
eth/common,
stew/byteutils,
../../../aristo,
@ -44,9 +43,6 @@ type
AristoCoreDbAccBE* = ref object of CoreDbAccBackendRef
adb*: AristoDbRef
logScope:
topics = "aristo-hdl"
static:
doAssert high(CoreDbColType).ord < LEAST_FREE_VID
@ -54,11 +50,6 @@ static:
# Private helpers
# ------------------------------------------------------------------------------
func to(eAddr: EthAddress; T: type PathID): T =
HashKey.fromBytes(eAddr.keccakHash.data).value.to(T)
# -------------------------------
func toError(
e: AristoError;
base: AristoBaseRef;
@ -199,58 +190,71 @@ proc accMethods(): CoreDbAccFns =
proc accBackend(cAcc: AristoCoreDbAccRef): CoreDbAccBackendRef =
db.bless AristoCoreDbAccBE(adb: mpt)
proc accFetch(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[CoreDbAccount] =
proc accFetch(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[CoreDbAccount] =
const info = "acc/fetchFn()"
let acc = api.fetchAccountRecord(mpt, eAddr.keccakHash.data).valueOr:
let acc = api.fetchAccountRecord(mpt, accPath).valueOr:
if error != FetchPathNotFound:
return err(error.toError(base, info))
return err(error.toError(base, info, AccNotFound))
ok CoreDbAccount(
address: eAddr,
nonce: acc.nonce,
balance: acc.balance,
codeHash: acc.codeHash)
ok acc
proc accMerge(cAcc: AristoCoreDbAccRef, acc: CoreDbAccount): CoreDbRc[void] =
proc accMerge(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
accRec: CoreDbAccount;
): CoreDbRc[void] =
const info = "acc/mergeFn()"
let
key = acc.address.keccakHash.data
val = AristoAccount(
nonce: acc.nonce,
balance: acc.balance,
codeHash: acc.codeHash)
api.mergeAccountRecord(mpt, key, val).isOkOr:
let val = AristoAccount(
nonce: accRec.nonce,
balance: accRec.balance,
codeHash: accRec.codeHash)
api.mergeAccountRecord(mpt, accPath, val).isOkOr:
return err(error.toError(base, info))
ok()
proc accDelete(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
proc accDelete(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[void] =
const info = "acc/deleteFn()"
api.deleteAccountRecord(mpt, eAddr.keccakHash.data).isOkOr:
api.deleteAccountRecord(mpt, accPath).isOkOr:
if error == DelPathNotFound:
# TODO: Would it be convenient to just return `ok()` here?
return err(error.toError(base, info, AccNotFound))
return err(error.toError(base, info))
ok()
proc accClearStorage(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
proc accClearStorage(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[void] =
const info = "acc/clearStoFn()"
api.deleteStorageTree(mpt, eAddr.to(PathID)).isOkOr:
api.deleteStorageTree(mpt, accPath).isOkOr:
if error notin {DelStoRootMissing,DelStoAccMissing}:
return err(error.toError(base, info))
ok()
proc accHasPath(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[bool] =
proc accHasPath(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[bool] =
const info = "hasPathFn()"
let yn = api.hasPathAccount(mpt, eAddr.keccakHash.data).valueOr:
let yn = api.hasPathAccount(mpt, accPath).valueOr:
return err(error.toError(base, info))
ok(yn)
proc accState(cAcc: AristoCoreDbAccRef, updateOk: bool): CoreDbRc[Hash256] =
proc accState(
cAcc: AristoCoreDbAccRef,
updateOk: bool;
): CoreDbRc[Hash256] =
const info = "accStateFn()"
let rc = api.fetchAccountState(mpt)
@ -267,19 +271,27 @@ proc accMethods(): CoreDbAccFns =
ok(state)
proc slotFetch(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; key: openArray[byte]): CoreDbRc[Blob] =
proc slotFetch(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
stoPath: openArray[byte];
): CoreDbRc[Blob] =
const info = "slotFetchFn()"
let data = api.fetchStorageData(mpt, key, eAddr.to(PathID)).valueOr:
let data = api.fetchStorageData(mpt, accPath, stoPath).valueOr:
if error != FetchPathNotFound:
return err(error.toError(base, info))
return err(error.toError(base, info, StoNotFound))
ok(data)
proc slotDelete(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; key: openArray[byte]): CoreDbRc[void] =
proc slotDelete(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
stoPath: openArray[byte];
): CoreDbRc[void] =
const info = "slotDeleteFn()"
api.deleteStorageData(mpt, key, eAddr.to(PathID)).isOkOr:
api.deleteStorageData(mpt, accPath, stoPath).isOkOr:
if error == DelPathNotFound:
return err(error.toError(base, info, StoNotFound))
if error == DelStoRootMissing:
@ -289,24 +301,37 @@ proc accMethods(): CoreDbAccFns =
return err(error.toError(base, info))
ok()
proc slotHasPath(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; key: openArray[byte]): CoreDbRc[bool] =
proc slotHasPath(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
stoPath: openArray[byte];
): CoreDbRc[bool] =
const info = "slotHasPathFn()"
let yn = api.hasPathStorage(mpt, key, eAddr.to(PathID)).valueOr:
let yn = api.hasPathStorage(mpt, accPath, stoPath).valueOr:
return err(error.toError(base, info))
ok(yn)
proc slotMerge(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; key, val: openArray[byte]): CoreDbRc[void] =
proc slotMerge(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
stoPath: openArray[byte];
stoData: openArray[byte];
): CoreDbRc[void] =
const info = "slotMergeFn()"
api.mergeStorageData(mpt, key, val, eAddr.to(PathID)).isOkOr:
api.mergeStorageData(mpt, accPath, stoPath, stoData).isOkOr:
return err(error.toError(base, info))
ok()
proc slotState(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; updateOk: bool): CoreDbRc[Hash256] =
proc slotState(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
updateOk: bool;
): CoreDbRc[Hash256] =
const info = "slotStateFn()"
let rc = api.fetchStorageState(mpt, eAddr.to(PathID))
let rc = api.fetchStorageState(mpt, accPath)
if rc.isOk:
return ok(rc.value)
elif not updateOk and rc.error != GetKeyUpdateNeeded:
@ -315,14 +340,17 @@ proc accMethods(): CoreDbAccFns =
# FIXME: `hashify()` should probably throw an assert on failure
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)
let state = api.fetchStorageState(mpt, eAddr.to(PathID)).valueOr:
let state = api.fetchStorageState(mpt, accPath).valueOr:
return err(error.toError(base, info))
ok(state)
proc slotStateEmpty(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[bool] =
proc slotStateEmpty(
cAcc: AristoCoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[bool] =
const info = "slotStateEmptyFn()"
let yn = api.hasStorageData(mpt, eAddr.to(PathID)).valueOr:
let yn = api.hasStorageData(mpt, accPath).valueOr:
return err(error.toError(base, info))
ok(not yn)
@ -331,41 +359,84 @@ proc accMethods(): CoreDbAccFns =
backendFn: proc(cAcc: CoreDbAccRef): CoreDbAccBackendRef =
accBackend(AristoCoreDbAccRef(cAcc)),
fetchFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress): CoreDbRc[CoreDbAccount] =
accFetch(AristoCoreDbAccRef(cAcc), eAddr),
fetchFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[CoreDbAccount] =
accFetch(AristoCoreDbAccRef(cAcc), accPath),
deleteFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress): CoreDbRc[void] =
accDelete(AristoCoreDbAccRef(cAcc), eAddr),
deleteFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[void] =
accDelete(AristoCoreDbAccRef(cAcc), accPath),
clearStorageFn: proc(cAcc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
accClearStorage(AristoCoreDbAccRef(cAcc), eAddr),
clearStorageFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[void] =
accClearStorage(AristoCoreDbAccRef(cAcc), accPath),
mergeFn: proc(cAcc: CoreDbAccRef, acc: CoreDbAccount): CoreDbRc[void] =
accMerge(AristoCoreDbAccRef(cAcc), acc),
mergeFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
accRec: CoreDbAccount;
): CoreDbRc[void] =
accMerge(AristoCoreDbAccRef(cAcc), accPath, accRec),
hasPathFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress): CoreDbRc[bool] =
accHasPath(AristoCoreDbAccRef(cAcc), eAddr),
hasPathFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[bool] =
accHasPath(AristoCoreDbAccRef(cAcc), accPath),
stateFn: proc(cAcc: CoreDbAccRef, updateOk: bool): CoreDbRc[Hash256] =
stateFn: proc(
cAcc: CoreDbAccRef;
updateOk: bool;
): CoreDbRc[Hash256] =
accState(AristoCoreDbAccRef(cAcc), updateOk),
slotFetchFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; k: openArray[byte]): CoreDbRc[Blob] =
slotFetch(AristoCoreDbAccRef(cAcc), eAddr, k),
slotFetchFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
stoPath: openArray[byte];
): CoreDbRc[Blob] =
slotFetch(AristoCoreDbAccRef(cAcc), accPath, stoPath),
slotDeleteFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; k: openArray[byte]): CoreDbRc[void] =
slotDelete(AristoCoreDbAccRef(cAcc), eAddr, k),
slotDeleteFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
stoPath: openArray[byte];
): CoreDbRc[void] =
slotDelete(AristoCoreDbAccRef(cAcc), accPath, stoPath),
slotHasPathFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; k: openArray[byte]): CoreDbRc[bool] =
slotHasPath(AristoCoreDbAccRef(cAcc), eAddr, k),
slotHasPathFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
stoPath: openArray[byte];
): CoreDbRc[bool] =
slotHasPath(AristoCoreDbAccRef(cAcc), accPath, stoPath),
slotMergeFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; k,v: openArray[byte]): CoreDbRc[void] =
slotMerge(AristoCoreDbAccRef(cAcc), eAddr, k, v),
slotMergeFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
stoPath: openArray[byte];
stoData: openArray[byte];
): CoreDbRc[void] =
slotMerge(AristoCoreDbAccRef(cAcc), accPath, stoPath, stoData),
slotStateFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; updateOk: bool): CoreDbRc[Hash256] =
slotState(AristoCoreDbAccRef(cAcc), eAddr, updateOk),
slotStateFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
updateOk: bool;
): CoreDbRc[Hash256] =
slotState(AristoCoreDbAccRef(cAcc), accPath, updateOk),
slotStateEmptyFn: proc(cAcc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[bool] =
slotStateEmpty(AristoCoreDbAccRef(cAcc), eAddr))
slotStateEmptyFn: proc(
cAcc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[bool] =
slotStateEmpty(AristoCoreDbAccRef(cAcc), accPath))
# ------------------------------------------------------------------------------
# Private context call back functions

View File

@ -19,10 +19,18 @@ from ../aristo
import EmptyBlob, isValid
const
EnableAccountKeyValidation = defined(release).not
## If this flag is enabled, the length of an account key is verified
## to habe exactly 32 bytes. An assert is thrown if seen otherwise (a
## notoriously week spot of the `openArray[byte]` argument type.)
EnableApiTracking = false
## When enabled, functions using this tracking facility need to import
## `chronicles`, as well. Tracking is enabled by setting `true` the flags
## `trackLegaApi` and/or `trackNewApi` in the `CoreDbTxRef` descriptor.
## `chronicles`, as well. Also, some `func` designators might need to
## be changed to `proc` for possible side effects.
##
## Tracking noise is then enabled by setting the flag `trackNewApi` to
## `true` in the `CoreDbRef` descriptor.
EnableApiProfiling = true
## Enables functions profiling if `EnableApiTracking` is also set `true`.
@ -79,7 +87,8 @@ when EnableApiTracking:
{.warning: "*** Provided API logging for CoreDB (disabled by default)".}
import
std/times
std/times,
chronicles
proc `$`[T](rc: CoreDbRc[T]): string = rc.toStr
proc `$`(q: set[CoreDbCaptFlags]): string = q.toStr
@ -337,9 +346,7 @@ proc fetch*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
##
mpt.setTrackNewApi MptFetchFn
result = mpt.methods.fetchFn(mpt, key)
mpt.ifTrackNewApi:
let col = mpt.methods.getColFn(mpt)
debug newApiTxt, api, elapsed, col, key=key.toStr, result
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
## This function returns an empty `Blob` if the argument `key` is not found
@ -349,16 +356,12 @@ proc fetchOrEmpty*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
result = mpt.methods.fetchFn(mpt, key)
if result.isErr and result.error.error == MptNotFound:
result = CoreDbRc[Blob].ok(EmptyBlob)
mpt.ifTrackNewApi:
let col = mpt.methods.getColFn(mpt)
debug newApiTxt, api, elapsed, col, key=key.toStr, result
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
proc delete*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[void] =
mpt.setTrackNewApi MptDeleteFn
result = mpt.methods.deleteFn(mpt, key)
mpt.ifTrackNewApi:
let col = mpt.methods.getColFn()
debug newApiTxt, api, elapsed, col, key=key.toStr, result
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
proc merge*(
mpt: CoreDbMptRef;
@ -368,8 +371,7 @@ proc merge*(
mpt.setTrackNewApi MptMergeFn
result = mpt.methods.mergeFn(mpt, key, val)
mpt.ifTrackNewApi:
let col = mpt.methods.getColFn(mpt)
debug newApiTxt, api, elapsed, col, key=key.toStr, val=val.toLenStr, result
debug newApiTxt, api, elapsed, key=key.toStr, val=val.toLenStr, result
proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
## This function would be named `contains()` if it returned `bool` rather
@ -377,9 +379,7 @@ proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
##
mpt.setTrackNewApi MptHasPathFn
result = mpt.methods.hasPathFn(mpt, key)
mpt.ifTrackNewApi:
let col = mpt.methods.getColFn(mpt)
debug newApiTxt, api, elapsed, col, key=key.toStr, result
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
proc state*(mpt: CoreDbMptRef; updateOk = false): CoreDbRc[Hash256] =
## This function retrieves the Merkle state hash of the argument
@ -401,54 +401,81 @@ proc getAccounts*(ctx: CoreDbCtxRef): CoreDbAccRef =
##
ctx.setTrackNewApi CtxGetAccountsFn
result = ctx.methods.getAccountsFn(ctx)
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, col, result
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed
# ----------- accounts ---------------
proc fetch*(acc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[CoreDbAccount] =
proc fetch*(
acc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[CoreDbAccount] =
## Fetch the account data record for the particular account indexed by
## the address `eAddr`.
## the key `accPath`.
##
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccFetchFn
result = acc.methods.fetchFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
result = acc.methods.fetchFn(acc, accPath)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
proc delete*(acc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
## Delete the particular account indexed by the address `eAddr`. This
proc delete*(
acc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[void] =
## Delete the particular account indexed by the key `accPath`. This
## will also destroy an associated storage area.
##
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccDeleteFn
result = acc.methods.deleteFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
result = acc.methods.deleteFn(acc, accPath)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
proc clearStorage*(acc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
proc clearStorage*(
acc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[void] =
## Delete all data slots from the storage area associated with the
## particular account indexed by the address `eAddr`.
## particular account indexed by the key `accPath`.
##
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccClearStorageFn
result = acc.methods.clearStorageFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
result = acc.methods.clearStorageFn(acc, accPath)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
proc merge*(acc: CoreDbAccRef; account: CoreDbAccount): CoreDbRc[void] =
proc merge*(
acc: CoreDbAccRef;
accPath: openArray[byte];
accRec: CoreDbAccount;
): CoreDbRc[void] =
## Add or update the argument account data record `account`. Note that the
## `account` argument uniquely idendifies the particular account address.
##
acc.setTrackNewApi AccMergeFn
result = acc.methods.mergeFn(acc, account)
result = acc.methods.mergeFn(acc, accPath, accRec)
acc.ifTrackNewApi:
let eAddr = account.address
debug newApiTxt, api, elapsed, eAddr, result
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
proc hasPath*(acc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[bool] =
proc hasPath*(
acc: CoreDbAccRef;
accPath: openArray[byte];
): CoreDbRc[bool] =
## Would be named `contains` if it returned `bool` rather than `Result[]`.
##
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccHasPathFn
result = acc.methods.hasPathFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
result = acc.methods.hasPathFn(acc, accPath)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
proc state*(acc: CoreDbAccRef; updateOk = false): CoreDbRc[Hash256] =
## This function retrieves the Merkle state hash of the accounts
## column (if acvailable.)
## column (if available.)
##
## If the argument `updateOk` is set `true`, the Merkle hashes of the
## database will be updated first (if needed, at all).
@ -461,94 +488,133 @@ proc state*(acc: CoreDbAccRef; updateOk = false): CoreDbRc[Hash256] =
proc slotFetch*(
acc: CoreDbAccRef;
eAddr: EthAddress;
accPath: openArray[byte];
slot: openArray[byte];
): CoreDbRc[Blob] =
## Like `fetch()` but with cascaded index `(accPath,slot)`.
acc.setTrackNewApi AccSlotFetchFn
result = acc.methods.slotFetchFn(acc, eAddr, slot)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
result = acc.methods.slotFetchFn(acc, accPath, slot)
acc.ifTrackNewApi:
doAssert accPath.len == 32
debug newApiTxt, api, elapsed, accPath=accPath.toStr,
slot=slot.toStr, result
proc slotDelete*(
acc: CoreDbAccRef;
eAddr: EthAddress;
accPath: openArray[byte];
slot: openArray[byte];
): CoreDbRc[void] =
## Like `delete()` but with cascaded index `(accPath,slot)`.
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccSlotDeleteFn
result = acc.methods.slotDeleteFn(acc, eAddr, slot)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
result = acc.methods.slotDeleteFn(acc, accPath, slot)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr,
slot=slot.toStr, result
proc slotHasPath*(
acc: CoreDbAccRef;
eAddr: EthAddress;
accPath: openArray[byte];
slot: openArray[byte];
): CoreDbRc[bool] =
## Like `hasPath()` but with cascaded index `(accPath,slot)`.
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccSlotHasPathFn
result = acc.methods.slotHasPathFn(acc, eAddr, slot)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
result = acc.methods.slotHasPathFn(acc, accPath, slot)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr,
slot=slot.toStr, result
proc slotMerge*(
acc: CoreDbAccRef;
eAddr: EthAddress;
accPath: openArray[byte];
slot: openArray[byte];
data: openArray[byte];
): CoreDbRc[void] =
## Like `merge()` but with cascaded index `(accPath,slot)`.
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccSlotMergeFn
result = acc.methods.slotMergeFn(acc, eAddr, slot, data)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
result = acc.methods.slotMergeFn(acc, accPath, slot, data)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr,
slot=slot.toStr, result
proc slotState*(
acc: CoreDbAccRef;
eAddr: EthAddress;
accPath: openArray[byte];
updateOk = false;
): CoreDbRc[Hash256] =
## This function retrieves the Merkle state hash of the storage data
## column (if available) related to the account indexed by the key
## `accPath`.`.
##
## If the argument `updateOk` is set `true`, the Merkle hashes of the
## database will be updated first (if needed, at all).
##
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccSlotStateFn
result = acc.methods.slotStateFn(acc, eAddr, updateOk)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, updateOk, result
result = acc.methods.slotStateFn(acc, accPath, updateOk)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr, updateOk, result
proc slotStateEmpty*(
acc: CoreDbAccRef;
eAddr: EthAddress;
accPath: openArray[byte];
): CoreDbRc[bool] =
## ...
## This function returns `true` if the storage data column is empty or
## missing.
##
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccSlotStateEmptyFn
result = acc.methods.slotStateEmptyFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, updateOk, result
result = acc.methods.slotStateEmptyFn(acc, accPath)
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
proc slotStateEmptyOrVoid*(
acc: CoreDbAccRef;
eAddr: EthAddress;
accPath: openArray[byte];
): bool =
## Convenience wrapper, returns `true` where `slotStateEmpty()` would fail.
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi AccSlotStateEmptyOrVoidFn
result = acc.methods.slotStateEmptyFn(acc, eAddr).valueOr: true
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, updateOk, result
result = acc.methods.slotStateEmptyFn(acc, accPath).valueOr: true
acc.ifTrackNewApi:
debug newApiTxt, api, elapsed, accPath=accPath.toStr, result
# ------------- other ----------------
proc recast*(
acc: CoreDbAccRef;
statement: CoreDbAccount;
accPath: openArray[byte];
accRec: CoreDbAccount;
updateOk = false;
): CoreDbRc[Account] =
## Convert the argument `statement` to the portable Ethereum representation
## of an account statement. This conversion may fail if the storage colState
## hash (see `hash()` above) is currently unavailable.
##
## Note:
## With the legacy backend, this function always succeeds.
##
when EnableAccountKeyValidation:
doAssert accPath.len == 32
acc.setTrackNewApi EthAccRecastFn
let rc = acc.methods.slotStateFn(acc, statement.address, updateOk)
let rc = acc.methods.slotStateFn(acc, accPath, updateOk)
result =
if rc.isOk:
ok Account(
nonce: statement.nonce,
balance: statement.balance,
codeHash: statement.codeHash,
nonce: accRec.nonce,
balance: accRec.balance,
codeHash: accRec.codeHash,
storageRoot: rc.value)
else:
err(rc.error)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, storage, result
acc.ifTrackNewApi:
let slotState = if rc.isOk: rc.value.toStr else: "n/a"
debug newApiTxt, api, elapsed, accPath=accPath.toStr, slotState, result
# ------------------------------------------------------------------------------
# Public transaction related methods

View File

@ -127,6 +127,9 @@ func toStr*(w: openArray[byte]): string =
func toStr*(w: set[CoreDbCaptFlags]): string =
"Flags[" & $w.len & "]"
proc toStr*(rc: CoreDbRc[int]): string =
if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & rc.error.toStr & ")"
proc toStr*(rc: CoreDbRc[bool]): string =
if rc.isOk: "ok(" & $rc.value & ")" else: "err(" & rc.error.toStr & ")"
@ -171,11 +174,11 @@ template beginNewApi*(w: CoreDbApiTrackRef; s: static[CoreDbFnInx]) =
let bnaStart {.inject.} = getTime() # Local use only
template endNewApiIf*(w: CoreDbApiTrackRef; code: untyped) =
block:
block body:
when typeof(w) is CoreDbRef:
let db = w
else:
if w.isNil: break
if w.isNil: break body
let db = w.parent
when CoreDbEnableApiProfiling:
let elapsed {.inject,used.} = getTime() - bnaStart

View File

@ -13,6 +13,7 @@
import
std/tables,
eth/common,
../../aristo,
../../aristo/aristo_profile
# Annotation helpers
@ -38,33 +39,24 @@ type
CoreDbRc*[T] = Result[T,CoreDbErrorRef]
CoreDbAccount* = object
## Generic account representation referencing an *MPT* sub-trie
address*: EthAddress ## Reverse reference for storage trie path
nonce*: AccountNonce ## Some `uint64` type
balance*: UInt256
codeHash*: Hash256
CoreDbAccount* = AristoAccount
## Generic account record representation. The data fields
## look like:
## * nonce*: AccountNonce -- Some `uint64` type
## * balance*: UInt256 -- Account balance
## * codeHash*: Hash256 -- Lookup value
CoreDbErrorCode* = enum
Unset = 0
Unspecified
AccAddrMissing
AccNotFound
AccTxPending
AutoFlushFailed
ColUnacceptable
ColLocked
CtxNotFound
HashNotAvailable
KvtNotFound
MptNotFound
NotImplemented
RlpException
RootNotFound
RootUnacceptable
StoNotFound
StorageFailed
TxPending
CoreDbColType* = enum
@ -182,26 +174,41 @@ type
# ----------------------------------------------------
# Sub-descriptor: Account column methods
# ------------------------------------------------------
CoreDbAccBackendFn* = proc(cAcc: CoreDbAccRef): CoreDbAccBackendRef {.noRaise.}
CoreDbAccFetchFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
CoreDbAccDeleteFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[void] {.noRaise.}
CoreDbAccClearStorageFn* = proc(cAcc: CoreDbAccRef,k: EthAddress): CoreDbRc[void] {.noRaise.}
CoreDbAccMergeFn* = proc(cAcc: CoreDbAccRef, v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
CoreDbAccHasPathFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[bool] {.noRaise.}
CoreDbAccStateFn* = proc(cAcc: CoreDbAccRef, updateOk: bool): CoreDbRc[Hash256] {.noRaise.}
CoreDbAccBackendFn* = proc(
cAcc: CoreDbAccRef): CoreDbAccBackendRef {.noRaise.}
CoreDbAccFetchFn* = proc(
cAcc: CoreDbAccRef; accPath: openArray[byte];
): CoreDbRc[CoreDbAccount] {.noRaise.}
CoreDbAccDeleteFn* = proc(
cAcc: CoreDbAccRef, accPath: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbAccClearStorageFn* = proc(
cAcc: CoreDbAccRef; accPath: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbAccMergeFn* = proc(
cAcc: CoreDbAccRef; accPath: openArray[byte]; accRec: CoreDbAccount;
): CoreDbRc[void] {.noRaise.}
CoreDbAccHasPathFn* = proc(
cAcc: CoreDbAccRef; accPath: openArray[byte]): CoreDbRc[bool] {.noRaise.}
CoreDbAccStateFn* = proc(
cAcc: CoreDbAccRef; updateOk: bool): CoreDbRc[Hash256] {.noRaise.}
CoreDbSlotFetchFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress; k: openArray[byte]): CoreDbRc[Blob] {.noRaise.}
CoreDbSlotDeleteFn* =
proc(cAcc: CoreDbAccRef,a: EthAddress; k: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbSlotHasPathFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress; k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
CoreDbSlotMergeFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress; k, v: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbSlotStateFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress; updateOk: bool): CoreDbRc[Hash256] {.noRaise.}
CoreDbSlotStateEmptyFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress): CoreDbRc[bool] {.noRaise.}
CoreDbSlotFetchFn* = proc(
cAcc: CoreDbAccRef; accPath, stoPath: openArray[byte];
): CoreDbRc[Blob] {.noRaise.}
CoreDbSlotDeleteFn* = proc(
cAcc: CoreDbAccRef; accPath, stoPath: openArray[byte];
): CoreDbRc[void] {.noRaise.}
CoreDbSlotHasPathFn* = proc(
cAcc: CoreDbAccRef; accPath, stoPath: openArray[byte];
): CoreDbRc[bool] {.noRaise.}
CoreDbSlotMergeFn* = proc(
cAcc: CoreDbAccRef; accPath, stoPath, stoData: openArray[byte];
): CoreDbRc[void] {.noRaise.}
CoreDbSlotStateFn* = proc(
cAcc: CoreDbAccRef; accPath: openArray[byte]; updateOk: bool;
): CoreDbRc[Hash256] {.noRaise.}
CoreDbSlotStateEmptyFn* = proc(
cAcc: CoreDbAccRef; accPath: openArray[byte];
): CoreDbRc[bool] {.noRaise.}
CoreDbAccFns* = object
## Methods for trie objects
@ -256,7 +263,6 @@ type
CoreDbRef* = ref object of RootRef
## Database descriptor
dbType*: CoreDbType ## Type of database backend
trackLegaApi*: bool ## Debugging, support
trackNewApi*: bool ## Debugging, support
trackLedgerApi*: bool ## Debugging, suggestion for subsequent ledger
profTab*: CoreDbProfListRef ## Profiling data (if any)

View File

@ -58,17 +58,19 @@ iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
raiseAssert: "Unsupported database type: " & $mpt.parent.dbType
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed
iterator slotPairs*(acc: CoreDbAccRef; eAddr: EthAddress): (Blob, Blob) =
iterator slotPairs*(acc: CoreDbAccRef; accPath: openArray[byte]): (Blob, Blob) =
## Trie traversal, only supported for `CoreDbMptRef`
##
acc.setTrackNewApi AccSlotPairsIt
case acc.parent.dbType:
of AristoDbMemory, AristoDbRocks, AristoDbVoid:
for k,v in acc.aristoSlotPairs(eAddr):
for k,v in acc.aristoSlotPairs accPath:
yield (k,v)
of Ooops:
raiseAssert: "Unsupported database type: " & $acc.parent.dbType
acc.ifTrackNewApi: debug newApiTxt, api, elapsed
acc.ifTrackNewApi:
doAssert accPath.len == 32
debug newApiTxt, api, elapsed
iterator replicate*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
## Low level trie dump, only supported for non persistent `CoreDbMptRef`

View File

@ -47,9 +47,7 @@ iterator replicatePersistent*(mpt: CoreDbMptRef): (Blob, Blob) {.rlpRaise.} =
yield (k,v)
else:
raiseAssert: "Unsupported database type: " & $mpt.parent.dbType
mpt.ifTrackNewApi:
let trie = mpt.methods.getColFn()
debug newApiTxt, api, elapsed, trie
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed
# ------------------------------------------------------------------------------
# End

View File

@ -10,19 +10,6 @@
{.push raises: [].}
## Re-write of legacy `accounts_cache.nim` using new database API.
##
## Notable changes are:
##
## * `AccountRef`
## + renamed from `RefAccount`
## + the `statement` entry is sort of a superset of an `Account` object
## - contains an `EthAddress` field
##
## * `AccountsLedgerRef`
## + renamed from `AccountsCache`
##
import
std/[tables, hashes, sets, typetraits],
chronicles,
@ -61,6 +48,7 @@ type
AccountRef = ref object
statement: CoreDbAccount
accPath: Hash256
flags: AccountFlags
code: CodeBytesRef
originalStorage: TableRef[UInt256, UInt256]
@ -137,22 +125,22 @@ when debugAccountsLedgerRef:
template logTxt(info: static[string]): static[string] =
"AccountsLedgerRef " & info
template toAccountKey(acc: AccountRef): openArray[byte] =
acc.accPath.data.toOpenArray(0,31)
template toAccountKey(eAddr: EthAddress): openArray[byte] =
eAddr.keccakHash.data.toOpenArray(0,31)
proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint {.gcsafe.}
func newCoreDbAccount(address: EthAddress): CoreDbAccount =
CoreDbAccount(
address: address,
nonce: emptyEthAccount.nonce,
balance: emptyEthAccount.balance,
codeHash: emptyEthAccount.codeHash)
proc resetCoreDbAccount(ac: AccountsLedgerRef, v: var CoreDbAccount) =
proc resetCoreDbAccount(ac: AccountsLedgerRef, acc: AccountRef) =
const info = "resetCoreDbAccount(): "
ac.ledger.clearStorage(v.address).isOkOr:
ac.ledger.clearStorage(acc.toAccountKey).isOkOr:
raiseAssert info & $$error
v.nonce = emptyEthAccount.nonce
v.balance = emptyEthAccount.balance
v.codeHash = emptyEthAccount.codeHash
acc.statement.nonce = emptyEthAccount.nonce
acc.statement.balance = emptyEthAccount.balance
acc.statement.codeHash = emptyEthAccount.codeHash
template noRlpException(info: static[string]; code: untyped) =
try:
@ -270,15 +258,20 @@ proc getAccount(
return
# not found in cache, look into state trie
let rc = ac.ledger.fetch address
let rc = ac.ledger.fetch address.toAccountKey
if rc.isOk:
result = AccountRef(
statement: rc.value,
flags: {Alive})
accPath: address.keccakHash,
flags: {Alive})
elif shouldCreate:
result = AccountRef(
statement: address.newCoreDbAccount(),
flags: {Alive, IsNew})
statement: CoreDbAccount(
nonce: emptyEthAccount.nonce,
balance: emptyEthAccount.balance,
codeHash: emptyEthAccount.codeHash),
accPath: address.keccakHash,
flags: {Alive, IsNew})
else:
return # ignore, don't cache
@ -289,6 +282,7 @@ proc getAccount(
proc clone(acc: AccountRef, cloneStorage: bool): AccountRef =
result = AccountRef(
statement: acc.statement,
accPath: acc.accPath,
flags: acc.flags,
code: acc.code)
@ -321,7 +315,7 @@ proc originalStorageValue(
# Not in the original values cache - go to the DB.
let
slotKey = slot.toBytesBE.keccakHash.data
rc = ac.ledger.slotFetch(acc.statement.address, slotKey)
rc = ac.ledger.slotFetch(acc.toAccountKey, slotKey)
if rc.isOk and 0 < rc.value.len:
noRlpException "originalStorageValue()":
result = rlp.decode(rc.value, UInt256)
@ -342,7 +336,7 @@ proc kill(ac: AccountsLedgerRef, acc: AccountRef) =
acc.flags.excl Alive
acc.overlayStorage.clear()
acc.originalStorage = nil
ac.resetCoreDbAccount acc.statement
ac.resetCoreDbAccount acc
acc.code.reset()
type
@ -379,9 +373,10 @@ proc persistStorage(acc: AccountRef, ac: AccountsLedgerRef) =
if acc.originalStorage.isNil:
acc.originalStorage = newTable[UInt256, UInt256]()
# Make sure that there is an account address row on the database. This is
# needed for saving the account-linked storage column on the Aristo database.
ac.ledger.merge(acc.statement).isOkOr:
# Make sure that there is an account entry on the database. This is needed by
# `Aristo` for updating the account's storage area reference. As a side effect,
# this action also updates the latest statement data.
ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr:
raiseAssert info & $$error
# Save `overlayStorage[]` on database
@ -389,11 +384,10 @@ proc persistStorage(acc: AccountRef, ac: AccountsLedgerRef) =
let slotKey = slot.toBytesBE.keccakHash.data
if value > 0:
let encodedValue = rlp.encode(value)
ac.ledger.slotMerge(
acc.statement.address, slotKey, encodedValue).isOkOr:
raiseAssert info & $$error
ac.ledger.slotMerge(acc.toAccountKey, slotKey, encodedValue).isOkOr:
raiseAssert info & $$error
else:
ac.ledger.slotDelete(acc.statement.address, slotKey).isOkOr:
ac.ledger.slotDelete(acc.toAccountKey, slotKey).isOkOr:
if error.error != StoNotFound:
raiseAssert info & $$error
discard
@ -504,7 +498,7 @@ proc contractCollision*(ac: AccountsLedgerRef, address: EthAddress): bool =
return
acc.statement.nonce != 0 or
acc.statement.codeHash != EMPTY_CODE_HASH or
not ac.ledger.slotStateEmptyOrVoid(address)
not ac.ledger.slotStateEmptyOrVoid(acc.toAccountKey)
proc accountExists*(ac: AccountsLedgerRef, address: EthAddress): bool =
let acc = ac.getAccount(address, false)
@ -590,11 +584,11 @@ proc clearStorage*(ac: AccountsLedgerRef, address: EthAddress) =
let acc = ac.getAccount(address)
acc.flags.incl {Alive, NewlyCreated}
let empty = ac.ledger.slotStateEmpty(address).valueOr: return
let empty = ac.ledger.slotStateEmpty(acc.toAccountKey).valueOr: return
if not empty:
# need to clear the storage from the database first
let acc = ac.makeDirty(address, cloneStorage = false)
ac.ledger.clearStorage(address).isOkOr:
ac.ledger.clearStorage(acc.toAccountKey).isOkOr:
raiseAssert info & $$error
# update caches
if acc.originalStorage.isNil.not:
@ -676,27 +670,28 @@ proc persist*(ac: AccountsLedgerRef,
for address in ac.savePoint.selfDestruct:
ac.deleteAccount(address)
for acc in ac.savePoint.dirty.values(): # This is a hotspot in block processing
for (eAddr,acc) in ac.savePoint.dirty.pairs(): # This is a hotspot in block processing
case acc.persistMode()
of Update:
if CodeChanged in acc.flags:
acc.persistCode(ac)
if StorageChanged in acc.flags:
# storageRoot must be updated first
# before persisting account into merkle trie
acc.persistStorage(ac)
ac.ledger.merge(acc.statement).isOkOr:
raiseAssert info & $$error
else:
# This one is only necessary unless `persistStorage()` is run which needs
# to `merge()` the latest statement as well.
ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr:
raiseAssert info & $$error
of Remove:
ac.ledger.delete(acc.statement.address).isOkOr:
ac.ledger.delete(acc.toAccountKey).isOkOr:
if error.error != AccNotFound:
raiseAssert info & $$error
ac.savePoint.cache.del acc.statement.address
ac.savePoint.cache.del eAddr
of DoNothing:
# dead man tell no tales
# remove touched dead account from cache
if Alive notin acc.flags:
ac.savePoint.cache.del acc.statement.address
ac.savePoint.cache.del eAddr
acc.flags = acc.flags - resetFlags
ac.savePoint.dirty.clear()
@ -724,16 +719,17 @@ iterator addresses*(ac: AccountsLedgerRef): EthAddress =
iterator accounts*(ac: AccountsLedgerRef): Account =
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
for _, account in ac.savePoint.cache:
yield ac.ledger.recast(account.statement, updateOk=true).value
for _, acc in ac.savePoint.cache:
yield ac.ledger.recast(
acc.toAccountKey, acc.statement, updateOk=true).value
iterator pairs*(ac: AccountsLedgerRef): (EthAddress, Account) =
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
for address, account in ac.savePoint.cache:
yield (address, ac.ledger.recast(account.statement, updateOk=true).value)
for address, acc in ac.savePoint.cache:
yield (address, ac.ledger.recast(
acc.toAccountKey, acc.statement, updateOk=true).value)
import stew/byteutils
iterator storage*(
ac: AccountsLedgerRef;
eAddr: EthAddress;
@ -741,8 +737,7 @@ iterator storage*(
# beware that if the account not persisted,
# the storage root will not be updated
noRlpException "storage()":
for (slotHash, value) in ac.ledger.slotPairs eAddr:
echo ">>> storage: ", slotHash.toHex, ":", value.toHex
for (slotHash, value) in ac.ledger.slotPairs eAddr.toAccountKey:
let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
if rc.isErr:
warn logTxt "storage()", slotHash, error=($$rc.error)
@ -761,7 +756,7 @@ proc getStorageRoot*(ac: AccountsLedgerRef, address: EthAddress): Hash256 =
# the storage root will not be updated
let acc = ac.getAccount(address, false)
if acc.isNil: EMPTY_ROOT_HASH
else: ac.ledger.slotState(address).valueOr: EMPTY_ROOT_HASH
else: ac.ledger.slotState(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH
proc update(wd: var WitnessData, acc: AccountRef) =
# once the code is touched make sure it doesn't get reset back to false in another update
@ -855,7 +850,7 @@ proc getEthAccount*(ac: AccountsLedgerRef, address: EthAddress): Account =
return emptyEthAccount
## Convert to legacy object, will throw an assert if that fails
let rc = ac.ledger.recast(acc.statement)
let rc = ac.ledger.recast(acc.toAccountKey, acc.statement)
if rc.isErr:
raiseAssert "getAccount(): cannot convert account: " & $$rc.error
rc.value

View File

@ -22,7 +22,8 @@ import
const
EnableApiTracking = false
## When enabled, API functions are logged. Tracking is enabled by setting
## the `trackApi` flag to `true`.
## the `trackApi` flag to `true`. This setting is typically inherited from
## the `CoreDb` descriptor flag `trackLedgerApi`.
EnableApiProfiling = true
## Enable functions profiling (only if `EnableApiTracking` is set `true`.)
@ -61,6 +62,7 @@ when EnableApiTracking:
std/times,
chronicles
func `$`(w: CodeBytesRef): string {.used.} = w.toStr
func `$`(e: Duration): string {.used.} = e.toStr
func `$`(c: CoreDbMptRef): string {.used.} = c.toStr
func `$`(l: seq[Log]): string {.used.} = l.toStr
@ -87,7 +89,7 @@ proc bless*(ldg: LedgerRef; db: CoreDbRef): LedgerRef =
ldg.trackApi = db.trackLedgerApi
when LedgerEnableApiProfiling:
ldg.profTab = db.ldgProfData()
ldg.ifTrackApi: debug apiTxt, api, elapsed, ldgType=ldg.ldgType
ldg.ifTrackApi: debug apiTxt, api, elapsed
ldg
# ------------------------------------------------------------------------------
@ -180,7 +182,7 @@ proc getBalance*(ldg: LedgerRef, eAddr: EthAddress): UInt256 =
proc getCode*(ldg: LedgerRef, eAddr: EthAddress): CodeBytesRef =
ldg.beginTrackApi LdgGetCodeFn
result = ldg.ac.getCode(eAddr)
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result=result.toStr
ldg.ifTrackApi: debug apiTxt, api, elapsed, eAddr, result
proc getCodeHash*(ldg: LedgerRef, eAddr: EthAddress): Hash256 =
ldg.beginTrackApi LdgGetCodeHashFn

View File

@ -97,9 +97,12 @@ func toStr*(w: EthAddress): string =
func toStr*(w: Hash256): string =
w.data.oaToStr
when declared(CoreDbMptRef):
func toStr*(w: CoreDbMptRef): string =
if w.CoreDbMptRef.isNil: "MptRef(nil)" else: "MptRef"
func toStr*(w: CoreDbMptRef): string =
if w.CoreDbMptRef.isNil: "nil" else: "MptRef"
func toStr*(w: CodeBytesRef): string =
if w.CodeBytesRef.isNil: "nil"
else: "[" & $w.bytes.len & "]"
func toStr*(w: Blob): string =
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"

View File

@ -397,7 +397,8 @@ proc execSelfDestruct*(c: Computation, beneficiary: EthAddress) =
localBalance = localBalance.toString,
beneficiary = beneficiary.toHex
func addLogEntry*(c: Computation, log: Log) =
# Using `proc` as `addLogEntry()` might be `proc` in logging mode
proc addLogEntry*(c: Computation, log: Log) =
c.vmState.stateDB.addLogEntry(log)
# some gasRefunded operations still relying
@ -406,7 +407,8 @@ func getGasRefund*(c: Computation): GasInt =
if c.isSuccess:
result = c.gasMeter.gasRefunded
func refundSelfDestruct*(c: Computation) =
# Using `proc` as `selfDestructLen()` might be `proc` in logging mode
proc refundSelfDestruct*(c: Computation) =
let cost = gasFees[c.fork][RefundSelfDestruct]
let num = c.vmState.stateDB.selfDestructLen
c.gasMeter.refundGas(cost * num)

View File

@ -35,12 +35,13 @@ proc setupTxContext*(vmState: BaseVMState,
vmState.determineFork
vmState.gasCosts = vmState.fork.forkToSchedule
# Using `proc` as `incNonce()` might be `proc` in logging mode
proc preExecComputation(c: Computation) =
if not c.msg.isCreate:
c.vmState.mutateStateDB:
db.incNonce(c.msg.sender)
func postExecComputation(c: Computation) =
proc postExecComputation(c: Computation) =
if c.isSuccess:
if c.fork < FkLondon:
# EIP-3529: Reduction in refunds

View File

@ -329,13 +329,13 @@ proc verifyAsmResult(vmState: BaseVMState, boa: Assembler, asmResult: CallResult
let
al = com.db.ctx.getAccounts()
acc = al.fetch(codeAddress).expect "Valid Account Handle"
accPath = codeAddress.keccakHash.data
for kv in boa.storage:
let key = kv[0].toHex()
let val = kv[1].toHex()
let slotKey = UInt256.fromBytesBE(kv[0]).toBytesBE.keccakHash.data
let data = al.slotFetch(codeAddress, slotKey).valueOr: EmptyBlob
let data = al.slotFetch(accPath, slotKey).valueOr: EmptyBlob
let actual = data.toHex
let zerosLen = 64 - (actual.len)
let value = repeat('0', zerosLen) & actual

View File

@ -188,7 +188,6 @@ proc initRunnerDB(
result.initializeEmptyDb
setErrorLevel()
coreDB.trackLegaApi = false
coreDB.trackNewApi = false
coreDB.trackLedgerApi =false

View File

@ -87,7 +87,7 @@ let
mainTest0m* = mainSample
.cloneWith(
name = "-am-some",
numBlocks = 1_000)
numBlocks = 5) # 1_000)
mainTest1m* = mainSample
.cloneWith(

View File

@ -40,7 +40,7 @@ const
EnableExtraLoggingControl = true
var
logStartTime {.used.} = Time()
logSavedEnv {.used.}: (bool,bool,bool)
logSavedEnv {.used.}: (bool,bool)
# ------------------------------------------------------------------------------
# Private helpers
@ -66,18 +66,15 @@ template initLogging(noisy: bool, com: CommonRef) =
setDebugLevel()
debug "start undumping into persistent blocks"
logStartTime = Time()
logSavedEnv = (com.db.trackLegaApi, com.db.trackNewApi,
com.db.trackLedgerApi)
logSavedEnv = (com.db.trackNewApi, com.db.trackLedgerApi)
setErrorLevel()
com.db.trackLegaApi = true
com.db.trackNewApi = true
com.db.trackLedgerApi = true
proc finishLogging(com: CommonRef) =
when EnableExtraLoggingControl:
setErrorLevel()
(com.db.trackLegaApi, com.db.trackNewApi,
com.db.trackLedgerApi) = logSavedEnv
(com.db.trackNewApi, com.db.trackLedgerApi) = logSavedEnv
template startLogging(noisy: bool; num: BlockNumber) =
@ -238,7 +235,6 @@ proc test_chainSync*(
if noisy:
noisy.whisper "***", "Re-run with logging enabled...\n"
setTraceLevel()
com.db.trackLegaApi = false
com.db.trackNewApi = false
com.db.trackLedgerApi = false
discard chain.persistBlocks(w)