Update storage tree admin (#2419)

* Tighten `CoreDb` API for accounts

why:
  Apart from removing cruft, fetching the accounts state root via a
  `CoreDbColRef` record was unnecessarily complicated.
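
example:
  A minimal sketch of the tightened call path. The wrapper name `state()`
  and the `updateOk` flag are assumptions taken from the `stateFn` and
  `fetchAccountState` handlers added further down in this diff:

    let acc  = db.ctx.getAccounts()        # CoreDbAccRef, no column object needed
    let root = acc.state(updateOk = true)  # CoreDbRc[Hash256] with the accounts state root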

* Extend `CoreDb` API for accounts to cover storage tries

why:
  In the future, this will make the notion of column objects obsolete.
  Storage trees will then be indexed by the account address rather than
  by a vertex ID equivalent such as a `CoreDbColRef`.
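
example:
  Sketch of the extended, address-keyed storage API. The wrapper names are
  assumptions derived from the `slot*Fn` callbacks introduced in this diff;
  `eAddr`, `slotKey` and `slotVal` are placeholder variables:

    let acc = db.ctx.getAccounts()
    discard acc.slotMerge(eAddr, slotKey, slotVal)  # storage tree located via `eAddr`
    let val = acc.slotFetch(eAddr, slotKey)         # CoreDbRc[Blob], no `CoreDbColRef` involved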

* Apply new/extended accounts API to ledger and tests

details:
  This makes the `distinct_ledger` module obsolete

* Remove column object constructors

why:
  They were needed as an abstraction of MPT sub-trees, including storage
  trees. Now storage trees are handled via the account they belong to
  (e.g. by its address), and all other trees can be identified by a
  constant, well-known vertex ID. So there is no need for column objects
  anymore.

  There are still some left-over column object methods which will be
  removed next.
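
example:
  Generic columns are now obtained directly via their constant column type
  (taken from the tx-pool hunk below), so no column object constructor is
  needed:

    let tr = AristoDbMemory.newCoreDbRef().ctx.getColumn(CtGeneric, clearData=true)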

* Remove `serialise()` and `PayloadRef` from default Aristo API

why:
  Not needed. `PayloadRef` was used for unstructured/unknown payload
  formats (account or blob) and `serialise()` was used for decoding
  `PayloadRef`. Now it is known in advance what the payload looks
  like.
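
example:
  With typed fetch functions the payload format is implied by the call, so
  there is nothing left to `serialise()`. A sketch using the signatures
  declared in the updated Aristo API below (to be read inside a
  `Result`-returning proc; `accKey`, `root` and `path` are placeholders):

    let acc  = ? db.fetchAccountRecord(accKey)    # Result[AristoAccount,AristoError]
    let data = ? db.fetchGenericData(root, path)  # raw Blob for a generic sub-tree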

* Add query function `hasStorageData()` for checking whether a storage area exists

why:
  Useful for supporting `slotStateEmpty()` of the `CoreDb` API
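
example:
  Sketch of the intended use, mirroring the `slotStateEmpty()` handler
  added in this diff (to be read inside a `Result`-returning proc;
  `accPath` is a placeholder): the slot state is empty exactly when no
  storage data area exists for the account.

    let yn = ? db.hasStorageData(accPath)  # Result[bool,AristoError]
    ok(not yn)                             # `slotStateEmpty()` answer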

* In the `Ledger`, replace `storage.stateEmpty()` with `slotStateEmpty()`

* On Aristo, hide the storage root/vertex ID in the `PayloadRef`

why:
  The storage vertex ID is fully controlled by Aristo while the
  `AristoAccount` object is controlled by the application. With the
  storage root being part of the `AristoAccount` object, there was a
  needless administrative burden of keeping that storage root field up
  to date.
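
example:
  Resulting layout, as added in the `desc_structural` hunk below: the
  application-facing `AristoAccount` no longer carries a storage ID,
  Aristo keeps it in the `AccountData` payload instead.

    AristoAccount* = object
      nonce*:    AccountNonce
      balance*:  UInt256
      codeHash*: Hash256

    # `PayloadRef(pType: AccountData)` additionally carries `stoID*: VertexID`.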

* Remove cruft, update comments etc.

* Update changed MPT access paradigms

why:
  Fixes verified proxy tests

* Fluffy cosmetics
Jordan Hrycaj 2024-06-27 09:01:26 +00:00 committed by GitHub
parent ea94e8a351
commit 61bbf40014
38 changed files with 936 additions and 1365 deletions

View File

@ -166,7 +166,7 @@ proc vmExecInit(xp: TxPoolRef): Result[TxPackerStateRef, string]
let packer = TxPackerStateRef( # return value
xp: xp,
tr: AristoDbMemory.newCoreDbRef().ctx.getMpt CtGeneric,
tr: AristoDbMemory.newCoreDbRef().ctx.getColumn(CtGeneric, clearData=true),
balance: xp.chain.vmState.readOnlyStateDB.getBalance(xp.chain.feeRecipient),
numBlobPerBlock: 0,
)
@ -250,7 +250,7 @@ proc vmExecCommit(pst: TxPackerStateRef)
vmState.receipts.setLen(nItems)
xp.chain.receipts = vmState.receipts
xp.chain.txRoot = pst.tr.getColumn.state.valueOr:
xp.chain.txRoot = pst.tr.state.valueOr:
raiseAssert "vmExecCommit(): state() failed " & $$error
xp.chain.stateRoot = vmState.stateDB.rootHash

View File

@ -29,14 +29,15 @@ import
aristo/aristo_nearby
export
leftPairs, # iterators
rightPairs
rightPairs,
rightPairsAccount,
rightPairsGeneric,
rightPairsStorage
import
aristo/aristo_desc/[desc_identifiers, desc_structural]
export
AristoAccount,
PayloadRef,
PayloadType,
desc_identifiers,
`==`

View File

@ -20,7 +20,7 @@ import
./aristo_init/memory_db,
"."/[aristo_delete, aristo_desc, aristo_fetch, aristo_get, aristo_hashify,
aristo_hike, aristo_init, aristo_merge, aristo_path, aristo_profile,
aristo_serialise, aristo_tx]
aristo_tx]
export
AristoDbProfListRef
@ -49,7 +49,7 @@ type
## previous layer. The previous transaction is returned if there
## was any.
AristoApiDeleteAccountPayloadFn* =
AristoApiDeleteAccountRecordFn* =
proc(db: AristoDbRef;
path: openArray[byte];
): Result[void,AristoError]
@ -108,13 +108,19 @@ type
## Merkle hash tag for vertex with ID 1 and a bespoke `uint64` identifier
## (may be interpreted as block number.)
AristoApiFetchAccountPayloadFn* =
AristoApiFetchAccountRecordFn* =
proc(db: AristoDbRef;
path: openArray[byte];
): Result[AristoAccount,AristoError]
{.noRaise.}
## Fetch an account record from the database indexed by `path`.
AristoApiFetchAccountStateFn* =
proc(db: AristoDbRef;
): Result[Hash256,AristoError]
{.noRaise.}
## Fetch the Merkle hash of the account root.
AristoApiFetchGenericDataFn* =
proc(db: AristoDbRef;
root: VertexID;
@ -124,6 +130,13 @@ type
## For a generic sub-tree starting at `root`, fetch the data record
## indexed by `path`.
AristoApiFetchGenericStateFn* =
proc(db: AristoDbRef;
root: VertexID;
): Result[Hash256,AristoError]
{.noRaise.}
## Fetch the Merkle hash of the argument `root`.
AristoApiFetchStorageDataFn* =
proc(db: AristoDbRef;
path: openArray[byte];
@ -133,6 +146,13 @@ type
## For a storage tree related to account `accPath`, fetch the data
## record from the database indexed by `path`.
AristoApiFetchStorageStateFn* =
proc(db: AristoDbRef;
accPath: PathID;
): Result[Hash256,AristoError]
{.noRaise.}
## Fetch the Merkle hash of the storage root related to `accPath`.
AristoApiFindTxFn* =
proc(db: AristoDbRef;
vid: VertexID;
@ -246,6 +266,14 @@ type
## For a storage tree related to account `accPath`, query whether the
## data record indexed by `path` exists on the database.
AristoApiHasStorageDataFn* =
proc(db: AristoDbRef;
accPath: PathID;
): Result[bool,AristoError]
{.noRaise.}
## For a storage tree related to account `accPath`, query whether there
## is a non-empty data storage area at all.
AristoApiHikeUpFn* =
proc(path: NibblesBuf;
root: VertexID;
@ -277,18 +305,14 @@ type
## `reCentre()` for details.) This function is a fast version of
## `db.forked.toSeq.len`.
AristoApiMergeAccountPayloadFn* =
AristoApiMergeAccountRecordFn* =
proc(db: AristoDbRef;
accPath: openArray[byte];
accPayload: AristoAccount;
accRec: AristoAccount;
): Result[bool,AristoError]
{.noRaise.}
## Merge the key-value-pair argument `(accKey,accPayload)` as an account
## Merge the key-value-pair argument `(accKey,accRec)` as an account
## ledger value, i.e. the sub-tree starting at `VertexID(1)`.
##
## The payload argument `accPayload` must have the `storageID` field
## either unset/invalid or referring to a existing vertex which will be
## assumed to be a storage tree.
AristoApiMergeGenericDataFn* =
proc( db: AristoDbRef;
@ -377,14 +401,6 @@ type
## operations performed for this transaction. The previous transaction
## is returned if there was any.
AristoApiSerialiseFn* =
proc(db: AristoDbRef;
pyl: PayloadRef;
): Result[Blob,(VertexID,AristoError)]
{.noRaise.}
## Encode the data payload of the argument `pyl` as RLP `Blob` if
## it is of account type, otherwise pass the data as is.
AristoApiTxBeginFn* =
proc(db: AristoDbRef
): Result[AristoTxRef,AristoError]
@ -409,75 +425,96 @@ type
AristoApiObj* = object of RootObj
## Useful set of `Aristo` functions that can be filtered, stacked etc.
commit*: AristoApiCommitFn
deleteAccountPayload*: AristoApiDeleteAccountPayloadFn
deleteAccountRecord*: AristoApiDeleteAccountRecordFn
deleteGenericData*: AristoApiDeleteGenericDataFn
deleteGenericTree*: AristoApiDeleteGenericTreeFn
deleteStorageData*: AristoApiDeleteStorageDataFn
deleteStorageTree*: AristoApiDeleteStorageTreeFn
fetchLastSavedState*: AristoApiFetchLastSavedStateFn
fetchAccountPayload*: AristoApiFetchAccountPayloadFn
fetchAccountRecord*: AristoApiFetchAccountRecordFn
fetchAccountState*: AristoApiFetchAccountStateFn
fetchGenericData*: AristoApiFetchGenericDataFn
fetchGenericState*: AristoApiFetchGenericStateFn
fetchStorageData*: AristoApiFetchStorageDataFn
fetchStorageState*: AristoApiFetchStorageStateFn
findTx*: AristoApiFindTxFn
finish*: AristoApiFinishFn
forget*: AristoApiForgetFn
forkTx*: AristoApiForkTxFn
getKeyRc*: AristoApiGetKeyRcFn
hashify*: AristoApiHashifyFn
hasPathAccount*: AristoApiHasPathAccountFn
hasPathGeneric*: AristoApiHasPathGenericFn
hasPathStorage*: AristoApiHasPathStorageFn
hasStorageData*: AristoApiHasStorageDataFn
hikeUp*: AristoApiHikeUpFn
isTop*: AristoApiIsTopFn
level*: AristoApiLevelFn
nForked*: AristoApiNForkedFn
mergeAccountPayload*: AristoApiMergeAccountPayloadFn
mergeAccountRecord*: AristoApiMergeAccountRecordFn
mergeGenericData*: AristoApiMergeGenericDataFn
mergeStorageData*: AristoApiMergeStorageDataFn
pathAsBlob*: AristoApiPathAsBlobFn
persist*: AristoApiPersistFn
reCentre*: AristoApiReCentreFn
rollback*: AristoApiRollbackFn
serialise*: AristoApiSerialiseFn
txBegin*: AristoApiTxBeginFn
txTop*: AristoApiTxTopFn
AristoApiProfNames* = enum
## Index/name mapping for profile slots
AristoApiProfTotal = "total"
AristoApiProfTotal = "total"
AristoApiProfCommitFn = "commit"
AristoApiProfDeleteAccountPayloadFn = "deleteAccountPayload"
AristoApiProfDeleteAccountRecordFn = "deleteAccountRecord"
AristoApiProfDeleteGenericDataFn = "deleteGnericData"
AristoApiProfDeleteGenericTreeFn = "deleteGnericTree"
AristoApiProfDeleteStorageDataFn = "deleteStorageData"
AristoApiProfDeleteStorageTreeFn = "deleteStorageTree"
AristoApiProfFetchLastSavedStateFn = "fetchPayload"
AristoApiProfFetchAccountPayloadFn = "fetchAccountPayload"
AristoApiProfFetchLastSavedStateFn = "fetchLastSavedState"
AristoApiProfFetchAccountRecordFn = "fetchAccountRecord"
AristoApiProfFetchAccountStateFn = "fetchAccountState"
AristoApiProfFetchGenericDataFn = "fetchGenericData"
AristoApiProfFetchGenericStateFn = "fetchGenericState"
AristoApiProfFetchStorageDataFn = "fetchStorageData"
AristoApiProfFetchStorageStateFn = "fetchStorageState"
AristoApiProfFindTxFn = "findTx"
AristoApiProfFinishFn = "finish"
AristoApiProfForgetFn = "forget"
AristoApiProfForkTxFn = "forkTx"
AristoApiProfGetKeyRcFn = "getKeyRc"
AristoApiProfHashifyFn = "hashify"
AristoApiProfHasPathAccountFn = "hasPathAccount"
AristoApiProfHasPathGenericFn = "hasPathGeneric"
AristoApiProfHasPathStorageFn = "hasPathStorage"
AristoApiProfHasStorageDataFn = "hasStorageData"
AristoApiProfHikeUpFn = "hikeUp"
AristoApiProfIsTopFn = "isTop"
AristoApiProfLevelFn = "level"
AristoApiProfNForkedFn = "nForked"
AristoApiProfMergeAccountPayloadFn = "mergeAccountPayload"
AristoApiProfMergeAccountRecordFn = "mergeAccountRecord"
AristoApiProfMergeGenericDataFn = "mergeGenericData"
AristoApiProfMergeStorageDataFn = "mergeStorageData"
AristoApiProfPathAsBlobFn = "pathAsBlob"
AristoApiProfPersistFn = "persist"
AristoApiProfReCentreFn = "reCentre"
AristoApiProfRollbackFn = "rollback"
AristoApiProfSerialiseFn = "serialise"
AristoApiProfTxBeginFn = "txBegin"
AristoApiProfTxTopFn = "txTop"
@ -503,36 +540,47 @@ type
when AutoValidateApiHooks:
proc validate(api: AristoApiObj|AristoApiRef) =
doAssert not api.commit.isNil
doAssert not api.deleteAccountPayload.isNil
doAssert not api.deleteAccountRecord.isNil
doAssert not api.deleteGenericData.isNil
doAssert not api.deleteGenericTree.isNil
doAssert not api.deleteStorageData.isNil
doAssert not api.deleteStorageTree.isNil
doAssert not api.fetchLastSavedState.isNil
doAssert not api.fetchAccountPayload.isNil
doAssert not api.fetchAccountRecord.isNil
doAssert not api.fetchAccountState.isNil
doAssert not api.fetchGenericData.isNil
doAssert not api.fetchGenericState.isNil
doAssert not api.fetchStorageData.isNil
doAssert not api.fetchStorageState.isNil
doAssert not api.findTx.isNil
doAssert not api.finish.isNil
doAssert not api.forget.isNil
doAssert not api.forkTx.isNil
doAssert not api.getKeyRc.isNil
doAssert not api.hashify.isNil
doAssert not api.hasPathAccount.isNil
doAssert not api.hasPathGeneric.isNil
doAssert not api.hasPathStorage.isNil
doAssert not api.hasStorageData.isNil
doAssert not api.hikeUp.isNil
doAssert not api.isTop.isNil
doAssert not api.level.isNil
doAssert not api.nForked.isNil
doAssert not api.mergeAccountPayload.isNil
doAssert not api.mergeAccountRecord.isNil
doAssert not api.mergeGenericData.isNil
doAssert not api.mergeStorageData.isNil
doAssert not api.pathAsBlob.isNil
doAssert not api.persist.isNil
doAssert not api.reCentre.isNil
doAssert not api.rollback.isNil
doAssert not api.serialise.isNil
doAssert not api.txBegin.isNil
doAssert not api.txTop.isNil
@ -562,36 +610,47 @@ func init*(api: var AristoApiObj) =
when AutoValidateApiHooks:
api.reset
api.commit = commit
api.deleteAccountPayload = deleteAccountPayload
api.deleteAccountRecord = deleteAccountRecord
api.deleteGenericData = deleteGenericData
api.deleteGenericTree = deleteGenericTree
api.deleteStorageData = deleteStorageData
api.deleteStorageTree = deleteStorageTree
api.fetchLastSavedState = fetchLastSavedState
api.fetchAccountPayload = fetchAccountPayload
api.fetchAccountRecord = fetchAccountRecord
api.fetchAccountState = fetchAccountState
api.fetchGenericData = fetchGenericData
api.fetchGenericState = fetchGenericState
api.fetchStorageData = fetchStorageData
api.fetchStorageState = fetchStorageState
api.findTx = findTx
api.finish = finish
api.forget = forget
api.forkTx = forkTx
api.getKeyRc = getKeyRc
api.hashify = hashify
api.hasPathAccount = hasPathAccount
api.hasPathGeneric = hasPathGeneric
api.hasPathStorage = hasPathStorage
api.hasStorageData = hasStorageData
api.hikeUp = hikeUp
api.isTop = isTop
api.level = level
api.nForked = nForked
api.mergeAccountPayload = mergeAccountPayload
api.mergeAccountRecord = mergeAccountRecord
api.mergeGenericData = mergeGenericData
api.mergeStorageData = mergeStorageData
api.pathAsBlob = pathAsBlob
api.persist = persist
api.reCentre = reCentre
api.rollback = rollback
api.serialise = serialise
api.txBegin = txBegin
api.txTop = txTop
when AutoValidateApiHooks:
@ -604,36 +663,46 @@ func init*(T: type AristoApiRef): T =
func dup*(api: AristoApiRef): AristoApiRef =
result = AristoApiRef(
commit: api.commit,
deleteAccountPayload: api.deleteAccountPayload,
deleteAccountRecord: api.deleteAccountRecord,
deleteGenericData: api.deleteGenericData,
deleteGenericTree: api.deleteGenericTree,
deleteStorageData: api.deleteStorageData,
deleteStorageTree: api.deleteStorageTree,
fetchLastSavedState: api.fetchLastSavedState,
fetchAccountPayload: api.fetchAccountPayload,
fetchAccountRecord: api.fetchAccountRecord,
fetchAccountState: api.fetchAccountState,
fetchGenericData: api.fetchGenericData,
fetchGenericState: api.fetchGenericState,
fetchStorageData: api.fetchStorageData,
fetchStorageState: api.fetchStorageState,
findTx: api.findTx,
finish: api.finish,
forget: api.forget,
forkTx: api.forkTx,
getKeyRc: api.getKeyRc,
hashify: api.hashify,
hasPathAccount: api.hasPathAccount,
hasPathGeneric: api.hasPathGeneric,
hasPathStorage: api.hasPathStorage,
hasStorageData: api.hasStorageData,
hikeUp: api.hikeUp,
isTop: api.isTop,
level: api.level,
nForked: api.nForked,
mergeAccountPayload: api.mergeAccountPayload,
mergeAccountRecord: api.mergeAccountRecord,
mergeGenericData: api.mergeGenericData,
mergeStorageData: api.mergeStorageData,
pathAsBlob: api.pathAsBlob,
persist: api.persist,
reCentre: api.reCentre,
rollback: api.rollback,
serialise: api.serialise,
txBegin: api.txBegin,
txTop: api.txTop)
when AutoValidateApiHooks:
@ -671,10 +740,10 @@ func init*(
AristoApiProfCommitFn.profileRunner:
result = api.commit(a)
profApi.deleteAccountPayload =
profApi.deleteAccountRecord =
proc(a: AristoDbRef; b: openArray[byte]): auto =
AristoApiProfDeleteAccountPayloadFn.profileRunner:
result = api.deleteAccountPayload(a, b)
AristoApiProfDeleteAccountRecordFn.profileRunner:
result = api.deleteAccountRecord(a, b)
profApi.deleteGenericData =
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
@ -701,21 +770,36 @@ func init*(
AristoApiProfFetchLastSavedStateFn.profileRunner:
result = api.fetchLastSavedState(a)
profApi.fetchAccountPayload =
profApi.fetchAccountRecord =
proc(a: AristoDbRef; b: openArray[byte]): auto =
AristoApiProfFetchAccountPayloadFn.profileRunner:
result = api.fetchAccountPayload(a, b)
AristoApiProfFetchAccountRecordFn.profileRunner:
result = api.fetchAccountRecord(a, b)
profApi.fetchAccountState =
proc(a: AristoDbRef): auto =
AristoApiProfFetchAccountStateFn.profileRunner:
result = api.fetchAccountState(a)
profApi.fetchGenericData =
proc(a: AristoDbRef; b: VertexID; c: openArray[byte]): auto =
AristoApiProfFetchGenericDataFn.profileRunner:
result = api.fetchGenericData(a, b, c)
profApi.fetchGenericState =
proc(a: AristoDbRef; b: VertexID;): auto =
AristoApiProfFetchGenericStateFn.profileRunner:
result = api.fetchGenericState(a, b)
profApi.fetchStorageData =
proc(a: AristoDbRef; b: openArray[byte]; c: PathID;): auto =
AristoApiProfFetchStorageDataFn.profileRunner:
result = api.fetchStorageData(a, b, c)
profApi.fetchStorageState =
proc(a: AristoDbRef; b: PathID;): auto =
AristoApiProfFetchStorageStateFn.profileRunner:
result = api.fetchStorageState(a, b)
profApi.findTx =
proc(a: AristoDbRef; b: VertexID; c: HashKey): auto =
AristoApiProfFindTxFn.profileRunner:
@ -757,10 +841,15 @@ func init*(
result = api.hasPathGeneric(a, b, c)
profApi.hasPathStorage =
proc(a: AristoDbRef; b: openArray[byte]; c: PathID;): auto =
proc(a: AristoDbRef; b: openArray[byte]; c: PathID): auto =
AristoApiProfHasPathStorageFn.profileRunner:
result = api.hasPathStorage(a, b, c)
profApi.hasStorageData =
proc(a: AristoDbRef; b: PathID): auto =
AristoApiProfHasStorageDataFn.profileRunner:
result = api.hasStorageData(a, b)
profApi.hikeUp =
proc(a: NibblesBuf; b: VertexID; c: AristoDbRef): auto =
AristoApiProfHikeUpFn.profileRunner:
@ -781,10 +870,10 @@ func init*(
AristoApiProfNForkedFn.profileRunner:
result = api.nForked(a)
profApi.mergeAccountPayload =
profApi.mergeAccountRecord =
proc(a: AristoDbRef; b, c: openArray[byte]): auto =
AristoApiProfMergeAccountPayloadFn.profileRunner:
result = api.mergeAccountPayload(a, b, c)
AristoApiProfMergeAccountRecordFn.profileRunner:
result = api.mergeAccountRecord(a, b, c)
profApi.mergeGenericData =
proc(a: AristoDbRef; b: VertexID, c, d: openArray[byte]): auto =
@ -816,11 +905,6 @@ func init*(
AristoApiProfRollbackFn.profileRunner:
result = api.rollback(a)
profApi.serialise =
proc(a: AristoDbRef; b: PayloadRef): auto =
AristoApiProfSerialiseFn.profileRunner:
result = api.serialise(a, b)
profApi.txBegin =
proc(a: AristoDbRef): auto =
AristoApiProfTxBeginFn.profileRunner:

View File

@ -60,9 +60,9 @@ proc blobifyTo*(pyl: PayloadRef, data: var Blob) =
mask = mask or 0x04
data &= pyl.account.balance.truncate(uint64).uint64.toBytesBE
if VertexID(0) < pyl.account.storageID:
if VertexID(0) < pyl.stoID:
mask = mask or 0x10
data &= pyl.account.storageID.uint64.toBytesBE
data &= pyl.stoID.uint64.toBytesBE
if pyl.account.codeHash != VOID_CODE_HASH:
mask = mask or 0x80
@ -218,7 +218,7 @@ proc deblobifyTo(
of 0x00:
discard
of 0x10:
pAcc.account.storageID = (? data.load64 start).VertexID
pAcc.stoID = (? data.load64 start).VertexID
else:
return err(DeblobStorageLenUnsupported)

View File

@ -32,7 +32,7 @@ proc toNodeBE(
of Leaf:
let node = NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
if vtx.lData.pType == AccountData:
let vid = vtx.lData.account.storageID
let vid = vtx.lData.stoID
if vid.isValid:
let rc = db.getKeyBE vid
if rc.isErr or not rc.value.isValid:

View File

@ -106,7 +106,7 @@ proc checkTopCommon*(
case vtx.vType:
of Leaf:
if vtx.lData.pType == AccountData:
let stoVid = vtx.lData.account.storageID
let stoVid = vtx.lData.stoID
if stoVid.isValid:
if stoVid in stoRoots:
return err((stoVid,CheckAnyVidSharedStorageRoot))

View File

@ -214,7 +214,7 @@ proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
result = "("
result &= ($p.account.nonce).stripZeros(toExp=true) & ","
result &= ($p.account.balance).stripZeros(toExp=true) & ","
result &= p.account.storageID.ppVid & ","
result &= p.stoID.ppVid & ","
result &= p.account.codeHash.ppCodeHash & ")"
proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =

View File

@ -19,30 +19,13 @@ import
std/[sets, typetraits],
eth/common,
results,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_layers,
"."/[aristo_desc, aristo_fetch, aristo_get, aristo_hike, aristo_layers,
aristo_utils, aristo_vid]
type
SaveToVaeVidFn =
proc(err: AristoError): (VertexID,AristoError) {.gcsafe, raises: [].}
# ------------------------------------------------------------------------------
# Private heplers
# ------------------------------------------------------------------------------
func toVae(err: AristoError): (VertexID,AristoError) =
## Map single error to error pair with dummy vertex
(VertexID(0),err)
func toVae(vid: VertexID): SaveToVaeVidFn =
## Map single error to error pair with argument vertex
result =
proc(err: AristoError): (VertexID,AristoError) =
return (vid,err)
func toVae(err: (VertexID,AristoError,Hike)): (VertexID,AristoError) =
(err[0], err[1])
proc branchStillNeeded(vtx: VertexRef): Result[int,void] =
## Returns the nibble if there is only one reference left.
var nibble = -1
@ -346,7 +329,7 @@ proc deleteImpl(
# Public functions
# ------------------------------------------------------------------------------
proc deleteAccountPayload*(
proc deleteAccountRecord*(
db: AristoDbRef;
path: openArray[byte];
): Result[void,AristoError] =
@ -358,7 +341,7 @@ proc deleteAccountPayload*(
if error[1] in HikeAcceptableStopsNotFound:
return err(DelPathNotFound)
return err(error[1])
stoID = hike.legs[^1].wp.vtx.lData.account.storageID
stoID = hike.legs[^1].wp.vtx.lData.stoID
# Delete storage tree if present
if stoID.isValid:
@ -431,9 +414,12 @@ proc deleteStorageData*(
## will return `true`.
##
let
accHike = ? db.retrieveStoAccHike accPath
accHike = db.fetchAccountHike(accPath).valueOr:
if error == FetchAccInaccessible:
return err(DelStoAccMissing)
return err(error)
wpAcc = accHike.legs[^1].wp
stoID = wpAcc.vtx.lData.account.storageID
stoID = wpAcc.vtx.lData.stoID
if not stoID.isValid:
return err(DelStoRootMissing)
@ -455,7 +441,7 @@ proc deleteStorageData*(
# De-register the deleted storage tree from the account record
let leaf = wpAcc.vtx.dup # Dup on modify
leaf.lData.account.storageID = VertexID(0)
leaf.lData.stoID = VertexID(0)
db.layersPutVtx(VertexID(1), wpAcc.vid, leaf)
db.layersResKey(VertexID(1), wpAcc.vid)
ok(true)
@ -468,12 +454,12 @@ proc deleteStorageTree*(
## associated to the account argument `accPath`.
##
let
accHike = db.retrieveStoAccHike(accPath).valueOr:
if error == UtilsAccInaccessible:
accHike = db.fetchAccountHike(accPath).valueOr:
if error == FetchAccInaccessible:
return err(DelStoAccMissing)
return err(error)
wpAcc = accHike.legs[^1].wp
stoID = wpAcc.vtx.lData.account.storageID
stoID = wpAcc.vtx.lData.stoID
if not stoID.isValid:
return err(DelStoRootMissing)
@ -485,7 +471,7 @@ proc deleteStorageTree*(
# De-register the deleted storage tree from the accounts record
let leaf = wpAcc.vtx.dup # Dup on modify
leaf.lData.account.storageID = VertexID(0)
leaf.lData.stoID = VertexID(0)
db.layersPutVtx(VertexID(1), wpAcc.vid, leaf)
db.layersResKey(VertexID(1), wpAcc.vid)
ok()

View File

@ -143,11 +143,13 @@ type
# Fetch functions from `aristo_fetch.nim`
FetchPathNotFound
FetchAccInaccessible
FetchAccPathWithoutLeaf
FetchAccRootNotAccepted
FetchLeafKeyInvalid
FetchPathInvalid
FetchPathNotFound
FetchRootVidMissing
FetchAccRootNotAccepted
FetchStoRootNotAccepted
@ -192,7 +194,6 @@ type
MergeBranchRootExpected
MergeHashKeyDiffersFromCached
MergeHashKeyInvalid
MergeLeafCantChangeStorageID
MergeLeafGarbledHike
MergeLeafPathCachedAlready
MergeLeafPathOnBackendAlready
@ -214,6 +215,7 @@ type
MergeRootKeysMissing
MergeRootKeysOverflow
MergeRootVidMissing
MergeStoAccMissing
# Neighbour vertex, tree traversal `nearbyRight()` and `nearbyLeft()`
@ -294,11 +296,9 @@ type
# Functions from `aristo_utils.nim`
UtilsAccInaccessible
UtilsAccLeafPayloadExpected
UtilsAccNodeUnsupported
UtilsAccPathMissing
UtilsAccPathWithoutLeaf
UtilsAccStorageKeyMissing
UtilsAccVtxUnsupported
UtilsAccWrongStorageRoot

View File

@ -33,9 +33,10 @@ type
Branch
AristoAccount* = object
## Application relevant part of an Ethereum account. Note that the storage
## data/tree reference is not part of the account (see `PayloadRef` below.)
nonce*: AccountNonce ## Some `uint64` type
balance*: UInt256
storageID*: VertexID ## Implies storage root Merkle hash key
codeHash*: Hash256
PayloadType* = enum
@ -52,6 +53,7 @@ type
rawBlob*: Blob ## Opaque data, default value
of AccountData:
account*: AristoAccount
stoID*: VertexID ## Storage vertex ID (if any)
VertexRef* = ref object of RootRef
## Vertex for building a hexary Patricia or Merkle Patricia Trie
@ -163,7 +165,8 @@ proc `==`*(a, b: PayloadRef): bool =
if a.rawBlob != b.rawBlob:
return false
of AccountData:
if a.account != b.account:
if a.account != b.account or
a.stoID != b.stoID:
return false
true
@ -219,7 +222,8 @@ func dup*(pld: PayloadRef): PayloadRef =
of AccountData:
PayloadRef(
pType: AccountData,
account: pld.account)
account: pld.account,
stoID: pld.stoID)
func dup*(vtx: VertexRef): VertexRef =
## Duplicate vertex.

View File

@ -17,7 +17,7 @@ import
std/typetraits,
eth/common,
results,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_utils]
"."/[aristo_desc, aristo_get, aristo_hike]
# ------------------------------------------------------------------------------
# Private functions
@ -52,18 +52,15 @@ proc retrievePayload(
ok hike.legs[^1].wp.vtx.lData
proc retrieveStoID(
proc retrieveMerkleHash(
db: AristoDbRef;
accPath: PathID;
): Result[VertexID,AristoError] =
let
accHike = ? db.retrieveStoAccHike accPath # checks for `AccountData`
stoID = accHike.legs[^1].wp.vtx.lData.account.storageID
if not stoID.isValid:
return err(FetchPathNotFound)
ok stoID
root: VertexID;
): Result[Hash256,AristoError] =
let key = db.getKeyRc(root).valueOr:
if error == GetKeyNotFound:
return ok(EMPTY_ROOT_HASH) # empty sub-tree
return err(error)
ok key.to(Hash256)
proc hasPayload(
@ -80,6 +77,52 @@ proc hasPayload(
return err(error[1])
ok(true)
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc fetchAccountHike*(
db: AristoDbRef; # Database
accPath: PathID; # Implies a storage ID (if any)
): Result[Hike,AristoError] =
## Verify that the `accPath` argument properly refers to a storage root
## vertex ID. The function will reset the keys along the `accPath` for
## being modified.
##
## On success, the function will return an account leaf pair with the leaf
## vertex and the vertex ID.
##
# Expand vertex path to account leaf
var hike = accPath.to(NibblesBuf).hikeUp(VertexID(1), db).valueOr:
return err(FetchAccInaccessible)
# Extract the account payload from the leaf
let wp = hike.legs[^1].wp
if wp.vtx.vType != Leaf:
return err(FetchAccPathWithoutLeaf)
assert wp.vtx.lData.pType == AccountData # debugging only
ok(move(hike))
proc fetchStorageID*(
db: AristoDbRef;
accPath: PathID;
): Result[VertexID,AristoError] =
## Public helper function for retrieving a storage (vertex) ID for a
## given account.
let
accHike = db.fetchAccountHike(accPath).valueOr:
if error == FetchAccInaccessible:
return err(FetchPathNotFound)
return err(error)
stoID = accHike.legs[^1].wp.vtx.lData.stoID
if not stoID.isValid:
return err(FetchPathNotFound)
ok stoID
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
@ -93,7 +136,7 @@ proc fetchLastSavedState*(
db.getLstUbe()
proc fetchAccountPayload*(
proc fetchAccountRecord*(
db: AristoDbRef;
path: openArray[byte];
): Result[AristoAccount,AristoError] =
@ -103,6 +146,12 @@ proc fetchAccountPayload*(
assert pyl.pType == AccountData # debugging only
ok pyl.account
proc fetchAccountState*(
db: AristoDbRef;
): Result[Hash256,AristoError] =
## Fetch the Merkle hash of the account root.
db.retrieveMerkleHash VertexID(1)
proc hasPathAccount*(
db: AristoDbRef;
path: openArray[byte];
@ -126,6 +175,13 @@ proc fetchGenericData*(
assert pyl.pType == RawData # debugging only
ok pyl.rawBlob
proc fetchGenericState*(
db: AristoDbRef;
root: VertexID;
): Result[Hash256,AristoError] =
## Fetch the Merkle hash of the argument `root`.
db.retrieveMerkleHash root
proc hasPathGeneric*(
db: AristoDbRef;
root: VertexID;
@ -146,10 +202,21 @@ proc fetchStorageData*(
## For a storage tree related to account `accPath`, fetch the data record
## from the database indexed by `path`.
##
let pyl = ? db.retrievePayload(? db.retrieveStoID accPath, path)
let pyl = ? db.retrievePayload(? db.fetchStorageID accPath, path)
assert pyl.pType == RawData # debugging only
ok pyl.rawBlob
proc fetchStorageState*(
db: AristoDbRef;
accPath: PathID;
): Result[Hash256,AristoError] =
## Fetch the Merkle hash of the storage root related to `accPath`.
let stoID = db.fetchStorageID(accPath).valueOr:
if error == FetchPathNotFound:
return ok(EMPTY_ROOT_HASH) # no sub-tree
return err(error)
db.retrieveMerkleHash stoID
proc hasPathStorage*(
db: AristoDbRef;
path: openArray[byte];
@ -158,7 +225,20 @@ proc hasPathStorage*(
## For a storage tree related to account `accPath`, query whether the data
## record indexed by `path` exists on the database.
##
db.hasPayload(? db.retrieveStoID accPath, path)
db.hasPayload(? db.fetchStorageID accPath, path)
proc hasStorageData*(
db: AristoDbRef;
accPath: PathID;
): Result[bool,AristoError] =
## For a storage tree related to account `accPath`, query whether there
## is a non-empty data storage area at all.
##
let stoID = db.fetchStorageID(accPath).valueOr:
if error == FetchPathNotFound:
return ok(false) # no sub-tree
return err(error)
ok stoID.isValid
# ------------------------------------------------------------------------------
# End

View File

@ -28,7 +28,7 @@ import
std/typetraits,
eth/common,
results,
"."/[aristo_desc, aristo_layers, aristo_utils, aristo_vid],
"."/[aristo_desc, aristo_fetch, aristo_layers, aristo_utils, aristo_vid],
./aristo_merge/[merge_payload_helper, merge_proof]
export
@ -41,10 +41,10 @@ const
# Public functions
# ------------------------------------------------------------------------------
proc mergeAccountPayload*(
proc mergeAccountRecord*(
db: AristoDbRef; # Database, top layer
accKey: openArray[byte]; # Even nibbled byte path
accPayload: AristoAccount; # Payload value
accKey: openArray[byte]; # Even nibbled byte path
accRec: AristoAccount; # Account data
): Result[bool,AristoError] =
## Merge the key-value-pair argument `(accKey,accPayload)` as an account
## ledger value, i.e. the sub-tree starting at `VertexID(1)`.
@ -58,7 +58,7 @@ proc mergeAccountPayload*(
## already.
##
let
pyl = PayloadRef(pType: AccountData, account: accPayload)
pyl = PayloadRef(pType: AccountData, account: accRec)
rc = db.mergePayloadImpl(VertexID(1), accKey, pyl, VidVtxPair())
if rc.isOk:
ok true
@ -120,9 +120,12 @@ proc mergeStorageData*(
## otherwise `VertexID(0)`.
##
let
accHike = ? db.retrieveStoAccHike accPath # checks for `AccountData`
accHike = db.fetchAccountHike(accPath).valueOr:
if error == FetchAccInaccessible:
return err(MergeStoAccMissing)
return err(error)
wpAcc = accHike.legs[^1].wp
stoID = wpAcc.vtx.lData.account.storageID
stoID = wpAcc.vtx.lData.stoID
# Provide new storage ID when needed
useID = if stoID.isValid: stoID else: db.vidFetch()
@ -141,7 +144,7 @@ proc mergeStorageData*(
else:
# Make sure that there is an account that refers to that storage trie
let leaf = wpAcc.vtx.dup # Dup on modify
leaf.lData.account.storageID = useID
leaf.lData.stoID = useID
db.layersPutVtx(VertexID(1), wpAcc.vid, leaf)
db.layersResKey(VertexID(1), wpAcc.vid)
return ok useID

View File

@ -418,11 +418,9 @@ proc mergePayloadUpdate(
if vid in db.pPrf:
return err(MergeLeafProofModeLock)
# Make certain that the account leaf can be replaced
# Update accounts storage root which is handled implicitely
if hike.root == VertexID(1):
# Only `AccountData` payload on `VertexID(1)` tree
if payload.account.storageID != leafLeg.wp.vtx.lData.account.storageID:
return err(MergeLeafCantChangeStorageID)
payload.stoID = leafLeg.wp.vtx.lData.stoID
# Update vertex and hike
let vtx = VertexRef(

View File

@ -90,7 +90,7 @@ proc mergeNodeImpl(
if not sid.isValid:
sid = db.vidFetch
db.layersPutProof(sid, acc.storageRoot.to(HashKey))
pyl.account.storageID = sid
pyl.stoID = sid
vtx.lData = pyl
except RlpError:
return err(MergeNodeAccountPayloadError)

View File

@ -21,10 +21,10 @@
{.push raises: [].}
import
std/tables,
std/[tables, typetraits],
eth/common,
results,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_path]
"."/[aristo_desc, aristo_fetch, aristo_get, aristo_hike, aristo_path]
# ------------------------------------------------------------------------------
# Private helpers
@ -451,6 +451,38 @@ iterator rightPairs*(
rc = hike.right db
# End while
iterator rightPairsAccount*(
db: AristoDbRef; # Database layer
start = low(PathID); # Before or at first value
): (PathID,AristoAccount) =
## Variant of `rightPairs()` for accounts tree
for (lty,pyl) in db.rightPairs LeafTie(root: VertexID(1), path: start):
yield (lty.path, pyl.account)
iterator rightPairsGeneric*(
db: AristoDbRef; # Database layer
root: VertexID; # Generic root (different from VertexID(1))
start = low(PathID); # Before or at first value
): (PathID,Blob) =
## Variant of `rightPairs()` for a generic tree
# Verify that `root` is neither from an accounts tree nor a storage tree.
if VertexID(1) < root and root.distinctBase < LEAST_FREE_VID:
for (lty,pyl) in db.rightPairs LeafTie(root: root, path: start):
yield (lty.path, pyl.rawBlob)
iterator rightPairsStorage*(
db: AristoDbRef; # Database layer
accPath: PathID; # Account the storage data belong to
start = low(PathID); # Before or at first value
): (PathID,Blob) =
## Variant of `rightPairs()` for a storage tree
block body:
let stoID = db.fetchStorageID(accPath).valueOr:
break body
if stoID.isValid:
for (lty,pyl) in db.rightPairs LeafTie(root: stoID, path: start):
yield (lty.path, pyl.rawBlob)
# ----------------
proc left*(

View File

@ -43,7 +43,7 @@ proc serialise(
ok pyl.rawBlob
of AccountData:
let
vid = pyl.account.storageID
vid = pyl.stoID
key = block:
if vid.isValid:
vid.getKey.valueOr:

View File

@ -36,8 +36,8 @@ proc toAccount*(
balance: payload.account.balance,
codeHash: payload.account.codeHash,
storageRoot: EMPTY_ROOT_HASH)
if payload.account.storageID.isValid:
acc.storageRoot = (? db.getKeyRc payload.account.storageID).to(Hash256)
if payload.stoID.isValid:
acc.storageRoot = (? db.getKeyRc payload.stoID).to(Hash256)
return ok(acc)
err UtilsPayloadTypeUnsupported
@ -63,7 +63,7 @@ proc toAccount*(
balance: node.lData.account.balance,
codeHash: node.lData.account.codeHash,
storageRoot: EMPTY_ROOT_HASH)
if node.lData.account.storageID.isValid:
if node.lData.stoID.isValid:
if not node.key[0].isValid:
return err(UtilsAccStorageKeyMissing)
acc.storageRoot = node.key[0].to(Hash256)
@ -111,7 +111,7 @@ proc toNode*(
let node = NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
# Need to resolve storage root for account leaf
if vtx.lData.pType == AccountData:
let vid = vtx.lData.account.storageID
let vid = vtx.lData.stoID
if vid.isValid:
let key = db.getKey vid
if not key.isValid:
@ -120,7 +120,7 @@ proc toNode*(
# Stale storage trie?
if LEAST_FREE_VID <= vid.distinctBase and
not db.getVtx(vid).isValid:
node.lData.account.storageID = VertexID(0)
node.lData.stoID = VertexID(0)
break looseCoupling
# Otherwise this is a stale storage trie.
return err(@[vid])
@ -160,7 +160,7 @@ proc subVids*(vtx: VertexRef): seq[VertexID] =
case vtx.vType:
of Leaf:
if vtx.lData.pType == AccountData:
let vid = vtx.lData.account.storageID
let vid = vtx.lData.stoID
if vid.isValid:
result.add vid
of Branch:
@ -172,29 +172,6 @@ proc subVids*(vtx: VertexRef): seq[VertexID] =
# ---------------------
proc retrieveStoAccHike*(
db: AristoDbRef; # Database
accPath: PathID; # Implies a storage ID (if any)
): Result[Hike,AristoError] =
## Verify that the `accPath` argument properly referres to a storage root
## vertex ID. The function will reset the keys along the `accPath` for
## being modified.
##
## On success, the function will return an account leaf pair with the leaf
## vertex and the vertex ID.
##
# Expand vertex path to account leaf
var hike = accPath.to(NibblesBuf).hikeUp(VertexID(1), db).valueOr:
return err(UtilsAccInaccessible)
# Extract the account payload fro the leaf
let wp = hike.legs[^1].wp
if wp.vtx.vType != Leaf:
return err(UtilsAccPathWithoutLeaf)
assert wp.vtx.lData.pType == AccountData # debugging only
ok(move(hike))
proc updateAccountForHasher*(
db: AristoDbRef; # Database
hike: Hike; # Return value from `retrieveStorageID()`

View File

@ -152,15 +152,6 @@ proc baseMethods(db: AristoCoreDbRef): CoreDbBaseFns =
levelFn: proc(): int =
aBase.getLevel,
colStateEmptyFn: proc(col: CoreDbColRef): CoreDbRc[bool] =
aBase.rootHashEmpty(col, "rootHashFn()"),
colStateFn: proc(col: CoreDbColRef): CoreDbRc[Hash256] =
aBase.rootHash(col, "rootHashFn()"),
colPrintFn: proc(vid: CoreDbColRef): string =
aBase.colPrint(vid),
errorPrintFn: proc(e: CoreDbErrorRef): string =
e.errorPrint(),
@ -246,6 +237,10 @@ func toAristo*(mBe: CoreDbMptBackendRef): AristoDbRef =
if not mBe.isNil and mBe.parent.isAristo:
return mBe.AristoCoreDbMptBE.adb
func toAristo*(mBe: CoreDbAccBackendRef): AristoDbRef =
if not mBe.isNil and mBe.parent.isAristo:
return mBe.AristoCoreDbAccBE.adb
proc toAristoSavedStateBlockNumber*(
mBe: CoreDbMptBackendRef;
): tuple[stateRoot: Hash256, blockNumber: BlockNumber] =
@ -284,8 +279,20 @@ iterator aristoMptPairs*(dsc: CoreDbMptRef): (Blob,Blob) {.noRaise.} =
let
api = dsc.to(AristoApiRef)
mpt = dsc.to(AristoDbRef)
for (k,v) in mpt.rightPairs LeafTie(root: dsc.rootID):
yield (api.pathAsBlob(k.path), api.serialise(mpt, v).valueOr(EmptyBlob))
for (path,data) in mpt.rightPairsGeneric dsc.rootID:
yield (api.pathAsBlob(path), data)
iterator aristoSlotPairs*(
dsc: CoreDbAccRef;
eAddr: EthAddress;
): (Blob,Blob)
{.noRaise.} =
let
api = dsc.to(AristoApiRef)
mpt = dsc.to(AristoDbRef)
accKey = HashKey.fromBytes(eAddr.keccakHash.data).value.to(PathID)
for (path,data) in mpt.rightPairsStorage accKey:
yield (api.pathAsBlob(path), data)
iterator aristoReplicateMem*(dsc: CoreDbMptRef): (Blob,Blob) {.rlpRaise.} =
## Instantiation for `MemBackendRef`

View File

@ -11,7 +11,7 @@
{.push raises: [].}
import
std/[strutils, typetraits],
std/typetraits,
chronicles,
eth/common,
stew/byteutils,
@ -37,88 +37,25 @@ type
AristoCoreDbMptRef = ref object of CoreDbMptRef
base: AristoBaseRef ## Local base descriptor
mptRoot: VertexID ## State root, may be zero unless account
accPath: PathID ## Needed for storage tree/columns
address: EthAddress ## For storage tree debugging
AristoColRef* = ref object of CoreDbColRef
## Vertex ID wrapper, optionally with *MPT* context
base: AristoBaseRef
case colType: CoreDbColType ## Current column type
of CtStorage:
stoRoot: VertexID ## State root, may be zero if unknown
stoAddr: EthAddress ## Associated storage account address
else:
reset: bool ## Internal delete request
AristoCoreDbMptBE* = ref object of CoreDbMptBackendRef
adb*: AristoDbRef
const
VoidVID = VertexID(0)
# StorageVID = VertexID(CtStorage) -- currently unused
AccountsVID = VertexID(CtAccounts)
GenericVID = VertexID(CtGeneric)
AristoCoreDbAccBE* = ref object of CoreDbAccBackendRef
adb*: AristoDbRef
logScope:
topics = "aristo-hdl"
static:
doAssert CtStorage.ord == 0
doAssert CtAccounts.ord == 1
doAssert low(CoreDbColType).ord == 0
doAssert high(CoreDbColType).ord < LEAST_FREE_VID
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
func isValid(col: CoreDbColRef): bool =
not col.isNil and col.ready
func to(col: CoreDbColRef; T: type VertexID): T =
if col.isValid:
let col = AristoColRef(col)
if col.colType == CtStorage:
return col.stoRoot
return VertexID(col.colType)
func to(address: EthAddress; T: type PathID): T =
HashKey.fromBytes(address.keccakHash.data).value.to(T)
func resetCol(colType: CoreDbColType): bool =
## Check whether to reset some non-dynamic column when instantiating. It
## emulates the behaviour of a new empty MPT on the legacy database.
colType == CtGeneric or
(high(CoreDbColType) < colType and colType.ord < LEAST_FREE_VID)
# -------------------------------
func toCoreDbAccount(
cAcc: AristoCoreDbAccRef;
acc: AristoAccount;
address: EthAddress;
): CoreDbAccount =
let db = cAcc.base.parent
result = CoreDbAccount(
address: address,
nonce: acc.nonce,
balance: acc.balance,
codeHash: acc.codeHash)
if acc.storageID.isValid:
result.storage = db.bless AristoColRef(
base: cAcc.base,
colType: CtStorage,
stoRoot: acc.storageID,
stoAddr: address)
func toPayloadRef(acc: CoreDbAccount): PayloadRef =
PayloadRef(
pType: AccountData,
account: AristoAccount(
nonce: acc.nonce,
balance: acc.balance,
storageID: acc.storage.to(VertexID),
codeHash: acc.codeHash))
func to(eAddr: EthAddress; T: type PathID): T =
HashKey.fromBytes(eAddr.keccakHash.data).value.to(T)
# -------------------------------
@ -133,28 +70,17 @@ func toError(
isAristo: true,
aErr: e))
# Forward declaration, see below in public section
func toError*(
func toError(
e: (VertexID,AristoError);
base: AristoBaseRef;
info: string;
error = Unspecified;
): CoreDbErrorRef
func toRc[T](
rc: Result[T,(VertexID,AristoError)];
base: AristoBaseRef;
info: string;
error = Unspecified;
): CoreDbRc[T] =
if rc.isOk:
when T is void:
return ok()
else:
return ok(rc.value)
err rc.error.toError(base, info, error)
): CoreDbErrorRef =
base.parent.bless(error, AristoCoreDbError(
ctx: info,
isAristo: true,
vid: e[0],
aErr: e[1]))
func toRc[T](
rc: Result[T,AristoError];
@ -167,8 +93,7 @@ func toRc[T](
return ok()
else:
return ok(rc.value)
err((VoidVID,rc.error).toError(base, info, error))
err((VertexID(0),rc.error).toError(base, info, error))
func toVoidRc[T](
rc: Result[T,(VertexID,AristoError)];
@ -183,6 +108,7 @@ func toVoidRc[T](
# ------------------------------------------------------------------------------
# Private `MPT` call back functions
# ------------------------------------------------------------------------------
proc mptMethods(): CoreDbMptFns =
# These templates are a hack to remove a closure environment that was using
# hundreds of mb of memory to have this syntactic convenience
@ -195,104 +121,49 @@ proc mptMethods(): CoreDbMptFns =
proc mptBackend(cMpt: AristoCoreDbMptRef): CoreDbMptBackendRef =
db.bless AristoCoreDbMptBE(adb: mpt)
proc mptColFn(cMpt: AristoCoreDbMptRef): CoreDbColRef =
if cMpt.mptRoot.distinctBase < LEAST_FREE_VID:
return db.bless(AristoColRef(
base: base,
colType: CoreDbColType(cMpt.mptRoot)))
assert cMpt.accPath.isValid # debug mode only
if cMpt.mptRoot.isValid:
# The mpt might have become empty
let
key = cMpt.address.keccakHash.data
acc = api.fetchAccountPayload(mpt, key).valueOr:
raiseAssert "mptColFn(): " & $error
# Update by accounts data
cMpt.mptRoot = acc.storageID
db.bless AristoColRef(
base: base,
colType: CtStorage,
stoRoot: cMpt.mptRoot,
stoAddr: cMpt.address)
proc mptFetch(cMpt: AristoCoreDbMptRef, key: openArray[byte]): CoreDbRc[Blob] =
const info = "fetchFn()"
let rc = block:
if cMpt.accPath.isValid:
api.fetchStorageData(mpt, key, cMpt.accPath)
elif cMpt.mptRoot.isValid:
api.fetchGenericData(mpt, cMpt.mptRoot, key)
else:
# Some pathological behaviour observed with storage column due to lazy
# update. The `fetchXxxPayload()` does not now about this and would
# complain an error different from `FetchPathNotFound`.
return err(MptRootMissing.toError(base, info, MptNotFound))
# let rc = api.fetchPayload(mpt, rootVID, key)
if rc.isOk:
ok rc.value
elif rc.error != FetchPathNotFound:
err(rc.error.toError(base, info))
else:
err(rc.error.toError(base, info, MptNotFound))
let data = api.fetchGenericData(mpt, cMpt.mptRoot, key).valueOr:
if error == FetchPathNotFound:
return err(error.toError(base, info, MptNotFound))
return err(error.toError(base, info))
ok(data)
proc mptMerge(cMpt: AristoCoreDbMptRef, k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
const info = "mergeFn()"
if cMpt.accPath.isValid:
let rc = api.mergeStorageData(mpt, k, v, cMpt.accPath)
if rc.isErr:
return err(rc.error.toError(base, info))
if rc.value.isValid:
cMpt.mptRoot = rc.value
else:
let rc = api.mergeGenericData(mpt, cMpt.mptRoot, k, v)
if rc.isErr:
return err(rc.error.toError(base, info))
api.mergeGenericData(mpt, cMpt.mptRoot, k, v).isOkOr:
return err(error.toError(base, info))
ok()
proc mptDelete(cMpt: AristoCoreDbMptRef, key: openArray[byte]): CoreDbRc[void] =
const info = "deleteFn()"
let rc = block:
if cMpt.accPath.isValid:
api.deleteStorageData(mpt, key, cMpt.accPath)
else:
api.deleteGenericData(mpt, cMpt.mptRoot, key)
if rc.isErr:
if rc.error == DelPathNotFound:
return err(rc.error.toError(base, info, MptNotFound))
if rc.error == DelStoRootMissing:
# This is insane but legit. A storage column was announced for an
# account but no data have been added, yet.
return ok()
return err(rc.error.toError(base, info))
if rc.value:
# Column has become empty
cMpt.mptRoot = VoidVID
api.deleteGenericData(mpt, cMpt.mptRoot, key).isOkOr:
if error == DelPathNotFound:
return err(error.toError(base, info, MptNotFound))
return err(error.toError(base, info))
ok()
proc mptHasPath(cMpt: AristoCoreDbMptRef, key: openArray[byte]): CoreDbRc[bool] =
const info = "hasPathFn()"
let yn = api.hasPathGeneric(mpt, cMpt.mptRoot, key).valueOr:
return err(error.toError(base, info))
ok(yn)
let rc = block:
if cMpt.accPath.isValid:
api.hasPathStorage(mpt, key, cMpt.accPath)
else:
api.hasPathGeneric(mpt, cMpt.mptRoot, key)
proc mptState(cMpt: AristoCoreDbMptRef, updateOk: bool): CoreDbRc[Hash256] =
const info = "mptState()"
#let rc = api.hasPath(mpt, cMpt.mptRoot, key)
if rc.isErr:
let rc = api.fetchGenericState(mpt, cMpt.mptRoot)
if rc.isOk:
return ok(rc.value)
elif not updateOk and rc.error != GetKeyUpdateNeeded:
return err(rc.error.toError(base, info))
ok(rc.value)
# FIXME: `hashify()` should probably throw an assert on failure
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)
let state = api.fetchGenericState(mpt, cMpt.mptRoot).valueOr:
raiseAssert info & ": " & $error
ok(state)
## Generic columns database handlers
CoreDbMptFns(
@ -311,8 +182,8 @@ proc mptMethods(): CoreDbMptFns =
hasPathFn: proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[bool] =
mptHasPath(AristoCoreDbMptRef(cMpt), k),
getColFn: proc(cMpt: CoreDbMptRef): CoreDbColRef =
mptColFn(AristoCoreDbMptRef(cMpt)))
stateFn: proc(cMpt: CoreDbMptRef, updateOk: bool): CoreDbRc[Hash256] =
mptState(AristoCoreDbMptRef(cMpt), updateOk))
# ------------------------------------------------------------------------------
# Private account call back functions
@ -325,197 +196,201 @@ proc accMethods(): CoreDbAccFns =
template api: untyped = base.api
template mpt: untyped = base.ctx.mpt
proc getColFn(cAcc: AristoCoreDbAccRef): CoreDbColRef =
db.bless AristoColRef(
base: base,
colType: CtAccounts)
proc accBackend(cAcc: AristoCoreDbAccRef): CoreDbAccBackendRef =
db.bless AristoCoreDbAccBE(adb: mpt)
proc accCloneMpt(cAcc: AristoCoreDbAccRef): CoreDbRc[CoreDbMptRef] =
var xpt = AristoCoreDbMptRef(
base: base,
mptRoot: AccountsVID)
xpt.methods = mptMethods()
ok(db.bless xpt)
proc accFetch(cAcc: AristoCoreDbAccRef, address: EthAddress): CoreDbRc[CoreDbAccount] =
proc accFetch(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[CoreDbAccount] =
const info = "acc/fetchFn()"
let
key = address.keccakHash.data
acc = api.fetchAccountPayload(mpt, key).valueOr:
if error != FetchPathNotFound:
return err(error.toError(base, info))
return err(error.toError(base, info, AccNotFound))
ok cAcc.toCoreDbAccount(acc, address)
proc accMerge(cAcc: AristoCoreDbAccRef, account: CoreDbAccount): CoreDbRc[void] =
let acc = api.fetchAccountRecord(mpt, eAddr.keccakHash.data).valueOr:
if error != FetchPathNotFound:
return err(error.toError(base, info))
return err(error.toError(base, info, AccNotFound))
ok CoreDbAccount(
address: eAddr,
nonce: acc.nonce,
balance: acc.balance,
codeHash: acc.codeHash)
proc accMerge(cAcc: AristoCoreDbAccRef, acc: CoreDbAccount): CoreDbRc[void] =
const info = "acc/mergeFn()"
let
key = account.address.keccakHash.data
val = account.toPayloadRef()
rc = api.mergeAccountPayload(mpt, key, val.account)
if rc.isErr:
return err(rc.error.toError(base, info))
key = acc.address.keccakHash.data
val = AristoAccount(
nonce: acc.nonce,
balance: acc.balance,
codeHash: acc.codeHash)
api.mergeAccountRecord(mpt, key, val).isOkOr:
return err(error.toError(base, info))
ok()
proc accDelete(cAcc: AristoCoreDbAccRef, address: EthAddress): CoreDbRc[void] =
proc accDelete(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
const info = "acc/deleteFn()"
let key = address.keccakHash.data
api.deleteAccountPayload(mpt, key).isOkOr:
api.deleteAccountRecord(mpt, eAddr.keccakHash.data).isOkOr:
if error == DelPathNotFound:
# TODO: Would it be convenient to just return `ok()` here?
return err(error.toError(base, info, AccNotFound))
return err(error.toError(base, info))
ok()
proc accStoDelete(cAcc: AristoCoreDbAccRef, address: EthAddress): CoreDbRc[void] =
const info = "stoDeleteFn()"
let rc = api.deleteStorageTree(mpt, address.to(PathID))
if rc.isErr and rc.error notin {DelStoRootMissing,DelStoAccMissing}:
return err(rc.error.toError(base, info))
proc accClearStorage(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
const info = "acc/clearStoFn()"
api.deleteStorageTree(mpt, eAddr.to(PathID)).isOkOr:
if error notin {DelStoRootMissing,DelStoAccMissing}:
return err(error.toError(base, info))
ok()
proc accHasPath(cAcc: AristoCoreDbAccRef, address: EthAddress): CoreDbRc[bool] =
proc accHasPath(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[bool] =
const info = "hasPathFn()"
let
key = address.keccakHash.data
yn = api.hasPathAccount(mpt, key).valueOr:
return err(error.toError(base, info))
let yn = api.hasPathAccount(mpt, eAddr.keccakHash.data).valueOr:
return err(error.toError(base, info))
ok(yn)
proc accState(cAcc: AristoCoreDbAccRef, updateOk: bool): CoreDbRc[Hash256] =
const info = "accStateFn()"
let rc = api.fetchAccountState(mpt)
if rc.isOk:
return ok(rc.value)
elif not updateOk and rc.error != GetKeyUpdateNeeded:
return err(rc.error.toError(base, info))
# FIXME: `hashify()` should probably throw an assert on failure
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)
let state = api.fetchAccountState(mpt).valueOr:
raiseAssert info & ": " & $error
ok(state)
proc slotFetch(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; key: openArray[byte]): CoreDbRc[Blob] =
const info = "slotFetchFn()"
let data = api.fetchStorageData(mpt, key, eAddr.to(PathID)).valueOr:
if error != FetchPathNotFound:
return err(error.toError(base, info))
return err(error.toError(base, info, StoNotFound))
ok(data)
proc slotDelete(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; key: openArray[byte]): CoreDbRc[void] =
const info = "slotDeleteFn()"
api.deleteStorageData(mpt, key, eAddr.to(PathID)).isOkOr:
if error == DelPathNotFound:
return err(error.toError(base, info, StoNotFound))
if error == DelStoRootMissing:
# This is insane but legit. A storage column was announced for an
# account but no data have been added, yet.
return ok()
return err(error.toError(base, info))
ok()
proc slotHasPath(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; key: openArray[byte]): CoreDbRc[bool] =
const info = "slotHasPathFn()"
let yn = api.hasPathStorage(mpt, key, eAddr.to(PathID)).valueOr:
return err(error.toError(base, info))
ok(yn)
proc slotMerge(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; key, val: openArray[byte]): CoreDbRc[void] =
const info = "slotMergeFn()"
api.mergeStorageData(mpt, key, val, eAddr.to(PathID)).isOkOr:
return err(error.toError(base, info))
ok()
proc slotState(cAcc: AristoCoreDbAccRef; eAddr: EthAddress; updateOk: bool): CoreDbRc[Hash256] =
const info = "slotStateFn()"
let rc = api.fetchStorageState(mpt, eAddr.to(PathID))
if rc.isOk:
return ok(rc.value)
elif not updateOk and rc.error != GetKeyUpdateNeeded:
return err(rc.error.toError(base, info))
# FIXME: `hashify()` should probably throw an assert on failure
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)
let state = api.fetchStorageState(mpt, eAddr.to(PathID)).valueOr:
return err(error.toError(base, info))
ok(state)
proc slotStateEmpty(cAcc: AristoCoreDbAccRef; eAddr: EthAddress): CoreDbRc[bool] =
const info = "slotStateEmptyFn()"
let yn = api.hasStorageData(mpt, eAddr.to(PathID)).valueOr:
return err(error.toError(base, info))
ok(not yn)
CoreDbAccFns(
getMptFn: proc(cAcc: CoreDbAccRef): CoreDbRc[CoreDbMptRef] =
accCloneMpt(AristoCoreDbAccRef(cAcc)),
backendFn: proc(cAcc: CoreDbAccRef): CoreDbAccBackendRef =
accBackend(AristoCoreDbAccRef(cAcc)),
fetchFn: proc(cAcc: CoreDbAccRef, address: EthAddress): CoreDbRc[CoreDbAccount] =
accFetch(AristoCoreDbAccRef(cAcc), address),
fetchFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress): CoreDbRc[CoreDbAccount] =
accFetch(AristoCoreDbAccRef(cAcc), eAddr),
deleteFn: proc(cAcc: CoreDbAccRef, address: EthAddress): CoreDbRc[void] =
accDelete(AristoCoreDbAccRef(cAcc), address),
deleteFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress): CoreDbRc[void] =
accDelete(AristoCoreDbAccRef(cAcc), eAddr),
stoDeleteFn: proc(cAcc: CoreDbAccRef, address: EthAddress): CoreDbRc[void] =
accStoDelete(AristoCoreDbAccRef(cAcc), address),
clearStorageFn: proc(cAcc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
accClearStorage(AristoCoreDbAccRef(cAcc), eAddr),
mergeFn: proc(cAcc: CoreDbAccRef, acc: CoreDbAccount): CoreDbRc[void] =
accMerge(AristoCoreDbAccRef(cAcc), acc),
hasPathFn: proc(cAcc: CoreDbAccRef, address: EthAddress): CoreDbRc[bool] =
accHasPath(AristoCoreDbAccRef(cAcc), address),
hasPathFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress): CoreDbRc[bool] =
accHasPath(AristoCoreDbAccRef(cAcc), eAddr),
getColFn: proc(cAcc: CoreDbAccRef): CoreDbColRef =
getColFn(AristoCoreDbAccRef(cAcc)))
stateFn: proc(cAcc: CoreDbAccRef, updateOk: bool): CoreDbRc[Hash256] =
accState(AristoCoreDbAccRef(cAcc), updateOk),
slotFetchFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; k: openArray[byte]): CoreDbRc[Blob] =
slotFetch(AristoCoreDbAccRef(cAcc), eAddr, k),
slotDeleteFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; k: openArray[byte]): CoreDbRc[void] =
slotDelete(AristoCoreDbAccRef(cAcc), eAddr, k),
slotHasPathFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; k: openArray[byte]): CoreDbRc[bool] =
slotHasPath(AristoCoreDbAccRef(cAcc), eAddr, k),
slotMergeFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; k,v: openArray[byte]): CoreDbRc[void] =
slotMerge(AristoCoreDbAccRef(cAcc), eAddr, k, v),
slotStateFn: proc(cAcc: CoreDbAccRef, eAddr: EthAddress; updateOk: bool): CoreDbRc[Hash256] =
slotState(AristoCoreDbAccRef(cAcc), eAddr, updateOk),
slotStateEmptyFn: proc(cAcc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[bool] =
slotStateEmpty(AristoCoreDbAccRef(cAcc), eAddr))
# ------------------------------------------------------------------------------
# Private context call back functions
# ------------------------------------------------------------------------------
proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
proc ctxMethods(): CoreDbCtxFns =
template base: untyped = cCtx.base
template db: untyped = base.parent
template api: untyped = base.api
template mpt: untyped = cCtx.mpt
proc ctxNewCol(
cCtx: AristoCoreDbCtxRef,
colType: CoreDbColType;
colState: Hash256;
address: Opt[EthAddress];
): CoreDbRc[CoreDbColRef] =
const info = "ctx/newColFn()"
let col = AristoColRef(
proc ctxGetColumn(cCtx: AristoCoreDbCtxRef; colType: CoreDbColType; clearData: bool): CoreDbMptRef =
const info = "getColumnFn()"
if clearData:
api.deleteGenericTree(mpt, VertexID(colType)).isOkOr:
raiseAssert info & " clearing up failed: " & $error
db.bless AristoCoreDbMptRef(
methods: mptMethods(),
base: base,
colType: colType)
mptRoot: VertexID(colType))
if colType == CtStorage:
if address.isNone:
let error = aristo.UtilsAccPathMissing
return err(error.toError(base, info, AccAddrMissing))
col.stoAddr = address.unsafeGet
if not colState.isValid:
return ok(db.bless col)
# Reset some non-dynamic col when instantiating. It emulates the behaviour
# of a new empty MPT on the legacy database.
col.reset = colType.resetCol()
# Update hashes in order to verify the column state.
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)
# Assure that hash is available as state for the main/accounts column
let rc = api.getKeyRc(mpt, VertexID colType)
if rc.isErr:
doAssert rc.error == GetKeyNotFound
elif rc.value == colState.to(HashKey):
return ok(db.bless col)
err(aristo.GenericError.toError(base, info, RootNotFound))
proc ctxGetMpt(cCtx: AristoCoreDbCtxRef, col: CoreDbColRef): CoreDbRc[CoreDbMptRef] =
const
info = "ctx/getMptFn()"
let
col = AristoColRef(col)
var
reset = false
newMpt: AristoCoreDbMptRef
if not col.isValid:
reset = true
newMpt = AristoCoreDbMptRef(
mptRoot: GenericVID,
accPath: VOID_PATH_ID)
elif col.colType == CtStorage:
newMpt = AristoCoreDbMptRef(
mptRoot: col.stoRoot,
accPath: col.stoAddr.to(PathID),
address: col.stoAddr)
if col.stoRoot.isValid:
if col.stoRoot.distinctBase < LEAST_FREE_VID:
let error = (col.stoRoot,MptRootUnacceptable)
return err(error.toError(base, info, RootUnacceptable))
# Verify path if there is a particular storage root VID
let rc = api.hikeUp(newMpt.accPath.to(NibblesBuf), AccountsVID, mpt)
if rc.isErr:
return err(rc.error[1].toError(base, info, AccNotFound))
else:
reset = col.colType.resetCol()
newMpt = AristoCoreDbMptRef(
mptRoot: VertexID(col.colType),
accPath: VOID_PATH_ID)
# Reset column. This emulates the behaviour of a new empty MPT on the
# legacy database.
if reset:
let rc = api.deleteGenericTree(mpt, newMpt.mptRoot)
if rc.isErr:
return err(rc.error.toError(base, info, AutoFlushFailed))
col.reset = false
newMpt.base = base
newMpt.methods = mptMethods()
ok(db.bless newMpt)
proc ctxGetAcc(cCtx: AristoCoreDbCtxRef, col: CoreDbColRef): CoreDbRc[CoreDbAccRef] =
const info = "getAccFn()"
let col = AristoColRef(col)
if col.colType != CtAccounts:
let error = (AccountsVID, AccRootUnacceptable)
return err(error.toError(base, info, RootUnacceptable))
let acc = AristoCoreDbAccRef(base: base)
acc.methods = accMethods()
ok(db.bless acc)
proc ctxGetAccounts(cCtx: AristoCoreDbCtxRef): CoreDbAccRef =
db.bless AristoCoreDbAccRef(
methods: accMethods(),
base: base)
proc ctxForget(cCtx: AristoCoreDbCtxRef) =
api.forget(mpt).isOkOr:
@ -523,19 +398,11 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
CoreDbCtxFns(
newColFn: proc(
cCtx: CoreDbCtxRef;
col: CoreDbColType;
colState: Hash256;
address: Opt[EthAddress];
): CoreDbRc[CoreDbColRef] =
ctxNewCol(AristoCoreDbCtxRef(cCtx), col, colState, address),
getColumnFn: proc(cCtx: CoreDbCtxRef; colType: CoreDbColType; clearData: bool): CoreDbMptRef =
ctxGetColumn(AristoCoreDbCtxRef(cCtx), colType, clearData),
getMptFn: proc(cCtx: CoreDbCtxRef, col: CoreDbColRef): CoreDbRc[CoreDbMptRef] =
ctxGetMpt(AristoCoreDbCtxRef(cCtx), col),
getAccFn: proc(cCtx: CoreDbCtxRef, col: CoreDbColRef): CoreDbRc[CoreDbAccRef] =
ctxGetAcc(AristoCoreDbCtxRef(cCtx), col),
getAccountsFn: proc(cCtx: CoreDbCtxRef): CoreDbAccRef =
ctxGetAccounts(AristoCoreDbCtxRef(cCtx)),
forgetFn: proc(cCtx: CoreDbCtxRef) =
ctxForget(AristoCoreDbCtxRef(cCtx)))
@ -544,28 +411,6 @@ proc ctxMethods(cCtx: AristoCoreDbCtxRef): CoreDbCtxFns =
# Public handlers and helpers
# ------------------------------------------------------------------------------
func toError*(
e: (VertexID,AristoError);
base: AristoBaseRef;
info: string;
error = Unspecified;
): CoreDbErrorRef =
base.parent.bless(error, AristoCoreDbError(
ctx: info,
isAristo: true,
vid: e[0],
aErr: e[1]))
func toVoidRc*[T](
rc: Result[T,AristoError];
base: AristoBaseRef;
info: string;
error = Unspecified;
): CoreDbRc[void] =
if rc.isOk:
return ok()
err((VoidVID,rc.error).toError(base, info, error))
proc getSavedState*(base: AristoBaseRef): Result[SavedState,void] =
let be = base.ctx.mpt.backend
if not be.isNil:
@ -579,9 +424,15 @@ proc getSavedState*(base: AristoBaseRef): Result[SavedState,void] =
func to*(dsc: CoreDbMptRef, T: type AristoDbRef): T =
AristoCoreDbMptRef(dsc).base.ctx.mpt
func to*(dsc: CoreDbAccRef, T: type AristoDbRef): T =
AristoCoreDbAccRef(dsc).base.ctx.mpt
func to*(dsc: CoreDbMptRef, T: type AristoApiRef): T =
AristoCoreDbMptRef(dsc).base.api
func to*(dsc: CoreDbAccRef, T: type AristoApiRef): T =
AristoCoreDbAccRef(dsc).base.api
func rootID*(dsc: CoreDbMptRef): VertexID =
AristoCoreDbMptRef(dsc).mptRoot
@ -605,82 +456,6 @@ proc getLevel*(base: AristoBaseRef): int =
# ---------------------
proc colPrint*(
base: AristoBaseRef;
col: CoreDbColRef;
): string =
if col.isValid:
let
col = AristoColRef(col)
root = col.to(VertexID)
result = "(" & $col.colType & ","
# Do vertex ID and address/hash
if col.colType == CtStorage:
result &= col.stoRoot.toStr
if col.stoAddr != EthAddress.default:
result &= ",%" & $col.stoAddr.toHex
else:
result &= VertexID(col.colType).toStr
# Do the Merkle hash key
if not root.isValid:
result &= ",£ø"
else:
let rc = base.api.getKeyRc(col.base.ctx.mpt, root)
if rc.isErr:
result &= "," & $rc.error
elif rc.value.isValid:
result &= "" & rc.value.to(Hash256).data.toHex
else:
result &= ",£ø"
result &= ")"
elif not col.isNil:
result &= "$?"
proc rootHashEmpty*(
base: AristoBaseRef;
col: CoreDbColRef;
info: static[string];
): CoreDbRc[bool] =
let col = AristoColRef(col)
if not col.isValid:
return err(TrieInvalid.toError(base, info, HashNotAvailable))
let root = col.to(VertexID)
if not root.isValid:
return ok(true)
return ok(false)
proc rootHash*(
base: AristoBaseRef;
col: CoreDbColRef;
info: static[string];
): CoreDbRc[Hash256] =
let col = AristoColRef(col)
if not col.isValid:
return err(TrieInvalid.toError(base, info, HashNotAvailable))
let root = col.to(VertexID)
if not root.isValid:
return ok(EMPTY_ROOT_HASH)
let
api = base.api
mpt = base.ctx.mpt
? api.hashify(mpt).toVoidRc(base, info, HashNotAvailable)
let key = block:
let rc = api.getKeyRc(mpt, root)
if rc.isErr:
doAssert rc.error in {GetKeyNotFound, GetKeyUpdateNeeded}
return err(rc.error.toError(base, info, HashNotAvailable))
rc.value
ok key.to(Hash256)
proc swapCtx*(base: AristoBaseRef; ctx: CoreDbCtxRef): CoreDbCtxRef =
doAssert not ctx.isNil
result = base.ctx
@ -722,9 +497,9 @@ func init*(T: type AristoBaseRef; db: CoreDbRef; adb: AristoDbRef): T =
# Create initial context
let ctx = AristoCoreDbCtxRef(
base: result,
mpt: adb)
ctx.methods = ctx.ctxMethods
methods: ctxMethods(),
base: result,
mpt: adb)
result.ctx = db.bless ctx
when CoreDbEnableApiProfiling:
@ -764,9 +539,9 @@ proc init*(
# Create new context
let ctx = AristoCoreDbCtxRef(
base: base,
mpt: newMpt)
ctx.methods = ctx.ctxMethods
methods: ctxMethods(),
base: base,
mpt: newMpt)
ok(base.parent.bless ctx)
# ------------------------------------------------------------------------------

View File

@ -16,7 +16,7 @@ import
./base/[api_tracking, base_desc]
from ../aristo
import EmptyBlob, PayloadRef, isValid
import EmptyBlob, isValid
const
EnableApiTracking = false
@ -36,14 +36,12 @@ export
CoreDbApiError,
CoreDbCaptFlags,
CoreDbColType,
CoreDbColRef,
CoreDbCtxRef,
CoreDbErrorCode,
CoreDbErrorRef,
CoreDbFnInx,
CoreDbKvtBackendRef,
CoreDbMptBackendRef,
CoreDbPayloadRef,
CoreDbPersistentTypes,
CoreDbProfListRef,
CoreDbRef,
@ -52,8 +50,7 @@ export
CoreDbCaptRef,
CoreDbKvtRef,
CoreDbMptRef,
CoreDbTxRef,
PayloadRef
CoreDbTxRef
const
CoreDbEnableApiTracking* = EnableApiTracking
@ -88,7 +85,6 @@ when EnableApiTracking:
proc `$`(q: set[CoreDbCaptFlags]): string = q.toStr
proc `$`(t: Duration): string = t.toStr
proc `$`(e: EthAddress): string = e.toStr
proc `$`(v: CoreDbColRef): string = v.toStr
proc `$`(h: Hash256): string = h.toStr
template setTrackNewApi(
@ -127,14 +123,6 @@ proc bless*(db: CoreDbRef): CoreDbRef =
db.profTab = CoreDbProfListRef.init()
db
proc bless*(db: CoreDbRef; col: CoreDbColRef): CoreDbColRef =
## Complete sub-module descriptor, fill in `parent` and actvate it.
col.parent = db
col.ready = true
when AutoValidateDescriptors:
col.validate
col
proc bless*(db: CoreDbRef; kvt: CoreDbKvtRef): CoreDbKvtRef =
## Complete sub-module descriptor, fill in `parent`.
kvt.parent = db
@ -145,7 +133,7 @@ proc bless*(db: CoreDbRef; kvt: CoreDbKvtRef): CoreDbKvtRef =
proc bless*[T: CoreDbKvtRef |
CoreDbCtxRef | CoreDbMptRef | CoreDbAccRef |
CoreDbTxRef | CoreDbCaptRef |
CoreDbKvtBackendRef | CoreDbMptBackendRef](
CoreDbKvtBackendRef | CoreDbMptBackendRef | CoreDbAccBackendRef] (
db: CoreDbRef;
dsc: T;
): auto =
@ -171,10 +159,6 @@ proc prettyText*(e: CoreDbErrorRef): string =
## Pretty print argument object (for tracking use `$$()`)
if e.isNil: "" else: e.toStr()
proc prettyText*(col: CoreDbColRef): string =
## Pretty print argument object (for tracking use `$$()`)
if col.isNil or not col.ready: "" else: col.toStr()
# ------------------------------------------------------------------------------
# Public main descriptor methods
# ------------------------------------------------------------------------------
@ -194,7 +178,6 @@ proc dbType*(db: CoreDbRef): CoreDbType =
db.ifTrackNewApi: debug newApiTxt, api, elapsed, result
proc parent*[T: CoreDbKvtRef |
CoreDbColRef |
CoreDbCtxRef | CoreDbMptRef | CoreDbAccRef |
CoreDbTxRef |
CoreDbCaptRef |
@ -204,7 +187,7 @@ proc parent*[T: CoreDbKvtRef |
##
result = child.parent
proc backend*(dsc: CoreDbKvtRef): auto =
proc backend*(dsc: CoreDbKvtRef | CoreDbMptRef | CoreDbAccRef): auto =
## Getter, retrieves the *raw* backend object for special/localised support.
##
dsc.setTrackNewApi AnyBackendFn
@ -262,7 +245,7 @@ proc get*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[Blob] =
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
proc len*(kvt: CoreDbKvtRef; key: openArray[byte]): CoreDbRc[int] =
## This function always returns a non-empty `Blob` or an error code.
## This function returns the size of the value associated with `key`.
kvt.setTrackNewApi KvtLenFn
result = kvt.methods.lenFn key
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed, key=key.toStr, result
@ -311,19 +294,6 @@ proc ctx*(db: CoreDbRef): CoreDbCtxRef =
result = db.methods.newCtxFn()
db.ifTrackNewApi: debug newApiTxt, api, elapsed
proc ctxFromTx*(
db: CoreDbRef;
colState: Hash256;
colType = CtAccounts;
): CoreDbRc[CoreDbCtxRef] =
## Create new context derived from matching transaction of the currently
## active column context. For the legacy backend, this function always
## returns the currently active context (i.e. the same as `db.ctx()`.)
##
db.setTrackNewApi BaseNewCtxFromTxFn
result = db.methods.newCtxFromTxFn(colState, colType)
db.ifTrackNewApi: debug newApiTxt, api, elapsed, result
proc swapCtx*(db: CoreDbRef; ctx: CoreDbCtxRef): CoreDbCtxRef =
## Activate argument context `ctx` and return the previously active column
## context. This function goes typically together with `forget()`. A valid
@ -347,215 +317,19 @@ proc forget*(ctx: CoreDbCtxRef) =
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed
# ------------------------------------------------------------------------------
# Public Merkle Patricia Tree sub-trie abstraction management
# Public functions for generic columns
# ------------------------------------------------------------------------------
proc newColumn*(
proc getColumn*(
ctx: CoreDbCtxRef;
colType: CoreDbColType;
colState: Hash256;
address = Opt.none(EthAddress);
): CoreDbRc[CoreDbColRef] =
## Retrieve a new column descriptor.
##
## The database can be viewed as a matrix of rows and columns, potentially
## with values at their intersection. A row is identified by a lookup key
## and a column is identified by a state hash.
##
## Additionally, any column has a column type attribute given as `colType`
## argument. Only storage columns also have an address attribute which must
## be passed as argument `address` when the `colType` argument is `CtStorage`.
##
## If the state hash argument `colState` is passed as `EMPTY_ROOT_HASH`, this
## function always succeeds. The result is the equivalent of a potential
## column to be incarnated later. If the column type is different from
## `CtStorage` and `CtAccounts`, then the returned column descriptor will be
## flagged to reset all column data when incarnated as MPT (see `newMpt()`.).
##
## Otherwise, the function will fail unless a column with the corresponding
## argument `colState` identifier exists and can be found on the database.
## Note that on a single state database like `Aristo`, the requested column
## might exist but is buried in some history journal (which needs an extra
## effort to unwrap.)
##
## This function is intended to open a column on the database as in:
## ::
## proc openAccountLedger(db: CoreDbRef, colState: Hash256): CoreDbMptRef =
## let col = db.ctx.newColumn(CtAccounts, colState).valueOr:
## # some error handling
## return
## db.getAcc col
##
ctx.setTrackNewApi CtxNewColFn
result = ctx.methods.newColFn(ctx, colType, colState, address)
ctx.ifTrackNewApi:
debug newApiTxt, api, elapsed, colType, colState, address, result
proc newColumn*(
ctx: CoreDbCtxRef;
colState: Hash256;
address: EthAddress;
): CoreDbRc[CoreDbColRef] =
## Shortcut for `ctx.newColumn(CtStorage,colState,some(address))`.
##
ctx.setTrackNewApi CtxNewColFn
result = ctx.methods.newColFn(ctx, CtStorage, colState, Opt.some(address))
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, colState, address, result
proc newColumn*(
ctx: CoreDbCtxRef;
address: EthAddress;
): CoreDbColRef =
## Shortcut for `ctx.newColumn(EMPTY_ROOT_HASH,address).value`. The function
## will throw an exception on error. So the result will always be a valid
## descriptor.
##
ctx.setTrackNewApi CtxNewColFn
result = ctx.methods.newColFn(
ctx, CtStorage, EMPTY_ROOT_HASH, Opt.some(address)).valueOr:
raiseAssert error.prettyText()
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
proc `$$`*(col: CoreDbColRef): string =
## Pretty print the column descriptor. Note that this directive may have side
## effects as it calls a backend function.
##
#col.setTrackNewApi ColPrintFn
result = col.prettyText()
#col.ifTrackNewApi: debug newApiTxt, api, elapsed, result
proc stateEmpty*(col: CoreDbColRef): CoreDbRc[bool] =
## Getter (well, sort of). It retrieves the column state hash for the
## argument `col` descriptor. The function might fail unless the current
## state is available (e.g. on `Aristo`.)
##
## The value `EMPTY_ROOT_HASH` is returned on the void `col` descriptor
## argument `CoreDbColRef(nil)`.
##
col.setTrackNewApi BaseColStateEmptyFn
result = block:
if not col.isNil and col.ready:
col.parent.methods.colStateEmptyFn col
else:
ok true
# Note: tracker will be silent if `vid` is NIL
col.ifTrackNewApi: debug newApiTxt, api, elapsed, col, result
proc state*(col: CoreDbColRef): CoreDbRc[Hash256] =
## Getter (well, sort of). It retrieves the column state hash for the
## argument `col` descriptor. The function might fail unless the current
## state is available (e.g. on `Aristo`.)
##
## The value `EMPTY_ROOT_HASH` is returned on the void `col` descriptor
## argument `CoreDbColRef(nil)`.
##
col.setTrackNewApi BaseColStateFn
result = block:
if not col.isNil and col.ready:
col.parent.methods.colStateFn col
else:
ok EMPTY_ROOT_HASH
# Note: tracker will be silent if `vid` is NIL
col.ifTrackNewApi: debug newApiTxt, api, elapsed, col, result
proc stateEmptyOrVoid*(col: CoreDbColRef): bool =
## Convenience wrapper, returns `true` where `stateEmpty()` would fail.
col.stateEmpty.valueOr: true
# ------------------------------------------------------------------------------
# Public Merkle Patricia Tree, hexary trie constructors
# ------------------------------------------------------------------------------
proc getMpt*(
ctx: CoreDbCtxRef;
col: CoreDbColRef;
): CoreDbRc[CoreDbMptRef] =
## Get an MPT sub-trie view.
##
## If the `col` argument descriptor was created for an `EMPTY_ROOT_HASH`
## column state of a type different from `CtStorage` or `CtAccounts`, all
## column data will be flushed. There is no need to hold the `col` argument for
## later use. It can always be retrieved for this particular MPT using the
## function `getColumn()`.
##
ctx.setTrackNewApi CtxGetMptFn
result = ctx.methods.getMptFn(ctx, col)
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, col, result
proc getMpt*(
ctx: CoreDbCtxRef;
colType: CoreDbColType;
address = Opt.none(EthAddress);
clearData = false;
): CoreDbMptRef =
## Shortcut for `getMpt(col)` where the `col` argument is
## `db.getColumn(colType,EMPTY_ROOT_HASH).value`. This function will always
## return a non-nil descriptor or throw an exception.
## ...
##
ctx.setTrackNewApi CtxGetMptFn
let col = ctx.methods.newColFn(ctx, colType, EMPTY_ROOT_HASH, address).value
result = ctx.methods.getMptFn(ctx, col).valueOr:
raiseAssert error.prettyText()
ctx.ifTrackNewApi: debug newApiTxt, api, colType, elapsed
proc getMpt*(acc: CoreDbAccRef): CoreDbMptRef =
## Variant of `getMpt()`, will defect on failure.
##
## The needed sub-trie information is taken/implied from the current `acc`
## argument.
##
acc.setTrackNewApi AccToMptFn
result = acc.methods.getMptFn(acc).valueOr:
raiseAssert error.prettyText()
acc.ifTrackNewApi:
let colState = result.methods.getColFn()
debug newApiTxt, api, elapsed, colState
proc getAcc*(
ctx: CoreDbCtxRef;
col: CoreDbColRef;
): CoreDbRc[CoreDbAccRef] =
## Accounts trie constructor, will defect on failure.
##
## Example:
## ::
## let col = db.getColumn(CtAccounts,<some-hash>).valueOr:
## ... # No node available with <some-hash>
## return
##
## let acc = db.getAccMpt(col)
## ... # Was not the state root for the accounts column
## return
##
## This function works similar to `getMpt()` for handling accounts.
##
ctx.setTrackNewApi CtxGetAccFn
result = ctx.methods.getAccFn(ctx, col)
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, col, result
# ------------------------------------------------------------------------------
# Public common methods for all hexary trie databases (`mpt`, or `acc`)
# ------------------------------------------------------------------------------
proc getColumn*(acc: CoreDbAccRef): CoreDbColRef =
## Getter, result is not `nil`
##
acc.setTrackNewApi AccGetColFn
result = acc.methods.getColFn(acc)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, result
proc getColumn*(mpt: CoreDbMptRef): CoreDbColRef =
## Variant of `getColumn()`
##
mpt.setTrackNewApi MptGetColFn
result = mpt.methods.getColFn(mpt)
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, result
# ------------------------------------------------------------------------------
# Public generic hexary trie database methods
# ------------------------------------------------------------------------------
ctx.setTrackNewApi CtxGetColumnFn
result = ctx.methods.getColumnFn(ctx, colType, clearData)
ctx.ifTrackNewApi: debug newApiTxt, api, colType, clearData, elapsed
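# A minimal usage sketch, not part of the API proper (it assumes a `CoreDbRef`
# handle named `db` and an RLP encodable sequence `transactions`):
# ::
#   let txCol = db.ctx.getColumn(CtTxs, clearData=true)
#   for idx, tx in transactions:
#     txCol.merge(rlp.encode(idx.uint), rlp.encode(tx)).isOkOr:
#       raiseAssert "merge() failed: " & $$error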
proc fetch*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[Blob] =
## Fetch data from the argument `mpt`. The function always returns a
@ -607,61 +381,155 @@ proc hasPath*(mpt: CoreDbMptRef; key: openArray[byte]): CoreDbRc[bool] =
let col = mpt.methods.getColFn(mpt)
debug newApiTxt, api, elapsed, col, key=key.toStr, result
proc state*(mpt: CoreDbMptRef; updateOk = false): CoreDbRc[Hash256] =
## This function retrieves the Merkle state hash of the argument
## database column (if available.)
##
## If the argument `updateOk` is set `true`, the Merkle hashes of the
## database will be updated first (if needed, at all).
##
mpt.setTrackNewApi MptStateFn
result = mpt.methods.stateFn(mpt, updateOk)
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed, updateOK, result
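# A minimal sketch for verifying a generic column against an expected root
# (`mpt` as returned by `getColumn()` above, `expectedRoot` a hypothetical
# `Hash256`; the block chain helpers further below use the same pattern):
# ::
#   let root = mpt.state(updateOk=true).valueOr:
#     raiseAssert "state() failed: " & $$error
#   if root != expectedRoot:
#     discard # state mismatch, handle accordingly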
# ------------------------------------------------------------------------------
# Public trie database methods for accounts
# Public methods for accounts
# ------------------------------------------------------------------------------
proc fetch*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[CoreDbAccount] =
## Fetch data from the argument `acc`.
proc getAccounts*(ctx: CoreDbCtxRef): CoreDbAccRef =
## Accounts column constructor, will defect on failure.
##
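## A minimal usage sketch (assuming a `CoreDbRef` handle named `db` and an
## account address `eAddr`):
## ::
##   let acc = db.ctx.getAccounts()
##   let rec = acc.fetch(eAddr).valueOr:
##     return # no such account
##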
ctx.setTrackNewApi CtxGetAccountsFn
result = ctx.methods.getAccountsFn(ctx)
ctx.ifTrackNewApi: debug newApiTxt, api, elapsed, result
# ----------- accounts ---------------
proc fetch*(acc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[CoreDbAccount] =
## Fetch the account data record for the particular account indexed by
## the address `eAddr`.
##
acc.setTrackNewApi AccFetchFn
result = acc.methods.fetchFn(acc, address)
acc.ifTrackNewApi:
let storage = if result.isErr: "n/a" else: result.value.storage.prettyText()
debug newApiTxt, api, elapsed, address, storage, result
result = acc.methods.fetchFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
proc delete*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[void] =
proc delete*(acc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
## Delete the particular account indexed by the address `eAddr`. This
## will also destroy an associated storage area.
##
acc.setTrackNewApi AccDeleteFn
result = acc.methods.deleteFn(acc, address)
result = acc.methods.deleteFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
proc stoDelete*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[void] =
## Recursively delete all data elements from the storage trie associated to
## the account identified by the argument `address`. After successful run,
## the storage trie will be empty.
proc clearStorage*(acc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[void] =
## Delete all data slots from the storage area associated with the
## particular account indexed by the address `eAddr`.
##
## Caveat:
## This function has no effect on the legacy backend so it must not be
## relied upon in general. On the legacy backend, storage tries might be
## shared by several accounts whereas they are unique on the `Aristo`
## backend.
acc.setTrackNewApi AccClearStorageFn
result = acc.methods.clearStorageFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
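# A minimal sketch, assuming an accounts column handle `acc` and an account
# address `eAddr` (the ledger's own `clearStorage()` wrapper further below
# relies on exactly this call):
# ::
#   acc.clearStorage(eAddr).isOkOr:
#     raiseAssert "clearStorage() failed: " & $$error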
proc merge*(acc: CoreDbAccRef; account: CoreDbAccount): CoreDbRc[void] =
## Add or update the argument account data record `account`. Note that the
## `account` argument uniquely identifies the particular account address.
##
acc.setTrackNewApi AccStoDeleteFn
result = acc.methods.stoDeleteFn(acc, address)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
proc merge*(
acc: CoreDbAccRef;
account: CoreDbAccount;
): CoreDbRc[void] =
acc.setTrackNewApi AccMergeFn
result = acc.methods.mergeFn(acc, account)
acc.ifTrackNewApi:
let address = account.address
debug newApiTxt, api, elapsed, address, result
let eAddr = account.address
debug newApiTxt, api, elapsed, eAddr, result
proc hasPath*(acc: CoreDbAccRef; address: EthAddress): CoreDbRc[bool] =
proc hasPath*(acc: CoreDbAccRef; eAddr: EthAddress): CoreDbRc[bool] =
## Would be named `contains` if it returned `bool` rather than `Result[]`.
##
acc.setTrackNewApi AccHasPathFn
result = acc.methods.hasPathFn(acc, address)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, address, result
result = acc.methods.hasPathFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
proc state*(acc: CoreDbAccRef; updateOk = false): CoreDbRc[Hash256] =
## This function retrieves the Merkle state hash of the accounts
## column (if available.)
##
## If the argument `updateOk` is set `true`, the Merkle hashes of the
## database will be updated first (if needed, at all).
##
acc.setTrackNewApi AccStateFn
result = acc.methods.stateFn(acc, updateOk)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, updateOK, result
proc recast*(statement: CoreDbAccount): CoreDbRc[Account] =
# ------------ storage ---------------
proc slotFetch*(
acc: CoreDbAccRef;
eAddr: EthAddress;
slot: openArray[byte];
): CoreDbRc[Blob] =
acc.setTrackNewApi AccSlotFetchFn
result = acc.methods.slotFetchFn(acc, eAddr, slot)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
proc slotDelete*(
acc: CoreDbAccRef;
eAddr: EthAddress;
slot: openArray[byte];
): CoreDbRc[void] =
acc.setTrackNewApi AccSlotDeleteFn
result = acc.methods.slotDeleteFn(acc, eAddr, slot)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
proc slotHasPath*(
acc: CoreDbAccRef;
eAddr: EthAddress;
slot: openArray[byte];
): CoreDbRc[bool] =
acc.setTrackNewApi AccSlotHasPathFn
result = acc.methods.slotHasPathFn(acc, eAddr, slot)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
proc slotMerge*(
acc: CoreDbAccRef;
eAddr: EthAddress;
slot: openArray[byte];
data: openArray[byte];
): CoreDbRc[void] =
acc.setTrackNewApi AccSlotMergeFn
result = acc.methods.slotMergeFn(acc, eAddr, slot, data)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
proc slotState*(
acc: CoreDbAccRef;
eAddr: EthAddress;
updateOk = false;
): CoreDbRc[Hash256] =
acc.setTrackNewApi AccSlotStateFn
result = acc.methods.slotStateFn(acc, eAddr, updateOk)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, updateOk, result
proc slotStateEmpty*(
acc: CoreDbAccRef;
eAddr: EthAddress;
): CoreDbRc[bool] =
## Return `true` if the storage area of the account indexed by `eAddr` is
## empty, i.e. there is no storage data for that account.
acc.setTrackNewApi AccSlotStateEmptyFn
result = acc.methods.slotStateEmptyFn(acc, eAddr)
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
proc slotStateEmptyOrVoid*(
acc: CoreDbAccRef;
eAddr: EthAddress;
): bool =
## Convenience wrapper, returns `true` where `slotStateEmpty()` would fail.
acc.setTrackNewApi AccSlotStateEmptyOrVoidFn
result = acc.methods.slotStateEmptyFn(acc, eAddr).valueOr: true
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, eAddr, result
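# A minimal sketch of the storage slot API (not normative; the 32 byte
# `slotKey` is assumed to be the hashed slot number, as used by the ledger
# code further below):
# ::
#   acc.slotMerge(eAddr, slotKey, rlp.encode(value)).isOkOr:
#     raiseAssert "slotMerge() failed: " & $$error
#   let data = acc.slotFetch(eAddr, slotKey).valueOr:
#     return # no such slot
#   let root = acc.slotState(eAddr, updateOk=true).valueOr:
#     return # storage state not available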
# ------------- other ----------------
proc recast*(
acc: CoreDbAccRef;
statement: CoreDbAccount;
updateOk = false;
): CoreDbRc[Account] =
## Convert the argument `statement` to the portable Ethereum representation
## of an account statement. This conversion may fail if the storage state
## hash (see `slotState()` above) is currently unavailable.
@ -669,11 +537,8 @@ proc recast*(statement: CoreDbAccount): CoreDbRc[Account] =
## Note:
## With the legacy backend, this function always succeeds.
##
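## A minimal sketch of intended use (the `stmt` record is assumed to come
## from a previous `fetch()` on the same accounts column):
## ::
##   let ethAcc = acc.recast(stmt, updateOk=true).valueOr:
##     return # storage state not (yet) available
##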
let storage = statement.storage
storage.setTrackNewApi EthAccRecastFn
let rc =
if storage.isNil or not storage.ready: CoreDbRc[Hash256].ok(EMPTY_ROOT_HASH)
else: storage.parent.methods.colStateFn storage
acc.setTrackNewApi EthAccRecastFn
let rc = acc.methods.slotStateFn(acc, statement.address, updateOk)
result =
if rc.isOk:
ok Account(
@ -683,7 +548,7 @@ proc recast*(statement: CoreDbAccount): CoreDbRc[Account] =
storageRoot: rc.value)
else:
err(rc.error)
storage.ifTrackNewApi: debug newApiTxt, api, elapsed, storage, result
acc.ifTrackNewApi: debug newApiTxt, api, elapsed, result
# ------------------------------------------------------------------------------
# Public transaction related methods

View File

@ -20,7 +20,7 @@ import
type
CoreDbApiTrackRef* =
CoreDbRef | CoreDbKvtRef | CoreDbColRef |
CoreDbRef | CoreDbKvtRef |
CoreDbCtxRef | CoreDbMptRef | CoreDbAccRef |
CoreDbTxRef | CoreDbCaptRef | CoreDbErrorRef
@ -31,18 +31,22 @@ type
AccDeleteFn = "acc/delete"
AccFetchFn = "acc/fetch"
AccForgetFn = "acc/forget"
AccGetColFn = "acc/getColumn"
AccHasPathFn = "acc/hasPath"
AccMergeFn = "acc/merge"
AccGetMptFn = "acc/getMpt"
AccStoDeleteFn = "acc/stoDelete"
AccToMptFn = "acc/toMpt"
AccStateFn = "acc/state"
AccClearStorageFn = "acc/clearStorage"
AccSlotFetchFn = "slotFetch"
AccSlotDeleteFn = "slotDelete"
AccSlotHasPathFn = "slotHasPath"
AccSlotMergeFn = "slotMerge"
AccSlotStateFn = "slotState"
AccSlotStateEmptyFn = "slotStateEmpty"
AccSlotStateEmptyOrVoidFn = "slotStateEmptyOrVoid"
AccSlotPairsIt = "slotPairs"
AnyBackendFn = "any/backend"
BaseColPrintFn = "$$"
BaseColStateEmptyFn = "stateEmpty"
BaseColStateFn = "state"
BaseDbTypeFn = "dbType"
BaseFinishFn = "finish"
BaseLevelFn = "level"
@ -60,31 +64,31 @@ type
CptForgetFn = "cpt/forget"
CtxForgetFn = "ctx/forget"
CtxGetAccFn = "ctx/getAcc"
CtxGetMptFn = "ctx/getMpt"
CtxGetAccountsFn = "getAccounts"
CtxGetColumnFn = "getColumn"
CtxNewColFn = "ctx/newColumn"
ErrorPrintFn = "$$"
EthAccRecastFn = "recast"
KvtDelFn = "kvt/del"
KvtForgetFn = "kvt/forget"
KvtGetFn = "kvt/get"
KvtLenFn = "kvt/len"
KvtGetOrEmptyFn = "kvt/getOrEmpty"
KvtHasKeyFn = "kvt/hasKey"
KvtPairsIt = "kvt/pairs"
KvtPutFn = "kvt/put"
KvtDelFn = "del"
KvtForgetFn = "forget"
KvtGetFn = "get"
KvtGetOrEmptyFn = "getOrEmpty"
KvtHasKeyFn = "hasKey"
KvtLenFn = "len"
KvtPairsIt = "pairs"
KvtPutFn = "put"
MptDeleteFn = "mpt/delete"
MptFetchFn = "mpt/fetch"
MptFetchOrEmptyFn = "mpt/fetchOrEmpty"
MptForgetFn = "mpt/forget"
MptGetColFn = "mpt/getColumn"
MptHasPathFn = "mpt/hasPath"
MptMergeFn = "mpt/merge"
MptPairsIt = "mpt/pairs"
MptReplicateIt = "mpt/replicate"
MptStateFn = "mpt/state"
TxCommitFn = "commit"
TxDisposeFn = "dispose"
@ -109,12 +113,6 @@ func toStr*(w: Hash256): string =
proc toStr*(e: CoreDbErrorRef): string =
$e.error & "(" & e.parent.methods.errorPrintFn(e) & ")"
proc toStr*(p: CoreDbColRef): string =
let
w = if p.isNil or not p.ready: "nil" else: p.parent.methods.colPrintFn(p)
(a,b) = if 0 < w.len and w[0] == '(': ("","") else: ("(",")")
"Col" & a & w & b
func toLenStr*(w: openArray[byte]): string =
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
else: "openArray[" & $w.len & "]"
@ -142,9 +140,6 @@ proc toStr*(rc: CoreDbRc[Blob]): string =
proc toStr*(rc: CoreDbRc[Hash256]): string =
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"
proc toStr*(rc: CoreDbRc[CoreDbColRef]): string =
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"
proc toStr*(rc: CoreDbRc[set[CoreDbCaptFlags]]): string =
if rc.isOk: "ok(" & rc.value.toStr & ")" else: "err(" & rc.error.toStr & ")"

View File

@ -15,9 +15,6 @@ import
eth/common,
../../aristo/aristo_profile
from ../../aristo
import PayloadRef
# Annotation helpers
{.pragma: noRaise, gcsafe, raises: [].}
{.pragma: apiRaise, gcsafe, raises: [CoreDbApiError].}
@ -46,13 +43,8 @@ type
address*: EthAddress ## Reverse reference for storage trie path
nonce*: AccountNonce ## Some `uint64` type
balance*: UInt256
storage*: CoreDbColRef ## Implies storage root MPT (aka column)
codeHash*: Hash256
CoreDbPayloadRef* = ref object of PayloadRef
## Extension of `Aristo` payload used in the tracer
blob*: Blob ## Serialised version for accounts data
CoreDbErrorCode* = enum
Unset = 0
Unspecified
@ -71,13 +63,12 @@ type
RlpException
RootNotFound
RootUnacceptable
StoNotFound
StorageFailed
TxPending
CoreDbColType* = enum
CtStorage = 0
CtAccounts
CtGeneric
CtGeneric = 2 # column types with ordinal below 2 are not served as generic columns
CtReceipts
CtTxs
CtWithdrawals
@ -90,11 +81,6 @@ type
# Sub-descriptor: Misc methods for main descriptor
# --------------------------------------------------
CoreDbBaseDestroyFn* = proc(eradicate = true) {.noRaise.}
CoreDbBaseColStateFn* = proc(
col: CoreDbColRef): CoreDbRc[Hash256] {.noRaise.}
CoreDbBaseColStateEmptyFn* = proc(
col: CoreDbColRef): CoreDbRc[bool] {.noRaise.}
CoreDbBaseColPrintFn* = proc(vid: CoreDbColRef): string {.noRaise.}
CoreDbBaseErrorPrintFn* = proc(e: CoreDbErrorRef): string {.noRaise.}
CoreDbBaseLevelFn* = proc(): int {.noRaise.}
CoreDbBaseNewKvtFn* = proc(): CoreDbRc[CoreDbKvtRef] {.noRaise.}
@ -111,9 +97,6 @@ type
CoreDbBaseFns* = object
destroyFn*: CoreDbBaseDestroyFn
colStateFn*: CoreDbBaseColStateFn
colStateEmptyFn*: CoreDbBaseColStateEmptyFn
colPrintFn*: CoreDbBaseColPrintFn
errorPrintFn*: CoreDbBaseErrorPrintFn
levelFn*: CoreDbBaseLevelFn
@ -160,24 +143,19 @@ type
# --------------------------------------------------
# Sub-descriptor: MPT context methods
# --------------------------------------------------
CoreDbCtxNewColFn* = proc(
cCtx: CoreDbCtxRef; colType: CoreDbColType; colState: Hash256; address: Opt[EthAddress];
): CoreDbRc[CoreDbColRef] {.noRaise.}
CoreDbCtxGetMptFn* = proc(
cCtx: CoreDbCtxRef; root: CoreDbColRef): CoreDbRc[CoreDbMptRef] {.noRaise.}
CoreDbCtxGetAccFn* = proc(
cCtx: CoreDbCtxRef; root: CoreDbColRef): CoreDbRc[CoreDbAccRef] {.noRaise.}
CoreDbCtxGetColumnFn* = proc(
cCtx: CoreDbCtxRef; colType: CoreDbColType; clearData: bool): CoreDbMptRef {.noRaise.}
CoreDbCtxGetAccountsFn* = proc(cCtx: CoreDbCtxRef): CoreDbAccRef {.noRaise.}
CoreDbCtxForgetFn* = proc(cCtx: CoreDbCtxRef) {.noRaise.}
CoreDbCtxFns* = object
## Methods for context manipulation
newColFn*: CoreDbCtxNewColFn
getMptFn*: CoreDbCtxGetMptFn
getAccFn*: CoreDbCtxGetAccFn
forgetFn*: CoreDbCtxForgetFn
getColumnFn*: CoreDbCtxGetColumnFn
getAccountsFn*: CoreDbCtxGetAccountsFn
forgetFn*: CoreDbCtxForgetFn
# --------------------------------------------------
# Sub-descriptor: generic Mpt/hexary trie methods
# Sub-descriptor: generic Mpt methods
# --------------------------------------------------
CoreDbMptBackendFn* = proc(cMpt: CoreDbMptRef): CoreDbMptBackendRef {.noRaise.}
CoreDbMptFetchFn* =
@ -188,11 +166,8 @@ type
proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbMptMergeFn* =
proc(cMpt: CoreDbMptRef, k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbMptMergeAccountFn* =
proc(cMpt: CoreDbMptRef, k: openArray[byte]; v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
CoreDbMptHasPathFn* = proc(cMpt: CoreDbMptRef, k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
CoreDbMptGetColFn* = proc(cMpt: CoreDbMptRef): CoreDbColRef {.noRaise.}
CoreDbMptForgetFn* = proc(cMpt: CoreDbMptRef): CoreDbRc[void] {.noRaise.}
CoreDbMptStateFn* = proc(cMpt: CoreDbMptRef, updateOk: bool): CoreDbRc[Hash256] {.noRaise.}
CoreDbMptFns* = object
## Methods for trie objects
@ -201,30 +176,49 @@ type
deleteFn*: CoreDbMptDeleteFn
mergeFn*: CoreDbMptMergeFn
hasPathFn*: CoreDbMptHasPathFn
getColFn*: CoreDbMptGetColFn
stateFn*: CoreDbMptStateFn
# ----------------------------------------------------
# Sub-descriptor: Mpt/hexary trie methods for accounts
# Sub-descriptor: Account column methods
# ------------------------------------------------------
CoreDbAccGetMptFn* = proc(cAcc: CoreDbAccRef): CoreDbRc[CoreDbMptRef] {.noRaise.}
CoreDbAccBackendFn* = proc(cAcc: CoreDbAccRef): CoreDbAccBackendRef {.noRaise.}
CoreDbAccFetchFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
CoreDbAccDeleteFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[void] {.noRaise.}
CoreDbAccStoDeleteFn* = proc(cAcc: CoreDbAccRef,k: EthAddress): CoreDbRc[void] {.noRaise.}
CoreDbAccClearStorageFn* = proc(cAcc: CoreDbAccRef,k: EthAddress): CoreDbRc[void] {.noRaise.}
CoreDbAccMergeFn* = proc(cAcc: CoreDbAccRef, v: CoreDbAccount): CoreDbRc[void] {.noRaise.}
CoreDbAccHasPathFn* = proc(cAcc: CoreDbAccRef, k: EthAddress): CoreDbRc[bool] {.noRaise.}
CoreDbAccGetColFn* = proc(cAcc: CoreDbAccRef): CoreDbColRef {.noRaise.}
CoreDbAccStateFn* = proc(cAcc: CoreDbAccRef, updateOk: bool): CoreDbRc[Hash256] {.noRaise.}
CoreDbSlotFetchFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress; k: openArray[byte]): CoreDbRc[Blob] {.noRaise.}
CoreDbSlotDeleteFn* =
proc(cAcc: CoreDbAccRef,a: EthAddress; k: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbSlotHasPathFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress; k: openArray[byte]): CoreDbRc[bool] {.noRaise.}
CoreDbSlotMergeFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress; k, v: openArray[byte]): CoreDbRc[void] {.noRaise.}
CoreDbSlotStateFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress; updateOk: bool): CoreDbRc[Hash256] {.noRaise.}
CoreDbSlotStateEmptyFn* =
proc(cAcc: CoreDbAccRef, a: EthAddress): CoreDbRc[bool] {.noRaise.}
CoreDbAccFns* = object
## Methods for trie objects
getMptFn*: CoreDbAccGetMptFn
fetchFn*: CoreDbAccFetchFn
deleteFn*: CoreDbAccDeleteFn
stoDeleteFn*: CoreDbAccStoDeleteFn
mergeFn*: CoreDbAccMergeFn
hasPathFn*: CoreDbAccHasPathFn
getColFn*: CoreDbAccGetColFn
backendFn*: CoreDbAccBackendFn
fetchFn*: CoreDbAccFetchFn
clearStorageFn*: CoreDbAccClearStorageFn
deleteFn*: CoreDbAccDeleteFn
hasPathFn*: CoreDbAccHasPathFn
mergeFn*: CoreDbAccMergeFn
stateFn*: CoreDbAccStateFn
slotFetchFn*: CoreDbSlotFetchFn
slotDeleteFn*: CoreDbSlotDeleteFn
slotHasPathFn*: CoreDbSlotHasPathFn
slotMergeFn*: CoreDbSlotMergeFn
slotStateFn*: CoreDbSlotStateFn
slotStateEmptyFn*: CoreDbSlotStateEmptyFn
# --------------------------------------------------
# Sub-descriptor: Transaction frame management
@ -282,6 +276,10 @@ type
## Backend wrapper for direct backend access
parent*: CoreDbRef
CoreDbAccBackendRef* = ref object of RootRef
## Backend wrapper for direct backend access
parent*: CoreDbRef
CoreDbKvtRef* = ref object of RootRef
## Statically initialised Key-Value pair table living in `CoreDbRef`
parent*: CoreDbRef
@ -304,12 +302,6 @@ type
parent*: CoreDbRef
methods*: CoreDbAccFns
CoreDbColRef* = ref object of RootRef
## Generic state root: `Hash256` for legacy, `VertexID` for Aristo. This
## object makes only sense in the context of an *MPT*.
parent*: CoreDbRef
ready*: bool ## Must be set `true` to enable
CoreDbTxRef* = ref object of RootRef
## Transaction descriptor derived from `CoreDbRef`
parent*: CoreDbRef

View File

@ -13,7 +13,8 @@ import
./base_desc
type
EphemMethodsDesc = CoreDbKvtBackendRef | CoreDbMptBackendRef | CoreDbColRef
EphemMethodsDesc =
CoreDbKvtBackendRef | CoreDbMptBackendRef | CoreDbAccBackendRef
MethodsDesc =
CoreDbKvtRef |
@ -29,8 +30,6 @@ type
proc validateMethodsDesc(base: CoreDbBaseFns) =
doAssert not base.destroyFn.isNil
doAssert not base.colStateFn.isNil
doAssert not base.colPrintFn.isNil
doAssert not base.errorPrintFn.isNil
doAssert not base.levelFn.isNil
doAssert not base.newKvtFn.isNil
@ -51,9 +50,8 @@ proc validateMethodsDesc(kvt: CoreDbKvtFns) =
doAssert not kvt.forgetFn.isNil
proc validateMethodsDesc(ctx: CoreDbCtxFns) =
doAssert not ctx.newColFn.isNil
doAssert not ctx.getMptFn.isNil
doAssert not ctx.getAccFn.isNil
doAssert not ctx.getAccountsFn.isNil
doAssert not ctx.getColumnFn.isNil
doAssert not ctx.forgetFn.isNil
proc validateMethodsDesc(fns: CoreDbMptFns) =
@ -62,24 +60,26 @@ proc validateMethodsDesc(fns: CoreDbMptFns) =
doAssert not fns.deleteFn.isNil
doAssert not fns.mergeFn.isNil
doAssert not fns.hasPathFn.isNil
doAssert not fns.getColFn.isNil
doAssert not fns.stateFn.isNil
proc validateMethodsDesc(fns: CoreDbAccFns) =
doAssert not fns.getMptFn.isNil
doAssert not fns.backendFn.isNil
doAssert not fns.fetchFn.isNil
doAssert not fns.clearStorageFn.isNil
doAssert not fns.deleteFn.isNil
doAssert not fns.stoDeleteFn.isNil
doAssert not fns.mergeFn.isNil
doAssert not fns.hasPathFn.isNil
doAssert not fns.getColFn.isNil
doAssert not fns.mergeFn.isNil
doAssert not fns.stateFn.isNil
doAssert not fns.slotFetchFn.isNil
doAssert not fns.slotDeleteFn.isNil
doAssert not fns.slotHasPathFn.isNil
doAssert not fns.slotMergeFn.isNil
doAssert not fns.slotStateFn.isNil
doAssert not fns.slotStateEmptyFn.isNil
# ------------
proc validateMethodsDesc(col: CoreDbColRef) =
doAssert not col.isNil
doAssert not col.parent.isNil
doAssert col.ready == true
proc validateMethodsDesc(e: CoreDbErrorRef) =
doAssert e.error != CoreDbErrorCode(0)
doAssert not e.isNil

View File

@ -42,7 +42,7 @@ iterator pairs*(kvt: CoreDbKvtRef): (Blob, Blob) {.apiRaise.} =
of AristoDbVoid:
for k,v in kvt.aristoKvtPairsVoid():
yield (k,v)
else:
of Ooops, AristoDbRocks:
raiseAssert: "Unsupported database type: " & $kvt.parent.dbType
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed
@ -54,14 +54,24 @@ iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
of AristoDbMemory, AristoDbRocks, AristoDbVoid:
for k,v in mpt.aristoMptPairs():
yield (k,v)
else:
of Ooops:
raiseAssert: "Unsupported database type: " & $mpt.parent.dbType
mpt.ifTrackNewApi:
let trie = mpt.methods.getColFn()
debug newApiTxt, api, elapsed, trie
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed
iterator slotPairs*(acc: CoreDbAccRef; eAddr: EthAddress): (Blob, Blob) =
## Storage slot traversal for the account indexed by `eAddr`
##
acc.setTrackNewApi AccSlotPairsIt
case acc.parent.dbType:
of AristoDbMemory, AristoDbRocks, AristoDbVoid:
for k,v in acc.aristoSlotPairs(eAddr):
yield (k,v)
of Ooops:
raiseAssert: "Unsupported database type: " & $acc.parent.dbType
acc.ifTrackNewApi: debug newApiTxt, api, elapsed
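# A minimal sketch of iterating over an account's storage area (the yielded
# keys are hashed slot numbers which the ledger code further below resolves
# back to slot numbers via a key-value lookup table):
# ::
#   for (slotHash, value) in acc.slotPairs eAddr:
#     discard # `value` holds the RLP encoded slot content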
iterator replicate*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
## Low level trie dump, only supported for `CoreDbMptRef`
## Low level trie dump, only supported for a non-persistent `CoreDbMptRef`
##
mpt.setTrackNewApi MptReplicateIt
case mpt.parent.dbType:
@ -71,11 +81,9 @@ iterator replicate*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} =
of AristoDbVoid:
for k,v in aristoReplicateVoid(mpt):
yield (k,v)
else:
of Ooops, AristoDbRocks:
raiseAssert: "Unsupported database type: " & $mpt.parent.dbType
mpt.ifTrackNewApi:
let trie = mpt.methods.getColFn()
debug newApiTxt, api, elapsed, trie
mpt.ifTrackNewApi: debug newApiTxt, api, elapsed
# ------------------------------------------------------------------------------
# End

View File

@ -116,26 +116,23 @@ iterator getBlockTransactionData*(
db: CoreDbRef;
transactionRoot: Hash256;
): Blob =
const info = "getBlockTransactionData()"
block body:
if transactionRoot == EMPTY_ROOT_HASH:
break body
let
ctx = db.ctx
col = ctx.newColumn(CtTxs, transactionRoot).valueOr:
warn logTxt "getBlockTransactionData()",
transactionRoot, action="newColumn()", `error`=($$error)
break body
transactionDb = ctx.getMpt(col).valueOr:
warn logTxt "getBlockTransactionData()", transactionRoot,
action="newMpt()", col=($$col), error=($$error)
break body
transactionDb = db.ctx.getColumn CtTxs
state = transactionDb.state(updateOk=true).valueOr:
raiseAssert info & ": " & $$error
if state != transactionRoot:
warn logTxt info, transactionRoot, state, error="state mismatch"
break body
var transactionIdx = 0'u64
while true:
let transactionKey = rlp.encode(transactionIdx)
let data = transactionDb.fetch(transactionKey).valueOr:
if error.error != MptNotFound:
warn logTxt "getBlockTransactionData()", transactionRoot,
warn logTxt info, transactionRoot,
transactionKey, action="fetch()", error=($$error)
break body
yield data
@ -165,20 +162,17 @@ iterator getWithdrawalsData*(
db: CoreDbRef;
withdrawalsRoot: Hash256;
): Blob =
const info = "getWithdrawalsData()"
block body:
if withdrawalsRoot == EMPTY_ROOT_HASH:
break body
let
ctx = db.ctx
col = ctx.newColumn(CtWithdrawals, withdrawalsRoot).valueOr:
warn logTxt "getWithdrawalsData()",
withdrawalsRoot, action="newColumn()", error=($$error)
break body
wddb = ctx.getMpt(col).valueOr:
warn logTxt "getWithdrawalsData()",
withdrawalsRoot, action="newMpt()", col=($$col), error=($$error)
break body
wddb = db.ctx.getColumn CtWithdrawals
state = wddb.state(updateOk=true).valueOr:
raiseAssert info & ": " & $$error
if state != withdrawalsRoot:
warn logTxt info, withdrawalsRoot, state, error="state mismatch"
break body
var idx = 0
while true:
let wdKey = rlp.encode(idx.uint)
@ -196,20 +190,17 @@ iterator getReceipts*(
receiptsRoot: Hash256;
): Receipt
{.gcsafe, raises: [RlpError].} =
const info = "getReceipts()"
block body:
if receiptsRoot == EMPTY_ROOT_HASH:
break body
let
ctx = db.ctx
col = ctx.newColumn(CtReceipts, receiptsRoot).valueOr:
warn logTxt "getWithdrawalsData()",
receiptsRoot, action="newColumn()", error=($$error)
break body
receiptDb = ctx.getMpt(col).valueOr:
warn logTxt "getWithdrawalsData()",
receiptsRoot, action="getMpt()", col=($$col), error=($$error)
break body
receiptDb = db.ctx.getColumn CtReceipts
state = receiptDb.state(updateOk=true).valueOr:
raiseAssert info & ": " & $$error
if state != receiptsRoot:
warn logTxt info, receiptsRoot, state, error="state mismatch"
break body
var receiptIdx = 0
while true:
let receiptKey = rlp.encode(receiptIdx.uint)
@ -347,15 +338,16 @@ proc getSavedStateBlockNumber*(
## the `relax` argument can be set `true` so this function also returns
## zero if the state consistency check fails.
##
const info = "getSavedStateBlockNumber(): "
var header: BlockHeader
let st = db.ctx.getMpt(CtGeneric).backend.toAristoSavedStateBlockNumber()
let st = db.ctx.getColumn(CtGeneric).backend.toAristoSavedStateBlockNumber()
if db.getBlockHeader(st.blockNumber, header):
discard db.ctx.newColumn(CtAccounts,header.stateRoot).valueOr:
if relax:
return
raiseAssert "getSavedStateBlockNumber(): state mismatch at " &
"#" & $st.blockNumber
return st.blockNumber
let state = db.ctx.getAccounts.state.valueOr:
raiseAssert info & $$error
if state == header.stateRoot:
return st.blockNumber
if not relax:
raiseAssert info & ": state mismatch at " & "#" & $st.blockNumber
proc getBlockHeader*(
db: CoreDbRef;
@ -548,7 +540,7 @@ proc persistTransactions*(
return
let
mpt = db.ctx.getMpt(CtTxs)
mpt = db.ctx.getColumn(CtTxs, clearData=true)
kvt = db.newKvt()
for idx, tx in transactions:
@ -592,23 +584,23 @@ proc getTransaction*(
const
info = "getTransaction()"
let
ctx = db.ctx
col = ctx.newColumn(CtTxs, txRoot).valueOr:
warn logTxt info, txRoot, action="newColumn()", error=($$error)
return false
mpt = ctx.getMpt(col).valueOr:
warn logTxt info,
txRoot, action="newMpt()", col=($$col), error=($$error)
clearOk = txRoot == EMPTY_ROOT_HASH
mpt = db.ctx.getColumn(CtTxs, clearData=clearOk)
if not clearOk:
let state = mpt.state(updateOk=true).valueOr:
raiseAssert info & ": " & $$error
if state != txRoot:
warn logTxt info, txRoot, state, error="state mismatch"
return false
let
txData = mpt.fetch(rlp.encode(txIndex)).valueOr:
if error.error != MptNotFound:
warn logTxt info, txIndex, action="fetch()", error=($$error)
warn logTxt info, txIndex, error=($$error)
return false
try:
res = rlp.decode(txData, Transaction)
except RlpError as exc:
warn logTxt info,
txRoot, action="rlp.decode()", col=($$col), error=exc.msg
except RlpError as e:
warn logTxt info, txRoot, action="rlp.decode()", name=($e.name), msg=e.msg
return false
true
@ -619,13 +611,13 @@ proc getTransactionCount*(
const
info = "getTransactionCount()"
let
ctx = db.ctx
col = ctx.newColumn(CtTxs, txRoot).valueOr:
warn logTxt info, txRoot, action="newColumn()", error=($$error)
return 0
mpt = ctx.getMpt(col).valueOr:
warn logTxt info, txRoot,
action="newMpt()", col=($$col), error=($$error)
clearOk = txRoot == EMPTY_ROOT_HASH
mpt = db.ctx.getColumn(CtTxs, clearData=clearOk)
if not clearOk:
let state = mpt.state(updateOk=true).valueOr:
raiseAssert info & ": " & $$error
if state != txRoot:
warn logTxt info, txRoot, state, error="state mismatch"
return 0
var txCount = 0
while true:
@ -676,11 +668,10 @@ proc persistWithdrawals*(
const info = "persistWithdrawals()"
if withdrawals.len == 0:
return
let mpt = db.ctx.getMpt(CtWithdrawals)
let mpt = db.ctx.getColumn(CtWithdrawals, clearData=true)
for idx, wd in withdrawals:
mpt.merge(rlp.encode(idx.uint), rlp.encode(wd)).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)
warn logTxt info, idx, error=($$error)
return
proc getWithdrawals*(
@ -847,8 +838,7 @@ proc persistReceipts*(
const info = "persistReceipts()"
if receipts.len == 0:
return
let mpt = db.ctx.getMpt(CtReceipts)
let mpt = db.ctx.getColumn(CtReceipts, clearData=true)
for idx, rec in receipts:
mpt.merge(rlp.encode(idx.uint), rlp.encode(rec)).isOkOr:
warn logTxt info, idx, action="merge()", error=($$error)

View File

@ -32,7 +32,7 @@ func nLayersKeys*(db: KvtDbRef): int =
# ------------------------------------------------------------------------------
func layersLen*(db: KvtDbRef; key: openArray[byte]|seq[byte]): Opt[int] =
## Return `true` id the argument key is cached.
## Returns the size of the value associated with `key`.
##
when key isnot seq[byte]:
let key = @key
@ -47,7 +47,7 @@ func layersLen*(db: KvtDbRef; key: openArray[byte]|seq[byte]): Opt[int] =
Opt.none(int)
func layersHasKey*(db: KvtDbRef; key: openArray[byte]|seq[byte]): bool =
## Return `true` id the argument key is cached.
## Return `true` if the argument key is cached.
##
db.layersLen(key).isSome()

View File

@ -18,7 +18,7 @@
import
eth/common,
./core_db,
./ledger/[base_iterators, distinct_ledgers, accounts_ledger]
./ledger/[base_iterators, accounts_ledger]
import
./ledger/base except LedgerApiTxt, beginTrackApi, bless, ifTrackApi
@ -27,7 +27,6 @@ export
AccountsLedgerRef,
base,
base_iterators,
distinct_ledgers,
init
# ------------------------------------------------------------------------------

View File

@ -18,24 +18,22 @@
## + renamed from `RefAccount`
## + the `statement` entry is sort of a superset of an `Account` object
## - contains an `EthAddress` field
## - the storage root hash is generalised as a `CoreDbTrieRef` object
##
## * `AccountsLedgerRef`
## + renamed from `AccountsCache`
##
import
stew/keyed_queue,
std/[tables, hashes, sets],
std/[tables, hashes, sets, typetraits],
chronicles,
eth/[common, rlp],
results,
stew/keyed_queue,
../../stateless/multi_keys,
"../.."/[constants, utils/utils],
../access_list as ac_access_list,
".."/[core_db, storage_types, transient_storage],
../../evm/code_bytes,
./distinct_ledgers
".."/[core_db, storage_types, transient_storage]
export code_bytes
@ -73,7 +71,7 @@ type
codeTouched*: bool
AccountsLedgerRef* = ref object
ledger: AccountLedger
ledger: CoreDbAccRef # AccountLedger
kvt: CoreDbKvtRef
savePoint: LedgerSavePoint
witnessCache: Table[EthAddress, WitnessData]
@ -141,24 +139,20 @@ template logTxt(info: static[string]): static[string] =
proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint {.gcsafe.}
# FIXME-Adam: this is only necessary because of my sanity checks on the latest rootHash;
# take this out once those are gone.
proc rawTrie*(ac: AccountsLedgerRef): AccountLedger = ac.ledger
func newCoreDbAccount(address: EthAddress): CoreDbAccount =
CoreDbAccount(
address: address,
nonce: emptyEthAccount.nonce,
balance: emptyEthAccount.balance,
codeHash: emptyEthAccount.codeHash,
storage: CoreDbColRef(nil))
codeHash: emptyEthAccount.codeHash)
proc resetCoreDbAccount(ac: AccountsLedgerRef, v: var CoreDbAccount) =
ac.ledger.freeStorage v.address
const info = "resetCoreDbAccount(): "
ac.ledger.clearStorage(v.address).isOkOr:
raiseAssert info & $$error
v.nonce = emptyEthAccount.nonce
v.balance = emptyEthAccount.balance
v.codeHash = emptyEthAccount.codeHash
v.storage = nil
template noRlpException(info: static[string]; code: untyped) =
try:
@ -169,8 +163,15 @@ template noRlpException(info: static[string]; code: untyped) =
# The AccountsLedgerRef is modeled after TrieDatabase for it's transaction style
proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef,
root: KeccakHash): AccountsLedgerRef =
const info = "AccountsLedgerRef.init(): "
new result
result.ledger = AccountLedger.init(db, root)
result.ledger = db.ctx.getAccounts()
if root != EMPTY_ROOT_HASH:
let rc = result.ledger.state(updateOk=true)
if rc.isErr:
raiseAssert info & $$rc.error
if rc.value != root:
raiseAssert info & ": wrong account state"
result.kvt = db.newKvt() # save manually in `persist()`
result.witnessCache = Table[EthAddress, WitnessData]()
discard result.beginSavepoint
@ -180,11 +181,13 @@ proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef): AccountsLedgerRef =
# Renamed `rootHash()` => `state()`
proc state*(ac: AccountsLedgerRef): KeccakHash =
const info = "state(): "
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
# make sure all cache already committed
doAssert(ac.isDirty == false)
ac.ledger.state
ac.ledger.state.valueOr:
raiseAssert info & $$error
proc isTopLevelClean*(ac: AccountsLedgerRef): bool =
## Getter, returns `true` if all pending data have been commited.
@ -316,7 +319,9 @@ proc originalStorageValue(
return val[]
# Not in the original values cache - go to the DB.
let rc = StorageLedger.init(ac.ledger, acc.statement).fetch slot
let
slotKey = slot.toBytesBE.keccakHash.data
rc = ac.ledger.slotFetch(acc.statement.address, slotKey)
if rc.isOk and 0 < rc.value.len:
noRlpException "originalStorageValue()":
result = rlp.decode(rc.value, UInt256)
@ -364,6 +369,8 @@ proc persistCode(acc: AccountRef, ac: AccountsLedgerRef) =
codeHash=acc.statement.codeHash, error=($$rc.error)
proc persistStorage(acc: AccountRef, ac: AccountsLedgerRef) =
const info = "persistStorage(): "
if acc.overlayStorage.len == 0:
# TODO: remove the storage too if we figure out
# how to create 'virtual' storage room for each account
@ -374,17 +381,22 @@ proc persistStorage(acc: AccountRef, ac: AccountsLedgerRef) =
# Make sure that there is an account address row on the database. This is
# needed for saving the account-linked storage column on the Aristo database.
if acc.statement.storage.isNil:
ac.ledger.merge(acc.statement)
var storageLedger = StorageLedger.init(ac.ledger, acc.statement)
ac.ledger.merge(acc.statement).isOkOr:
raiseAssert info & $$error
# Save `overlayStorage[]` on database
for slot, value in acc.overlayStorage:
let slotKey = slot.toBytesBE.keccakHash.data
if value > 0:
let encodedValue = rlp.encode(value)
storageLedger.merge(slot, encodedValue)
ac.ledger.slotMerge(
acc.statement.address, slotKey, encodedValue).isOkOr:
raiseAssert info & $$error
else:
storageLedger.delete(slot)
ac.ledger.slotDelete(acc.statement.address, slotKey).isOkOr:
if error.error != StoNotFound:
raiseAssert info & $$error
discard
let
key = slot.toBytesBE.keccakHash.data.slotHashToSlotKey
rc = ac.kvt.put(key.toOpenArray, rlp.encode(slot))
@ -399,16 +411,6 @@ proc persistStorage(acc: AccountRef, ac: AccountsLedgerRef) =
acc.originalStorage.del(slot)
acc.overlayStorage.clear()
# Changing the storage trie might also change the `storage` descriptor when
# the trie changes from empty to existing or vice versa.
acc.statement.storage = storageLedger.getColumn()
# No need to hold descriptors for longer than needed
let stateEmpty = acc.statement.storage.stateEmpty.valueOr:
raiseAssert "Storage column state error: " & $$error
if stateEmpty:
acc.statement.storage = CoreDbColRef(nil)
proc makeDirty(ac: AccountsLedgerRef, address: EthAddress, cloneStorage = true): AccountRef =
ac.isDirty = true
@ -502,7 +504,7 @@ proc contractCollision*(ac: AccountsLedgerRef, address: EthAddress): bool =
return
acc.statement.nonce != 0 or
acc.statement.codeHash != EMPTY_CODE_HASH or
not acc.statement.storage.stateEmptyOrVoid
not ac.ledger.slotStateEmptyOrVoid(address)
proc accountExists*(ac: AccountsLedgerRef, address: EthAddress): bool =
let acc = ac.getAccount(address, false)
@ -580,18 +582,20 @@ proc setStorage*(ac: AccountsLedgerRef, address: EthAddress, slot, value: UInt25
acc.flags.incl StorageChanged
proc clearStorage*(ac: AccountsLedgerRef, address: EthAddress) =
const info = "clearStorage(): "
# a.k.a createStateObject. If there is an existing account with
# the given address, it is overwritten.
let acc = ac.getAccount(address)
acc.flags.incl {Alive, NewlyCreated}
let empty = acc.statement.storage.stateEmpty.valueOr: return
let empty = ac.ledger.slotStateEmpty(address).valueOr: return
if not empty:
# need to clear the storage from the database first
let acc = ac.makeDirty(address, cloneStorage = false)
ac.ledger.freeStorage address
acc.statement.storage = CoreDbColRef(nil)
ac.ledger.clearStorage(address).isOkOr:
raiseAssert info & $$error
# update caches
if acc.originalStorage.isNil.not:
# also clear originalStorage cache, otherwise
@ -661,6 +665,8 @@ proc clearEmptyAccounts(ac: AccountsLedgerRef) =
proc persist*(ac: AccountsLedgerRef,
clearEmptyAccount: bool = false,
clearCache = false) =
const info = "persist(): "
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
@ -679,9 +685,12 @@ proc persist*(ac: AccountsLedgerRef,
# storageRoot must be updated first
# before persisting account into merkle trie
acc.persistStorage(ac)
ac.ledger.merge(acc.statement)
ac.ledger.merge(acc.statement).isOkOr:
raiseAssert info & $$error
of Remove:
ac.ledger.delete acc.statement.address
ac.ledger.delete(acc.statement.address).isOkOr:
if error.error != AccNotFound:
raiseAssert info & $$error
ac.savePoint.cache.del acc.statement.address
of DoNothing:
# dead man tell no tales
@ -716,27 +725,29 @@ iterator accounts*(ac: AccountsLedgerRef): Account =
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
for _, account in ac.savePoint.cache:
yield account.statement.recast().value
yield ac.ledger.recast(account.statement, updateOk=true).value
iterator pairs*(ac: AccountsLedgerRef): (EthAddress, Account) =
# make sure all savepoint already committed
doAssert(ac.savePoint.parentSavepoint.isNil)
for address, account in ac.savePoint.cache:
yield (address, account.statement.recast().value)
yield (address, ac.ledger.recast(account.statement, updateOk=true).value)
iterator storage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) =
iterator storage*(
ac: AccountsLedgerRef;
eAddr: EthAddress;
): (UInt256, UInt256) =
# beware that if the account not persisted,
# the storage root will not be updated
let acc = ac.getAccount(address, false)
if not acc.isNil:
noRlpException "storage()":
for slotHash, value in ac.ledger.storage acc.statement:
if slotHash.len == 0: continue
let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
if rc.isErr:
warn logTxt "storage()", slotHash, error=($$rc.error)
else:
yield (rlp.decode(rc.value, UInt256), rlp.decode(value, UInt256))
noRlpException "storage()":
for (slotHash, value) in ac.ledger.slotPairs eAddr:
echo ">>> storage: ", slotHash.toHex, ":", value.toHex
let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
if rc.isErr:
warn logTxt "storage()", slotHash, error=($$rc.error)
else:
yield (rlp.decode(rc.value, UInt256), rlp.decode(value, UInt256))
iterator cachedStorage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) =
let acc = ac.getAccount(address, false)
@ -750,7 +761,7 @@ proc getStorageRoot*(ac: AccountsLedgerRef, address: EthAddress): Hash256 =
# the storage root will not be updated
let acc = ac.getAccount(address, false)
if acc.isNil: EMPTY_ROOT_HASH
else: acc.statement.storage.state.valueOr: EMPTY_ROOT_HASH
else: ac.ledger.slotState(address).valueOr: EMPTY_ROOT_HASH
proc update(wd: var WitnessData, acc: AccountRef) =
# once the code is touched make sure it doesn't get reset back to false in another update
@ -844,7 +855,7 @@ proc getEthAccount*(ac: AccountsLedgerRef, address: EthAddress): Account =
return emptyEthAccount
## Convert to legacy object, will throw an assert if that fails
let rc = acc.statement.recast()
let rc = ac.ledger.recast(acc.statement)
if rc.isErr:
raiseAssert "getAccount(): cannot convert account: " & $$rc.error
rc.value

View File

@ -353,11 +353,6 @@ proc getAccessList*(ldg: LedgerRef): AccessList =
proc rootHash*(ldg: LedgerRef): KeccakHash =
ldg.state()
proc getMpt*(ldg: LedgerRef): CoreDbMptRef =
ldg.beginTrackApi LdgGetMptFn
result = ldg.ac.rawTrie.CoreDbAccRef.getMpt
ldg.ifTrackApi: debug apiTxt, api, elapsed, result
proc getEthAccount*(ldg: LedgerRef, eAddr: EthAddress): Account =
ldg.beginTrackApi LdgGetAthAccountFn
result = ldg.ac.getEthAccount(eAddr)

View File

@ -45,7 +45,6 @@ type
LdgGetCodeHashFn = "getCodeHash"
LdgGetCodeSizeFn = "getCodeSize"
LdgGetCommittedStorageFn = "getCommittedStorage"
LdgGetMptFn = "getMpt"
LdgGetNonceFn = "getNonce"
LdgGetStorageFn = "getStorage"
LdgGetStorageRootFn = "getStorageRoot"

View File

@ -1,225 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
# The point of this file is just to give a little more type-safety
# and clarity to our use of SecureHexaryTrie, by having distinct
# types for the big trie containing all the accounts and the little
# tries containing the storage for an individual account.
#
# It's nice to have all the accesses go through "getAccountBytes"
# rather than just "get" (which is hard to search for). Plus we
# may want to put in assertions to make sure that the nodes for
# the account are all present (in stateless mode), etc.
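# The pattern described above is a thin `distinct` wrapper plus purpose-named
# accessors. A minimal sketch of that idea, assuming the nim-eth
# `SecureHexaryTrie` API as re-exported via `eth/trie`; the names follow the
# `getAccountBytes` example mentioned above and are illustrative only:
#
#   import eth/common, eth/trie
#
#   type
#     AccountsTrie = distinct SecureHexaryTrie
#       ## Accounts-only view of the big trie.
#
#   proc getAccountBytes(trie: AccountsTrie, address: EthAddress): seq[byte] =
#     # Same lookup as the generic `get`, but call sites read as account
#     # fetches and are easy to search for.
#     SecureHexaryTrie(trie).get(address)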
{.push raises: [].}
## Re-write of `distinct_tries.nim` to be imported into `accounts_ledger.nim`
## for using new database API.
##
import
std/[algorithm, sequtils, strutils, tables, typetraits],
chronicles,
eth/common,
results,
".."/[core_db, storage_types]
type
AccountLedger* = distinct CoreDbAccRef
StorageLedger* = distinct CoreDbMptRef
SomeLedger* = AccountLedger | StorageLedger
const
EnableMptDump = false # or true
## Provide database dumper. Note that the dump function needs to link
## against the `rocksdb` library. The dependency lies in the import of
## `aristo_debug`.
# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------
proc toSvp*(sl: StorageLedger): seq[(UInt256,UInt256)] =
## Dump as slot id-value pair sequence
let
db = sl.distinctBase.parent
save = db.trackNewApi
db.trackNewApi = false
defer: db.trackNewApi = save
let kvt = db.newKvt
var kvp: Table[UInt256,UInt256]
try:
for (slotHash,val) in sl.distinctBase.pairs:
let rc = kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
if rc.isErr:
warn "StorageLedger.dump()", slotHash, error=($$rc.error)
else:
kvp[rlp.decode(rc.value,UInt256)] = rlp.decode(val,UInt256)
except CatchableError as e:
raiseAssert "Ooops(" & $e.name & "): " & e.msg
kvp.keys.toSeq.sorted.mapIt((it,kvp.getOrDefault(it,high UInt256)))
proc toStr*(w: seq[(UInt256,UInt256)]): string =
"[" & w.mapIt("(" & it[0].toHex & "," & it[1].toHex & ")").join(", ") & "]"
when EnableMptDump:
import
eth/trie,
stew/byteutils,
../aristo,
../aristo/aristo_debug
proc dump*(led: SomeLedger): string =
## Dump database (beware of large backend)
let db = led.distinctBase.parent
if db.dbType notin CoreDbPersistentTypes:
# Memory based storage only
let be = led.distinctBase.backend
if db.isAristo:
let adb = be.toAristo()
if not adb.isNil:
return adb.pp(kMapOk=false,backendOK=true)
# Oops
"<" & $db.dbType & ">"
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc db*(led: SomeLedger): CoreDbRef =
led.distinctBase.parent
proc state*(led: SomeLedger): Hash256 =
when SomeLedger is AccountLedger:
const info = "AccountLedger/state(): "
else:
const info = "StorageLedger/state(): "
let rc = led.distinctBase.getColumn().state()
if rc.isErr:
raiseAssert info & $$rc.error
rc.value
proc getColumn*(led: SomeLedger): CoreDbColRef =
led.distinctBase.getColumn()
# ------------------------------------------------------------------------------
# Public functions: accounts ledger
# ------------------------------------------------------------------------------
proc init*(
T: type AccountLedger;
db: CoreDbRef;
root: Hash256;
): T =
const
info = "AccountLedger.init(): "
let
ctx = db.ctx
col = block:
let rc = ctx.newColumn(CtAccounts, root)
if rc.isErr:
raiseAssert info & $$rc.error
rc.value
mpt = block:
let rc = ctx.getAcc(col)
if rc.isErr:
raiseAssert info & $$rc.error
rc.value
mpt.T
proc fetch*(al: AccountLedger; eAddr: EthAddress): Result[CoreDbAccount,void] =
## Using `fetch()` for trie data retrieval
let rc = al.distinctBase.fetch(eAddr)
if rc.isErr:
return err()
ok rc.value
proc merge*(al: AccountLedger; account: CoreDbAccount) =
## Using `merge()` for trie data storage
const info = "AccountLedger/merge(): "
al.distinctBase.merge(account).isOkOr:
raiseAssert info & $$error
proc freeStorage*(al: AccountLedger, eAddr: EthAddress) =
const info = "AccountLedger/freeStorage()"
# Flush associated storage trie
al.distinctBase.stoDelete(eAddr).isOkOr:
raiseAssert info & $$error
proc delete*(al: AccountLedger, eAddr: EthAddress) =
const info = "AccountLedger/delete()"
# Delete account and associated storage tree (if any)
al.distinctBase.delete(eAddr).isOkOr:
if error.error == MptNotFound:
return
raiseAssert info & $$error
# ------------------------------------------------------------------------------
# Public functions: storage ledger
# ------------------------------------------------------------------------------
proc init*(
T: type StorageLedger;
al: AccountLedger;
account: CoreDbAccount;
): T =
## Storage trie constructor.
##
const
info = "StorageLedger/init(): "
let
db = al.distinctBase.parent
stt = account.storage
ctx = db.ctx
trie = if stt.isNil: ctx.newColumn(account.address) else: stt
mpt = block:
let rc = ctx.getMpt(trie)
if rc.isErr:
raiseAssert info & $$rc.error
rc.value
mpt.T
proc fetch*(sl: StorageLedger, slot: UInt256): Result[Blob,void] =
var rc = sl.distinctBase.fetch(slot.toBytesBE.keccakHash.data)
if rc.isErr:
return err()
ok move(rc.value)
proc merge*(sl: StorageLedger, slot: UInt256, value: openArray[byte]) =
const info = "StorageLedger/merge(): "
sl.distinctBase.merge(slot.toBytesBE.keccakHash.data, value).isOkOr:
raiseAssert info & $$error
proc delete*(sl: StorageLedger, slot: UInt256) =
const info = "StorageLedger/delete(): "
sl.distinctBase.delete(slot.toBytesBE.keccakHash.data).isOkOr:
if error.error == MptNotFound:
return
raiseAssert info & $$error
iterator storage*(
al: AccountLedger;
account: CoreDbAccount;
): (Blob,Blob) =
## For a given account, iterate over its storage slots
const
info = "storage(): "
let col = account.storage
if not col.isNil:
let mpt = al.distinctBase.parent.ctx.getMpt(col).valueOr:
raiseAssert info & $$error
for (key,val) in mpt.pairs:
yield (key,val)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -89,7 +89,7 @@ proc calculateTransactionData(
## - root of transactions trie
## - list of transactions hashes
## - total size of transactions in block
var tr = newCoreDbRef(DefaultDbMemory).ctx.getMpt(CtGeneric)
var tr = newCoreDbRef(DefaultDbMemory).ctx.getColumn(CtGeneric)
var txHashes: seq[TxOrHash]
var txSize: uint64
for i, t in items:
@ -97,7 +97,7 @@ proc calculateTransactionData(
txSize = txSize + uint64(len(tx))
tr.merge(rlp.encode(i), tx).expect "merge data"
txHashes.add(txOrHash toFixedBytes(keccakHash(tx)))
let rootHash = tr.getColumn().state().expect "hash"
let rootHash = tr.state(updateOk = true).expect "hash"
(rootHash, txHashes, txSize)
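For context, the hunk above follows the usual transactions-trie convention: each encoded transaction is merged under its RLP-encoded index, and the column state becomes the block's txRoot. A condensed sketch reusing the same calls (the helper name `txRootOf` is hypothetical and assumes the surrounding module's `core_db` and `rlp` imports):
proc txRootOf(db: CoreDbRef, txs: openArray[seq[byte]]): etypes.Hash256 =
  # Key each encoded transaction by rlp(index), then read the column state.
  let tr = db.ctx.getColumn(CtGeneric)
  for i, tx in txs:
    tr.merge(rlp.encode(i), tx).expect "merge data"
  tr.state(updateOk = true).expect "hash"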
func blockHeaderSize(payload: ExecutionData, txRoot: etypes.Hash256): uint64 =

View File

@ -328,15 +328,14 @@ proc verifyAsmResult(vmState: BaseVMState, boa: Assembler, asmResult: CallResult
stateDB.persist()
let
al = AccountLedger.init(com.db, EMPTY_ROOT_HASH)
al = com.db.ctx.getAccounts()
acc = al.fetch(codeAddress).expect "Valid Account Handle"
sl = StorageLedger.init(al, acc)
for kv in boa.storage:
let key = kv[0].toHex()
let val = kv[1].toHex()
let slot = UInt256.fromBytesBE kv[0]
let data = sl.fetch(slot).valueOr: EmptyBlob
let slotKey = UInt256.fromBytesBE(kv[0]).toBytesBE.keccakHash.data
let data = al.slotFetch(codeAddress, slotKey).valueOr: EmptyBlob
let actual = data.toHex
let zerosLen = 64 - (actual.len)
let value = repeat('0', zerosLen) & actual

View File

@ -251,7 +251,7 @@ proc mergeDummyAccLeaf*(
# Add a dummy entry so the balancer logic can be triggered
let
acc = AristoAccount(nonce: nonce.AccountNonce)
rc = db.mergeAccountPayload(pathID.uint64.toBytesBE, acc)
rc = db.mergeAccountRecord(pathID.uint64.toBytesBE, acc)
if rc.isOk:
ok()
else:

View File

@ -59,13 +59,9 @@ proc parseEnv(node: JsonNode): TestEnv =
result.pre = node["pre"]
proc rootExists(db: CoreDbRef; root: Hash256): bool =
let
ctx = db.ctx
col = ctx.newColumn(CtAccounts, root).valueOr:
return false
ctx.getAcc(col).isOkOr:
let state = db.ctx.getAccounts().state(updateOk=true).valueOr:
return false
true
state == root
proc executeCase(node: JsonNode): bool =
let

View File

@ -187,13 +187,13 @@ proc runTrial3(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) =
ledger.commit(accTx)
ledger.persist()
block:
block body2:
let accTx = ledger.beginSavepoint
ledger.modBalance(eAddr)
if rollback:
ledger.rollback(accTx)
break
break body2
ledger.commit(accTx)
ledger.persist()
@ -259,13 +259,13 @@ proc runTrial4(env: TestEnv, ledger: LedgerRef; inx: int; rollback: bool) =
ledger.commit(accTx)
ledger.persist()
block:
block body3:
let accTx = ledger.beginSavepoint
ledger.modBalance(eAddr)
if rollback:
ledger.rollback(accTx)
break
break body3
ledger.commit(accTx)
ledger.persist()

View File

@ -85,7 +85,7 @@ proc verifySlotProof(trustedStorageRoot: Web3Hash, slot: StorageProof): MptProof
proc persistFixtureBlock(chainDB: CoreDbRef) =
let header = getBlockHeader4514995()
# Manually inserting header to avoid any parent checks
chainDB.kvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
discard chainDB.newKvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
chainDB.addBlockNumberToHashLookup(header)
chainDB.persistTransactions(header.number, getBlockBody4514995().transactions)
chainDB.persistReceipts(getReceipts4514995())
@ -155,7 +155,7 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
txs = [signedTx1, signedTx2]
com.db.persistTransactions(blockNumber, txs)
let txRoot = com.db.ctx.getMpt(CtTxs).getColumn().state().valueOr(EMPTY_ROOT_HASH)
let txRoot = com.db.ctx.getColumn(CtTxs).state(updateOk=true).valueOr(EMPTY_ROOT_HASH)
vmState.receipts = newSeq[Receipt](txs.len)
vmState.cumulativeGasUsed = 0
@ -167,7 +167,7 @@ proc setupEnv(com: CommonRef, signer, ks2: EthAddress, ctx: EthContext): TestEnv
com.db.persistReceipts(vmState.receipts)
let
receiptRoot = com.db.ctx.getMpt(CtReceipts).getColumn().state().valueOr(EMPTY_ROOT_HASH)
receiptRoot = com.db.ctx.getColumn(CtReceipts).state(updateOk=true).valueOr(EMPTY_ROOT_HASH)
date = dateTime(2017, mMar, 30)
timeStamp = date.toTime.toUnix.EthTime
difficulty = com.calcDifficulty(timeStamp, parent)

View File

@ -34,7 +34,7 @@ proc preLoadAristoDb(cdb: CoreDbRef; jKvp: JsonNode; num: BlockNumber) =
txRoot: Hash256 # header with block number `num`
rcptRoot: Hash256 # ditto
let
adb = cdb.ctx.getMpt(CtGeneric).backend.toAristo
adb = cdb.ctx.getColumn(CtGeneric).backend.toAristo
kdb = cdb.newKvt.backend.toAristo
# Fill KVT and collect `proof` data