cache account payload (#2478)

Instead of caching just the storage ID, we can cache the full account payload,
which further reduces the number of expensive hikes (trie path traversals).
Jacek Sieka 2024-07-12 15:08:26 +02:00 committed by GitHub
parent d07540766f
commit 01ab209497
7 changed files with 78 additions and 58 deletions
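
The gist of the change: the per-account cache (both the long-lived LRU and the per-layer tables feeding it) now stores the whole leaf payload rather than only the storage root ID, so hot account reads can skip the trie hike entirely. A rough standalone sketch of the shape of this change, using simplified stand-in types rather than the real Aristo definitions:

    import std/tables

    type
      AccountKey = array[32, byte]   # hashed account path (stand-in)
      VertexID   = uint64            # storage trie root ID; 0 means "none"
      PayloadRef = ref object        # simplified account leaf payload
        nonce, balance: uint64
        stoID: VertexID

      # Before: only the storage ID was remembered per account path.
      StoIdCache   = Table[AccountKey, VertexID]
      # After: the full payload is remembered; the storage ID comes along
      # for free as one of its fields.
      PayloadCache = Table[AccountKey, PayloadRef]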

@@ -340,11 +340,12 @@ proc deleteAccountRecord*(
# Delete storage tree if present
if stoID.isValid:
? db.delSubTreeImpl stoID
db.layersPutStoID(accPath, VertexID(0))
db.deleteImpl(hike).isOkOr:
return err(error[1])
db.layersPutAccPayload(accPath, nil)
ok()
@@ -437,7 +438,7 @@ proc deleteStorageData*(
# De-register the deleted storage tree from the account record
let leaf = wpAcc.vtx.dup # Dup on modify
leaf.lData.stoID = VertexID(0)
db.layersPutStoID(accPath, VertexID(0))
db.layersPutAccPayload(accPath, leaf.lData)
db.layersPutVtx((accHike.root, wpAcc.vid), leaf)
db.layersResKey((accHike.root, wpAcc.vid))
ok(true)
@@ -468,7 +469,7 @@ proc deleteStorageTree*(
# De-register the deleted storage tree from the accounts record
let leaf = wpAcc.vtx.dup # Dup on modify
leaf.lData.stoID = VertexID(0)
db.layersPutStoID(accPath, VertexID(0))
db.layersPutAccPayload(accPath, leaf.lData)
db.layersPutVtx((accHike.root, wpAcc.vid), leaf)
db.layersResKey((accHike.root, wpAcc.vid))
ok()

@@ -82,15 +82,11 @@ proc deltaPersistent*(
be.putLstFn(writeBatch, lSst)
? be.putEndFn writeBatch # Finalise write batch
# Copy back updated payloads - these only change when storage is written to
# the first time or when storage is removed completely
for accPath, stoID in db.balancer.accSids:
# Copy back updated payloads
for accPath, pyl in db.balancer.accPyls:
let accKey = accPath.to(AccountKey)
if stoID.isValid:
if not db.accSids.lruUpdate(accKey, stoID):
discard db.accSids.lruAppend(accKey, stoID, accLruSize)
else:
db.accSids.del(accKey)
if not db.accPyls.lruUpdate(accKey, pyl):
discard db.accPyls.lruAppend(accKey, pyl, accLruSize)
# Update dudes and this descriptor
? updateSiblings.update().commit()
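
The write-back loop above follows the usual "update if present, otherwise append with eviction" LRU pattern. A minimal self-contained sketch of that pattern; the Lru type and its procs below are illustrative stand-ins, not the real KeyedQueue API:

    import std/tables

    const accLruSize = 3                  # tiny capacity, for illustration only

    type Lru[K, V] = object
      entries: OrderedTable[K, V]         # insertion order doubles as recency

    proc lruUpdate[K, V](q: var Lru[K, V]; k: K; v: V): bool =
      ## Refresh an entry that is already cached; false if it is not cached.
      if k notin q.entries:
        return false
      q.entries.del k                     # re-insert to mark it most recent
      q.entries[k] = v
      true

    proc lruAppend[K, V](q: var Lru[K, V]; k: K; v: V; maxLen: int): V =
      ## Add a new entry, evicting the oldest one when the cache is full.
      if q.entries.len >= maxLen:
        var oldest: K
        for key in q.entries.keys:
          oldest = key
          break
        q.entries.del oldest
      q.entries[k] = v
      v

    var accPyls: Lru[string, int]
    for item in [("a", 1), ("b", 2), ("a", 3), ("c", 4)]:
      if not accPyls.lruUpdate(item[0], item[1]):
        discard accPyls.lruAppend(item[0], item[1], accLruSize)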

@@ -84,12 +84,13 @@ type
# Debugging data below, might go away in future
xMap*: Table[HashKey,HashSet[RootedVertexID]] ## For pretty printing/debugging
accSids*: KeyedQueue[AccountKey, VertexID]
## Account path to storage id cache, for contract accounts - storage is
## frequently accessed by account path when contracts interact with it -
## this cache ensures that we don't have to re-travers the storage trie
## path for every such interaction - a better solution would probably be
## to cache this in a type exposed to the high-level API
accPyls*: KeyedQueue[AccountKey, PayloadRef]
## Account path to payload cache - accounts are frequently accessed by
## account path when contracts interact with them - this cache ensures
## that we don't have to re-traverse the storage trie for every such
## interaction
## TODO a better solution would probably be to cache this in a type
## exposed to the high-level API
AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].}
## Generic call back function/closure.
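
A detail worth keeping in mind about this cache: a nil payload acts as a tombstone (see layersPutAccPayload(accPath, nil) in the delete path above), so deleted or known-absent accounts are answered from the cache as well instead of triggering another trie traversal. A simplified sketch of that convention, with stand-in types:

    import std/tables

    type
      PayloadRef = ref object      # stand-in for the account leaf payload
        balance: uint64

      CacheAnswer = enum
        caFound                    # payload cached and valid
        caNotFound                 # cached tombstone: account known to be absent
        caMiss                     # not cached at all; fall back to the trie

    var accPyls: Table[string, PayloadRef]

    proc lookup(accPath: string): CacheAnswer =
      if accPath notin accPyls:
        return caMiss
      if accPyls[accPath].isNil:   # nil payload marks a deleted/absent account
        return caNotFound
      caFound

    accPyls["acct-1"] = PayloadRef(balance: 100)
    accPyls["acct-2"] = nil        # account deleted: cache the negative answer
    doAssert lookup("acct-1") == caFound
    doAssert lookup("acct-2") == caNotFound
    doAssert lookup("acct-3") == caMiss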

@@ -115,7 +115,7 @@ type
kMap*: Table[RootedVertexID,HashKey] ## Merkle hash key mapping
vTop*: VertexID ## Last used vertex ID
accSids*: Table[Hash256, VertexID] ## Account path -> stoID
accPyls*: Table[Hash256, PayloadRef] ## Account path -> VertexRef
LayerRef* = ref LayerObj
LayerObj* = object

@@ -55,6 +55,32 @@ proc retrievePayload(
return err(FetchPathNotFound)
proc retrieveAccountPayload(
db: AristoDbRef;
accPath: Hash256;
): Result[PayloadRef,AristoError] =
if (let pyl = db.layersGetAccPayload(accPath); pyl.isSome()):
if not pyl[].isValid():
return err(FetchPathNotFound)
return ok pyl[]
let accKey = accPath.to(AccountKey)
if (let pyl = db.accPyls.lruFetch(accKey); pyl.isSome()):
if not pyl[].isValid():
return err(FetchPathNotFound)
return ok pyl[]
# Updated payloads are stored in the layers so if we didn't find them there,
# it must have been in the database
let
payload = db.retrievePayload(VertexID(1), accPath.data).valueOr:
if error == FetchAccInaccessible:
discard db.accPyls.lruAppend(accKey, nil, accLruSize)
return err(FetchPathNotFound)
return err(error)
ok db.accPyls.lruAppend(accKey, payload, accLruSize)
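
The read path above in a nutshell: payloads touched by open transactions live in the layers and are checked first, then the LRU cache, and only then the backend trie, whose answer (including "not found") is folded back into the LRU. A condensed sketch of that fallback chain using plain tables and Option in place of the real Aristo API:

    import std/[options, tables]

    type PayloadRef = ref object           # stand-in account payload
      balance: uint64

    var
      layerPyls: Table[string, PayloadRef] # per-transaction overlay (tier 1)
      lruPyls: Table[string, PayloadRef]   # long-lived LRU cache (tier 2)
      database: Table[string, PayloadRef]  # backend trie stand-in (tier 3)

    proc toOpt(pyl: PayloadRef): Option[PayloadRef] =
      # nil entries double as cached "not found" answers
      if pyl.isNil: none(PayloadRef) else: some(pyl)

    proc retrieveAccountPayload(accPath: string): Option[PayloadRef] =
      # 1. Anything written by an open transaction is found in the layers.
      if accPath in layerPyls:
        return toOpt(layerPyls[accPath])
      # 2. Otherwise try the cache of previously read/persisted payloads.
      if accPath in lruPyls:
        return toOpt(lruPyls[accPath])
      # 3. Fall back to the backend and remember the outcome, hit or miss.
      let pyl = database.getOrDefault(accPath, nil)
      lruPyls[accPath] = pyl
      toOpt(pyl)
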
proc retrieveMerkleHash(
db: AristoDbRef;
root: VertexID;
@@ -79,9 +105,6 @@ proc hasPayload(
root: VertexID;
path: openArray[byte];
): Result[bool,AristoError] =
if path.len == 0:
return err(FetchPathInvalid)
let error = db.retrievePayload(root, path).errorOr:
return ok(true)
@@ -89,6 +112,17 @@ proc hasPayload(
return ok(false)
err(error)
proc hasAccountPayload(
db: AristoDbRef;
accPath: Hash256;
): Result[bool,AristoError] =
let error = db.retrieveAccountPayload(accPath).errorOr:
return ok(true)
if error == FetchPathNotFound:
return ok(false)
err(error)
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
@@ -120,20 +154,10 @@ proc fetchStorageID*(
db: AristoDbRef;
accPath: Hash256;
): Result[VertexID,AristoError] =
## Public helper function fro retrieving a storage (vertex) ID for a
## Public helper function for retrieving a storage (vertex) ID for a
## given account.
if (let stoID = db.layersGetStoID(accPath); stoID.isSome()):
if not stoID[].isValid():
return err(FetchPathNotFound)
return ok stoID[]
let accKey = accPath.to(AccountKey)
if (let stoID = db.accSids.lruFetch(accKey); stoID.isSome()):
return ok stoID[]
let
payload = db.retrievePayload(VertexID(1), accPath.data).valueOr:
payload = db.retrieveAccountPayload(accPath).valueOr:
if error == FetchAccInaccessible:
return err(FetchPathNotFound)
return err(error)
@@ -143,9 +167,8 @@ proc fetchStorageID*(
if not stoID.isValid:
return err(FetchPathNotFound)
# If we didn't find a cached storage ID in the layers, we must be using the
# database version which we cache here, even across database commits
ok db.accSids.lruAppend(accKey, stoID, accLruSize)
ok stoID
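
With the payload itself cached, the storage root is just a field of the cached value, which is why the separate accSids cache and its extra bookkeeping could be dropped here. As a sketch, again with stand-in types:

    import std/options

    type
      VertexID = uint64               # 0 is the invalid / "no storage" ID
      PayloadRef = ref object
        stoID: VertexID               # storage trie root, if any

    proc fetchStorageID(pyl: PayloadRef): Option[VertexID] =
      ## Deriving the storage root from an already-cached payload is a plain
      ## field read; no dedicated accPath -> stoID cache is needed anymore.
      if pyl.isNil or pyl.stoID == 0:
        return none(VertexID)         # no account, or account without storage
      some(pyl.stoID)

    doAssert fetchStorageID(PayloadRef(stoID: 7)).get() == 7
    doAssert fetchStorageID(PayloadRef(stoID: 0)).isNone
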
# ------------------------------------------------------------------------------
# Public functions
@@ -159,15 +182,15 @@ proc fetchLastSavedState*(
## `uint64` identifier (may be interpreted as block number.)
db.getLstUbe()
proc fetchAccountRecord*(
db: AristoDbRef;
accPath: Hash256;
): Result[AristoAccount,AristoError] =
## Fetch an account record from the database indexed by `accPath`.
##
let pyl = ? db.retrievePayload(VertexID(1), accPath.data)
let pyl = ? db.retrieveAccountPayload(accPath)
assert pyl.pType == AccountData # debugging only
ok pyl.account
proc fetchAccountState*(
@@ -184,8 +207,7 @@ proc hasPathAccount*(
## For an account record indexed by `accPath` query whether this record exists
## on the database.
##
db.hasPayload(VertexID(1), accPath.data)
db.hasAccountPayload(accPath)
proc fetchGenericData*(
db: AristoDbRef;
@@ -219,7 +241,6 @@ proc hasPathGeneric*(
? root.mustBeGeneric()
db.hasPayload(root, path)
proc fetchStorageData*(
db: AristoDbRef;
accPath: Hash256;

@@ -91,15 +91,16 @@ func layersGetKeyOrVoid*(db: AristoDbRef; rvid: RootedVertexID): HashKey =
## Simplified version of `layersGetKey()`
db.layersGetKey(rvid).valueOr: VOID_HASH_KEY
func layersGetStoID*(db: AristoDbRef; accPath: Hash256): Opt[VertexID] =
db.top.delta.accSids.withValue(accPath, item):
func layersGetAccPayload*(db: AristoDbRef; accPath: Hash256): Opt[PayloadRef] =
db.top.delta.accPyls.withValue(accPath, item):
return Opt.some(item[])
for w in db.rstack:
w.delta.accSids.withValue(accPath, item):
w.delta.accPyls.withValue(accPath, item):
return Opt.some(item[])
Opt.none(VertexID)
Opt.none(PayloadRef)
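
The lookup walks the current write layer first and then the stack of enclosing transaction frames, just as the replaced layersGetStoID did. A standalone sketch of that traversal, with simplified stand-in types in place of Hash256, PayloadRef and the real layer objects:

    import std/[options, tables]

    type
      Layer = object
        accPyls: Table[string, int]  # accPath -> payload (stand-ins)

      Db = object
        top: Layer                   # innermost, writable transaction frame
        rstack: seq[Layer]           # frames of the enclosing transactions

    proc layersGetAccPayload(db: Db; accPath: string): Option[int] =
      ## Check the working frame first, then the remaining frames; only a
      ## miss in all of them sends the caller on to the LRU cache and,
      ## failing that, the backend database.
      if accPath in db.top.accPyls:
        return some(db.top.accPyls[accPath])
      for w in db.rstack:
        if accPath in w.accPyls:
          return some(w.accPyls[accPath])
      none(int)
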
# ------------------------------------------------------------------------------
# Public functions: setter variants
@@ -146,8 +147,8 @@ proc layersUpdateVtx*(
db.layersResKey(rvid)
func layersPutStoID*(db: AristoDbRef; accPath: Hash256; stoID: VertexID) =
db.top.delta.accSids[accPath] = stoID
func layersPutAccPayload*(db: AristoDbRef; accPath: Hash256; pyl: PayloadRef) =
db.top.delta.accPyls[accPath] = pyl
# ------------------------------------------------------------------------------
# Public functions
@@ -164,8 +165,8 @@ func layersMergeOnto*(src: LayerRef; trg: var LayerObj) =
for (vid,key) in src.delta.kMap.pairs:
trg.delta.kMap[vid] = key
trg.delta.vTop = src.delta.vTop
for (accPath,stoID) in src.delta.accSids.pairs:
trg.delta.accSids[accPath] = stoID
for (accPath,pyl) in src.delta.accPyls.pairs:
trg.delta.accPyls[accPath] = pyl
func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
## Provide a collapsed copy of layers up to a particular transaction level.
@@ -181,7 +182,7 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
sTab: layers[0].delta.sTab.dup, # explicit dup for ref values
kMap: layers[0].delta.kMap,
vTop: layers[^1].delta.vTop,
accSids: layers[0].delta.accSids,
accPyls: layers[0].delta.accPyls,
))
# Consecutively merge other layers on top
@@ -190,8 +191,8 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
result.delta.sTab[vid] = vtx
for (vid,key) in layers[n].delta.kMap.pairs:
result.delta.kMap[vid] = key
for (accPath,stoID) in layers[n].delta.accSids.pairs:
result.delta.accSids[accPath] = stoID
for (accPath,pyl) in layers[n].delta.accPyls.pairs:
result.delta.accPyls[accPath] = pyl
# ------------------------------------------------------------------------------
# Public iterators

@@ -28,7 +28,7 @@ import
std/typetraits,
eth/common,
results,
"."/[aristo_desc, aristo_hike, aristo_layers, aristo_utils, aristo_vid],
"."/[aristo_desc, aristo_hike, aristo_layers, aristo_vid],
./aristo_merge/merge_payload_helper
const
@@ -58,6 +58,7 @@ proc mergeAccountRecord*(
pyl = PayloadRef(pType: AccountData, account: accRec)
rc = db.mergePayloadImpl(VertexID(1), accPath.data, pyl)
if rc.isOk:
db.layersPutAccPayload(accPath, pyl)
ok true
elif rc.error in MergeNoAction:
ok false
@@ -140,16 +141,15 @@ proc mergeStorageData*(
# Mark account path Merkle keys for update
resetKeys()
if stoID.isValid:
return ok()
else:
if not stoID.isValid:
# Make sure that there is an account that refers to that storage trie
let leaf = vtx.dup # Dup on modify
leaf.lData.stoID = useID
db.layersPutStoID(accPath, useID)
db.layersPutAccPayload(accPath, leaf.lData)
db.layersPutVtx((VertexID(1), touched[pos - 1]), leaf)
return ok()
return ok()
elif rc.error in MergeNoAction:
assert stoID.isValid # debugging only