Cache account path storage id (#2443)
The storage id is frequently accessed when executing contract code, and looking it up via the database requires several hops, making the process slow. Here, we add a cache that keeps the most recently used account storage IDs in memory. A possible future improvement would be to cache all account accesses so that, for example, updating the balance doesn't trigger several hikes.
parent 989f20a740 · commit 443c6d1f8e
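The diff below wires a `KeyedQueue`-based LRU into the storage-id lookup path. As a minimal sketch of the lookup-then-append pattern it relies on - simplified to string keys and int values, with a hypothetical `slowLookup` standing in for the multi-hop trie traversal and a tiny `cacheSize` instead of the real `accLruSize` - the idea is:

import results, stew/keyed_queue

const cacheSize = 4                  # the real cache uses accLruSize = 1024 * 1024

var cache: KeyedQueue[string, int]   # real code: KeyedQueue[AccountKey, VertexID]

proc slowLookup(accPath: string): int =
  # hypothetical stand-in for the multi-hop database traversal
  accPath.len

proc storageIdFor(accPath: string): int =
  # fast path: a hit also refreshes the entry's LRU position
  let hit = cache.lruFetch(accPath)
  if hit.isOk:
    return hit.value
  # slow path: resolve via the database, then remember the result;
  # lruAppend evicts the least recently used entry once cacheSize is exceeded
  cache.lruAppend(accPath, slowLookup(accPath), cacheSize)

echo storageIdFor("acc-1")           # miss: database lookup, result cached
echo storageIdFor("acc-1")           # hit: served from the cache

The same lookup-or-append shape appears in fetchStorageID below (lruFetch / lruAppend), while deltaPersistent refreshes or evicts entries once the balancer is written to the backend.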
@@ -346,6 +346,7 @@ proc deleteAccountRecord*(
  # Delete storage tree if present
  if stoID.isValid:
    ? db.delSubTreeImpl stoID
+   db.layersPutStoID(accPath, VertexID(0))

  db.deleteImpl(hike).isOkOr:
    return err(error[1])
@@ -442,6 +443,7 @@ proc deleteStorageData*(
  # De-register the deleted storage tree from the account record
  let leaf = wpAcc.vtx.dup # Dup on modify
  leaf.lData.stoID = VertexID(0)
+ db.layersPutStoID(accPath, VertexID(0))
  db.layersPutVtx(VertexID(1), wpAcc.vid, leaf)
  db.layersResKey(VertexID(1), wpAcc.vid)
  ok(true)
@@ -472,6 +474,7 @@ proc deleteStorageTree*(
  # De-register the deleted storage tree from the accounts record
  let leaf = wpAcc.vtx.dup # Dup on modify
  leaf.lData.stoID = VertexID(0)
+ db.layersPutStoID(accPath, VertexID(0))
  db.layersPutVtx(VertexID(1), wpAcc.vid, leaf)
  db.layersResKey(VertexID(1), wpAcc.vid)
  ok()
@@ -82,6 +82,16 @@ proc deltaPersistent*(
  be.putLstFn(writeBatch, lSst)
  ? be.putEndFn writeBatch # Finalise write batch

+ # Copy back updated payloads - these only change when storage is written to
+ # the first time or when storage is removed completely
+ for accPath, stoID in db.balancer.accSids:
+   let accKey = accPath.to(AccountKey)
+   if stoID.isValid:
+     if not db.accSids.lruUpdate(accKey, stoID):
+       discard db.accSids.lruAppend(accKey, stoID, accLruSize)
+   else:
+     db.accSids.del(accKey)
+
  # Update dudes and this descriptor
  ? updateSiblings.update().commit()
  ok()
@@ -23,6 +23,7 @@
import
  std/[hashes, sets, tables],
+ stew/keyed_queue,
  eth/common,
  results,
  ./aristo_constants,
@@ -33,7 +34,11 @@ from ./aristo_desc/desc_backend
# Not auto-exporting backend
export
-  aristo_constants, desc_error, desc_identifiers, desc_structural
+  aristo_constants, desc_error, desc_identifiers, desc_structural, keyed_queue
+
+const
+  accLruSize* = 1024 * 1024
+    # LRU cache size for accounts that have storage
+
type
  AristoTxRef* = ref object
@@ -58,6 +63,12 @@ type
    centre: AristoDbRef            ## Link to peer with write permission
    peers: HashSet[AristoDbRef]    ## List of all peers

+  AccountKey* = distinct ref Hash256
+    # `ref` version of the account path / key
+    # `KeyedQueue` is inefficient for large keys, so we have to use this ref
+    # workaround to not experience a memory explosion in the account cache
+    # TODO rework KeyedQueue to deal with large keys and/or heterogenous lookup
+
  AristoDbRef* = ref object
    ## Three tier database object supporting distributed instances.
    top*: LayerRef                 ## Database working layer, mutable
@@ -72,6 +83,13 @@ type
    # Debugging data below, might go away in future
    xMap*: Table[HashKey,HashSet[VertexID]]  ## For pretty printing/debugging

+    accSids*: KeyedQueue[AccountKey, VertexID]
+      ## Account path to storage id cache, for contract accounts - storage is
+      ## frequently accessed by account path when contracts interact with it -
+      ## this cache ensures that we don't have to re-traverse the storage trie
+      ## path for every such interaction - a better solution would probably be
+      ## to cache this in a type exposed to the high-level API
+
  AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].}
    ## Generic call back function/closure.
@@ -79,6 +97,17 @@
# Public helpers
# ------------------------------------------------------------------------------

+template hash*(a: AccountKey): Hash =
+  mixin hash
+  hash((ref Hash256)(a)[])
+
+template `==`*(a, b: AccountKey): bool =
+  mixin `==`
+  (ref Hash256)(a)[] == (ref Hash256)(b)[]
+
+template to*(a: Hash256, T: type AccountKey): T =
+  AccountKey((ref Hash256)(data: a.data))
+
func getOrVoid*[W](tab: Table[W,VertexRef]; w: W): VertexRef =
  tab.getOrDefault(w, VertexRef(nil))
@@ -112,6 +112,8 @@ type
    kMap*: Table[VertexID,HashKey]       ## Merkle hash key mapping
    vTop*: VertexID                      ## Last used vertex ID

+    accSids*: Table[Hash256, VertexID]   ## Account path -> stoID
+
  LayerFinalRef* = ref object
    ## Final tables fully supersede tables on lower layers when stacked as a
    ## whole. Missing entries on a higher layers are the final state (for the
@@ -17,7 +17,7 @@ import
  std/typetraits,
  eth/common,
  results,
-  "."/[aristo_compute, aristo_desc, aristo_get, aristo_hike]
+  "."/[aristo_compute, aristo_desc, aristo_get, aristo_layers, aristo_hike]

# ------------------------------------------------------------------------------
# Private functions
@@ -116,13 +116,22 @@ proc fetchAccountHike*(
  ok(move(hike))


proc fetchStorageID*(
    db: AristoDbRef;
    accPath: Hash256;
      ): Result[VertexID,AristoError] =
  ## Public helper function for retrieving a storage (vertex) ID for a
  ## given account.

+  if (let stoID = db.layersGetStoID(accPath); stoID.isSome()):
+    if not stoID[].isValid():
+      return err(FetchPathNotFound)
+    return ok stoID[]
+
+  let accKey = accPath.to(AccountKey)
+  if (let stoID = db.accSids.lruFetch(accKey); stoID.isSome()):
+    return ok stoID[]
+
  let
    payload = db.retrievePayload(VertexID(1), accPath.data).valueOr:
      if error == FetchAccInaccessible:
@@ -134,7 +143,9 @@ proc fetchStorageID*(
  if not stoID.isValid:
    return err(FetchPathNotFound)

-  ok stoID
+  # If we didn't find a cached storage ID in the layers, we must be using the
+  # database version which we cache here, even across database commits
+  ok db.accSids.lruAppend(accKey, stoID, accLruSize)

# ------------------------------------------------------------------------------
# Public functions
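Read together, the two fetchStorageID hunks above establish the lookup order the cache relies on: pending layers first (so uncommitted writes and deletions win), then the LRU cache, and only then the expensive trie traversal whose result gets cached. A self-contained sketch of that ordering, with hypothetical stand-in types (Db, overlay, backend) replacing the real layer and trie machinery:

import std/tables, results, stew/keyed_queue

const cacheSize = 1024

type Db = object
  overlay: Table[string, int]      # stand-in for the pending write layers
  backend: Table[string, int]      # stand-in for the persisted trie
  cache: KeyedQueue[string, int]   # stand-in for accSids

proc storageId(db: var Db, accPath: string): Opt[int] =
  # 1. Uncommitted layers always win; 0 plays the role of VertexID(0), i.e.
  #    a storage tree deleted in a pending layer
  db.overlay.withValue(accPath, v):
    return (if v[] != 0: Opt.some(v[]) else: Opt.none(int))
  # 2. The LRU cache keyed by account path; a hit refreshes the entry
  let hit = db.cache.lruFetch(accPath)
  if hit.isOk:
    return Opt.some(hit.value)
  # 3. The expensive backend lookup; the result is cached for next time
  db.backend.withValue(accPath, v):
    return Opt.some(db.cache.lruAppend(accPath, v[], cacheSize))
  Opt.none(int)

Because the cache is consulted only after the layers and is filled only from the backend path (and refreshed in deltaPersistent when the balancer is written out), stale cache entries cannot shadow a value sitting in an uncommitted layer.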
@@ -108,6 +108,16 @@ func layerGetProofVidOrVoid*(db: AristoDbRef; key: HashKey): VertexID =
  ## argument `key` refers to a link key of a registered proof node.
  db.top.final.fRpp.getOrVoid key

+func layersGetStoID*(db: AristoDbRef; accPath: Hash256): Opt[VertexID] =
+  db.top.delta.accSids.withValue(accPath, item):
+    return Opt.some(item[])
+
+  for w in db.rstack:
+    w.delta.accSids.withValue(accPath, item):
+      return Opt.some(item[])
+
+  Opt.none(VertexID)
+
# ------------------------------------------------------------------------------
# Public functions: setter variants
# ------------------------------------------------------------------------------
@@ -167,6 +177,9 @@ func layersPutProof*(
  db.top.final.pPrf.incl vid
  db.layersPutProof(vid, key)

+func layersPutStoID*(db: AristoDbRef; accPath: Hash256; stoID: VertexID) =
+  db.top.delta.accSids[accPath] = stoID
+
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
@@ -183,7 +196,8 @@ func layersMergeOnto*(src: LayerRef; trg: var LayerObj) =
  for (vid,key) in src.delta.kMap.pairs:
    trg.delta.kMap[vid] = key
  trg.delta.vTop = src.delta.vTop

+  for (accPath,stoID) in src.delta.accSids.pairs:
+    trg.delta.accSids[accPath] = stoID

func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
  ## Provide a collapsed copy of layers up to a particular transaction level.
@@ -199,7 +213,9 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
    delta: LayerDeltaRef(
      sTab: layers[0].delta.sTab.dup, # explicit dup for ref values
      kMap: layers[0].delta.kMap,
-      vTop: layers[^1].delta.vTop))
+      vTop: layers[^1].delta.vTop,
+      accSids: layers[0].delta.accSids,
+    ))

  # Consecutively merge other layers on top
  for n in 1 ..< layers.len:
@@ -207,6 +223,8 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
      result.delta.sTab[vid] = vtx
    for (vid,key) in layers[n].delta.kMap.pairs:
      result.delta.kMap[vid] = key
+    for (accPath,stoID) in layers[n].delta.accSids.pairs:
+      result.delta.accSids[accPath] = stoID

# ------------------------------------------------------------------------------
# Public iterators
@@ -136,6 +136,7 @@ proc mergeStorageData*(
    # Make sure that there is an account that refers to that storage trie
    let leaf = wpAcc.vtx.dup # Dup on modify
    leaf.lData.stoID = useID
+    db.layersPutStoID(accPath, useID)
    db.layersPutVtx(VertexID(1), wpAcc.vid, leaf)
    db.layersResKey(VertexID(1), wpAcc.vid)
    return ok()
@@ -419,7 +419,7 @@ proc mergePayloadUpdate(
  if vid in db.pPrf:
    return err(MergeLeafProofModeLock)

-  # Update accounts storage root which is handled implicitely
+  # Update accounts storage root which is handled implicitly
  if hike.root == VertexID(1):
    payload.stoID = leafLeg.wp.vtx.lData.stoID