Clear account cache after each block (#2411)
When processing long ranges of blocks, the account cache grows unbounded, which causes huge memory spikes. Here, we move the cache to a second-level cache after each block - the second-level cache is cleared on the next block after that, which creates a simple LRU effect. There's a small performance cost of course, though overall the freed-up memory can now be reassigned to the rocksdb row cache, which not only makes up for the loss but overall leads to a performance increase. The bump to 2gb of rocksdb row cache here needs more testing but is slightly less than, and loosely based on, the savings from this PR and the circular ref fix in #2408 - another way to phrase this is that it's better to give rocksdb more breathing room than let the memory sit unused until circular ref collection happens ;)
This commit is contained in:
parent
c79b0b8a47
commit
f294d1e086
|
@ -124,7 +124,11 @@ proc procBlkEpilogue(
|
|||
if vmState.collectWitnessData:
|
||||
db.collectWitnessData()
|
||||
|
||||
db.persist(clearEmptyAccount = vmState.determineFork >= FkSpurious)
|
||||
# Clearing the account cache here helps manage its size when replaying
|
||||
# large ranges of blocks, implicitly limiting its size using the gas limit
|
||||
db.persist(
|
||||
clearEmptyAccount = vmState.determineFork >= FkSpurious,
|
||||
clearCache = true)
|
||||
|
||||
if not skipValidation:
|
||||
let stateDB = vmState.stateDB
|
||||
|
|
|
@ -79,6 +79,9 @@ type
|
|||
witnessCache: Table[EthAddress, WitnessData]
|
||||
isDirty: bool
|
||||
ripemdSpecial: bool
|
||||
cache: Table[EthAddress, AccountRef]
|
||||
# Second-level cache for the ledger save point, which is cleared on every
|
||||
# persist
|
||||
code: KeyedQueue[Hash256, CodeBytesRef]
|
||||
## The code cache provides two main benefits:
|
||||
##
|
||||
|
@ -258,6 +261,11 @@ proc getAccount(
|
|||
return
|
||||
sp = sp.parentSavepoint
|
||||
|
||||
if ac.cache.pop(address, result):
|
||||
# Check second-level cache
|
||||
ac.savePoint.cache[address] = result
|
||||
return
|
||||
|
||||
# not found in cache, look into state trie
|
||||
let rc = ac.ledger.fetch address
|
||||
if rc.isOk:
|
||||
|
@ -651,7 +659,8 @@ proc clearEmptyAccounts(ac: AccountsLedgerRef) =
|
|||
ac.ripemdSpecial = false
|
||||
|
||||
proc persist*(ac: AccountsLedgerRef,
|
||||
clearEmptyAccount: bool = false) =
|
||||
clearEmptyAccount: bool = false,
|
||||
clearCache = false) =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
|
||||
|
@ -683,6 +692,11 @@ proc persist*(ac: AccountsLedgerRef,
|
|||
acc.flags = acc.flags - resetFlags
|
||||
ac.savePoint.dirty.clear()
|
||||
|
||||
if clearCache:
|
||||
# This overwrites the cache from the previous persist, providing a crude LRU
|
||||
# scheme with little overhead
|
||||
ac.cache = move(ac.savePoint.cache)
|
||||
|
||||
ac.savePoint.selfDestruct.clear()
|
||||
|
||||
# EIP2929
|
||||
|
|
|
@ -270,10 +270,10 @@ proc makeMultiKeys*(ldg: LedgerRef): MultiKeysRef =
|
|||
result = ldg.ac.makeMultiKeys()
|
||||
ldg.ifTrackApi: debug apiTxt, api, elapsed
|
||||
|
||||
proc persist*(ldg: LedgerRef, clearEmptyAccount = false) =
|
||||
proc persist*(ldg: LedgerRef, clearEmptyAccount = false, clearCache = false) =
|
||||
ldg.beginTrackApi LdgPersistFn
|
||||
ldg.ac.persist(clearEmptyAccount)
|
||||
ldg.ifTrackApi: debug apiTxt, api, elapsed, clearEmptyAccount
|
||||
ldg.ac.persist(clearEmptyAccount, clearCache)
|
||||
ldg.ifTrackApi: debug apiTxt, api, elapsed, clearEmptyAccount, clearCache
|
||||
|
||||
proc ripemdSpecial*(ldg: LedgerRef) =
|
||||
ldg.beginTrackApi LdgRipemdSpecialFn
|
||||
|
|
|
@ -18,7 +18,7 @@ const
|
|||
# https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
|
||||
defaultMaxOpenFiles* = 512
|
||||
defaultWriteBufferSize* = 64 * 1024 * 1024
|
||||
defaultRowCacheSize* = 512 * 1024 * 1024
|
||||
defaultRowCacheSize* = 2048 * 1024 * 1024
|
||||
defaultBlockCacheSize* = 256 * 1024 * 1024
|
||||
|
||||
type DbOptions* = object # Options that are transported to the database layer
|
||||
|
|
Loading…
Reference in New Issue