nimbus-eth1/tests/test_aristo/test_tx_frame.nim
Jacek Sieka 4576727817
Introduce frame snapshots (#3098)
With the introduction of layered frames, each database lookup may result
in hundreds of table lookups as the frame stack is traversed.

This change restores performance by introducing snapshots to limit the
lookup depth at the expense of slightly increased memory usage.

The snapshot contains the cumulative changes of all ancestors and itself
allowing the lookup recursion to stop whenever it is encountered.
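
To illustrate, a minimal hypothetical sketch of the bounded traversal
(simplified string/int payloads instead of the real vertex types, and
deletions elided):

  import std/[options, tables]

  type Frame = ref object
    parent: Frame # nil for the base frame
    delta: Table[string, int] # changes made by this frame only
    snapshot: Option[Table[string, int]]
      # cumulative changes of all ancestors and this frame, when kept

  proc lookup(frame: Frame, key: string): Option[int] =
    var cur = frame
    while cur != nil:
      if cur.snapshot.isSome:
        # The snapshot already folds in this frame's delta and those of
        # all its ancestors, so the traversal can stop here.
        if key in cur.snapshot.get():
          return some(cur.snapshot.get()[key])
        return none(int) # not in memory - fall back to the database
      if key in cur.delta:
        return some(cur.delta[key])
      cur = cur.parent
    none(int) # reached the base without a snapshot - ask the database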

The number of snapshots to keep in memory is a tradeoff between lookup
performance and memory usage - this change starts with a simple strategy
of keeping snapshots for head frames (approximately).

The snapshot is created during checkpointing, ie after block validation,
to make sure that it's cheap to start verifying blocks - parent
snapshots are moved to the descendant as part of checkpointing which
effectively means that head frames hold snapshots in most cases.

The outcome of this tradeoff is that applying a block to a known head is
fast while creating a new branch of history remains expensive.
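
Reusing the hypothetical Frame type from the sketch above, the
hand-over could look roughly like this:

  proc checkpointSketch(frame: Frame) =
    # Move the parent's snapshot to this frame instead of rebuilding it,
    # then fold in this frame's own changes - head frames therefore end
    # up holding the snapshots in the common case. When the parent has
    # no snapshot, one would have to be built by a full traversal - the
    # expensive "new branch" case mentioned above.
    if frame.parent != nil and frame.parent.snapshot.isSome:
      var snap = frame.parent.snapshot.get()
      frame.parent.snapshot = none(Table[string, int])
      for k, v in frame.delta:
        snap[k] = v
      frame.snapshot = some(snap)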

Another consequence is that when persisting changes to disk, we must
re-traverse the stack of changes to build a cumulative set of changes to
be persisted.
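
Again in terms of the hypothetical Frame type above, this re-traversal
amounts to folding the per-frame deltas base-first:

  proc cumulativeChanges(frame: Frame): Table[string, int] =
    var stack: seq[Frame]
    var cur = frame
    while cur != nil: # collect the frames tip-first...
      stack.add cur
      cur = cur.parent
    for i in countdown(stack.high, 0): # ...then apply them base-first
      for k, v in stack[i].delta:
        result[k] = v # later frames overwrite earlier ones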

A future strategy might be to keep additional "keyframes" along the way,
ie one per epoch for example - this would bound the "branch creation"
cost to a constant factor, but memory overhead should first be
considered.

Another strategy might be to avoid keeping snapshots for non-canonical
branches, especially when they become older and thus less likely to be
branched from.

* `level` is updated to work like a temporary serial number to maintain
its relative position in the sorting order as frames are persisted
* a `snapshot` is added to some TxFrame instances - the snapshot
collects all ancestor changes up to and including the given frame.
`level` is used as a marker to prune the snapshot of changes that have
been persisted already (see the pruning sketch after this list).
* stack traversals for the purpose of lookup stop when they encounter a
snapshot - this bounds the lookup depth to the first encountered
snapshot
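
A hypothetical sketch of the pruning mentioned in the second bullet,
assuming each snapshot entry carries the level of the frame that
produced it (much like the (vertex, level) pairs that layersGetVtx
returns in the test below):

  import std/tables

  type SnapEntry = tuple[value: int, level: int]

  proc prune(snapshot: var Table[string, SnapEntry],
             persistedLevel: int) =
    var stale: seq[string]
    for k, e in snapshot:
      if e.level <= persistedLevel:
        stale.add k # already persisted - no need to keep it in memory
    for k in stale:
      snapshot.del k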

After this PR, sync performance lands at about 2-3 blocks per second
(~10x improvement) - this is quite reasonable when comparing with block
import which skips the expensive state root verification and thus
achieves ~20 blk/s on the same hardware. Additional work to bring live
syncing performance in line with disk-based block import would focus on
reducing state root verification cost.
2025-02-28 14:33:42 +01:00

# Nimbus
# Copyright (c) 2025 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

{.used.}

import
  unittest2,
  stew/endians2,
  results,
  eth/common/hashes,
  ../../execution_chain/db/aristo/[
    aristo_delete,
    aristo_desc,
    aristo_fetch,
    aristo_tx_frame,
    aristo_init/init_common,
    aristo_init/memory_only,
    aristo_layers,
    aristo_merge,
  ]

proc makeAccount(i: uint64): (Hash32, AristoAccount) =
  var path: Hash32
  path.data()[0 .. 7] = i.toBytesBE()
  (path, AristoAccount(balance: i.u256, codeHash: EMPTY_CODE_HASH))

const
  acc1 = makeAccount(1)
  acc2 = makeAccount(2)
  acc3 = makeAccount(3)

suite "Aristo TxFrame":
  setup:
    let db = AristoDbRef.init()

  test "Frames should independently keep data":
    let
      tx0 = db.txFrameBegin(db.baseTxFrame())
      tx1 = db.txFrameBegin(tx0)
      tx2 = db.txFrameBegin(tx1)
      tx2b = db.txFrameBegin(tx1)
      tx2c = db.txFrameBegin(tx1)

    check:
      tx0.mergeAccountRecord(acc1[0], acc1[1]).isOk()
      tx1.mergeAccountRecord(acc2[0], acc2[1]).isOk()
      tx2.deleteAccountRecord(acc2[0]).isOk()
      tx2b.deleteAccountRecord(acc1[0]).isOk()
      tx2c.mergeAccountRecord(acc2[0], acc3[1]).isOk()

    check:
      tx0.fetchAccountRecord(acc1[0]).isOk()
      tx0.fetchAccountRecord(acc2[0]).isErr() # Doesn't exist in tx0
      tx1.fetchAccountRecord(acc1[0]).isOk()
      tx1.fetchAccountRecord(acc2[0]).isOk() # Merged in tx1 itself
      tx2.fetchAccountRecord(acc1[0]).isOk()
      tx2.fetchAccountRecord(acc2[0]).isErr() # Doesn't exist in tx2
      tx2b.fetchAccountRecord(acc1[0]).isErr() # Doesn't exist in tx2b
      tx0.fetchAccountRecord(acc1[0]) == tx2.fetchAccountRecord(acc1[0])

      tx0.fetchStateRoot() != tx1.fetchStateRoot()
      tx0.fetchStateRoot() == tx2.fetchStateRoot()

    var acc1Hike: Hike
    check:
      tx2c.fetchAccountHike(acc1[0], acc1Hike).isOk()

      # The vid for acc1 gets created in tx1 because it has to move to a new
      # mpt node from the root - tx2c updates only data, so the level at which
      # we find the vtx should be one below tx2c!
      (
        tx2c.level -
        tx2c.layersGetVtx((VertexID(1), acc1Hike.legs[^1].wp.vid)).value()[1]
      ) == 1
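
    # Checkpointing assigns each frame its block number and, because
    # skipSnapshot is false, also creates the cumulative snapshot that
    # bounds later lookups (parent snapshots move to the descendant).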
    tx0.checkpoint(1, skipSnapshot = false)
    tx1.checkpoint(2, skipSnapshot = false)
    tx2.checkpoint(3, skipSnapshot = false)
    tx2b.checkpoint(3, skipSnapshot = false)
    tx2c.checkpoint(3, skipSnapshot = false)

    check:
      # Even after checkpointing, we should maintain the same relative levels
      (
        tx2c.level -
        tx2c.layersGetVtx((VertexID(1), acc1Hike.legs[^1].wp.vid)).value()[1]
      ) == 1
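
    # Persisting tx2 re-traverses the frame stack to fold the changes of
    # tx0..tx2 into a single cumulative batch for the backend.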
    let batch = db.putBegFn().expect("working batch")
    db.persist(batch, tx2)
    check:
      db.putEndFn(batch).isOk()

    db.finish()

    block:
      # using the same backend but new txRef and cache
      db.initInstance().expect("working backend")

      let tx = db.baseTxFrame()

      check:
        tx.fetchAccountRecord(acc1[0]).isOk()
        tx.fetchAccountRecord(acc2[0]).isErr() # Doesn't exist in tx2