mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-03-01 04:10:45 +00:00
Introduce frame snapshots (#3098)
With the introduction of layered frames, each database lookup may result in hundreds of table lookups as the frame stack is traversed. This change restores performance by introducing snapshots that limit the lookup depth, at the expense of slightly increased memory usage. A snapshot contains the cumulative changes of all ancestors and the frame itself, allowing the lookup recursion to stop as soon as a snapshot is encountered.

The number of snapshots to keep in memory is a tradeoff between lookup performance and memory usage - this change starts with a simple strategy of keeping snapshots for head frames (approximately). The snapshot is created during checkpointing, ie after block validation, to make sure that it's cheap to start verifying blocks - parent snapshots are moved to the descendant as part of checkpointing, which effectively means that head frames hold snapshots in most cases. The outcome of this tradeoff is that applying a block to a known head is fast, while creating a new branch of history remains expensive. Another consequence is that when persisting changes to disk, we must re-traverse the stack of changes to build a cumulative set of changes to be persisted.

A future strategy might be to keep additional "keyframes" along the way, ie one per epoch - this would bound the "branch creation" cost to a constant factor, but memory overhead should first be considered. Another strategy might be to avoid keeping snapshots for non-canonical branches, especially when they become older and thus less likely to be branched from.

* `level` is updated to work like a temporary serial number, maintaining its relative position in the sorting order as frames are persisted
* a `snapshot` is added to some TxFrame instances - the snapshot collects all ancestor changes up to and including the given frame. `level` is used as a marker to prune the snapshot of changes that have already been persisted.
* stack traversals for the purpose of lookup stop when they encounter a snapshot - this bounds the lookup depth to the first encountered snapshot (see the sketch below)

After this PR, sync performance lands at about 2-3 blocks per second (~10x improvement) - this is quite reasonable when compared with block import, which skips the expensive state root verification and thus achieves ~20 blk/s on the same hardware. Additional work to bring live syncing performance in line with disk-based block import would focus on reducing state root verification cost.
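As an illustration of the bounded lookup - a minimal, self-contained Nim sketch using stand-in types (`Frame`, `changes`), not the actual Aristo API: the traversal walks from the queried frame towards the base and stops at the first frame that holds a snapshot, since the snapshot already folds in every older change.

import std/[options, tables]

type Frame = ref object
  parent: Frame
  changes: Table[string, int]          # per-frame delta (stands in for sTab/kMap)
  snapshot: Option[Table[string, int]] # cumulative changes of all ancestors + self

proc lookup(frame: Frame, key: string): Option[int] =
  var f = frame
  while f != nil:
    if f.snapshot.isSome:
      # Cumulative view - no need to visit older frames; a miss here would
      # fall through to the database in the real implementation.
      if key in f.snapshot.get:
        return some(f.snapshot.get[key])
      return none(int)
    if key in f.changes:
      return some(f.changes[key])
    f = f.parent
  none(int)

when isMainModule:
  let base = Frame(snapshot: some({"a": 1}.toTable))
  let head = Frame(parent: base, changes: {"b": 2}.toTable)
  assert lookup(head, "a").get == 1 # served by the base snapshot, depth bounded
  assert lookup(head, "b").get == 2 # served by the head frame's own delta

Without the snapshot, the same lookup would have to visit every intermediate frame between head and base.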
This commit is contained in:
parent
0f89c1f901
commit
4576727817
@@ -147,8 +147,12 @@ proc initializeDb(com: CommonRef) =
  txFrame.persistHeaderAndSetHead(com.genesisHeader,
    startOfHistory=com.genesisHeader.parentHash).
    expect("can persist genesis header")

  doAssert(canonicalHeadHashKey().toOpenArray in txFrame)

+  txFrame.checkpoint(com.genesisHeader.number)
+  com.db.persist(txFrame)
+
  # The database must at least contain the base and head pointers - the base
  # is implicitly considered finalized
  let
@@ -143,8 +143,12 @@ proc validateBlock(c: ForkedChainRef,
  c.writeBaggage(blk, blkHash, txFrame, receipts)

  # Block fully written to txFrame, mark it as such
+  # Checkpoint creates a snapshot of ancestor changes in txFrame - it is an
+  # expensive operation, especially when creating a new branch (ie when blk
+  # is being applied to a block that is currently not a head)
  txFrame.checkpoint(blk.header.number)

  c.updateBranch(parent, blk, blkHash, txFrame, move(receipts))

  for i, tx in blk.transactions:
@@ -102,7 +102,9 @@ proc checkpoint*(p: var Persister): Result[void, string] =
    )

  # Move in-memory state to disk
  p.vmState.ledger.txFrame.checkpoint(p.parent.number, skipSnapshot = true)
  p.com.db.persist(p.vmState.ledger.txFrame)

  # Get a new frame since the DB assumes ownership
  p.vmState.ledger.txFrame = p.com.db.baseTxFrame().txFrameBegin()

@@ -172,8 +174,6 @@ proc persistBlock*(p: var Persister, blk: Block): Result[void, string] =
  p.stats.txs += blk.transactions.len
  p.stats.gas += blk.header.gasUsed

-  txFrame.checkpoint(header.number)
-
  assign(p.parent, header)

  ok()
@@ -15,7 +15,7 @@ import
  chronicles,
  eth/common/[accounts_rlp, base_rlp, hashes_rlp],
  results,
-  "."/[aristo_desc, aristo_get, aristo_walk/persistent],
+  "."/[aristo_desc, aristo_get, aristo_tx_frame, aristo_walk/persistent],
  ./aristo_desc/desc_backend

type WriteBatch = tuple[writer: PutHdlRef, count: int, depth: int, prefix: uint64]

@@ -74,7 +74,7 @@ proc putKeyAtLevel(
    ## set (vertex data may have been committed to disk without computing the
    ## corresponding hash!)

-  if level == -2:
+  if level < db.db.baseTxFrame().level:
    ?batch.putVtx(db.db, rvid, vtx, key)

    if batch.count mod batchSize == 0:

@@ -90,16 +90,6 @@ proc putKeyAtLevel(

  ok()

-func maxLevel(cur, other: int): int =
-  # Compare two levels and return the topmost in the stack, taking into account
-  # the odd reversal of order around the zero point
-  if cur < 0:
-    max(cur, other) # >= 0 is always more topmost than <0
-  elif other < 0:
-    cur
-  else:
-    min(cur, other) # Here the order is reversed and 0 is the top layer
-
template encodeLeaf(w: var RlpWriter, pfx: NibblesBuf, leafData: untyped): HashKey =
  w.startList(2)
  w.append(pfx.toHexPrefix(isLeaf = true).data())

@@ -123,7 +113,7 @@ proc getKey(
    db: AristoTxRef, rvid: RootedVertexID, skipLayers: static bool
): Result[((HashKey, VertexRef), int), AristoError] =
  ok when skipLayers:
-    (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2)
+    (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), dbLevel)
  else:
    ?db.getKeyRc(rvid, {})
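For context on the `maxLevel` removal above: the old scheme mixed two orderings - non-negative levels counted down from the top of the stack (0 = topmost) while negative levels meant "stored on disk" - so picking the more recent of two levels needed special cases. With `level` now a growing serial number and `dbLevel` (-1) below every in-memory level, a plain `max` suffices. A tiny sketch with assumed values (not real frame levels):

const dbLevel = -1   # on-disk marker, below any in-memory frame level
let baseLevel = 0    # hypothetical level of the base frame
assert max(dbLevel, baseLevel) == baseLevel # in-memory data is newer than disk
assert max(3, 5) == 5                       # the younger of two frames wins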
@@ -178,7 +168,7 @@ proc computeKeyImpl(
          keyvtxl[1],
          skipLayers = skipLayers,
        )
-      level = maxLevel(level, sl)
+      level = max(level, sl)
      skey
    else:
      VOID_HASH_KEY

@@ -252,7 +242,7 @@ proc computeKeyImpl(
  template writeBranch(w: var RlpWriter): HashKey =
    w.encodeBranch(vtx):
      if subvid.isValid:
-        level = maxLevel(level, keyvtxs[n][1])
+        level = max(level, keyvtxs[n][1])
        keyvtxs[n][0][0]
      else:
        VOID_HASH_KEY

@@ -280,7 +270,7 @@ proc computeKeyImpl(
): Result[HashKey, AristoError] =
  let (keyvtx, level) =
    when skipLayers:
-      (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2)
+      (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), dbLevel)
    else:
      ?db.getKeyRc(rvid, {})
@@ -22,7 +22,7 @@
{.push raises: [].}

import
-  std/[hashes, sets, tables],
+  std/[hashes, sequtils, sets, tables],
  eth/common/hashes,
  results,
  ./aristo_constants,

@@ -73,6 +73,21 @@ type
    blockNumber*: Opt[uint64] ## Block number set when checkpointing the frame

+    snapshot*: Table[RootedVertexID, Snapshot]
+      ## Optional snapshot containing the cumulative changes from ancestors and
+      ## the current frame
+    snapshotLevel*: Opt[int] # base level when the snapshot was taken
+
+    level*: int
+      ## Ancestry level of frame, increases with age but otherwise meaningless -
+      ## used to order data by age when working with layers.
+      ## -1 = stored in database, where relevant though typically should be
+      ## compared with the base layer level instead.
+
+  Snapshot* = (VertexRef, HashKey, int)
+    ## Unlike sTab/kMap, snapshot contains both vertex and key since at the time
+    ## of writing, it's primarily used in contexts where both are present
+
  AristoDbRef* = ref object
    ## Backend interface.
    getVtxFn*: GetVtxFn ## Read vertex record

@@ -116,6 +131,8 @@ type
    legs*: ArrayBuf[NibblesBuf.high + 1, Leg] ## Chain of vertices and IDs
    tail*: NibblesBuf ## Portion of non completed path

+const dbLevel* = -1
+
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
@@ -181,52 +198,40 @@ func isValid*(sqv: HashSet[RootedVertexID]): bool =
# Public functions, miscellaneous
# ------------------------------------------------------------------------------

-# Hash set helper
-func hash*(db: AristoDbRef): Hash =
-  ## Table/KeyedQueue/HashSet mixin
-  cast[pointer](db).hash
+func hash*(db: AristoDbRef): Hash {.error.}
+func hash*(db: AristoTxRef): Hash {.error.}

# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------

-iterator stack*(tx: AristoTxRef): AristoTxRef =
-  # Stack going from base to tx
-  var frames: seq[AristoTxRef]
-  var tx = tx
-
-  while tx != nil:
-    frames.add tx
-    tx = tx.parent
-
-  while frames.len > 0:
-    yield frames.pop()
-
-iterator rstack*(tx: AristoTxRef): (AristoTxRef, int) =
-  # Stack in reverse order, ie going from tx to base
-  var tx = tx
-
-  var i = 0
-  while tx != nil:
-    let level = if tx.parent == nil: -1 else: i
-    yield (tx, level)
-    tx = tx.parent
-    i += 1
+iterator rstack*(tx: AristoTxRef, stopAtSnapshot = false): AristoTxRef =
+  # Stack in reverse order, ie going from tx to base
+  var tx = tx
+
+  while tx != nil:
+    yield tx
+
+    if stopAtSnapshot and tx.snapshotLevel.isSome():
+      break
+
+    tx = tx.parent
+
+iterator stack*(tx: AristoTxRef, stopAtSnapshot = false): AristoTxRef =
+  # Stack going from base to tx
+  var frames = toSeq(tx.rstack(stopAtSnapshot))
+
+  while frames.len > 0:
+    yield frames.pop()

proc deltaAtLevel*(db: AristoTxRef, level: int): AristoTxRef =
-  if level == -2:
+  if level < db.db.txRef.level:
    nil
-  elif level == -1:
-    db.db.txRef
  else:
-    var
-      frame = db
-      level = level
-
-    while level > 0:
-      frame = frame.parent
-      level -= 1
-
-    frame
+    for frame in db.rstack():
+      if frame.level == level:
+        return frame
+    nil

# ------------------------------------------------------------------------------
# End
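A self-contained sketch of the traversal pattern above, with local stand-in types rather than the Aristo ones: the reverse walk is the primitive - follow parent links, optionally stopping at the first snapshot - and the forward walk simply materializes the reverse walk and pops it.

import std/sequtils

type Tx = ref object
  parent: Tx
  hasSnapshot: bool
  name: string

iterator rstack(tx: Tx, stopAtSnapshot = false): Tx =
  # Walk from tx towards the base, optionally stopping at a snapshot
  var tx = tx
  while tx != nil:
    yield tx
    if stopAtSnapshot and tx.hasSnapshot:
      break
    tx = tx.parent

iterator stack(tx: Tx, stopAtSnapshot = false): Tx =
  # Walk from the base (or first snapshot) towards tx
  var frames = toSeq(tx.rstack(stopAtSnapshot))
  while frames.len > 0:
    yield frames.pop()

when isMainModule:
  let base = Tx(hasSnapshot: true, name: "base")
  let mid = Tx(parent: base, name: "mid")
  let head = Tx(parent: mid, name: "head")
  assert toSeq(head.rstack(stopAtSnapshot = true)).mapIt(it.name) == @["head", "mid", "base"]
  assert toSeq(head.stack()).mapIt(it.name) == @["base", "mid", "head"]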
@@ -69,7 +69,7 @@ proc getVtxRc*(
    else:
      return err(GetVtxNotFound)

-  ok (?db.db.getVtxBe(rvid, flags), -2)
+  ok (?db.db.getVtxBe(rvid, flags), dbLevel)

proc getVtx*(db: AristoTxRef; rvid: RootedVertexID, flags: set[GetVtxFlag] = {}): VertexRef =
  ## Cascaded attempt to fetch a vertex from the cache layers or the backend.

@@ -103,7 +103,7 @@ proc getKeyRc*(
      # The vertex is to be deleted. So is the value key.
      return err(GetKeyNotFound)

-  ok (?db.db.getKeyBe(rvid, flags), -2)
+  ok (?db.db.getKeyBe(rvid, flags), dbLevel)

proc getKey*(db: AristoTxRef; rvid: RootedVertexID): HashKey =
  ## Cascaded attempt to fetch a vertex from the cache layers or the backend.

@@ -83,7 +83,7 @@ proc finishSession*(hdl: TypedPutHdlRef; db: TypedBackendRef) =

proc initInstance*(db: AristoDbRef): Result[void, AristoError] =
  let vTop = ?db.getTuvFn()
-  db.txRef = AristoTxRef(db: db, vTop: vTop)
+  db.txRef = AristoTxRef(db: db, vTop: vTop, snapshotLevel: Opt.some(0))
  db.accLeaves = LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE)
  db.stoLeaves = LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE)
  ok()
@@ -25,9 +25,14 @@ func layersGetVtx*(db: AristoTxRef; rvid: RootedVertexID): Opt[(VertexRef, int)]
  ## Find a vertex on the cache layers. An `ok()` result might contain a
  ## `nil` vertex if it is stored on the cache that way.
  ##
-  for w, level in db.rstack:
+  for w in db.rstack(stopAtSnapshot = true):
+    if w.snapshotLevel.isSome():
+      w.snapshot.withValue(rvid, item):
+        return Opt.some((item[][0], item[][2]))
+      break
+
    w.sTab.withValue(rvid, item):
-      return Opt.some((item[], level))
+      return Opt.some((item[], w.level))

  Opt.none((VertexRef, int))

@@ -36,11 +41,17 @@ func layersGetKey*(db: AristoTxRef; rvid: RootedVertexID): Opt[(HashKey, int)] =
  ## hash key if it is stored on the cache that way.
  ##

-  for w, level in db.rstack:
+  for w in db.rstack(stopAtSnapshot = true):
+    if w.snapshotLevel.isSome():
+      w.snapshot.withValue(rvid, item):
+        return Opt.some((item[][1], item[][2]))
+      break
+
    w.kMap.withValue(rvid, item):
-      return ok((item[], level))
+      return ok((item[], w.level))
    if rvid in w.sTab:
-      return Opt.some((VOID_HASH_KEY, level))
+      return Opt.some((VOID_HASH_KEY, w.level))

  Opt.none((HashKey, int))

@@ -49,14 +60,14 @@ func layersGetKeyOrVoid*(db: AristoTxRef; rvid: RootedVertexID): HashKey =
  (db.layersGetKey(rvid).valueOr (VOID_HASH_KEY, 0))[0]

func layersGetAccLeaf*(db: AristoTxRef; accPath: Hash32): Opt[VertexRef] =
-  for w, _ in db.rstack:
+  for w in db.rstack:
    w.accLeaves.withValue(accPath, item):
      return Opt.some(item[])

  Opt.none(VertexRef)

func layersGetStoLeaf*(db: AristoTxRef; mixPath: Hash32): Opt[VertexRef] =
-  for w, _ in db.rstack:
+  for w in db.rstack:
    w.stoLeaves.withValue(mixPath, item):
      return Opt.some(item[])
@@ -75,6 +86,9 @@ func layersPutVtx*(
  db.sTab[rvid] = vtx
  db.kMap.del(rvid)

+  if db.snapshotLevel.isSome():
+    db.snapshot[rvid] = (vtx, VOID_HASH_KEY, db.level)
+
func layersResVtx*(
    db: AristoTxRef;
    rvid: RootedVertexID;

@@ -93,6 +107,9 @@ func layersPutKey*(
  db.sTab[rvid] = vtx
  db.kMap[rvid] = key

+  if db.snapshotLevel.isSome():
+    db.snapshot[rvid] = (vtx, key, db.level)
+
func layersResKey*(db: AristoTxRef; rvid: RootedVertexID, vtx: VertexRef) =
  ## Shortcut for `db.layersPutKey(vid, VOID_HASH_KEY)`. It is sort of the
  ## equivalent of a delete function.
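A minimal sketch of the write-path invariant above, with stand-in types: a frame that owns a snapshot mirrors every vertex/key write into the cumulative table, tagged with the writing frame's `level` so that entries older than the current base can be pruned later.

import std/tables

type
  Entry = tuple[value: string, level: int]
  Layer = object
    level: int
    delta: Table[string, string]   # stands in for sTab/kMap
    snapshot: Table[string, Entry] # cumulative view
    hasSnapshot: bool

proc put(ly: var Layer, key, value: string) =
  ly.delta[key] = value
  if ly.hasSnapshot:
    ly.snapshot[key] = (value, ly.level) # keep the cumulative view in sync

when isMainModule:
  var ly = Layer(level: 7, hasSnapshot: true)
  ly.put("vtx1", "data")
  assert ly.snapshot["vtx1"].value == "data"
  assert ly.snapshot["vtx1"].level == 7 # prune marker for already-persisted levels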
@@ -116,16 +133,35 @@ func layersPutStoLeaf*(db: AristoTxRef; mixPath: Hash32; leafVtx: VertexRef) =
func isEmpty*(ly: AristoTxRef): bool =
  ## Returns `true` if the layer does not contain any changes, i.e. all the
  ## tables are empty.
+  ly.snapshot.len == 0 and
  ly.sTab.len == 0 and
  ly.kMap.len == 0 and
  ly.accLeaves.len == 0 and
  ly.stoLeaves.len == 0

+proc copyFrom*(snapshot: var Table[RootedVertexID, Snapshot], tx: AristoTxRef) =
+  for rvid, vtx in tx.sTab:
+    tx.kMap.withValue(rvid, key):
+      snapshot[rvid] = (vtx, key[], tx.level)
+    do:
+      snapshot[rvid] = (vtx, VOID_HASH_KEY, tx.level)
+
proc mergeAndReset*(trg, src: AristoTxRef) =
  ## Merges the argument `src` into the argument `trg` and clears `src`.
  trg.vTop = src.vTop
  trg.blockNumber = src.blockNumber
+  trg.level = src.level
+  trg.parent = move(src.parent)
+
+  doAssert not src.snapshotLevel.isSome(),
+    "If the source is a snapshot, it should have been used as a starting point for merge"
+
+  if trg.snapshotLevel.isSome():
+    # If there already was a snapshot, we might as well add to it
+    trg.snapshot.copyFrom(src)
+    src.sTab.reset()
+    src.kMap.reset()
+  else:
    if trg.kMap.len > 0:
      # Invalidate cached keys in the lower layer
      for vid in src.sTab.keys:

@@ -133,37 +169,10 @@ proc mergeAndReset*(trg, src: AristoTxRef) =

    mergeAndReset(trg.sTab, src.sTab)
    mergeAndReset(trg.kMap, src.kMap)

  mergeAndReset(trg.accLeaves, src.accLeaves)
  mergeAndReset(trg.stoLeaves, src.stoLeaves)

-# func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
-#   ## Provide a collapsed copy of layers up to a particular transaction level.
-#   ## If the `level` argument is too large, the maximum transaction level is
-#   ## returned.
-#   ##
-#   let layers = if db.stack.len <= level: db.stack & @[db.top]
-#                else: db.stack[0 .. level]
-
-#   # Set up initial layer (bottom layer)
-#   result = LayerRef(
-#     sTab: layers[0].sTab.dup, # explicit dup for ref values
-#     kMap: layers[0].kMap,
-#     vTop: layers[^1].vTop,
-#     accLeaves: layers[0].accLeaves,
-#     stoLeaves: layers[0].stoLeaves)
-
-#   # Consecutively merge other layers on top
-#   for n in 1 ..< layers.len:
-#     for (vid,vtx) in layers[n].sTab.pairs:
-#       result.sTab[vid] = vtx
-#       result.kMap.del vid
-#     for (vid,key) in layers[n].kMap.pairs:
-#       result.kMap[vid] = key
-#     for (accPath,vtx) in layers[n].accLeaves.pairs:
-#       result.accLeaves[accPath] = vtx
-#     for (mixPath,vtx) in layers[n].stoLeaves.pairs:
-#       result.stoLeaves[mixPath] = vtx
-
# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------

@@ -179,11 +188,10 @@ iterator layersWalkVtx*(
  ## the one with a zero vertex which are otherwise skipped by the iterator.
  ## The `seen` argument must not be modified while the iterator is active.
  ##
-  for w, _ in db.rstack:
+  for w in db.rstack:
    for (rvid,vtx) in w.sTab.pairs:
-      if rvid.vid notin seen:
+      if not seen.containsOrIncl(rvid.vid):
        yield (rvid,vtx)
-        seen.incl rvid.vid

iterator layersWalkVtx*(
    db: AristoTxRef;

@@ -200,11 +208,10 @@ iterator layersWalkKey*(
  ## Walk over all `(VertexID,HashKey)` pairs on the cache layers. Note that
  ## entries are unsorted.
  var seen: HashSet[VertexID]
-  for w, _ in db.rstack:
+  for w in db.rstack:
    for (rvid,key) in w.kMap.pairs:
-      if rvid.vid notin seen:
+      if not seen.containsOrIncl(rvid.vid):
        yield (rvid,key)
-        seen.incl rvid.vid

# ------------------------------------------------------------------------------
# End
@@ -13,68 +13,148 @@
##
{.push raises: [].}

-import
-  results,
-  ./[aristo_desc, aristo_layers]
+import results, ./[aristo_desc, aristo_layers]

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

+proc buildSnapshot(txFrame: AristoTxRef, minLevel: int) =
+  # Starting from the previous snapshot, build a snapshot that includes all
+  # ancestor changes as well as the changes in txFrame itself
+  for frame in txFrame.stack(stopAtSnapshot = true):
+    if frame != txFrame:
+      # Keyframes keep their snapshot instead of it being transferred to the new
+      # frame - right now, only the base frame is a keyframe but this support
+      # could be extended for example to epoch boundary frames which are likely
+      # to become new bases.
+      let isKeyframe = frame == frame.db.txRef
+
+      if frame.snapshotLevel.isSome() and not isKeyframe:
+        # `frame` has a snapshot only in the first iteration of the for loop
+        txFrame.snapshot = move(frame.snapshot)
+        txFrame.snapshotLevel = frame.snapshotLevel
+
+        assert frame.snapshot.len == 0 # https://github.com/nim-lang/Nim/issues/23759
+        frame.snapshotLevel.reset() # in case there was a snapshot in txFrame already
+
+        if txFrame.snapshotLevel != Opt.some(minLevel):
+          # When recycling an existing snapshot, some of its content may have
+          # already been persisted to disk (since it was made based on the
+          # in-memory frames at the time of its creation).
+          # Annoyingly, there's no way to remove items while iterating but even
+          # with the extra seq, move + remove turns out to be faster than
+          # creating a new table - especially when the ratio between old
+          # and current items favors current items.
+          var toRemove = newSeqOfCap[RootedVertexID](txFrame.snapshot.len div 2)
+          for rvid, v in txFrame.snapshot:
+            if v[2] < minLevel:
+              toRemove.add rvid
+          for rvid in toRemove:
+            txFrame.snapshot.del(rvid)
+
+      if frame.snapshotLevel.isSome() and isKeyframe:
+        txFrame.snapshot = initTable[RootedVertexID, Snapshot](
+          max(1024, max(frame.sTab.len, frame.snapshot.len))
+        )
+
+        for k, v in frame.snapshot:
+          if v[2] >= minLevel:
+            txFrame.snapshot[k] = v
+
+    # Copy changes into snapshot but keep the diff - the next builder might
+    # steal the snapshot!
+    txFrame.snapshot.copyFrom(frame)
+
+  txFrame.snapshotLevel = Opt.some(minLevel)
+
proc txFrameBegin*(db: AristoDbRef, parent: AristoTxRef): AristoTxRef =
-  let parent = if parent == nil:
-    db.txRef
-  else:
-    parent
-
-  AristoTxRef(
-    db: db,
-    parent: parent,
-    vTop: parent.vTop)
+  let parent = if parent == nil: db.txRef else: parent
+  AristoTxRef(db: db, parent: parent, vTop: parent.vTop, level: parent.level + 1)

-proc baseTxFrame*(db: AristoDbRef): AristoTxRef=
+proc baseTxFrame*(db: AristoDbRef): AristoTxRef =
  db.txRef

proc dispose*(tx: AristoTxRef) =
  tx[].reset()

-proc checkpoint*(tx: AristoTxRef; blockNumber: uint64) =
+proc checkpoint*(tx: AristoTxRef, blockNumber: uint64, skipSnapshot: bool) =
  tx.blockNumber = Opt.some(blockNumber)

+  if not skipSnapshot:
+    # Snapshots are expensive, therefore we only do it at checkpoints (which
+    # presumably have gone through enough validation)
+    tx.buildSnapshot(tx.db.txRef.level)
+
-proc persist*(
-    db: AristoDbRef; # Database
-    batch: PutHdlRef;
-    txFrame: AristoTxRef;
-) =
-  if txFrame == db.txRef and txFrame.sTab.len == 0:
+proc persist*(db: AristoDbRef, batch: PutHdlRef, txFrame: AristoTxRef) =
+  if txFrame == db.txRef and txFrame.isEmpty():
    # No changes in frame - no `checkpoint` requirement - nothing to do here
    return

  let lSst = SavedState(
    key: emptyRoot, # placeholder for more
-    serial: txFrame.blockNumber.expect("`checkpoint` before persisting frame"))
+    serial: txFrame.blockNumber.expect("`checkpoint` before persisting frame"),
+  )

  # Squash all changes up to the base
+  let oldLevel = db.txRef.level
  if txFrame != db.txRef:
    # Consolidate the changes from the old to the new base going from the
    # bottom of the stack to avoid having to cascade each change through
    # the full stack
    assert txFrame.parent != nil
-    for frame in txFrame.stack():
-      if frame == db.txRef:
-        continue
-      mergeAndReset(db.txRef, frame)
+
+    var bottom: AristoTxRef
+
+    for frame in txFrame.stack(stopAtSnapshot = true):
+      if bottom == nil:
+        # db.txRef always is a snapshot, therefore we're guaranteed to end up
+        # here
+        bottom = frame
+
+        # If there is no snapshot, consolidate changes into sTab/kMap instead
+        # which caters to the scenario where changes from multiple blocks
+        # have already been written to sTab and the changes can be moved into
+        # the bottom.
+        if bottom.snapshot.len == 0:
+          bottom.snapshotLevel.reset()
+        else:
+          # Incoming snapshots already have sTab baked in - make sure we don't
+          # overwrite merged data from more recent layers with this old version
+          bottom.sTab.reset()
+          bottom.kMap.reset()
+        continue
+
+      doAssert not bottom.isNil, "should have found db.txRef at least"
+      mergeAndReset(bottom, frame)

      frame.dispose() # This will also dispose `txFrame` itself!

    # Put the now-merged contents in txFrame and make it the new base
-    swap(db.txRef[], txFrame[])
+    swap(bottom[], txFrame[])
    db.txRef = txFrame

+    if txFrame.parent != nil:
+      # Can't use rstack here because dispose will break the parent chain
+      for frame in txFrame.parent.stack():
+        frame.dispose()
+
+      txFrame.parent = nil
+  else:
+    if txFrame.snapshotLevel.isSome():
+      # Clear out redundant copy so we don't write it twice, below
+      txFrame.sTab.reset()
+      txFrame.kMap.reset()

  # Store structural single trie entries
+  assert txFrame.snapshot.len == 0 or txFrame.sTab.len == 0,
+    "Either snapshot or sTab should have been cleared as part of merging"
+
+  for rvid, item in txFrame.snapshot:
+    if item[2] >= oldLevel:
+      db.putVtxFn(batch, rvid, item[0], item[1])
+
  for rvid, vtx in txFrame.sTab:
-    txFrame.kMap.withValue(rvid, key) do:
+    txFrame.kMap.withValue(rvid, key):
      db.putVtxFn(batch, rvid, vtx, key[])
    do:
      db.putVtxFn(batch, rvid, vtx, default(HashKey))

@@ -94,6 +174,10 @@ proc persist*(
  for mixPath, vtx in txFrame.stoLeaves:
    db.stoLeaves.put(mixPath, vtx)

+  txFrame.snapshot.clear()
+  # Since txFrame is now the base, it contains all changes and therefore acts
+  # as a snapshot
+  txFrame.snapshotLevel = Opt.some(txFrame.level)
  txFrame.sTab.clear()
  txFrame.kMap.clear()
  txFrame.accLeaves.clear()
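Usage sketch of the checkpoint/persist sequence, drawn from the test changes in this commit; `db` and `txFrame` are assumed to be an initialized Aristo database and a frame holding validated changes, and `blockNumber` the block being checkpointed:

txFrame.checkpoint(blockNumber, skipSnapshot = true) # no snapshot: the frame is persisted next
let batch = db.putBegFn()[]
db.persist(batch, txFrame)

Passing `skipSnapshot = false` instead would build the cumulative snapshot, which is the right choice for a frame that will remain in memory and serve lookups.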
@@ -448,8 +448,8 @@ proc txFrameBegin*(parent: CoreDbTxRef): CoreDbTxRef =

  CoreDbTxRef(kTx: kTx, aTx: aTx)

-proc checkpoint*(tx: CoreDbTxRef, blockNumber: BlockNumber) =
-  tx.aTx.checkpoint(blockNumber)
+proc checkpoint*(tx: CoreDbTxRef, blockNumber: BlockNumber, skipSnapshot = false) =
+  tx.aTx.checkpoint(blockNumber, skipSnapshot)

proc dispose*(tx: CoreDbTxRef) =
  tx.aTx.dispose()
@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at
#     https://opensource.org/licenses/MIT).

@@ -1,5 +1,5 @@
# Nimbus
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at
#     https://opensource.org/licenses/MIT).
@@ -10,7 +10,6 @@
import
  ./[
    test_aristo,
-    test_blockchain_json,
    test_configuration,
    test_coredb,
    test_difficulty,
@@ -19,7 +18,6 @@ import
    test_filters,
    test_forked_chain,
    test_forkid,
-    test_generalstate_json,
    test_genesis,
    test_getproof_json,
    test_jwt_auth,
@@ -36,4 +34,8 @@ import
    test_tracer_json,
    test_transaction_json,
    test_txpool,
+
+    # These two suites are much slower than all the rest, so run them last
+    test_blockchain_json,
+    test_generalstate_json,
  ]
@@ -122,7 +122,7 @@ suite "Aristo compute":
    for (k, v, r) in samples[^1]:
      check:
        txFrame.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true)
-    txFrame.checkpoint(1)
+    txFrame.checkpoint(1, skipSnapshot = true)

    let batch = db.putBegFn()[]
    db.persist(batch, txFrame)
@@ -23,7 +23,7 @@ import
  aristo_init/init_common,
  aristo_init/memory_only,
  aristo_layers,
-  aristo_merge
+  aristo_merge,
]

proc makeAccount(i: uint64): (Hash32, AristoAccount) =

@@ -38,8 +38,7 @@
suite "Aristo TxFrame":
  setup:
-    let
-      db = AristoDbRef.init()
+    let db = AristoDbRef.init()

  test "Frames should independently keep data":
    let

@@ -77,9 +76,24 @@ suite "Aristo TxFrame":
      # The vid for acc1 gets created in tx1 because it has to move to a new
      # mpt node from the root - tx2c updates only data, so the level at which
      # we find the vtx should be one below tx2c!
-      tx2c.layersGetVtx((VertexID(1), acc1Hike.legs[^1].wp.vid)).value()[1] == 1
+      (
+        tx2c.level -
+        tx2c.layersGetVtx((VertexID(1), acc1Hike.legs[^1].wp.vid)).value()[1]
+      ) == 1
+
+    tx0.checkpoint(1, skipSnapshot = false)
+    tx1.checkpoint(2, skipSnapshot = false)
+    tx2.checkpoint(3, skipSnapshot = false)
+    tx2b.checkpoint(3, skipSnapshot = false)
+    tx2c.checkpoint(3, skipSnapshot = false)
+
+    check:
+      # Even after checkpointing, we should maintain the same relative levels
+      (
+        tx2c.level -
+        tx2c.layersGetVtx((VertexID(1), acc1Hike.legs[^1].wp.vid)).value()[1]
+      ) == 1

-    tx2.checkpoint(1)
    let batch = db.putBegFn().expect("working batch")
    db.persist(batch, tx2)
    check:
@@ -11,16 +11,19 @@
import
  pkg/chronicles,
  pkg/unittest2,
+  std/[os, strutils],
  ../execution_chain/common,
  ../execution_chain/config,
  ../execution_chain/utils/utils,
  ../execution_chain/core/chain/forked_chain,
  ../execution_chain/db/ledger,
+  ../execution_chain/db/era1_db,
  ./test_forked_chain/chain_debug

const
  genesisFile = "tests/customgenesis/cancun123.json"
  senderAddr = address"73cf19657412508833f618a15e8251306b3e6ee5"
+  sourcePath = currentSourcePath.rsplit({DirSep, AltSep}, 1)[0]

type
  TestEnv = object
@@ -154,90 +157,69 @@ proc forkedChainMain*() =
    genesisHash = cc.genesisHeader.blockHash
    genesis = Block.init(cc.genesisHeader, BlockBody())
    baseTxFrame = cc.db.baseTxFrame()

  let
    blk1 = baseTxFrame.makeBlk(1, genesis)
    blk2 = baseTxFrame.makeBlk(2, blk1)
    blk3 = baseTxFrame.makeBlk(3, blk2)

    dbTx = baseTxFrame.txFrameBegin
    blk4 = dbTx.makeBlk(4, blk3)
    blk5 = dbTx.makeBlk(5, blk4)
    blk6 = dbTx.makeBlk(6, blk5)
    blk7 = dbTx.makeBlk(7, blk6)

  dbTx.dispose()

  let
    B4 = baseTxFrame.makeBlk(4, blk3, 1.byte)
    dbTx2 = baseTxFrame.txFrameBegin
    B5 = dbTx2.makeBlk(5, B4)
    B6 = dbTx2.makeBlk(6, B5)
    B7 = dbTx2.makeBlk(7, B6)

  dbTx2.dispose()

  let
    C5 = baseTxFrame.makeBlk(5, blk4, 1.byte)
    C6 = baseTxFrame.makeBlk(6, C5)
    C7 = baseTxFrame.makeBlk(7, C6)

  test "newBase == oldBase":
    const info = "newBase == oldBase"
    let com = env.newCom()

    var chain = ForkedChainRef.init(com)

    # same header twice
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk1)

    checkImportBlock(chain, blk2)
    checkImportBlock(chain, blk3)
    check chain.validate info & " (1)"

    # no parent
    checkImportBlockErr(chain, blk5)

    check chain.headHash == genesisHash
    check chain.latestHash == blk3.blockHash
    check chain.validate info & " (2)"

    # finalized > head -> error
    checkForkChoiceErr(chain, blk1, blk3)
    check chain.validate info & " (3)"

    # blk4 is not part of chain
    checkForkChoiceErr(chain, blk4, blk2)

    # finalized > head -> error
    checkForkChoiceErr(chain, blk1, blk2)

    # blk4 is not part of chain
    checkForkChoiceErr(chain, blk2, blk4)

    # finalized < head -> ok
    checkForkChoice(chain, blk2, blk1)
    check chain.headHash == blk2.blockHash
    check chain.latestHash == blk2.blockHash
    check chain.validate info & " (7)"

    # finalized == head -> ok
    checkForkChoice(chain, blk2, blk2)
    check chain.headHash == blk2.blockHash
    check chain.latestHash == blk2.blockHash
    check chain.baseNumber == 0'u64
    check chain.validate info & " (8)"

    # baggage written
    check chain.wdWritten(blk1) == 1
    check chain.wdWritten(blk2) == 2
    check chain.validate info & " (9)"

  test "newBase on activeBranch":
    const info = "newBase on activeBranch"
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)
@@ -246,31 +228,23 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, blk4)
    check chain.validate info & " (1)"

    # newbase == head
    checkForkChoice(chain, blk7, blk6)
    check chain.validate info & " (2)"

    check chain.headHash == blk7.blockHash
    check chain.latestHash == blk7.blockHash
    check chain.baseBranch == chain.activeBranch

    check chain.wdWritten(blk7) == 7

    # head - baseDistance must have been persisted
    checkPersisted(chain, blk3)

    # make sure aristo not wiped out baggage
    check chain.wdWritten(blk3) == 3
    check chain.validate info & " (9)"

  test "newBase between oldBase and head":
    const info = "newBase between oldBase and head"
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)

@@ -280,28 +254,21 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)
    check chain.validate info & " (1)"

    checkForkChoice(chain, blk7, blk6)
    check chain.validate info & " (2)"

    check chain.headHash == blk7.blockHash
    check chain.latestHash == blk7.blockHash
    check chain.baseBranch == chain.activeBranch

    check chain.wdWritten(blk6) == 6
    check chain.wdWritten(blk7) == 7

    # head - baseDistance must have been persisted
    checkPersisted(chain, blk3)

    # make sure aristo not wiped out baggage
    check chain.wdWritten(blk3) == 3
    check chain.validate info & " (9)"

  test "newBase == oldBase, fork and stay on that fork":
    const info = "newBase == oldBase, fork .."
    let com = env.newCom()

    var chain = ForkedChainRef.init(com)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)
@@ -310,26 +277,20 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)
    check chain.validate info & " (1)"

    checkForkChoice(chain, B7, B5)

    check chain.headHash == B7.blockHash
    check chain.latestHash == B7.blockHash
    check chain.baseNumber == 0'u64
    check chain.branches.len == 2

    check chain.validate info & " (9)"

  test "newBase move forward, fork and stay on that fork":
    const info = "newBase move forward, fork .."
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)

@@ -338,28 +299,22 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)

    checkImportBlock(chain, B4)
    check chain.validate info & " (1)"

    checkForkChoice(chain, B6, B4)
    check chain.validate info & " (2)"

    check chain.headHash == B6.blockHash
    check chain.latestHash == B6.blockHash
    check chain.baseNumber == 3'u64
    check chain.branches.len == 2
    check chain.validate info & " (9)"

  test "newBase on shorter canonical arc, remove oldBase branches":
    const info = "newBase on shorter canonical, remove oldBase branches"
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)
@@ -368,26 +323,21 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)
    check chain.validate info & " (1)"

    checkForkChoice(chain, B7, B6)
    check chain.validate info & " (2)"

    check chain.headHash == B7.blockHash
    check chain.latestHash == B7.blockHash
    check chain.baseNumber == 4'u64
    check chain.branches.len == 1
    check chain.validate info & " (9)"

  test "newBase on curbed non-canonical arc":
    const info = "newBase on curbed non-canonical .."
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 5)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)

@@ -396,27 +346,22 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)
    check chain.validate info & " (1)"

    checkForkChoice(chain, B7, B5)
    check chain.validate info & " (2)"

    check chain.headHash == B7.blockHash
    check chain.latestHash == B7.blockHash
    check chain.baseNumber > 0
    check chain.baseNumber < B4.header.number
    check chain.branches.len == 2
    check chain.validate info & " (9)"

  test "newBase == oldBase, fork and return to old chain":
    const info = "newBase == oldBase, fork .."
    let com = env.newCom()

    var chain = ForkedChainRef.init(com)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)
@@ -425,25 +370,20 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)
    check chain.validate info & " (1)"

    checkForkChoice(chain, blk7, blk5)
    check chain.validate info & " (2)"

    check chain.headHash == blk7.blockHash
    check chain.latestHash == blk7.blockHash
    check chain.baseNumber == 0'u64
    check chain.validate info & " (9)"

  test "newBase on activeBranch, fork and return to old chain":
    const info = "newBase on activeBranch, fork .."
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)

@@ -452,28 +392,22 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)

    checkImportBlock(chain, blk4)
    check chain.validate info & " (1)"

    checkForkChoice(chain, blk7, blk5)
    check chain.validate info & " (2)"

    check chain.headHash == blk7.blockHash
    check chain.latestHash == blk7.blockHash
    check chain.baseBranch == chain.activeBranch
    check chain.validate info & " (9)"

  test "newBase on shorter canonical arc, discard arc with oldBase" &
      " (ign dup block)":
    const info = "newBase on shorter canonical .."
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)
@@ -482,28 +416,22 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)

    checkImportBlock(chain, blk4)
    check chain.validate info & " (1)"

    checkForkChoice(chain, B7, B5)
    check chain.validate info & " (2)"

    check chain.headHash == B7.blockHash
    check chain.latestHash == B7.blockHash
    check chain.baseNumber == 4'u64
    check chain.branches.len == 1
    check chain.validate info & " (9)"

  test "newBase on longer canonical arc, discard new branch":
    const info = "newBase on longer canonical .."
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)

@@ -512,27 +440,22 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)
    check chain.validate info & " (1)"

    checkForkChoice(chain, blk7, blk5)
    check chain.validate info & " (2)"

    check chain.headHash == blk7.blockHash
    check chain.latestHash == blk7.blockHash
    check chain.baseNumber > 0
    check chain.baseNumber < blk5.header.number
    check chain.branches.len == 1
    check chain.validate info & " (9)"

  test "headerByNumber":
    const info = "headerByNumber"
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)
@@ -541,98 +464,77 @@ proc forkedChainMain*() =
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, blk7)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, B5)
    checkImportBlock(chain, B6)
    checkImportBlock(chain, B7)
    check chain.validate info & " (1)"

    checkForkChoice(chain, blk7, blk5)
    check chain.validate info & " (2)"

    # cursor
    check chain.headerByNumber(8).isErr
    check chain.headerByNumber(7).expect("OK").number == 7
    check chain.headerByNumber(7).expect("OK").blockHash == blk7.blockHash

    # from db
    check chain.headerByNumber(3).expect("OK").number == 3
    check chain.headerByNumber(3).expect("OK").blockHash == blk3.blockHash

    # base
    check chain.headerByNumber(4).expect("OK").number == 4
    check chain.headerByNumber(4).expect("OK").blockHash == blk4.blockHash

    # from cache
    check chain.headerByNumber(5).expect("OK").number == 5
    check chain.headerByNumber(5).expect("OK").blockHash == blk5.blockHash
    check chain.validate info & " (9)"

  test "3 branches, alternating imports":
    const info = "3 branches, alternating imports"
    let com = env.newCom()

    var chain = ForkedChainRef.init(com, baseDistance = 3)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)
    checkImportBlock(chain, blk3)

    checkImportBlock(chain, B4)
    checkImportBlock(chain, blk4)

    checkImportBlock(chain, B5)
    checkImportBlock(chain, blk5)
    checkImportBlock(chain, C5)

    checkImportBlock(chain, B6)
    checkImportBlock(chain, blk6)
    checkImportBlock(chain, C6)

    checkImportBlock(chain, B7)
    checkImportBlock(chain, blk7)
    checkImportBlock(chain, C7)
    check chain.validate info & " (1)"

    check chain.latestHash == C7.blockHash
    check chain.latestNumber == 7'u64
    check chain.branches.len == 3

    checkForkChoice(chain, B7, blk3)
    check chain.validate info & " (2)"
    check chain.branches.len == 3

    checkForkChoice(chain, B7, B6)
    check chain.validate info & " (2)"
    check chain.branches.len == 1

  test "importing blocks with new CommonRef and FC instance, 3 blocks":
    const info = "importing blocks with new CommonRef and FC instance, 3 blocks"
    let com = env.newCom()

    let chain = ForkedChainRef.init(com, baseDistance = 0)
    checkImportBlock(chain, blk1)
    checkImportBlock(chain, blk2)
    checkImportBlock(chain, blk3)
    checkForkChoice(chain, blk3, blk3)
    check chain.validate info & " (1)"

    let cc = env.newCom(com.db)
    let fc = ForkedChainRef.init(cc, baseDistance = 0)
    check fc.headHash == blk3.blockHash
    checkImportBlock(fc, blk4)
    checkForkChoice(fc, blk4, blk4)
    check chain.validate info & " (2)"

  test "importing blocks with new CommonRef and FC instance, 1 block":
    const info = "importing blocks with new CommonRef and FC instance, 1 block"
    let com = env.newCom()

    let chain = ForkedChainRef.init(com, baseDistance = 0)
    checkImportBlock(chain, blk1)
    checkForkChoice(chain, blk1, blk1)
    check chain.validate info & " (1)"

    let cc = env.newCom(com.db)
    let fc = ForkedChainRef.init(cc, baseDistance = 0)
    check fc.headHash == blk1.blockHash
@@ -640,5 +542,48 @@ proc forkedChainMain*() =
    checkForkChoice(fc, blk2, blk2)
    check chain.validate info & " (2)"

+suite "ForkedChain mainnet replay":
+  # A short mainnet replay test to check that the first few hundred blocks can
+  # be imported using a typical importBlock / fcu sequence - this does not
+  # test any transactions since these blocks are practically empty, but thanks
+  # to block rewards the state db keeps changing anyway, providing a simple
+  # smoke test
+  setup:
+    let
+      era0 = Era1DbRef.init(sourcePath / "replay", "mainnet").expect("Era files present")
+      com = CommonRef.new(AristoDbMemory.newCoreDbRef(), nil)
+      fc = ForkedChainRef.init(com)
+
+  test "Replay mainnet era, single FCU":
+    var blk: EthBlock
+    for i in 1..<fc.baseDistance * 2:
+      era0.getEthBlock(i.BlockNumber, blk).expect("block in test database")
+      check:
+        fc.importBlock(blk).isOk()
+
+    check:
+      fc.forkChoice(blk.blockHash, blk.blockHash).isOk()
+
+  test "Replay mainnet era, multiple FCU":
+    # Simulates the typical case where fcu comes after the block
+    var blk: EthBlock
+    era0.getEthBlock(0.BlockNumber, blk).expect("block in test database")
+
+    var blocks = [blk.blockHash, blk.blockHash]
+
+    for i in 1..<fc.baseDistance * 2:
+      era0.getEthBlock(i.BlockNumber, blk).expect("block in test database")
+      check:
+        fc.importBlock(blk).isOk()
+
+      let hash = blk.blockHash
+      check:
+        fc.forkChoice(hash, blocks[0]).isOk()
+      if i mod 32 == 0:
+        # in reality, finalized typically lags a bit more than this, but
+        # for the purpose of the test, this should be good enough
+        blocks[0] = blocks[1]
+        blocks[1] = hash

forkedChainMain()
@@ -138,7 +138,7 @@ createRpcSigsFromNim(RpcClient):
# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------

-proc runKeyLoader(noisy = true;
+proc runKeyLoader(noisy = defined(debug);
                  keyFile = jwtKeyFile; strippedFile = jwtKeyStripped) =
  let
    filePath = keyFile.findFilePath.value

@@ -229,7 +229,7 @@ proc runKeyLoader(noisy = true;
    # Compare key against contents of shared key file
    check hexKey.cmpIgnoreCase(hexLine) == 0

-proc runJwtAuth(noisy = true; keyFile = jwtKeyFile) =
+proc runJwtAuth(noisy = defined(debug); keyFile = jwtKeyFile) =
  let
    filePath = keyFile.findFilePath.value
    dataDir = filePath.splitPath.head

@@ -297,10 +297,6 @@ proc runJwtAuth(noisy = true; keyFile = jwtKeyFile) =
# Main function(s)
# ------------------------------------------------------------------------------

-proc jwtAuthMain*(noisy = defined(debug)) =
-  noisy.runKeyLoader
-  noisy.runJwtAuth
-
when isMainModule:
  setErrorLevel()