Mirror of https://github.com/status-im/nimbus-eth1.git (synced 2025-03-01)
Introduce frame snapshots (#3098)
With the introduction of layered frames, each database lookup may result in hundreds of table lookups as the frame stack is traversed. This change restores performance by introducing snapshots that limit the lookup depth, at the expense of slightly increased memory usage.

The snapshot contains the cumulative changes of all ancestors and the frame itself, allowing the lookup recursion to stop whenever it is encountered. The number of snapshots to keep in memory is a tradeoff between lookup performance and memory usage - this change starts with a simple strategy of keeping snapshots for head frames (approximately). The snapshot is created during checkpointing, ie after block validation, to make sure that it's cheap to start verifying blocks - parent snapshots are moved to the descendant as part of checkpointing, which effectively means that head frames hold snapshots in most cases.

The outcome of this tradeoff is that applying a block to a known head is fast while creating a new branch of history remains expensive. Another consequence is that when persisting changes to disk, we must re-traverse the stack of changes to build a cumulative set of changes to be persisted.

A future strategy might be to keep additional "keyframes" along the way, ie one per epoch for example - this would bound the "branch creation" cost to a constant factor, but the memory overhead should first be considered. Another strategy might be to avoid keeping snapshots for non-canonical branches, especially when they become older and thus less likely to be branched from.

* `level` is updated to work like a temporary serial number, maintaining its relative position in the sorting order as frames are persisted
* a `snapshot` is added to some TxFrame instances - the snapshot collects all ancestor changes up to and including the given frame. `level` is used as a marker to prune the snapshot of changes that have already been persisted.
* stack traversals for the purpose of lookup stop when they encounter a snapshot - this bounds the lookup depth to the first encountered snapshot

After this PR, sync performance lands at about 2-3 blocks per second (a ~10x improvement) - this is quite reasonable compared with block import, which skips the expensive state root verification and thus achieves ~20 blk/s on the same hardware. Additional work to bring live syncing performance in line with disk-based block import would focus on reducing state root verification cost.
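The lookup-bounding idea can be illustrated with a minimal Nim sketch. The names below (`Frame`, `Entry`, `lookup`) are invented for illustration and do not match the actual TxFrame/Aristo types in this commit: each frame carries its own changes and, optionally, a cumulative snapshot; a lookup walks from the tip towards the base and stops at the first snapshot it meets, falling back to the database only when neither yields a hit.

# Minimal sketch of snapshot-bounded lookup - illustrative only; the types
# and proc names here are not the PR's actual API.
import std/[options, tables]

type
  Entry = int
  Frame = ref object
    parent: Frame                     # nil for the base frame
    changes: Table[string, Entry]     # this frame's own writes
    snapshot: Table[string, Entry]    # cumulative ancestor + own writes
    hasSnapshot: bool

proc lookup(frame: Frame, key: string): Option[Entry] =
  # Walk from the tip towards the base. A snapshot already contains the
  # cumulative changes of all ancestors, so the traversal can stop as soon
  # as one is encountered - this bounds the lookup depth.
  var f = frame
  while f != nil:
    if f.hasSnapshot:
      if key in f.snapshot:
        return some(f.snapshot[key])
      break # nothing deeper in memory can differ - fall through to the DB
    if key in f.changes:
      return some(f.changes[key])
    f = f.parent
  none(Entry) # the caller would consult the database backend here

Without the snapshot, the loop would visit every ancestor frame - hundreds of table lookups per key once the frame stack grows deep.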
This commit is contained in:
parent 0f89c1f901
commit 4576727817
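The `level`-as-serial-number bookkeeping also drives snapshot reuse: when an existing snapshot is recycled, entries written by frames that have since been persisted can be dropped. A hedged sketch of that pruning step (illustrative names again, mirroring the `toRemove` loop visible in `buildSnapshot` in the diff below):

# Sketch of level-based snapshot pruning. Entries remember the level of the
# frame that wrote them; anything below the current base level has already
# been persisted to disk and may be dropped from the in-memory snapshot.
import std/tables

type PrunableSnapshot = Table[string, (int, int)] # key -> (value, level)

proc prune(snapshot: var PrunableSnapshot, minLevel: int) =
  # Tables cannot be mutated while iterating, so collect stale keys first.
  var stale: seq[string]
  for key, entry in snapshot:
    if entry[1] < minLevel:
      stale.add key
  for key in stale:
    snapshot.del key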
@@ -147,8 +147,12 @@ proc initializeDb(com: CommonRef) =
   txFrame.persistHeaderAndSetHead(com.genesisHeader,
     startOfHistory=com.genesisHeader.parentHash).
     expect("can persist genesis header")
+
   doAssert(canonicalHeadHashKey().toOpenArray in txFrame)
+
+  txFrame.checkpoint(com.genesisHeader.number)
+  com.db.persist(txFrame)
 
   # The database must at least contain the base and head pointers - the base
   # is implicitly considered finalized
   let
@@ -143,8 +143,12 @@ proc validateBlock(c: ForkedChainRef,
   c.writeBaggage(blk, blkHash, txFrame, receipts)
 
   # Block fully written to txFrame, mark it as such
+  # Checkpoint creates a snapshot of ancestor changes in txFrame - it is an
+  # expensive operation, specially when creating a new branch (ie when blk
+  # is being applied to a block that is currently not a head)
   txFrame.checkpoint(blk.header.number)
+
 
   c.updateBranch(parent, blk, blkHash, txFrame, move(receipts))
 
   for i, tx in blk.transactions:
@@ -102,7 +102,9 @@ proc checkpoint*(p: var Persister): Result[void, string] =
   )
 
   # Move in-memory state to disk
+  p.vmState.ledger.txFrame.checkpoint(p.parent.number, skipSnapshot = true)
   p.com.db.persist(p.vmState.ledger.txFrame)
 
   # Get a new frame since the DB assumes ownership
   p.vmState.ledger.txFrame = p.com.db.baseTxFrame().txFrameBegin()
@@ -172,8 +174,6 @@ proc persistBlock*(p: var Persister, blk: Block): Result[void, string] =
   p.stats.txs += blk.transactions.len
   p.stats.gas += blk.header.gasUsed
 
-  txFrame.checkpoint(header.number)
-
   assign(p.parent, header)
 
   ok()
@@ -15,7 +15,7 @@ import
   chronicles,
   eth/common/[accounts_rlp, base_rlp, hashes_rlp],
   results,
-  "."/[aristo_desc, aristo_get, aristo_walk/persistent],
+  "."/[aristo_desc, aristo_get, aristo_tx_frame, aristo_walk/persistent],
   ./aristo_desc/desc_backend
 
 type WriteBatch = tuple[writer: PutHdlRef, count: int, depth: int, prefix: uint64]
@@ -74,7 +74,7 @@ proc putKeyAtLevel(
   ## set (vertex data may have been committed to disk without computing the
   ## corresponding hash!)
 
-  if level == -2:
+  if level < db.db.baseTxFrame().level:
     ?batch.putVtx(db.db, rvid, vtx, key)
 
     if batch.count mod batchSize == 0:
@@ -90,16 +90,6 @@ proc putKeyAtLevel(
 
   ok()
 
-func maxLevel(cur, other: int): int =
-  # Compare two levels and return the topmost in the stack, taking into account
-  # the odd reversal of order around the zero point
-  if cur < 0:
-    max(cur, other) # >= 0 is always more topmost than <0
-  elif other < 0:
-    cur
-  else:
-    min(cur, other) # Here the order is reversed and 0 is the top layer
-
 template encodeLeaf(w: var RlpWriter, pfx: NibblesBuf, leafData: untyped): HashKey =
   w.startList(2)
   w.append(pfx.toHexPrefix(isLeaf = true).data())
@@ -123,7 +113,7 @@ proc getKey(
     db: AristoTxRef, rvid: RootedVertexID, skipLayers: static bool
 ): Result[((HashKey, VertexRef), int), AristoError] =
   ok when skipLayers:
-    (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2)
+    (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), dbLevel)
   else:
     ?db.getKeyRc(rvid, {})
@@ -178,7 +168,7 @@ proc computeKeyImpl(
           keyvtxl[1],
           skipLayers = skipLayers,
         )
-      level = maxLevel(level, sl)
+      level = max(level, sl)
       skey
     else:
       VOID_HASH_KEY
@@ -252,7 +242,7 @@ proc computeKeyImpl(
   template writeBranch(w: var RlpWriter): HashKey =
     w.encodeBranch(vtx):
       if subvid.isValid:
-        level = maxLevel(level, keyvtxs[n][1])
+        level = max(level, keyvtxs[n][1])
         keyvtxs[n][0][0]
       else:
         VOID_HASH_KEY
@@ -280,7 +270,7 @@ proc computeKeyImpl(
 ): Result[HashKey, AristoError] =
   let (keyvtx, level) =
     when skipLayers:
-      (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), -2)
+      (?db.db.getKeyBe(rvid, {GetVtxFlag.PeekCache}), dbLevel)
     else:
       ?db.getKeyRc(rvid, {})
 
@@ -22,7 +22,7 @@
 {.push raises: [].}
 
 import
-  std/[hashes, sets, tables],
+  std/[hashes, sequtils, sets, tables],
   eth/common/hashes,
   results,
   ./aristo_constants,
@@ -73,6 +73,21 @@ type
 
     blockNumber*: Opt[uint64] ## Block number set when checkpointing the frame
 
+    snapshot*: Table[RootedVertexID, Snapshot]
+      ## Optional snapshot containing the cumulative changes from ancestors and
+      ## the current frame
+    snapshotLevel*: Opt[int] # base level when the snapshot was taken
+
+    level*: int
+      ## Ancestry level of frame, increases with age but otherwise meaningless -
+      ## used to order data by age when working with layers.
+      ## -1 = stored in database, where relevant though typically should be
+      ## compared with the base layer level instead.
+
+  Snapshot* = (VertexRef, HashKey, int)
+    ## Unlike sTab/kMap, snapshot contains both vertex and key since at the time
+    ## of writing, it's primarily used in contexts where both are present
+
   AristoDbRef* = ref object
     ## Backend interface.
     getVtxFn*: GetVtxFn ## Read vertex record
@@ -88,7 +103,7 @@ type
 
     closeFn*: CloseFn ## Generic destructor
 
     txRef*: AristoTxRef ## Bottom-most in-memory frame
 
     accLeaves*: LruCache[Hash32, VertexRef]
       ## Account path to payload cache - accounts are frequently accessed by
@@ -116,6 +131,8 @@ type
     legs*: ArrayBuf[NibblesBuf.high + 1, Leg] ## Chain of vertices and IDs
     tail*: NibblesBuf ## Portion of non completed path
 
+const dbLevel* = -1
+
 # ------------------------------------------------------------------------------
 # Public helpers
 # ------------------------------------------------------------------------------
@@ -181,52 +198,40 @@ func isValid*(sqv: HashSet[RootedVertexID]): bool =
 # Public functions, miscellaneous
 # ------------------------------------------------------------------------------
 
-# Hash set helper
-func hash*(db: AristoDbRef): Hash =
-  ## Table/KeyedQueue/HashSet mixin
-  cast[pointer](db).hash
+func hash*(db: AristoDbRef): Hash {.error.}
+func hash*(db: AristoTxRef): Hash {.error.}
 
 # ------------------------------------------------------------------------------
 # Public helpers
 # ------------------------------------------------------------------------------
 
-iterator stack*(tx: AristoTxRef): AristoTxRef =
-  # Stack going from base to tx
-  var frames: seq[AristoTxRef]
+iterator rstack*(tx: AristoTxRef, stopAtSnapshot = false): AristoTxRef =
+  # Stack in reverse order, ie going from tx to base
   var tx = tx
 
   while tx != nil:
-    frames.add tx
+    yield tx
+
+    if stopAtSnapshot and tx.snapshotLevel.isSome():
+      break
+
     tx = tx.parent
 
+iterator stack*(tx: AristoTxRef, stopAtSnapshot = false): AristoTxRef =
+  # Stack going from base to tx
+  var frames = toSeq(tx.rstack(stopAtSnapshot))
+
   while frames.len > 0:
     yield frames.pop()
 
-iterator rstack*(tx: AristoTxRef): (AristoTxRef, int) =
-  # Stack in reverse order, ie going from tx to base
-  var tx = tx
-
-  var i = 0
-  while tx != nil:
-    let level = if tx.parent == nil: -1 else: i
-    yield (tx, level)
-    tx = tx.parent
-    i += 1
-
 proc deltaAtLevel*(db: AristoTxRef, level: int): AristoTxRef =
-  if level == -2:
+  if level < db.db.txRef.level:
     nil
-  elif level == -1:
-    db.db.txRef
   else:
-    var
-      frame = db
-      level = level
-
-    while level > 0:
-      frame = frame.parent
-      level -= 1
-
-    frame
+    for frame in db.rstack():
+      if frame.level == level:
+        return frame
+    nil
 
 # ------------------------------------------------------------------------------
 # End
@@ -69,7 +69,7 @@ proc getVtxRc*(
   else:
     return err(GetVtxNotFound)
 
-  ok (?db.db.getVtxBe(rvid, flags), -2)
+  ok (?db.db.getVtxBe(rvid, flags), dbLevel)
 
 proc getVtx*(db: AristoTxRef; rvid: RootedVertexID, flags: set[GetVtxFlag] = {}): VertexRef =
   ## Cascaded attempt to fetch a vertex from the cache layers or the backend.
@@ -103,7 +103,7 @@ proc getKeyRc*(
       # The vertex is to be deleted. So is the value key.
       return err(GetKeyNotFound)
 
-  ok (?db.db.getKeyBe(rvid, flags), -2)
+  ok (?db.db.getKeyBe(rvid, flags), dbLevel)
 
 proc getKey*(db: AristoTxRef; rvid: RootedVertexID): HashKey =
   ## Cascaded attempt to fetch a vertex from the cache layers or the backend.
@@ -83,7 +83,7 @@ proc finishSession*(hdl: TypedPutHdlRef; db: TypedBackendRef) =
 
 proc initInstance*(db: AristoDbRef): Result[void, AristoError] =
   let vTop = ?db.getTuvFn()
-  db.txRef = AristoTxRef(db: db, vTop: vTop)
+  db.txRef = AristoTxRef(db: db, vTop: vTop, snapshotLevel: Opt.some(0))
   db.accLeaves = LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE)
   db.stoLeaves = LruCache[Hash32, VertexRef].init(ACC_LRU_SIZE)
   ok()
@@ -25,9 +25,14 @@ func layersGetVtx*(db: AristoTxRef; rvid: RootedVertexID): Opt[(VertexRef, int)]
   ## Find a vertex on the cache layers. An `ok()` result might contain a
   ## `nil` vertex if it is stored on the cache that way.
   ##
-  for w, level in db.rstack:
+  for w in db.rstack(stopAtSnapshot = true):
+    if w.snapshotLevel.isSome():
+      w.snapshot.withValue(rvid, item):
+        return Opt.some((item[][0], item[][2]))
+      break
+
     w.sTab.withValue(rvid, item):
-      return Opt.some((item[], level))
+      return Opt.some((item[], w.level))
 
   Opt.none((VertexRef, int))
 
@@ -36,11 +41,17 @@ func layersGetKey*(db: AristoTxRef; rvid: RootedVertexID): Opt[(HashKey, int)] =
   ## hash key if it is stored on the cache that way.
   ##
 
-  for w, level in db.rstack:
+  for w in db.rstack(stopAtSnapshot = true):
+    if w.snapshotLevel.isSome():
+      w.snapshot.withValue(rvid, item):
+        return Opt.some((item[][1], item[][2]))
+      break
+
     w.kMap.withValue(rvid, item):
-      return ok((item[], level))
+      return ok((item[], w.level))
     if rvid in w.sTab:
-      return Opt.some((VOID_HASH_KEY, level))
+      return Opt.some((VOID_HASH_KEY, w.level))
 
   Opt.none((HashKey, int))
 
@@ -49,14 +60,14 @@ func layersGetKeyOrVoid*(db: AristoTxRef; rvid: RootedVertexID): HashKey =
   (db.layersGetKey(rvid).valueOr (VOID_HASH_KEY, 0))[0]
 
 func layersGetAccLeaf*(db: AristoTxRef; accPath: Hash32): Opt[VertexRef] =
-  for w, _ in db.rstack:
+  for w in db.rstack:
     w.accLeaves.withValue(accPath, item):
       return Opt.some(item[])
 
   Opt.none(VertexRef)
 
 func layersGetStoLeaf*(db: AristoTxRef; mixPath: Hash32): Opt[VertexRef] =
-  for w, _ in db.rstack:
+  for w in db.rstack:
     w.stoLeaves.withValue(mixPath, item):
       return Opt.some(item[])
 
@@ -75,6 +86,9 @@ func layersPutVtx*(
   db.sTab[rvid] = vtx
   db.kMap.del(rvid)
 
+  if db.snapshotLevel.isSome():
+    db.snapshot[rvid] = (vtx, VOID_HASH_KEY, db.level)
+
 func layersResVtx*(
     db: AristoTxRef;
     rvid: RootedVertexID;
@@ -93,6 +107,9 @@ func layersPutKey*(
   db.sTab[rvid] = vtx
   db.kMap[rvid] = key
 
+  if db.snapshotLevel.isSome():
+    db.snapshot[rvid] = (vtx, key, db.level)
+
 func layersResKey*(db: AristoTxRef; rvid: RootedVertexID, vtx: VertexRef) =
   ## Shortcut for `db.layersPutKey(vid, VOID_HASH_KEY)`. It is sort of the
   ## equivalent of a delete function.
@@ -116,54 +133,46 @@ func layersPutStoLeaf*(db: AristoTxRef; mixPath: Hash32; leafVtx: VertexRef) =
 func isEmpty*(ly: AristoTxRef): bool =
   ## Returns `true` if the layer does not contain any changes, i.e. all the
   ## tables are empty.
+  ly.snapshot.len == 0 and
   ly.sTab.len == 0 and
   ly.kMap.len == 0 and
   ly.accLeaves.len == 0 and
   ly.stoLeaves.len == 0
 
+proc copyFrom*(snapshot: var Table[RootedVertexID, Snapshot], tx: AristoTxRef) =
+  for rvid, vtx in tx.sTab:
+    tx.kMap.withValue(rvid, key):
+      snapshot[rvid] = (vtx, key[], tx.level)
+    do:
+      snapshot[rvid] = (vtx, VOID_HASH_KEY, tx.level)
+
 proc mergeAndReset*(trg, src: AristoTxRef) =
   ## Merges the argument `src` into the argument `trg` and clears `src`.
   trg.vTop = src.vTop
   trg.blockNumber = src.blockNumber
+  trg.level = src.level
+  trg.parent = move(src.parent)
 
-  if trg.kMap.len > 0:
-    # Invalidate cached keys in the lower layer
-    for vid in src.sTab.keys:
-      trg.kMap.del vid
+  doAssert not src.snapshotLevel.isSome(),
+    "If the source is a snapshot, it should have been used as a starting point for merge"
+
+  if trg.snapshotLevel.isSome():
+    # If there already was a snapshot, we might as well add to it
+    trg.snapshot.copyFrom(src)
+    src.sTab.reset()
+    src.kMap.reset()
+  else:
+    if trg.kMap.len > 0:
+      # Invalidate cached keys in the lower layer
+      for vid in src.sTab.keys:
+        trg.kMap.del vid
+
+    mergeAndReset(trg.sTab, src.sTab)
+    mergeAndReset(trg.kMap, src.kMap)
 
-  mergeAndReset(trg.sTab, src.sTab)
-  mergeAndReset(trg.kMap, src.kMap)
   mergeAndReset(trg.accLeaves, src.accLeaves)
   mergeAndReset(trg.stoLeaves, src.stoLeaves)
 
-# func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
-#   ## Provide a collapsed copy of layers up to a particular transaction level.
-#   ## If the `level` argument is too large, the maximum transaction level is
-#   ## returned.
-#   ##
-#   let layers = if db.stack.len <= level: db.stack & @[db.top]
-#                else: db.stack[0 .. level]
-
-#   # Set up initial layer (bottom layer)
-#   result = LayerRef(
-#     sTab: layers[0].sTab.dup, # explicit dup for ref values
-#     kMap: layers[0].kMap,
-#     vTop: layers[^1].vTop,
-#     accLeaves: layers[0].accLeaves,
-#     stoLeaves: layers[0].stoLeaves)
-
-#   # Consecutively merge other layers on top
-#   for n in 1 ..< layers.len:
-#     for (vid,vtx) in layers[n].sTab.pairs:
-#       result.sTab[vid] = vtx
-#       result.kMap.del vid
-#     for (vid,key) in layers[n].kMap.pairs:
-#       result.kMap[vid] = key
-#     for (accPath,vtx) in layers[n].accLeaves.pairs:
-#       result.accLeaves[accPath] = vtx
-#     for (mixPath,vtx) in layers[n].stoLeaves.pairs:
-#       result.stoLeaves[mixPath] = vtx
-
 # ------------------------------------------------------------------------------
 # Public iterators
 # ------------------------------------------------------------------------------
@@ -179,11 +188,10 @@ iterator layersWalkVtx*(
   ## the one with a zero vertex which are othewise skipped by the iterator.
   ## The `seen` argument must not be modified while the iterator is active.
   ##
-  for w, _ in db.rstack:
+  for w in db.rstack:
     for (rvid,vtx) in w.sTab.pairs:
-      if rvid.vid notin seen:
+      if not seen.containsOrIncl(rvid.vid):
         yield (rvid,vtx)
-        seen.incl rvid.vid
 
 iterator layersWalkVtx*(
     db: AristoTxRef;
@@ -200,11 +208,10 @@ iterator layersWalkKey*(
   ## Walk over all `(VertexID,HashKey)` pairs on the cache layers. Note that
   ## entries are unsorted.
   var seen: HashSet[VertexID]
-  for w, _ in db.rstack:
+  for w in db.rstack:
     for (rvid,key) in w.kMap.pairs:
-      if rvid.vid notin seen:
+      if not seen.containsOrIncl(rvid.vid):
        yield (rvid,key)
-        seen.incl rvid.vid
 
 # ------------------------------------------------------------------------------
 # End
@@ -13,68 +13,148 @@
 ##
 {.push raises: [].}
 
-import
-  results,
-  ./[aristo_desc, aristo_layers]
+import results, ./[aristo_desc, aristo_layers]
 
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------
 
+proc buildSnapshot(txFrame: AristoTxRef, minLevel: int) =
+  # Starting from the previous snapshot, build a snapshot that includes all
+  # ancestor changes as well as the changes in txFrame itself
+  for frame in txFrame.stack(stopAtSnapshot = true):
+    if frame != txFrame:
+      # Keyframes keep their snapshot insted of it being transferred to the new
+      # frame - right now, only the base frame is a keyframe but this support
+      # could be extended for example to epoch boundary frames which are likely
+      # to become new bases.
+      let isKeyframe = frame == frame.db.txRef
+
+      if frame.snapshotLevel.isSome() and not isKeyframe:
+        # `frame` has a snapshot only in the first iteration of the for loop
+        txFrame.snapshot = move(frame.snapshot)
+        txFrame.snapshotLevel = frame.snapshotLevel
+
+        assert frame.snapshot.len == 0 # https://github.com/nim-lang/Nim/issues/23759
+        frame.snapshotLevel.reset() # in case there was a snapshot in txFrame already
+
+        if txFrame.snapshotLevel != Opt.some(minLevel):
+          # When recycling an existing snapshot, some of its content may have
+          # already been persisted to disk (since it was made base on the
+          # in-memory frames at the time of its creation).
+          # Annoyingly, there's no way to remove items while iterating but even
+          # with the extra seq, move + remove turns out to be faster than
+          # creating a new table - specially when the ratio between old and
+          # and current items favors current items.
+          var toRemove = newSeqOfCap[RootedVertexID](txFrame.snapshot.len div 2)
+          for rvid, v in txFrame.snapshot:
+            if v[2] < minLevel:
+              toRemove.add rvid
+          for rvid in toRemove:
+            txFrame.snapshot.del(rvid)
+
+      if frame.snapshotLevel.isSome() and isKeyframe:
+        txFrame.snapshot = initTable[RootedVertexID, Snapshot](
+          max(1024, max(frame.sTab.len, frame.snapshot.len))
+        )
+
+        for k, v in frame.snapshot:
+          if v[2] >= minLevel:
+            txFrame.snapshot[k] = v
+
+    # Copy changes into snapshot but keep the diff - the next builder might
+    # steal the snapshot!
+    txFrame.snapshot.copyFrom(frame)
+
+  txFrame.snapshotLevel = Opt.some(minLevel)
+
 proc txFrameBegin*(db: AristoDbRef, parent: AristoTxRef): AristoTxRef =
-  let parent = if parent == nil:
-    db.txRef
-  else:
-    parent
-
-  AristoTxRef(
-    db: db,
-    parent: parent,
-    vTop: parent.vTop)
-
-proc baseTxFrame*(db: AristoDbRef): AristoTxRef=
+  let parent = if parent == nil: db.txRef else: parent
+  AristoTxRef(db: db, parent: parent, vTop: parent.vTop, level: parent.level + 1)
+
+proc baseTxFrame*(db: AristoDbRef): AristoTxRef =
   db.txRef
 
 proc dispose*(tx: AristoTxRef) =
   tx[].reset()
 
-proc checkpoint*(tx: AristoTxRef; blockNumber: uint64) =
+proc checkpoint*(tx: AristoTxRef, blockNumber: uint64, skipSnapshot: bool) =
   tx.blockNumber = Opt.some(blockNumber)
 
-proc persist*(
-    db: AristoDbRef; # Database
-    batch: PutHdlRef;
-    txFrame: AristoTxRef;
-) =
-  if txFrame == db.txRef and txFrame.sTab.len == 0:
+  if not skipSnapshot:
+    # Snapshots are expensive, therefore we only do it at checkpoints (which
+    # presumably have gone through enough validation)
+    tx.buildSnapshot(tx.db.txRef.level)
+
+proc persist*(db: AristoDbRef, batch: PutHdlRef, txFrame: AristoTxRef) =
+  if txFrame == db.txRef and txFrame.isEmpty():
     # No changes in frame - no `checkpoint` requirement - nothing to do here
     return
 
   let lSst = SavedState(
     key: emptyRoot, # placeholder for more
-    serial: txFrame.blockNumber.expect("`checkpoint` before persisting frame"))
+    serial: txFrame.blockNumber.expect("`checkpoint` before persisting frame"),
+  )
 
-  # Squash all changes up to the base
+  let oldLevel = db.txRef.level
   if txFrame != db.txRef:
     # Consolidate the changes from the old to the new base going from the
     # bottom of the stack to avoid having to cascade each change through
     # the full stack
     assert txFrame.parent != nil
-    for frame in txFrame.stack():
-      if frame == db.txRef:
+    var bottom: AristoTxRef
+
+    for frame in txFrame.stack(stopAtSnapshot = true):
+      if bottom == nil:
+        # db.txRef always is a snapshot, therefore we're guaranteed to end up
+        # here
+        bottom = frame
+
+        # If there is no snapshot, consolidate changes into sTab/kMap instead
+        # which caters to the scenario where changes from multiple blocks
+        # have already been written to sTab and the changes can moved into
+        # the bottom.
+        if bottom.snapshot.len == 0:
+          bottom.snapshotLevel.reset()
+        else:
+          # Incoming snapshots already have sTab baked in - make sure we don't
+          # overwrite merged data from more recent layers with this old version
+          bottom.sTab.reset()
+          bottom.kMap.reset()
         continue
-      mergeAndReset(db.txRef, frame)
+
+      doAssert not bottom.isNil, "should have found db.txRef at least"
+      mergeAndReset(bottom, frame)
+
       frame.dispose() # This will also dispose `txFrame` itself!
 
     # Put the now-merged contents in txFrame and make it the new base
-    swap(db.txRef[], txFrame[])
+    swap(bottom[], txFrame[])
     db.txRef = txFrame
 
+    if txFrame.parent != nil:
+      # Can't use rstack here because dispose will break the parent chain
+      for frame in txFrame.parent.stack():
+        frame.dispose()
+
+      txFrame.parent = nil
+  else:
+    if txFrame.snapshotLevel.isSome():
+      # Clear out redundant copy so we don't write it twice, below
+      txFrame.sTab.reset()
+      txFrame.kMap.reset()
+
   # Store structural single trie entries
+  assert txFrame.snapshot.len == 0 or txFrame.sTab.len == 0,
+    "Either snapshot or sTab should have been cleared as part of merging"
+
+  for rvid, item in txFrame.snapshot:
+    if item[2] >= oldLevel:
+      db.putVtxFn(batch, rvid, item[0], item[1])
+
   for rvid, vtx in txFrame.sTab:
-    txFrame.kMap.withValue(rvid, key) do:
+    txFrame.kMap.withValue(rvid, key):
       db.putVtxFn(batch, rvid, vtx, key[])
     do:
       db.putVtxFn(batch, rvid, vtx, default(HashKey))
@@ -94,6 +174,10 @@ proc persist*(
   for mixPath, vtx in txFrame.stoLeaves:
     db.stoLeaves.put(mixPath, vtx)
 
+  txFrame.snapshot.clear()
+  # Since txFrame is now the base, it contains all changes and therefore acts
+  # as a snapshot
+  txFrame.snapshotLevel = Opt.some(txFrame.level)
   txFrame.sTab.clear()
   txFrame.kMap.clear()
   txFrame.accLeaves.clear()
@@ -448,8 +448,8 @@ proc txFrameBegin*(parent: CoreDbTxRef): CoreDbTxRef =
 
   CoreDbTxRef(kTx: kTx, aTx: aTx)
 
-proc checkpoint*(tx: CoreDbTxRef, blockNumber: BlockNumber) =
-  tx.aTx.checkpoint(blockNumber)
+proc checkpoint*(tx: CoreDbTxRef, blockNumber: BlockNumber, skipSnapshot = false) =
+  tx.aTx.checkpoint(blockNumber, skipSnapshot)
 
 proc dispose*(tx: CoreDbTxRef) =
   tx.aTx.dispose()
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at
 #      https://opensource.org/licenses/MIT).
@@ -1,5 +1,5 @@
 # Nimbus
-# Copyright (c) 2023-2024 Status Research & Development GmbH
+# Copyright (c) 2023-2025 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at
 #      https://opensource.org/licenses/MIT).
|
|||||||
import
|
import
|
||||||
./[
|
./[
|
||||||
test_aristo,
|
test_aristo,
|
||||||
test_blockchain_json,
|
|
||||||
test_configuration,
|
test_configuration,
|
||||||
test_coredb,
|
test_coredb,
|
||||||
test_difficulty,
|
test_difficulty,
|
||||||
@ -19,7 +18,6 @@ import
|
|||||||
test_filters,
|
test_filters,
|
||||||
test_forked_chain,
|
test_forked_chain,
|
||||||
test_forkid,
|
test_forkid,
|
||||||
test_generalstate_json,
|
|
||||||
test_genesis,
|
test_genesis,
|
||||||
test_getproof_json,
|
test_getproof_json,
|
||||||
test_jwt_auth,
|
test_jwt_auth,
|
||||||
@ -36,4 +34,8 @@ import
|
|||||||
test_tracer_json,
|
test_tracer_json,
|
||||||
test_transaction_json,
|
test_transaction_json,
|
||||||
test_txpool,
|
test_txpool,
|
||||||
|
|
||||||
|
# These two suites are much slower than all the rest, so run them last
|
||||||
|
test_blockchain_json,
|
||||||
|
test_generalstate_json,
|
||||||
]
|
]
|
||||||
|
@@ -122,7 +122,7 @@ suite "Aristo compute":
     for (k, v, r) in samples[^1]:
       check:
         txFrame.mergeAccountRecord(k, v) == Result[bool, AristoError].ok(true)
-    txFrame.checkpoint(1)
+    txFrame.checkpoint(1, skipSnapshot = true)
 
     let batch = db.putBegFn()[]
     db.persist(batch, txFrame)
@@ -23,7 +23,7 @@ import
     aristo_init/init_common,
     aristo_init/memory_only,
     aristo_layers,
-    aristo_merge
+    aristo_merge,
   ]
 
 proc makeAccount(i: uint64): (Hash32, AristoAccount) =
@@ -38,8 +38,7 @@ const
 
 suite "Aristo TxFrame":
   setup:
-    let
-      db = AristoDbRef.init()
+    let db = AristoDbRef.init()
 
   test "Frames should independently keep data":
     let
|
|||||||
# The vid for acc1 gets created in tx1 because it has to move to a new
|
# The vid for acc1 gets created in tx1 because it has to move to a new
|
||||||
# mpt node from the root - tx2c updates only data, so the level at which
|
# mpt node from the root - tx2c updates only data, so the level at which
|
||||||
# we find the vtx should be one below tx2c!
|
# we find the vtx should be one below tx2c!
|
||||||
tx2c.layersGetVtx((VertexID(1), acc1Hike.legs[^1].wp.vid)).value()[1] == 1
|
(
|
||||||
|
tx2c.level -
|
||||||
|
tx2c.layersGetVtx((VertexID(1), acc1Hike.legs[^1].wp.vid)).value()[1]
|
||||||
|
) == 1
|
||||||
|
|
||||||
|
tx0.checkpoint(1, skipSnapshot = false)
|
||||||
|
tx1.checkpoint(2, skipSnapshot = false)
|
||||||
|
tx2.checkpoint(3, skipSnapshot = false)
|
||||||
|
tx2b.checkpoint(3, skipSnapshot = false)
|
||||||
|
tx2c.checkpoint(3, skipSnapshot = false)
|
||||||
|
|
||||||
|
check:
|
||||||
|
# Even after checkpointing, we should maintain the same relative levels
|
||||||
|
(
|
||||||
|
tx2c.level -
|
||||||
|
tx2c.layersGetVtx((VertexID(1), acc1Hike.legs[^1].wp.vid)).value()[1]
|
||||||
|
) == 1
|
||||||
|
|
||||||
tx2.checkpoint(1)
|
|
||||||
let batch = db.putBegFn().expect("working batch")
|
let batch = db.putBegFn().expect("working batch")
|
||||||
db.persist(batch, tx2)
|
db.persist(batch, tx2)
|
||||||
check:
|
check:
|
||||||
|
@ -11,16 +11,19 @@
|
|||||||
import
|
import
|
||||||
pkg/chronicles,
|
pkg/chronicles,
|
||||||
pkg/unittest2,
|
pkg/unittest2,
|
||||||
|
std/[os, strutils],
|
||||||
../execution_chain/common,
|
../execution_chain/common,
|
||||||
../execution_chain/config,
|
../execution_chain/config,
|
||||||
../execution_chain/utils/utils,
|
../execution_chain/utils/utils,
|
||||||
../execution_chain/core/chain/forked_chain,
|
../execution_chain/core/chain/forked_chain,
|
||||||
../execution_chain/db/ledger,
|
../execution_chain/db/ledger,
|
||||||
|
../execution_chain/db/era1_db,
|
||||||
./test_forked_chain/chain_debug
|
./test_forked_chain/chain_debug
|
||||||
|
|
||||||
const
|
const
|
||||||
genesisFile = "tests/customgenesis/cancun123.json"
|
genesisFile = "tests/customgenesis/cancun123.json"
|
||||||
senderAddr = address"73cf19657412508833f618a15e8251306b3e6ee5"
|
senderAddr = address"73cf19657412508833f618a15e8251306b3e6ee5"
|
||||||
|
sourcePath = currentSourcePath.rsplit({DirSep, AltSep}, 1)[0]
|
||||||
|
|
||||||
type
|
type
|
||||||
TestEnv = object
|
TestEnv = object
|
||||||
@@ -147,498 +150,440 @@ template checkPersisted(chain, blk) =
     debugEcho "Block Number: ", blk.header.number
 
 proc forkedChainMain*() =
   suite "ForkedChainRef tests":
     var env = setupEnv()
     let
       cc = env.newCom
       genesisHash = cc.genesisHeader.blockHash
       genesis = Block.init(cc.genesisHeader, BlockBody())
       baseTxFrame = cc.db.baseTxFrame()
-    let
-      blk1 = baseTxFrame.makeBlk(1, genesis)
-      blk2 = baseTxFrame.makeBlk(2, blk1)
-      blk3 = baseTxFrame.makeBlk(3, blk2)
-      dbTx = baseTxFrame.txFrameBegin
-      blk4 = dbTx.makeBlk(4, blk3)
-      blk5 = dbTx.makeBlk(5, blk4)
-      blk6 = dbTx.makeBlk(6, blk5)
-      blk7 = dbTx.makeBlk(7, blk6)
-    dbTx.dispose()
-    let
-      B4 = baseTxFrame.makeBlk(4, blk3, 1.byte)
-      dbTx2 = baseTxFrame.txFrameBegin
-      B5 = dbTx2.makeBlk(5, B4)
-      B6 = dbTx2.makeBlk(6, B5)
-      B7 = dbTx2.makeBlk(7, B6)
-    dbTx2.dispose()
-    let
-      C5 = baseTxFrame.makeBlk(5, blk4, 1.byte)
-      C6 = baseTxFrame.makeBlk(6, C5)
-      C7 = baseTxFrame.makeBlk(7, C6)
-    test "newBase == oldBase":
-      const info = "newBase == oldBase"
-      let com = env.newCom()
-      var chain = ForkedChainRef.init(com)
-      # same header twice
-      checkImportBlock(chain, blk1)
-      checkImportBlock(chain, blk1)
-      checkImportBlock(chain, blk2)
-      checkImportBlock(chain, blk3)
-      check chain.validate info & " (1)"
-      # no parent
-      checkImportBlockErr(chain, blk5)
-      check chain.headHash == genesisHash
-      check chain.latestHash == blk3.blockHash
-      check chain.validate info & " (2)"
-      # finalized > head -> error
-      checkForkChoiceErr(chain, blk1, blk3)
-      check chain.validate info & " (3)"
-      # blk4 is not part of chain
-      checkForkChoiceErr(chain, blk4, blk2)
-      # finalized > head -> error
-      checkForkChoiceErr(chain, blk1, blk2)
-      # blk4 is not part of chain
-      checkForkChoiceErr(chain, blk2, blk4)
-      # finalized < head -> ok
-      checkForkChoice(chain, blk2, blk1)
-      check chain.headHash == blk2.blockHash
-      check chain.latestHash == blk2.blockHash
-      check chain.validate info & " (7)"
-      # finalized == head -> ok
-      checkForkChoice(chain, blk2, blk2)
-      check chain.headHash == blk2.blockHash
-      check chain.latestHash == blk2.blockHash
-      check chain.baseNumber == 0'u64
-      check chain.validate info & " (8)"
-      # baggage written
-      check chain.wdWritten(blk1) == 1
-      check chain.wdWritten(blk2) == 2
-      check chain.validate info & " (9)"
-    test "newBase on activeBranch":
-      const info = "newBase on activeBranch"
-      let com = env.newCom()
-      var chain = ForkedChainRef.init(com, baseDistance = 3)
-      checkImportBlock(chain, blk1)
-      checkImportBlock(chain, blk2)
-      checkImportBlock(chain, blk3)
-      checkImportBlock(chain, blk4)
-      checkImportBlock(chain, blk5)
-      checkImportBlock(chain, blk6)
-      checkImportBlock(chain, blk7)
-      checkImportBlock(chain, blk4)
-      check chain.validate info & " (1)"
-      # newbase == head
-      checkForkChoice(chain, blk7, blk6)
-      check chain.validate info & " (2)"
-      check chain.headHash == blk7.blockHash
-      check chain.latestHash == blk7.blockHash
-      check chain.baseBranch == chain.activeBranch
-      check chain.wdWritten(blk7) == 7
-      # head - baseDistance must been persisted
-      checkPersisted(chain, blk3)
-      # make sure aristo not wiped out baggage
-      check chain.wdWritten(blk3) == 3
-      check chain.validate info & " (9)"
-    test "newBase between oldBase and head":
-      const info = "newBase between oldBase and head"
-      let com = env.newCom()
-      var chain = ForkedChainRef.init(com, baseDistance = 3)
-      checkImportBlock(chain, blk1)
-      checkImportBlock(chain, blk2)
-      checkImportBlock(chain, blk3)
-      checkImportBlock(chain, blk4)
-      checkImportBlock(chain, blk5)
-      checkImportBlock(chain, blk6)
-      checkImportBlock(chain, blk7)
-      check chain.validate info & " (1)"
-      checkForkChoice(chain, blk7, blk6)
-      check chain.validate info & " (2)"
-      check chain.headHash == blk7.blockHash
-      check chain.latestHash == blk7.blockHash
-      check chain.baseBranch == chain.activeBranch
-      check chain.wdWritten(blk6) == 6
-      check chain.wdWritten(blk7) == 7
-      # head - baseDistance must been persisted
-      checkPersisted(chain, blk3)
-      # make sure aristo not wiped out baggage
-      check chain.wdWritten(blk3) == 3
-      check chain.validate info & " (9)"
-    test "newBase == oldBase, fork and stay on that fork":
-      const info = "newBase == oldBase, fork .."
-      let com = env.newCom()
-      var chain = ForkedChainRef.init(com)
-      checkImportBlock(chain, blk1)
-      checkImportBlock(chain, blk2)
-      checkImportBlock(chain, blk3)
-      checkImportBlock(chain, blk4)
-      checkImportBlock(chain, blk5)
-      checkImportBlock(chain, blk6)
-      checkImportBlock(chain, blk7)
-      checkImportBlock(chain, B4)
-      checkImportBlock(chain, B5)
-      checkImportBlock(chain, B6)
-      checkImportBlock(chain, B7)
-      check chain.validate info & " (1)"
-      checkForkChoice(chain, B7, B5)
-      check chain.headHash == B7.blockHash
-      check chain.latestHash == B7.blockHash
-      check chain.baseNumber == 0'u64
-      check chain.branches.len == 2
-      check chain.validate info & " (9)"
-    test "newBase move forward, fork and stay on that fork":
-      const info = "newBase move forward, fork .."
-      let com = env.newCom()
-      var chain = ForkedChainRef.init(com, baseDistance = 3)
-      checkImportBlock(chain, blk1)
-      checkImportBlock(chain, blk2)
-      checkImportBlock(chain, blk3)
-      checkImportBlock(chain, blk4)
-      checkImportBlock(chain, blk5)
-      checkImportBlock(chain, blk6)
-      checkImportBlock(chain, blk7)
-      checkImportBlock(chain, B4)
-      checkImportBlock(chain, B5)
-      checkImportBlock(chain, B6)
-      checkImportBlock(chain, B7)
-      checkImportBlock(chain, B4)
-      check chain.validate info & " (1)"
-      checkForkChoice(chain, B6, B4)
-      check chain.validate info & " (2)"
-      check chain.headHash == B6.blockHash
-      check chain.latestHash == B6.blockHash
-      check chain.baseNumber == 3'u64
-      check chain.branches.len == 2
-      check chain.validate info & " (9)"
-    test "newBase on shorter canonical arc, remove oldBase branches":
-      const info = "newBase on shorter canonical, remove oldBase branches"
-      let com = env.newCom()
-      var chain = ForkedChainRef.init(com, baseDistance = 3)
-      checkImportBlock(chain, blk1)
-      checkImportBlock(chain, blk2)
-      checkImportBlock(chain, blk3)
-      checkImportBlock(chain, blk4)
-      checkImportBlock(chain, blk5)
-      checkImportBlock(chain, blk6)
-      checkImportBlock(chain, blk7)
-      checkImportBlock(chain, B4)
-      checkImportBlock(chain, B5)
-      checkImportBlock(chain, B6)
-      checkImportBlock(chain, B7)
-      check chain.validate info & " (1)"
-      checkForkChoice(chain, B7, B6)
-      check chain.validate info & " (2)"
-      check chain.headHash == B7.blockHash
-      check chain.latestHash == B7.blockHash
-      check chain.baseNumber == 4'u64
-      check chain.branches.len == 1
-      check chain.validate info & " (9)"
-    test "newBase on curbed non-canonical arc":
-      const info = "newBase on curbed non-canonical .."
-      let com = env.newCom()
-      var chain = ForkedChainRef.init(com, baseDistance = 5)
-      checkImportBlock(chain, blk1)
-      checkImportBlock(chain, blk2)
-      checkImportBlock(chain, blk3)
-      checkImportBlock(chain, blk4)
-      checkImportBlock(chain, blk5)
-      checkImportBlock(chain, blk6)
-      checkImportBlock(chain, blk7)
-      checkImportBlock(chain, B4)
-      checkImportBlock(chain, B5)
-      checkImportBlock(chain, B6)
-      checkImportBlock(chain, B7)
-      check chain.validate info & " (1)"
-      checkForkChoice(chain, B7, B5)
-      check chain.validate info & " (2)"
-      check chain.headHash == B7.blockHash
-      check chain.latestHash == B7.blockHash
+    let
+      blk1 = baseTxFrame.makeBlk(1, genesis)
+      blk2 = baseTxFrame.makeBlk(2, blk1)
+      blk3 = baseTxFrame.makeBlk(3, blk2)
+      dbTx = baseTxFrame.txFrameBegin
+      blk4 = dbTx.makeBlk(4, blk3)
+      blk5 = dbTx.makeBlk(5, blk4)
+      blk6 = dbTx.makeBlk(6, blk5)
+      blk7 = dbTx.makeBlk(7, blk6)
+    dbTx.dispose()
+    let
+      B4 = baseTxFrame.makeBlk(4, blk3, 1.byte)
+      dbTx2 = baseTxFrame.txFrameBegin
+      B5 = dbTx2.makeBlk(5, B4)
+      B6 = dbTx2.makeBlk(6, B5)
+      B7 = dbTx2.makeBlk(7, B6)
+    dbTx2.dispose()
+    let
+      C5 = baseTxFrame.makeBlk(5, blk4, 1.byte)
+      C6 = baseTxFrame.makeBlk(6, C5)
+      C7 = baseTxFrame.makeBlk(7, C6)
+    test "newBase == oldBase":
+      const info = "newBase == oldBase"
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com)
+      # same header twice
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      check chain.validate info & " (1)"
+      # no parent
+      checkImportBlockErr(chain, blk5)
+      check chain.headHash == genesisHash
+      check chain.latestHash == blk3.blockHash
+      check chain.validate info & " (2)"
+      # finalized > head -> error
+      checkForkChoiceErr(chain, blk1, blk3)
+      check chain.validate info & " (3)"
+      # blk4 is not part of chain
+      checkForkChoiceErr(chain, blk4, blk2)
+      # finalized > head -> error
+      checkForkChoiceErr(chain, blk1, blk2)
+      # blk4 is not part of chain
+      checkForkChoiceErr(chain, blk2, blk4)
+      # finalized < head -> ok
+      checkForkChoice(chain, blk2, blk1)
+      check chain.headHash == blk2.blockHash
+      check chain.latestHash == blk2.blockHash
+      check chain.validate info & " (7)"
+      # finalized == head -> ok
+      checkForkChoice(chain, blk2, blk2)
+      check chain.headHash == blk2.blockHash
+      check chain.latestHash == blk2.blockHash
+      check chain.baseNumber == 0'u64
+      check chain.validate info & " (8)"
+      # baggage written
+      check chain.wdWritten(blk1) == 1
+      check chain.wdWritten(blk2) == 2
+      check chain.validate info & " (9)"
+    test "newBase on activeBranch":
+      const info = "newBase on activeBranch"
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      checkImportBlock(chain, blk4)
+      check chain.validate info & " (1)"
+      # newbase == head
+      checkForkChoice(chain, blk7, blk6)
+      check chain.validate info & " (2)"
+      check chain.headHash == blk7.blockHash
+      check chain.latestHash == blk7.blockHash
+      check chain.baseBranch == chain.activeBranch
+      check chain.wdWritten(blk7) == 7
+      # head - baseDistance must been persisted
+      checkPersisted(chain, blk3)
+      # make sure aristo not wiped out baggage
+      check chain.wdWritten(blk3) == 3
+      check chain.validate info & " (9)"
+    test "newBase between oldBase and head":
+      const info = "newBase between oldBase and head"
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      check chain.validate info & " (1)"
+      checkForkChoice(chain, blk7, blk6)
+      check chain.validate info & " (2)"
+      check chain.headHash == blk7.blockHash
+      check chain.latestHash == blk7.blockHash
+      check chain.baseBranch == chain.activeBranch
+      check chain.wdWritten(blk6) == 6
+      check chain.wdWritten(blk7) == 7
+      # head - baseDistance must been persisted
+      checkPersisted(chain, blk3)
+      # make sure aristo not wiped out baggage
+      check chain.wdWritten(blk3) == 3
+      check chain.validate info & " (9)"
+    test "newBase == oldBase, fork and stay on that fork":
+      const info = "newBase == oldBase, fork .."
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, B7)
+      check chain.validate info & " (1)"
+      checkForkChoice(chain, B7, B5)
+      check chain.headHash == B7.blockHash
+      check chain.latestHash == B7.blockHash
+      check chain.baseNumber == 0'u64
+      check chain.branches.len == 2
+      check chain.validate info & " (9)"
+    test "newBase move forward, fork and stay on that fork":
+      const info = "newBase move forward, fork .."
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, B7)
+      checkImportBlock(chain, B4)
+      check chain.validate info & " (1)"
+      checkForkChoice(chain, B6, B4)
+      check chain.validate info & " (2)"
+      check chain.headHash == B6.blockHash
+      check chain.latestHash == B6.blockHash
+      check chain.baseNumber == 3'u64
+      check chain.branches.len == 2
+      check chain.validate info & " (9)"
+    test "newBase on shorter canonical arc, remove oldBase branches":
+      const info = "newBase on shorter canonical, remove oldBase branches"
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, B7)
+      check chain.validate info & " (1)"
+      checkForkChoice(chain, B7, B6)
+      check chain.validate info & " (2)"
+      check chain.headHash == B7.blockHash
+      check chain.latestHash == B7.blockHash
+      check chain.baseNumber == 4'u64
+      check chain.branches.len == 1
+      check chain.validate info & " (9)"
+    test "newBase on curbed non-canonical arc":
+      const info = "newBase on curbed non-canonical .."
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com, baseDistance = 5)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, B7)
+      check chain.validate info & " (1)"
+      checkForkChoice(chain, B7, B5)
+      check chain.validate info & " (2)"
+      check chain.headHash == B7.blockHash
+      check chain.latestHash == B7.blockHash
+      check chain.baseNumber > 0
+      check chain.baseNumber < B4.header.number
+      check chain.branches.len == 2
+      check chain.validate info & " (9)"
+    test "newBase == oldBase, fork and return to old chain":
+      const info = "newBase == oldBase, fork .."
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, B7)
+      check chain.validate info & " (1)"
+      checkForkChoice(chain, blk7, blk5)
+      check chain.validate info & " (2)"
+      check chain.headHash == blk7.blockHash
+      check chain.latestHash == blk7.blockHash
+      check chain.baseNumber == 0'u64
+      check chain.validate info & " (9)"
+    test "newBase on activeBranch, fork and return to old chain":
+      const info = "newBase on activeBranch, fork .."
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
+      checkImportBlock(chain, blk2)
+      checkImportBlock(chain, blk3)
+      checkImportBlock(chain, blk4)
+      checkImportBlock(chain, blk5)
+      checkImportBlock(chain, blk6)
+      checkImportBlock(chain, blk7)
+      checkImportBlock(chain, B4)
+      checkImportBlock(chain, B5)
+      checkImportBlock(chain, B6)
+      checkImportBlock(chain, B7)
+      checkImportBlock(chain, blk4)
+      check chain.validate info & " (1)"
+      checkForkChoice(chain, blk7, blk5)
+      check chain.validate info & " (2)"
+      check chain.headHash == blk7.blockHash
+      check chain.latestHash == blk7.blockHash
+      check chain.baseBranch == chain.activeBranch
+      check chain.validate info & " (9)"
+    test "newBase on shorter canonical arc, discard arc with oldBase" &
+         " (ign dup block)":
+      const info = "newBase on shorter canonical .."
+      let com = env.newCom()
+      var chain = ForkedChainRef.init(com, baseDistance = 3)
+      checkImportBlock(chain, blk1)
|
checkImportBlock(chain, blk2)
|
||||||
check chain.baseNumber > 0
|
checkImportBlock(chain, blk3)
|
||||||
check chain.baseNumber < B4.header.number
|
checkImportBlock(chain, blk4)
|
||||||
check chain.branches.len == 2
|
checkImportBlock(chain, blk5)
|
||||||
check chain.validate info & " (9)"
|
checkImportBlock(chain, blk6)
|
||||||
|
checkImportBlock(chain, blk7)
|
||||||
test "newBase == oldBase, fork and return to old chain":
|
checkImportBlock(chain, B4)
|
||||||
const info = "newBase == oldBase, fork .."
|
checkImportBlock(chain, B5)
|
||||||
let com = env.newCom()
|
checkImportBlock(chain, B6)
|
||||||
|
checkImportBlock(chain, B7)
|
||||||
var chain = ForkedChainRef.init(com)
|
checkImportBlock(chain, blk4)
|
||||||
checkImportBlock(chain, blk1)
|
check chain.validate info & " (1)"
|
||||||
checkImportBlock(chain, blk2)
|
checkForkChoice(chain, B7, B5)
|
||||||
checkImportBlock(chain, blk3)
|
check chain.validate info & " (2)"
|
||||||
checkImportBlock(chain, blk4)
|
check chain.headHash == B7.blockHash
|
||||||
checkImportBlock(chain, blk5)
|
check chain.latestHash == B7.blockHash
|
||||||
checkImportBlock(chain, blk6)
|
check chain.baseNumber == 4'u64
|
||||||
checkImportBlock(chain, blk7)
|
check chain.branches.len == 1
|
||||||
|
check chain.validate info & " (9)"
|
||||||
checkImportBlock(chain, B4)
|
test "newBase on longer canonical arc, discard new branch":
|
||||||
checkImportBlock(chain, B5)
|
const info = "newBase on longer canonical .."
|
||||||
checkImportBlock(chain, B6)
|
let com = env.newCom()
|
||||||
checkImportBlock(chain, B7)
|
var chain = ForkedChainRef.init(com, baseDistance = 3)
|
||||||
check chain.validate info & " (1)"
|
checkImportBlock(chain, blk1)
|
||||||
|
checkImportBlock(chain, blk2)
|
||||||
checkForkChoice(chain, blk7, blk5)
|
checkImportBlock(chain, blk3)
|
||||||
check chain.validate info & " (2)"
|
checkImportBlock(chain, blk4)
|
||||||
|
checkImportBlock(chain, blk5)
|
||||||
check chain.headHash == blk7.blockHash
|
checkImportBlock(chain, blk6)
|
||||||
check chain.latestHash == blk7.blockHash
|
checkImportBlock(chain, blk7)
|
||||||
check chain.baseNumber == 0'u64
|
checkImportBlock(chain, B4)
|
||||||
check chain.validate info & " (9)"
|
checkImportBlock(chain, B5)
|
||||||
|
checkImportBlock(chain, B6)
|
||||||
test "newBase on activeBranch, fork and return to old chain":
|
checkImportBlock(chain, B7)
|
||||||
const info = "newBase on activeBranch, fork .."
|
check chain.validate info & " (1)"
|
||||||
let com = env.newCom()
|
checkForkChoice(chain, blk7, blk5)
|
||||||
|
check chain.validate info & " (2)"
|
||||||
var chain = ForkedChainRef.init(com, baseDistance = 3)
|
check chain.headHash == blk7.blockHash
|
||||||
checkImportBlock(chain, blk1)
|
check chain.latestHash == blk7.blockHash
|
||||||
checkImportBlock(chain, blk2)
|
check chain.baseNumber > 0
|
||||||
checkImportBlock(chain, blk3)
|
check chain.baseNumber < blk5.header.number
|
||||||
checkImportBlock(chain, blk4)
|
check chain.branches.len == 1
|
||||||
checkImportBlock(chain, blk5)
|
check chain.validate info & " (9)"
|
||||||
checkImportBlock(chain, blk6)
|
test "headerByNumber":
|
||||||
checkImportBlock(chain, blk7)
|
const info = "headerByNumber"
|
||||||
|
let com = env.newCom()
|
||||||
checkImportBlock(chain, B4)
|
var chain = ForkedChainRef.init(com, baseDistance = 3)
|
||||||
checkImportBlock(chain, B5)
|
checkImportBlock(chain, blk1)
|
||||||
checkImportBlock(chain, B6)
|
checkImportBlock(chain, blk2)
|
||||||
checkImportBlock(chain, B7)
|
checkImportBlock(chain, blk3)
|
||||||
|
checkImportBlock(chain, blk4)
|
||||||
checkImportBlock(chain, blk4)
|
checkImportBlock(chain, blk5)
|
||||||
check chain.validate info & " (1)"
|
checkImportBlock(chain, blk6)
|
||||||
|
checkImportBlock(chain, blk7)
|
||||||
checkForkChoice(chain, blk7, blk5)
|
checkImportBlock(chain, B4)
|
||||||
check chain.validate info & " (2)"
|
checkImportBlock(chain, B5)
|
||||||
|
checkImportBlock(chain, B6)
|
||||||
check chain.headHash == blk7.blockHash
|
checkImportBlock(chain, B7)
|
||||||
check chain.latestHash == blk7.blockHash
|
check chain.validate info & " (1)"
|
||||||
check chain.baseBranch == chain.activeBranch
|
checkForkChoice(chain, blk7, blk5)
|
||||||
check chain.validate info & " (9)"
|
check chain.validate info & " (2)"
|
||||||
|
# cursor
|
||||||
test "newBase on shorter canonical arc, discard arc with oldBase" &
|
check chain.headerByNumber(8).isErr
|
||||||
" (ign dup block)":
|
check chain.headerByNumber(7).expect("OK").number == 7
|
||||||
const info = "newBase on shorter canonical .."
|
check chain.headerByNumber(7).expect("OK").blockHash == blk7.blockHash
|
||||||
let com = env.newCom()
|
# from db
|
||||||
|
check chain.headerByNumber(3).expect("OK").number == 3
|
||||||
var chain = ForkedChainRef.init(com, baseDistance = 3)
|
check chain.headerByNumber(3).expect("OK").blockHash == blk3.blockHash
|
||||||
checkImportBlock(chain, blk1)
|
# base
|
||||||
checkImportBlock(chain, blk2)
|
check chain.headerByNumber(4).expect("OK").number == 4
|
||||||
checkImportBlock(chain, blk3)
|
check chain.headerByNumber(4).expect("OK").blockHash == blk4.blockHash
|
||||||
checkImportBlock(chain, blk4)
|
# from cache
|
||||||
checkImportBlock(chain, blk5)
|
check chain.headerByNumber(5).expect("OK").number == 5
|
||||||
checkImportBlock(chain, blk6)
|
check chain.headerByNumber(5).expect("OK").blockHash == blk5.blockHash
|
||||||
checkImportBlock(chain, blk7)
|
check chain.validate info & " (9)"
|
||||||
|
test "3 branches, alternating imports":
|
||||||
checkImportBlock(chain, B4)
|
const info = "3 branches, alternating imports"
|
||||||
checkImportBlock(chain, B5)
|
let com = env.newCom()
|
||||||
checkImportBlock(chain, B6)
|
var chain = ForkedChainRef.init(com, baseDistance = 3)
|
||||||
checkImportBlock(chain, B7)
|
checkImportBlock(chain, blk1)
|
||||||
|
checkImportBlock(chain, blk2)
|
||||||
checkImportBlock(chain, blk4)
|
checkImportBlock(chain, blk3)
|
||||||
check chain.validate info & " (1)"
|
checkImportBlock(chain, B4)
|
||||||
|
checkImportBlock(chain, blk4)
|
||||||
checkForkChoice(chain, B7, B5)
|
checkImportBlock(chain, B5)
|
||||||
check chain.validate info & " (2)"
|
checkImportBlock(chain, blk5)
|
||||||
|
checkImportBlock(chain, C5)
|
||||||
check chain.headHash == B7.blockHash
|
checkImportBlock(chain, B6)
|
||||||
check chain.latestHash == B7.blockHash
|
checkImportBlock(chain, blk6)
|
||||||
check chain.baseNumber == 4'u64
|
checkImportBlock(chain, C6)
|
||||||
check chain.branches.len == 1
|
checkImportBlock(chain, B7)
|
||||||
check chain.validate info & " (9)"
|
checkImportBlock(chain, blk7)
|
||||||
|
checkImportBlock(chain, C7)
|
||||||
test "newBase on longer canonical arc, discard new branch":
|
check chain.validate info & " (1)"
|
||||||
const info = "newBase on longer canonical .."
|
check chain.latestHash == C7.blockHash
|
||||||
let com = env.newCom()
|
check chain.latestNumber == 7'u64
|
||||||
|
check chain.branches.len == 3
|
||||||
var chain = ForkedChainRef.init(com, baseDistance = 3)
|
checkForkChoice(chain, B7, blk3)
|
||||||
checkImportBlock(chain, blk1)
|
check chain.validate info & " (2)"
|
||||||
checkImportBlock(chain, blk2)
|
check chain.branches.len == 3
|
||||||
checkImportBlock(chain, blk3)
|
checkForkChoice(chain, B7, B6)
|
||||||
checkImportBlock(chain, blk4)
|
check chain.validate info & " (2)"
|
||||||
checkImportBlock(chain, blk5)
|
check chain.branches.len == 1
|
||||||
checkImportBlock(chain, blk6)
|
test "importing blocks with new CommonRef and FC instance, 3 blocks":
|
||||||
checkImportBlock(chain, blk7)
|
const info = "importing blocks with new CommonRef and FC instance, 3 blocks"
|
||||||
|
let com = env.newCom()
|
||||||
checkImportBlock(chain, B4)
|
let chain = ForkedChainRef.init(com, baseDistance = 0)
|
||||||
checkImportBlock(chain, B5)
|
checkImportBlock(chain, blk1)
|
||||||
checkImportBlock(chain, B6)
|
checkImportBlock(chain, blk2)
|
||||||
checkImportBlock(chain, B7)
|
checkImportBlock(chain, blk3)
|
||||||
check chain.validate info & " (1)"
|
checkForkChoice(chain, blk3, blk3)
|
||||||
|
check chain.validate info & " (1)"
|
||||||
checkForkChoice(chain, blk7, blk5)
|
let cc = env.newCom(com.db)
|
||||||
check chain.validate info & " (2)"
|
let fc = ForkedChainRef.init(cc, baseDistance = 0)
|
||||||
|
check fc.headHash == blk3.blockHash
|
||||||
check chain.headHash == blk7.blockHash
|
checkImportBlock(fc, blk4)
|
||||||
check chain.latestHash == blk7.blockHash
|
checkForkChoice(fc, blk4, blk4)
|
||||||
check chain.baseNumber > 0
|
check chain.validate info & " (2)"
|
||||||
check chain.baseNumber < blk5.header.number
|
test "importing blocks with new CommonRef and FC instance, 1 block":
|
||||||
check chain.branches.len == 1
|
const info = "importing blocks with new CommonRef and FC instance, 1 block"
|
||||||
check chain.validate info & " (9)"
|
let com = env.newCom()
|
||||||
|
let chain = ForkedChainRef.init(com, baseDistance = 0)
|
||||||
test "headerByNumber":
|
checkImportBlock(chain, blk1)
|
||||||
const info = "headerByNumber"
|
checkForkChoice(chain, blk1, blk1)
|
||||||
let com = env.newCom()
|
check chain.validate info & " (1)"
|
||||||
|
let cc = env.newCom(com.db)
|
||||||
var chain = ForkedChainRef.init(com, baseDistance = 3)
|
let fc = ForkedChainRef.init(cc, baseDistance = 0)
|
||||||
checkImportBlock(chain, blk1)
|
check fc.headHash == blk1.blockHash
|
||||||
checkImportBlock(chain, blk2)
|
checkImportBlock(fc, blk2)
|
||||||
checkImportBlock(chain, blk3)
|
checkForkChoice(fc, blk2, blk2)
|
||||||
checkImportBlock(chain, blk4)
|
check chain.validate info & " (2)"
|
||||||
checkImportBlock(chain, blk5)
|
|
||||||
checkImportBlock(chain, blk6)
|
suite "ForkedChain mainnet replay":
|
||||||
checkImportBlock(chain, blk7)
|
# A short mainnet replay test to check that the first few hundred blocks can
|
||||||
|
# be imported using a typical importBlock / fcu sequence - this does not
|
||||||
checkImportBlock(chain, B4)
|
# test any transactions since these blocks are practically empty, but thanks
|
||||||
checkImportBlock(chain, B5)
|
# to block rewards the state db keeps changing anyway providing a simple
|
||||||
checkImportBlock(chain, B6)
|
# smoke test
|
||||||
checkImportBlock(chain, B7)
|
setup:
|
||||||
check chain.validate info & " (1)"
|
let
|
||||||
|
era0 = Era1DbRef.init(sourcePath / "replay", "mainnet").expect("Era files present")
|
||||||
checkForkChoice(chain, blk7, blk5)
|
com = CommonRef.new(AristoDbMemory.newCoreDbRef(), nil)
|
||||||
check chain.validate info & " (2)"
|
fc = ForkedChainRef.init(com)
|
||||||
|
|
||||||
# cursor
|
test "Replay mainnet era, single FCU":
|
||||||
check chain.headerByNumber(8).isErr
|
var blk: EthBlock
|
||||||
check chain.headerByNumber(7).expect("OK").number == 7
|
for i in 1..<fc.baseDistance * 2:
|
||||||
check chain.headerByNumber(7).expect("OK").blockHash == blk7.blockHash
|
era0.getEthBlock(i.BlockNumber, blk).expect("block in test database")
|
||||||
|
check:
|
||||||
# from db
|
fc.importBlock(blk).isOk()
|
||||||
check chain.headerByNumber(3).expect("OK").number == 3
|
|
||||||
check chain.headerByNumber(3).expect("OK").blockHash == blk3.blockHash
|
check:
|
||||||
|
fc.forkChoice(blk.blockHash, blk.blockHash).isOk()
|
||||||
# base
|
|
||||||
check chain.headerByNumber(4).expect("OK").number == 4
|
test "Replay mainnet era, multiple FCU":
|
||||||
check chain.headerByNumber(4).expect("OK").blockHash == blk4.blockHash
|
# Simulates the typical case where fcu comes after the block
|
||||||
|
var blk: EthBlock
|
||||||
# from cache
|
era0.getEthBlock(0.BlockNumber, blk).expect("block in test database")
|
||||||
check chain.headerByNumber(5).expect("OK").number == 5
|
|
||||||
check chain.headerByNumber(5).expect("OK").blockHash == blk5.blockHash
|
var blocks = [blk.blockHash, blk.blockHash]
|
||||||
check chain.validate info & " (9)"
|
|
||||||
|
for i in 1..<fc.baseDistance * 2:
|
||||||
test "3 branches, alternating imports":
|
era0.getEthBlock(i.BlockNumber, blk).expect("block in test database")
|
||||||
const info = "3 branches, alternating imports"
|
check:
|
||||||
let com = env.newCom()
|
fc.importBlock(blk).isOk()
|
||||||
|
|
||||||
var chain = ForkedChainRef.init(com, baseDistance = 3)
|
let hash = blk.blockHash
|
||||||
checkImportBlock(chain, blk1)
|
check:
|
||||||
checkImportBlock(chain, blk2)
|
fc.forkChoice(hash, blocks[0]).isOk()
|
||||||
checkImportBlock(chain, blk3)
|
if i mod 32 == 0:
|
||||||
|
# in reality, finalized typically lags a bit more than this, but
|
||||||
checkImportBlock(chain, B4)
|
# for the purpose of the test, this should be good enough
|
||||||
checkImportBlock(chain, blk4)
|
blocks[0] = blocks[1]
|
||||||
|
blocks[1] = hash
|
||||||
checkImportBlock(chain, B5)
|
|
||||||
checkImportBlock(chain, blk5)
|
|
||||||
checkImportBlock(chain, C5)
|
|
||||||
|
|
||||||
checkImportBlock(chain, B6)
|
|
||||||
checkImportBlock(chain, blk6)
|
|
||||||
checkImportBlock(chain, C6)
|
|
||||||
|
|
||||||
checkImportBlock(chain, B7)
|
|
||||||
checkImportBlock(chain, blk7)
|
|
||||||
checkImportBlock(chain, C7)
|
|
||||||
check chain.validate info & " (1)"
|
|
||||||
|
|
||||||
check chain.latestHash == C7.blockHash
|
|
||||||
check chain.latestNumber == 7'u64
|
|
||||||
check chain.branches.len == 3
|
|
||||||
|
|
||||||
checkForkChoice(chain, B7, blk3)
|
|
||||||
check chain.validate info & " (2)"
|
|
||||||
check chain.branches.len == 3
|
|
||||||
|
|
||||||
checkForkChoice(chain, B7, B6)
|
|
||||||
check chain.validate info & " (2)"
|
|
||||||
check chain.branches.len == 1
|
|
||||||
|
|
||||||
test "importing blocks with new CommonRef and FC instance, 3 blocks":
|
|
||||||
const info = "importing blocks with new CommonRef and FC instance, 3 blocks"
|
|
||||||
let com = env.newCom()
|
|
||||||
|
|
||||||
let chain = ForkedChainRef.init(com, baseDistance = 0)
|
|
||||||
checkImportBlock(chain, blk1)
|
|
||||||
checkImportBlock(chain, blk2)
|
|
||||||
checkImportBlock(chain, blk3)
|
|
||||||
checkForkChoice(chain, blk3, blk3)
|
|
||||||
check chain.validate info & " (1)"
|
|
||||||
|
|
||||||
let cc = env.newCom(com.db)
|
|
||||||
let fc = ForkedChainRef.init(cc, baseDistance = 0)
|
|
||||||
check fc.headHash == blk3.blockHash
|
|
||||||
checkImportBlock(fc, blk4)
|
|
||||||
checkForkChoice(fc, blk4, blk4)
|
|
||||||
check chain.validate info & " (2)"
|
|
||||||
|
|
||||||
test "importing blocks with new CommonRef and FC instance, 1 block":
|
|
||||||
const info = "importing blocks with new CommonRef and FC instance, 1 block"
|
|
||||||
let com = env.newCom()
|
|
||||||
|
|
||||||
let chain = ForkedChainRef.init(com, baseDistance = 0)
|
|
||||||
checkImportBlock(chain, blk1)
|
|
||||||
checkForkChoice(chain, blk1, blk1)
|
|
||||||
check chain.validate info & " (1)"
|
|
||||||
|
|
||||||
let cc = env.newCom(com.db)
|
|
||||||
let fc = ForkedChainRef.init(cc, baseDistance = 0)
|
|
||||||
check fc.headHash == blk1.blockHash
|
|
||||||
checkImportBlock(fc, blk2)
|
|
||||||
checkForkChoice(fc, blk2, blk2)
|
|
||||||
check chain.validate info & " (2)"
|
|
||||||
|
|
||||||
|
|
||||||
forkedChainMain()
|
forkedChainMain()
|
||||||
|
|
||||||
|
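The "multiple FCU" test above simulates the typical live-sync sequence where fcu arrives after each imported block and finalized trails behind head. A minimal standalone sketch of that loop, reusing the suite's `era0`/`fc` setup and swapping the unittest `check:` blocks for `doAssert` (the 32-block finalization lag is a test convenience, not a protocol constant):

    # Sketch only - condensed from the "Replay mainnet era, multiple FCU" test
    var blk: EthBlock
    era0.getEthBlock(0.BlockNumber, blk).expect("block in test database")

    # blocks[0] plays the role of "finalized", blocks[1] the latest epoch head
    var blocks = [blk.blockHash, blk.blockHash]

    for i in 1 ..< fc.baseDistance * 2:
      era0.getEthBlock(i.BlockNumber, blk).expect("block in test database")
      doAssert fc.importBlock(blk).isOk()

      let hash = blk.blockHash
      # head advances with every block while finalized lags behind
      doAssert fc.forkChoice(hash, blocks[0]).isOk()
      if i mod 32 == 0:
        blocks[0] = blocks[1]  # pretend another epoch has been finalized
        blocks[1] = hash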
@ -138,7 +138,7 @@ createRpcSigsFromNim(RpcClient):
 # Test Runners
 # ------------------------------------------------------------------------------

-proc runKeyLoader(noisy = true;
+proc runKeyLoader(noisy = defined(debug);
                   keyFile = jwtKeyFile; strippedFile = jwtKeyStripped) =
   let
     filePath = keyFile.findFilePath.value
@ -229,7 +229,7 @@ proc runKeyLoader(noisy = true;
   # Compare key against contents of shared key file
   check hexKey.cmpIgnoreCase(hexLine) == 0

-proc runJwtAuth(noisy = true; keyFile = jwtKeyFile) =
+proc runJwtAuth(noisy = defined(debug); keyFile = jwtKeyFile) =
   let
     filePath = keyFile.findFilePath.value
     dataDir = filePath.splitPath.head
@ -297,10 +297,6 @@ proc runJwtAuth(noisy = true; keyFile = jwtKeyFile) =
 # Main function(s)
 # ------------------------------------------------------------------------------

-proc jwtAuthMain*(noisy = defined(debug)) =
-  noisy.runKeyLoader
-  noisy.runJwtAuth
-
 when isMainModule:
   setErrorLevel()
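The substantive change in these hunks is the `noisy` default flipping from `true` to `defined(debug)`, alongside the removal of the `jwtAuthMain` wrapper. `defined(debug)` is evaluated at compile time, so the runners only produce verbose output when the binary is built with the `debug` conditional symbol. A tiny illustration of the pattern, using a hypothetical `runDemo` proc:

    proc runDemo(noisy = defined(debug)) =
      # defined(debug) is a compile-time bool: true only when the `debug`
      # conditional symbol is set, e.g. `nim c -d:debug foo.nim`
      if noisy:
        echo "verbose diagnostics enabled"

    runDemo()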