Fix pivot setup after switch to full sync (#1562)

* Cosmetics, update logging, docu

* Fix pivot hand-over after switch to full sync

why:
  Got garbled after code clean up
This commit is contained in:
Jordan Hrycaj 2023-04-25 13:24:32 +01:00 committed by GitHub
parent 7f56e90654
commit d6ee672ba5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 42 additions and 39 deletions

View File

@ -155,13 +155,13 @@ proc snapTicker(t: TickerRef) {.gcsafe.} =
&"({(data.nContracts[1]+0.5).int64})")
if t.snap.recovery:
info "Snap sync statistics (recovery)",
info "Snap sync ticker (recovery)",
up, nInst, bc, pv, nAcc, accCov, nSto, nStoQ, nCon, nConQ, mem
elif recoveryDone:
info "Snap sync statistics (recovery done)",
info "Snap sync ticker (recovery done)",
up, nInst, bc, pv, nAcc, accCov, nSto, nStoQ, nCon, nConQ, mem
else:
info "Snap sync statistics",
info "Snap sync ticker",
up, nInst, bc, pv, nAcc, accCov, nSto, nStoQ, nCon, nConQ, mem
@ -190,10 +190,10 @@ proc fullTicker(t: TickerRef) {.gcsafe.} =
t.visited = now
if data.suspended:
info "Full sync statistics (suspended)", up, nInst, pv,
info "Full sync ticker (suspended)", up, nInst, pv,
persistent, unprocessed, staged, queued, reOrg, mem
else:
info "Full sync statistics", up, nInst, pv,
info "Full sync ticker", up, nInst, pv,
persistent, unprocessed, staged, queued, reOrg, mem
# ------------------------------------------------------------------------------

View File

@ -129,10 +129,7 @@ proc processStaged(buddy: SnapBuddyRef): bool =
# ------------------------------------------------------------------------------
proc fullSyncSetup(ctx: SnapCtxRef) =
let blockNum = if ctx.pool.fullHeader.isNone: 0.toBlockNumber
else: ctx.pool.fullHeader.unsafeGet.blockNumber
ctx.pool.bCtx = BlockQueueCtxRef.init(blockNum + 1)
ctx.pool.bCtx = BlockQueueCtxRef.init()
ctx.pool.bPivot = BestPivotCtxRef.init(rng=ctx.pool.rng, minPeers=0)
ctx.pool.ticker.init(cb = ctx.tickerUpdater())
@ -173,38 +170,43 @@ proc fullSyncDaemon(ctx: SnapCtxRef) {.async.} =
proc fullSyncPool(buddy: SnapBuddyRef, last: bool; laps: int): bool =
let ctx = buddy.ctx
# Take over soft restart after switch to full sync mode.
# This process needs to be applied to all buddy peers.
# There is a soft re-setup after switch over to full sync mode if a pivot
# block header is available initialised from outside, i.e. snap sync switch.
if ctx.pool.fullHeader.isSome:
# Soft start all peers on the second lap.
ignoreException("fullSyncPool"):
if not buddy.fullSyncStart():
# Start() method failed => wait for another peer
buddy.ctrl.stopped = true
let stateHeader = ctx.pool.fullHeader.unsafeGet
# Reinitialise block queue descriptor relative to current pivot
ctx.pool.startNumber = some(stateHeader.blockNumber)
ctx.pool.bCtx = BlockQueueCtxRef.init(stateHeader.blockNumber + 1)
# Kick off ticker (was stopped by snap `release()` method)
ctx.pool.ticker.start()
# Store pivot as parent hash in database
ctx.pool.snapDb.kvDb.persistentBlockHeaderPut stateHeader
# Instead of genesis.
ctx.chain.com.startOfHistory = stateHeader.blockHash
when dumpDatabaseOnRollOver: # <--- will go away (debugging only)
# Dump database ... <--- will go away (debugging only)
let nRecords = # <--- will go away (debugging only)
ctx.pool.snapDb.rockDb.dumpAllDb # <--- will go away (debugging only)
trace logTxt "dumped block chain database", nRecords
# Reset so that this action would not be triggered, again
ctx.pool.fullHeader = none(BlockHeader)
# Soft re-start buddy peers if on the second lap.
if 0 < laps and ctx.pool.startNumber.isSome:
if not buddy.fullSyncStart():
# Start() method failed => wait for another peer
buddy.ctrl.stopped = true
if last:
let stateHeader = ctx.pool.fullHeader.unsafeGet
trace logTxt "soft restart done", peer=buddy.peer, last, laps,
pivot=stateHeader.blockNumber.toStr,
pivot=ctx.pool.startNumber.toStr,
mode=ctx.pool.syncMode.active, state= buddy.ctrl.state
# Kick off ticker (was stopped by snap `release()` method)
ctx.pool.ticker.start()
# Store pivot as parent hash in database
ctx.pool.snapDb.kvDb.persistentBlockHeaderPut stateHeader
# Instead of genesis.
ctx.chain.com.startOfHistory = stateHeader.blockHash
when dumpDatabaseOnRollOver: # <--- will go away (debugging only)
# Dump database ... <--- will go away (debugging only)
let nRecords = # <--- will go away (debugging only)
ctx.pool.snapDb.rockDb.dumpAllDb # <--- will go away (debugging only)
trace logTxt "dumped block chain database", nRecords
# Reset so that this action would not be triggered, again
ctx.pool.fullHeader = none(BlockHeader)
return false # do stop magically when looping over peers is exhausted
return false # does stop magically when looping over peers is exhausted
# Mind the gap, fill in if necessary (function is peer independent)
buddy.only.bQueue.blockQueueGrout()

View File

@ -136,7 +136,8 @@ type
recovery*: SnapRecoveryRef ## Current recovery checkpoint/context
# Full sync continuation parameters
fullHeader*: Option[BlockHeader] ## Start full sync from here
fullHeader*: Option[BlockHeader] ## Pivot hand over
startNumber*: Option[BlockNumber] ## Start full sync from here
bPivot*: BestPivotCtxRef ## Global pivot descriptor
bCtx*: BlockQueueCtxRef ## Global block queue descriptor

View File

@ -49,7 +49,7 @@
## the loop starts. Re-setting it again results in repeating the loop. The
## argument `laps` (starting with `0`) indicates the current lap of the
## repeated loops. To avoid continuous looping, the number of `laps` is
## limited (see `exexPoolModeMax`, below.)
## limited (see `execPoolModeMax`, below.)
##
## The argument `last` is set `true` if the last entry of the current loop
## has been reached.