Simplify beacon stream pivot update (#1435)

* Simplify pivot update

why:
  No need to fetch the pivot header from the network when it can
  be made available in the pivot cache

also:
  Keep `txPool` update disabled while syncing

* Cosmetics, tune down some logging noise

* Support `snap/1` without `eth/6?`

why:
  Eth is not needed here.

* Snap is an (optional) extension of `eth`

so:
  It must be supported somehow. Nevertheless it will currently be
  unused in the snap syncer.
This commit is contained in:
Jordan Hrycaj 2023-01-18 08:31:57 +00:00 committed by GitHub
parent a1163b4ade
commit 30135ab1ef
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 44 additions and 97 deletions

View File

@ -34,7 +34,7 @@ type
current: BlockNumber
highest: BlockNumber
SyncReqNewHeadCB* = proc(number: BlockNumber; hash: Hash256) {.gcsafe.}
SyncReqNewHeadCB* = proc(header: BlockHeader) {.gcsafe.}
## Update head for syncing
CommonRef* = ref object
@ -362,10 +362,10 @@ proc initializeEmptyDb*(com: CommonRef)
com.consensusType == ConsensusType.POS)
doAssert(canonicalHeadHashKey().toOpenArray in trieDB)
proc syncReqNewHead*(com: CommonRef; number: BlockNumber; hash: Hash256) =
proc syncReqNewHead*(com: CommonRef; header: BlockHeader) =
## Used by RPC to update the beacon head for snap sync
if not com.syncReqNewHead.isNil:
com.syncReqNewHead(number, hash)
com.syncReqNewHead(header)
# ------------------------------------------------------------------------------
# Getters

View File

@ -250,7 +250,7 @@ proc setupEngineApi*(
hash = blockHash
# Update sync header (if any)
com.syncReqNewHead(header.blockNumber, blockHash)
com.syncReqNewHead(header)
return simpleFCU(PayloadExecutionStatus.syncing)

View File

@ -17,7 +17,7 @@ import
../../common as nimcom,
../../db/select_backend,
../../utils/prettify,
".."/[protocol, sync_desc],
".."/[handlers, protocol, sync_desc],
./worker/[pivot, ticker],
./worker/com/com_error,
./worker/db/[hexary_desc, snapdb_desc, snapdb_pivot],
@ -106,6 +106,7 @@ proc setup*(ctx: SnapCtxRef; tickerOK: bool): bool =
## Global set up
ctx.data.coveredAccounts = NodeTagRangeSet.init()
noExceptionOops("worker.setup()"):
ctx.ethWireCtx.txPoolEnabled false
ctx.chain.com.syncReqNewHead = ctx.pivotUpdateBeaconHeaderCB
ctx.data.snapDb =
if ctx.data.dbBackend.isNil: SnapDbRef.init(ctx.chain.db.db)
@ -135,6 +136,8 @@ proc release*(ctx: SnapCtxRef) =
if not ctx.data.ticker.isNil:
ctx.data.ticker.stop()
ctx.data.ticker = nil
noExceptionOops("worker.release()"):
ctx.ethWireCtx.txPoolEnabled true
ctx.chain.com.syncReqNewHead = nil
proc start*(buddy: SnapBuddyRef): bool =

View File

@ -16,7 +16,6 @@ import
stew/[interval_set, keyed_queue, sorted_set],
../../sync_desc,
".."/[constants, range_desc, worker_desc],
./com/get_block_header,
./db/[hexary_error, snapdb_accounts, snapdb_pivot],
./pivot/[heal_accounts, heal_storage_slots,
range_fetch_accounts, range_fetch_storage_slots,
@ -166,8 +165,8 @@ proc tickerStats*(
pivotBlock = some(env.stateHeader.blockNumber)
procChunks = env.fetchAccounts.processed.chunks
stoQuLen = some(env.storageQueueTotal())
if 0 < ctx.data.beaconNumber:
beaconBlock = some(ctx.data.beaconNumber)
if 0 < ctx.data.beaconHeader.blockNumber:
beaconBlock = some(ctx.data.beaconHeader.blockNumber)
TickerStats(
beaconBlock: beaconBlock,
@ -343,102 +342,48 @@ proc pivotApprovePeer*(buddy: SnapBuddyRef) {.async.} =
let
ctx = buddy.ctx
peer = buddy.peer
# Cache beacon header data, it might change while waiting for a peer
# reply message
beaconNumber = ctx.data.beaconNumber
beaconHash = ctx.data.beaconHash
beaconHeader = ctx.data.beaconHeader
var
pivotHeader: BlockHeader
pivotNumber: BlockNumber
peerNumber: BlockNumber # for error message
block:
let rc = ctx.data.pivotTable.lastValue
if rc.isOk:
pivotHeader = rc.value.stateHeader
pivotNumber = pivotHeader.blockNumber
# Check whether the pivot needs to be updated
if pivotNumber + pivotBlockDistanceMin < beaconNumber:
let rc = await buddy.getBlockHeader(beaconHash)
if rc.isErr:
buddy.ctrl.zombie = true
return
if pivotHeader.blockNumber + pivotBlockDistanceMin < beaconHeader.blockNumber:
# If the entry before the previous entry is unused, then run a pool mode
# based session (which should enable a pivot table purge).
block:
let rc = ctx.data.pivotTable.beforeLast
if rc.isOk and rc.value.data.fetchAccounts.processed.isEmpty:
ctx.poolMode = true
# Sanity check => append a new pivot environment
if rc.value.blockNumber == beaconNumber:
# If the entry before the previous entry is unused, then run a pool mode
# based session (which should enable a pivot table purge).
block:
let rx = ctx.data.pivotTable.beforeLast
if rx.isOk and rx.value.data.fetchAccounts.processed.isEmpty:
ctx.poolMode = true
when extraTraceMessages:
trace "New pivot from beacon chein", peer, pivotNumber,
beaconNumber, beaconHash, poolMode=ctx.poolMode
discard ctx.data.pivotTable.lruAppend(
rc.value.stateRoot, SnapPivotRef.init(ctx, rc.value),
pivotTableLruEntriesMax)
# Done, buddy runs on updated environment. It has proved already that it
# supports the current header.
return
# Header/block number mismatch => process at the end of function
peerNumber = rc.value.blockNumber
elif 0 < pivotNumber:
# So there was no reason to update the pivot. Verify that
# the pivot header can be fetched from peer.
let
pivotHash = pivotHeader.hash
rc = await buddy.getBlockHeader(pivotHash)
if rc.isErr:
when extraTraceMessages:
trace "Header unsupported by peer", peer, pivotNumber, pivotHash
buddy.ctrl.zombie = true
return
# Sanity check
if rc.value.blockNumber == beaconNumber:
return
# Header/block number mismatch => process at the end of function
peerNumber = rc.value.blockNumber
else:
# Not ready yet for initialising. Currently there is no pivot, at all
when extraTraceMessages:
trace "Beacon chain header not ready yet", peer
trace "New pivot from beacon chain", peer,
pivot=("#" & $pivotHeader.blockNumber),
beacon=("#" & $beaconHeader.blockNumber), poolMode=ctx.poolMode
discard ctx.data.pivotTable.lruAppend(
beaconHeader.stateRoot, SnapPivotRef.init(ctx, beaconHeader),
pivotTableLruEntriesMax)
pivotHeader = beaconHeader
# Not ready yet?
if pivotHeader.blockNumber == 0:
buddy.ctrl.stopped = true
return
# Header/block number mismatch
trace "Oops, cannot approve peer due to header mismatch", peer, peerNumber,
beaconNumber, hash=beaconHash
# Reset cache
ctx.data.beaconNumber = 0.u256
ctx.data.beaconHash = Hash256.default
# See you later :)
buddy.ctrl.stopped = true
proc pivotUpdateBeaconHeaderCB*(ctx: SnapCtxRef): SyncReqNewHeadCB =
## Update beacon header. This function is intended as a call back function
## for the RPC module.
result = proc(number: BlockNumber; hash: Hash256) {.gcsafe.} =
if ctx.data.beaconNumber < number:
when extraTraceMessages:
trace "External beacon info update", number, hash
ctx.data.beaconNumber = number
ctx.data.beaconHash = hash
result = proc(h: BlockHeader) {.gcsafe.} =
if ctx.data.beaconHeader.blockNumber < h.blockNumber:
# when extraTraceMessages:
# trace "External beacon info update", header=("#" & $h.blockNumber)
ctx.data.beaconHeader = h
# ------------------------------------------------------------------------------
# End

View File

@ -123,8 +123,8 @@ proc runLogTicker(t: TickerRef) {.gcsafe.} =
tickerLogSuppressMax < (now - t.visited):
var
nAcc, nSto, bulk: string
pivot = "n/a"
beacon = "n/a"
pv = "n/a"
bc = "n/a"
nStoQue = "n/a"
let
recoveryDone = t.lastRecov
@ -132,7 +132,7 @@ proc runLogTicker(t: TickerRef) {.gcsafe.} =
"(" & data.accountsFill[1].pc99 & ")" &
"/" & data.accountsFill[2].pc99 &
"~" & data.nAccountStats.uint.toSI
buddies = t.nBuddies
nInst = t.nBuddies
# With `int64`, there are more than 29*10^10 years range for seconds
up = (now - t.started).seconds.uint64.toSI
@ -144,9 +144,9 @@ proc runLogTicker(t: TickerRef) {.gcsafe.} =
noFmtError("runLogTicker"):
if data.pivotBlock.isSome:
pivot = &"#{data.pivotBlock.get}/{data.nQueues}"
pv = &"#{data.pivotBlock.get}/{data.nQueues}"
if data.beaconBlock.isSome:
beacon = &"#{data.beaconBlock.get}"
bc = &"#{data.beaconBlock.get}"
nAcc = (&"{(data.nAccounts[0]+0.5).int64}" &
&"({(data.nAccounts[1]+0.5).int64})")
nSto = (&"{(data.nSlotLists[0]+0.5).int64}" &
@ -157,13 +157,13 @@ proc runLogTicker(t: TickerRef) {.gcsafe.} =
if t.recovery:
info "Snap sync statistics (recovery)",
up, buddies, beacon, pivot, nAcc, accCov, nSto, nStoQue, mem
up, nInst, bc, pv, nAcc, accCov, nSto, nStoQue, mem
elif recoveryDone:
info "Snap sync statistics (recovery done)",
up, buddies, beacon, pivot, nAcc, accCov, nSto, nStoQue, mem
up, nInst, bc, pv, nAcc, accCov, nSto, nStoQue, mem
else:
info "Snap sync statistics",
up, buddies, beacon, pivot, nAcc, accCov, nSto, nStoQue, mem
up, nInst, bc, pv, nAcc, accCov, nSto, nStoQue, mem
t.setLogTicker(Moment.fromNow(tickerLogInterval))

View File

@ -96,8 +96,7 @@ type
# Pivot table
pivotTable*: SnapPivotTable ## Per state root environment
beaconNumber*: BlockNumber ## Running on beacon chain
beaconHash*: Hash256 ## Ditto
beaconHeader*: BlockHeader ## Running on beacon chain
coveredAccounts*: NodeTagRangeSet ## Derived from all available accounts
covAccTimesFull*: uint ## # of 100% coverages
recovery*: SnapRecoveryRef ## Current recovery checkpoint/context