Flare sync: Remove `debug` modules (#2665)

why:
  Not needed anymore, but the deletion is kept in a separate PR to make
  it easy to refer back and find these modules.
Jordan Hrycaj 2024-09-27 16:59:16 +00:00 committed by GitHub
parent 0d2a72d2a9
commit debb68b3a7
4 changed files with 0 additions and 344 deletions

View File

@@ -1,2 +1 @@
* Update/resolve code fragments which are tagged FIXME
* Remove debug.nim file with a separate PR

View File

@@ -1,133 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at
#     https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at
#     https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises:[].}

import
  pkg/chronicles,
  pkg/eth/[common, p2p],
  pkg/stew/[interval_set, sorted_set],
  ../../../../common,
  ../../worker_desc,
  ../blocks_unproc

type
  BlocksForImportQueueWalk = SortedSetWalkRef[BlockNumber,BlocksForImport]
    ## Traversal descriptor (for `verifyStagedBlocksQueue()`)

# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------

proc rlpSize*(blk: ref BlocksForImport): int =
  rlp.encode(blk[]).len

proc verifyStagedBlocksQueue*(ctx: FlareCtxRef; info: static[string]) =
  ## Verify staged queue
  ##
  # Walk queue items
  let walk = BlocksForImportQueueWalk.init(ctx.blk.staged)
  defer: walk.destroy()

  var
    stTotal = 0u
    rc = walk.first()
    prv = BlockNumber(0)
  while rc.isOk:
    let
      key = rc.value.key
      nBlocks = rc.value.data.blocks.len.uint64
      maxPt = key + nBlocks - 1
      unproc = ctx.blocksUnprocCovered(key, maxPt)
    if 0 < unproc:
      raiseAssert info & ": unprocessed staged chain " &
        key.bnStr & " overlap=" & $unproc
    if key <= prv:
      raiseAssert info & ": overlapping staged chain " &
        key.bnStr & " prvKey=" & prv.bnStr & " overlap=" & $(prv - key + 1)
    stTotal += nBlocks
    prv = maxPt
    rc = walk.next()

  let t = ctx.dbStateBlockNumber()

  if 0 < stTotal:
    let first = ctx.blk.staged.ge(0).value.key

    # Check `T < staged[] <= B`
    if first <= t:
      raiseAssert info & ": staged bottom mismatch " &
        " T=" & t.bnStr & " stBottom=" & first.bnStr
    if ctx.lhc.layout.base < prv:
      raiseAssert info & ": staged top mismatch " &
        " B=" & ctx.lhc.layout.base.bnStr & " stTop=" & prv.bnStr

  if not ctx.blocksUnprocIsEmpty():
    let
      uBottom = ctx.blocksUnprocBottom()
      uTop = ctx.blocksUnprocTop()
      topReq = ctx.blk.topRequest

    # Check `T < unprocessed{} <= B`
    if uBottom <= t:
      raiseAssert info & ": unproc bottom mismatch " &
        " T=" & t.bnStr & " uBottom=" & uBottom.bnStr
    if ctx.lhc.layout.base < uTop:
      raiseAssert info & ": unproc top mismatch " &
        " B=" & ctx.lhc.layout.base.bnStr & " uTop=" & uTop.bnStr

    # Check `unprocessed{} <= topRequest <= B`
    if topReq < uTop:
      raiseAssert info & ": unproc top req mismatch " &
        " uTop=" & uTop.bnStr & " topRequest=" & topReq.bnStr
    if ctx.lhc.layout.base < topReq:
      raiseAssert info & ": unproc top req mismatch " &
        " B=" & ctx.lhc.layout.base.bnStr & " topReq=" & topReq.bnStr

  # Check `staged[] + unprocessed{} == (T,B]`
  let
    uTotal = ctx.blocksUnprocTotal()
    uBorrowed = ctx.blocksUnprocBorrowed()
    all3 = stTotal + uTotal + uBorrowed
    unfilled = if t < ctx.layout.base: ctx.layout.base - t
               else: 0u

  trace info & ": verify staged", stTotal, uTotal, uBorrowed, all3, unfilled
  if unfilled < all3:
    raiseAssert info & ": staged/unproc too large" & " staged=" & $stTotal &
      " unproc=" & $uTotal & " borrowed=" & $uBorrowed & " exp-sum=" & $unfilled


proc verifyStagedBlocksItem*(blk: ref BlocksForImport; info: static[string]) =
  ## Verify record
  ##
  if blk.blocks.len == 0:
    trace info & ": verifying ok", nBlocks=0
    return

  trace info & ": verifying", nBlocks=blk.blocks.len

  if blk.blocks[0].header.txRoot != EMPTY_ROOT_HASH:
    doAssert 0 < blk.blocks[0].transactions.len
  else:
    doAssert blk.blocks[0].transactions.len == 0

  for n in 1 ..< blk.blocks.len:
    doAssert blk.blocks[n-1].header.number + 1 == blk.blocks[n].header.number
    if blk.blocks[n].header.txRoot != EMPTY_ROOT_HASH:
      doAssert 0 < blk.blocks[n].transactions.len
    else:
      doAssert blk.blocks[n].transactions.len == 0

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
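For orientation only: the verifier above would typically be called from the block-staging code right after the staged queue is modified, guarded by a compile-time switch so that release builds skip the checks. The call site below is not part of this commit; the `flareExtraDebug` define and the `stageBlocks` wrapper are assumptions made purely for illustration.

  # Hypothetical call site (not from this diff); assumes the deleted module
  # above and `worker_desc` are imported.
  const debugChecksEnabled = defined(flareExtraDebug)   # assumed switch name

  proc stageBlocks(ctx: FlareCtxRef; blk: ref BlocksForImport) =
    # ... insert `blk` into `ctx.blk.staged` here ...
    when debugChecksEnabled:
      # Check the new record and the overall queue invariants.
      blk.verifyStagedBlocksItem("stageBlocks")
      ctx.verifyStagedBlocksQueue("stageBlocks")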

View File

@@ -1,99 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at
#     https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at
#     https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises:[].}

import
  pkg/[chronicles, chronos],
  pkg/eth/[common, rlp],
  pkg/stew/[byteutils, interval_set, sorted_set],
  pkg/results,
  ../../../../db/storage_types,
  ../../worker_desc

logScope:
  topics = "flare db"

# ------------------------------------------------------------------------------
# Private debugging & logging helpers
# ------------------------------------------------------------------------------

formatIt(Hash256):
  it.data.toHex

# ------------------------------------------------------------------------------
# Public debugging functions
# ------------------------------------------------------------------------------

proc dbVerifyStashedHeaders*(
    ctx: FlareCtxRef;
    info: static[string];
      ): Future[bool] {.async.} =
  ## For debugging. Verify integrity of stashed headers on the database.

  # Last executed block on database
  let
    db = ctx.db
    kvt = ctx.db.ctx.getKvt()
    elNum = db.getSavedStateBlockNumber()
    lyLeast = ctx.layout.least
    lyFinal = ctx.layout.final
    lyFinalHash = ctx.layout.finalHash

  if lyLeast == 0:
    return true

  if lyLeast <= elNum and 0 < elNum:
    debug info & ": base header B unsynced", elNum=elNum.bnStr, B=lyLeast.bnStr
    return false

  let iv = BnRange.new(lyLeast,lyFinal)
  trace info & ": verifying stashed headers", iv, len=(lyFinal-lyLeast+1)

  var lastHash = ctx.layout.leastParent

  for num in lyLeast .. lyFinal:
    let data = kvt.get(flareHeaderKey(num).toOpenArray).valueOr:
      debug info & ": unstashed header", num=num.bnStr
      return false

    var header: BlockHeader
    try: header = rlp.decode(data, BlockHeader)
    except RlpError:
      debug info & ": cannot decode rlp header", num=num.bnStr
      return false

    if header.number != num:
      debug info & ": wrongly addressed header",
        num=header.number.bnStr, expected=num.bnStr
      return false

    if header.parentHash != lastHash:
      debug info & ": hash mismatch", lastNum=(num-1).bnStr, lastHash,
        parentHash=header.parentHash
      return false

    lastHash = data.keccakHash

    # Allow thread change
    if (num mod 100_000) == 98_765:
      # trace info & ": thread change offer", num=num.bnStr
      await sleepAsync asyncThreadSwitchTimeSlot

  if lyFinalHash != lastHash:
    debug info & ": base header B hash mismatch", num=lyFinal.bnStr,
      hash=lyFinalHash, expected=lastHash
    return false

  trace info & ": done verifying", iv, len=(lyFinal-lyLeast+1)
  true

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
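Again for orientation only: `dbVerifyStashedHeaders` is async (it offers a thread change every so often via `sleepAsync`), so a caller has to await its result. The wrapper below is not part of this commit; the proc name and the log message are assumptions for illustration.

  # Hypothetical caller (not from this diff); assumes the deleted module
  # above is imported together with chronicles and chronos.
  proc inspectStashedHeaders(ctx: FlareCtxRef) {.async.} =
    let ok = await ctx.dbVerifyStashedHeaders("inspectStashedHeaders")
    if not ok:
      warn "Stashed header chain failed verification"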

View File

@@ -1,111 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at
#     https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at
#     https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises:[].}

import
  pkg/chronicles,
  pkg/eth/[common, p2p],
  pkg/stew/[interval_set, sorted_set],
  ../../../../common,
  ../../worker_desc,
  ../headers_unproc

type
  LinkedHChainQueueWalk = SortedSetWalkRef[BlockNumber,LinkedHChain]
    ## Traversal descriptor (for `verifyStagedHeadersQueue()`)

# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------

proc verifyStagedHeadersQueue*(ctx: FlareCtxRef; info: static[string]) =
  ## Verify staged queue
  ##
  # Walk queue items
  let walk = LinkedHChainQueueWalk.init(ctx.lhc.staged)
  defer: walk.destroy()

  var
    stTotal = 0u
    rc = walk.first()
    prv = BlockNumber(0)
  while rc.isOk:
    let
      key = rc.value.key
      nHeaders = rc.value.data.revHdrs.len.uint64
      minPt = key - nHeaders + 1
      unproc = ctx.headersUnprocCovered(minPt, key)
    if 0 < unproc:
      raiseAssert info & ": unprocessed staged chain " &
        key.bnStr & " overlap=" & $unproc
    if minPt <= prv:
      raiseAssert info & ": overlapping staged chain " &
        key.bnStr & " prvKey=" & prv.bnStr & " overlap=" & $(prv - minPt + 1)
    stTotal += nHeaders
    prv = key
    rc = walk.next()

  # Check `staged[] <= L`
  if ctx.layout.least <= prv:
    raiseAssert info & ": staged top mismatch " &
      " L=" & ctx.layout.least.bnStr & " stagedTop=" & prv.bnStr

  # Check `unprocessed{} <= L`
  let uTop = ctx.headersUnprocTop()
  if ctx.layout.least <= uTop:
    raiseAssert info & ": unproc top mismatch " &
      " L=" & ctx.layout.least.bnStr & " unprocTop=" & uTop.bnStr

  # Check `staged[] + unprocessed{} == (B,L)`
  let
    uTotal = ctx.headersUnprocTotal()
    uBorrowed = ctx.headersUnprocBorrowed()
    all3 = stTotal + uTotal + uBorrowed
    unfilled = if ctx.layout.least <= ctx.layout.base + 1: 0u
               else: ctx.layout.least - ctx.layout.base - 1

  trace info & ": verify staged", stTotal, uTotal, uBorrowed, all3, unfilled
  if all3 != unfilled:
    raiseAssert info & ": staged/unproc mismatch " & " staged=" & $stTotal &
      " unproc=" & $uTotal & " borrowed=" & $uBorrowed &
      " exp-sum=" & $unfilled


proc verifyHeaderChainItem*(lhc: ref LinkedHChain; info: static[string]) =
  ## Verify a header chain.
  if lhc.revHdrs.len == 0:
    trace info & ": verifying ok", nLhc=0
    return

  trace info & ": verifying", nLhc=lhc.revHdrs.len
  var
    topHdr, childHdr: BlockHeader
  try:
    doAssert lhc.revHdrs[0].keccakHash == lhc.hash
    topHdr = rlp.decode(lhc.revHdrs[0], BlockHeader)

    childHdr = topHdr
    for n in 1 ..< lhc.revHdrs.len:
      let header = rlp.decode(lhc.revHdrs[n], BlockHeader)
      doAssert childHdr.number == header.number + 1
      doAssert lhc.revHdrs[n].keccakHash == childHdr.parentHash
      childHdr = header

    doAssert childHdr.parentHash == lhc.parentHash
  except RlpError as e:
    raiseAssert "verifyHeaderChainItem oops(" & $e.name & ") msg=" & e.msg

  trace info & ": verify ok",
    iv=BnRange.new(childHdr.number,topHdr.number), nLhc=lhc.revHdrs.len

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------