Unified mode for undumping gzip-encoded or era1-encoded block dumps (#2198)

Acknowledgement:
  Built on Daniel's work.
Jordan Hrycaj 2024-05-20 13:59:18 +00:00 committed by GitHub
parent ee9aea171d
commit de0388919f
GPG Key ID: B5690EEEBB952194
15 changed files with 592 additions and 225 deletions
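The new entry point dispatches on its argument: a directory is replayed as a collection of `era1` files, while a `.gz` file goes through the legacy text-dump parser. A minimal caller sketch, assuming an illustrative import prefix and replay folder (neither is part of this commit):

  import
    eth/common,
    ./replay/undump_blocks      # unified dispatcher, see below

  # Either an era1 directory or a gzip-ed text dump may be passed here.
  for (headers, bodies) in undumpBlocks("./replay/main-era1"):
    echo "replaying ", headers.len, " blocks from #", headers[0].blockNumber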

nimbus/db/era1_db.nim

@@ -0,0 +1,22 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
{.push raises: [].}
import
era1_db/[db_desc, db_utils]
export
Era1DbRef,
dispose,
init,
db_utils
# End


@@ -0,0 +1,95 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
{.push raises: [].}
import
std/[os, tables],
stew/[interval_set, keyed_queue, sorted_set],
results,
../../fluffy/eth_data/era1
const
NumOpenEra1DbBlocks* = 10
type
Era1DbError* = enum
NothingWrong = 0
Era1DbBlocks* = object
## Coverage info for an `era1` file, to be indexed by its starting block number
fileName*: string # File name on disk
nBlocks*: uint # Number of blocks available
Era1DbRef* = ref object
dir*: string # Database folder
blocks*: KeyedQueue[uint64,Era1File] # Era1 block on disk
byBlkNum*: SortedSet[uint64,Era1DbBlocks] # File access info
ranges*: IntervalSetRef[uint64,uint64] # Covered ranges
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc load(db: Era1DbRef, eFile: string) =
let
path = db.dir / eFile
dsc = Era1File.open(path).valueOr:
return
key = dsc.blockIdx.startNumber
if db.blocks.lruFetch(key).isOk or dsc.blockIdx.offsets.len == 0:
dsc.close()
else:
# Add to LRU table
while NumOpenEra1DbBlocks <= db.blocks.len:
db.blocks.shift.value.data.close() # unqueue first/least item
discard db.blocks.lruAppend(key, dsc, NumOpenEra1DbBlocks)
# Add to index list
let w = db.byBlkNum.findOrInsert(key).valueOr:
raiseAssert "Load error, index corrupted: " & $error
if w.data.nBlocks != 0:
discard db.ranges.reduce(key, key+w.data.nBlocks.uint64-1)
w.data.fileName = eFile
w.data.nBlocks = dsc.blockIdx.offsets.len.uint
discard db.ranges.merge(key, key+dsc.blockIdx.offsets.len.uint64-1)
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc init*(
T: type Era1DbRef;
dir: string;
): T =
## Load `era1` index
result = T(
dir: dir,
byBlkNum: SortedSet[uint64,Era1DbBlocks].init(),
ranges: IntervalSetRef[uint64,uint64].init())
try:
for w in dir.walkDir(relative=true):
if w.kind in {pcFile, pcLinkToFile}:
result.load w.path
except CatchableError:
discard
proc dispose*(db: Era1DbRef) =
for w in db.blocks.nextValues:
w.close()
db.blocks.clear()
db.ranges.clear()
db.byBlkNum.clear()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
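Taken together, `init()` scans the folder, `load()` registers each readable `era1` file in the sorted index and the covered-ranges set, and `dispose()` closes whatever is still held in the LRU queue. A hedged usage sketch, with an illustrative module path and folder name:

  import
    ../../nimbus/db/era1_db    # wrapper module re-exporting init/dispose and db_utils

  proc checkEra1Dir(dir: string) =
    let db = Era1DbRef.init dir   # unreadable files are silently skipped
    defer: db.dispose()           # closes any era1 files still open in the LRU queue
    for iv in db.ranges.increasing:
      echo "covered blocks: #", iv.minPt, " .. #", iv.maxPt

  checkEra1Dir "./replay/main-era1"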


@@ -0,0 +1,166 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
{.push raises: [].}
import
std/os,
eth/common,
stew/[interval_set, keyed_queue, sorted_set],
../../fluffy/eth_data/era1,
./db_desc
export
BlockTuple
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc getEra1DbBlocks(
db: Era1DbRef;
bn: uint64;
): Result[SortedSetItemRef[uint64,Era1DbBlocks],void] =
## Get the item referring to a particular `era1` file
let w = db.byBlkNum.le(bn).valueOr:
return err()
if w.key + w.data.nBlocks <= bn:
return err()
ok(w)
proc deleteEra1DbBlocks(
db: Era1DbRef;
it: SortedSetItemRef[uint64,Era1DbBlocks];
) =
## Remove `era1` file index descriptor from list and LRU table
discard db.byBlkNum.delete it.key
db.blocks.del it.key
discard db.ranges.reduce(it.key, it.data.nBlocks-1)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc hasAllKeys*(db: Era1DbRef, first, last: uint64): bool =
if first <= last:
db.ranges.covered(first, last) == last - first + 1
else:
false
proc hasSomeKey*(db: Era1DbRef, first, last: uint64): bool =
if first <= last:
0 < db.ranges.covered(first, last)
else:
false
proc hasKey*(db: Era1DbRef, key: uint64): bool =
0 < db.ranges.covered(key, key)
proc fetch*(
db: Era1DbRef;
blockNumber: uint64;
updateInxOnError = true;
): Result[BlockTuple,string] =
## Fetch block data for the argument height `blockNumber`. If `updateInxOnError`
## is set `true` (the default), a data file that can no longer be opened
## will be ignored in the future.
##
let blkDsc = db.getEra1DbBlocks(blockNumber).valueOr:
return err("")
# Get `era1` file index descriptor
let dsc = block:
let rc = db.blocks.lruFetch(blkDsc.key)
if rc.isOk:
rc.value
else:
let w = Era1File.open(db.dir / blkDsc.data.fileName).valueOr:
if updateInxOnError:
db.deleteEra1DbBlocks blkDsc
return err("") # no way
while NumOpenEra1DbBlocks <= db.blocks.len:
db.blocks.shift.value.data.close() # unqueue first/least item
discard db.blocks.lruAppend(blkDsc.key, w, NumOpenEra1DbBlocks)
w
# Fetch the result via `dsc`
dsc.getBlockTuple(blockNumber)
proc clearInx*(db: Era1DbRef, blockNumber: uint64): bool {.discardable.} =
## Remove the `era1` file index entry containing `blockNumber`. This might
## be useful after rejecting the block contents for height `blockNumber`.
##
## The function returns true if the index was found and could be cleared.
##
let blkDsc = db.getEra1DbBlocks(blockNumber).valueOr:
return false
db.deleteEra1DbBlocks blkDsc
true
# -----------------
iterator blockRanges*(db: Era1DbRef): tuple[startBlock,endBlock: uint64] =
for w in db.ranges.increasing:
yield (w.minPt, w.maxPt)
iterator headerBodyPairs*(
db: Era1DbRef;
firstBlockNumber = 0u64;
maxBlockNumber = high(uint64);
blocksPerUnit = 192;
): (seq[BlockHeader],seq[BlockBody]) =
## Provide blocks until there are no more or the block number exceeds
## `maxBlockNumber`.
##
let uSize = blocksPerUnit.uint64
var left = 1u64
block yieldBody:
if 0 < firstBlockNumber:
left = firstBlockNumber
elif db.hasKey(0):
# Zero block (aka genesis)
let tp = db.fetch(0).expect "valid genesis"
yield(@[tp.header],@[tp.body])
else:
break yieldBody # no way
# Full block ranges
while left+uSize < maxBlockNumber and db.hasAllKeys(left,left+uSize-1):
var
hdr: seq[BlockHeader]
bdy: seq[BlockBody]
for bn in left ..< left+uSize:
let tp = db.fetch(bn).expect "valid block tuple"
hdr.add tp.header
bdy.add tp.body
yield(hdr,bdy)
left += uSize
# Final range (if any)
block:
var
hdr: seq[BlockHeader]
bdy: seq[BlockBody]
while left <= maxBlockNumber and db.hasKey(left):
let tp = db.fetch(left).expect "valid block tuple"
hdr.add tp.header
bdy.add tp.body
left.inc
if 0 < hdr.len:
yield(hdr,bdy)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
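In combination, `hasKey()`/`fetch()` serve single-block lookups while `headerBodyPairs` streams contiguous ranges in `blocksPerUnit` chunks (192 by default). A hedged sketch, re-using the illustrative folder from above:

  import
    eth/common,
    ../../nimbus/db/era1_db

  proc replaySome(dir: string) =
    let db = Era1DbRef.init dir
    defer: db.dispose()

    if db.hasKey(17):
      let tp = db.fetch(17).expect "block 17 available"
      echo "fetched header for block #", tp.header.blockNumber

    for (hdrs, bdys) in db.headerBodyPairs(maxBlockNumber = 1_000u64):
      echo "chunk of ", hdrs.len, " blocks starting at #", hdrs[0].blockNumber

  replaySome "./replay/main-era1"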


@@ -8,7 +8,7 @@
# according to those terms.
excluded_files="config.yaml|.gitmodules"
excluded_extensions="json|md|png|txt|toml|gz|key|rlp"
excluded_extensions="json|md|png|txt|toml|gz|key|rlp|era1"
current_year=$(date +"%Y")
outdated_files=()

Binary file not shown.


@@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)


@@ -9,190 +9,32 @@
# according to those terms.
import
std/[os, sequtils, strformat, strutils],
eth/[common, rlp],
nimcrypto/utils,
../../nimbus/db/core_db,
./gunzip
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template say(args: varargs[untyped]) =
# echo args
discard
proc startAt(
h: openArray[BlockHeader];
b: openArray[BlockBody];
start: uint64;
): (seq[BlockHeader],seq[BlockBody]) =
## Filter out blocks with smaller `blockNumber`
if start.toBlockNumber <= h[0].blockNumber:
return (h.toSeq,b.toSeq)
if start.toBlockNumber <= h[^1].blockNumber:
# There are at least two headers, find the least acceptable one
var n = 1
while h[n].blockNumber < start.toBlockNumber:
n.inc
return (h[n ..< h.len], b[n ..< b.len])
proc stopAfter(
h: openArray[BlockHeader];
b: openArray[BlockBody];
last: uint64;
): (seq[BlockHeader],seq[BlockBody]) =
## Filter out blocks with larger `blockNumber`
if h[^1].blockNumber <= last.toBlockNumber:
return (h.toSeq,b.toSeq)
if h[0].blockNumber <= last.toBlockNumber:
# There are at least two headers, find the last acceptable one
var n = 1
while h[n].blockNumber <= last.toBlockNumber:
n.inc
return (h[0 ..< n], b[0 ..< n])
# ------------------------------------------------------------------------------
# Public capture
# ------------------------------------------------------------------------------
proc dumpBlocksBegin*(headers: openArray[BlockHeader]): string =
& "transaction #{headers[0].blockNumber} {headers.len}"
proc dumpBlocksList*(header: BlockHeader; body: BlockBody): string =
& "block {rlp.encode(header).toHex} {rlp.encode(body).toHex}"
proc dumpBlocksEnd*: string =
"commit"
proc dumpBlocksEndNl*: string =
dumpBlocksEnd() & "\n\n"
proc dumpBlocksListNl*(header: BlockHeader; body: BlockBody): string =
dumpBlocksList(header, body) & "\n"
proc dumpBlocksBeginNl*(db: CoreDbRef;
headers: openArray[BlockHeader]): string =
if headers[0].blockNumber == 1.u256:
let
h0 = db.getBlockHeader(0.u256)
b0 = db.getBlockBody(h0.blockHash)
result = "" &
dumpBlocksBegin(@[h0]) & "\n" &
dumpBlocksListNl(h0,b0) &
dumpBlocksEndNl()
result &= dumpBlocksBegin(headers) & "\n"
proc dumpBlocksNl*(db: CoreDbRef; headers: openArray[BlockHeader];
bodies: openArray[BlockBody]): string =
## Add this below the line `transaction.commit()` in the function
## `p2p/chain/persist_blocks.persistBlocksImpl()`:
## ::
## dumpStream.write c.db.dumpGroupNl(headers,bodies)
## dumpStream.flushFile
##
## where `dumpStream` is some stream (think of `stdout`) of type `File`
## that could be initialised with
## ::
## var dumpStream: File
## if dumpStream.isNil:
## doAssert dumpStream.open("./dump-stream.out", fmWrite)
##
db.dumpBlocksBeginNl(headers) &
toSeq(countup(0, headers.len-1))
.mapIt(dumpBlocksListNl(headers[it], bodies[it]))
.join &
dumpBlocksEndNl()
std/os,
eth/common,
"."/[undump_blocks_era1, undump_blocks_gz]
# ------------------------------------------------------------------------------
# Public undump
# ------------------------------------------------------------------------------
iterator undumpBlocks*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) =
var
headerQ: seq[BlockHeader]
bodyQ: seq[BlockBody]
current = 0u
start = 0u
top = 0u
waitFor = "transaction"
iterator undumpBlocks*(file: string): (seq[BlockHeader],seq[BlockBody]) =
if file.dirExists:
for w in file.undumpBlocksEra1:
yield w
else:
let ext = file.splitFile.ext
if ext == ".gz":
for w in file.undumpBlocksGz:
yield w
else:
raiseAssert "Unsupported extension for \"" &
file & "\" (got \"" & ext & "\")"
if not gzFile.fileExists:
raiseAssert &"No such file: \"{gzFile}\""
for lno,line in gzFile.gunzipLines:
if line.len == 0 or line[0] == '#':
continue
var flds = line.split
if 0 < flds.len and (waitFor == "" or waitFor == flds[0]):
case flds[0]
of "transaction":
let flds1Len = flds[1].len
if flds.len == 3 and
0 < flds1Len and flds[1][0] == '#' and
0 < flds[2].len:
start = flds[1][1 ..< flds1Len].parseUInt
top = start + flds[2].parseUInt
current = start
waitFor = ""
headerQ.reset
bodyQ.reset
continue
else:
echo &"*** Ignoring line({lno}): {line}."
waitFor = "transaction"
of "block":
if flds.len == 3 and
0 < flds[1].len and
0 < flds[2].len and
start <= current and current < top:
var
rlpHeader = flds[1].rlpFromHex
rlpBody = flds[2].rlpFromHex
headerQ.add rlpHeader.read(BlockHeader)
bodyQ.add rlpBody.read(BlockBody)
current.inc
continue
else:
echo &"*** Ignoring line({lno}): {line}."
waitFor = "transaction"
of "commit":
if current == top:
say &"*** commit({lno}) #{start}..{top-1}"
else:
echo &"*** commit({lno}) error, current({current}) should be {top}"
yield (headerQ, bodyQ)
waitFor = "transaction"
continue
echo &"*** Ignoring line({lno}): {line}."
waitFor = "transaction"
iterator undumpBlocks*(gzs: seq[string]): (seq[BlockHeader],seq[BlockBody])=
## Variant of `undumpBlocks()`
for f in gzs:
iterator undumpBlocks*(files: seq[string]): (seq[BlockHeader],seq[BlockBody]) =
for f in files:
for w in f.undumpBlocks:
yield w
iterator undumpBlocks*(
gzFile: string; # Data dump file
least: uint64; # First block to extract
stopAfter = high(uint64); # Last block to extract
): (seq[BlockHeader],seq[BlockBody]) =
## Variant of `undumpBlocks()`
for (seqHdr,seqBdy) in gzFile.undumpBlocks:
let (h,b) = startAt(seqHdr, seqBdy, least)
if h.len == 0:
continue
let w = stopAfter(h, b, stopAfter)
if w[0].len == 0:
break
yield w
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
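The dispatcher keys on the argument: an existing directory is handed to `undumpBlocksEra1`, a `.gz` file to `undumpBlocksGz`, and anything else raises an assertion. The `seq[string]` variant simply chains sources, so mixed captures can be replayed in one pass; a hedged sketch with illustrative file names, assuming the module above is imported:

  for (hdrs, bdys) in undumpBlocks(@["./replay/main-era1", "./replay/mainnet332160.txt.gz"]):
    echo "replayed ", hdrs.len, " blocks"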


@@ -0,0 +1,30 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
eth/common,
../../nimbus/db/era1_db
# ------------------------------------------------------------------------------
# Public undump
# ------------------------------------------------------------------------------
iterator undumpBlocksEra1*(dir: string): (seq[BlockHeader],seq[BlockBody]) =
let db = Era1DbRef.init dir
defer: db.dispose()
doAssert db.hasAllKeys(0,500) # check whether `init()` succeeded
for w in db.headerBodyPairs:
yield w
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
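`undumpBlocksEra1` expects `dir` to hold `era1` files which, between them, cover at least blocks 0..500 (the `doAssert hasAllKeys` probe); files that cannot be opened are silently skipped by `init()`. A hedged sketch with a hypothetical folder layout (the second file name is a placeholder):

  # ./replay/main-era1/
  #   mainnet-00000-5ec1ffb8.era1     # blocks 0 .. 8191
  #   mainnet-00001-<root>.era1       # blocks 8192 .. 16383, and so on

  for (hdrs, bdys) in undumpBlocksEra1("./replay/main-era1"):
    echo "chunk starting at #", hdrs[0].blockNumber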


@@ -0,0 +1,198 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[os, sequtils, strformat, strutils],
eth/[common, rlp],
nimcrypto/utils,
../../nimbus/db/core_db,
./gunzip
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template say(args: varargs[untyped]) =
# echo args
discard
proc startAt(
h: openArray[BlockHeader];
b: openArray[BlockBody];
start: uint64;
): (seq[BlockHeader],seq[BlockBody]) =
## Filter out blocks with smaller `blockNumber`
if start.toBlockNumber <= h[0].blockNumber:
return (h.toSeq,b.toSeq)
if start.toBlockNumber <= h[^1].blockNumber:
# There are at least two headers, find the least acceptable one
var n = 1
while h[n].blockNumber < start.toBlockNumber:
n.inc
return (h[n ..< h.len], b[n ..< b.len])
proc stopAfter(
h: openArray[BlockHeader];
b: openArray[BlockBody];
last: uint64;
): (seq[BlockHeader],seq[BlockBody]) =
## Filter out blocks with larger `blockNumber`
if h[^1].blockNumber <= last.toBlockNumber:
return (h.toSeq,b.toSeq)
if h[0].blockNumber <= last.toBlockNumber:
# There are at least two headers, find the last acceptable one
var n = 1
while h[n].blockNumber <= last.toBlockNumber:
n.inc
return (h[0 ..< n], b[0 ..< n])
# ------------------------------------------------------------------------------
# Public capture
# ------------------------------------------------------------------------------
proc dumpBlocksBegin*(headers: openArray[BlockHeader]): string =
& "transaction #{headers[0].blockNumber} {headers.len}"
proc dumpBlocksList*(header: BlockHeader; body: BlockBody): string =
& "block {rlp.encode(header).toHex} {rlp.encode(body).toHex}"
proc dumpBlocksEnd*: string =
"commit"
proc dumpBlocksEndNl*: string =
dumpBlocksEnd() & "\n\n"
proc dumpBlocksListNl*(header: BlockHeader; body: BlockBody): string =
dumpBlocksList(header, body) & "\n"
proc dumpBlocksBeginNl*(db: CoreDbRef;
headers: openArray[BlockHeader]): string =
if headers[0].blockNumber == 1.u256:
let
h0 = db.getBlockHeader(0.u256)
b0 = db.getBlockBody(h0.blockHash)
result = "" &
dumpBlocksBegin(@[h0]) & "\n" &
dumpBlocksListNl(h0,b0) &
dumpBlocksEndNl()
result &= dumpBlocksBegin(headers) & "\n"
proc dumpBlocksNl*(db: CoreDbRef; headers: openArray[BlockHeader];
bodies: openArray[BlockBody]): string =
## Add this below the line `transaction.commit()` in the function
## `p2p/chain/persist_blocks.persistBlocksImpl()`:
## ::
## dumpStream.write c.db.dumpBlocksNl(headers,bodies)
## dumpStream.flushFile
##
## where `dumpStream` is some stream (think of `stdout`) of type `File`
## that could be initialised with
## ::
## var dumpStream: File
## if dumpStream.isNil:
## doAssert dumpStream.open("./dump-stream.out", fmWrite)
##
db.dumpBlocksBeginNl(headers) &
toSeq(countup(0, headers.len-1))
.mapIt(dumpBlocksListNl(headers[it], bodies[it]))
.join &
dumpBlocksEndNl()
# ------------------------------------------------------------------------------
# Public undump
# ------------------------------------------------------------------------------
iterator undumpBlocksGz*(gzFile: string): (seq[BlockHeader],seq[BlockBody]) =
var
headerQ: seq[BlockHeader]
bodyQ: seq[BlockBody]
current = 0u
start = 0u
top = 0u
waitFor = "transaction"
if not gzFile.fileExists:
raiseAssert &"No such file: \"{gzFile}\""
for lno,line in gzFile.gunzipLines:
if line.len == 0 or line[0] == '#':
continue
var flds = line.split
if 0 < flds.len and (waitFor == "" or waitFor == flds[0]):
case flds[0]
of "transaction":
let flds1Len = flds[1].len
if flds.len == 3 and
0 < flds1Len and flds[1][0] == '#' and
0 < flds[2].len:
start = flds[1][1 ..< flds1Len].parseUInt
top = start + flds[2].parseUInt
current = start
waitFor = ""
headerQ.reset
bodyQ.reset
continue
else:
echo &"*** Ignoring line({lno}): {line}."
waitFor = "transaction"
of "block":
if flds.len == 3 and
0 < flds[1].len and
0 < flds[2].len and
start <= current and current < top:
var
rlpHeader = flds[1].rlpFromHex
rlpBody = flds[2].rlpFromHex
headerQ.add rlpHeader.read(BlockHeader)
bodyQ.add rlpBody.read(BlockBody)
current.inc
continue
else:
echo &"*** Ignoring line({lno}): {line}."
waitFor = "transaction"
of "commit":
if current == top:
say &"*** commit({lno}) #{start}..{top-1}"
else:
echo &"*** commit({lno}) error, current({current}) should be {top}"
yield (headerQ, bodyQ)
waitFor = "transaction"
continue
echo &"*** Ignoring line({lno}): {line}."
waitFor = "transaction"
iterator undumpBlocksGz*(gzs: seq[string]): (seq[BlockHeader],seq[BlockBody]) =
## Variant of `undumpBlocksGz()`
for f in gzs:
for w in f.undumpBlocksGz:
yield w
iterator undumpBlocksGz*(
gzFile: string; # Data dump file
least: uint64; # First block to extract
stopAfter = high(uint64); # Last block to extract
): (seq[BlockHeader],seq[BlockBody]) =
## Variant of `undumpBlocksGz()`
for (seqHdr,seqBdy) in gzFile.undumpBlocksGz:
let (h,b) = startAt(seqHdr, seqBdy, least)
if h.len == 0:
continue
let w = stopAfter(h, b, stopAfter)
if w[0].len == 0:
break
yield w
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
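For reference, the gzip-ed capture consumed by `undumpBlocksGz` is the line-oriented text stream written by the `dumpBlocks*` helpers above: a `transaction #<firstBlock> <count>` record, `<count>` lines of hex-encoded RLP, and a closing `commit`. A hedged sketch of the uncompressed payload, with the hex fields abbreviated to placeholders:

  transaction #0 1
  block <rlp(header) as hex> <rlp(body) as hex>
  commit

  transaction #1 2
  block <hex> <hex>
  block <hex> <hex>
  commit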


@@ -24,7 +24,7 @@ import
../nimbus/utils/[ec_recover, utils],
../nimbus/[config, constants],
./test_clique/pool,
./replay/undump_blocks
./replay/undump_blocks_gz
const
baseDir = [".", "tests", ".." / "tests", $DirSep] # path containing repo
@@ -158,7 +158,7 @@ proc runGoerliReplay(noisy = true; showElapsed = false,
suite &"Replay Goerli chain from {fileInfo} capture":
for w in filePath.undumpBlocks:
for w in filePath.undumpBlocksGz:
if w[0][0].blockNumber == 0.u256:
# Verify Genesis
@@ -244,7 +244,7 @@ proc runGoerliBaybySteps(noisy = true;
suite &"Replay Goerli chain from {fileInfo} capture, single blockwise":
for w in filePath.undumpBlocks:
for w in filePath.undumpBlocksGz:
if stoppedOk:
break
if w[0][0].blockNumber == 0.u256:


@@ -28,7 +28,7 @@ const
baseDir = [".", "..", ".."/"..", $DirSep]
repoDir = [".", "tests", "nimbus-eth1-blobs"]
subDir = ["replay", "test_coredb", "custom-network", "customgenesis"]
subDir = ["replay", "test_coredb", "custom-network", "main-era1"]
# Reference file for finding some database directory base
sampleDirRefFile = "coredb_test_xx.nim"
@@ -273,10 +273,7 @@ when isMainModule:
sampleList = cmdLineConfig().samples
if sampleList.len == 0:
sampleList = @[bulkTest0]
when true:
sampleList = @[bulkTest2, bulkTest3]
sampleList = @[ariTest1] # debugging
sampleList = @[bChainCapture]
var state: (Duration, int)
for n,capture in sampleList:


@@ -64,66 +64,74 @@ let
builtIn: true,
name: "goerli",
network: GoerliNet,
files: @["goerli68161.txt.gz"]) # lon local replay folder
files: @["goerli68161.txt.gz"]) # on local replay folder
goerliSampleEx = CaptureSpecs(
builtIn: true,
name: "goerli",
network: GoerliNet,
files: @[
"goerli482304.txt.gz", # on nimbus-eth1-blobs/replay
"goerli482304.txt.gz", # on nimbus-eth1-blobs/replay
"goerli482305-504192.txt.gz"])
mainSample = CaptureSpecs(
builtIn: true,
name: "main",
network: MainNet,
files: @["mainnet-00000-5ec1ffb8.era1"], # on local replay folder
numBlocks: high(int),
dbType: AristoDbRocks)
mainSampleEx = CaptureSpecs(
builtIn: true,
name: "main",
network: MainNet,
files: @[
"mainnet332160.txt.gz", # on nimbus-eth1-blobs/replay
"mainnet332161-550848.txt.gz",
"mainnet550849-719232.txt.gz",
"mainnet719233-843841.txt.gz"])
# will run over all available files in the parent folder
files: @["mainnet-era1.txt"]) # on external repo
# ------------------
bulkTest0* = goerliSample
.cloneWith(
name = "-some",
numBlocks = 1_000)
bulkTest1* = goerliSample
# Supposed to run mostly on defaults
bulkTest0* = mainSample
.cloneWith(
name = "-more",
numBlocks = high(int))
bulkTest2* = goerliSampleEx
bulkTest1* = mainSample
.cloneWith(
numBlocks = high(int))
name = "-some",
numBlocks = 1_000)
bulkTest3* = mainSampleEx
.cloneWith(
numBlocks = high(int))
# Test samples with all the problems one can expect
ariTest0* = goerliSampleEx
.cloneWith(
name = "-am",
numBlocks = high(int),
dbType = AristoDbMemory)
ariTest1* = goerliSampleEx
.cloneWith(
name = "-ar",
numBlocks = high(int),
dbType = AristoDbRocks)
ariTest2* = mainSampleEx
bulkTest2* = mainSampleEx
.cloneWith(
name = "-am",
numBlocks = 500_000,
dbType = AristoDbMemory)
ariTest3* = mainSampleEx
bulkTest3* = mainSampleEx
.cloneWith(
name = "-ar",
numBlocks = high(int),
dbType = AristoDbRocks)
bulkTest4* = goerliSample
.cloneWith(
name = "-more",
numBlocks = high(int))
bulkTest5* = goerliSample
.cloneWith(
name = "-some",
numBlocks = 1_000)
bulkTest6* = goerliSampleEx
.cloneWith(
name = "-am",
numBlocks = high(int),
dbType = AristoDbMemory)
bulkTest7* = goerliSampleEx
.cloneWith(
name = "-ar",
numBlocks = high(int),
@@ -133,6 +141,6 @@ let
allSamples* = [
bulkTest0, bulkTest1, bulkTest2, bulkTest3,
ariTest0, ariTest1, ariTest2, ariTest3]
bulkTest4, bulkTest5, bulkTest6, bulkTest7]
# End


@@ -9,7 +9,7 @@
# distributed except according to those terms.
import
std/[strformat, times],
std/[os, strformat, times],
chronicles,
eth/common,
results,
@@ -175,7 +175,16 @@ proc test_chainSync*(
when LedgerEnableApiProfiling:
ldgProfData = com.db.ldgProfData()
for w in filePaths.undumpBlocks:
# Scan folder for `era1` files (ignoring the argument file name)
let
(dir, _, ext) = filePaths[0].splitFile
files =
if filePaths.len == 1 and ext == ".era1":
@[dir]
else:
filePaths
for w in files.undumpBlocks:
let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
if fromBlock == 0.u256:
xCheck w[0][0] == com.db.getBlockHeader(0.u256)


@@ -12,7 +12,7 @@ import
std/[os, strformat, sequtils, strutils, times],
../../nimbus/core/tx_pool/[tx_chain, tx_desc, tx_gauge, tx_item, tx_tabs],
../../nimbus/core/tx_pool/tx_tasks/[tx_packer, tx_recover],
../replay/[pp, undump_blocks],
../replay/[pp, undump_blocks_gz],
chronicles,
eth/[common, keys],
stew/[byteutils,keyed_queue, sorted_set],
@@ -55,7 +55,7 @@ export
tx_tabs.reassign,
tx_tabs.reject,
tx_tabs.verify,
undumpBlocks
undumpBlocksGz
const
# pretty printing


@@ -69,7 +69,7 @@ proc toTxPool*(
result[0] = TxPoolRef.new(com,testAddress)
result[0].baseFee = baseFee
for chain in file.undumpBlocks:
for chain in file.undumpBlocksGz:
let leadBlkNum = chain[0][0].blockNumber
chainNo.inc