Exported blobs and some scripts to parallel project nimbus-eth1-blobs (#995)

why: TDD data and test scripts that are not needed for CI are held externally. This saves space.

also: Added support for test-custom_networks.nim to import the Devnet5 dump.

This commit is contained in:
parent b00ac490a9
commit ed0e882387
@@ -275,6 +275,12 @@ rm vendor/Nim/bin/nim
make -j8 build-nim
```

- some programs in the _tests_ subdirectory do a replay of blockchain
  database dumps when compiled and run locally. The dumps are found in
  [this](https://github.com/status-im/nimbus-eth1-blobs) module, which
  needs to be cloned as _nimbus-eth1-blobs_ parallel to the _nimbus-eth1_
  file system root (see the sketch below).
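
A minimal sketch of that layout, assuming the clone sits directly beside the checkout (the `replay` and `custom-network` subdirectories are the search paths used by the test sources in this commit):

```
# From the directory that contains the nimbus-eth1 checkout:
git clone https://github.com/status-im/nimbus-eth1-blobs
#
# resulting layout, side by side:
#   nimbus-eth1/
#   nimbus-eth1-blobs/replay/           -- gzipped block capture dumps
#   nimbus-eth1-blobs/custom-network/   -- custom genesis files and captures
```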
#### Git submodule workflow

Working on a dependency:
run-nimbus-sync (115 lines, deleted)
@@ -1,115 +0,0 @@
#!/bin/bash
#
# Script to run Nimbus-eth1 on the same networks Geth supports by name,
# with a trusted connection to a dedicated peer for that network.
#
# All protocols are disabled other than the minimum we need for block sync.
#
# This is very helpful for debugging and improving pipelined sync, and for
# improving the database and other processing, even though p2p sync is the
# endgame.
#
# - Discovery protocols are turned off
# - NAT hole punching protocols are turned off
# - Whisper protocol is turned off (`--protocols:eth`)
# - The only connection is to a single, active Geth for that network.
# - Each network's data is stored in a different location to avoid conflicts.
# - Each network is accessed by binding to a different port locally,
#   so we can run several at the same time.
# - Log level is set to `TRACE` because we can read them when we're not
#   swamped with discovery messages. Sync isn't fast enough yet.
#
# The enode URLs below are public facing Geth instances that are syncing to
# each of the networks. Nimbus-eth1 devs are free to use them for testing
# while they remain up. However, better results will be obtained if those nodes
# also have the Nimbus instances as "trusted peers".
#
set -e -u -o pipefail

# First argument says which testnet, or use mainnet for that.
# Defaults to goerli if omitted.
testnet=${1:-goerli}

# Additional arguments after the first are passed to Nimbus.
shift || :

staticnode_geth_mainnet='enode://7af995207d620d363ffbdac3216c45140c8fc31a1a30cac94dfad94713ba6b03efeb4f8dd4c0d676ec3e32a9eac2804560c3d3001c7551a2bb955c1e5ce22d17@mainnet.geth.ethereum.arxlogic.com:30303'
staticnode_geth_goerli='enode://9a8651c02d14ffbf7e328cd6c31307d90c9411673deeec819a1b7a205eed121c7eea192146937958608eaebff25dcd232fce958f031bf82ba3d55deaac3d0715@goerli.geth.ethereum.arxlogic.com:30303'
staticnode_geth_ropsten='enode://861f2b16e3da33f2af677de97087dd489b17f9a0685fdaf751fb524fdf171cd4b8f02a5dc9e25a2730d1aa1b22176f5c88397b7f01180d032375d1526a8e1421@ropsten.geth.ethereum.arxlogic.com:30303'
staticnode_geth_rinkeby='enode://bb34c7a91c9895769f782cd1f0da88025f302960beebac305010b7395912b3835eb954426b3cf4be1b47bae4c32973d87688ace8cce412a3efb88baabc77bd98@rinkeby.geth.ethereum.arxlogic.com:30303'
staticnode_geth_yolov3='enode://a11e7ed2a1a21b9464619f77734b9dec76befbc5ebb95ac7820f45728bc42c30f9bd406a83ddc28b28141bc0a8469638467ad6a48065977e1ac8e8f1c7a1e6b4@yolov3.geth.ethereum.arxlogic.com:30303'

case $testnet in
    mainnet)
        net_option= port=30193 staticnodes=$staticnode_geth_mainnet ;;
    goerli)
        net_option=--goerli port=30194 staticnodes=$staticnode_geth_goerli ;;
    ropsten)
        net_option=--ropsten port=30195 staticnodes=$staticnode_geth_ropsten ;;
    rinkeby)
        net_option=--rinkeby port=30196 staticnodes=$staticnode_geth_rinkeby ;;
    yolov3)
        net_option=--yolov3 port=30197 staticnodes=$staticnode_geth_yolov3 ;;
    *)
        echo "Unrecognised network: $testnet" 1>&2; exit 1 ;;
esac

# Perform DNS name lookup for enodes with names.
# Geth supports this nowadays, but Nimbus does not.
resolve_enodes() {
    local node prefix suffix host port ip
    set --
    for node in $staticnodes; do
        case $node in
            enode://*@*:*)
                prefix=${node%@*} suffix=${node##*@}
                host=${suffix%:*} port=${suffix##*:}
                case $host in
                    *[^0-9.]*)
                        ip=$(host -t a "$host" 2>/dev/null)
                        case $ip in
                            "$host has address "[0-9]*)
                                ip=${ip##* has address }
                                ;;
                            *)
                                echo "Name lookup for $host failed" 1>&2
                                exit 1
                                ;;
                        esac
                        node=$prefix@$ip:$port
                esac
        esac
        set -- "$@" "$node"
    done
    staticnodes="$*"
}
resolve_enodes

datadir="$HOME"/.nimbus/"$testnet"

# Use a stable nodekey if we have one, to ensure the remote Geth almost always
# accepts our connections. The nodekey's corresponding `enode` URL must be
# added with `admin.addTrustedPeer` to the remote Geth. This isn't perfect.
# Sometimes Geth is too busy even for a trusted peer. But usually it works.
#
# Note, this nodekey file isn't created automatically by nimbus-eth1 at the
# moment. We have to have done it manually before now.
#
if [ -e "$datadir"/nimbus/nodekey ]; then
    nodekey=$(cat "$datadir"/nimbus/nodekey)
    if [ -n "$nodekey" ]; then
        set -- --nodekey:"$nodekey"
    fi
fi

# So the process name shows up without a path in `netstat`.
export PATH=$HOME/Status/nimbus-eth1/build:$PATH

exec nimbus \
    --datadir:"$datadir" $net_option \
    --prune:full \
    --logMetrics --logMetricsInterval:5 \
    --log-level:TRACE \
    --nodiscover --nat:none --port:$port --protocols:eth \
    --staticnodes:"$staticnodes" \
    "$@"
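The nodekey file referenced above has to be prepared by hand. A minimal sketch of one way to do that, assuming the file simply holds a hex-encoded 32-byte private key and that a freshly generated random value is acceptable (both are assumptions, not confirmed by this commit):

```
# Hypothetical one-off setup for the goerli data directory used by the script.
datadir="$HOME"/.nimbus/goerli
mkdir -p "$datadir"/nimbus
# 32 random bytes, hex encoded; assumed usable as the node's private key.
openssl rand -hex 32 > "$datadir"/nimbus/nodekey
```

The `enode` URL derived from that key still has to be registered on the remote Geth with `admin.addTrustedPeer`, as the comments in the script point out.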
@@ -1,6 +1,6 @@
{
  "config": {
    "chainId": 1337702,
    "chainId": 1337752,
    "homesteadBlock": 0,
    "eip150Block": 0,
    "eip155Block": 0,

File diff suppressed because one or more lines are too long
@@ -12,73 +12,21 @@
## ----------------------------------------------------

import
  std/[sequtils, strformat, strutils, tables, times],
  ../../nimbus/[chain_config, constants],
  eth/[common, trie/trie_defs]
  std/[tables, times],
  ./pp_light,
  ../../nimbus/chain_config,
  eth/common

# ------------------------------------------------------------------------------
# Public functions, units pretty printer
# ------------------------------------------------------------------------------

proc ppMs*(elapsed: Duration): string =
  result = $elapsed.inMilliSeconds
  let ns = elapsed.inNanoSeconds mod 1_000_000
  if ns != 0:
    # to rounded deca milli seconds
    let dm = (ns + 5_000i64) div 10_000i64
    result &= &".{dm:02}"
  result &= "ms"

proc ppSecs*(elapsed: Duration): string =
  result = $elapsed.inSeconds
  let ns = elapsed.inNanoseconds mod 1_000_000_000
  if ns != 0:
    # to rounded decs seconds
    let ds = (ns + 5_000_000i64) div 10_000_000i64
    result &= &".{ds:02}"
  result &= "s"

proc toKMG*[T](s: T): string =
  proc subst(s: var string; tag, new: string): bool =
    if tag.len < s.len and s[s.len - tag.len ..< s.len] == tag:
      s = s[0 ..< s.len - tag.len] & new
      return true
  result = $s
  for w in [("000", "K"),("000K","M"),("000M","G"),("000G","T"),
            ("000T","P"),("000P","E"),("000E","Z"),("000Z","Y")]:
    if not result.subst(w[0],w[1]):
      return
export
  pp_light

# ------------------------------------------------------------------------------
# Public functions, pretty printer
# ------------------------------------------------------------------------------

proc pp*(s: string; hex = false): string =
  if hex:
    let n = (s.len + 1) div 2
    (if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. s.len-1]) &
      "[" & (if 0 < n: "#" & $n else: "") & "]"
  elif s.len <= 30:
    s
  else:
    (if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]) &
      "..(" & $s.len & ").." & s[s.len-16 ..< s.len]

proc pp*(b: Blob): string =
  b.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true)

proc pp*(a: Hash256; collapse = true): string =
  if not collapse:
    a.data.mapIt(it.toHex(2)).join.toLowerAscii
  elif a == emptyRlpHash:
    "emptyRlpHash"
  elif a == EMPTY_UNCLE_HASH:
    "EMPTY_UNCLE_HASH"
  elif a == EMPTY_SHA3:
    "EMPTY_SHA3"
  else:
    a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii

proc pp*(a: EthAddress): string =
  a.mapIt(it.toHex(2)).join[32 .. 39].toLowerAscii

@@ -119,6 +67,7 @@ proc pp*(g: Genesis; sep = " "): string =
  &"parentHash={g.parentHash.pp}{sep}" &
  &"baseFeePerGas={g.baseFeePerGas}"

proc pp*(h: BlockHeader; indent: int): string =
  h.pp("\n" & " ".repeat(max(1,indent)))
@@ -0,0 +1,160 @@
# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

## Pretty printing, an alternative to `$` for debugging
## ----------------------------------------------------
##
## minimal dependencies, avoiding circular import

import
  std/[sequtils, strformat, strutils, tables, times],
  nimcrypto/hash

export
  sequtils, strformat, strutils

const
  ZeroHash256 = MDigest[256].default

  EmptyUncleHash = ( "1dcc4de8dec75d7aab85b567b6ccd41a" &
                     "d312451b948a7413f0a142fd40d49347" ).toDigest

  BlankRootHash = ( "56e81f171bcc55a6ff8345e692c0f86e" &
                    "5b48e01b996cadc001622fb5e363b421" ).toDigest

  EmptySha3 = ( "c5d2460186f7233c927e7db2dcc703c0" &
                "e500b653ca82273b7bfad8045d85a470" ).toDigest

  EmptyRlpHash = ( "56e81f171bcc55a6ff8345e692c0f86e" &
                   "5b48e01b996cadc001622fb5e363b421" ).toDigest

# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

proc reGroup(q: openArray[int]; itemsPerSegment = 16): seq[seq[int]] =
  var top = 0
  while top < q.len:
    let w = top
    top = min(w + itemsPerSegment, q.len)
    result.add q[w ..< top]

# ------------------------------------------------------------------------------
# Public functions, units pretty printer
# ------------------------------------------------------------------------------

proc ppMs*(elapsed: Duration): string =
  result = $elapsed.inMilliSeconds
  let ns = elapsed.inNanoSeconds mod 1_000_000
  if ns != 0:
    # to rounded deca milli seconds
    let dm = (ns + 5_000i64) div 10_000i64
    result &= &".{dm:02}"
  result &= "ms"

proc ppSecs*(elapsed: Duration): string =
  result = $elapsed.inSeconds
  let ns = elapsed.inNanoseconds mod 1_000_000_000
  if ns != 0:
    # to rounded decs seconds
    let ds = (ns + 5_000_000i64) div 10_000_000i64
    result &= &".{ds:02}"
  result &= "s"

proc toKMG*[T](s: T): string =
  proc subst(s: var string; tag, new: string): bool =
    if tag.len < s.len and s[s.len - tag.len ..< s.len] == tag:
      s = s[0 ..< s.len - tag.len] & new
      return true
  result = $s
  for w in [("000", "K"),("000K","M"),("000M","G"),("000G","T"),
            ("000T","P"),("000P","E"),("000E","Z"),("000Z","Y")]:
    if not result.subst(w[0],w[1]):
      return

# ------------------------------------------------------------------------------
# Public functions, pretty printer
# ------------------------------------------------------------------------------

proc pp*(s: string; hex = false): string =
  if hex:
    let n = (s.len + 1) div 2
    (if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. s.len-1]) &
      "[" & (if 0 < n: "#" & $n else: "") & "]"
  elif s.len <= 30:
    s
  else:
    (if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]) &
      "..(" & $s.len & ").." & s[s.len-16 ..< s.len]

proc pp*(q: openArray[int]; itemsPerLine: int; lineSep: string): string =
  doAssert q == q.reGroup(itemsPerLine).concat
  q.reGroup(itemsPerLine)
    .mapIt(it.mapIt(&"0x{it:02x}").join(", "))
    .join("," & lineSep)

proc pp*(a: MDigest[256]; collapse = true): string =
  if not collapse:
    a.data.mapIt(it.toHex(2)).join.toLowerAscii
  elif a == EmptyRlpHash:
    "emptyRlpHash"
  elif a == EmptyUncleHash:
    "emptyUncleHash"
  elif a == EmptySha3:
    "EmptySha3"
  elif a == ZeroHash256:
    "zeroHash256"
  else:
    a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii

proc pp*(a: openArray[MDigest[256]]; collapse = true): string =
  "@[" & a.toSeq.mapIt(it.pp).join(" ") & "]"

proc pp*(q: openArray[int]; itemsPerLine: int; indent: int): string =
  q.pp(itemsPerLine = itemsPerLine, lineSep = "\n" & " ".repeat(max(1,indent)))

proc pp*(q: openArray[byte]; noHash = false): string =
  if q.len == 32 and not noHash:
    var a: array[32,byte]
    for n in 0..31: a[n] = q[n]
    MDigest[256](data: a).pp
  else:
    q.toSeq.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true)

# ------------------------------------------------------------------------------
# Elapsed time pretty printer
# ------------------------------------------------------------------------------

template showElapsed*(noisy: bool; info: string; code: untyped) =
  block:
    let start = getTime()
    code
    if noisy:
      let elpd {.inject.} = getTime() - start
      if 0 < times.inSeconds(elpd):
        echo "*** ", info, &": {elpd.ppSecs:>4}"
      else:
        echo "*** ", info, &": {elpd.ppMs:>4}"

template catchException*(info: string; trace: bool; code: untyped) =
  block:
    try:
      code
    except CatchableError as e:
      if trace:
        echo "*** ", info, ": exception ", e.name, "(", e.msg, ")"
        echo " ", e.getStackTrace.strip.replace("\n","\n ")

template catchException*(info: string; code: untyped) =
  catchException(info, false, code)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@@ -67,6 +67,7 @@ proc dumpGroupNl*(db: BaseChainDB; headers: openArray[BlockHeader];
  ## `p2p/chain/persist_blocks.persistBlocksImpl()`:
  ## ::
  ##    dumpStream.write c.db.dumpGroupNl(headers,bodies)
  ##    dumpStream.flushFile
  ##
  ## where `dumpStream` is some stream (think of `stdout`) of type `File`
  ## that could be initialised with
@@ -20,19 +20,56 @@
## from `issue 932` <https://github.com/status-im/nimbus-eth1/issues/932>`_.

import
  std/[distros, os, strformat, strutils, sequtils],
  std/[distros, os],
  ../nimbus/[chain_config, config, genesis],
  ../nimbus/db/[db_chain, select_backend],
  ./replay/pp,
  ../nimbus/p2p/chain,
  ./replay/[undump, pp],
  chronicles,
  eth/[common, p2p, trie/db],
  nimcrypto/hash,
  stew/results,
  unittest2

const
  baseDir = [".", "tests", ".." / "tests", $DirSep] # path containg repo
  repoDir = ["customgenesis", "."]                  # alternative repo paths
  jFile = "kintsugi.json"
type
  ReplaySession = object
    fancyName: string     # display name
    genesisFile: string   # json file base name
    termTotalDff: UInt256 # terminal total difficulty (to verify)
    captureFile: string   # gzipped RPL data dump
    ttdReachedAt: uint64  # block number where total difficulty becomes `true`
    failBlockAt: uint64   # stop here and expect that block to fail

const
  baseDir = [".", "..", ".."/"..", $DirSep]
  repoDir = [".", "tests"/"replay", "tests"/"customgenesis",
             "nimbus-eth1-blobs"/"replay",
             "nimbus-eth1-blobs"/"custom-network"]

  devnet4 = ReplaySession(
    fancyName:    "Devnet4",
    genesisFile:  "devnet4.json",
    captureFile:  "devnetfour5664.txt.gz",
    termTotalDff: 5_000_000_000.u256,
    ttdReachedAt: 5645,
    # Previously failed at `ttdReachedAt` (needed `state.nim` fix/update)
    failBlockAt:  99999999)

  devnet5 = ReplaySession(
    fancyName:    "Devnet5",
    genesisFile:  "devnet5.json",
    captureFile:  "devnetfive43968.txt.gz",
    termTotalDff: 500_000_000_000.u256,
    ttdReachedAt: 43711,
    failBlockAt:  99999999)

  kiln = ReplaySession(
    fancyName:    "Kiln",
    genesisFile:  "kiln.json",
    captureFile:  "kiln25872.txt.gz",
    termTotalDff: 20_000_000_000_000.u256,
    ttdReachedAt: 9999999,
    failBlockAt:  9999999)

when not defined(linux):
  const isUbuntu32bit = false

@@ -58,6 +95,18 @@ let
  #
  disablePersistentDB = isUbuntu32bit

# Block chains shared between test suites
var
  mdb: BaseChainDB     # memory DB
  ddb: BaseChainDB     # perstent DB on disk
  ddbDir: string       # data directory for disk database
  sSpcs: ReplaySession # current replay session specs

const
  # FIXED: Persistent database crash on `Devnet4` replay if the database
  # directory was acidentally deleted (due to a stray "defer:" directive.)
  ddbCrashBlockNumber = 2105

# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------

@@ -71,10 +120,11 @@ proc findFilePath(file: string): string =
      return path

proc flushDbDir(s: string) =
  let dataDir = s / "nimbus"
  if (dataDir / "data").dirExists:
    # Typically under Windows: there might be stale file locks.
    try: dataDir.removeDir except: discard
  if s != "":
    let dataDir = s / "nimbus"
    if (dataDir / "data").dirExists:
      # Typically under Windows: there might be stale file locks.
      try: dataDir.removeDir except: discard

proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:

@@ -85,37 +135,90 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
    else:
      echo pfx, args.toSeq.join

proc setTraceLevel =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)

proc setErrorLevel =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc ddbCleanUp(dir: string) =
  if not disablePersistentDB:
    ddbDir = dir
    dir.flushDbDir

proc ddbCleanUp =
  ddbDir.ddbCleanUp

proc isOK(rc: ValidationResult): bool =
  rc == ValidationResult.OK

proc ttdReached(db: BaseChainDB): bool =
  if db.config.terminalTotalDifficulty.isSome:
    return db.config.terminalTotalDifficulty.get <= db.totalDifficulty

proc importBlocks(c: Chain; h: seq[BlockHeader]; b: seq[BlockBody];
                  noisy = false): bool =
  ## On error, the block number of the failng block is returned
  let
    (first, last) = (h[0].blockNumber, h[^1].blockNumber)
    nTxs = b.mapIt(it.transactions.len).foldl(a+b)
    nUnc = b.mapIt(it.uncles.len).foldl(a+b)
    tddOk = c.db.ttdReached
    bRng = if 1 < h.len: &"s [#{first}..#{last}]={h.len}" else: &" #{first}"
    blurb = &"persistBlocks([#{first}..#"

  noisy.say "***", &"block{bRng} #txs={nTxs} #uncles={nUnc}"

  catchException("persistBlocks()", trace = true):
    if c.persistBlocks(h, b).isOk:
      if not tddOk and c.db.ttdReached:
        noisy.say "***", &"block{bRng} => tddReached"
      return true

# ------------------------------------------------------------------------------
# Test Runner
# ------------------------------------------------------------------------------

proc runner(noisy = true; file = jFile) =
proc genesisLoadRunner(noisy = true;
                       captureSession = devnet4;
                       persistPruneTrie = true) =
  sSpcs = captureSession

  let
    fileInfo = file.splitFile.name.split(".")[0]
    filePath = file.findFilePath
    gFileInfo = sSpcs.genesisFile.splitFile.name.split(".")[0]
    gFilePath = sSpcs.genesisFile.findFilePath

    tmpDir = if disablePersistentDB: "*notused*"
             else: filePath.splitFile.dir / "tmp"
             else: gFilePath.splitFile.dir / "tmp"

  defer:
    if not disablePersistentDB: tmpDir.flushDbDir
    persistPruneInfo = if persistPruneTrie: "pruning enabled"
                       else:                "no pruning"

  suite "Kintsugi custom network test scenario":
  suite &"{sSpcs.fancyName} custom network genesis & database setup":
    var
      params: NetworkParams
      mdb, ddb: BaseChainDB

    test &"Load params from {fileInfo}":
      noisy.say "***", "custom-file=", filePath
      check filePath.loadNetworkParams(params)
    test &"Load params from {gFileInfo}":
      noisy.say "***", "custom-file=", gFilePath
      check gFilePath.loadNetworkParams(params)

    test "Construct in-memory BaseChainDB":
    test "Construct in-memory BaseChainDB, pruning enabled":
      mdb = newBaseChainDB(
        newMemoryDb(),
        id = params.config.chainID.NetworkId,
        params = params)

    test &"Construct persistent BaseChainDB on {tmpDir}":
      check mdb.ttd == sSpcs.termTotalDff

    test &"Construct persistent BaseChainDB on {tmpDir}, {persistPruneInfo}":
      if disablePersistentDB:
        skip()
      else:

@@ -123,15 +226,17 @@ proc runner(noisy = true; file = jFile) =
        # cleared. There might be left overs from a previous crash or
        # because there were file locks under Windows which prevented a
        # previous clean up.
        tmpDir.flushDbDir
        tmpDir.ddbCleanUp

        # Constructor ...
        ddb = newBaseChainDB(
          tmpDir.newChainDb.trieDB,
          id = params.config.chainID.NetworkId,
          pruneTrie = true,
          pruneTrie = persistPruneTrie,
          params = params)

        check mdb.ttd == sSpcs.termTotalDff

    test "Initialise in-memory Genesis":
      mdb.initializeEmptyDb

@@ -156,17 +261,109 @@ proc runner(noisy = true; file = jFile) =
        onTheFlyHeaderPP = ddb.toGenesisHeader.pp
      check storedhHeaderPP == onTheFlyHeaderPP


proc testnetChainRunner(noisy = true;
                        memoryDB = true;
                        stopAfterBlock = 999999999) =
  let
    cFileInfo = sSpcs.captureFile.splitFile.name.split(".")[0]
    cFilePath = sSpcs.captureFile.findFilePath
    dbInfo = if memoryDB: "in-memory" else: "persistent"

    pivotBlockNumber = sSpcs.failBlockAt.u256
    lastBlockNumber = stopAfterBlock.u256
    ttdBlockNumber = sSpcs.ttdReachedAt.u256

  suite &"Block chain DB inspector for {sSpcs.fancyName}":
    var
      bdb: BaseChainDB
      chn: Chain
      pivotHeader: BlockHeader
      pivotBody: BlockBody

    test &"Inherit {dbInfo} block chain DB from previous session":
      check not mdb.isNil
      check not ddb.isNil

      # Whatever DB suits, mdb: in-memory, ddb: persistet/on-disk
      bdb = if memoryDB: mdb else: ddb

      chn = bdb.newChain
      noisy.say "***", "ttd",
        " db.config.TTD=", chn.db.config.terminalTotalDifficulty
        # " db.arrowGlacierBlock=0x", chn.db.config.arrowGlacierBlock.toHex

    test &"Replay {cFileInfo} capture, may fail ~#{pivotBlockNumber} "&
        &"(slow -- time for coffee break)":
      noisy.say "***", "capture-file=", cFilePath
      discard

    test &"Processing {sSpcs.fancyName} blocks":
      for w in cFilePath.undumpNextGroup:
        let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)

        # Install & verify Genesis
        if w[0][0].blockNumber == 0.u256:
          doAssert w[0][0] == bdb.getBlockHeader(0.u256)
          continue

        # Persist blocks, full range before `pivotBlockNumber`
        if toBlock < pivotBlockNumber:
          if not chn.importBlocks(w[0], w[1], noisy):
            # Just a guess -- might be any block in that range
            (pivotHeader, pivotBody) = (w[0][0],w[1][0])
            break
          if chn.db.ttdReached:
            check ttdBlockNumber <= toBlock
          else:
            check toBlock < ttdBlockNumber
          if lastBlockNumber <= toBlock:
            break

        else:
          let top = (pivotBlockNumber - fromBlock).truncate(uint64).int

          # Load the blocks before the pivot block
          if 0 < top:
            check chn.importBlocks(w[0][0 ..< top],w[1][0 ..< top], noisy)

          (pivotHeader, pivotBody) = (w[0][top],w[1][top])
          break

    test &"Processing {sSpcs.fancyName} block #{pivotHeader.blockNumber}, "&
        &"persistBlocks() will fail":

      setTraceLevel()

      if pivotHeader.blockNumber == 0:
        skip()
      else:
        # Expecting that the import fails at the current block ...
        check not chn.importBlocks(@[pivotHeader], @[pivotBody], noisy)

# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

proc customNetworkMain*(noisy = defined(debug)) =
  noisy.runner
  defer: ddbCleanUp()
  noisy.genesisLoadRunner

when isMainModule:
  var noisy = defined(debug)
  noisy = true
  noisy.runner
  let noisy = defined(debug) or true
  setErrorLevel()

  noisy.showElapsed("customNetwork"):
    defer: ddbCleanUp()

    noisy.genesisLoadRunner(
      # any of: devnet4, devnet5, kiln, etc.
      captureSession = devnet4)

    # Note that the `testnetChainRunner()` finds the replay dump files
    # typically on the `nimbus-eth1-blobs` module.
    noisy.testnetChainRunner(
      stopAfterBlock = 999999999)

# ------------------------------------------------------------------------------
# End
@@ -4,8 +4,8 @@ import
  ../nimbus/[genesis, config, chain_config]

const
  baseDir = [".", "tests", ".." / "tests", $DirSep] # path containg repo
  repoDir = ["customgenesis", "status"]             # alternative repo paths
  baseDir = [".", "tests", ".."/"tests", $DirSep] # path containg repo
  repoDir = [".", "customgenesis"]                # alternative repo paths

proc findFilePath(file: string): string =
  result = "?unknown?" / file
@@ -56,9 +56,9 @@ proc customGenesisTest() =
    check cg.config.cliquePeriod == 30
    check cg.config.cliqueEpoch == 30000

  test "kintsugi.json":
  test "Devnet4.json (aka Kintsugi in all but chainId)":
    var cg: NetworkParams
    check loadNetworkParams("kintsugi.json".findFilePath, cg)
    check loadNetworkParams("devnet4.json".findFilePath, cg)
    let h = cg.toGenesisHeader
    let stateRoot = "3b84f313bfd49c03cc94729ade2e0de220688f813c0c895a99bd46ecc9f45e1e".toDigest
    let genesisHash = "a28d8d73e087a01d09d8cb806f60863652f30b6b6dfa4e0157501ff07d422399".toDigest

@@ -66,6 +66,16 @@ proc customGenesisTest() =
    check h.blockHash == genesisHash
    check cg.config.poaEngine == false

  test "Devnet5.json (aka Kiln in all but chainId and TTD)":
    var cg: NetworkParams
    check loadNetworkParams("devnet5.json".findFilePath, cg)
    let h = cg.toGenesisHeader
    let stateRoot = "52e628c7f35996ba5a0402d02b34535993c89ff7fc4c430b2763ada8554bee62".toDigest
    let genesisHash = "51c7fe41be669f69c45c33a56982cbde405313342d9e2b00d7c91a7b284dd4f8".toDigest
    check h.stateRoot == stateRoot
    check h.blockHash == genesisHash
    check cg.config.poaEngine == false

proc genesisMain*() =
  genesisTest()
  customGenesisTest()
@@ -10,14 +10,14 @@

import
  std/[os, sequtils, strformat, strutils, times],
  ./replay/gunzip,
  ./replay/[pp, gunzip],
  ../nimbus/utils/[pow, pow/pow_cache, pow/pow_dataset],
  eth/[common],
  unittest2

const
  baseDir = [".", "tests", ".." / "tests", $DirSep] # path containg repo
  repoDir = ["test_pow", "status"]                  # alternative repos
  repoDir = ["replay"]                              # alternative repos

  specsDump = "mainspecs2k.txt.gz"
@@ -25,45 +25,6 @@ const
# Helpers
# ------------------------------------------------------------------------------

proc ppMs*(elapsed: Duration): string =
  result = $elapsed.inMilliSeconds
  let ns = elapsed.inNanoSeconds mod 1_000_000
  if ns != 0:
    # to rounded deca milli seconds
    let dm = (ns + 5_000i64) div 10_000i64
    result &= &".{dm:02}"
  result &= "ms"

proc ppSecs*(elapsed: Duration): string =
  result = $elapsed.inSeconds
  let ns = elapsed.inNanoseconds mod 1_000_000_000
  if ns != 0:
    # to rounded decs seconds
    let ds = (ns + 5_000_000i64) div 10_000_000i64
    result &= &".{ds:02}"
  result &= "s"

proc toKMG*[T](s: T): string =
  proc subst(s: var string; tag, new: string): bool =
    if tag.len < s.len and s[s.len - tag.len ..< s.len] == tag:
      s = s[0 ..< s.len - tag.len] & new
      return true
  result = $s
  for w in [("000", "K"),("000K","M"),("000M","G"),("000G","T"),
            ("000T","P"),("000P","E"),("000E","Z"),("000Z","Y")]:
    if not result.subst(w[0],w[1]):
      return

template showElapsed*(noisy: bool; info: string; code: untyped) =
  let start = getTime()
  code
  if noisy:
    let elpd {.inject.} = getTime() - start
    if 0 < elpd.inSeconds:
      echo "*** ", info, &": {elpd.ppSecs:>4}"
    else:
      echo "*** ", info, &": {elpd.ppMs:>4}"

proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:
    if args.len == 0:

@@ -73,13 +34,6 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
    else:
      echo pfx, args.toSeq.join

proc pp*(a: BlockNonce): string =
  a.mapIt(it.toHex(2)).join.toLowerAscii

proc pp*(a: Hash256): string =
  a.data.mapIt(it.toHex(2)).join[24 .. 31].toLowerAscii


proc findFilePath(file: string): string =
  result = "?unknown?" / file
  for dir in baseDir:
@@ -29,8 +29,8 @@ type
const
  prngSeed = 42

  baseDir = [".", "tests", ".." / "tests", $DirSep] # path containg repo
  repoDir = ["replay", "status"]                    # alternative repos
  baseDir = [".", "..", ".."/"..", $DirSep]
  repoDir = [".", "tests"/"replay", "nimbus-eth1-blobs"/"replay"]

  goerliCapture: CaptureSpecs = (
    network: GoerliNet,
@@ -134,6 +134,16 @@ proc findFilePath(file: string): string =
    if path.fileExists:
      return path

proc setTraceLevel =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)

proc setErrorLevel =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

# ------------------------------------------------------------------------------
# Test Runners
# ------------------------------------------------------------------------------
@@ -894,11 +904,13 @@ when isMainModule:
  const
    noisy = defined(debug)
    capts0: CaptureSpecs = goerliCapture
    capts1: CaptureSpecs = (GoerliNet, "goerli504192.txt.gz", 30000, 500, 1500)
    capts1: CaptureSpecs = (GoerliNet, "goerli482304.txt.gz", 30000, 500, 1500)
    # Note: mainnet has the leading 45k blocks without any transactions
    capts2: CaptureSpecs = (MainNet, "mainnet843841.txt.gz", 30000, 500, 1500)
    capts2: CaptureSpecs = (MainNet, "mainnet332160.txt.gz", 30000, 500, 1500)

  noisy.runTxLoader(capture = capts2)
  setErrorLevel()

  noisy.runTxLoader(capture = capts1)
  noisy.runTxPoolTests
  true.runTxPackerTests
@@ -186,16 +186,6 @@ proc isOK*(rc: ValidationResult): bool =
proc toHex*(acc: EthAddress): string =
  acc.toSeq.mapIt(it.toHex(2)).join

template showElapsed*(noisy: bool; info: string; code: untyped) =
  let start = getTime()
  code
  if noisy:
    let elpd {.inject.} = getTime() - start
    if 0 < elpd.inSeconds:
      echo "*** ", info, &": {elpd.ppSecs:>4}"
    else:
      echo "*** ", info, &": {elpd.ppMs:>4}"

proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:
    if args.len == 0: