Update snap offline tests (#1199)

* Re-implemented `hexaryFollow()` in a more general fashion

details:
+ The re-implemented `hexaryFollow()` is named `hexaryPath()`
+ Renamed `rTreeFollow()` to `hexaryPath()`

why:
  As the `hexaryPath()` functions return similarly organised structures,
  their results become comparable when running over the persistent and
  the in-memory databases.

* Added traversal functionality for persistent ChainDB

* Using `Account` values as re-packed Blob

* Repack samples as compressed data files

* Produce test data

details:
+ Can force a pivot state root switch after minimal coverage.
+ To emulate certain network behaviour, downloading accounts stops for
  a particular pivot state root once 30% coverage (a static threshold) is
  reached. Subsequent accounts are downloaded for a later pivot state root,
  as in the sketch below.
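A minimal sketch of the switch rule, with assumed names and types (the real logic lives in the snap worker's `runMulti()`):

const switchPivotAfterCoverage = 0.30   # static 30% threshold

type PivotEnv = object
  coverage: float             ## fraction of the account range fetched so far
  minCoverageReachedOk: bool  ## when set, stop fetching for this pivot

proc checkPivotSwitch(env: var PivotEnv) =
  ## Once the static threshold is reached, mark the pivot so that later
  ## accounts are downloaded for a newer pivot state root.
  if switchPivotAfterCoverage <= env.coverage:
    env.minCoverageReachedOk = true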
Jordan Hrycaj 2022-08-24 14:44:18 +01:00 committed by GitHub
parent 91a3425731
commit de2c13e136
29 changed files with 1372 additions and 5779 deletions

View File

@ -39,8 +39,8 @@ proc runStart(buddy: FullBuddyRef): bool =
proc runStop(buddy: FullBuddyRef) =
worker.stop(buddy)
proc runPool(buddy: FullBuddyRef) =
worker.runPool(buddy)
proc runPool(buddy: FullBuddyRef; last: bool) =
worker.runPool(buddy, last)
proc runSingle(buddy: FullBuddyRef) {.async.} =
await worker.runSingle(buddy)

View File

@ -877,7 +877,7 @@ proc runSingle*(buddy: FullBuddyRef) {.async.} =
await sleepAsync(2.seconds)
proc runPool*(buddy: FullBuddyRef) =
proc runPool*(buddy: FullBuddyRef; last: bool) =
## Once started, the function `runPool()` is called for all worker peers in
## a row (as the body of an iteration.) There will be no other worker peer
## functions activated simultaneously.
@ -885,7 +885,9 @@ proc runPool*(buddy: FullBuddyRef) =
## This procedure is started if the global flag `buddy.ctx.poolMode` is set
## `true` (default is `false`.) It is the responsibility of the `runPool()`
## instance to reset the flag `buddy.ctx.poolMode`, typically at the first
## peer instance as the number of active instances is unknown to `runPool()`.
## peer instance.
##
## The argument `last` is set `true` if the last entry is reached.
##
## Note that this function does not run in `async` mode.
##
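A hypothetical dispatcher sketch (an assumption, not part of this diff) showing how a pool-mode round would drive the new `last` argument:

proc poolModeRound(buddies: seq[FullBuddyRef]) =
  ## Hypothetical scheduler round: `last` is `true` exactly once, for the
  ## final worker peer.
  for n, buddy in buddies:
    buddy.runPool(last = (n == buddies.len - 1))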

View File

@ -41,8 +41,8 @@ proc runStart(buddy: SnapBuddyRef): bool =
proc runStop(buddy: SnapBuddyRef) =
worker.stop(buddy)
proc runPool(buddy: SnapBuddyRef) =
worker.runPool(buddy)
proc runPool(buddy: SnapBuddyRef; last: bool) =
worker.runPool(buddy, last)
proc runSingle(buddy: SnapBuddyRef) {.async.} =
await worker.runSingle(buddy)

View File

@ -16,6 +16,7 @@ import
stew/[byteutils, interval_set],
stint,
../../constants,
../protocol,
../types
{.push raises: [Defect].}
@ -34,6 +35,20 @@ type
## Managed structure to handle non-adjacent `LeafRange` intervals
IntervalSetRef[NodeTag,UInt256]
PackedAccountRange* = object
## Re-packed version of `SnapAccountRange`. Repacking is needed because
## the `snap/1` protocol uses a different RLP encoding from the one used
## for storing in the database. So the `PackedAccount` is `BaseDB` trie
## compatible.
accounts*: seq[PackedAccount] ## List of re-packed accounts data
proof*: SnapAccountProof ## Boundary proofs
PackedAccount* = object
## The `snap/1` driver returns the full `Account` structure which is
## unwanted overhead here.
accHash*: Hash256
accBlob*: Blob
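A hedged sketch of the repacking step, assuming a `SnapAccount` with `accHash: Hash256` and `accBody: Account` fields as defined by the `snap/1` protocol module:

import eth/[common/eth_types, rlp]

proc repack(w: SnapAccount): PackedAccount =
  ## Re-encode the account body so the blob is `BaseDB` trie compatible.
  PackedAccount(accHash: w.accHash, accBlob: w.accBody.encode)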
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
@ -66,8 +81,7 @@ proc init*(nh: var NodeHash; data: openArray[byte]): bool =
## Import argument `data` into `nh` which must have length either `32` or `0`.
## The latter case is equivalent to an all zero byte array of size `32`.
if data.len == 32:
for n in 0 ..< 32:
nh.Hash256.data[n] = data[n]
(addr nh.Hash256.data[0]).copyMem(unsafeAddr data[0], 32)
return true
elif data.len == 0:
nh.reset
@ -85,7 +99,7 @@ proc init*(nt: var NodeTag; data: openArray[byte]): bool =
# ------------------------------------------------------------------------------
proc read*(rlp: var Rlp, T: type NodeTag): T
{.gcsafe, raises: [Defect,RlpError]} =
{.gcsafe, raises: [Defect,RlpError].} =
rlp.read(Hash256).to(T)
proc append*(writer: var RlpWriter, nid: NodeTag) =

View File

@ -16,6 +16,7 @@ import
eth/[common/eth_types, p2p],
stew/[interval_set, keyed_queue],
../../db/select_backend,
../../utils/prettify,
".."/[protocol, sync_desc],
./worker/[accounts_db, fetch_accounts, pivot, ticker],
"."/[range_desc, worker_desc]
@ -36,6 +37,16 @@ proc meanStdDev(sum, sqSum: float; length: int): (float,float) =
result[0] = sum / length.float
result[1] = sqrt(sqSum / length.float - result[0] * result[0])
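A worked example of the running-sums formula above: samples 1, 2, 3 give sum = 6 and sqSum = 14, hence a mean of 2.0 and a standard deviation of sqrt(14/3 - 2^2) ≈ 0.8165:

let (mean, stdDev) = meanStdDev(6.0, 14.0, 3)
doAssert mean == 2.0            # stdDev = sqrt(14/3 - 4) ≈ 0.8165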
template noExceptionOops(info: static[string]; code: untyped) =
try:
code
except CatchableError as e:
raiseAssert "Inconveivable (" & info & ": name=" & $e.name & " msg=" & e.msg
except Defect as e:
raise e
except Exception as e:
raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
@ -70,14 +81,8 @@ proc setPivotEnv(buddy: SnapBuddyRef; header: BlockHeader) =
# Statistics
ctx.data.pivotCount.inc
# Activate per-state root environment
ctx.data.pivotEnv = ctx.data.pivotTable.lruAppend(key, env, ctx.buddiesMax)
# -----
if ctx.data.proofDumpOk:
let peer = buddy.peer
trace "Snap proofs dump enabled", peer
ctx.data.proofDumpOk = false
env.proofDumpOk = true
#env.pivotAccount = 0.to(NodeTag)
proc updatePivotEnv(buddy: SnapBuddyRef): bool =
@ -86,9 +91,17 @@ proc updatePivotEnv(buddy: SnapBuddyRef): bool =
if buddy.data.pivotHeader.isSome:
let
ctx = buddy.ctx
env = ctx.data.pivotEnv
newStateNumber = buddy.data.pivotHeader.unsafeGet.blockNumber
stateNumber = if ctx.data.pivotEnv.isNil: 0.toBlockNumber
else: ctx.data.pivotEnv.stateHeader.blockNumber
stateNumber = if env.isNil: 0.toBlockNumber
else: env.stateHeader.blockNumber
when switchPivotAfterCoverage < 1.0:
if not env.isNil:
if stateNumber < newStateNumber and env.minCoverageReachedOk:
buddy.setPivotEnv(buddy.data.pivotHeader.get)
return true
if stateNumber + maxPivotBlockWindow < newStateNumber:
buddy.setPivotEnv(buddy.data.pivotHeader.get)
return true
@ -117,12 +130,20 @@ proc tickerUpdate*(ctx: SnapCtxRef): TickerStatsUpdater =
tabLen = ctx.data.pivotTable.len
pivotBlock = if ctx.data.pivotEnv.isNil: none(BlockNumber)
else: some(ctx.data.pivotEnv.stateHeader.blockNumber)
accCoverage = ctx.data.coveredAccounts.fullFactor
when snapAccountsDumpEnable:
if snapAccountsDumpCoverageStop < accCoverage:
trace " Snap proofs dump stop",
threshold=snapAccountsDumpCoverageStop, coverage=accCoverage.toPC
ctx.data.proofDumpOk = false
TickerStats(
pivotBlock: pivotBlock,
activeQueues: tabLen,
flushedQueues: ctx.data.pivotCount.int64 - tabLen,
accounts: meanStdDev(aSum, aSqSum, count),
accCoverage: ctx.data.coveredAccounts.fullFactor,
accCoverage: accCoverage,
fillFactor: meanStdDev(uSum, uSqSum, count),
bulkStore: ctx.data.accountsDb.dbImportStats)
@ -141,12 +162,12 @@ proc setup*(ctx: SnapCtxRef; tickerOK: bool): bool =
ctx.data.ticker = TickerRef.init(ctx.tickerUpdate)
else:
trace "Ticker is disabled"
# ----
if snapAccountsDumpEnable:
result = true
# -----------------------
when snapAccountsDumpEnable:
doAssert ctx.data.proofDumpFile.open("./dump-stream.out", fmWrite)
ctx.data.proofDumpOk = true
# ----
true
proc release*(ctx: SnapCtxRef) =
## Global clean up
@ -196,7 +217,7 @@ proc runSingle*(buddy: SnapBuddyRef) {.async.} =
buddy.ctrl.multiOk = true
proc runPool*(buddy: SnapBuddyRef) =
proc runPool*(buddy: SnapBuddyRef, last: bool) =
## Once started, the function `runPool()` is called for all worker peers in
## a row (as the body of an iteration.) There will be no other worker peer
## functions activated simultaneously.
@ -204,11 +225,20 @@ proc runPool*(buddy: SnapBuddyRef) =
## This procedure is started if the global flag `buddy.ctx.poolMode` is set
## `true` (default is `false`.) It is the responsibility of the `runPool()`
## instance to reset the flag `buddy.ctx.poolMode`, typically at the first
## peer instance as the number of active instances is unknown to `runPool()`.
## peer instance.
##
## The argument `last` is set `true` if the last entry is reached.
##
## Note that this function does not run in `async` mode.
##
discard
let ctx = buddy.ctx
if ctx.poolMode:
ctx.poolMode = false
if not ctx.data.runPoolHook.isNil:
noExceptionOops("runPool"):
ctx.data.runPoolHook(buddy)
if last:
ctx.data.runPoolHook = nil
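Usage sketch for the new hook mechanism (the real registration happens in `runMulti()` below; the helper name is hypothetical):

proc schedulePivotAction(buddy: SnapBuddyRef) =
  ## Hypothetical helper: schedule a one-shot action over all worker peers.
  buddy.ctx.poolMode = true     # scheduler will call `runPool()` per peer
  buddy.ctx.data.runPoolHook = proc(b: SnapBuddyRef) =
    discard                     # per-peer action goes here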
proc runMulti*(buddy: SnapBuddyRef) {.async.} =
@ -219,15 +249,41 @@ proc runMulti*(buddy: SnapBuddyRef) {.async.} =
let
ctx = buddy.ctx
peer = buddy.peer
var
havePivotOk = (buddy.data.pivotHeader.isSome and
buddy.data.pivotHeader.get.blockNumber != 0)
if buddy.data.pivotHeader.isNone or
buddy.data.pivotHeader.get.blockNumber == 0:
# Switch pivot state root if this much coverage has already been achieved
when switchPivotAfterCoverage < 1.0:
if havePivotOk:
# So there is a `ctx.data.pivotEnv`
if ctx.data.pivotEnv.minCoverageReachedOk:
# Force fetching new pivot if coverage reached
havePivotOk = false
else:
# Not sure yet, so check whether coverage has been reached at all
let cov = ctx.data.pivotEnv.availAccounts.freeFactor
if switchPivotAfterCoverage <= cov:
trace " Snap accounts coverage reached",
threshold=switchPivotAfterCoverage, coverage=cov.toPC
# Need to reset pivot handlers
buddy.ctx.poolMode = true
buddy.ctx.data.runPoolHook = proc(b: SnapBuddyRef) =
b.ctx.data.pivotEnv.minCoverageReachedOk = true
b.pivotRestart
return
if not havePivotOk:
await buddy.pivotExec()
if not buddy.updatePivotEnv():
return
# Ignore rest if the pivot is still acceptably covered
when switchPivotAfterCoverage < 1.0:
if ctx.data.pivotEnv.minCoverageReachedOk:
await sleepAsync(50.milliseconds)
return
if await buddy.fetchAccounts():
buddy.ctrl.multiOk = false
buddy.data.pivotHeader = none(BlockHeader)

View File

@ -22,8 +22,8 @@ import
../../../db/[kvstore_rocksdb, select_backend, storage_types],
"../.."/[protocol, types],
../range_desc,
./db/[bulk_storage, hexary_defs, hexary_desc, hexary_follow, hexary_import,
hexary_interpolate, rocky_bulk_load]
./db/[bulk_storage, hexary_defs, hexary_desc, hexary_import,
hexary_interpolate, hexary_paths, rocky_bulk_load]
{.push raises: [Defect].}
@ -57,6 +57,9 @@ type
proc to(h: Hash256; T: type NodeKey): T =
h.data.T
proc convertTo(data: openArray[byte]; T: type Hash256): T =
discard result.NodeHash.init(data) # error => zero
template elapsed(duration: times.Duration; code: untyped) =
block:
let start = getTime()
@ -64,6 +67,22 @@ template elapsed(duration: times.Duration; code: untyped) =
code
duration = getTime() - start
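A minimal usage sketch of the `elapsed` template above; the workload is a stand-in:

var
  took: times.Duration
  accu = 0
took.elapsed:
  for n in 0 ..< 1_000_000:     # stand-in workload
    accu += n
echo "workload took ", took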
template noKeyError(info: static[string]; code: untyped) =
try:
code
except KeyError as e:
raiseAssert "Not possible (" & info & "): " & e.msg
template noRlpExceptionOops(info: static[string]; code: untyped) =
try:
code
except RlpError:
return err(RlpEncoding)
except Defect as e:
raise e
except Exception as e:
raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
# ------------------------------------------------------------------------------
# Private debugging helpers
# ------------------------------------------------------------------------------
@ -219,7 +238,7 @@ proc merge*(
proc merge*(
ps: AccountsDbSessionRef;
base: NodeTag;
acc: seq[SnapAccount];
acc: seq[PackedAccount];
): Result[void,HexaryDbError]
{.gcsafe, raises: [Defect, RlpError].} =
## Import account records (as received with the snap message `AccountRange`)
@ -254,7 +273,7 @@ proc merge*(
# Verify lower bound
if pathTag0 < base:
error = HexaryDbError.AccountSmallerThanBase
trace "merge(seq[SnapAccount])", peer, proofs, base, accounts, error
trace "merge(seq[PackedAccount])", peer, proofs, base, accounts, error
break collectAccounts
# Add base for the records (no payload). Note that the assumption
@ -265,12 +284,11 @@ proc merge*(
# Check for the case that accounts are appended
elif 0 < ps.rpDB.acc.len and pathTag0 <= ps.rpDB.acc[^1].pathTag:
error = HexaryDbError.AccountsNotSrictlyIncreasing
trace "merge(seq[SnapAccount])", peer, proofs, base, accounts, error
trace "merge(seq[PackedAccount])", peer, proofs, base, accounts, error
break collectAccounts
# Add first account
ps.rpDB.acc.add RLeafSpecs(
pathTag: pathTag0, payload: acc[0].accBody.encode)
ps.rpDB.acc.add RLeafSpecs(pathTag: pathTag0, payload: acc[0].accBlob)
# Verify & add other accounts
for n in 1 ..< acc.len:
@ -281,11 +299,10 @@ proc merge*(
ps.rpDB.acc.setLen(saveLen)
error = AccountsNotSrictlyIncreasing
trace "merge(seq[SnapAccount])", peer, proofs, base, accounts, error
trace "merge(seq[PackedAccount])", peer, proofs, base, accounts, error
break collectAccounts
ps.rpDB.acc.add RLeafSpecs(
pathTag: nodeTag, payload: acc[n].accBody.encode)
ps.rpDB.acc.add RLeafSpecs(pathTag: nodeTag, payload: acc[n].accBlob)
# End block `collectAccounts`
@ -310,7 +327,8 @@ proc interpolate*(ps: AccountsDbSessionRef): Result[void,HexaryDbError] =
## it must be replaced by the new facility of the upcoming re-factored
## database layer.
##
ps.rpDB.hexaryInterpolate()
noKeyError("interpolate"):
result = ps.rpDB.hexaryInterpolate()
proc dbImports*(ps: AccountsDbSessionRef): Result[void,HexaryDbError] =
## Experimental: try several db-import modes and record statistics
@ -342,7 +360,7 @@ proc dbImports*(ps: AccountsDbSessionRef): Result[void,HexaryDbError] =
proc sortMerge*(base: openArray[NodeTag]): NodeTag =
## Helper for merging several `(NodeTag,seq[SnapAccount])` data sets
## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets
## so that there is no overlap, which would be rejected by `merge()`.
##
## This function selects a `NodeTag` from a list.
@ -351,13 +369,13 @@ proc sortMerge*(base: openArray[NodeTag]): NodeTag =
if w < result:
result = w
proc sortMerge*(acc: openArray[seq[SnapAccount]]): seq[SnapAccount] =
## Helper for merging several `(NodeTag,seq[SnapAccount])` data sets
proc sortMerge*(acc: openArray[seq[PackedAccount]]): seq[PackedAccount] =
## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets
## so that there is no overlap, which would be rejected by `merge()`.
##
## This function flattens and sorts the argument account lists.
noPpError("sortMergeAccounts"):
var accounts: Table[NodeTag,SnapAccount]
var accounts: Table[NodeTag,PackedAccount]
for accList in acc:
for item in accList:
accounts[item.accHash.to(NodeTag)] = item
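A hedged usage sketch: joining two possibly overlapping download results before handing them to `merge()`; `base0/base1` and `acc0/acc1` are assumed inputs.

let
  base = sortMerge(@[base0, base1])     # smallest lower bound wins
  accounts = sortMerge(@[acc0, acc1])   # flattened, sorted, deduplicated
doAssert ps.merge(base, accounts).isOk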
@ -386,10 +404,10 @@ proc dbBackendRocksDb*(ps: AccountsDbSessionRef): bool =
proc importAccounts*(
pv: AccountsDbRef;
peer: Peer, ## for log messages
root: Hash256; ## state root
base: NodeTag; ## before or at first account entry in `data`
data: SnapAccountRange; ## `snap/1 ` reply data
peer: Peer, ## for log messages
root: Hash256; ## state root
base: NodeTag; ## before or at first account entry in `data`
data: PackedAccountRange; ## re-packed `snap/1 ` reply data
storeData = false
): Result[void,HexaryDbError] =
## Validate accounts and proofs (as received with the snap message
@ -433,55 +451,77 @@ proc importAccounts*(
ok()
# ------------------------------------------------------------------------------
# Debugging
# Debugging (and playing with the hexary database)
# ------------------------------------------------------------------------------
proc getChainDbAccount*(
ps: AccountsDbSessionRef;
accHash: Hash256
): Result[Account,HexaryDbError] =
): Result[Account,HexaryDbError] =
## Fetch account via `BaseChainDB`
try:
noRlpExceptionOops("getChainDbAccount()"):
let
getFn: HexaryGetFn = proc(key: Blob): Blob = ps.base.db.get(key)
leaf = accHash.to(NodeKey).hexaryPath(ps.rpDB.rootKey, getFn).leafData
if 0 < leaf.len:
let acc = rlp.decode(leaf,Account)
return ok(acc)
err(AccountNotFound)
proc nextChainDbKey*(
ps: AccountsDbSessionRef;
accHash: Hash256
): Result[Hash256,HexaryDbError] =
## Fetch the account path on the `BaseChainDB`, the one following the
## argument account.
noRlpExceptionOops("nextChainDbKey()"):
let
getFn: HexaryGetFn = proc(key: Blob): Blob = ps.base.db.get(key)
path = accHash.to(NodeKey)
(_, _, leafBlob) = ps.rpDB.hexaryFollow(ps.rpDB.rootKey, path, getFn)
if 0 < leafBlob.len:
let acc = rlp.decode(leafBlob,Account)
return ok(acc)
except RlpError:
return err(RlpEncoding)
except Defect as e:
raise e
except Exception as e:
raiseAssert "Ooops getChainDbAccount(): name=" & $e.name & " msg=" & e.msg
.hexaryPath(ps.rpDB.rootKey, getFn)
.next(getFn)
.getNibbles
if 64 == path.len:
return ok(path.getBytes.convertTo(Hash256))
err(AccountNotFound)
proc prevChainDbKey*(
ps: AccountsDbSessionRef;
accHash: Hash256
): Result[Hash256,HexaryDbError] =
## Fetch the account path on the `BaseChainDB`, the one preceding the
## argument account.
noRlpExceptionOops("prevChainDbKey()"):
let
getFn: HexaryGetFn = proc(key: Blob): Blob = ps.base.db.get(key)
path = accHash.to(NodeKey)
.hexaryPath(ps.rpDB.rootKey, getFn)
.prev(getFn)
.getNibbles
if 64 == path.len:
return ok(path.getBytes.convertTo(Hash256))
err(AccountNotFound)
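A hedged sketch for stepping through accounts in path order on the persistent DB using the helpers above; `firstAccHash` is an assumed starting point.

var accHash = firstAccHash
while true:
  let rc = ps.nextChainDbKey(accHash)
  if rc.isErr:
    break                       # no account after `accHash`
  accHash = rc.value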
proc getBulkDbXAccount*(
ps: AccountsDbSessionRef;
accHash: Hash256
): Result[Account,HexaryDbError] =
): Result[Account,HexaryDbError] =
## Fetch account from the additional sub-table (parallel to `BaseChainDB`),
## used when rocksdb stored directly while a parallel table was used to
## store the same data via `put()`.
try:
noRlpExceptionOops("getBulkDbXAccount()"):
let
getFn: HexaryGetFn = proc(key: Blob): Blob =
var tag: NodeTag
discard tag.init(key)
ps.base.db.get(tag.bulkStorageChainDbHexaryXKey().toOpenArray)
path = accHash.to(NodeKey)
(_, _, leafBlob) = ps.rpDB.hexaryFollow(ps.rpDB.rootKey, path, getFn)
if 0 < leafBlob.len:
let acc = rlp.decode(leafBlob,Account)
leaf = accHash.to(NodeKey).hexaryPath(ps.rpDB.rootKey, getFn).leafData
if 0 < leaf.len:
let acc = rlp.decode(leaf,Account)
return ok(acc)
except RlpError:
return err(RlpEncoding)
except Defect as e:
raise e
except Exception as e:
raiseAssert "Ooops getChainDbAccount(): name=" & $e.name & " msg=" & e.msg
err(AccountNotFound)
@ -515,7 +555,9 @@ proc assignPrettyKeys*(ps: AccountsDbSessionRef) =
proc dumpPath*(ps: AccountsDbSessionRef; key: NodeTag): seq[string] =
## Pretty print helper compiling the path into the repair tree for the
## argument `key`.
ps.rpDB.dumpPath(key)
noKeyError("dumpPath"):
let rPath = key.hexaryPath(ps.rpDB)
result = rPath.path.mapIt(it.pp(ps.rpDB)) & @["(" & rPath.tail.pp & ")"]
proc dumpProofsDB*(ps: AccountsDbSessionRef): seq[string] =
## Dump the entries from the repair tree.
@ -527,58 +569,6 @@ proc dumpProofsDB*(ps: AccountsDbSessionRef): seq[string] =
cmp(x[0],y[0])
result = accu.sorted(cmpIt).mapIt(it[1])
# ---------
proc dumpRoot*(root: Hash256; name = "snapRoot*"): string =
noPpError("dumpRoot"):
result = "import\n"
result &= " eth/common/eth_types,\n"
result &= " nimcrypto/hash,\n"
result &= " stew/byteutils\n\n"
result &= "const\n"
result &= &" {name} =\n"
result &= &" \"{root.pp(false)}\".toDigest\n"
proc dumpSnapAccountRange*(
base: NodeTag;
data: SnapAccountRange;
name = "snapData*"
): string =
noPpError("dumpSnapAccountRange"):
result = &" {name} = ("
result &= &"\n \"{base.to(Hash256).pp(false)}\".toDigest,"
result &= "\n @["
let accPfx = "\n "
for n in 0 ..< data.accounts.len:
let
hash = data.accounts[n].accHash
body = data.accounts[n].accBody
if 0 < n:
result &= accPfx
result &= &"# <{n}>"
result &= &"{accPfx}(\"{hash.pp(false)}\".toDigest,"
result &= &"{accPfx} {body.nonce}u64,"
result &= &"{accPfx} \"{body.balance}\".parse(Uint256),"
result &= &"{accPfx} \"{body.storageRoot.pp(false)}\".toDigest,"
result &= &"{accPfx} \"{body.codehash.pp(false)}\".toDigest),"
if result[^1] == ',':
result[^1] = ']'
else:
result &= "]"
result &= ",\n @["
let blobPfx = "\n "
for n in 0 ..< data.proof.len:
let blob = data.proof[n]
if 0 < n:
result &= blobPfx
result &= &"# <{n}>"
result &= &"{blobPfx}\"{blob.pp}\".hexToSeqByte,"
if result[^1] == ',':
result[^1] = ']'
else:
result &= "]"
result &= ")\n"
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -9,7 +9,7 @@
# except according to those terms.
import
std/[hashes, sequtils, strformat, strutils, tables],
std/[hashes, sequtils, strutils, tables],
eth/[common/eth_types, p2p, trie/nibbles],
nimcrypto/keccak,
stint,
@ -33,16 +33,6 @@ type
RepairKey* = distinct ByteArray33
## Byte prefixed `NodeKey` for internal DB records
RNodeKind* = enum
Branch
Extension
Leaf
RNodeState* = enum
Static = 0 ## Inserted as proof record
Locked ## Like `Static`, only added on-the-fly
Mutable ## Open for modification
# Example trie from https://eth.wiki/en/fundamentals/patricia-tree
#
# lookup data:
@ -72,70 +62,129 @@ type
# "dodge": 6 4 6 f 6 7 6 5
# "horse": 6 8 6 f 7 2 7 3 6 5
NodeKind* = enum
Branch
Extension
Leaf
RNodeState* = enum
Static = 0 ## Inserted as proof record
Locked ## Like `Static`, only added on-the-fly
Mutable ## Open for modification
RNodeRef* = ref object
## For building a temporary repair tree
state*: RNodeState ## `Static` if added as proof data
case kind*: RNodeKind
## Node for building a temporary hexary trie coined `repair tree`.
state*: RNodeState ## `Static` if added from proof data set
case kind*: NodeKind
of Leaf:
lPfx*: NibblesSeq ## Portion of path segment
lPfx*: NibblesSeq ## Portion of path segment
lData*: Blob
of Extension:
ePfx*: NibblesSeq ## Portion of path segment
eLink*: RepairKey ## Single down link
ePfx*: NibblesSeq ## Portion of path segment
eLink*: RepairKey ## Single down link
of Branch:
bLink*: array[16,RepairKey] ## Down links
bLink*: array[16,RepairKey] ## Down links
#
# Paraphrased comment from Andri's `stateless/readme.md` file in chapter
# `Deviation from yellow paper`, (also found here
# github.com/status-im/nimbus-eth1
# /tree/master/stateless#deviation-from-yellow-paper)
# [..] In the Yellow Paper, the 17th elem of the branch node can contain
# a value. But it is always empty in a real Ethereum state trie. The
# block witness spec also ignores this 17th elem when encoding or
# decoding a branch node. This can happen because in an Ethereum secure
# hexary trie, every key has a uniform length of 32 bytes or 64 nibbles.
# With the absence of the 17th element, a branch node will never contain
# a leaf value.
bData*: Blob
RPathStep* = object
## For constructing tree traversal `seq[RPathStep]` path
key*: RepairKey ## Tree label, node hash
node*: RNodeRef ## Refers to data record
nibble*: int8 ## Branch node selector (if any)
XNodeObj* = object
## Simplified version of `RNodeRef` to be used as a node for `XPathStep`
case kind*: NodeKind
of Leaf:
lPfx*: NibblesSeq ## Portion of path segment
lData*: Blob
of Extension:
ePfx*: NibblesSeq ## Portion of path segment
eLink*: Blob ## Single down link
of Branch:
bLink*: array[17,Blob] ## Down links followed by data
RPathXStep* = object
## Extended `RPathStep` needed for `NodeKey` assignment
pos*: int ## Some position into `seq[RPathStep]`
step*: RPathStep ## Modified copy of an `RPathStep`
canLock*: bool ## Can set `Locked` state
RPathStep* = object
## For constructing a repair tree traversal path `RPath`
key*: RepairKey ## Tree label, node hash
node*: RNodeRef ## Refers to data record
nibble*: int8 ## Branch node selector (if any)
RPath* = object
path*: seq[RPathStep]
tail*: NibblesSeq ## Stands for non completed leaf path
tail*: NibblesSeq ## Stands for non completed leaf path
XPathStep* = object
## Similar to `RPathStep` for an arbitrary (sort of transparent) trie
key*: Blob ## Node hash implied by `node` data
node*: XNodeObj
nibble*: int8 ## Branch node selector (if any)
XPath* = object
path*: seq[XPathStep]
tail*: NibblesSeq ## Stands for non completed leaf path
depth*: int ## May indicate path length (typically 64)
RLeafSpecs* = object
## Temporarily stashed leaf data (as for an account.) Proper records
## have non-empty payload. Records with empty payload are administrative
## items, e.g. lower boundary records.
pathTag*: NodeTag ## Equivalent to account hash
nodeKey*: RepairKey ## Leaf hash into hexary repair table
payload*: Blob ## Data payload
pathTag*: NodeTag ## Equivalent to account hash
nodeKey*: RepairKey ## Leaf hash into hexary repair table
payload*: Blob ## Data payload
HexaryTreeDB* = object
rootKey*: NodeKey ## Current root node
tab*: Table[RepairKey,RNodeRef] ## Repair table
acc*: seq[RLeafSpecs] ## Accounts to approve of
repairKeyGen*: uint64 ## Unique tmp key generator
keyPp*: HexaryPpFn ## For debugging
rootKey*: NodeKey ## Current root node
tab*: Table[RepairKey,RNodeRef] ## Repair table
acc*: seq[RLeafSpecs] ## Accounts to approve of
repairKeyGen*: uint64 ## Unique tmp key generator
keyPp*: HexaryPpFn ## For debugging
const
EmptyNodeBlob* = seq[byte].default
EmptyNibbleRange* = EmptyNodeBlob.initNibbleRange
static:
# Not that there is no doubt about this ...
doAssert NodeKey.default.ByteArray32.initNibbleRange.len == 64
var
disablePrettyKeys* = false ## Debugging, print raw keys if `true`
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc pp(key: RepairKey): string =
key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.toLowerAscii
proc initImpl(key: var RepairKey; data: openArray[byte]): bool =
key.reset
if data.len <= 33:
if 0 < data.len:
let trg = addr key.ByteArray33[33 - data.len]
trg.copyMem(unsafeAddr data[0], data.len)
return true
proc initImpl(key: var NodeKey; data: openArray[byte]): bool =
key.reset
if data.len <= 32:
if 0 < data.len:
let trg = addr key.ByteArray32[32 - data.len]
trg.copyMem(unsafeAddr data[0], data.len)
return true
# ------------------------------------------------------------------------------
# Public debugging helpers
# Private debugging helpers
# ------------------------------------------------------------------------------
proc pp*(s: string; hex = false): string =
proc toPfx(indent: int): string =
"\n" & " ".repeat(indent)
proc ppImpl(s: string; hex = false): string =
## For long strings print `begin..end` only
if hex:
let n = (s.len + 1) div 2
@ -147,53 +196,97 @@ proc pp*(s: string; hex = false): string =
(if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]) &
"..(" & $s.len & ").." & s[s.len-16 ..< s.len]
proc ppImpl(key: RepairKey; db: HexaryTreeDB): string =
try:
if not disablePrettyKeys and not db.keyPp.isNil:
return db.keyPp(key)
except:
discard
key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.toLowerAscii
proc ppImpl(w: openArray[RepairKey]; db: HexaryTreeDB): string =
w.mapIt(it.ppImpl(db)).join(",")
proc ppImpl(w: openArray[Blob]; db: HexaryTreeDB): string =
var q: seq[RepairKey]
for a in w:
var key: RepairKey
discard key.initImpl(a)
q.add key
q.ppImpl(db)
proc ppStr(blob: Blob): string =
if blob.len == 0: ""
else: blob.mapIt(it.toHex(2)).join.toLowerAscii.ppImpl(hex = true)
proc ppImpl(n: RNodeRef; db: HexaryTreeDB): string =
let so = n.state.ord
case n.kind:
of Leaf:
["l","ł","L"][so] & "(" & $n.lPfx & "," & n.lData.ppStr & ")"
of Extension:
["e","","E"][so] & "(" & $n.ePfx & "," & n.eLink.ppImpl(db) & ")"
of Branch:
["b","þ","B"][so] & "(" & n.bLink.ppImpl(db) & "," & n.bData.ppStr & ")"
proc ppImpl(n: XNodeObj; db: HexaryTreeDB): string =
case n.kind:
of Leaf:
"l(" & $n.lPfx & "," & n.lData.ppStr & ")"
of Extension:
var key: RepairKey
discard key.initImpl(n.eLink)
"e(" & $n.ePfx & "," & key.ppImpl(db) & ")"
of Branch:
"b(" & n.bLink[0..15].ppImpl(db) & "," & n.bLink[16].ppStr & ")"
proc ppImpl(w: RPathStep; db: HexaryTreeDB): string =
let
nibble = if 0 <= w.nibble: w.nibble.toHex(1).toLowerAscii else: "ø"
key = w.key.ppImpl(db)
"(" & key & "," & nibble & "," & w.node.ppImpl(db) & ")"
proc ppImpl(w: XPathStep; db: HexaryTreeDB): string =
let nibble = if 0 <= w.nibble: w.nibble.toHex(1).toLowerAscii else: "ø"
var key: RepairKey
discard key.initImpl(w.key)
"(" & key.ppImpl(db) & "," & $nibble & "," & w.node.ppImpl(db) & ")"
# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------
proc pp*(s: string; hex = false): string =
## For long strings print `begin..end` only
s.ppImpl(hex)
proc pp*(w: NibblesSeq): string =
$w
proc pp*(key: RepairKey; db: HexaryTreeDB): string =
try:
if not db.keyPp.isNil:
return db.keyPp(key)
except:
discard
key.pp
key.ppImpl(db)
proc pp*(w: openArray[RepairKey]; db: HexaryTreeDB): string =
"<" & w.mapIt(it.pp(db)).join(",") & ">"
proc pp*(w: RNodeRef|XNodeObj|RPathStep; db: HexaryTreeDB): string =
w.ppImpl(db)
proc pp*(n: RNodeRef; db: HexaryTreeDB): string
{.gcsafe, raises: [Defect, ValueError].} =
proc ppStr(blob: Blob): string =
if blob.len == 0: ""
else: blob.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true)
let so = n.state.ord
case n.kind:
of Leaf:
result = ["l","ł","L"][so] & &"({n.lPfx.pp},{n.lData.ppStr})"
of Extension:
result = ["e","","E"][so] & &"({n.ePfx.pp},{n.eLink.pp(db)})"
of Branch:
result = ["b","þ","B"][so] & &"({n.bLink.pp(db)},{n.bData.ppStr})"
proc pp*(w:openArray[RPathStep|XPathStep]; db:HexaryTreeDB; indent=4): string =
w.toSeq.mapIt(it.ppImpl(db)).join(indent.toPfx)
proc pp*(w: RPath; db: HexaryTreeDB; indent=4): string =
w.path.pp(db,indent) & indent.toPfx & "(" & $w.tail & ")"
proc pp*(w: XPath; db: HexaryTreeDB; indent=4): string =
w.path.pp(db,indent) & indent.toPfx & "(" & $w.tail & "," & $w.depth & ")"
# ------------------------------------------------------------------------------
# Public constructor (or similar)
# ------------------------------------------------------------------------------
proc init*(key: var NodeKey; data: openArray[byte]): bool =
key.reset
if data.len <= 32:
if 0 < data.len:
let trg = addr key.ByteArray32[32 - data.len]
trg.copyMem(unsafeAddr data[0], data.len)
return true
key.initImpl(data)
proc init*(key: var RepairKey; data: openArray[byte]): bool =
key.reset
if data.len <= 33:
if 0 < data.len:
let trg = addr key.ByteArray33[33 - data.len]
trg.copyMem(unsafeAddr data[0], data.len)
return true
key.initImpl(data)
proc newRepairKey*(db: var HexaryTreeDB): RepairKey =
db.repairKeyGen.inc
@ -239,7 +332,7 @@ proc isNodeKey*(a: RepairKey): bool =
proc digestTo*(data: Blob; T: type NodeKey): T =
keccak256.digest(data).data.T
proc convertTo*[W: NodeKey|RepairKey](data: openArray[byte]; T: type W): T =
proc convertTo*[W: NodeKey|RepairKey](data: Blob; T: type W): T =
## Probably lossy conversion, use `init()` for safe conversion
discard result.init(data)

View File

@ -1,146 +0,0 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## This module is sort of a customised rewrite of the function
## `eth/trie/hexary.getAux()`, `getkeysAux()`, etc.
import
std/sequtils,
chronicles,
eth/[common/eth_types, trie/nibbles],
./hexary_desc
{.push raises: [Defect].}
const
HexaryFollowDebugging = false or true
type
HexaryGetFn* = proc(key: Blob): Blob {.gcsafe.}
## For testing/debugging: database get() function
# ------------------------------------------------------------------------------
# Public walk along hexary trie records
# ------------------------------------------------------------------------------
proc hexaryFollow*(
db: HexaryTreeDB;
root: NodeKey;
path: NibblesSeq;
getFn: HexaryGetFn
): (int, bool, Blob)
{.gcsafe, raises: [Defect,RlpError]} =
## Returns the number of matching digits/nibbles from the argument `path`
## found in the proofs trie.
let
nNibbles = path.len
var
inPath = path
recKey = root.ByteArray32.toSeq
leafBlob: Blob
emptyRef = false
when HexaryFollowDebugging:
trace "follow", rootKey=root.to(RepairKey).pp(db), path
while true:
let value = recKey.getFn()
if value.len == 0:
break
var nodeRlp = rlpFromBytes value
case nodeRlp.listLen:
of 2:
let
(isLeaf, pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
sharedNibbles = inPath.sharedPrefixLen(pathSegment)
fullPath = sharedNibbles == pathSegment.len
inPathLen = inPath.len
inPath = inPath.slice(sharedNibbles)
# Leaf node
if isLeaf:
let leafMode = sharedNibbles == inPathLen
if fullPath and leafMode:
leafBlob = nodeRlp.listElem(1).toBytes
when HexaryFollowDebugging:
let nibblesLeft = inPathLen - sharedNibbles
trace "follow leaf",
fullPath, leafMode, sharedNibbles, nibblesLeft,
pathSegment, newPath=inPath
break
# Extension node
if fullPath:
let branch = nodeRlp.listElem(1)
if branch.isEmpty:
when HexaryFollowDebugging:
trace "follow extension", newKey="n/a"
emptyRef = true
break
recKey = branch.toBytes
when HexaryFollowDebugging:
trace "follow extension",
newKey=recKey.convertTo(RepairKey).pp(db), newPath=inPath
else:
when HexaryFollowDebugging:
trace "follow extension",
fullPath, sharedNibbles, pathSegment, inPathLen, newPath=inPath
break
of 17:
# Branch node
if inPath.len == 0:
leafBlob = nodeRlp.listElem(1).toBytes
break
let
inx = inPath[0].int
branch = nodeRlp.listElem(inx)
if branch.isEmpty:
when HexaryFollowDebugging:
trace "follow branch", newKey="n/a"
emptyRef = true
break
inPath = inPath.slice(1)
recKey = branch.toBytes
when HexaryFollowDebugging:
trace "follow branch",
newKey=recKey.convertTo(RepairKey).pp(db), inx, newPath=inPath
else:
when HexaryFollowDebugging:
trace "follow oops",
nColumns = nodeRlp.listLen
break
# end while
let pathLen = nNibbles - inPath.len
when HexaryFollowDebugging:
trace "follow done",
recKey, emptyRef, pathLen, leafSize=leafBlob.len
(pathLen, emptyRef, leafBlob)
proc hexaryFollow*(
db: HexaryTreeDB;
root: NodeKey;
path: NodeKey;
getFn: HexaryGetFn;
): (int, bool, Blob)
{.gcsafe, raises: [Defect,RlpError]} =
## Variant of `hexaryFollow()`
db.hexaryFollow(root, path.to(NibblesSeq), getFn)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -15,59 +15,35 @@
## re-factored database layer.
import
std/[sequtils, strformat, strutils, tables],
std/[sequtils, strutils, tables],
eth/[common/eth_types, trie/nibbles],
stew/results,
../../range_desc,
"."/[hexary_defs, hexary_desc]
"."/[hexary_defs, hexary_desc, hexary_paths]
{.push raises: [Defect].}
const
RepairTreeDebugging = false
EmptyNibbleRange = EmptyNodeBlob.initNibbleRange
type
RPathXStep = object
## Extended `RPathStep` needed for `NodeKey` assignment
pos*: int ## Some position into `seq[RPathStep]`
step*: RPathStep ## Modified copy of an `RPathStep`
canLock*: bool ## Can set `Locked` state
# ------------------------------------------------------------------------------
# Private debugging helpers
# ------------------------------------------------------------------------------
template noPpError(info: static[string]; code: untyped) =
try:
code
except ValueError as e:
raiseAssert "Inconveivable (" & info & "): " & e.msg
except KeyError as e:
raiseAssert "Not possible (" & info & "): " & e.msg
except Defect as e:
raise e
except Exception as e:
raiseAssert "Ooops (" & info & ") " & $e.name & ": " & e.msg
proc pp(w: RPathStep; db: HexaryTreeDB): string =
noPpError("pp(RPathStep)])"):
let nibble = if 0 <= w.nibble: &"{w.nibble:x}" else: "ø"
result = &"({w.key.pp(db)},{nibble},{w.node.pp(db)})"
proc pp(w: openArray[RPathStep]; db: HexaryTreeDB; indent = 4): string =
let pfx = "\n" & " ".repeat(indent)
noPpError("pp(seq[RPathStep])"):
result = w.toSeq.mapIt(it.pp(db)).join(pfx)
proc pp(w: RPath; db: HexaryTreeDB; indent = 4): string =
let pfx = "\n" & " ".repeat(indent)
noPpError("pp(RPath)"):
result = w.path.pp(db,indent) & &"{pfx}({w.tail.pp})"
proc pp(w: RPathXStep; db: HexaryTreeDB): string =
noPpError("pp(RPathXStep)"):
let y = if w.canLock: "lockOk" else: "noLock"
result = &"({w.pos},{y},{w.step.pp(db)})"
let y = if w.canLock: "lockOk" else: "noLock"
"(" & $w.pos & "," & y & "," & w.step.pp(db) & ")"
proc pp(w: seq[RPathXStep]; db: HexaryTreeDB; indent = 4): string =
let pfx = "\n" & " ".repeat(indent)
noPpError("pp(seq[RPathXStep])"):
result = w.mapIt(it.pp(db)).join(pfx)
w.mapIt(it.pp(db)).join(pfx)
# ------------------------------------------------------------------------------
# Private helpers
@ -77,13 +53,6 @@ proc dup(node: RNodeRef): RNodeRef =
new result
result[] = node[]
template noKeyError(info: static[string]; code: untyped) =
try:
code
except KeyError as e:
raiseAssert "Not possible (" & info & "): " & e.msg
# ------------------------------------------------------------------------------
# Private getters & setters
# ------------------------------------------------------------------------------
@ -230,123 +199,85 @@ proc rTreeSplitNode(
# Private functions, repair tree actions
# ------------------------------------------------------------------------------
proc rTreeFollow(
nodeKey: NodeKey;
db: var HexaryTreeDB
): RPath =
## Compute longest possible path matching the `nodeKey` nibbles.
result.tail = nodeKey.to(NibblesSeq)
noKeyError("rTreeFollow"):
var key = db.rootKey.to(RepairKey)
while db.tab.hasKey(key) and 0 < result.tail.len:
let node = db.tab[key]
case node.kind:
of Leaf:
if result.tail.len == result.tail.sharedPrefixLen(node.lPfx):
# Bingo, got full path
result.path.add RPathStep(key: key, node: node, nibble: -1)
result.tail = EmptyNibbleRange
return
of Branch:
let nibble = result.tail[0].int8
if node.bLink[nibble].isZero:
return
result.path.add RPathStep(key: key, node: node, nibble: nibble)
result.tail = result.tail.slice(1)
key = node.bLink[nibble]
of Extension:
if node.ePfx.len != result.tail.sharedPrefixLen(node.ePfx):
return
result.path.add RPathStep(key: key, node: node, nibble: -1)
result.tail = result.tail.slice(node.ePfx.len)
key = node.eLink
proc rTreeFollow(
nodeTag: NodeTag;
db: var HexaryTreeDB
): RPath =
## Variant of `rTreeFollow()`
nodeTag.to(NodeKey).rTreeFollow(db)
proc rTreeInterpolate(
rPath: RPath;
db: var HexaryTreeDB
): RPath =
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
## Extend path, add missing nodes to tree. The last node added will be
## a `Leaf` node if this function succeeds.
##
## The function assumes that the `RPath` argument is the longest possible
## as just constructed by `rTreeFollow()`
if 0 < rPath.path.len and 0 < rPath.tail.len:
noKeyError("rTreeExtend"):
let step = rPath.path[^1]
case step.node.kind:
let step = rPath.path[^1]
case step.node.kind:
of Branch:
# Now, the slot must not be empty. An empty slot would lead to a
# rejection of this record as last valid step, contrary to the
# assumption `path` is the longest one.
if step.nibble < 0:
return # sanity check failed
let key = step.node.bLink[step.nibble]
if key.isZero:
return # sanity check failed
# Case: unused slot => add leaf record
if not db.tab.hasKey(key):
return db.rTreeExtendLeaf(rPath, key)
# So a `child` node exists but it is something that could not be used to
# extend the argument `path` which is assumed the longest possible one.
let child = db.tab[key]
case child.kind:
of Branch:
# Now, the slot must not be empty. An empty slot would lead to a
# rejection of this record as last valid step, contrary to the
# assumption `path` is the longest one.
if step.nibble < 0:
return # sanity check failed
let key = step.node.bLink[step.nibble]
if key.isZero:
return # sanity check failed
# Case: unused slot => add leaf record
if not db.tab.hasKey(key):
return db.rTreeExtendLeaf(rPath, key)
# So a `child` node exists but it is something that could not be used to
# extend the argument `path` which is assumed the longest possible one.
let child = db.tab[key]
case child.kind:
of Branch:
# So a `Leaf` node can be linked into the `child` branch
return db.rTreeExtendLeaf(rPath, key, child)
# Need to split the right `grandChild` in `child -> grandChild`
# into parts:
#
# left(Extension) -> middle(Branch)
# | |
# | +-----> right(Extension or Leaf) ...
# +---------> new Leaf record
#
# where either `left()` or `right()` extensions might be missing
of Extension, Leaf:
var xPath = db.rTreeSplitNode(rPath, key, child)
if 0 < xPath.path.len:
# Append `Leaf` node
xPath.path[^1].nibble = xPath.tail[0].int8
xPath.tail = xPath.tail.slice(1)
return db.rTreeExtendLeaf(xPath, db.newRepairKey())
of Leaf:
return # Oops
of Extension:
let key = step.node.eLink
var child: RNodeRef
if db.tab.hasKey(key):
child = db.tab[key]
# `Extension` can only be followed by a `Branch` node
if child.kind != Branch:
return
else:
# Case: unused slot => add `Branch` and `Leaf` record
child = RNodeRef(
state: Mutable,
kind: Branch)
db.tab[key] = child
# So a `Leaf` node can be linked into the `child` branch
return db.rTreeExtendLeaf(rPath, key, child)
# Need to split the right `grandChild` in `child -> grandChild`
# into parts:
#
# left(Extension) -> middle(Branch)
# | |
# | +-----> right(Extension or Leaf) ...
# +---------> new Leaf record
#
# where either `left()` or `right()` extensions might be missing
of Extension, Leaf:
var xPath = db.rTreeSplitNode(rPath, key, child)
if 0 < xPath.path.len:
# Append `Leaf` node
xPath.path[^1].nibble = xPath.tail[0].int8
xPath.tail = xPath.tail.slice(1)
return db.rTreeExtendLeaf(xPath, db.newRepairKey())
of Leaf:
return # Oops
of Extension:
let key = step.node.eLink
var child: RNodeRef
if db.tab.hasKey(key):
child = db.tab[key]
# `Extension` can only be followed by a `Branch` node
if child.kind != Branch:
return
else:
# Case: unused slot => add `Branch` and `Leaf` record
child = RNodeRef(
state: Mutable,
kind: Branch)
db.tab[key] = child
# So a `Leaf` node can be linked into the `child` branch
return db.rTreeExtendLeaf(rPath, key, child)
proc rTreeInterpolate(
rPath: RPath;
db: var HexaryTreeDB;
payload: Blob
): RPath =
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
## Variant of `rTreeExtend()` which completes a `Leaf` record.
result = rPath.rTreeInterpolate(db)
if 0 < result.path.len and result.tail.len == 0:
@ -358,7 +289,8 @@ proc rTreeInterpolate(
proc rTreeUpdateKeys(
rPath: RPath;
db: var HexaryTreeDB
): Result[void,int] =
): Result[void,int]
{.gcsafe, raises: [Defect,KeyError]} =
## The argument `rPath` is assumed to organise database nodes as
##
## root -> ... -> () -> () -> ... -> () -> () ...
@ -468,7 +400,10 @@ proc rTreeUpdateKeys(
# Public fuctions
# ------------------------------------------------------------------------------
proc hexary_interpolate*(db: var HexaryTreeDB): Result[void,HexaryDbError] =
proc hexary_interpolate*(
db: var HexaryTreeDB
): Result[void,HexaryDbError]
{.gcsafe, raises: [Defect,KeyError]} =
## Verify accounts by interpolating the collected accounts on the hexary
## trie of the repair database. If all accounts can be represented in the
## hexary trie, they are considered validated.
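A hedged usage sketch, assuming proof nodes and accounts have already been collected into the repair database `db`:

let rc = db.hexary_interpolate()
if rc.isErr:
  echo "interpolation failed: ", rc.error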
@ -477,7 +412,7 @@ proc hexary_interpolate*(db: var HexaryTreeDB): Result[void,HexaryDbError] =
for n in countDown(db.acc.len-1,0):
let acc = db.acc[n]
if acc.payload.len != 0:
let rPath = acc.pathTag.rTreeFollow(db)
let rPath = acc.pathTag.hexaryPath(db)
var repairKey = acc.nodeKey
if repairKey.isZero and 0 < rPath.path.len and rPath.tail.len == 0:
repairKey = rPath.path[^1].key
@ -485,7 +420,7 @@ proc hexary_interpolate*(db: var HexaryTreeDB): Result[void,HexaryDbError] =
if repairKey.isZero:
let
update = rPath.rTreeInterpolate(db, acc.payload)
final = acc.pathTag.rTreeFollow(db)
final = acc.pathTag.hexaryPath(db)
if update != final:
return err(AccountRepairBlocked)
db.acc[n].nodeKey = rPath.path[^1].key
@ -495,7 +430,7 @@ proc hexary_interpolate*(db: var HexaryTreeDB): Result[void,HexaryDbError] =
for n in countDown(db.acc.len-1,0):
let acc = db.acc[n]
if not acc.nodeKey.isZero:
let rPath = acc.pathTag.rTreeFollow(db)
let rPath = acc.pathTag.hexaryPath(db)
if rPath.path[^1].node.state == Mutable:
let rc = rPath.rTreeUpdateKeys(db)
if rc.isErr:
@ -504,7 +439,7 @@ proc hexary_interpolate*(db: var HexaryTreeDB): Result[void,HexaryDbError] =
while 0 < reVisit.len:
var again: seq[NodeTag]
for nodeTag in reVisit:
let rc = nodeTag.rTreeFollow(db).rTreeUpdateKeys(db)
let rc = nodeTag.hexaryPath(db).rTreeUpdateKeys(db)
if rc.isErr:
again.add nodeTag
if reVisit.len <= again.len:
@ -513,16 +448,6 @@ proc hexary_interpolate*(db: var HexaryTreeDB): Result[void,HexaryDbError] =
ok()
# ------------------------------------------------------------------------------
# Debugging
# ------------------------------------------------------------------------------
proc dumpPath*(db: var HexaryTreeDB; key: NodeTag): seq[string] =
## Pretty print helper compiling the path into the repair tree for the
## argument `key`.
let rPath = key.rTreeFollow(db)
rPath.path.mapIt(it.pp(db)) & @["(" & rPath.tail.pp & ")"]
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,487 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Find node paths in hexary tries.
import
std/[sequtils, tables],
eth/[common/eth_types, trie/nibbles],
../../range_desc,
./hexary_desc
{.push raises: [Defect].}
const
HexaryXPathDebugging = false # or true
type
HexaryGetFn* = proc(key: Blob): Blob {.gcsafe.}
## For testing/debugging: database get() function
# ------------------------------------------------------------------------------
# Private debugging helpers
# ------------------------------------------------------------------------------
proc pp(w: Blob; db: HexaryTreeDB): string =
w.convertTo(RepairKey).pp(db)
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc to(w: NodeKey; T: type Blob): T =
w.ByteArray32.toSeq
proc getNibblesImpl(path: XPath; start = 0): NibblesSeq =
## Re-build the key path
for n in start ..< path.path.len:
let it = path.path[n]
case it.node.kind:
of Branch:
result = result & @[it.nibble.byte].initNibbleRange.slice(1)
of Extension:
result = result & it.node.ePfx
of Leaf:
result = result & it.node.lPfx
result = result & path.tail
proc toBranchNode(
rlp: Rlp
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
var rlp = rlp
XNodeObj(kind: Branch, bLink: rlp.read(array[17,Blob]))
proc toLeafNode(
rlp: Rlp;
pSegm: NibblesSeq
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
XNodeObj(kind: Leaf, lPfx: pSegm, lData: rlp.listElem(1).toBytes)
proc toExtensionNode(
rlp: Rlp;
pSegm: NibblesSeq
): XNodeObj
{.gcsafe, raises: [Defect,RlpError]} =
XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes)
# not now ...
when false:
proc `[]`(path: XPath; n: int): XPathStep =
path.path[n]
proc `[]`(path: XPath; s: Slice[int]): XPath =
XPath(path: path.path[s.a .. s.b], tail: path.getNibbles(s.b+1))
proc len(path: XPath): int =
path.path.len
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc pathExtend(
path: RPath;
key: RepairKey;
db: HexaryTreeDB
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
## For the given path, extend to the longest possible repair tree `db`
## path following the argument `path.tail`.
result = path
var key = key
while db.tab.hasKey(key) and 0 < result.tail.len:
let node = db.tab[key]
case node.kind:
of Leaf:
if result.tail.len == result.tail.sharedPrefixLen(node.lPfx):
# Bingo, got full path
result.path.add RPathStep(key: key, node: node, nibble: -1)
result.tail = EmptyNibbleRange
return
of Branch:
let nibble = result.tail[0].int8
if node.bLink[nibble].isZero:
return
result.path.add RPathStep(key: key, node: node, nibble: nibble)
result.tail = result.tail.slice(1)
key = node.bLink[nibble]
of Extension:
if node.ePfx.len != result.tail.sharedPrefixLen(node.ePfx):
return
result.path.add RPathStep(key: key, node: node, nibble: -1)
result.tail = result.tail.slice(node.ePfx.len)
key = node.eLink
proc pathExtend(
path: XPath;
key: Blob;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
## Ditto for `XPath` rather than `RPath`
result = path
var key = key
while true:
let value = key.getFn()
if value.len == 0:
return
var nodeRlp = rlpFromBytes value
case nodeRlp.listLen:
of 2:
let
(isLeaf, pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
nSharedNibbles = result.tail.sharedPrefixLen(pathSegment)
fullPath = (nSharedNibbles == pathSegment.len)
newTail = result.tail.slice(nSharedNibbles)
# Leaf node
if isLeaf:
let node = nodeRlp.toLeafNode(pathSegment)
result.path.add XPathStep(key: key, node: node, nibble: -1)
result.tail = newTail
return
# Extension node
if fullPath:
let node = nodeRlp.toExtensionNode(pathSegment)
if node.eLink.len == 0:
return
result.path.add XPathStep(key: key, node: node, nibble: -1)
result.tail = newTail
key = node.eLink
else:
return
of 17:
# Branch node
let node = nodeRlp.toBranchNode
if result.tail.len == 0:
result.path.add XPathStep(key: key, node: node, nibble: -1)
return
let inx = result.tail[0].int8
if node.bLink[inx].len == 0:
return
result.path.add XPathStep(key: key, node: node, nibble: inx)
result.tail = result.tail.slice(1)
key = node.bLink[inx]
else:
return
# end while
# notreached
proc pathLeast(
path: XPath;
key: Blob;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
## For the partial path given, extend by branch nodes with least node
## indices.
result = path
result.tail = EmptyNibbleRange
result.depth = result.getNibblesImpl.len
var
key = key
value = key.getFn()
if value.len == 0:
return
while true:
block loopContinue:
let nodeRlp = rlpFromBytes value
case nodeRlp.listLen:
of 2:
let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
# Leaf node
if isLeaf:
let node = nodeRlp.toLeafNode(pathSegment)
result.path.add XPathStep(key: key, node: node, nibble: -1)
result.depth += pathSegment.len
return # done ok
let node = nodeRlp.toExtensionNode(pathSegment)
if 0 < node.eLink.len:
value = node.eLink.getFn()
if 0 < value.len:
result.path.add XPathStep(key: key, node: node, nibble: -1)
result.depth += pathSegment.len
key = node.eLink
break loopContinue
of 17:
# Branch node
let node = nodeRlp.toBranchNode
if node.bLink[16].len != 0 and 64 <= result.depth:
result.path.add XPathStep(key: key, node: node, nibble: -1)
return # done ok
for inx in 0 .. 15:
let newKey = node.bLink[inx]
if 0 < newKey.len:
value = newKey.getFn()
if 0 < value.len:
result.path.add XPathStep(key: key, node: node, nibble: inx.int8)
result.depth.inc
key = newKey
break loopContinue
else:
discard
# Recurse (iteratively)
while true:
block loopRecurse:
# Modify last branch node and try again
if result.path[^1].node.kind == Branch:
for inx in result.path[^1].nibble+1 .. 15:
let newKey = result.path[^1].node.bLink[inx]
if 0 < newKey.len:
value = newKey.getFn()
if 0 < value.len:
result.path[^1].nibble = inx.int8
key = newKey
break loopContinue
# Failed, step back and try predecessor branch.
while path.path.len < result.path.len:
case result.path[^1].node.kind:
of Branch:
result.depth.dec
result.path.setLen(result.path.len - 1)
break loopRecurse
of Extension:
result.depth -= result.path[^1].node.ePfx.len
result.path.setLen(result.path.len - 1)
of Leaf:
return # Ooops
return # Failed
# Notreached
# End while
# Notreached
proc pathMost(
path: XPath;
key: Blob;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
## For the partial path given, extend by branch nodes with greatest node
## indices.
result = path
result.tail = EmptyNibbleRange
result.depth = result.getNibblesImpl.len
var
key = key
value = key.getFn()
if value.len == 0:
return
while true:
block loopContinue:
let nodeRlp = rlpFromBytes value
case nodeRlp.listLen:
of 2:
let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
# Leaf node
if isLeaf:
let node = nodeRlp.toLeafNode(pathSegment)
result.path.add XPathStep(key: key, node: node, nibble: -1)
result.depth += pathSegment.len
return # done ok
# Extension node
let node = nodeRlp.toExtensionNode(pathSegment)
if 0 < node.eLink.len:
value = node.eLink.getFn()
if 0 < value.len:
result.path.add XPathStep(key: key, node: node, nibble: -1)
result.depth += pathSegment.len
key = node.eLink
break loopContinue
of 17:
# Branch node
let node = nodeRlp.toBranchNode
if node.bLink[16].len != 0 and 64 <= result.depth:
result.path.add XPathStep(key: key, node: node, nibble: -1)
return # done ok
for inx in 15.countDown(0):
let newKey = node.bLink[inx]
if 0 < newKey.len:
value = newKey.getFn()
if 0 < value.len:
result.path.add XPathStep(key: key, node: node, nibble: inx.int8)
result.depth.inc
key = newKey
break loopContinue
else:
discard
# Recurse (iteratively)
while true:
block loopRecurse:
# Modify last branch node and try again
if result.path[^1].node.kind == Branch:
for inx in (result.path[^1].nibble-1).countDown(0):
let newKey = result.path[^1].node.bLink[inx]
if 0 < newKey.len:
value = newKey.getFn()
if 0 < value.len:
result.path[^1].nibble = inx.int8
key = newKey
break loopContinue
# Failed, step back and try predecessor branch.
while path.path.len < result.path.len:
case result.path[^1].node.kind:
of Branch:
result.depth.dec
result.path.setLen(result.path.len - 1)
break loopRecurse
of Extension:
result.depth -= result.path[^1].node.ePfx.len
result.path.setLen(result.path.len - 1)
of Leaf:
return # Ooops
return # Failed
# Notreached
# End while
# Notreached
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc getNibbles*(path: XPath; start = 0): NibblesSeq =
## Re-build the key path
path.getNibblesImpl(start)
proc leafData*(path: XPath): Blob =
## Return the leaf data from a successful `XPath` computation (if any.)
if path.tail.len == 0 and 0 < path.path.len:
let node = path.path[^1].node
case node.kind:
of Branch:
return node.bLink[16]
of Leaf:
return node.lData
of Extension:
discard
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc hexaryPath*(
nodeKey: NodeKey;
db: HexaryTreeDB
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
## Compute the longest possible repair tree `db` path matching the `nodeKey`
## nibbles. The `nodeKey` path argument comes first to support a more
## functional notation.
RPath(tail: nodeKey.to(NibblesSeq)).pathExtend(db.rootKey.to(RepairKey),db)
proc hexaryPath*(
nodeTag: NodeTag;
db: HexaryTreeDB
): RPath
{.gcsafe, raises: [Defect,KeyError]} =
## Variant of `hexaryPath()` for traversing a repair tree
nodeTag.to(NodeKey).hexaryPath(db)
proc hexaryPath*(
nodeKey: NodeKey;
root: NodeKey;
getFn: HexaryGetFn;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
## Compute the longest possible path on an arbitrary hexary trie. Note that
## this prototype resembles the other ones with the implicit `state root`.
## The rules for the prototype arguments are:
## * First argument is the node key, the node path to be followed
## * Last argument is the database (needed only here for debugging)
##
## Note that this function will flag a potential lowest level `Exception`
## in the invoking function due to the `getFn` argument.
XPath(tail: nodeKey.to(NibblesSeq)).pathExtend(root.to(Blob), getFn)
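Usage sketch (compare `getChainDbAccount()` in `accounts_db.nim`): the persistent backend is wrapped as a `HexaryGetFn`; `chainDb`, `rootKey`, `accHash` and the `to(NodeKey)` helper are assumptions here.

let
  getFn: HexaryGetFn = proc(key: Blob): Blob = chainDb.get(key)
  leaf = accHash.to(NodeKey).hexaryPath(rootKey, getFn).leafData
if 0 < leaf.len:
  let acc = rlp.decode(leaf, Account)   # the leaf is an RLP encoded account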
proc next*(
path: XPath;
getFn: HexaryGetFn;
minDepth = 64;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
## Advance the argument `path` to the next leaf node (if any.) The
## `minDepth` argument requires the result of `next()` to satisfy
## `minDepth <= next().getNibbles.len`.
var pLen = path.path.len
# Find the last branch in the path, increase link and step down
while 0 < pLen:
# Find branch node
pLen.dec
let it = path.path[pLen]
if it.node.kind == Branch and it.nibble < 15:
# Find the next item to the right in the branch list
for inx in (it.nibble + 1) .. 15:
let link = it.node.bLink[inx]
if link.len != 0:
let
branch = XPathStep(key: it.key, node: it.node, nibble: inx.int8)
walk = path.path[0 ..< pLen] & branch
newPath = XPath(path: walk).pathLeast(link, getFn)
if minDepth <= newPath.depth and 0 < newPath.leafData.len:
return newPath
proc prev*(
path: XPath;
getFn: HexaryGetFn;
minDepth = 64;
): XPath
{.gcsafe, raises: [Defect,RlpError]} =
## Advance the argument `path` to the previous leaf node (if any.) The
## `minDepth` argument requires the result of `prev()` to satisfy
## `minDepth <= prev().getNibbles.len`.
var pLen = path.path.len
# Find the last branch in the path, decrease link and step down
while 0 < pLen:
# Find branch node
pLen.dec
let it = path.path[pLen]
if it.node.kind == Branch and 0 < it.nibble:
# Find the next item to the left in the branch list
for inx in (it.nibble - 1).countDown(0):
let link = it.node.bLink[inx]
if link.len != 0:
let
branch = XPathStep(key: it.key, node: it.node, nibble: inx.int8)
walk = path.path[0 ..< pLen] & branch
newPath = XPath(path: walk).pathMost(link, getFn)
if minDepth <= newPath.depth and 0 < newPath.leafData.len:
return newPath
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
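
# A usage sketch, not part of this commit: visit the leaves following the
# argument `start` key on a persistent hexary trie by stepping with
# `next()`. Leaves at or before `start` are skipped, error handling is
# omitted, and the `root`/`getFn` pair is assumed to be provided by the
# caller.
proc walkLeaves(start, root: NodeKey; getFn: HexaryGetFn)
    {.gcsafe, raises: [Defect,RlpError].} =
  var xPath = start.hexaryPath(root, getFn)
  while true:
    xPath = xPath.next(getFn)      # advance to the next full-depth leaf
    if xPath.leafData.len == 0:
      break                        # traversal exhausted
    echo "leaf data size=", xPath.leafData.len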

View File

@ -20,6 +20,9 @@ import
".."/[range_desc, worker_desc],
"."/[accounts_db, get_account_range]
when snapAccountsDumpEnable:
import ../../../tests/replay/undump_proofs
{.push raises: [Defect].}
logScope:
@ -40,9 +43,7 @@ const
proc withMaxLen(buddy: SnapBuddyRef; iv: LeafRange): LeafRange =
## Reduce accounts interval to maximal size
let maxlen =
if buddy.ctx.data.pivotEnv.proofDumpOk: snapAccountsDumpRange
else: buddy.ctx.data.accountRangeMax
let maxlen = buddy.ctx.data.accountRangeMax
if 0 < iv.len and iv.len <= maxLen:
iv
else:
@ -178,21 +179,18 @@ proc fetchAccounts*(buddy: SnapBuddyRef): Future[bool] {.async.} =
# it was double processed which is ok.
buddy.delUnprocessed(ry.value)
# ----
# --------------------
# For dumping data ready to be used in unit tests
if env.proofDumpOk:
var fd = ctx.data.proofDumpFile
if env.proofDumpInx == 0:
fd.write dumpRoot(stateRoot)
fd.write "\n"
if rc.isErr:
fd.write " # Error: base=" & $iv.minPt & " msg=" & $rc.error & "\n"
fd.write dumpSnapAccountRange(
iv.minPt, dd.data, "snapProofData" & $env.proofDumpInx & "*")
fd.flushFile
env.proofDumpInx.inc
if snapAccountsDumpMax <= env.proofDumpInx:
env.proofDumpOk = false
when snapAccountsDumpEnable:
trace " Snap proofs dump", peer, enabled=ctx.data.proofDumpOk, iv
if ctx.data.proofDumpOk:
var fd = ctx.data.proofDumpFile
if rc.isErr:
fd.write " # Error: base=" & $iv.minPt & " msg=" & $rc.error & "\n"
fd.write "# count ", $ctx.data.proofDumpInx & "\n"
fd.write stateRoot.dumpAccountProof(iv.minPt, dd.data) & "\n"
fd.flushFile
ctx.data.proofDumpInx.inc
# ------------------------------------------------------------------------------
# End

View File

@ -14,6 +14,7 @@
## using the `snap` protocol.
import
std/sequtils,
chronos,
eth/[common/eth_types, p2p],
stew/interval_set,
@ -36,8 +37,8 @@ type
ResponseTimeout
GetAccountRange* = object
consumed*: LeafRange ## Real accounts interval covered
data*: SnapAccountRange ## reply data
consumed*: LeafRange ## Real accounts interval covered
data*: PackedAccountRange ## Re-packed reply data
# ------------------------------------------------------------------------------
# Private functions
@ -82,10 +83,14 @@ proc getAccountRange*(
if rc.value.isNone:
trace trSnapRecvTimeoutWaiting & "for reply to GetAccountRange", peer
return err(ResponseTimeout)
let snAccRange = rc.value.get
GetAccountRange(
consumed: iv,
data: rc.value.get)
consumed: iv,
data: PackedAccountRange(
proof: snAccRange.proof,
accounts: snAccRange.accounts.mapIt(PackedAccount(
accHash: it.accHash,
accBlob: it.accBody.encode))))
let
nAccounts = dd.data.accounts.len
nProof = dd.data.proof.len

View File

@ -121,7 +121,7 @@ type
HuntRange
HuntRangeFinal
WorkerHuntEx = ref object of WorkerBase
WorkerHuntEx = ref object of WorkerPivotBase
## Peer canonical chain head ("best block") search state.
syncMode: WorkerMode ## Action mode
lowNumber: BlockNumber ## Recent lowest known block number.
@ -147,10 +147,10 @@ static:
# ------------------------------------------------------------------------------
proc hunt(buddy: SnapBuddyRef): WorkerHuntEx =
buddy.data.workerBase.WorkerHuntEx
buddy.data.workerPivot.WorkerHuntEx
proc `hunt=`(buddy: SnapBuddyRef; value: WorkerHuntEx) =
buddy.data.workerBase = value
buddy.data.workerPivot = value
proc new(T: type WorkerHuntEx; syncMode: WorkerMode): T =
T(syncMode: syncMode,
@ -529,6 +529,9 @@ proc pivotStop*(buddy: SnapBuddyRef) =
## Clean up this peer
discard
proc pivotRestart*(buddy: SnapBuddyRef) =
buddy.pivotStart
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
@ -565,7 +568,8 @@ proc pivotExec*(buddy: SnapBuddyRef) {.async.} =
trace trEthRecvError & "waiting for GetBlockHeaders reply", peer,
error=e.msg
inc buddy.data.stats.major.networkErrors
buddy.pivotStop()
# Just try another peer
buddy.ctrl.zombie = true
return
if reply.isNone:

View File

@ -114,34 +114,27 @@ proc runLogTicker(t: TickerRef) {.gcsafe.} =
t.lastStats = data
t.lastTick = t.tick
var
avAccounts = ""
avUtilisation = ""
avAcc = ""
pivot = "n/a"
bulker = ""
accCoverage = "n/a"
bulk = ""
let
avCov = data.fillFactor[0].toPC(1) & "(" &
data.fillFactor[1].toPC(1) & ")"
allCov = data.accCoverage.toPC(1)
flushed = data.flushedQueues
buddies = t.nBuddies
tick = t.tick.toSI
mem = getTotalMem().uint.toSI
noFmtError("runLogTicker"):
if data.pivotBlock.isSome:
pivot = &"#{data.pivotBlock.get}({data.activeQueues})"
avAccounts =
&"{(data.accounts[0]+0.5).int64}({(data.accounts[1]+0.5).int64})"
avUtilisation =
&"{data.fillFactor[0]*100.0:.1f}%({data.fillFactor[1]*100.0:.1f}%)"
bulker =
"[" & data.bulkStore.size.toSeq.mapIt(it.toSI).join(",") & "," &
data.bulkStore.dura.toSeq.mapIt(it.pp).join(",") & "]"
accCoverage =
&"{(data.accCoverage*100.0):.1f}%"
pivot = &"#{data.pivotBlock.get}/{data.activeQueues}"
avAcc = &"{(data.accounts[0]+0.5).int64}({(data.accounts[1]+0.5).int64})"
bulk = "[" & data.bulkStore.size.toSeq.mapIt(it.toSI).join(",") & "," &
data.bulkStore.dura.toSeq.mapIt(it.pp).join(",") & "]"
info "Snap sync statistics",
tick, buddies, pivot, avAccounts, avUtilisation, accCoverage,
flushed, bulker, mem
tick, buddies, pivot, avAcc, avCov, allCov, flushed, bulk, mem
t.tick.inc
t.setLogTicker(Moment.fromNow(tickerLogInterval))

View File

@ -26,29 +26,43 @@ const
snapRequestBytesLimit* = 2 * 1024 * 1024
## Soft bytes limit to request in `snap` protocol calls.
maxPivotBlockWindow* = 500
maxPivotBlockWindow* = 50
## The maximal distance between two block headers. If the pivot block header
## (containing the current state root) is more than this many blocks
## away from a new pivot block header candidate, then the latter one
## replaces the current block header.
##
## This mechanism applies to new worker buddies which start by finding
## a new pivot.
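##
## Example: with `maxPivotBlockWindow = 50`, a current pivot at block
## #1000 is kept when a candidate header for block #1040 shows up, but
## replaced when the candidate is for block #1051 or later (illustrative
## numbers.)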
snapAccountsDumpRangeKiln = (high(UInt256) div 300000)
## Sample size for a single snap dump on `kiln` (for debugging)
switchPivotAfterCoverage* = 1.0 # * 0.30
## Stop fetching from the same pivot state root with this much coverage
## and try to find a new one. Setting this value to `1.0` disables this
## feature. Note that setting low coverage levels is primarily for
## testing/debugging (it may produce stress conditions.)
##
## If this setting is active, it typically supersedes the pivot update
## mechanism implied by the `maxPivotBlockWindow`. This is for the simple
## reason that the pivot state root is typically near the head of the
## block chain.
##
## This mechanism applies to running worker buddies. When triggered, all
## pivot handlers are reset so they will start from scratch finding a
## better pivot.
snapAccountsDumpRange* = snapAccountsDumpRangeKiln
## Activated size of a data slice if dump is enabled
# ---
snapAccountsDumpMax* = 20
## Max number of snap proof dumps (for debugging)
snapAccountsDumpEnable* = false
snapAccountsDumpEnable* = false # or true
## Enable data dump
snapAccountsDumpCoverageStop* = 0.99999
## Stop dumping if most accounts are covered
seenBlocksMax = 500
## Internal size of LRU cache (for debugging)
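
# A hedged sketch, not part of this commit, of how a worker might act on
# `switchPivotAfterCoverage`; `coverage` stands for the fetched fraction
# of the account range for the current pivot and `minCoverageReachedOk`
# is the pivot flag defined below.
#
#   when switchPivotAfterCoverage < 1.0:
#     if switchPivotAfterCoverage <= coverage:
#       env.minCoverageReachedOk = true   # stop filling this pivot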
type
WorkerBase* = ref object of RootObj
WorkerPivotBase* = ref object of RootObj
## Stub object, to be inherited in file `worker.nim`
BuddyStat* = distinct uint
@ -84,9 +98,8 @@ type
pivotAccount*: NodeTag ## Random account
availAccounts*: LeafRangeSet ## Accounts to fetch (organised as ranges)
nAccounts*: uint64 ## Number of accounts imported
# ---
proofDumpOk*: bool
proofDumpInx*: int
when switchPivotAfterCoverage < 1.0:
minCoverageReachedOk*: bool ## Stop filling this pivot
SnapPivotTable* = ##\
## LRU table, indexed by state root
@ -97,7 +110,11 @@ type
stats*: SnapBuddyStats ## Statistics counters
errors*: SnapBuddyErrors ## For error handling
pivotHeader*: Option[BlockHeader] ## For pivot state hunter
workerBase*: WorkerBase ## Opaque object reference for sub-module
workerPivot*: WorkerPivotBase ## Opaque object reference for sub-module
BuddyPoolHookFn* = proc(buddy: BuddyRef[CtxData,BuddyData]) {.gcsafe.}
## Call back applied to all buddies (the argument type is defined below
## with pretty name `SnapBuddyRef`.)
CtxData* = object
## Globally shared data extension
@ -111,17 +128,18 @@ type
pivotEnv*: SnapPivotRef ## Environment containing state root
accountRangeMax*: UInt256 ## Maximal length, high(u256)/#peers
accountsDb*: AccountsDbRef ## Proof processing for accounts
# ---
proofDumpOk*: bool
proofDumpFile*: File
runPoolHook*: BuddyPoolHookFn ## Callback for `runPool()`
# --------
when snapAccountsDumpEnable:
proofDumpOk*: bool
proofDumpFile*: File
proofDumpInx*: int
SnapBuddyRef* = ##\
SnapBuddyRef* = BuddyRef[CtxData,BuddyData]
## Extended worker peer descriptor
BuddyRef[CtxData,BuddyData]
SnapCtxRef* = ##\
SnapCtxRef* = CtxRef[CtxData]
## Extended global descriptor
CtxRef[CtxData]
# ------------------------------------------------------------------------------
# Public functions

View File

@ -28,7 +28,7 @@
## Clean up this worker peer.
##
##
## *runPool(buddy: BuddyRef[S,W])*
## *runPool(buddy: BuddyRef[S,W], last: bool)*
## Once started, the function `runPool()` is called for all worker peers in
## sequence as the body of an iteration. There will be no other worker peer
## functions activated simultaneously.
@ -36,7 +36,9 @@
## This procedure is started if the global flag `buddy.ctx.poolMode` is set
## `true` (default is `false`.) It is the responsibility of the `runPool()`
## instance to reset the flag `buddy.ctx.poolMode`, typically at the first
## peer instance as the number of active instances is unknown to `runPool()`.
## peer instance.
##
## The argument `last` is set `true` if the last entry is reached.
##
## Note that this function does not run in `async` mode.
##
@ -131,8 +133,10 @@ proc workerLoop[S,W](buddy: RunnerBuddyRef[S,W]) {.async.} =
await sleepAsync(50.milliseconds)
while dsc.singleRunLock:
await sleepAsync(50.milliseconds)
var count = dsc.buddies.len
for w in dsc.buddies.nextValues:
worker.runPool()
count.dec
worker.runPool(count == 0)
dsc.monitorLock = false
continue
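
# For illustration, not part of this commit: a `runPool()` instance can
# now use the `last` argument to reset the pool mode once the final peer
# has been served, e.g.
#
#   proc runPool*(buddy: SnapBuddyRef; last: bool) =
#     # ... per-peer pool processing ...
#     if last:
#       buddy.ctx.poolMode = false   # all active peers have been visited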

View File

@ -96,22 +96,6 @@ proc hash*(root: SomeDistinctHash256): Hash =
# Public printing and pretty printing
# ------------------------------------------------------------------------------
proc toPC*(
num: float;
digitsAfterDot: static[int] = 2;
rounding: static[float] = 5.0
): string =
## Convert argument number `num` to percent string with decimal precision
## stated as argument `digitsAfterDot`. Standard rounding is enabled by
## default adjusting the first invisible digit, set `rounding = 0` to disable.
const
minDigits = digitsAfterDot + 1
multiplier = (10 ^ (minDigits + 1)).float
roundUp = rounding / 10.0
result = ((num * multiplier) + roundUp).int.intToStr(minDigits) & "%"
result.insert(".", result.len - minDigits)
func toHex*(hash: Hash256): string =
## Shortcut for `byteutils.toHex(hash.data)`
hash.data.toHex

View File

@ -12,7 +12,7 @@
## Some logging helper moved here in absence of a known better place.
import
std/strutils
std/[math, strutils]
proc toSI*(num: SomeUnsignedInt): string =
## Prints `num` argument value greater than 99 as rounded SI unit.
@ -45,3 +45,17 @@ proc toSI*(num: SomeUnsignedInt): string =
result.insert(".", result.len - 3)
proc toPC*(
num: float;
digitsAfterDot: static[int] = 2;
rounding: static[float] = 5.0
): string =
## Convert argument number `num` to percent string with decimal precision
## stated as argument `digitsAfterDot`. Standard rounding is enabled by
## default adjusting the first invisible digit, set `rounding = 0` to disable.
const
minDigits = digitsAfterDot + 1
multiplier = (10 ^ (minDigits + 1)).float
roundUp = rounding / 10.0
result = ((num * multiplier) + roundUp).int.intToStr(minDigits) & "%"
result.insert(".", result.len - minDigits)
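
# Illustrative results, derived from the arithmetic above (not part of
# this commit):
#
#   doAssert 0.25.toPC == "25.00%"       # default: two digits after dot
#   doAssert 0.1299.toPC(1) == "13.0%"   # standard rounding adjusts up
#   doAssert 0.1299.toPC(1, rounding = 0.0) == "12.9%"  # rounding off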

View File

@ -0,0 +1,170 @@
# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
std/[os, sequtils, strformat, strutils],
eth/common,
nimcrypto,
stew/byteutils,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/hexary_desc,
./gunzip
type
UndumpState = enum
UndumpHeader
UndumpStateRoot
UndumpBase
UndumpAccounts
UndumpProofs
UndumpCommit
UndumpError
UndumpProof* = object
## Palatable output for iterator
root*: Hash256
base*: NodeTag
data*: PackedAccountRange
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template say(args: varargs[untyped]) =
# echo args
discard
proc toByteSeq(s: string): seq[byte] =
nimcrypto.fromHex(s)
proc fromHex(T: type Hash256; s: string): T =
result.data = ByteArray32.fromHex(s)
proc fromHex(T: type NodeTag; s: string): T =
UInt256.fromBytesBE(ByteArray32.fromHex(s)).T
# ------------------------------------------------------------------------------
# Public capture
# ------------------------------------------------------------------------------
proc dumpAccountProof*(
root: Hash256;
base: NodeTag;
data: PackedAccountRange;
): string =
## Dump accounts data as parseable ASCII text
proc ppStr(blob: Blob): string =
blob.mapIt(it.toHex(2)).join.toLowerAscii
proc ppStr(hash: Hash256): string =
hash.data.mapIt(it.toHex(2)).join.toLowerAscii
result = "accounts " & $data.accounts.len & " " & $data.proof.len & "\n"
result &= root.ppStr & "\n"
result &= base.to(Hash256).ppStr & "\n"
for n in 0 ..< data.accounts.len:
result &= data.accounts[n].accHash.ppStr & " "
result &= data.accounts[n].accBlob.ppStr & "\n"
for n in 0 ..< data.proof.len:
result &= data.proof[n].ppStr & "\n"
result &= "commit\n"
# ------------------------------------------------------------------------------
# Public undump
# ------------------------------------------------------------------------------
iterator undumpNextProof*(gzFile: string): UndumpProof =
var
line = ""
lno = 0
state = UndumpHeader
data: UndumpProof
nAccounts = 0u
nProofs = 0u
if not gzFile.fileExists:
raiseAssert &"No such file: \"{gzFile}\""
for lno,line in gzFile.gunzipLines:
if line.len == 0 or line[0] == '#':
continue
var flds = line.split
#echo ">>> ",
# " lno=", lno,
# " state=", state,
# " nAccounts=", nAccounts,
# " nProofs=", nProofs,
# " flds=", flds
case state:
of UndumpHeader, UndumpError:
if flds.len == 3 and flds[0] == "accounts":
nAccounts = flds[1].parseUInt
nProofs = flds[2].parseUInt
data.reset
state = UndumpStateRoot
continue
if state != UndumpError:
state = UndumpError
say &"*** line {lno}: expected header, got {line}"
of UndumpStateRoot:
if flds.len == 1:
data.root = Hash256.fromHex(flds[0])
state = UndumpBase
continue
state = UndumpError
say &"*** line {lno}: expected state root, got {line}"
of UndumpBase:
if flds.len == 1:
data.base = NodeTag.fromHex(flds[0])
state = UndumpAccounts
continue
state = UndumpError
say &"*** line {lno}: expected account base, got {line}"
of UndumpAccounts:
if flds.len == 2:
data.data.accounts.add PackedAccount(
accHash: Hash256.fromHex(flds[0]),
accBlob: flds[1].toByteSeq)
nAccounts.dec
if nAccounts <= 0:
state = UndumpProofs
continue
state = UndumpError
say &"*** line {lno}: expected account data, got {line}"
of UndumpProofs:
if flds.len == 1:
data.data.proof.add flds[0].toByteSeq
nProofs.dec
if nProofs <= 0:
state = UndumpCommit
continue
state = UndumpError
say &"*** expected proof data, got {line}"
of UndumpCommit:
if flds.len == 1 and flds[0] == "commit":
yield data
state = UndumpHeader
continue
state = UndumpError
say &"*** line {lno}: expected commit, got {line}"
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
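
# Typical use of the iterator above, as a sketch (the file name is
# hypothetical):
#
#   for w in undumpNextProof("sample0.txt.gz"):
#     echo "root=", w.root.toHex,
#       " accounts=", w.data.accounts.len, " proofs=", w.data.proof.len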

View File

@ -16,7 +16,7 @@ import
../nimbus/transaction,
../nimbus/vm_state,
../nimbus/vm_types,
./replay/undump,
./replay/undump_blocks,
eth/[common, p2p, trie/db],
unittest2

View File

@ -24,7 +24,7 @@ import
../nimbus/utils/ec_recover,
../nimbus/[config, utils, constants, context],
./test_clique/pool,
./replay/undump
./replay/undump_blocks
const
baseDir = [".", "tests", ".." / "tests", $DirSep] # path containg repo

View File

@ -32,7 +32,7 @@ import
../nimbus/[chain_config, config, genesis],
../nimbus/db/[db_chain, select_backend],
../nimbus/p2p/chain,
./replay/[undump, pp],
./replay/[undump_blocks, pp],
chronicles,
eth/[common, p2p, trie/db],
nimcrypto/hash,

View File

@ -12,7 +12,7 @@
## Snap sync components tester
import
std/[algorithm, distros, hashes, math, os,
std/[algorithm, distros, hashes, math, os, sets,
sequtils, strformat, strutils, tables, times],
chronicles,
eth/[common/eth_types, p2p, rlp, trie/db],
@ -23,12 +23,12 @@ import
../nimbus/[chain_config, config, genesis],
../nimbus/db/[db_chain, select_backend, storage_types],
../nimbus/p2p/chain,
../nimbus/sync/[types, protocol],
../nimbus/sync/types,
../nimbus/sync/snap/range_desc,
../nimbus/sync/snap/worker/[accounts_db, db/hexary_desc, db/rocky_bulk_load],
../nimbus/sync/snap/worker/accounts_db,
../nimbus/sync/snap/worker/db/[hexary_desc, rocky_bulk_load],
../nimbus/utils/prettify,
./replay/[pp, undump],
./test_sync_snap/[sample0, sample1]
./replay/[pp, undump_blocks, undump_proofs]
const
baseDir = [".", "..", ".."/"..", $DirSep]
@ -44,21 +44,11 @@ type
file: string ## name of capture file
numBlocks: int ## Number of blocks to load
AccountsProofSample = object
AccountsSample = object
name: string ## sample name, also used as sub-directory for db separation
root: Hash256
data: seq[TestSample]
TestSample = tuple
## Data layout provided by the data dump `sample0.nim`
base: Hash256
accounts: seq[(Hash256,uint64,UInt256,Hash256,Hash256)]
proofs: seq[Blob]
TestItem = object
## Palatable input format for test function
base: NodeTag
data: SnapAccountRange
file: string
firstItem: int
lastItem: int
TestDbs = object
## Provide enough spare empty databases
@ -74,16 +64,19 @@ else:
const isUbuntu32bit = false
const
sampleDirRefFile = "sample0.txt.gz"
goerliCapture: CaptureSpecs = (
name: "goerli",
network: GoerliNet,
file: "goerli68161.txt.gz",
numBlocks: 1_000)
accSample0 = AccountsProofSample(
accSample0 = AccountsSample(
name: "sample0",
root: sample0.snapRoot,
data: sample0.snapProofData)
file: "sample0.txt.gz",
firstItem: 0,
lastItem: high(int))
let
# Forces `check()` to print the error (as opposed when using `isOk()`)
@ -118,7 +111,7 @@ proc findFilePath(file: string;
return ok(path)
err()
proc getTmpDir(sampleDir = "sample0.nim"): string =
proc getTmpDir(sampleDir = sampleDirRefFile): string =
sampleDir.findFilePath(baseDir,repoDir).value.splitFile.dir
proc pp(d: Duration): string =
@ -138,6 +131,9 @@ proc pp(d: AccountLoadStats): string =
proc pp(rc: Result[Account,HexaryDbError]): string =
if rc.isErr: $rc.error else: rc.value.pp
proc pp(rc: Result[Hash256,HexaryDbError]): string =
if rc.isErr: $rc.error else: $rc.value.to(NodeTag)
proc ppKvPc(w: openArray[(string,int)]): string =
w.mapIt(&"{it[0]}={it[1]}%").join(", ")
@ -164,21 +160,23 @@ proc setErrorLevel =
# Private functions
# ------------------------------------------------------------------------------
proc to(data: seq[TestSample]; T: type seq[TestItem]): T =
proc to(sample: AccountsSample; T: type seq[UndumpProof]): T =
## Convert test data into usable format
for r in data:
result.add TestItem(
base: r.base.to(NodeTag),
data: SnapAccountRange(
proof: r.proofs,
accounts: r.accounts.mapIt(
SnapAccount(
accHash: it[0],
accBody: Account(
nonce: it[1],
balance: it[2],
storageRoot: it[3],
codeHash: it[4])))))
let file = sample.file.findFilePath(baseDir,repoDir).value
var
n = -1
root: Hash256
for w in file.undumpNextProof:
n.inc
if n < sample.firstItem:
continue
if sample.lastItem < n:
break
if sample.firstItem == n:
root = w.root
elif w.root != root:
break
result.add w
proc to(b: openArray[byte]; T: type ByteArray32): T =
## Convert to other representation (or exception)
@ -263,8 +261,8 @@ proc meanStdDev(sum, sqSum: float; length: int): (float,float) =
proc accountsRunner(noisy = true; persistent = true; sample = accSample0) =
let
peer = Peer.new
root = sample.root
testItemLst = sample.data.to(seq[TestItem])
testItemLst = sample.to(seq[UndumpProof])
root = testItemLst[0].root
tmpDir = getTmpDir()
db = if persistent: tmpDir.testDbs(sample.name) else: testDbs()
dbDir = db.dbDir.split($DirSep).lastTwo.join($DirSep)
@ -275,10 +273,10 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample0) =
if db.persistent:
tmpDir.flushDbDir(sample.name)
suite &"SyncSnap: {sample.name} accounts and proofs for {info}":
suite &"SyncSnap: {sample.file} accounts and proofs for {info}":
var
desc: AccountsDbSessionRef
accounts: seq[SnapAccount]
accKeys: seq[Hash256]
test &"Snap-proofing {testItemLst.len} items for state root ..{root.pp}":
let dbBase = if persistent: AccountsDbRef.init(db.cdb[0])
@ -299,7 +297,7 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample0) =
# Load/accumulate accounts (needs some unique sorting)
let lowerBound = testItemLst.mapIt(it.base).sortMerge
accounts = testItemLst.mapIt(it.data.accounts).sortMerge
var accounts = testItemLst.mapIt(it.data.accounts).sortMerge
check desc.merge(lowerBound, accounts) == OkHexDb
desc.assignPrettyKeys() # for debugging, make sure that state root ~ "$0"
@ -310,14 +308,61 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample0) =
check desc.dbImports() == OkHexDb
noisy.say "***", "import stats=", desc.dbImportStats.pp
test &"Revisiting {accounts.len} items stored items on BaseChainDb":
for acc in accounts:
# Update list of accounts. There might be additional accounts in the set
# of proof nodes, typically before the `lowerBound` of each block. As
# there is a list of account ranges (that were merged for testing), one
# needs to check for additional records only on either end of a range.
var keySet = accounts.mapIt(it.accHash).toHashSet
for w in testItemLst:
var key = desc.prevChainDbKey(w.data.accounts[0].accHash)
while key.isOk and key.value notin keySet:
keySet.incl key.value
let newKey = desc.prevChainDbKey(key.value)
check newKey != key
key = newKey
key = desc.nextChainDbKey(w.data.accounts[^1].accHash)
while key.isOk and key.value notin keySet:
keySet.incl key.value
let newKey = desc.nextChainDbKey(key.value)
check newKey != key
key = newKey
accKeys = toSeq(keySet).mapIt(it.to(NodeTag)).sorted(cmp)
.mapIt(it.to(Hash256))
check accounts.len <= accKeys.len
test &"Revisiting {accKeys.len} items stored items on BaseChainDb":
var
nextAccount = accKeys[0]
prevAccount: Hash256
count = 0
for accHash in accKeys:
count.inc
let
byChainDB = desc.getChainDbAccount(acc.accHash)
byBulker = desc.getBulkDbXAccount(acc.accHash)
pfx = $count & "#"
byChainDB = desc.getChainDbAccount(accHash)
byNextKey = desc.nextChainDbKey(accHash)
byPrevKey = desc.prevChainDbKey(accHash)
byBulker = desc.getBulkDbXAccount(accHash)
noisy.say "*** find",
"byChainDb=", byChainDB.pp, " inBulker=", byBulker.pp
"<", count, "> byChainDb=", byChainDB.pp, " inBulker=", byBulker.pp
check byChainDB.isOk
# Check `next` traversal functionality. If `byNextKey.isOk` fails, the
# `nextAccount` value is still the old one and will be different from
# the account in the next for-loop cycle (if any.)
check pfx & accHash.pp(false) == pfx & nextAccount.pp(false)
if byNextKey.isOk:
nextAccount = byNextKey.value
else:
nextAccount = Hash256.default
# Check `prev` traversal functionality
if prevAccount != Hash256.default:
check byPrevKey.isOk
if byPrevKey.isOk:
check pfx & byPrevKey.value.pp(false) == pfx & prevAccount.pp(false)
prevAccount = accHash
if desc.dbBackendRocksDb():
check byBulker.isOk
check byChainDB == byBulker
@ -345,7 +390,8 @@ proc accountsRunner(noisy = true; persistent = true; sample = accSample0) =
# letters stand for `Locked` nodes which are like `Static` ones but
# added later (typically these nodes are update `Mutable` nodes.)
#
noisy.say "***", "database dump\n ", desc.dumpProofsDB.join("\n ")
# Beware: dumping a large database is not recommended
#noisy.say "***", "database dump\n ", desc.dumpProofsDB.join("\n ")
proc importRunner(noisy = true; persistent = true; capture = goerliCapture) =
@ -799,22 +845,70 @@ when isMainModule:
# Some 20 `snap/1` reply equivalents
snapTest0 =
accSample0
# Only the first `snap/1` reply from the sample
snapTest1 = AccountsProofSample(
snapTest1 = AccountsSample(
name: "test1",
root: snapTest0.root,
data: snapTest0.data[0..0])
file: snapTest0.file,
lastItem: 0)
# Ditto for sample1
snapTest2 = AccountsProofSample(
snapTest2 = AccountsSample(
name: "test2",
root: sample1.snapRoot,
data: sample1.snapProofData)
snapTest3 = AccountsProofSample(
file: "sample1.txt.gz",
firstItem: 0,
lastItem: high(int))
snapTest3 = AccountsSample(
name: "test3",
root: snapTest2.root,
data: snapTest2.data[0..0])
file: snapTest2.file,
lastItem: 0)
# Other samples from bulk folder
snapOther0a = AccountsSample(
name: "Other0a",
file: "account0_00_06_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapOther0b = AccountsSample(
name: "Other0b",
file: "account0_07_08_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapOther1a = AccountsSample(
name: "Other1a",
file: "account1_09_09_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapOther1b = AccountsSample(
name: "Other1b",
file: "account1_10_17_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapOther2 = AccountsSample(
name: "Other2",
file: "account2_18_25_dump.txt.gz",
firstItem: 1,
lastItem: high(int))
snapOther3 = AccountsSample(
name: "Other3",
file: "account3_26_33_dump.txt.gz",
firstItem: 2,
lastItem: high(int))
snapOther4 = AccountsSample(
name: "Other4",
file: "account4_34_41_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
snapOther5 = AccountsSample(
name: "Other5",
file: "account5_42_49_dump.txt.gz",
firstItem: 2,
lastItem: high(int))
snapOther6 = AccountsSample(
name: "Other6",
file: "account6_50_54_dump.txt.gz",
firstItem: 0,
lastItem: high(int))
bulkTest0 = goerliCapture
bulkTest1: CaptureSpecs = (
@ -833,7 +927,6 @@ when isMainModule:
file: "mainnet332160.txt.gz",
numBlocks: high(int))
#setTraceLevel()
setErrorLevel()
@ -885,13 +978,24 @@ when isMainModule:
# re-visited using the account hash as access path.
#
false.accountsRunner(persistent=true, snapTest0)
false.accountsRunner(persistent=false, snapTest0)
noisy.accountsRunner(persistent=true, snapTest1)
false.accountsRunner(persistent=false, snapTest2)
#noisy.accountsRunner(persistent=true, snapTest3)
noisy.showElapsed("accountsRunner()"):
#false.accountsRunner(persistent=true, snapOther0a)
false.accountsRunner(persistent=true, snapOther0b)
#false.accountsRunner(persistent=true, snapOther1a)
#false.accountsRunner(persistent=true, snapOther1b)
#false.accountsRunner(persistent=true, snapOther2)
#false.accountsRunner(persistent=true, snapOther3)
#false.accountsRunner(persistent=true, snapOther4)
#false.accountsRunner(persistent=true, snapOther5)
#false.accountsRunner(persistent=true, snapOther6)
when true and false:
false.accountsRunner(persistent=true, snapTest0)
#noisy.accountsRunner(persistent=true, snapTest1)
false.accountsRunner(persistent=true, snapTest2)
#noisy.accountsRunner(persistent=true, snapTest3)
discard
when true: # and false:
# ---- database storage timings -------
noisy.showElapsed("importRunner()"):

File diff suppressed because it is too large Load Diff

Binary file not shown.

File diff suppressed because it is too large Load Diff

Binary file not shown.

View File

@ -12,7 +12,7 @@ import
std/[os, strformat, sequtils, strutils, times],
../../nimbus/utils/tx_pool/[tx_chain, tx_desc, tx_gauge, tx_item, tx_tabs],
../../nimbus/utils/tx_pool/tx_tasks/[tx_packer, tx_recover],
../replay/[pp, undump],
../replay/[pp, undump_blocks],
chronicles,
eth/[common, keys],
stew/[keyed_queue, sorted_set],