nimbus-eth1/tests/replay/undump_storages.nim

# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
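
## Dump `AccountStorageRange` data (accounts, storage slots and proof nodes)
## to parseable Ascii text, and read such dumps back from a gzipped capture
## file.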

import
  std/[os, sequtils, strformat, strutils],
  eth/common,
  nimcrypto/utils,
  stew/byteutils,
  ../../nimbus/sync/snap/range_desc,
  ../../nimbus/sync/protocol,
  ./gunzip

type
  UndumpState = enum
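    # Parser states, one per kind of input line expected next while reading
    # a dump back in.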
    UndumpStoragesHeader
    UndumpStoragesRoot
    UndumpSlotsHeader
    UndumpSlotsAccount
    UndumpSlotsRoot
    UndumpSlotsList
    UndumpProofs
    UndumpCommit
    UndumpError
    UndumpSkipUntilCommit

  UndumpStorages* = object
    ## Palatable output for iterator
    root*: Hash256
    data*: AccountStorageRange
    seenAccounts*: int
    seenStorages*: int

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
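
# Debugging helper: `say` is a no-op by default; re-enable the commented
# `echo` below to trace parser errors.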
template say(args: varargs[untyped]) =
  # echo args
  discard

proc toByteSeq(s: string): seq[byte] =
  utils.fromHex(s)

proc fromHex(T: type Hash256; s: string): T =
  result.data = ByteArray32.fromHex(s)

proc fromHex(T: type NodeKey; s: string): T =
  ByteArray32.fromHex(s).T

proc fromHex(T: type NodeTag; s: string): T =
  UInt256.fromBytesBE(ByteArray32.fromHex(s)).T

# ------------------------------------------------------------------------------
# Public capture
# ------------------------------------------------------------------------------

proc dumpStorages*(
    root: Hash256;
    data: AccountStorageRange
      ): string =
  ## Dump account and storage data in parseable Ascii text
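  ##
  ## The layout is line oriented:
  ## * a header line `storages <num-accounts> <num-proofs>`
  ## * the state root (hex)
  ## * for each account: a `slots <num-slots>` line, the account key, the
  ##   storage root, then one `<slot-hash> <slot-data>` line per slot
  ## * one hex line per proof node (if any)
  ## * a terminating `commit` line
  ##
  ## Lines starting with `#` are comments, ignored by the reader.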
  proc ppStr(blob: Blob): string =
    blob.mapIt(it.toHex(2)).join.toLowerAscii

  proc ppStr(hash: Hash256): string =
    hash.data.mapIt(it.toHex(2)).join.toLowerAscii
  proc ppStr(key: NodeKey): string =
    key.ByteArray32.mapIt(it.toHex(2)).join.toLowerAscii

  result = "storages " & $data.storages.len & " " & $data.proof.len & "\n"
  result &= root.ppStr & "\n"

  for n in 0 ..< data.storages.len:
    let slots = data.storages[n]
    result &= "# -- " & $n & " --\n"
    result &= "slots " & $slots.data.len & "\n"
    result &= slots.account.accKey.ppStr & "\n"
    result &= slots.account.storageRoot.ppStr & "\n"

    for i in 0 ..< slots.data.len:
      result &= slots.data[i].slotHash.ppStr & " "
      result &= slots.data[i].slotData.ppStr & "\n"

  if 0 < data.proof.len:
    result &= "# ----\n"
    for n in 0 ..< data.proof.len:
      result &= data.proof[n].ppStr & "\n"

  result &= "commit\n"

# ------------------------------------------------------------------------------
# Public undump
# ------------------------------------------------------------------------------

iterator undumpNextStorages*(gzFile: string): UndumpStorages =
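  ## Parse a gzipped dump file as produced by `dumpStorages()` and yield one
  ## `UndumpStorages` record per `commit` terminated block. Records of other
  ## dump types (header line `accounts ...`) are not decoded but skipped,
  ## though counted in the `seenAccounts` field.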
  var
    line = ""
    lno = 0
    state = UndumpStoragesHeader
    data: UndumpStorages
    nAccounts = 0u
    nProofs = 0u
    nSlots = 0u
    seenAccounts = 0
    seenStorages = 0

  if not gzFile.fileExists:
    raiseAssert &"No such file: \"{gzFile}\""

  for lno,line in gzFile.gunzipLines:
    if line.len == 0 or line[0] == '#':
      continue
    var flds = line.split
    #echo ">>> ",
    #  " lno=", lno,
    #  " state=", state,
    #  " nAccounts=", nAccounts,
    #  " nProofs=", nProofs,
    #  " nSlots=", nSlots,
    #  " flds=", flds

    case state:
    of UndumpSkipUntilCommit:
      if flds.len == 1 and flds[0] == "commit":
        state = UndumpStoragesHeader
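
    # Lines headed `accounts` start a record of a different dump type; such
    # records are counted (`seenAccounts`) but otherwise skipped until their
    # `commit` line via the `UndumpSkipUntilCommit` state above.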
    of UndumpStoragesHeader, UndumpError:
      if flds.len == 3 and flds[0] == "storages":
        nAccounts = flds[1].parseUInt
        nProofs = flds[2].parseUInt
        data.reset
        state = UndumpStoragesRoot
        seenStorages.inc
        continue
      if 1 < flds.len and flds[0] == "accounts":
        state = UndumpSkipUntilCommit
        seenAccounts.inc
        continue
      if state != UndumpError:
        state = UndumpError
        say &"*** line {lno}: expected storages header, got {line}"

    of UndumpStoragesRoot:
      if flds.len == 1:
        data.root = Hash256.fromHex(flds[0])
        if 0 < nAccounts:
          state = UndumpSlotsHeader
          continue
        state = UndumpCommit
        continue
      state = UndumpError
      say &"*** line {lno}: expected storages state root, got {line}"

    of UndumpSlotsHeader:
      if flds.len == 2 and flds[0] == "slots":
        nSlots = flds[1].parseUInt
        state = UndumpSlotsAccount
        continue
      state = UndumpError
      say &"*** line {lno}: expected slots header, got {line}"

    of UndumpSlotsAccount:
      if flds.len == 1:
        data.data.storages.add AccountSlots(
          account: AccountSlotsHeader(
            accKey: NodeKey.fromHex(flds[0])))
        state = UndumpSlotsRoot
        continue
      state = UndumpError
      say &"*** line {lno}: expected slots account, got {line}"

    of UndumpSlotsRoot:
      if flds.len == 1:
        data.data.storages[^1].account.storageRoot = Hash256.fromHex(flds[0])
        state = UndumpSlotsList
        continue
      state = UndumpError
      say &"*** line {lno}: expected slots storage root, got {line}"

    of UndumpSlotsList:
      if flds.len == 2:
        data.data.storages[^1].data.add SnapStorage(
          slotHash: Hash256.fromHex(flds[0]),
          slotData: flds[1].toByteSeq)
        nSlots.dec
        if 0 < nSlots:
          continue
        nAccounts.dec
        if 0 < nAccounts:
          state = UndumpSlotsHeader
          continue
        if 0 < nProofs:
          state = UndumpProofs
          continue
        state = UndumpCommit
        continue
      state = UndumpError
      say &"*** line {lno}: expected slot data, got {line}"
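
    # Proof records, if any, follow the last slot list; either way the record
    # is closed by a `commit` line.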
    of UndumpProofs:
      if flds.len == 1:
        data.data.proof.add flds[0].toByteSeq
        nProofs.dec
        if nProofs <= 0:
          state = UndumpCommit
          # KLUDGE: set base (field was later added)
          if 0 < data.data.storages.len:
            let topList = data.data.storages[^1]
            if 0 < topList.data.len:
              data.data.base = topList.data[0].slotHash.to(NodeTag)
        continue
      state = UndumpError
      say &"*** expected proof data, got {line}"

    of UndumpCommit:
      if flds.len == 1 and flds[0] == "commit":
        data.seenAccounts = seenAccounts
        data.seenStorages = seenStorages
        yield data
        state = UndumpStoragesHeader
        continue
      state = UndumpError
      say &"*** line {lno}: expected commit, got {line}"

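# Minimal usage sketch (not part of the test suite): walk a capture file and
# print, per record, the number of per-account slot lists and proof nodes.
# The file name below is a placeholder.
when isMainModule:
  for w in undumpNextStorages("storage-ranges.gz"):
    echo("root=", w.root,
      " accounts=", w.data.storages.len,
      " proofs=", w.data.proof.len)
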
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------