nimbus-eth1/tests/replay/undump_accounts.nim


# Nimbus
# Copyright (c) 2018-2019 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

import
  std/[os, sequtils, strformat, strutils],
  eth/common,
  nimcrypto/utils,
  stew/byteutils,
  ../../nimbus/sync/[protocol, snap/range_desc],
  ./gunzip

type
  UndumpState = enum
    UndumpHeader
    UndumpStateRoot
    UndumpBase
    UndumpAccountList
    UndumpProofs
    UndumpCommit
    UndumpError
    UndumpSkipUntilCommit

  UndumpAccounts* = object
    ## Palatable output for iterator
    root*: Hash256
    base*: NodeTag
    data*: PackedAccountRange
    seenAccounts*: int
    seenStorages*: int
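
# The gzipped capture files handled below hold a sequence of account and
# storage records in a simple line format. An account record (storage
# records are skipped by this module) looks roughly like:
#
#   accounts <nAccounts> <nProofs>
#   <state root, hex>
#   <base node tag, hex>
#   <account key, hex> <account RLP blob, hex>   -- nAccounts lines
#   # ----                                       -- only if proofs follow
#   <proof node, hex>                            -- nProofs lines
#   commit
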
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

template say(args: varargs[untyped]) =
  # echo args
  discard

proc toByteSeq(s: string): seq[byte] =
  utils.fromHex(s)

proc fromHex(T: type Hash256; s: string): T =
  result.data = ByteArray32.fromHex(s)

proc fromHex(T: type NodeKey; s: string): T =
  ByteArray32.fromHex(s).T

proc fromHex(T: type NodeTag; s: string): T =
  UInt256.fromBytesBE(ByteArray32.fromHex(s)).T

# ------------------------------------------------------------------------------
# Public capture
# ------------------------------------------------------------------------------

proc dumpAccounts*(
    root: Hash256;
    base: NodeTag;
    data: PackedAccountRange;
      ): string =
  ## Dump accounts data in parseable Ascii text
  proc ppStr(blob: Blob): string =
    blob.mapIt(it.toHex(2)).join.toLowerAscii

  proc ppStr(proof: SnapProof): string =
    proof.to(Blob).ppStr

  proc ppStr(hash: Hash256): string =
    hash.data.mapIt(it.toHex(2)).join.toLowerAscii

  proc ppStr(key: NodeKey): string =
    key.ByteArray32.mapIt(it.toHex(2)).join.toLowerAscii

  result = "accounts " & $data.accounts.len & " " & $data.proof.len & "\n"
  result &= root.ppStr & "\n"
  result &= base.to(Hash256).ppStr & "\n"

  for n in 0 ..< data.accounts.len:
    result &= data.accounts[n].accKey.ppStr & " "
    result &= data.accounts[n].accBlob.ppStr & "\n"

  if 0 < data.proof.len:
    result &= "# ----\n"
    for n in 0 ..< data.proof.len:
      result &= data.proof[n].ppStr & "\n"
  result &= "commit\n"
# ------------------------------------------------------------------------------
# Public undump
# ------------------------------------------------------------------------------

iterator undumpNextAccount*(gzFile: string): UndumpAccounts =
  var
    state = UndumpHeader
    data: UndumpAccounts
    nAccounts = 0u
    nProofs = 0u
    seenAccounts = 0
    seenStorages = 0

  if not gzFile.fileExists:
    raiseAssert &"No such file: \"{gzFile}\""

  for lno,line in gzFile.gunzipLines:
    if line.len == 0 or line[0] == '#':
      continue
    var flds = line.split
    #echo ">>> ",
    #  " lno=", lno,
    #  " state=", state,
    #  " nAccounts=", nAccounts,
    #  " nProofs=", nProofs,
    #  " flds=", flds

    case state:
    of UndumpSkipUntilCommit:
      if flds.len == 1 and flds[0] == "commit":
        state = UndumpHeader

    of UndumpHeader, UndumpError:
      if flds.len == 3 and flds[0] == "accounts":
        nAccounts = flds[1].parseUInt
        nProofs = flds[2].parseUInt
        data.reset
        state = UndumpStateRoot
        seenAccounts.inc
        continue
      if 1 < flds.len and flds[0] == "storages":
        seenStorages.inc
        state = UndumpSkipUntilCommit
        continue
      if state != UndumpError:
        state = UndumpError
        say &"*** line {lno}: expected header, got {line}"

    of UndumpStateRoot:
      if flds.len == 1:
        data.root = Hash256.fromHex(flds[0])
        state = UndumpBase
        continue
      state = UndumpError
      say &"*** line {lno}: expected state root, got {line}"

    of UndumpBase:
      if flds.len == 1:
        data.base = NodeTag.fromHex(flds[0])
        if 0 < nAccounts:
          state = UndumpAccountList
          continue
        if 0 < nProofs:
          state = UndumpProofs
          continue
        state = UndumpCommit
        continue
      state = UndumpError
      say &"*** line {lno}: expected account base, got {line}"

    of UndumpAccountList:
      if flds.len == 2:
        data.data.accounts.add PackedAccount(
          accKey: NodeKey.fromHex(flds[0]),
          accBlob: flds[1].toByteSeq)
        nAccounts.dec
        if 0 < nAccounts:
          continue
        if 0 < nProofs:
          state = UndumpProofs
          continue
        state = UndumpCommit
        continue
      state = UndumpError
      say &"*** line {lno}: expected account data, got {line}"

    of UndumpProofs:
      if flds.len == 1:
        data.data.proof.add flds[0].toByteSeq.to(SnapProof)
        nProofs.dec
        if nProofs <= 0:
          state = UndumpCommit
        continue
      state = UndumpError
      say &"*** expected proof data, got {line}"

    of UndumpCommit:
      if flds.len == 1 and flds[0] == "commit":
        data.seenAccounts = seenAccounts
        data.seenStorages = seenStorages
        yield data
        state = UndumpHeader
        continue
      state = UndumpError
      say &"*** line {lno}: expected commit, got {line}"
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------