nimbus-eth1/tests/replay/undump_kvp.nim

# Nimbus
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

import
std/[os, sequtils, strformat, strutils],
chronicles,
eth/common,
rocksdb,
stew/byteutils,
../../nimbus/db/kvstore_rocksdb,
../../nimbus/sync/snap/[constants, range_desc, worker/db/hexary_desc],
  ./gunzip

type
UndumpRecordKey* = enum
UndumpKey32
UndumpKey33
UndumpOther
UndumpRecord* = object
case kind*: UndumpRecordKey
of UndumpKey32:
key32*: ByteArray32
of UndumpKey33:
key33*: ByteArray33
of UndumpOther:
other*: Blob
data*: Blob
id*: uint
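
# A dump file line, as written by `dumpAllDb()` below, has the layout
#
#   <key>:<value> #<id>
#
# with `<key>` and `<value>` hex encoded and `<id>` a decimal record counter.
# When re-played by `undumpKVP()`, a 64 digit (i.e. 32 byte) hex key yields an
# `UndumpKey32` record, a 66 digit (33 byte) key an `UndumpKey33` record, and
# any other key length an `UndumpOther` record.
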
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

template ignExceptionOops(info: static[string]; code: untyped) =
  ## Run `code`, logging and otherwise ignoring any `CatchableError`.
  try:
    code
  except CatchableError as e:
    error "Ooops", `info`=info, name=($e.name), msg=(e.msg)

template say(args: varargs[untyped]) =
  ## Debugging stub, enable the commented `echo` below for verbose output.
  # echo args
  discard

proc walkAllDb(
rocky: RocksStoreRef;
kvpFn: proc(k,v: Blob): bool;
) =
## Walk over all key-value pairs of the database (`RocksDB` only.)
let
rop = rocky.store.readOptions
rit = rocky.store.db.rocksdb_create_iterator(rop)
rit.rocksdb_iter_seek_to_first()
while rit.rocksdb_iter_valid() != 0:
# Read key-value pair
var
kLen, vLen: csize_t
let
kData = rit.rocksdb_iter_key(addr kLen)
vData = rit.rocksdb_iter_value(addr vLen)
# Store data
let
key = if kData.isNil: EmptyBlob
else: kData.toOpenArrayByte(0,int(kLen)-1).toSeq
value = if vData.isNil: EmptyBlob
else: vData.toOpenArrayByte(0,int(vLen)-1).toSeq
# Call key-value handler
if kvpFn(key, value):
break
    # Update iterator (might overwrite kData/vData)
rit.rocksdb_iter_next()
# End while
rit.rocksdb_iter_destroy()
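
# A minimal usage sketch for `walkAllDb()`: the callback returns `true` to
# stop the walk early (the `rocky` handle below is assumed to be an open
# `RocksStoreRef`.)
#
#   rocky.walkAllDb proc(k,v: Blob): bool =
#     echo k.toHex, ":", v.toHex
#     false                        # keep walking
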
proc dumpAllDbImpl(
rocky: RocksStoreRef; # Persistent database handle
fd: File; # File name to dump database records to
nItemsMax: int; # Max number of items to dump
): int
{.discardable.} =
  ## Dump database records to the argument file descriptor `fd`.
var count = 0
if not rocky.isNil and not fd.isNil:
rocky.walkAllDb proc(k,v: Blob): bool {.raises: [IOError].} =
count.inc
fd.write k.toHex & ":" & v.toHex & " #" & $count & "\n"
      nItemsMax <= count # stop the walk after `nItemsMax` records
  count

# ------------------------------------------------------------------------------
# Public capture
# ------------------------------------------------------------------------------

proc dumpAllDb*(
rocky: RocksStoreRef; # Persistent database handle
dumpFile = "snapdb.dmp"; # File name to dump database records to
nItemsMax = high(int); # Max number of items to dump
): int
{.discardable.} =
  ## Variant of `dumpAllDbImpl()` which opens the argument dump file
  ## `dumpFile` for writing.
var fd: File
if fd.open(dumpFile, fmWrite):
defer: fd.close
ignExceptionOops("dumpAddDb"):
result = rocky.dumpAllDbImpl(fd, nItemsMax)
fd.flushFile
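
# A usage sketch for `dumpAllDb()`, assuming an open `RocksStoreRef` handle
# `rocky`: capture at most 1000 records into the default dump file.
#
#   let nDumped = rocky.dumpAllDb(nItemsMax = 1000)
#   echo "dumped ", nDumped, " records"
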
# ------------------------------------------------------------------------------
# Public undump
# ------------------------------------------------------------------------------

iterator undumpKVP*(gzFile: string): UndumpRecord =
  ## Decompress the gzipped file `gzFile` and re-play its key-value records
  ## (expected in the `dumpAllDb()` line layout described above.)
  if not gzFile.fileExists:
    raiseAssert &"No such file: \"{gzFile}\""
for lno,line in gzFile.gunzipLines:
if line.len == 0 or line[0] == '#':
continue
let flds = line.split
if 0 < flds.len:
let kvp = flds[0].split(":")
if kvp.len < 2:
say &"*** line {lno}: expected \"<key>:<value>\" pair, got {line}"
continue
var id = 0u
if 1 < flds.len and flds[1][0] == '#':
let flds1Len = flds[1].len
id = flds[1][1 ..< flds1Len].parseUInt
case kvp[0].len:
of 64:
yield UndumpRecord(
kind: UndumpKey32,
key32: ByteArray32.fromHex kvp[0],
data: kvp[1].hexToSeqByte,
id: id)
of 66:
yield UndumpRecord(
kind: UndumpKey33,
key33: ByteArray33.fromHex kvp[0],
data: kvp[1].hexToSeqByte,
id: id)
      else:
        yield UndumpRecord(
          kind: UndumpOther,
          other: kvp[0].hexToSeqByte,
          data: kvp[1].hexToSeqByte,
          id: id)
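
when isMainModule:
  # Smoke test sketch: re-play a gzipped capture file and print each record.
  # The file name "snapdb.dmp.gz" is an assumption here, any gzip compressed
  # `dumpAllDb()` output will do.
  const captureFile = "snapdb.dmp.gz"
  if captureFile.fileExists:
    for rec in undumpKVP(captureFile):
      case rec.kind
      of UndumpKey32:
        echo "key32 ", rec.key32.toHex, " #", rec.id
      of UndumpKey33:
        echo "key33 ", rec.key33.toHex, " #", rec.id
      of UndumpOther:
        echo "other ", rec.other.toHex, " #", rec.id
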
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------