Remodel persistent snapdb access (#1274)
* Re-model persistent database access

  why: Storage slots healing ran on the wrong sub-trie (i.e. the wrong key
  mapping.) So the get/put and bulk functions now use the definitions in
  `snapdb_desc` (earlier there were some shortcuts for `get()`.)

* Fixes: missing return code, typo, redundant imports, etc.

* Remove obsolete debugging directives from the `worker_desc` module

* Correct failing unit tests for storage slots trie inspection

  why: Some pathological cases in the extended tests do not produce any
  hexary trie data. This is rightly detected by the trie inspection, so the
  result checks needed to be adjusted.
parent 74a83c1229 · commit c0d580715e
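The crux of the re-model: accounts and storage slots live in distinct sub-tables of the same key-value store, told apart only by the key mapping, so a `get()` closure built with the accounts mapping silently misses every storage-slot node. A minimal stand-alone Nim sketch of the failure mode (names, prefix byte, and table layout are illustrative stand-ins, not the repo's actual `snapdb_desc` definitions):

    import std/[tables, hashes]

    type
      Blob = seq[byte]
      GetFn = proc(key: Blob): Blob     # generic lookup closure

    var kv = initTable[Blob, Blob]()    # stand-in for the persistent store

    proc accountsKey(k: Blob): Blob = k                     # 32-byte key, used as-is
    proc storageSlotsKey(k: Blob): Blob = @[0x6b.byte] & k  # prefixed sub-table key (0x6b is a placeholder)

    proc accountsGetFn(): GetFn =
      result = proc(key: Blob): Blob = kv.getOrDefault(accountsKey(key))

    proc storageSlotsGetFn(): GetFn =
      result = proc(key: Blob): Blob = kv.getOrDefault(storageSlotsKey(key))

    when isMainModule:
      let node = @[byte 1, 2, 3]
      kv[storageSlotsKey(node)] = @[byte 0xaa]   # node lives in the slots sub-table
      assert accountsGetFn()(node).len == 0      # wrong key mapping: nothing found
      assert storageSlotsGetFn()(node) == @[byte 0xaa]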
@@ -9,7 +9,7 @@
 # except according to those terms.
 
 import
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   chronicles,
   chronos,
   stew/[interval_set, sorted_set],
@@ -11,7 +11,7 @@
 import
   chronos,
   chronicles,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stint,
   ../../utils/prettify,
   ../misc/timer_helper
@@ -12,7 +12,7 @@ import
   std/[options],
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   ".."/[protocol, sync_desc],
   ../misc/[best_pivot, block_queue],
   ./ticker
@@ -28,7 +28,7 @@ proc notEnabled(name: string) =
 proc notImplemented(name: string) =
   debug "Wire handler method not implemented", meth = name
 
-method poolEnabled*(ctx: EthWireRef; ena: bool) =
+proc poolEnabled*(ctx: EthWireRef; ena: bool) =
   ctx.disablePool = not ena
 
 method getStatus*(ctx: EthWireRef): EthState {.gcsafe.} =
@@ -17,7 +17,7 @@ import
   std/[hashes, options, sets],
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/byteutils,
   ".."/[protocol, sync_desc, types]
@@ -60,7 +60,7 @@ import
   std/[algorithm, options, sequtils, strutils],
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/[byteutils, interval_set, sorted_set],
   "../.."/[db/db_chain, utils],
   ".."/[protocol, sync_desc, types]
@@ -62,10 +62,10 @@
 ## the current best block disappears and be able to reduce block number.
 
 import
-  std/bitops,
+  std/[bitops, sequtils, strutils],
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p, p2p/private/p2p_types],
+  eth/[common, p2p, p2p/private/p2p_types],
   "../.."/[constants, genesis, p2p/chain/chain_desc],
   ".."/[protocol, sync_desc, types],
   ../snap/worker_desc
@@ -170,6 +170,29 @@ static:
 # Private logging helpers
 # ------------------------------------------------------------------------------
 
+proc pp(a: MDigest[256]; collapse = true): string =
+  if not collapse:
+    a.data.mapIt(it.toHex(2)).join.toLowerAscii
+  elif a == EMPTY_ROOT_HASH:
+    "EMPTY_ROOT_HASH"
+  elif a == EMPTY_UNCLE_HASH:
+    "EMPTY_UNCLE_HASH"
+  elif a == EMPTY_SHA3:
+    "EMPTY_SHA3"
+  elif a == ZERO_HASH256:
+    "ZERO_HASH256"
+  else:
+    a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii
+
+proc pp(bh: BlockHash): string =
+  "%" & $bh.Hash256.pp
+
+proc pp(bn: BlockNumber): string =
+  "#" & $bn
+
+proc pp(bhn: HashOrNum): string =
+  if bhn.isHash: bhn.hash.pp else: bhn.number.pp
+
 proc traceSyncLocked(
     sp: SnapPivotWorkerRef;
     num: BlockNumber;
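The collapsed form in `pp(a: MDigest[256])` above keeps either a symbolic name for the well-known empty hashes or just the last eight hex digits of the 64-character rendering. A small stand-alone illustration (plain Nim, no project dependencies):

    import std/[sequtils, strutils]

    var digest: array[32, byte]     # pretend MDigest[256].data
    digest[30] = 0xcd
    digest[31] = 0xef

    let hex = digest.mapIt(it.toHex(2)).join          # 64 hex characters
    assert hex.len == 64
    assert hex[56 .. 63].toLowerAscii == "0000cdef"   # the collapsed tail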
@@ -178,7 +201,7 @@ proc traceSyncLocked(
   ## Trace messages when peer canonical head is confirmed or updated.
   let
     peer = sp.peer
-    bestBlock = sp.global.ctx.pp(hash, num)
+    bestBlock = num.pp
   if sp.syncMode != SyncLocked:
     debug "Now tracking chain head of peer", peer,
       bestBlock
@@ -207,7 +230,7 @@ proc lockSyncStateAndFetch(sp: SnapPivotWorkerRef; header: BlockHeader) =
     peer = sp.peer
     stateRoot = header.stateRoot
     hash = header.blockHash.BlockHash
-    thisBlock = sp.global.ctx.pp(hash, header.blockNumber)
+    thisBlock = header.blockNumber.pp
 
   sp.traceSyncLocked(header.blockNumber, hash)
   sp.bestNumber = header.blockNumber
@@ -456,7 +479,6 @@ proc peerSyncChainEmptyReply(sp: SnapPivotWorkerRef; request: BlocksRequest) =
       if lowestAbsent == 0.toBlockNumber: lowestAbsent
       else: lowestAbsent - 1.toBlockNumber
   sp.bestHash = default(typeof(sp.bestHash))
-  sp.global.ctx.seen(sp.bestHash,sp.bestNumber)
 
 
 proc peerSyncChainNonEmptyReply(
@@ -514,7 +536,6 @@ proc peerSyncChainNonEmptyReply(
   if highestPresent > sp.bestNumber:
     sp.bestNumber = highestPresent
     sp.bestHash = headers[highestIndex].blockHash.BlockHash
-    sp.global.ctx.seen(sp.bestHash,sp.bestNumber)
 
 # ------------------------------------------------------------------------------
 # Public functions, constructor
@@ -584,7 +605,7 @@ proc pivotNegotiate*(
   let request = sp.peerSyncChainRequest
   trace trEthSendSendingGetBlockHeaders, peer,
     count=request.maxResults,
-    startBlock=sp.global.ctx.pp(request.startBlock), step=request.traceStep
+    startBlock=request.startBlock.pp, step=request.traceStep
 
   inc sp.global.stats.ok.getBlockHeaders
   var reply: Option[protocol.blockHeadersObj]
@@ -253,7 +253,7 @@ p2pProtocol snap1(version = 1,
   proc getAccountRange(peer: Peer, rootHash: Hash256, origin: Hash256,
                        limit: Hash256, responseBytes: uint64) =
     trace trSnapRecvReceived & "GetAccountRange (0x00)", peer,
-      accountRange=(origin,limit), stateRoot=($rootHash), responseBytes
+      accountRange=[origin,limit], stateRoot=($rootHash), responseBytes
 
     trace trSnapSendReplying & "EMPTY AccountRange (0x01)", peer, sent=0
     await response.send(@[], @[])
@@ -9,7 +9,7 @@
 # except according to those terms.
 
 import
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   chronicles,
   chronos,
   ../db/select_backend,
@@ -185,6 +185,7 @@ proc isEmpty*(lrs: openArray[NodeTagRangeSet]): bool =
   for ivSet in lrs:
     if 0 < ivSet.total or 0 < ivSet.chunks:
       return false
+  true
 
 proc isFull*(lrs: NodeTagRangeSet): bool =
   ## Returns `true` if the argument set `lrs` contains of the single
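The lone `+ true` above is the "missing return code" from the commit message: a Nim proc whose body ends without a final expression implicitly returns the zero-default of `result`, which for `bool` is `false`, so `isEmpty` reported non-empty even for empty sets. A stand-alone illustration of the pitfall (simplified types, not the repo's):

    proc isEmptyBroken(xs: seq[int]): bool =
      for x in xs:
        if x != 0:
          return false
      # missing final expression: implicitly returns `result`, i.e. false

    proc isEmptyFixed(xs: seq[int]): bool =
      for x in xs:
        if x != 0:
          return false
      true                      # the one-line fix, as in the hunk above

    when isMainModule:
      assert not isEmptyBroken(@[])   # wrong: empty input reported non-empty
      assert isEmptyFixed(@[])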
@@ -15,7 +15,7 @@
 import
   std/sequtils,
   chronos,
-  eth/[common/eth_types, p2p, trie/trie_defs],
+  eth/[common, p2p, trie/trie_defs],
   stew/interval_set,
   "../../.."/[protocol, protocol/trace_config],
   "../.."/[range_desc, worker_desc],
@@ -11,7 +11,7 @@
 import
   std/options,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   "../../.."/[protocol, types],
   ../../worker_desc,
   ./com_error
@@ -13,7 +13,7 @@
 import
   std/[options, sequtils],
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   "../../.."/[protocol, protocol/trace_config],
   "../.."/[range_desc, worker_desc],
   ./com_error
@@ -12,7 +12,7 @@
 import
   std/[options, sequtils],
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/interval_set,
   "../../.."/[protocol, protocol/trace_config],
   "../.."/[range_desc, worker_desc],
@@ -11,7 +11,7 @@
 import
   std/[options, sequtils],
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   "../../.."/[protocol, protocol/trace_config],
   ../../worker_desc,
   ./com_error
@@ -10,7 +10,7 @@
 
 import
   std/[algorithm, hashes, sequtils, sets, strutils, tables],
-  eth/[common/eth_types, p2p, trie/nibbles],
+  eth/[common, p2p, trie/nibbles],
   stint,
   ../../range_desc,
   ./hexary_error
@@ -10,7 +10,7 @@
 
 import
   std/[sequtils, sets, strutils, tables],
-  eth/[common/eth_types_rlp, trie/nibbles],
+  eth/[common, trie/nibbles],
   stew/results,
   ../../range_desc,
   "."/[hexary_desc, hexary_error]
@@ -11,7 +11,7 @@
 import
   std/[hashes, sequtils, sets, tables],
   chronicles,
-  eth/[common/eth_types_rlp, trie/nibbles],
+  eth/[common, trie/nibbles],
   stew/results,
   ../../range_desc,
   "."/[hexary_desc, hexary_paths]
@@ -16,7 +16,7 @@
 
 import
   std/[sequtils, sets, strutils, tables],
-  eth/[common/eth_types, trie/nibbles],
+  eth/[common, trie/nibbles],
   stew/results,
   ../../range_desc,
   "."/[hexary_desc, hexary_error, hexary_paths]
@@ -12,7 +12,7 @@
 
 import
   std/[tables],
-  eth/[common/eth_types_rlp, trie/nibbles],
+  eth/[common, trie/nibbles],
   ../../range_desc,
   ./hexary_desc
@@ -11,11 +11,11 @@
 import
   std/[algorithm, sequtils, strutils, tables],
   chronicles,
-  eth/[common, p2p, rlp, trie/nibbles, trie/db],
+  eth/[common, p2p, rlp, trie/nibbles],
   stew/byteutils,
   ../../range_desc,
-  "."/[bulk_storage, hexary_desc, hexary_error, hexary_import,
-       hexary_interpolate, hexary_inspect, hexary_paths, snapdb_desc]
+  "."/[hexary_desc, hexary_error, hexary_import, hexary_interpolate,
+       hexary_inspect, hexary_paths, snapdb_desc, snapdb_persistent]
 
 {.push raises: [Defect].}
@@ -25,7 +25,7 @@ logScope:
 type
   SnapDbAccountsRef* = ref object of SnapDbBaseRef
     peer: Peer                 ## For log messages
-    getFn: HexaryGetFn         ## Persistent database `get()` closure
+    getClsFn: AccountsGetFn    ## Persistent database `get()` closure
 
 const
   extraTraceMessages = false or true
@@ -40,6 +40,11 @@ proc to(h: Hash256; T: type NodeKey): T =
 proc convertTo(data: openArray[byte]; T: type Hash256): T =
   discard result.data.NodeKey.init(data) # size error => zero
 
+proc getFn(ps: SnapDbAccountsRef): HexaryGetFn =
+  ## Derive a `HexaryGetFn` from the `AccountsGetFn` closure. The reason for
+  ## this seemingly redundant mapping is that it leaves room for additional
+  ## localised and locked parameters, as done with the `StorageSlotsGetFn`.
+  return proc(key: openArray[byte]): Blob = ps.getClsFn(key)
+
 template noKeyError(info: static[string]; code: untyped) =
   try:
@@ -70,10 +75,10 @@ proc persistentAccounts(
       {.gcsafe, raises: [Defect,OSError,KeyError].} =
   ## Store accounts trie table on database
   if ps.rockDb.isNil:
-    let rc = db.bulkStorageAccounts(ps.kvDb)
+    let rc = db.persistentAccountsPut(ps.kvDb)
     if rc.isErr: return rc
   else:
-    let rc = db.bulkStorageAccountsRocky(ps.rockDb)
+    let rc = db.persistentAccountsPut(ps.rockDb)
     if rc.isErr: return rc
   ok()
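Both branches now call the same name: `persistentAccountsPut` is overloaded on the backend handle type, so the caller only switches on whether a rocksdb store is attached. A simplified sketch of that overload-based dispatch (stand-in types, not the repo's):

    type
      KvDb = ref object            # stand-in for TrieDatabaseRef
      RockyDb = ref object         # stand-in for RocksStoreRef
      HexaryTreeDb = ref object    # stand-in for HexaryTreeDbRef

    proc persistentAccountsPut(db: HexaryTreeDb; base: KvDb): bool =
      true    # would do a transactional put() per node

    proc persistentAccountsPut(db: HexaryTreeDb; rocky: RockyDb): bool =
      true    # would do an SST bulk load

    proc persistentAccounts(db: HexaryTreeDb; kv: KvDb; rocky: RockyDb): bool =
      # same call name, resolved by the backend handle's type
      if rocky.isNil: db.persistentAccountsPut(kv)
      else: db.persistentAccountsPut(rocky)

    when isMainModule:
      let db = HexaryTreeDb()
      assert db.persistentAccounts(KvDb(), nil)   # takes the kv-backend branch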
@@ -143,7 +148,7 @@ proc init*(
   new result
   result.init(pv, root.to(NodeKey))
   result.peer = peer
-  result.getFn = proc(key: openArray[byte]): Blob = db.get(key)
+  result.getClsFn = db.persistentAccountsGetFn()
 
 proc dup*(
     ps: SnapDbAccountsRef;
@@ -456,7 +461,7 @@ proc nextAccountsChainDbKey*(
     accHash: Hash256;
       ): Result[Hash256,HexaryDbError] =
   ## Fetch the account path on the `BaseChainDB`, the one next to the
-  ## argument account.
+  ## argument account key.
   noRlpExceptionOops("getChainDbAccount()"):
     let path = accHash.to(NodeKey)
                       .hexaryPath(ps.root, ps.getFn)
@@ -11,17 +11,20 @@
 import
   std/[sequtils, tables],
   chronicles,
-  eth/[common/eth_types, p2p, trie/db],
-  ../../../../db/select_backend,
+  eth/[common, p2p, trie/db],
+  ../../../../db/[select_backend, storage_types],
   ../../range_desc,
-  "."/[bulk_storage, hexary_desc, hexary_error, hexary_import, hexary_paths,
-    rocky_bulk_load]
+  "."/[hexary_desc, hexary_error, hexary_import, hexary_paths, rocky_bulk_load]
 
 {.push raises: [Defect].}
 
 logScope:
   topics = "snap-db"
 
+const
+  RockyBulkCache* = "accounts.sst"
+    ## Name of temporary file to accommodate SST records for `rocksdb`
+
 type
   SnapDbRef* = ref object
     ## Global, re-usable descriptor
@@ -80,6 +83,19 @@ proc pp*(a: RepairKey; ps: SnapDbBaseRef): string =
 proc pp*(a: NodeTag; ps: SnapDbBaseRef): string =
   a.to(NodeKey).pp(ps)
 
+# ------------------------------------------------------------------------------
+# Private helper
+# ------------------------------------------------------------------------------
+
+proc clearRockyCacheFile(rocky: RocksStoreRef): bool =
+  if not rocky.isNil:
+    # A cache file might hang about from a previous crash
+    try:
+      discard rocky.clearCacheFile(RockyBulkCache)
+      return true
+    except OSError as e:
+      error "Cannot clear rocksdb cache", exception=($e.name), msg=e.msg
+
 # ------------------------------------------------------------------------------
 # Public constructor
 # ------------------------------------------------------------------------------
@@ -97,7 +113,7 @@ proc init*(
     ): T =
   ## Variant of `init()` allowing bulk import on rocksdb backend
   result = T(db: db.trieDB, rocky: db.rocksStoreRef)
-  if not result.rocky.bulkStorageClearRockyCacheFile():
+  if not result.rocky.clearRockyCacheFile():
     result.rocky = nil
 
 proc init*(
@@ -157,6 +173,22 @@ proc kvDb*(pv: SnapDbRef): TrieDatabaseRef =
   ## Getter, low level access to underlying persistent key-value DB
   pv.db
 
+# ------------------------------------------------------------------------------
+# Public functions, select sub-tables for persistent storage
+# ------------------------------------------------------------------------------
+
+proc toAccountsKey*(a: NodeKey): ByteArray32 =
+  a.ByteArray32
+
+proc toStorageSlotsKey*(a: NodeKey): ByteArray33 =
+  a.ByteArray32.slotHashToSlotKey.data
+
+template toOpenArray*(k: ByteArray32): openArray[byte] =
+  k.toOpenArray(0, 31)
+
+template toOpenArray*(k: ByteArray33): openArray[byte] =
+  k.toOpenArray(0, 32)
+
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------
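These selectors are what tells the two sub-tables apart: accounts nodes sit under the raw 32-byte node key, while storage-slot nodes sit under a 33-byte key derived via `slotHashToSlotKey`, i.e. a one-byte table discriminator plus the hash. A stand-alone sketch with an assumed placeholder prefix byte (the real value comes from db/storage_types):

    type
      ByteArray32 = array[32, byte]
      ByteArray33 = array[33, byte]

    const slotPrefix = 0x6b.byte   # placeholder discriminator, assumption only

    proc toAccountsKeyDemo(a: ByteArray32): ByteArray32 =
      a                            # accounts sub-table: the key as-is

    proc toStorageSlotsKeyDemo(a: ByteArray32): ByteArray33 =
      result[0] = slotPrefix       # storage-slots sub-table: prefix byte ...
      for i in 0 .. 31:
        result[i + 1] = a[i]       # ... followed by the 32-byte node key

    when isMainModule:
      var k: ByteArray32
      k[31] = 0x01
      assert toAccountsKeyDemo(k).len == 32
      assert toStorageSlotsKeyDemo(k)[0] == slotPrefix
      assert toStorageSlotsKeyDemo(k)[32] == 0x01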
@@ -9,29 +9,29 @@
 # except according to those terms.
 
 import
-  std/[algorithm, strutils, tables],
+  std/[algorithm, tables],
   chronicles,
   eth/[common, trie/db],
-  ../../../../db/[kvstore_rocksdb, storage_types],
-  ../../../types,
+  ../../../../db/kvstore_rocksdb,
   ../../range_desc,
-  "."/[hexary_desc, hexary_error, rocky_bulk_load]
+  "."/[hexary_desc, hexary_error, rocky_bulk_load, snapdb_desc]
 
 {.push raises: [Defect].}
 
 logScope:
   topics = "snap-db"
 
-const
-  RockyBulkCache = "accounts.sst"
+type
+  AccountsGetFn* = proc(key: openArray[byte]): Blob {.gcsafe.}
+    ## The `get()` function for the accounts trie
+
+  StorageSlotsGetFn* = proc(acc: Hash256, key: openArray[byte]): Blob {.gcsafe.}
+    ## The `get()` function for the storage trie depends on the current account
 
 # ------------------------------------------------------------------------------
 # Private helpers
 # ------------------------------------------------------------------------------
 
 proc to(tag: NodeTag; T: type RepairKey): T =
   tag.to(NodeKey).to(RepairKey)
 
 proc convertTo(key: RepairKey; T: type NodeKey): T =
   ## Might be lossy, check before use
   discard result.init(key.ByteArray33[1 .. 32])
@@ -40,46 +40,33 @@ proc convertTo(key: RepairKey; T: type NodeTag): T =
   ## Might be lossy, check before use
   UInt256.fromBytesBE(key.ByteArray33[1 .. 32]).T
 
-# ------------------------------------------------------------------------------
-# Private helpers for bulk load testing
-# ------------------------------------------------------------------------------
-
-proc chainDbKey(a: RepairKey): ByteArray32 =
-  a.convertTo(NodeKey).ByteArray32
+proc toAccountsKey(a: RepairKey): ByteArray32 =
+  a.convertTo(NodeKey).toAccountsKey
 
-proc storagesKey(a: NodeKey): ByteArray33 =
-  a.ByteArray32.slotHashToSlotKey.data
-
-proc storagesKey(a: RepairKey): ByteArray33 =
-  a.convertTo(NodeKey).storagesKey
-
-template toOpenArray*(k: ByteArray32): openArray[byte] =
-  k.toOpenArray(0, 31)
-
-template toOpenArray*(k: NodeKey): openArray[byte] =
-  k.ByteArray32.toOpenArray
-
-template toOpenArray*(k: ByteArray33): openArray[byte] =
-  k.toOpenArray(0, 32)
+proc toStorageSlotsKey(a: RepairKey): ByteArray33 =
+  a.convertTo(NodeKey).toStorageSlotsKey
 
 # ------------------------------------------------------------------------------
-# Public helperd
+# Public functions: get
 # ------------------------------------------------------------------------------
 
-proc bulkStorageClearRockyCacheFile*(rocky: RocksStoreRef): bool =
-  if not rocky.isNil:
-    # A cache file might hang about from a previous crash
-    try:
-      discard rocky.clearCacheFile(RockyBulkCache)
-      return true
-    except OSError as e:
-      error "Cannot clear rocksdb cache", exception=($e.name), msg=e.msg
+proc persistentAccountsGetFn*(db: TrieDatabaseRef): AccountsGetFn =
+  return proc(key: openArray[byte]): Blob =
+    var nodeKey: NodeKey
+    if nodeKey.init(key):
+      return db.get(nodeKey.toAccountsKey.toOpenArray)
+
+proc persistentStorageSlotsGetFn*(db: TrieDatabaseRef): StorageSlotsGetFn =
+  return proc(accHash: Hash256; key: openArray[byte]): Blob =
+    var nodeKey: NodeKey
+    if nodeKey.init(key):
+      return db.get(nodeKey.toStorageSlotsKey.toOpenArray)
 
 # ------------------------------------------------------------------------------
-# Public bulk store examples
+# Public functions: store/put
 # ------------------------------------------------------------------------------
 
-proc bulkStorageAccounts*(
+proc persistentAccountsPut*(
     db: HexaryTreeDbRef;
     base: TrieDatabaseRef
       ): Result[void,HexaryDbError] =
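Note how both factories guard the lookup with `nodeKey.init(key)`: only a well-formed 32-byte key maps into a sub-table, while anything else skips the database call and yields the empty `Blob` default, which callers read as "not found". A sketch of that guard with a hypothetical `NodeKeyDemo` stand-in type:

    type
      NodeKeyDemo = object           # hypothetical stand-in for NodeKey
        data: array[32, byte]

    proc init(nk: var NodeKeyDemo; key: openArray[byte]): bool =
      ## Accept exactly 32 bytes, mirroring the shape of the real guard
      if key.len != 32:
        return false
      for i in 0 .. 31:
        nk.data[i] = key[i]
      true

    when isMainModule:
      var nk: NodeKeyDemo
      assert not nk.init(@[byte 1, 2, 3])   # malformed key => no DB lookup at all
      assert nk.init(newSeq[byte](32))      # well-formed => maps into the sub-table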
@@ -92,10 +79,10 @@ proc bulkStorageAccounts*(
       let error = UnresolvedRepairNode
       trace "Unresolved node in repair table", error
       return err(error)
-    base.put(key.chainDbKey.toOpenArray, value.convertTo(Blob))
+    base.put(key.toAccountsKey.toOpenArray, value.convertTo(Blob))
   ok()
 
-proc bulkStorageStorages*(
+proc persistentStorageSlotsPut*(
     db: HexaryTreeDbRef;
     base: TrieDatabaseRef
       ): Result[void,HexaryDbError] =
@@ -108,11 +95,11 @@ proc bulkStorageStorages*(
       let error = UnresolvedRepairNode
       trace "Unresolved node in repair table", error
       return err(error)
-    base.put(key.storagesKey.toOpenArray, value.convertTo(Blob))
+    base.put(key.toStorageSlotsKey.toOpenArray, value.convertTo(Blob))
   ok()
 
 
-proc bulkStorageAccountsRocky*(
+proc persistentAccountsPut*(
     db: HexaryTreeDbRef;
     rocky: RocksStoreRef
       ): Result[void,HexaryDbError]
@@ -147,7 +134,7 @@ proc bulkStorageAccountsRocky*(
       let
        nodeKey = nodeTag.to(NodeKey)
        data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
-      if not bulker.add(nodeKey.toOpenArray, data):
+      if not bulker.add(nodeKey.toAccountsKey.toOpenArray, data):
        let error = AddBulkItemFailed
        trace "Rocky hexary bulk load failure",
          n, len=db.tab.len, error, info=bulker.lastError()
@@ -161,7 +148,7 @@ proc bulkStorageAccountsRocky*(
   ok()
 
 
-proc bulkStorageStoragesRocky*(
+proc persistentStorageSlotsPut*(
     db: HexaryTreeDbRef;
     rocky: RocksStoreRef
      ): Result[void,HexaryDbError]
@@ -196,7 +183,7 @@ proc bulkStorageStoragesRocky*(
       let
        nodeKey = nodeTag.to(NodeKey)
        data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
-      if not bulker.add(nodeKey.storagesKey.toOpenArray, data):
+      if not bulker.add(nodeKey.toStorageSlotsKey.toOpenArray, data):
        let error = AddBulkItemFailed
        trace "Rocky hexary bulk load failure",
          n, len=db.tab.len, error, info=bulker.lastError()
@@ -11,11 +11,11 @@
 import
   std/tables,
   chronicles,
-  eth/[common, p2p, rlp, trie/db],
+  eth/[common, p2p, rlp],
   ../../../protocol,
   ../../range_desc,
-  "."/[bulk_storage, hexary_desc, hexary_error, hexary_import, hexary_inspect,
-       hexary_interpolate, hexary_paths, snapdb_desc]
+  "."/[hexary_desc, hexary_error, hexary_import, hexary_inspect,
+       hexary_interpolate, hexary_paths, snapdb_desc, snapdb_persistent]
 
 {.push raises: [Defect].}
@@ -26,13 +26,10 @@ const
   extraTraceMessages = false or true
 
 type
-  GetAccFn = proc(accHash: Hash256, key: openArray[byte]): Blob {.gcsafe.}
-    ## The `get()` function for the storage trie depends on the current account
-
   SnapDbStorageSlotsRef* = ref object of SnapDbBaseRef
     peer: Peer                  ## For log messages
     accHash: Hash256            ## Accounts address hash (curr. unused)
-    getAccFn: GetAccFn          ## Persistent database `get()` closure
+    getClsFn: StorageSlotsGetFn ## Persistent database `get()` closure
 
 # ------------------------------------------------------------------------------
 # Private helpers
@@ -44,9 +41,9 @@ proc to(h: Hash256; T: type NodeKey): T =
 proc convertTo(data: openArray[byte]; T: type Hash256): T =
   discard result.data.NodeKey.init(data) # size error => zero
 
-proc getAccCls(ps: SnapDbStorageSlotsRef; accHash: Hash256): HexaryGetFn =
-  ## Fix `accHash` argument in `GetAccFn` closure => `HexaryGetFn`
-  result = proc(key: openArray[byte]): Blob = ps.getAccFn(accHash,key)
+proc getFn(ps: SnapDbStorageSlotsRef; accHash: Hash256): HexaryGetFn =
+  ## Lock the `accHash` argument into the `StorageSlotsGetFn` closure => `HexaryGetFn`
+  return proc(key: openArray[byte]): Blob = ps.getClsFn(accHash,key)
 
 
 template noKeyError(info: static[string]; code: untyped) =
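The `getFn` helper above is plain partial application: the two-argument `StorageSlotsGetFn` is specialised to the one-argument `HexaryGetFn` shape by locking the account hash inside a fresh closure. A minimal stand-alone version of the pattern (simplified types, not the repo's):

    type
      Blob = seq[byte]
      TwoArgGet = proc(acc: int; key: Blob): Blob   # stand-in for StorageSlotsGetFn
      OneArgGet = proc(key: Blob): Blob             # stand-in for HexaryGetFn

    proc lock(fn: TwoArgGet; acc: int): OneArgGet =
      ## Freeze the first argument; the result has the one-argument shape
      return proc(key: Blob): Blob = fn(acc, key)

    when isMainModule:
      let twoArg: TwoArgGet = proc(acc: int; key: Blob): Blob = @[acc.byte] & key
      let oneArg = twoArg.lock(7)
      assert oneArg(@[byte 1]) == @[byte 7, 1]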
@@ -88,10 +85,10 @@ proc persistentStorageSlots(
       {.gcsafe, raises: [Defect,OSError,KeyError].} =
   ## Store storage slots trie table on database
   if ps.rockDb.isNil:
-    let rc = db.bulkStorageStorages(ps.kvDb)
+    let rc = db.persistentStorageSlotsPut(ps.kvDb)
     if rc.isErr: return rc
   else:
-    let rc = db.bulkStorageStoragesRocky(ps.rockDb)
+    let rc = db.persistentStorageSlotsPut(ps.rockDb)
     if rc.isErr: return rc
   ok()
@@ -177,10 +174,7 @@ proc init*(
   result.init(pv, root.to(NodeKey))
   result.peer = peer
   result.accHash = account
-
-  # At the moment, the resulting `getAccFn()` is independent of `accHash`
-  result.getAccFn = proc(accHash: Hash256, key: openArray[byte]): Blob =
-    db.get(key)
+  result.getClsFn = db.persistentStorageSlotsGetFn()
 
 # ------------------------------------------------------------------------------
 # Public functions
@@ -364,7 +358,7 @@ proc inspectStorageSlotsTrie*(
   var stats: TrieNodeStat
   noRlpExceptionOops("inspectStorageSlotsTrie()"):
     if persistent:
-      stats = ps.getAccCls(ps.accHash).hexaryInspectTrie(ps.root, pathList)
+      stats = ps.getFn(ps.accHash).hexaryInspectTrie(ps.root, pathList)
     else:
       stats = ps.hexaDb.hexaryInspectTrie(ps.root, pathList)
@@ -410,7 +404,7 @@ proc getStorageSlotsNodeKey*(
   var rc: Result[NodeKey,void]
   noRlpExceptionOops("inspectAccountsPath()"):
     if persistent:
-      rc = ps.getAccCls(ps.accHash).hexaryInspectPath(ps.root, path)
+      rc = ps.getFn(ps.accHash).hexaryInspectPath(ps.root, path)
     else:
       rc = ps.hexaDb.hexaryInspectPath(ps.root, path)
   if rc.isOk:
@@ -443,7 +437,7 @@ proc getStorageSlotsData*(
   noRlpExceptionOops("getStorageSlotsData()"):
     var leaf: Blob
     if persistent:
-      leaf = path.hexaryPath(ps.root, ps.getAccCls(ps.accHash)).leafData
+      leaf = path.hexaryPath(ps.root, ps.getFn(ps.accHash)).leafData
     else:
      leaf = path.hexaryPath(ps.root.to(RepairKey),ps.hexaDb).leafData
|
@ -475,7 +469,7 @@ proc haveStorageSlotsData*(
|
|||
## Caveat: There is no unit test yet
|
||||
noGenericExOrKeyError("haveStorageSlotsData()"):
|
||||
if persistent:
|
||||
let getFn = ps.getAccCls(ps.accHash)
|
||||
let getFn = ps.getFn(ps.accHash)
|
||||
return 0 < ps.root.ByteArray32.getFn().len
|
||||
else:
|
||||
return ps.hexaDb.tab.hasKey(ps.root.to(RepairKey))
|
||||
|
|
|
@@ -108,7 +108,7 @@ import
   std/sequtils,
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p, trie/nibbles, trie/trie_defs, rlp],
+  eth/[common, p2p, trie/nibbles, trie/trie_defs, rlp],
   stew/[interval_set, keyed_queue],
   ../../../utils/prettify,
   ../../sync_desc,
@@ -18,7 +18,7 @@ import
   std/sequtils,
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p, trie/nibbles, trie/trie_defs, rlp],
+  eth/[common, p2p, trie/nibbles, trie/trie_defs, rlp],
   stew/[interval_set, keyed_queue],
   ../../../utils/prettify,
   ../../sync_desc,
@@ -31,7 +31,7 @@
 import
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/[interval_set, keyed_queue],
   stint,
   ../../sync_desc,
@@ -43,7 +43,7 @@
 import
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/[interval_set, keyed_queue],
   stint,
   ../../sync_desc,
@@ -13,7 +13,7 @@ import
   std/[strformat, strutils, times],
   chronos,
   chronicles,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stint,
   ../../../utils/prettify,
   ../../misc/timer_helper
@@ -9,11 +9,11 @@
 # except according to those terms.
 
 import
-  std/[hashes, sequtils, strutils],
-  eth/[common/eth_types, p2p],
-  stew/[byteutils, interval_set, keyed_queue],
-  "../.."/[constants, db/select_backend],
-  ".."/[sync_desc, types],
+  std/hashes,
+  eth/[common, p2p],
+  stew/[interval_set, keyed_queue],
+  ../../db/select_backend,
+  ../sync_desc,
   ./worker/[com/com_error, db/snapdb_desc, ticker],
   ./range_desc
@@ -85,15 +85,7 @@ const
     ## If set `true`, new peers will not change the pivot even if the
     ## negotiated pivot would be newer. This should be the default.
 
-  # -------
-
-  seenBlocksMax = 500
-    ## Internal size of LRU cache (for debugging)
-
 type
-  WorkerSeenBlocks = KeyedQueue[NodeKey,BlockNumber]
-    ## Temporary for pretty debugging, `BlockHash` keyed lru cache
-
   SnapSlotsQueue* = KeyedQueue[NodeKey,SnapSlotQueueItemRef]
     ## Handles list of storage slots data for fetch indexed by storage root.
     ##
@@ -159,7 +151,6 @@ type
 
   CtxData* = object
     ## Globally shared data extension
-    seenBlock: WorkerSeenBlocks       ## Temporary, debugging, pretty logs
     rng*: ref HmacDrbgContext         ## Random generator
     dbBackend*: ChainDB               ## Low level DB driver access (if any)
     pivotTable*: SnapPivotTable       ## Per state root environment
@@ -266,59 +257,6 @@ proc merge*(
   for w in reqList:
     q.merge w
 
-# ------------------------------------------------------------------------------
-# Public functions, debugging helpers (will go away eventually)
-# ------------------------------------------------------------------------------
-
-proc pp*(ctx: SnapCtxRef; bh: BlockHash): string =
-  ## Pretty printer for debugging
-  let rc = ctx.data.seenBlock.lruFetch(bh.Hash256.to(NodeKey))
-  if rc.isOk:
-    return "#" & $rc.value
-  "%" & $bh.to(Hash256).data.toHex
-
-proc pp*(ctx: SnapCtxRef; bh: BlockHash; bn: BlockNumber): string =
-  ## Pretty printer for debugging
-  let rc = ctx.data.seenBlock.lruFetch(bh.Hash256.to(NodeKey))
-  if rc.isOk:
-    return "#" & $rc.value
-  "#" & $ctx.data.seenBlock.lruAppend(bh.Hash256.to(NodeKey), bn, seenBlocksMax)
-
-proc pp*(ctx: SnapCtxRef; bhn: HashOrNum): string =
-  if not bhn.isHash:
-    return "#" & $bhn.number
-  let rc = ctx.data.seenBlock.lruFetch(bhn.hash.to(NodeKey))
-  if rc.isOk:
-    return "%" & $rc.value
-  return "%" & $bhn.hash.data.toHex
-
-proc seen*(ctx: SnapCtxRef; bh: BlockHash; bn: BlockNumber) =
-  ## Register for pretty printing
-  if not ctx.data.seenBlock.lruFetch(bh.Hash256.to(NodeKey)).isOk:
-    discard ctx.data.seenBlock.lruAppend(
-      bh.Hash256.to(NodeKey), bn, seenBlocksMax)
-
-proc pp*(a: MDigest[256]; collapse = true): string =
-  if not collapse:
-    a.data.mapIt(it.toHex(2)).join.toLowerAscii
-  elif a == EMPTY_ROOT_HASH:
-    "EMPTY_ROOT_HASH"
-  elif a == EMPTY_UNCLE_HASH:
-    "EMPTY_UNCLE_HASH"
-  elif a == EMPTY_SHA3:
-    "EMPTY_SHA3"
-  elif a == ZERO_HASH256:
-    "ZERO_HASH256"
-  else:
-    a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii
-
-proc pp*(bh: BlockHash): string =
-  "%" & bh.Hash256.pp
-
-proc pp*(bn: BlockNumber): string =
-  if bn == high(BlockNumber): "#high"
-  else: "#" & $bn
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
@@ -62,7 +62,7 @@
 ##
 ##
 ## Additional import files needed when using this template:
-## * eth/[common/eth_types, p2p]
+## * eth/[common, p2p]
 ## * chronicles
 ## * chronos
 ## * stew/[interval_set, sorted_set],
@@ -72,7 +72,7 @@
 import
   std/hashes,
   chronos,
-  eth/[common/eth_types, p2p, p2p/peer_pool, p2p/private/p2p_types],
+  eth/[common, p2p, p2p/peer_pool, p2p/private/p2p_types],
   stew/keyed_queue,
   "."/[handlers, sync_desc]
|
@ -15,7 +15,7 @@ import
|
|||
std/[algorithm, distros, hashes, math, os, sets,
|
||||
sequtils, strformat, strutils, tables, times],
|
||||
chronicles,
|
||||
eth/[common/eth_types, p2p, rlp, trie/db],
|
||||
eth/[common, p2p, rlp, trie/db],
|
||||
rocksdb,
|
||||
stint,
|
||||
stew/[byteutils, results],
|
||||
|
@@ -442,6 +442,25 @@ proc storagesRunner(
           OkStoDb
         check dbDesc.importStorageSlots(w.data, persistent).toStoDbRc == expRc
 
+    test &"Inspecting {storagesList.len} imported storages lists sub-tries":
+      let ignore = knownFailures.toTable
+      for n,w in storagesList:
+        let
+          testId = fileInfo & "#" & $n
+          errInx = if ignore.hasKey(testId): ignore[testId][0][0]
+                   else: high(int)
+        for m in 0 ..< w.data.storages.len:
+          let
+            accHash = w.data.storages[m].account.accHash
+            root = w.data.storages[m].account.storageRoot
+            dbDesc = SnapDbStorageSlotsRef.init(dbBase, accHash, root, peer)
+            rc = dbDesc.inspectStorageSlotsTrie(persistent=persistent)
+          if m == errInx:
+            check rc == Result[TrieNodeStat,HexaryDbError].err(TrieIsEmpty)
+          else:
+            check rc.isOk # ok => level > 0 and not stopped
+
+
 proc inspectionRunner(
     noisy = true;
     persistent = true;
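The new test drives expected failures from a `knownFailures` table keyed by test id; every other entry must inspect cleanly. A sketch of that bookkeeping (shape assumed from the hunk; the test id and tuple layout are illustrative only):

    import std/tables

    let knownFailures = {"sample0#7": @[(0, 0)]}.toTable   # hypothetical entry

    proc expectedErrIndex(testId: string): int =
      ## Index of the storages entry expected to fail, or high(int) for "none"
      if knownFailures.hasKey(testId): knownFailures[testId][0][0]
      else: high(int)

    when isMainModule:
      assert expectedErrIndex("sample0#7") == 0
      assert expectedErrIndex("sample0#8") == high(int)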
@@ -1108,7 +1127,7 @@ when isMainModule:
   #   value is mostly ignored but carried through.
   #
   # * `Proof`: There is a list of hexary nodes which allow to build a partial
-  #   Patricia-Mercle trie starting at the state root with all the account
+  #   Patricia-Merkle trie starting at the state root with all the account
   #   leaves. There are enough nodes that show that there is no account before
   #   the least account (which is currently ignored.)
   #
@@ -1128,10 +1147,7 @@ when isMainModule:
   # * Load/accumulate accounts (needs some unique sorting)
   # * Build/complete hexary trie for accounts
   # * Save/bulk-store hexary trie on disk. If rocksdb is available, data
-  #   are bulk stored via sst. An additional data set is stored in a table
-  #   with key prefix 200 using transactional `put()` (for time comparison.)
-  #   If there is no rocksdb, standard transactional `put()` is used, only
-  #   (no key prefix 200 storage.)
+  #   are bulk stored via sst.
   #
   # 3. Traverse trie nodes stored earlier. The accounts from test 2 are
   #    re-visited using the account hash as access path.