mirror of https://github.com/status-im/nimbus-eth1.git (synced 2025-01-13 13:55:45 +00:00)
Remodel persistent snapdb access (#1274)
* Re-model persistent database access

  why: Storage slots healing just ran on the wrong sub-trie (i.e. the wrong
       key mapping). So get/put and bulk functions now use the definitions in
       `snapdb_desc` (earlier there were some shortcuts for `get()`.)

* Fixes: missing return code, typo, redundant imports etc.

* Remove obsolete debugging directives from `worker_desc` module

* Correct failing unit tests for storage slots trie inspection

  why: Some pathological cases for the extended tests do not produce any
       hexary trie data. This is rightly detected by the trie inspection and
       the result checks needed to be adjusted.
parent 74a83c1229
commit c0d580715e
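The heart of the remodel: the accounts trie and the storage-slots tries share one persistent key-value store but use different key mappings, so a `get()` wired to the accounts mapping silently misses storage-slot nodes. A minimal self-contained sketch of the idea follows; the `0x05` prefix byte and the toy store are illustrative stand-ins, not the actual `storage_types` values.

```nim
import std/[tables, hashes]

type KvStore = Table[seq[byte], seq[byte]]  # toy stand-in for the persistent DB

# Accounts sub-trie: the 32 byte node key is used verbatim.
proc toAccountsKey(nodeKey: array[32, byte]): seq[byte] =
  @nodeKey

# Storage-slots sub-trie: same node key, but behind a one byte table
# prefix (the real mapping is `slotHashToSlotKey`, a 33 byte key).
proc toStorageSlotsKey(nodeKey: array[32, byte]): seq[byte] =
  @[byte 0x05] & @nodeKey

var db: KvStore
var nodeKey: array[32, byte]
nodeKey[0] = 0xab

# The same node key addresses two distinct records, one per sub-trie:
db[nodeKey.toAccountsKey] = @[byte 1]      # accounts trie node
db[nodeKey.toStorageSlotsKey] = @[byte 2]  # storage slots trie node
assert db[nodeKey.toAccountsKey] != db[nodeKey.toStorageSlotsKey]
```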
@@ -9,7 +9,7 @@
 # except according to those terms.

 import
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   chronicles,
   chronos,
   stew/[interval_set, sorted_set],
@@ -11,7 +11,7 @@
 import
   chronos,
   chronicles,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stint,
   ../../utils/prettify,
   ../misc/timer_helper
@@ -12,7 +12,7 @@ import
   std/[options],
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   ".."/[protocol, sync_desc],
   ../misc/[best_pivot, block_queue],
   ./ticker
@@ -28,7 +28,7 @@ proc notEnabled(name: string) =
 proc notImplemented(name: string) =
   debug "Wire handler method not implemented", meth = name

-method poolEnabled*(ctx: EthWireRef; ena: bool) =
+proc poolEnabled*(ctx: EthWireRef; ena: bool) =
   ctx.disablePool = not ena

 method getStatus*(ctx: EthWireRef): EthState {.gcsafe.} =
@@ -17,7 +17,7 @@ import
   std/[hashes, options, sets],
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/byteutils,
   ".."/[protocol, sync_desc, types]

@@ -60,7 +60,7 @@ import
   std/[algorithm, options, sequtils, strutils],
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/[byteutils, interval_set, sorted_set],
   "../.."/[db/db_chain, utils],
   ".."/[protocol, sync_desc, types]
@@ -62,10 +62,10 @@
 ## the current best block disappears and be able to reduce block number.

 import
-  std/bitops,
+  std/[bitops, sequtils, strutils],
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p, p2p/private/p2p_types],
+  eth/[common, p2p, p2p/private/p2p_types],
   "../.."/[constants, genesis, p2p/chain/chain_desc],
   ".."/[protocol, sync_desc, types],
   ../snap/worker_desc
@@ -170,6 +170,29 @@ static:
 # Private logging helpers
 # ------------------------------------------------------------------------------

+proc pp(a: MDigest[256]; collapse = true): string =
+  if not collapse:
+    a.data.mapIt(it.toHex(2)).join.toLowerAscii
+  elif a == EMPTY_ROOT_HASH:
+    "EMPTY_ROOT_HASH"
+  elif a == EMPTY_UNCLE_HASH:
+    "EMPTY_UNCLE_HASH"
+  elif a == EMPTY_SHA3:
+    "EMPTY_SHA3"
+  elif a == ZERO_HASH256:
+    "ZERO_HASH256"
+  else:
+    a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii
+
+proc pp(bh: BlockHash): string =
+  "%" & $bh.Hash256.pp
+
+proc pp(bn: BlockNumber): string =
+  "#" & $bn
+
+proc pp(bhn: HashOrNum): string =
+  if bhn.isHash: bhn.hash.pp else: bhn.number.pp
+
 proc traceSyncLocked(
     sp: SnapPivotWorkerRef;
     num: BlockNumber;
@@ -178,7 +201,7 @@ proc traceSyncLocked(
   ## Trace messages when peer canonical head is confirmed or updated.
   let
     peer = sp.peer
-    bestBlock = sp.global.ctx.pp(hash, num)
+    bestBlock = num.pp
   if sp.syncMode != SyncLocked:
     debug "Now tracking chain head of peer", peer,
       bestBlock
@@ -207,7 +230,7 @@ proc lockSyncStateAndFetch(sp: SnapPivotWorkerRef; header: BlockHeader) =
     peer = sp.peer
     stateRoot = header.stateRoot
     hash = header.blockHash.BlockHash
-    thisBlock = sp.global.ctx.pp(hash, header.blockNumber)
+    thisBlock = header.blockNumber.pp

   sp.traceSyncLocked(header.blockNumber, hash)
   sp.bestNumber = header.blockNumber
@@ -456,7 +479,6 @@ proc peerSyncChainEmptyReply(sp: SnapPivotWorkerRef; request: BlocksRequest) =
       if lowestAbsent == 0.toBlockNumber: lowestAbsent
       else: lowestAbsent - 1.toBlockNumber
   sp.bestHash = default(typeof(sp.bestHash))
-  sp.global.ctx.seen(sp.bestHash,sp.bestNumber)


 proc peerSyncChainNonEmptyReply(
@@ -514,7 +536,6 @@ proc peerSyncChainNonEmptyReply(
   if highestPresent > sp.bestNumber:
     sp.bestNumber = highestPresent
     sp.bestHash = headers[highestIndex].blockHash.BlockHash
-    sp.global.ctx.seen(sp.bestHash,sp.bestNumber)

 # ------------------------------------------------------------------------------
 # Public functions, constructor
@@ -584,7 +605,7 @@ proc pivotNegotiate*(
   let request = sp.peerSyncChainRequest
   trace trEthSendSendingGetBlockHeaders, peer,
     count=request.maxResults,
-    startBlock=sp.global.ctx.pp(request.startBlock), step=request.traceStep
+    startBlock=request.startBlock.pp, step=request.traceStep

   inc sp.global.stats.ok.getBlockHeaders
   var reply: Option[protocol.blockHeadersObj]
@@ -253,7 +253,7 @@ p2pProtocol snap1(version = 1,
   proc getAccountRange(peer: Peer, rootHash: Hash256, origin: Hash256,
                        limit: Hash256, responseBytes: uint64) =
     trace trSnapRecvReceived & "GetAccountRange (0x00)", peer,
-      accountRange=(origin,limit), stateRoot=($rootHash), responseBytes
+      accountRange=[origin,limit], stateRoot=($rootHash), responseBytes

     trace trSnapSendReplying & "EMPTY AccountRange (0x01)", peer, sent=0
     await response.send(@[], @[])
@@ -9,7 +9,7 @@
 # except according to those terms.

 import
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   chronicles,
   chronos,
   ../db/select_backend,
@@ -185,6 +185,7 @@ proc isEmpty*(lrs: openArray[NodeTagRangeSet]): bool =
   for ivSet in lrs:
     if 0 < ivSet.total or 0 < ivSet.chunks:
       return false
+  true

 proc isFull*(lrs: NodeTagRangeSet): bool =
   ## Returns `true` if the argument set `lrs` contains of the single
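The added `true` is the "missing return code" from the commit message: a Nim proc that falls off the end without setting `result` returns the type's default, so `isEmpty` reported `false` even for an all-empty argument. A standalone illustration of the pitfall, using a toy proc rather than the actual `NodeTagRangeSet` code:

```nim
proc isEmptyBroken(xs: openArray[int]): bool =
  for x in xs:
    if x != 0:
      return false
  # falls through: `result` keeps its default value, i.e. false

proc isEmptyFixed(xs: openArray[int]): bool =
  for x in xs:
    if x != 0:
      return false
  true  # explicit result for the empty/all-zero case

let noItems: seq[int] = @[]
assert not isEmptyBroken(noItems)  # wrong answer
assert isEmptyFixed(noItems)       # correct
```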
@@ -15,7 +15,7 @@
 import
   std/sequtils,
   chronos,
-  eth/[common/eth_types, p2p, trie/trie_defs],
+  eth/[common, p2p, trie/trie_defs],
   stew/interval_set,
   "../../.."/[protocol, protocol/trace_config],
   "../.."/[range_desc, worker_desc],
@@ -11,7 +11,7 @@
 import
   std/options,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   "../../.."/[protocol, types],
   ../../worker_desc,
   ./com_error
@@ -13,7 +13,7 @@
 import
   std/[options, sequtils],
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   "../../.."/[protocol, protocol/trace_config],
   "../.."/[range_desc, worker_desc],
   ./com_error
@@ -12,7 +12,7 @@
 import
   std/[options, sequtils],
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/interval_set,
   "../../.."/[protocol, protocol/trace_config],
   "../.."/[range_desc, worker_desc],
@@ -11,7 +11,7 @@
 import
   std/[options, sequtils],
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   "../../.."/[protocol, protocol/trace_config],
   ../../worker_desc,
   ./com_error
@@ -10,7 +10,7 @@

 import
   std/[algorithm, hashes, sequtils, sets, strutils, tables],
-  eth/[common/eth_types, p2p, trie/nibbles],
+  eth/[common, p2p, trie/nibbles],
   stint,
   ../../range_desc,
   ./hexary_error
@@ -10,7 +10,7 @@

 import
   std/[sequtils, sets, strutils, tables],
-  eth/[common/eth_types_rlp, trie/nibbles],
+  eth/[common, trie/nibbles],
   stew/results,
   ../../range_desc,
   "."/[hexary_desc, hexary_error]
@@ -11,7 +11,7 @@
 import
   std/[hashes, sequtils, sets, tables],
   chronicles,
-  eth/[common/eth_types_rlp, trie/nibbles],
+  eth/[common, trie/nibbles],
   stew/results,
   ../../range_desc,
   "."/[hexary_desc, hexary_paths]
@@ -16,7 +16,7 @@

 import
   std/[sequtils, sets, strutils, tables],
-  eth/[common/eth_types, trie/nibbles],
+  eth/[common, trie/nibbles],
   stew/results,
   ../../range_desc,
   "."/[hexary_desc, hexary_error, hexary_paths]
@@ -12,7 +12,7 @@

 import
   std/[tables],
-  eth/[common/eth_types_rlp, trie/nibbles],
+  eth/[common, trie/nibbles],
   ../../range_desc,
   ./hexary_desc

@@ -11,11 +11,11 @@
 import
   std/[algorithm, sequtils, strutils, tables],
   chronicles,
-  eth/[common, p2p, rlp, trie/nibbles, trie/db],
+  eth/[common, p2p, rlp, trie/nibbles],
   stew/byteutils,
   ../../range_desc,
-  "."/[bulk_storage, hexary_desc, hexary_error, hexary_import,
-       hexary_interpolate, hexary_inspect, hexary_paths, snapdb_desc]
+  "."/[hexary_desc, hexary_error, hexary_import, hexary_interpolate,
+       hexary_inspect, hexary_paths, snapdb_desc, snapdb_persistent]

 {.push raises: [Defect].}

@@ -25,7 +25,7 @@ logScope:
 type
   SnapDbAccountsRef* = ref object of SnapDbBaseRef
     peer: Peer              ## For log messages
-    getFn: HexaryGetFn      ## Persistent database `get()` closure
+    getClsFn: AccountsGetFn ## Persistent database `get()` closure

 const
   extraTraceMessages = false or true
@@ -40,6 +40,11 @@ proc to(h: Hash256; T: type NodeKey): T =
 proc convertTo(data: openArray[byte]; T: type Hash256): T =
   discard result.data.NodeKey.init(data) # size error => zero

+proc getFn(ps: SnapDbAccountsRef): HexaryGetFn =
+  ## Derive a `HexaryGetFn` from the `GetClsFn` closure. The reason for this
+  ## seemingly redundant mapping is that it leaves space for additional
+  ## localised and locked parameters, as done with the `StorageSlotsGetFn`.
+  return proc(key: openArray[byte]): Blob = ps.getClsFn(key)

 template noKeyError(info: static[string]; code: untyped) =
   try:
@@ -70,10 +75,10 @@ proc persistentAccounts(
       {.gcsafe, raises: [Defect,OSError,KeyError].} =
   ## Store accounts trie table on database
   if ps.rockDb.isNil:
-    let rc = db.bulkStorageAccounts(ps.kvDb)
+    let rc = db.persistentAccountsPut(ps.kvDb)
     if rc.isErr: return rc
   else:
-    let rc = db.bulkStorageAccountsRocky(ps.rockDb)
+    let rc = db.persistentAccountsPut(ps.rockDb)
     if rc.isErr: return rc
   ok()

@@ -143,7 +148,7 @@ proc init*(
   new result
   result.init(pv, root.to(NodeKey))
   result.peer = peer
-  result.getFn = proc(key: openArray[byte]): Blob = db.get(key)
+  result.getClsFn = db.persistentAccountsGetFn()

 proc dup*(
     ps: SnapDbAccountsRef;
@@ -456,7 +461,7 @@ proc nextAccountsChainDbKey*(
     accHash: Hash256;
       ): Result[Hash256,HexaryDbError] =
   ## Fetch the account path on the `BaseChainDB`, the one next to the
-  ## argument account.
+  ## argument account key.
   noRlpExceptionOops("getChainDbAccount()"):
     let path = accHash.to(NodeKey)
                       .hexaryPath(ps.root, ps.getFn)
@@ -11,17 +11,20 @@
 import
   std/[sequtils, tables],
   chronicles,
-  eth/[common/eth_types, p2p, trie/db],
-  ../../../../db/select_backend,
+  eth/[common, p2p, trie/db],
+  ../../../../db/[select_backend, storage_types],
   ../../range_desc,
-  "."/[bulk_storage, hexary_desc, hexary_error, hexary_import, hexary_paths,
-       rocky_bulk_load]
+  "."/[hexary_desc, hexary_error, hexary_import, hexary_paths, rocky_bulk_load]

 {.push raises: [Defect].}

 logScope:
   topics = "snap-db"

+const
+  RockyBulkCache* = "accounts.sst"
+    ## Name of temporary file to accommodate SST records for `rocksdb`

 type
   SnapDbRef* = ref object
     ## Global, re-usable descriptor
|
|||||||
proc pp*(a: NodeTag; ps: SnapDbBaseRef): string =
|
proc pp*(a: NodeTag; ps: SnapDbBaseRef): string =
|
||||||
a.to(NodeKey).pp(ps)
|
a.to(NodeKey).pp(ps)
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Private helper
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc clearRockyCacheFile(rocky: RocksStoreRef): bool =
|
||||||
|
if not rocky.isNil:
|
||||||
|
# A cache file might hang about from a previous crash
|
||||||
|
try:
|
||||||
|
discard rocky.clearCacheFile(RockyBulkCache)
|
||||||
|
return true
|
||||||
|
except OSError as e:
|
||||||
|
error "Cannot clear rocksdb cache", exception=($e.name), msg=e.msg
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public constructor
|
# Public constructor
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
@@ -97,7 +113,7 @@ proc init*(
       ): T =
   ## Variant of `init()` allowing bulk import on rocksdb backend
   result = T(db: db.trieDB, rocky: db.rocksStoreRef)
-  if not result.rocky.bulkStorageClearRockyCacheFile():
+  if not result.rocky.clearRockyCacheFile():
     result.rocky = nil

 proc init*(
@@ -157,6 +173,22 @@ proc kvDb*(pv: SnapDbRef): TrieDatabaseRef =
   ## Getter, low level access to underlying persistent key-value DB
   pv.db

+# ------------------------------------------------------------------------------
+# Public functions, select sub-tables for persistent storage
+# ------------------------------------------------------------------------------
+
+proc toAccountsKey*(a: NodeKey): ByteArray32 =
+  a.ByteArray32
+
+proc toStorageSlotsKey*(a: NodeKey): ByteArray33 =
+  a.ByteArray32.slotHashToSlotKey.data
+
+template toOpenArray*(k: ByteArray32): openArray[byte] =
+  k.toOpenArray(0, 31)
+
+template toOpenArray*(k: ByteArray33): openArray[byte] =
+  k.toOpenArray(0, 32)
+
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------
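The `toOpenArray` templates above exist so the fixed-size sub-table keys can be handed to the key-value store API without copying. A small sketch of the pattern; the `dbGet` stand-in is hypothetical:

```nim
type
  ByteArray32 = array[32, byte]

# Zero-copy view over the whole fixed-size key; the three-argument
# `system.toOpenArray` does the actual slicing.
template toOpenArray(k: ByteArray32): openArray[byte] =
  k.toOpenArray(0, 31)

proc dbGet(key: openArray[byte]): int =
  # hypothetical backend call; only the key length matters here
  key.len

var key: ByteArray32
assert dbGet(key.toOpenArray) == 32
```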
@@ -9,29 +9,29 @@
 # except according to those terms.

 import
-  std/[algorithm, strutils, tables],
+  std/[algorithm, tables],
   chronicles,
   eth/[common, trie/db],
-  ../../../../db/[kvstore_rocksdb, storage_types],
-  ../../../types,
+  ../../../../db/kvstore_rocksdb,
   ../../range_desc,
-  "."/[hexary_desc, hexary_error, rocky_bulk_load]
+  "."/[hexary_desc, hexary_error, rocky_bulk_load, snapdb_desc]

 {.push raises: [Defect].}

 logScope:
   topics = "snap-db"

-const
-  RockyBulkCache = "accounts.sst"
+type
+  AccountsGetFn* = proc(key: openArray[byte]): Blob {.gcsafe.}
+    ## The `get()` function for the accounts trie

+  StorageSlotsGetFn* = proc(acc: Hash256, key: openArray[byte]): Blob {.gcsafe.}
+    ## The `get()` function for the storage trie depends on the current account

 # ------------------------------------------------------------------------------
 # Private helpers
 # ------------------------------------------------------------------------------

-proc to(tag: NodeTag; T: type RepairKey): T =
-  tag.to(NodeKey).to(RepairKey)
-
 proc convertTo(key: RepairKey; T: type NodeKey): T =
   ## Might be lossy, check before use
   discard result.init(key.ByteArray33[1 .. 32])
@@ -40,46 +40,33 @@ proc convertTo(key: RepairKey; T: type NodeTag): T =
   ## Might be lossy, check before use
   UInt256.fromBytesBE(key.ByteArray33[1 .. 32]).T

-# ------------------------------------------------------------------------------
-# Private helpers for bulk load testing
-# ------------------------------------------------------------------------------
+proc toAccountsKey(a: RepairKey): ByteArray32 =
+  a.convertTo(NodeKey).toAccountsKey

-proc chainDbKey(a: RepairKey): ByteArray32 =
-  a.convertTo(NodeKey).ByteArray32
-
-proc storagesKey(a: NodeKey): ByteArray33 =
-  a.ByteArray32.slotHashToSlotKey.data
-
-proc storagesKey(a: RepairKey): ByteArray33 =
-  a.convertTo(NodeKey).storagesKey
-
-template toOpenArray*(k: ByteArray32): openArray[byte] =
-  k.toOpenArray(0, 31)
-
-template toOpenArray*(k: NodeKey): openArray[byte] =
-  k.ByteArray32.toOpenArray
-
-template toOpenArray*(k: ByteArray33): openArray[byte] =
-  k.toOpenArray(0, 32)
+proc toStorageSlotsKey(a: RepairKey): ByteArray33 =
+  a.convertTo(NodeKey).toStorageSlotsKey

 # ------------------------------------------------------------------------------
-# Public helperd
+# Public functions: get
 # ------------------------------------------------------------------------------

-proc bulkStorageClearRockyCacheFile*(rocky: RocksStoreRef): bool =
-  if not rocky.isNil:
-    # A cache file might hang about from a previous crash
-    try:
-      discard rocky.clearCacheFile(RockyBulkCache)
-      return true
-    except OSError as e:
-      error "Cannot clear rocksdb cache", exception=($e.name), msg=e.msg
+proc persistentAccountsGetFn*(db: TrieDatabaseRef): AccountsGetFn =
+  return proc(key: openArray[byte]): Blob =
+    var nodeKey: NodeKey
+    if nodeKey.init(key):
+      return db.get(nodeKey.toAccountsKey.toOpenArray)
+
+proc persistentStorageSlotsGetFn*(db: TrieDatabaseRef): StorageSlotsGetFn =
+  return proc(accHash: Hash256; key: openArray[byte]): Blob =
+    var nodeKey: NodeKey
+    if nodeKey.init(key):
+      return db.get(nodeKey.toStorageSlotsKey.toOpenArray)

 # ------------------------------------------------------------------------------
-# Public bulk store examples
+# Public functions: store/put
 # ------------------------------------------------------------------------------

-proc bulkStorageAccounts*(
+proc persistentAccountsPut*(
     db: HexaryTreeDbRef;
     base: TrieDatabaseRef
       ): Result[void,HexaryDbError] =
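The two factory functions above replace the earlier `db.get(key)` shortcuts: each returns a closure that owns the database handle and applies the right sub-table key mapping before the lookup. A reduced sketch of the pattern against a toy store; the names mirror the diff, but the backend type is invented:

```nim
import std/[tables, hashes]

type
  Blob = seq[byte]
  ToyDb = ref object
    tab: Table[Blob, Blob]            # invented stand-in for TrieDatabaseRef
  AccountsGetFn = proc(key: openArray[byte]): Blob {.gcsafe.}

proc persistentAccountsGetFn(db: ToyDb): AccountsGetFn =
  # The returned closure captures `db`, so callers only ever see a plain
  # `key -> Blob` function with the sub-table mapping baked in.
  return proc(key: openArray[byte]): Blob =
    db.tab.getOrDefault(@key)         # real code maps via toAccountsKey first

let db = ToyDb()
db.tab[@[byte 1, 2]] = @[byte 9]
let getFn = db.persistentAccountsGetFn()
assert getFn([byte 1, 2]) == @[byte 9]
assert getFn([byte 3]).len == 0       # missing key yields an empty Blob
```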
@@ -92,10 +79,10 @@ proc bulkStorageAccounts*(
       let error = UnresolvedRepairNode
       trace "Unresolved node in repair table", error
       return err(error)
-    base.put(key.chainDbKey.toOpenArray, value.convertTo(Blob))
+    base.put(key.toAccountsKey.toOpenArray, value.convertTo(Blob))
   ok()

-proc bulkStorageStorages*(
+proc persistentStorageSlotsPut*(
     db: HexaryTreeDbRef;
     base: TrieDatabaseRef
       ): Result[void,HexaryDbError] =
|
|||||||
let error = UnresolvedRepairNode
|
let error = UnresolvedRepairNode
|
||||||
trace "Unresolved node in repair table", error
|
trace "Unresolved node in repair table", error
|
||||||
return err(error)
|
return err(error)
|
||||||
base.put(key.storagesKey.toOpenArray, value.convertTo(Blob))
|
base.put(key.toStorageSlotsKey.toOpenArray, value.convertTo(Blob))
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
|
|
||||||
proc bulkStorageAccountsRocky*(
|
proc persistentAccountsPut*(
|
||||||
db: HexaryTreeDbRef;
|
db: HexaryTreeDbRef;
|
||||||
rocky: RocksStoreRef
|
rocky: RocksStoreRef
|
||||||
): Result[void,HexaryDbError]
|
): Result[void,HexaryDbError]
|
||||||
@ -147,7 +134,7 @@ proc bulkStorageAccountsRocky*(
|
|||||||
let
|
let
|
||||||
nodeKey = nodeTag.to(NodeKey)
|
nodeKey = nodeTag.to(NodeKey)
|
||||||
data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
|
data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
|
||||||
if not bulker.add(nodeKey.toOpenArray, data):
|
if not bulker.add(nodeKey.toAccountsKey.toOpenArray, data):
|
||||||
let error = AddBulkItemFailed
|
let error = AddBulkItemFailed
|
||||||
trace "Rocky hexary bulk load failure",
|
trace "Rocky hexary bulk load failure",
|
||||||
n, len=db.tab.len, error, info=bulker.lastError()
|
n, len=db.tab.len, error, info=bulker.lastError()
|
||||||
@ -161,7 +148,7 @@ proc bulkStorageAccountsRocky*(
|
|||||||
ok()
|
ok()
|
||||||
|
|
||||||
|
|
||||||
proc bulkStorageStoragesRocky*(
|
proc persistentStorageSlotsPut*(
|
||||||
db: HexaryTreeDbRef;
|
db: HexaryTreeDbRef;
|
||||||
rocky: RocksStoreRef
|
rocky: RocksStoreRef
|
||||||
): Result[void,HexaryDbError]
|
): Result[void,HexaryDbError]
|
||||||
@@ -196,7 +183,7 @@ proc bulkStorageStoragesRocky*(
     let
       nodeKey = nodeTag.to(NodeKey)
      data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
-    if not bulker.add(nodeKey.storagesKey.toOpenArray, data):
+    if not bulker.add(nodeKey.toStorageSlotsKey.toOpenArray, data):
       let error = AddBulkItemFailed
       trace "Rocky hexary bulk load failure",
         n, len=db.tab.len, error, info=bulker.lastError()
@@ -11,11 +11,11 @@
 import
   std/tables,
   chronicles,
-  eth/[common, p2p, rlp, trie/db],
+  eth/[common, p2p, rlp],
   ../../../protocol,
   ../../range_desc,
-  "."/[bulk_storage, hexary_desc, hexary_error, hexary_import, hexary_inspect,
-       hexary_interpolate, hexary_paths, snapdb_desc]
+  "."/[hexary_desc, hexary_error, hexary_import, hexary_inspect,
+       hexary_interpolate, hexary_paths, snapdb_desc, snapdb_persistent]

 {.push raises: [Defect].}

@@ -26,13 +26,10 @@ const
   extraTraceMessages = false or true

 type
-  GetAccFn = proc(accHash: Hash256, key: openArray[byte]): Blob {.gcsafe.}
-    ## The `get()` function for the storage trie depends on the current account
-
   SnapDbStorageSlotsRef* = ref object of SnapDbBaseRef
     peer: Peer                  ## For log messages
     accHash: Hash256            ## Accounts address hash (curr.unused)
-    getAccFn: GetAccFn          ## Persistent database `get()` closure
+    getClsFn: StorageSlotsGetFn ## Persistent database `get()` closure

 # ------------------------------------------------------------------------------
 # Private helpers
@@ -44,9 +41,9 @@ proc to(h: Hash256; T: type NodeKey): T =
 proc convertTo(data: openArray[byte]; T: type Hash256): T =
   discard result.data.NodeKey.init(data) # size error => zero

-proc getAccCls(ps: SnapDbStorageSlotsRef; accHash: Hash256): HexaryGetFn =
-  ## Fix `accHash` argument in `GetAccFn` closure => `HexaryGetFn`
-  result = proc(key: openArray[byte]): Blob = ps.getAccFn(accHash,key)
+proc getFn(ps: SnapDbStorageSlotsRef; accHash: Hash256): HexaryGetFn =
+  ## Lock `accHash` argument into `GetClsFn` closure => `HexaryGetFn`
+  return proc(key: openArray[byte]): Blob = ps.getClsFn(accHash,key)


 template noKeyError(info: static[string]; code: untyped) =
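The `getFn` above is the partial-application step the doc comment describes: the two-argument `StorageSlotsGetFn` is narrowed to the one-argument `HexaryGetFn` shape by locking the account hash into a fresh closure. Reduced to its essence, with local stand-in types:

```nim
type
  Blob = seq[byte]
  Hash256 = array[32, byte]        # stand-in for eth_types' Hash256
  StorageSlotsGetFn = proc(acc: Hash256; key: openArray[byte]): Blob {.gcsafe.}
  HexaryGetFn = proc(key: openArray[byte]): Blob {.gcsafe.}

proc lockTo(fn: StorageSlotsGetFn; acc: Hash256): HexaryGetFn =
  # Partial application: `acc` is fixed once, only `key` remains free.
  return proc(key: openArray[byte]): Blob = fn(acc, key)

# Hypothetical two-argument getter, standing in for the persistent one:
let slotsGet: StorageSlotsGetFn =
  proc(acc: Hash256; key: openArray[byte]): Blob = @[acc[0]] & @key

var acc: Hash256
acc[0] = 7
let getFn = slotsGet.lockTo(acc)
assert getFn([byte 1]) == @[byte 7, 1]
```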
@@ -88,10 +85,10 @@ proc persistentStorageSlots(
       {.gcsafe, raises: [Defect,OSError,KeyError].} =
   ## Store accounts trie table on database
   if ps.rockDb.isNil:
-    let rc = db.bulkStorageStorages(ps.kvDb)
+    let rc = db.persistentStorageSlotsPut(ps.kvDb)
     if rc.isErr: return rc
   else:
-    let rc = db.bulkStorageStoragesRocky(ps.rockDb)
+    let rc = db.persistentStorageSlotsPut(ps.rockDb)
     if rc.isErr: return rc
   ok()

@@ -177,10 +174,7 @@ proc init*(
   result.init(pv, root.to(NodeKey))
   result.peer = peer
   result.accHash = account
-
-  # At the moment, the resulting `getAccFn()` is independent of `accHash`
-  result.getAccFn = proc(accHash: Hash256, key: openArray[byte]): Blob =
-    db.get(key)
+  result.getClsFn = db.persistentStorageSlotsGetFn()

 # ------------------------------------------------------------------------------
 # Public functions
@@ -364,7 +358,7 @@ proc inspectStorageSlotsTrie*(
   var stats: TrieNodeStat
   noRlpExceptionOops("inspectStorageSlotsTrie()"):
     if persistent:
-      stats = ps.getAccCls(ps.accHash).hexaryInspectTrie(ps.root, pathList)
+      stats = ps.getFn(ps.accHash).hexaryInspectTrie(ps.root, pathList)
     else:
       stats = ps.hexaDb.hexaryInspectTrie(ps.root, pathList)

@@ -410,7 +404,7 @@ proc getStorageSlotsNodeKey*(
   var rc: Result[NodeKey,void]
   noRlpExceptionOops("inspectAccountsPath()"):
     if persistent:
-      rc = ps.getAccCls(ps.accHash).hexaryInspectPath(ps.root, path)
+      rc = ps.getFn(ps.accHash).hexaryInspectPath(ps.root, path)
     else:
       rc = ps.hexaDb.hexaryInspectPath(ps.root, path)
   if rc.isOk:
@@ -443,7 +437,7 @@ proc getStorageSlotsData*(
   noRlpExceptionOops("getStorageSlotsData()"):
     var leaf: Blob
     if persistent:
-      leaf = path.hexaryPath(ps.root, ps.getAccCls(ps.accHash)).leafData
+      leaf = path.hexaryPath(ps.root, ps.getFn(ps.accHash)).leafData
     else:
       leaf = path.hexaryPath(ps.root.to(RepairKey),ps.hexaDb).leafData

@@ -475,7 +469,7 @@ proc haveStorageSlotsData*(
   ## Caveat: There is no unit test yet
   noGenericExOrKeyError("haveStorageSlotsData()"):
     if persistent:
-      let getFn = ps.getAccCls(ps.accHash)
+      let getFn = ps.getFn(ps.accHash)
       return 0 < ps.root.ByteArray32.getFn().len
     else:
       return ps.hexaDb.tab.hasKey(ps.root.to(RepairKey))
@@ -108,7 +108,7 @@ import
   std/sequtils,
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p, trie/nibbles, trie/trie_defs, rlp],
+  eth/[common, p2p, trie/nibbles, trie/trie_defs, rlp],
   stew/[interval_set, keyed_queue],
   ../../../utils/prettify,
   ../../sync_desc,
@@ -18,7 +18,7 @@ import
   std/sequtils,
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p, trie/nibbles, trie/trie_defs, rlp],
+  eth/[common, p2p, trie/nibbles, trie/trie_defs, rlp],
   stew/[interval_set, keyed_queue],
   ../../../utils/prettify,
   ../../sync_desc,
@@ -31,7 +31,7 @@
 import
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/[interval_set, keyed_queue],
   stint,
   ../../sync_desc,
@@ -43,7 +43,7 @@
 import
   chronicles,
   chronos,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stew/[interval_set, keyed_queue],
   stint,
   ../../sync_desc,
@@ -13,7 +13,7 @@ import
   std/[strformat, strutils, times],
   chronos,
   chronicles,
-  eth/[common/eth_types, p2p],
+  eth/[common, p2p],
   stint,
   ../../../utils/prettify,
   ../../misc/timer_helper
@@ -9,11 +9,11 @@
 # except according to those terms.

 import
-  std/[hashes, sequtils, strutils],
-  eth/[common/eth_types, p2p],
-  stew/[byteutils, interval_set, keyed_queue],
-  "../.."/[constants, db/select_backend],
-  ".."/[sync_desc, types],
+  std/hashes,
+  eth/[common, p2p],
+  stew/[interval_set, keyed_queue],
+  ../../db/select_backend,
+  ../sync_desc,
   ./worker/[com/com_error, db/snapdb_desc, ticker],
   ./range_desc

@@ -85,15 +85,7 @@ const
   ## If set `true`, new peers will not change the pivot even if the
   ## negotiated pivot would be newer. This should be the default.

-  # -------
-
-  seenBlocksMax = 500
-    ## Internal size of LRU cache (for debugging)
-
 type
-  WorkerSeenBlocks = KeyedQueue[NodeKey,BlockNumber]
-    ## Temporary for pretty debugging, `BlockHash` keyed lru cache
-
   SnapSlotsQueue* = KeyedQueue[NodeKey,SnapSlotQueueItemRef]
     ## Handles list of storage slots data for fetch indexed by storage root.
     ##
@@ -159,7 +151,6 @@ type

   CtxData* = object
     ## Globally shared data extension
-    seenBlock: WorkerSeenBlocks   ## Temporary, debugging, pretty logs
     rng*: ref HmacDrbgContext     ## Random generator
     dbBackend*: ChainDB           ## Low level DB driver access (if any)
     pivotTable*: SnapPivotTable   ## Per state root environment
@@ -266,59 +257,6 @@ proc merge*(
   for w in reqList:
     q.merge w

-# ------------------------------------------------------------------------------
-# Public functions, debugging helpers (will go away eventually)
-# ------------------------------------------------------------------------------
-
-proc pp*(ctx: SnapCtxRef; bh: BlockHash): string =
-  ## Pretty printer for debugging
-  let rc = ctx.data.seenBlock.lruFetch(bh.Hash256.to(NodeKey))
-  if rc.isOk:
-    return "#" & $rc.value
-  "%" & $bh.to(Hash256).data.toHex
-
-proc pp*(ctx: SnapCtxRef; bh: BlockHash; bn: BlockNumber): string =
-  ## Pretty printer for debugging
-  let rc = ctx.data.seenBlock.lruFetch(bh.Hash256.to(NodeKey))
-  if rc.isOk:
-    return "#" & $rc.value
-  "#" & $ctx.data.seenBlock.lruAppend(bh.Hash256.to(NodeKey), bn, seenBlocksMax)
-
-proc pp*(ctx: SnapCtxRef; bhn: HashOrNum): string =
-  if not bhn.isHash:
-    return "#" & $bhn.number
-  let rc = ctx.data.seenBlock.lruFetch(bhn.hash.to(NodeKey))
-  if rc.isOk:
-    return "%" & $rc.value
-  return "%" & $bhn.hash.data.toHex
-
-proc seen*(ctx: SnapCtxRef; bh: BlockHash; bn: BlockNumber) =
-  ## Register for pretty printing
-  if not ctx.data.seenBlock.lruFetch(bh.Hash256.to(NodeKey)).isOk:
-    discard ctx.data.seenBlock.lruAppend(
-      bh.Hash256.to(NodeKey), bn, seenBlocksMax)
-
-proc pp*(a: MDigest[256]; collapse = true): string =
-  if not collapse:
-    a.data.mapIt(it.toHex(2)).join.toLowerAscii
-  elif a == EMPTY_ROOT_HASH:
-    "EMPTY_ROOT_HASH"
-  elif a == EMPTY_UNCLE_HASH:
-    "EMPTY_UNCLE_HASH"
-  elif a == EMPTY_SHA3:
-    "EMPTY_SHA3"
-  elif a == ZERO_HASH256:
-    "ZERO_HASH256"
-  else:
-    a.data.mapIt(it.toHex(2)).join[56 .. 63].toLowerAscii
-
-proc pp*(bh: BlockHash): string =
-  "%" & bh.Hash256.pp
-
-proc pp*(bn: BlockNumber): string =
-  if bn == high(BlockNumber): "#high"
-  else: "#" & $bn
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------
@@ -62,7 +62,7 @@
 ##
 ##
 ## Additional import files needed when using this template:
-## * eth/[common/eth_types, p2p]
+## * eth/[common, p2p]
 ## * chronicles
 ## * chronos
 ## * stew/[interval_set, sorted_set],
@@ -72,7 +72,7 @@
 import
   std/hashes,
   chronos,
-  eth/[common/eth_types, p2p, p2p/peer_pool, p2p/private/p2p_types],
+  eth/[common, p2p, p2p/peer_pool, p2p/private/p2p_types],
   stew/keyed_queue,
   "."/[handlers, sync_desc]

@@ -15,7 +15,7 @@ import
   std/[algorithm, distros, hashes, math, os, sets,
        sequtils, strformat, strutils, tables, times],
   chronicles,
-  eth/[common/eth_types, p2p, rlp, trie/db],
+  eth/[common, p2p, rlp, trie/db],
   rocksdb,
   stint,
   stew/[byteutils, results],
@@ -442,6 +442,25 @@ proc storagesRunner(
         OkStoDb
       check dbDesc.importStorageSlots(w.data, persistent).toStoDbRc == expRc

+    test &"Inspecting {storagesList.len} imported storages lists sub-tries":
+      let ignore = knownFailures.toTable
+      for n,w in storagesList:
+        let
+          testId = fileInfo & "#" & $n
+          errInx = if ignore.hasKey(testId): ignore[testId][0][0]
+                   else: high(int)
+        for m in 0 ..< w.data.storages.len:
+          let
+            accHash = w.data.storages[m].account.accHash
+            root = w.data.storages[m].account.storageRoot
+            dbDesc = SnapDbStorageSlotsRef.init(dbBase, accHash, root, peer)
+            rc = dbDesc.inspectStorageSlotsTrie(persistent=persistent)
+          if m == errInx:
+            check rc == Result[TrieNodeStat,HexaryDbError].err(TrieIsEmpty)
+          else:
+            check rc.isOk # ok => level > 0 and not stopped
+
+
 proc inspectionRunner(
     noisy = true;
     persistent = true;
@@ -1108,7 +1127,7 @@ when isMainModule:
   #    value is mostly ignored but carried through.
   #
   # * `Proof`: There is a list of hexary nodes which allow to build a partial
-  #   Patricia-Mercle trie starting at the state root with all the account
+  #   Patricia-Merkle trie starting at the state root with all the account
   #   leaves. There are enough nodes that show that there is no account before
   #   the least account (which is currently ignored.)
   #
@@ -1128,10 +1147,7 @@ when isMainModule:
   #    * Load/accumulate accounts (needs some unique sorting)
   #    * Build/complete hexary trie for accounts
   #    * Save/bulk-store hexary trie on disk. If rocksdb is available, data
-  #      are bulk stored via sst. An additional data set is stored in a table
-  #      with key prefix 200 using transactional `put()` (for time comparison.)
-  #      If there is no rocksdb, standard transactional `put()` is used, only
-  #      (no key prefix 200 storage.)
+  #      are bulk stored via sst.
   #
   # 3. Traverse trie nodes stored earlier. The accounts from test 2 are
   #    re-visited using the account hash as access path.