Mirror of https://github.com/status-im/nimbus-eth1.git, synced 2025-02-21 16:28:25 +00:00
* Aristo: Re-phrase `LayerDelta` and `LayerFinal` as object references
  why: Avoids copying in some cases

* Fix copyright header

* Aristo: Verify `leafTie.root` function argument for `merge()` proc
  why: Zero root will lead to inconsistent DB entry

* Aristo: Update failure condition for hash labels compiler `hashify()`
  why: A node need not be rejected as long as links are on the schedule. In
  that case, `redo[]` is to become `wff.base[]` at a later stage. This amends
  an earlier fix, part of #1952, by also testing against the target nodes of
  the `wff.base[]` sets.

* Aristo: Add storage root glue record to `hashify()` schedule
  why: An account leaf node might refer to a non-resolvable storage root ID.
  Storage root node chains will end up at the storage root. So the link
  `storage-root->account-leaf` needs an extra item in the schedule.

* Aristo: Fix error code returned by `fetchPayload()`
  details: The final error code is implied by the error code from the
  `hikeUp()` function.

* CoreDb: Discard `createOk` argument in API `getRoot()` function
  why: Not needed for the legacy DB. For the `Aristo` DB, a lazy approach is
  implemented where a storage root node is created on-the-fly.

* CoreDb: Prevent `$$` logging in some cases
  why: Logging the function `$$` is not useful when it is used internally,
  i.e. for retrieving an error text for logging.

* CoreDb: Add `tryHashFn()` to API for pretty printing
  why: Pretty printing must not change the hashification status for the
  `Aristo` DB. So there is an independent API wrapper for getting the node
  hash which never updates the hashes.

* CoreDb: Discard `update` argument in API `hash()` function
  why: When calling the API function `hash()`, the latest state is always
  wanted. For a version that uses the current state as-is without checking,
  the function `tryHash()` was added to the backend.

* CoreDb: Update opaque vertex ID objects for the `Aristo` backend
  why: For `Aristo`, vID objects encapsulate a numeric `VertexID` referencing
  a vertex (rather than a node hash as used on the legacy backend.) For
  storage sub-tries, there might be no initial vertex known when the
  descriptor is created. So opaque vertex ID objects are supported without a
  valid `VertexID`, which will be initialised on-the-fly when the first item
  is merged.

* CoreDb: Add pretty printer for opaque vertex ID objects

* Cosmetics, printing profiling data

* CoreDb: Fix segfault in `Aristo` backend when creating MPT descriptor
  why: Missing initialisation error

* CoreDb: Allow MPT to inherit shared context on `Aristo` backend
  why: Creates descriptors with different storage roots for the same shared
  `Aristo` DB descriptor.

* Cosmetics, update diagnostic message items for `Aristo` backend

* Fix copyright year

187 lines
6.1 KiB
Nim

# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

# The point of this file is just to give a little more type-safety
# and clarity to our use of SecureHexaryTrie, by having distinct
# types for the big trie containing all the accounts and the little
# tries containing the storage for an individual account.
#
# It's nice to have all the accesses go through "getAccountBytes"
# rather than just "get" (which is hard to search for). Plus we
# may want to put in assertions to make sure that the nodes for
# the account are all present (in stateless mode), etc.
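
# A minimal sketch of the distinct-type pattern referred to above
# (illustrative only; `RawTrie`, `AccountsTrie`, `StorageTrie` and
# `getAccountBytes` are hypothetical names, not part of this module):
#
#   type
#     RawTrie      = SecureHexaryTrie
#     AccountsTrie = distinct RawTrie
#     StorageTrie  = distinct RawTrie
#
#   proc getAccountBytes(t: AccountsTrie; key: openArray[byte]): seq[byte] =
#     RawTrie(t).get(key)   # explicit unwrap; passing a `StorageTrie` here
#                           # would be a compile-time error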

{.push raises: [].}

## Re-write of `distinct_tries.nim` to be imported into `accounts_ledger.nim`
## for use with the new database API.
##

import
  std/[algorithm, sequtils, strutils, tables, typetraits],
  chronicles,
  eth/common,
  results,
  ".."/[core_db, storage_types]

from ../aristo
  import isValid

type
  AccountLedger* = distinct CoreDxAccRef
  StorageLedger* = distinct CoreDxPhkRef
  SomeLedger* = AccountLedger | StorageLedger

# ------------------------------------------------------------------------------
# Public debugging helpers
# ------------------------------------------------------------------------------

proc toSvp*(sl: StorageLedger): seq[(UInt256,UInt256)] =
  ## Dump as slot id-value pair sequence
  let
    db = sl.distinctBase.parent
    save = db.trackNewApi
  db.trackNewApi = false
  defer: db.trackNewApi = save
  let kvt = db.newKvt
  var kvp: Table[UInt256,UInt256]
  try:
    for (slotHash,val) in sl.distinctBase.toMpt.pairs:
      let rc = kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
      if rc.isErr:
        warn "StorageLedger.dump()", slotHash, error=($$rc.error)
      else:
        kvp[rlp.decode(rc.value,UInt256)] = rlp.decode(val,UInt256)
  except CatchableError as e:
    raiseAssert "Ooops(" & $e.name & "): " & e.msg
  kvp.keys.toSeq.sorted.mapIt((it,kvp.getOrDefault(it,high UInt256)))

proc toStr*(w: seq[(UInt256,UInt256)]): string =
  "[" & w.mapIt("(" & it[0].toHex & "," & it[1].toHex & ")").join(", ") & "]"
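
# A short usage sketch for the debugging helpers above (assuming `sl` is an
# open `StorageLedger`):
#
#   debugEcho sl.toSvp.toStr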

# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------

proc db*(t: SomeLedger): CoreDbRef =
  t.distinctBase.parent

proc rootHash*(t: SomeLedger): Hash256 =
  t.distinctBase.rootVid().hash().expect "SomeLedger/rootHash()"

proc rootVid*(t: SomeLedger): CoreDbVidRef =
  t.distinctBase.rootVid

# ------------------------------------------------------------------------------
# Public functions: accounts ledger
# ------------------------------------------------------------------------------

proc init*(
    T: type AccountLedger;
    db: CoreDbRef;
    rootHash: Hash256;
    isPruning = true;
      ): T =
  const info = "AccountLedger/getRoot(): "

  var vid = CoreDbVidRef(nil)
  if rootHash.isValid:
    let rc = db.getRoot(rootHash)
    if rc.isErr:
      raiseAssert info & $$rc.error
    vid = rc.value

  let acc = block:
    let rc = db.newAccMpt(vid, isPruning, Shared)
    if rc.isErr:
      raiseAssert info & $$rc.error
    rc.value

  acc.T

proc init*(
    T: type AccountLedger;
    db: CoreDbRef;
    isPruning = true;
      ): T =
  db.newAccMpt(isPruning, Shared).AccountLedger

proc fetch*(al: AccountLedger; eAddr: EthAddress): Result[CoreDbAccount,void] =
  ## Using `fetch()` for trie data retrieval
  al.distinctBase.fetch(eAddr).mapErr(proc(ign: CoreDbErrorRef) = discard)

proc merge*(al: AccountLedger; eAddr: EthAddress; account: CoreDbAccount) =
  ## Using `merge()` for trie data storage
  al.distinctBase.merge(eAddr, account).expect "AccountLedger/merge()"

proc delete*(al: AccountLedger, eAddr: EthAddress) =
  al.distinctBase.delete(eAddr).expect "AccountLedger/delete()"

proc persistent*(al: AccountLedger) =
  al.distinctBase.persistent()
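
# Usage sketch for the accounts ledger API above (assuming a `CoreDbRef`
# handle `db`, an address `eAddr` and a state root `stateRoot` are available):
#
#   let al = AccountLedger.init(db, stateRoot)
#   var acc = al.fetch(eAddr).valueOr:
#     return                          # unknown account
#   al.merge(eAddr, acc)              # write back the (possibly updated) entry
#   al.persistent()                   # make the changes visible on the backend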

# ------------------------------------------------------------------------------
# Public functions: storage ledger
# ------------------------------------------------------------------------------

proc init*(
    T: type StorageLedger;
    al: AccountLedger;
    account: CoreDbAccount;
    isPruning = false;
      ): T =
  ## Storage trie constructor.
  ##
  ## Note that the argument `isPruning` should be left `false` on the legacy
  ## `CoreDb` backend. Otherwise, pruning might unintentionally remove entries
  ## from storage tries, ending up with an unstable database and leading to
  ## crashes (see https://github.com/status-im/nimbus-eth1/issues/932.)
  const
    info = "StorageLedger/init(): "
  let
    vid = account.storageVid
    mpt = block:
      let rc = al.distinctBase.parent.newMpt(vid, isPruning, Shared)
      if rc.isErr:
        raiseAssert info & $$rc.error
      rc.value
  mpt.toPhk.T

#proc init*(T: type StorageLedger; db: CoreDbRef, isPruning = false): T =
#  db.newMpt(CoreDbVidRef(nil), isPruning, Shared).toPhk.T

proc fetch*(sl: StorageLedger, slot: UInt256): Result[Blob,void] =
  sl.distinctBase.fetch(slot.toBytesBE).mapErr proc(ign: CoreDbErrorRef)=discard

proc merge*(sl: StorageLedger, slot: UInt256, value: openArray[byte]) =
  sl.distinctBase.merge(slot.toBytesBE, value).expect "StorageLedger/merge()"

proc delete*(sl: StorageLedger, slot: UInt256) =
  sl.distinctBase.delete(slot.toBytesBE).expect "StorageLedger/delete()"

iterator storage*(
    al: AccountLedger;
    account: CoreDbAccount;
      ): (Blob,Blob)
      {.gcsafe, raises: [CoreDbApiError].} =
  ## For a given account, iterate over storage slots
  const
    info = "storage(): "
  let
    vid = account.storageVid
    mpt = al.distinctBase.parent.newMpt(vid, saveMode=Shared).valueOr:
      raiseAssert info & $$error
  for (key,val) in mpt.pairs:
    yield (key,val)
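
# Usage sketch for the storage ledger API above (assuming `al` is an
# `AccountLedger` and `acc` a previously fetched `CoreDbAccount`; the RLP
# value encoding is illustrative only):
#
#   let sl = StorageLedger.init(al, acc)
#   sl.merge(1.u256, rlp.encode(42.u256))
#   let raw = sl.fetch(1.u256).valueOr:
#     return                          # empty slot
#   for (key, val) in al.storage(acc):
#     debugEcho "slot ", key, " -> ", val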

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------