2023-10-18 19:27:22 +00:00
|
|
|
# Nimbus
|
Core db update storage root management for sub tries (#1964)
* Aristo: Re-phrase `LayerDelta` and `LayerFinal` as object references
why:
Avoids copying in some cases
* Fix copyright header
* Aristo: Verify `leafTie.root` function argument for `merge()` proc
why:
Zero root will lead to inconsistent DB entry
* Aristo: Update failure condition for hash labels compiler `hashify()`
why:
Node need not be rejected as long as links are on the schedule. In
that case, `redo[]` is to become `wff.base[]` at a later stage.
This amends an earlier fix, part of #1952 by also testing against
the target nodes of the `wff.base[]` sets.
* Aristo: Add storage root glue record to `hashify()` schedule
why:
An account leaf node might refer to a non-resolvable storage root ID.
Storage root node chains will end up at the storage root. So the link
`storage-root->account-leaf` needs an extra item in the schedule.
* Aristo: fix error code returned by `fetchPayload()`
details:
Final error code is implied by the error code from the `hikeUp()`
function.
* CoreDb: Discard `createOk` argument in API `getRoot()` function
why:
Not needed for the legacy DB. For the `Aristo` DB, a lazy approach is
implemented where a storage root node is created on-the-fly.
* CoreDb: Prevent `$$` logging in some cases
why:
Logging the function `$$` is not useful when it is used for internal
use, i.e. retrieving an error text for logging.
* CoreDb: Add `tryHashFn()` to API for pretty printing
why:
Pretty printing must not change the hashification status for the
`Aristo` DB. So there is an independent API wrapper for getting the
node hash which never updates the hashes.
* CoreDb: Discard `update` argument in API `hash()` function
why:
When calling the API function `hash()`, the latest state is always
wanted. For a version that uses the current state as-is without checking,
the function `tryHash()` was added to the backend.
* CoreDb: Update opaque vertex ID objects for the `Aristo` backend
why:
For `Aristo`, vID objects encapsulate a numeric `VertexID`
referencing a vertex (rather than a node hash as used on the
legacy backend.) For storage sub-tries, there might be no initial
vertex known when the descriptor is created. So opaque vertex ID
objects are supported without a valid `VertexID` which will be
initialised on-the-fly when the first item is merged.
* CoreDb: Add pretty printer for opaque vertex ID objects
* Cosmetics, printing profiling data
* CoreDb: Fix segfault in `Aristo` backend when creating MPT descriptor
why:
Missing initialisation error
* CoreDb: Allow MPT to inherit shared context on `Aristo` backend
why:
Creates descriptors with different storage roots for the same
shared `Aristo` DB descriptor.
* Cosmetics, update diagnostic message items for `Aristo` backend
* Fix Copyright year
2024-01-11 19:11:38 +00:00
|
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
2023-10-18 19:27:22 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except
|
|
|
|
# according to those terms.
|
|
|
|
|
|
|
|
{.push raises: [].}
|
|
|
|
|
|
|
|
import
|
2024-06-27 09:01:26 +00:00
|
|
|
std/[tables, hashes, sets, typetraits],
|
2023-10-25 14:03:09 +00:00
|
|
|
chronicles,
|
2023-10-18 19:27:22 +00:00
|
|
|
eth/[common, rlp],
|
|
|
|
results,
|
2024-06-27 09:01:26 +00:00
|
|
|
stew/keyed_queue,
|
2024-06-08 08:05:00 +00:00
|
|
|
../../stateless/multi_keys,
|
2024-05-20 10:17:51 +00:00
|
|
|
"../.."/[constants, utils/utils],
|
2023-10-18 19:27:22 +00:00
|
|
|
../access_list as ac_access_list,
|
2024-06-21 07:44:10 +00:00
|
|
|
../../evm/code_bytes,
|
2024-06-27 09:01:26 +00:00
|
|
|
".."/[core_db, storage_types, transient_storage]
|
2023-10-18 19:27:22 +00:00
|
|
|
|
2024-06-21 07:44:10 +00:00
|
|
|
export code_bytes
|
|
|
|
|
2023-10-18 19:27:22 +00:00
|
|
|
const
  # Compile-time switch enabling the savepoint inspection helpers below.
  debugAccountsLedgerRef = false
  codeLruSize = 16*1024
    # An LRU cache of 16K items gives roughly 90% hit rate anecdotally on a
    # small range of test blocks - this number could be studied in more detail
    # Per EIP-170, the code of a contract can be up to `MAX_CODE_SIZE` = 24kb,
    # which would cause a worst case of 386MB memory usage though in reality
    # code sizes are much smaller - it would make sense to study these numbers
    # in greater detail.

type
  AccountFlag = enum
    ## Per-account bookkeeping flags tracked across savepoints.
    Alive
    IsNew
    Dirty
    Touched
    CodeChanged
    StorageChanged
    NewlyCreated # EIP-6780: self destruct only in same transaction

  AccountFlags = set[AccountFlag]

  AccountRef = ref object
    ## In-memory view of a single account, layered on top of the database.
    statement: CoreDbAccount
    accPath: Hash256
    flags: AccountFlags
    code: CodeBytesRef
    originalStorage: TableRef[UInt256, UInt256]
    overlayStorage: Table[UInt256, UInt256]

  WitnessData* = object
    storageKeys*: HashSet[UInt256]
    codeTouched*: bool

  AccountsLedgerRef* = ref object
    ledger: CoreDbAccRef # AccountLedger
    kvt: CoreDbKvtRef
    savePoint: LedgerSavePoint
    witnessCache: Table[EthAddress, WitnessData]
    isDirty: bool
    ripemdSpecial: bool
    cache: Table[EthAddress, AccountRef]
      # Second-level cache for the ledger save point, which is cleared on every
      # persist
    code: KeyedQueue[Hash256, CodeBytesRef]
      ## The code cache provides two main benefits:
      ##
      ## * duplicate code is shared in memory between accounts
      ## * the jump destination table does not have to be recomputed for every
      ##   execution, for commonly called contracts
      ##
      ## The former feature is specially important in the 2.3-2.7M block range
      ## when underpriced code opcodes are being run en masse - both advantages
      ## help performance broadly as well.

  ReadOnlyStateDB* = distinct AccountsLedgerRef

  TransactionState = enum
    ## Lifecycle of a `LedgerSavePoint`.
    Pending
    Committed
    RolledBack

  LedgerSavePoint* = ref object
    ## One nesting level of the transaction stack; links to its parent.
    parentSavepoint: LedgerSavePoint
    cache: Table[EthAddress, AccountRef]
    dirty: Table[EthAddress, AccountRef]
    selfDestruct: HashSet[EthAddress]
    logEntries: seq[Log]
    accessList: ac_access_list.AccessList
    transientStorage: TransientStorage
    state: TransactionState
    when debugAccountsLedgerRef:
      depth: int

const
  emptyEthAccount = newAccount()

  # Flags cleared when a savepoint/account round-trip completes.
  resetFlags = {
    Dirty,
    IsNew,
    Touched,
    CodeChanged,
    StorageChanged,
    NewlyCreated
    }
|
|
|
|
|
|
|
|
when debugAccountsLedgerRef:
  import
    stew/byteutils

  proc inspectSavePoint(name: string, x: LedgerSavePoint) =
    ## Debug helper: walk the savepoint chain starting at `x`, printing each
    ## cached account address together with its flags.
    debugEcho "*** ", name, ": ", x.depth, " ***"
    var cursor = x
    while not cursor.isNil:
      for address, acc in cursor.cache:
        debugEcho address.toHex, " ", acc.flags
      cursor = cursor.parentSavepoint
|
|
|
|
|
Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
why:
Functions `getCommittedStorage()` and `updateOriginalRoot()` from
the `state_db` module are nowhere used. The emulation of a legacy
`TransactionID` type functionality is administratively expensive to
provide by `Aristo` (the legacy DB version is only partially
implemented, anyway).
As there is no other place where `TransactionID`s are used, they will
not be provided by the `Aristo` variant of the `CoreDb`. For the
legacy DB API, nothing will change.
* Fix copyright headers in source code
* Get rid of compiler warning
* Update Aristo code, remove unused `merge()` variant, export `hashify()`
why:
Adapt to upcoming `CoreDb` wrapper
* Remove synced tx feature from `Aristo`
why:
+ This feature allowed to synchronise transaction methods like begin,
commit, and rollback for a group of descriptors.
+ The feature is over engineered and not needed for `CoreDb`, neither
is it complete (some convergence features missing.)
* Add debugging helpers to `Kvt`
also:
Update database iterator, add count variable yield argument similar
to `Aristo`.
* Provide optional destructors for `CoreDb` API
why:
For the upcoming Aristo wrapper, this allows to control when certain
smart destruction and update can take place. The auto destructor works
fine in general when the storage/cache strategy is known and acceptable
when creating descriptors.
* Add update option for `CoreDb` API function `hash()`
why:
The hash function is typically used to get the state root of the MPT.
Due to lazy hashing, this might be not available on the `Aristo` DB.
So the `update` function asks for re-hashing the current state changes
if needed.
* Update API tracking log mode: `info` => `debug`
* Use shared `Kvt` descriptor in new Ledger API
why:
No need to create a new descriptor all the time
2023-11-16 19:35:03 +00:00
|
|
|
template logTxt(info: static[string]): static[string] =
  ## Compile-time helper: prefix a log message with this module's topic.
  "AccountsLedgerRef " & info
|
|
|
|
|
2024-06-27 19:21:01 +00:00
|
|
|
template toAccountKey(acc: AccountRef): openArray[byte] =
  ## Ledger lookup key for an already-loaded account: its cached account path.
  acc.accPath.data.toOpenArray(0, 31)

template toAccountKey(eAddr: EthAddress): openArray[byte] =
  ## Ledger lookup key derived from an address: the Keccak hash of the address.
  eAddr.keccakHash.data.toOpenArray(0, 31)
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
|
2024-06-27 19:21:01 +00:00
|
|
|
proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint {.gcsafe.}
|
|
|
|
|
|
|
|
proc resetCoreDbAccount(ac: AccountsLedgerRef, acc: AccountRef) =
  ## Wipe the account's storage area on the backend and reset the cached
  ## statement fields to those of a pristine (empty) account.
  const info = "resetCoreDbAccount(): "
  ac.ledger.clearStorage(acc.toAccountKey).isOkOr:
    raiseAssert info & $$error
  acc.statement.nonce    = emptyEthAccount.nonce
  acc.statement.balance  = emptyEthAccount.balance
  acc.statement.codeHash = emptyEthAccount.codeHash
|
2024-06-02 19:21:29 +00:00
|
|
|
|
2023-10-18 19:27:22 +00:00
|
|
|
template noRlpException(info: static[string]; code: untyped) =
  ## Run `code`, converting any `RlpError` into an assertion failure --
  ## an RLP decoding error here means corrupt database content, which is
  ## not a recoverable condition.
  try:
    code
  except RlpError as e:
    raiseAssert info & ", name=\"" & $e.name & "\", msg=\"" & e.msg & "\""
|
|
|
|
|
|
|
|
# The AccountsLedgerRef is modeled after TrieDatabase for its transaction style
proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef,
    root: KeccakHash): AccountsLedgerRef =
  ## Create a new ledger on top of `db`. When `root` differs from the empty
  ## root hash, the backend's current account state must match it -- a
  ## mismatch (or a state read failure) is treated as a fatal setup error.
  const info = "AccountsLedgerRef.init(): "
  new result
  result.ledger = db.ctx.getAccounts()
  if root != EMPTY_ROOT_HASH:
    let rc = result.ledger.state(updateOk=true)
    if rc.isErr:
      raiseAssert info & $$rc.error
    if rc.value != root:
      # `info` already ends in ": " -- appending another ": " produced a
      # doubled colon in the assertion message; just append the text.
      raiseAssert info & "wrong account state"
  result.kvt = db.newKvt() # save manually in `persist()`
  result.witnessCache = Table[EthAddress, WitnessData]()
  discard result.beginSavepoint
|
|
|
|
|
2024-06-16 03:21:02 +00:00
|
|
|
proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef): AccountsLedgerRef =
  ## Convenience initialiser starting from the empty state root.
  x.init(db, EMPTY_ROOT_HASH)
|
2023-10-18 19:27:22 +00:00
|
|
|
|
2024-04-19 18:37:27 +00:00
|
|
|
# Renamed `rootHash()` => `state()`
proc state*(ac: AccountsLedgerRef): KeccakHash =
  ## State root of the underlying ledger. All savepoints must already be
  ## committed and there must be no pending (dirty) writes.
  const info = "state(): "
  # make sure all savepoints have been committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  # make sure the cache has been committed
  doAssert(not ac.isDirty)
  ac.ledger.state.valueOr:
    raiseAssert info & $$error
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc isTopLevelClean*(ac: AccountsLedgerRef): bool =
  ## Getter, returns `true` if all pending data have been committed.
  ac.savePoint.parentSavepoint.isNil and not ac.isDirty
|
|
|
|
|
|
|
|
proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint =
  ## Push a fresh pending savepoint onto the ledger's savepoint stack and
  ## return it.
  new result
  result.cache = Table[EthAddress, AccountRef]()
  result.accessList.init()
  result.transientStorage.init()
  result.state = Pending
  # link into the stack
  result.parentSavepoint = ac.savePoint
  ac.savePoint = result

  when debugAccountsLedgerRef:
    if not result.parentSavePoint.isNil:
      result.depth = result.parentSavePoint.depth + 1
    inspectSavePoint("snapshot", result)
|
|
|
|
|
|
|
|
proc rollback*(ac: AccountsLedgerRef, sp: LedgerSavePoint) =
  ## Discard the pending savepoint `sp` and pop it off the stack.
  # Transactions should be handled in a strictly nested fashion.
  # Any child transaction must be committed or rolled-back before
  # its parent transactions:
  doAssert ac.savePoint == sp and sp.state == Pending
  ac.savePoint = sp.parentSavepoint
  sp.state = RolledBack

  when debugAccountsLedgerRef:
    inspectSavePoint("rollback", ac.savePoint)
|
|
|
|
|
|
|
|
proc commit*(ac: AccountsLedgerRef, sp: LedgerSavePoint) =
  ## Merge the pending savepoint `sp` into its parent and pop it off the
  ## stack.
  # Transactions should be handled in a strictly nested fashion.
  # Any child transaction must be committed or rolled-back before
  # its parent transactions:
  doAssert ac.savePoint == sp and sp.state == Pending
  # the bottom (first) savepoint has no parent to merge into
  doAssert not sp.parentSavepoint.isNil

  ac.savePoint = sp.parentSavepoint
  for address, acc in sp.cache:
    sp.parentSavepoint.cache[address] = acc

  for address, acc in sp.dirty:
    sp.parentSavepoint.dirty[address] = acc

  ac.savePoint.transientStorage.merge(sp.transientStorage)
  ac.savePoint.accessList.merge(sp.accessList)
  ac.savePoint.selfDestruct.incl sp.selfDestruct
  ac.savePoint.logEntries.add sp.logEntries
  sp.state = Committed

  when debugAccountsLedgerRef:
    inspectSavePoint("commit", ac.savePoint)
|
|
|
|
|
|
|
|
proc dispose*(ac: AccountsLedgerRef, sp: LedgerSavePoint) =
  ## Roll back `sp` unless it was already committed or rolled back.
  if sp.state == Pending:
    ac.rollback(sp)
|
|
|
|
|
|
|
|
proc safeDispose*(ac: AccountsLedgerRef, sp: LedgerSavePoint) =
  ## Like `dispose()`, but tolerates a nil savepoint reference.
  if not sp.isNil and sp.state == Pending:
    ac.rollback(sp)
|
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc getAccount(
    ac: AccountsLedgerRef;
    address: EthAddress;
    shouldCreate = true;
      ): AccountRef =
  ## Look up `address`, first through the savepoint cache layers, then the
  ## second-level cache, then the state trie. A freshly created account is
  ## returned when nothing is found and `shouldCreate` is set; otherwise nil.

  # walk the savepoint stack, newest first
  var sp = ac.savePoint
  while sp != nil:
    result = sp.cache.getOrDefault(address)
    if not result.isNil:
      return
    sp = sp.parentSavepoint

  # second-level cache: move the hit into the current savepoint
  if ac.cache.pop(address, result):
    ac.savePoint.cache[address] = result
    return

  # fall through to the state trie
  let rc = ac.ledger.fetch address.toAccountKey
  if rc.isOk:
    result = AccountRef(
      statement: rc.value,
      accPath: address.keccakHash,
      flags: {Alive})
  elif shouldCreate:
    result = AccountRef(
      statement: CoreDbAccount(
        nonce: emptyEthAccount.nonce,
        balance: emptyEthAccount.balance,
        codeHash: emptyEthAccount.codeHash),
      accPath: address.keccakHash,
      flags: {Alive, IsNew})
  else:
    return # ignore, don't cache

  # remember the account in the current savepoint
  ac.savePoint.cache[address] = result
  ac.savePoint.dirty[address] = result
|
2024-02-02 20:23:04 +00:00
|
|
|
|
|
|
|
proc clone(acc: AccountRef, cloneStorage: bool): AccountRef =
  ## Shallow-copy `acc`; optionally carry the storage tables over as well.
  result = AccountRef(
    statement: acc.statement,
    accPath:   acc.accPath,
    flags:     acc.flags,
    code:      acc.code)

  if cloneStorage:
    result.originalStorage = acc.originalStorage
    # it's ok to clone a table this way
    result.overlayStorage = acc.overlayStorage
|
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc isEmpty(acc: AccountRef): bool =
  ## EIP-161 emptiness check: zero nonce, zero balance, no code.
  acc.statement.balance.isZero and
    acc.statement.nonce == 0 and
    acc.statement.codeHash == EMPTY_CODE_HASH
|
2023-10-18 19:27:22 +00:00
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
template exists(acc: AccountRef): bool =
  ## `true` while the account has not been killed.
  Alive in acc.flags
|
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc originalStorageValue(
    acc: AccountRef;
    slot: UInt256;
    ac: AccountsLedgerRef;
      ): UInt256 =
  ## Committed (pre-transaction) value of `slot`, consulting the shared
  ## per-account original-storage cache before hitting the database.
  # share the same original storage between multiple versions of account
  if acc.originalStorage.isNil:
    acc.originalStorage = newTable[UInt256, UInt256]()
  else:
    acc.originalStorage[].withValue(slot, val) do:
      return val[]

  # Not in the original values cache - go to the DB.
  let
    slotKey = slot.toBytesBE.keccakHash.data
    rc = ac.ledger.slotFetch(acc.toAccountKey, slotKey)
  if rc.isOk and 0 < rc.value.len:
    noRlpException "originalStorageValue()":
      result = rlp.decode(rc.value, UInt256)

  # cache the result (zero when the slot is absent)
  acc.originalStorage[slot] = result
|
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc storageValue(
    acc: AccountRef;
    slot: UInt256;
    ac: AccountsLedgerRef;
      ): UInt256 =
  ## Current value of `slot`: the overlay (uncommitted) value when present,
  ## otherwise the committed one.
  acc.overlayStorage.withValue(slot, val) do:
    return val[]
  do:
    result = acc.originalStorageValue(slot, ac)
|
|
|
|
|
2024-06-07 10:56:31 +00:00
|
|
|
proc kill(ac: AccountsLedgerRef, acc: AccountRef) =
  ## Mark the account dead and drop its cached storage, persisted storage
  ## area and cached code.
  acc.flags.excl Alive
  acc.overlayStorage.clear()
  acc.originalStorage = nil
  ac.resetCoreDbAccount acc
  acc.code.reset()
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
type
  PersistMode = enum
    ## How an account should be handled when persisting.
    DoNothing
    Update
    Remove
|
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc persistMode(acc: AccountRef): PersistMode =
  ## Decide what persisting should do with this account, based on its flags.
  if Alive in acc.flags:
    if IsNew in acc.flags or Dirty in acc.flags:
      Update
    else:
      DoNothing
  elif IsNew notin acc.flags:
    # existed on disk before this transaction and is no longer alive
    Remove
  else:
    # created and destroyed within the same transaction -- never persisted
    DoNothing
|
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc persistCode(acc: AccountRef, ac: AccountsLedgerRef) =
  ## Write the account's byte code to the key-value store, keyed by its
  ## code hash. Failures are logged, not raised.
  if acc.code.len != 0:
    let rc = ac.kvt.put(
      contractHashKey(acc.statement.codeHash).toOpenArray, acc.code.bytes())
    if rc.isErr:
      warn logTxt "persistCode()",
        codeHash=acc.statement.codeHash, error=($$rc.error)
|
2023-10-18 19:27:22 +00:00
|
|
|
|
2024-06-02 19:21:29 +00:00
|
|
|
proc persistStorage(acc: AccountRef, ac: AccountsLedgerRef) =
  ## Flush `overlayStorage[]` to the backend, then fold it into
  ## `originalStorage[]` (EIP-2200/EIP-1283 bookkeeping).
  const info = "persistStorage(): "

  if acc.overlayStorage.len == 0:
    # TODO: remove the storage too if we figure out
    # how to create 'virtual' storage room for each account
    return

  if acc.originalStorage.isNil:
    acc.originalStorage = newTable[UInt256, UInt256]()

  # Make sure that there is an account entry on the database. This is needed by
  # `Aristo` for updating the account's storage area reference. As a side effect,
  # this action also updates the latest statement data.
  ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr:
    raiseAssert info & $$error

  # Save `overlayStorage[]` on database
  for slot, value in acc.overlayStorage:
    let slotKey = slot.toBytesBE.keccakHash.data
    if value > 0:
      let encodedValue = rlp.encode(value)
      ac.ledger.slotMerge(acc.toAccountKey, slotKey, encodedValue).isOkOr:
        raiseAssert info & $$error
    else:
      ac.ledger.slotDelete(acc.toAccountKey, slotKey).isOkOr:
        if error.error != StoNotFound:
          raiseAssert info & $$error
        discard

    # Re-use `slotKey` rather than recomputing `slot.toBytesBE.keccakHash`
    # a second time for the same slot.
    let
      key = slotKey.slotHashToSlotKey
      rc = ac.kvt.put(key.toOpenArray, rlp.encode(slot))
    if rc.isErr:
      warn logTxt "persistStorage()", slot, error=($$rc.error)

  # move the overlayStorage to originalStorage, related to EIP2200, EIP1283
  for slot, value in acc.overlayStorage:
    if value > 0:
      acc.originalStorage[slot] = value
    else:
      acc.originalStorage.del(slot)
  acc.overlayStorage.clear()
|
2023-10-18 19:27:22 +00:00
|
|
|
|
2024-06-07 10:56:31 +00:00
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc makeDirty(ac: AccountsLedgerRef, address: EthAddress, cloneStorage = true): AccountRef =
  ## Fetch (or create) the account and flag it dirty in the current
  ## savepoint, cloning it into this savepoint when it only lives in a
  ## parent layer.
  ac.isDirty = true
  result = ac.getAccount(address)
  if address in ac.savePoint.cache:
    # already present in the newest savepoint
    result.flags.incl Dirty
    ac.savePoint.dirty[address] = result
    return

  # place a copy into the newest savepoint
  result = result.clone(cloneStorage)
  result.flags.incl Dirty
  ac.savePoint.cache[address] = result
  ac.savePoint.dirty[address] = result
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc getCodeHash*(ac: AccountsLedgerRef, address: EthAddress): Hash256 =
  ## Code hash of `address`; the empty account's code hash when unknown.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    emptyEthAccount.codeHash
  else:
    acc.statement.codeHash
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc getBalance*(ac: AccountsLedgerRef, address: EthAddress): UInt256 =
  ## Balance of `address`; the empty account's balance when unknown.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    emptyEthAccount.balance
  else:
    acc.statement.balance
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc getNonce*(ac: AccountsLedgerRef, address: EthAddress): AccountNonce =
  ## Nonce of `address`; the empty account's nonce when unknown.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    emptyEthAccount.nonce
  else:
    acc.statement.nonce
|
2023-10-18 19:27:22 +00:00
|
|
|
|
2024-06-21 07:44:10 +00:00
|
|
|
proc getCode*(ac: AccountsLedgerRef, address: EthAddress): CodeBytesRef =
  ## Byte code of `address`, resolved through the per-account cache and the
  ## shared LRU code cache before falling back to the key-value store.
  # Always returns non-nil!
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return CodeBytesRef()

  if acc.code == nil:
    acc.code =
      if acc.statement.codeHash != EMPTY_CODE_HASH:
        # shared LRU cache first, then the database
        ac.code.lruFetch(acc.statement.codeHash).valueOr:
          var rc = ac.kvt.get(contractHashKey(acc.statement.codeHash).toOpenArray)
          if rc.isErr:
            warn logTxt "getCode()", codeHash=acc.statement.codeHash, error=($$rc.error)
            CodeBytesRef()
          else:
            let newCode = CodeBytesRef.init(move(rc.value))
            # insert into the LRU cache; returns `newCode`
            ac.code.lruAppend(acc.statement.codeHash, newCode, codeLruSize)
      else:
        CodeBytesRef()

  acc.code
|
|
|
|
|
2024-06-21 07:44:10 +00:00
|
|
|
proc getCodeSize*(ac: AccountsLedgerRef, address: EthAddress): int =
  ## Size in bytes of the code of `address`; 0 when the account is unknown
  ## or has no code.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return 0

  if acc.code == nil:
    if acc.statement.codeHash == EMPTY_CODE_HASH:
      return 0
    acc.code = ac.code.lruFetch(acc.statement.codeHash).valueOr:
      # On a cache miss, we don't fetch the code - instead, we fetch just the
      # length - should the code itself be needed, it will typically remain
      # cached and easily accessible in the database layer - this is to prevent
      # EXTCODESIZE calls from messing up the code cache and thus causing
      # recomputation of the jump destination table
      var rc = ac.kvt.len(contractHashKey(acc.statement.codeHash).toOpenArray)

      return rc.valueOr:
        warn logTxt "getCodeSize()", codeHash=acc.statement.codeHash, error=($$rc.error)
        0

  acc.code.len()
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc getCommittedStorage*(ac: AccountsLedgerRef, address: EthAddress, slot: UInt256): UInt256 =
  ## Value of `slot` as of the start of the transaction; zero when absent.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  acc.originalStorageValue(slot, ac)
|
|
|
|
|
|
|
|
proc getStorage*(ac: AccountsLedgerRef, address: EthAddress, slot: UInt256): UInt256 =
  ## Current value of `slot` including uncommitted writes; zero when absent.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  acc.storageValue(slot, ac)
|
|
|
|
|
2024-04-16 02:31:10 +00:00
|
|
|
proc contractCollision*(ac: AccountsLedgerRef, address: EthAddress): bool =
  ## `true` when deploying a contract to `address` would collide with
  ## existing state: a nonzero nonce, existing code, or non-empty storage.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  acc.statement.nonce != 0 or
    acc.statement.codeHash != EMPTY_CODE_HASH or
    not ac.ledger.slotStateEmptyOrVoid(acc.toAccountKey)
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc accountExists*(ac: AccountsLedgerRef, address: EthAddress): bool =
  ## `true` when the account is known and alive.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  acc.exists()
|
|
|
|
|
|
|
|
proc isEmptyAccount*(ac: AccountsLedgerRef, address: EthAddress): bool =
  ## EIP-161 emptiness for an account that must exist and be alive.
  let acc = ac.getAccount(address, false)
  doAssert not acc.isNil
  doAssert acc.exists()
  acc.isEmpty()
|
|
|
|
|
|
|
|
proc isDeadAccount*(ac: AccountsLedgerRef, address: EthAddress): bool =
  ## Dead per EIP-161: nonexistent, not alive, or empty.
  let acc = ac.getAccount(address, false)
  if acc.isNil or not acc.exists():
    return true
  acc.isEmpty()
|
|
|
|
|
|
|
|
proc setBalance*(ac: AccountsLedgerRef, address: EthAddress, balance: UInt256) =
  ## Set the account's balance, marking it dirty only on an actual change.
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  if acc.statement.balance != balance:
    ac.makeDirty(address).statement.balance = balance
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc addBalance*(ac: AccountsLedgerRef, address: EthAddress, delta: UInt256) =
  ## Increase the balance at `address` by `delta`.
  ##
  ## EIP-161: even a zero-value transfer must register as a "touch" so that
  ## clearing of empty (0,0,0) objects can take effect at persist time.
  if delta.isZero:
    let acc = ac.getAccount(address)
    if acc.isEmpty:
      ac.makeDirty(address).flags.incl Touched
    return
  ac.setBalance(address, ac.getBalance(address) + delta)
|
|
|
|
|
|
|
|
proc subBalance*(ac: AccountsLedgerRef, address: EthAddress, delta: UInt256) =
  ## Decrease the balance at `address` by `delta`.
  if delta.isZero:
    # The zero-delta early exit is important, as shown by EIP-4788: if the
    # account were created here it would change the state. Early exit
    # prevents that account creation (e.g. for the SYSTEM_ADDRESS).
    return
  ac.setBalance(address, ac.getBalance(address) - delta)
|
|
|
|
|
|
|
|
proc setNonce*(ac: AccountsLedgerRef, address: EthAddress, nonce: AccountNonce) =
  ## Set the account nonce, marking the account dirty only on a real change.
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  if acc.statement.nonce != nonce:
    ac.makeDirty(address).statement.nonce = nonce
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc incNonce*(ac: AccountsLedgerRef, address: EthAddress) =
  ## Bump the account nonce by one.
  ac.setNonce(address, ac.getNonce(address) + 1)
|
|
|
|
|
|
|
|
proc setCode*(ac: AccountsLedgerRef, address: EthAddress, code: seq[byte]) =
  ## Install `code` at `address`, updating the code hash and flagging the
  ## account when the code actually changed.
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  let codeHash = keccakHash(code)
  if acc.statement.codeHash != codeHash:
    var acc = ac.makeDirty(address)
    acc.statement.codeHash = codeHash
    # Reuse an existing cache entry when available, but do not insert the
    # code: there is no guarantee it will be executed within the LRU range.
    acc.code = ac.code.lruFetch(codeHash).valueOr(CodeBytesRef.init(code))
    acc.flags.incl CodeChanged
|
|
|
|
|
|
|
|
proc setStorage*(ac: AccountsLedgerRef, address: EthAddress, slot, value: UInt256) =
  ## Write `value` to `slot`, recording it in the overlay only when it
  ## differs from the currently visible value.
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  if acc.storageValue(slot, ac) != value:
    var dirty = ac.makeDirty(address)
    dirty.overlayStorage[slot] = value
    dirty.flags.incl StorageChanged
|
|
|
|
|
|
|
|
proc clearStorage*(ac: AccountsLedgerRef, address: EthAddress) =
  ## Wipe all storage of the account, a.k.a. `createStateObject`: if there
  ## is an existing account with the given address, it is overwritten.
  const info = "clearStorage(): "

  let acc = ac.getAccount(address)
  acc.flags.incl {Alive, NewlyCreated}

  let empty = ac.ledger.slotStateEmpty(acc.toAccountKey).valueOr: return
  if not empty:
    # Non-empty storage must be cleared from the database first.
    let acc = ac.makeDirty(address, cloneStorage = false)
    ac.ledger.clearStorage(acc.toAccountKey).isOkOr:
      raiseAssert info & $$error
    # Keep the in-memory caches in sync: also clear the `originalStorage`
    # cache, otherwise both `getStorage` and `getCommittedStorage` would
    # keep returning stale values.
    if not acc.originalStorage.isNil:
      acc.originalStorage.clear()
|
|
|
|
|
|
|
|
proc deleteAccount*(ac: AccountsLedgerRef, address: EthAddress) =
  ## Mark the account at `address` as killed and schedule it for removal.
  # All savepoints must already have been committed.
  doAssert(ac.savePoint.parentSavepoint.isNil)
  let acc = ac.getAccount(address)
  ac.savePoint.dirty[address] = acc
  ac.kill acc
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc selfDestruct*(ac: AccountsLedgerRef, address: EthAddress) =
  ## Zero the balance and register the address for destruction at persist.
  ac.setBalance(address, 0.u256)
  ac.savePoint.selfDestruct.incl address
|
|
|
|
|
|
|
|
proc selfDestruct6780*(ac: AccountsLedgerRef, address: EthAddress) =
  ## EIP-6780 variant: the self-destruct only takes effect when the account
  ## was created in the current transaction.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  if NewlyCreated in acc.flags:
    ac.selfDestruct(address)
|
|
|
|
|
|
|
|
proc selfDestructLen*(ac: AccountsLedgerRef): int =
  ## Number of addresses scheduled for self-destruction.
  ac.savePoint.selfDestruct.len
|
|
|
|
|
|
|
|
proc addLogEntry*(ac: AccountsLedgerRef, log: Log) =
  ## Append a log entry to the current savepoint.
  ac.savePoint.logEntries.add log
|
|
|
|
|
|
|
|
proc logEntries*(ac: AccountsLedgerRef): seq[Log] =
  ## Log entries collected in the current savepoint.
  ac.savePoint.logEntries
|
|
|
|
|
|
|
|
proc getAndClearLogEntries*(ac: AccountsLedgerRef): seq[Log] =
  ## Drain the log entries of the current savepoint, returning them.
  result = ac.savePoint.logEntries
  ac.savePoint.logEntries.setLen(0)
|
|
|
|
|
|
|
|
proc ripemdSpecial*(ac: AccountsLedgerRef) =
  ## Flag the RIPEMD precompile quirk (see EIP issue #716) for later cleanup.
  ac.ripemdSpecial = true
|
|
|
|
|
|
|
|
proc deleteEmptyAccount(ac: AccountsLedgerRef, address: EthAddress) =
  ## Kill the account at `address`, but only when it is present, exists,
  ## and is empty in the EIP-161 sense.
  let acc = ac.getAccount(address, false)
  if acc.isNil or not acc.isEmpty or not acc.exists:
    return
  ac.savePoint.dirty[address] = acc
  ac.kill acc
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
proc clearEmptyAccounts(ac: AccountsLedgerRef) =
  ## EIP-161 state clearing:
  ## https://github.com/ethereum/EIPs/blob/master/EIPS/eip-161.md
  for acc in ac.savePoint.dirty.values():
    if Touched in acc.flags and
        acc.isEmpty and acc.exists:
      ac.kill acc

  # RIPEMD precompile quirk, https://github.com/ethereum/EIPs/issues/716
  if ac.ripemdSpecial:
    ac.deleteEmptyAccount(RIPEMD_ADDR)
    ac.ripemdSpecial = false
|
|
|
|
|
|
|
|
proc persist*(ac: AccountsLedgerRef,
              clearEmptyAccount: bool = false,
              clearCache = false) =
  ## Flush all pending account changes to the ledger backend.
  ##
  ## * `clearEmptyAccount` — apply EIP-161 clearing of touched empty accounts
  ##   before persisting.
  ## * `clearCache` — rotate the account cache, keeping only this round's
  ##   entries (a crude LRU).
  const info = "persist(): "

  # All savepoints must already have been committed.
  doAssert(ac.savePoint.parentSavepoint.isNil)

  if clearEmptyAccount:
    ac.clearEmptyAccounts()

  for address in ac.savePoint.selfDestruct:
    ac.deleteAccount(address)

  for (eAddr,acc) in ac.savePoint.dirty.pairs(): # This is a hotspot in block processing
    case acc.persistMode()
    of Update:
      if CodeChanged in acc.flags:
        acc.persistCode(ac)
      if StorageChanged in acc.flags:
        acc.persistStorage(ac)
      else:
        # Only necessary when `persistStorage()` did not run, as that call
        # `merge()`s the latest statement as well.
        ac.ledger.merge(acc.toAccountKey, acc.statement).isOkOr:
          raiseAssert info & $$error
    of Remove:
      ac.ledger.delete(acc.toAccountKey).isOkOr:
        if error.error != AccNotFound:
          raiseAssert info & $$error
      ac.savePoint.cache.del eAddr
    of DoNothing:
      # Dead men tell no tales: drop touched-but-dead accounts from the cache.
      if Alive notin acc.flags:
        ac.savePoint.cache.del eAddr

    acc.flags = acc.flags - resetFlags
  ac.savePoint.dirty.clear()

  if clearCache:
    # This overwrites the cache from the previous persist, providing a crude
    # LRU scheme with little overhead.
    # TODO https://github.com/nim-lang/Nim/issues/23759
    swap(ac.cache, ac.savePoint.cache)
    ac.savePoint.cache.reset()

  ac.savePoint.selfDestruct.clear()

  # EIP-2929: the access list does not survive a persist.
  ac.savePoint.accessList.clear()

  ac.isDirty = false
|
|
|
|
|
|
|
|
iterator addresses*(ac: AccountsLedgerRef): EthAddress =
  ## Yield the address of every cached account.
  # All savepoints must already have been committed.
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for address, _ in ac.savePoint.cache:
    yield address
|
|
|
|
|
|
|
|
iterator accounts*(ac: AccountsLedgerRef): Account =
  ## Yield every cached account converted to a legacy `Account` value.
  # All savepoints must already have been committed.
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for _, acc in ac.savePoint.cache:
    yield ac.ledger.recast(
      acc.toAccountKey, acc.statement, updateOk=true).value
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
iterator pairs*(ac: AccountsLedgerRef): (EthAddress, Account) =
  ## Yield `(address, account)` for every cached account.
  # All savepoints must already have been committed.
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for address, acc in ac.savePoint.cache:
    yield (address, ac.ledger.recast(
      acc.toAccountKey, acc.statement, updateOk=true).value)
|
2023-10-18 19:27:22 +00:00
|
|
|
|
2024-06-27 09:01:26 +00:00
|
|
|
iterator storage*(
    ac: AccountsLedgerRef;
    eAddr: EthAddress;
      ): (UInt256, UInt256) =
  ## Yield `(slot, value)` pairs from the persisted storage of `eAddr`.
  ## Beware: if the account has not been persisted, the storage root will
  ## not be up to date.
  noRlpException "storage()":
    for (slotHash, value) in ac.ledger.slotPairs eAddr.toAccountKey:
      let rc = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
      if rc.isErr:
        warn logTxt "storage()", slotHash, error=($$rc.error)
      else:
        yield (rlp.decode(rc.value, UInt256), rlp.decode(value, UInt256))
|
2023-10-18 19:27:22 +00:00
|
|
|
|
|
|
|
iterator cachedStorage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) =
  ## Yield `(slot, value)` pairs from the in-memory `originalStorage` cache.
  let acc = ac.getAccount(address, false)
  if not acc.isNil:
    if not acc.originalStorage.isNil:
      for slot, value in acc.originalStorage:
        yield (slot, value)
|
|
|
|
|
|
|
|
proc getStorageRoot*(ac: AccountsLedgerRef, address: EthAddress): Hash256 =
  ## Storage root of the account. Beware: if the account has not been
  ## persisted, the root will not reflect pending changes.
  let acc = ac.getAccount(address, false)
  if acc.isNil: EMPTY_ROOT_HASH
  else: ac.ledger.slotState(acc.toAccountKey).valueOr: EMPTY_ROOT_HASH
|
2023-10-18 19:27:22 +00:00
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc update(wd: var WitnessData, acc: AccountRef) =
  ## Merge the account's touched code/storage information into `wd`.
  # Once the code is marked touched it must never be reset back to false by
  # a later update.
  if not wd.codeTouched:
    wd.codeTouched = CodeChanged in acc.flags or acc.code != nil

  if not acc.originalStorage.isNil:
    for slot, value in acc.originalStorage:
      if value.isZero: continue
      wd.storageKeys.incl slot

  for slot, value in acc.overlayStorage:
    wd.storageKeys.incl slot
|
|
|
|
|
2024-02-02 20:23:04 +00:00
|
|
|
proc witnessData(acc: AccountRef): WitnessData =
  ## Fresh witness data seeded from a single account.
  result.storageKeys = HashSet[UInt256]()
  update(result, acc)
|
|
|
|
|
|
|
|
proc collectWitnessData*(ac: AccountsLedgerRef) =
  ## Fold every cached account into the witness cache. Witness data is
  ## usually collected before `persist()` is called.
  # All savepoints must already have been committed.
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for address, acc in ac.savePoint.cache:
    ac.witnessCache.withValue(address, val) do:
      update(val[], acc)
    do:
      ac.witnessCache[address] = witnessData(acc)
|
|
|
|
|
2024-02-21 16:04:59 +00:00
|
|
|
func multiKeys(slots: HashSet[UInt256]): MultiKeysRef =
  ## Sorted multi-keys built from a storage slot set; nil when empty.
  if slots.len == 0: return
  new result
  for slot in slots:
    result.add slot.toBytesBE
  result.sort()
|
|
|
|
|
2024-02-21 16:04:59 +00:00
|
|
|
proc makeMultiKeys*(ac: AccountsLedgerRef): MultiKeysRef =
  ## Build sorted multi-keys from the witness cache. This proc is called
  ## after a block has finished executing.
  new result
  for address, wd in ac.witnessCache:
    result.add(address, wd.codeTouched, multiKeys(wd.storageKeys))
  result.sort()
|
|
|
|
|
|
|
|
proc accessList*(ac: AccountsLedgerRef, address: EthAddress) =
  ## Record `address` in the current savepoint's access list (EIP-2929).
  ac.savePoint.accessList.add(address)
|
|
|
|
|
|
|
|
proc accessList*(ac: AccountsLedgerRef, address: EthAddress, slot: UInt256) =
  ## Record `(address, slot)` in the current savepoint's access list.
  ac.savePoint.accessList.add(address, slot)
|
|
|
|
|
|
|
|
func inAccessList*(ac: AccountsLedgerRef, address: EthAddress): bool =
  ## Search the savepoint chain, innermost first, for `address`.
  var sp = ac.savePoint
  while sp != nil:
    if sp.accessList.contains(address):
      return true
    sp = sp.parentSavepoint
|
|
|
|
|
|
|
|
func inAccessList*(ac: AccountsLedgerRef, address: EthAddress, slot: UInt256): bool =
  ## Search the savepoint chain, innermost first, for `(address, slot)`.
  var sp = ac.savePoint
  while sp != nil:
    if sp.accessList.contains(address, slot):
      return true
    sp = sp.parentSavepoint
|
|
|
|
|
|
|
|
func getTransientStorage*(ac: AccountsLedgerRef,
                          address: EthAddress, slot: UInt256): UInt256 =
  ## Transient storage lookup (EIP-1153), searching savepoints innermost
  ## first. Returns zero when the slot was never written.
  var sp = ac.savePoint
  while sp != nil:
    let (found, value) = sp.transientStorage.getStorage(address, slot)
    if found:
      return value
    sp = sp.parentSavepoint
|
|
|
|
|
|
|
|
proc setTransientStorage*(ac: AccountsLedgerRef,
                          address: EthAddress, slot, val: UInt256) =
  ## Write to transient storage (EIP-1153) in the current savepoint.
  ac.savePoint.transientStorage.setStorage(address, slot, val)
|
|
|
|
|
|
|
|
proc clearTransientStorage*(ac: AccountsLedgerRef) =
  ## Wipe all transient storage (end of transaction).
  # All savepoints must already have been committed.
  doAssert(ac.savePoint.parentSavepoint.isNil)
  ac.savePoint.transientStorage.clear()
|
|
|
|
|
2024-02-29 11:16:47 +00:00
|
|
|
func getAccessList*(ac: AccountsLedgerRef): common.AccessList =
  ## Snapshot of the current access list.
  # All savepoints must already have been committed.
  doAssert(ac.savePoint.parentSavepoint.isNil)
  ac.savePoint.accessList.getAccessList()
|
|
|
|
|
2024-06-16 03:21:02 +00:00
|
|
|
proc getEthAccount*(ac: AccountsLedgerRef, address: EthAddress): Account =
  ## Return the account as a legacy `Account` value, or an empty account
  ## when it does not exist.
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return emptyEthAccount

  ## Convert to legacy object, will throw an assert if that fails
  let rc = ac.ledger.recast(acc.toAccountKey, acc.statement)
  if rc.isErr:
    raiseAssert "getAccount(): cannot convert account: " & $$rc.error
  rc.value
|
|
|
|
|
2024-04-19 18:37:27 +00:00
|
|
|
# ------------------------------------------------------------------------
# Read-only wrappers: forward the `AccountsLedgerRef` API through the
# `ReadOnlyStateDB` distinct type via `{.borrow.}`.
# ------------------------------------------------------------------------
proc state*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
proc getCodeHash*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getStorageRoot*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getBalance*(db: ReadOnlyStateDB, address: EthAddress): UInt256 {.borrow.}
proc getStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
proc getNonce*(db: ReadOnlyStateDB, address: EthAddress): AccountNonce {.borrow.}
proc getCode*(db: ReadOnlyStateDB, address: EthAddress): CodeBytesRef {.borrow.}
proc getCodeSize*(db: ReadOnlyStateDB, address: EthAddress): int {.borrow.}
proc contractCollision*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc accountExists*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc isDeadAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc isEmptyAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
func inAccessList*(ac: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
func inAccessList*(ac: ReadOnlyStateDB, address: EthAddress, slot: UInt256): bool {.borrow.}
func getTransientStorage*(ac: ReadOnlyStateDB,
                          address: EthAddress, slot: UInt256): UInt256 {.borrow.}
|