mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-01-24 19:19:21 +00:00
Ledger abstraction for accounts cache (#1824)
* Provide TDD/debug facility for inspecting `persistBlocks()` working detail: + Make sure that the last block of a test sample is the first batch item in `persistBlocks()`. + Additionally, allow `AccountsCache` API tracing by setting the flag `extraTraceMessages = true` in the file `accounts_cache.nim` * Overload AccountsCache by abstraction wrapper details: Can facilitate CoreDb API switch, details in `ledger/README.md`.
This commit is contained in:
parent
0472b75e23
commit
e8ad950e0a
@ -235,8 +235,7 @@ proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
|
||||
pairsIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} =
|
||||
reraiseRlpException("legacy/mpt/pairs()"):
|
||||
for k,v in mpt.trie.pairs():
|
||||
yield (k,v)
|
||||
,
|
||||
yield (k,v),
|
||||
|
||||
replicateIt: iterator: (Blob,Blob) {.gcsafe, raises: [LegacyApiRlpError].} =
|
||||
reraiseRlpException("legacy/mpt/replicate()"):
|
||||
|
@ -8,6 +8,8 @@
|
||||
# may want to put in assertions to make sure that the nodes for
|
||||
# the account are all present (in stateless mode), etc.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
eth/[common, trie/hexary],
|
||||
./core_db
|
||||
@ -19,14 +21,15 @@ type
|
||||
DistinctTrie* = AccountsTrie | StorageTrie
|
||||
|
||||
func toBase(t: DistinctTrie): CoreDbPhkRef =
|
||||
## Note that `CoreDbPhkRef` is a distinct variant of `CoreDxPhkRef`
|
||||
## Note that `CoreDbPhkRef` is a distinct variant of `CoreDxPhkRef` for
|
||||
## the legacy API.
|
||||
t.CoreDbPhkRef
|
||||
|
||||
# I don't understand why "borrow" doesn't work here. --Adam
|
||||
proc rootHash* (t: DistinctTrie): KeccakHash = t.toBase.rootHash()
|
||||
proc rootHashHex*(t: DistinctTrie): string = $t.toBase.rootHash()
|
||||
proc db* (t: DistinctTrie): DB = t.toBase.parent()
|
||||
proc isPruning* (t: DistinctTrie): bool = t.toBase.isPruning()
|
||||
proc rootHash* (t: DistinctTrie): KeccakHash = t.toBase.rootHash()
|
||||
proc rootHashHex*(t: DistinctTrie): string = $t.toBase.rootHash()
|
||||
proc db* (t: DistinctTrie): DB = t.toBase.parent()
|
||||
proc isPruning* (t: DistinctTrie): bool = t.toBase.isPruning()
|
||||
proc mpt* (t: DistinctTrie): CoreDbMptRef = t.toBase.toMpt()
|
||||
func phk* (t: DistinctTrie): CoreDbPhkRef = t.toBase
|
||||
|
||||
@ -40,7 +43,7 @@ template initAccountsTrie*(db: DB, isPruning = true): AccountsTrie =
|
||||
proc getAccountBytes*(trie: AccountsTrie, address: EthAddress): seq[byte] =
|
||||
CoreDbPhkRef(trie).get(address)
|
||||
|
||||
proc maybeGetAccountBytes*(trie: AccountsTrie, address: EthAddress): Option[seq[byte]] =
|
||||
proc maybeGetAccountBytes*(trie: AccountsTrie, address: EthAddress): Option[Blob] {.gcsafe, raises: [RlpError].} =
|
||||
let phk = CoreDbPhkRef(trie)
|
||||
if phk.parent.isLegacy:
|
||||
phk.backend.toLegacy.SecureHexaryTrie.maybeGet(address)
|
||||
@ -73,7 +76,7 @@ template createTrieKeyFromSlot*(slot: UInt256): auto =
|
||||
proc getSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): seq[byte] =
|
||||
CoreDbPhkRef(trie).get(slotAsKey)
|
||||
|
||||
proc maybeGetSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): Option[seq[byte]] =
|
||||
proc maybeGetSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): Option[Blob] {.gcsafe, raises: [RlpError].} =
|
||||
let phk = CoreDbPhkRef(trie)
|
||||
if phk.parent.isLegacy:
|
||||
phk.backend.toLegacy.SecureHexaryTrie.maybeGet(slotAsKey)
|
||||
|
118
nimbus/db/ledger.nim
Normal file
118
nimbus/db/ledger.nim
Normal file
@ -0,0 +1,118 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Unifies different ledger management APIs. All ledger objects are
|
||||
## derived from the base objects
|
||||
## ::
|
||||
## LedgerRef => AccountsCache, overloaded AccountsCache, etc.
|
||||
## LedgerSpRef => SavePoint, overloaded SavePoint etc
|
||||
##
|
||||
## In order to directly use `AccountsCache` it must be imported via
|
||||
## `import db/ledger/accounts_cache`. In this case, there is no `LedgerRef`.
|
||||
##
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
eth/common,
|
||||
./core_db,
|
||||
./ledger/base,
|
||||
./ledger/backend/[
|
||||
accounts_cache, accounts_cache_desc, accounts_ledger, accounts_ledger_desc]
|
||||
export
|
||||
AccountsCache,
|
||||
AccountsLedgerRef,
|
||||
base,
|
||||
init
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public iterators
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Note that there should be non-closure iterators here, at least for
|
||||
# `storage()`. With closures and the `accounts_cache.nim` driver as-is, all
|
||||
# unit tests and no-hive work OK apart from `TracerTests` which fails at block
|
||||
# 49018 due to mis-running of `storage()`.
|
||||
|
||||
iterator accounts*(ldg: LedgerRef): Account =
|
||||
case ldg.ldgType:
|
||||
of LegacyAccountsCache:
|
||||
for w in ldg.AccountsCache.accountsIt():
|
||||
yield w
|
||||
|
||||
of LedgerCache:
|
||||
for w in ldg.AccountsLedgerRef.accountsIt():
|
||||
yield w
|
||||
|
||||
else:
|
||||
raiseAssert: "Missing ledger type label"
|
||||
|
||||
|
||||
iterator addresses*(ldg: LedgerRef): EthAddress =
|
||||
case ldg.ldgType:
|
||||
of LegacyAccountsCache:
|
||||
for w in ldg.AccountsCache.addressesIt():
|
||||
yield w
|
||||
|
||||
of LedgerCache:
|
||||
for w in ldg.AccountsLedgerRef.addressesIt():
|
||||
yield w
|
||||
|
||||
else:
|
||||
raiseAssert: "Missing ledger type label"
|
||||
|
||||
|
||||
iterator cachedStorage*(ldg: LedgerRef, eAddr: EthAddress): (UInt256,UInt256) =
|
||||
case ldg.ldgType:
|
||||
of LegacyAccountsCache:
|
||||
for w in ldg.AccountsCache.cachedStorageIt(eAddr):
|
||||
yield w
|
||||
|
||||
of LedgerCache:
|
||||
for w in ldg.AccountsLedgerRef.cachedStorageIt(eAddr):
|
||||
yield w
|
||||
|
||||
else:
|
||||
raiseAssert: "Missing ledger type label"
|
||||
|
||||
|
||||
iterator pairs*(ldg: LedgerRef): (EthAddress,Account) =
|
||||
case ldg.ldgType:
|
||||
of LegacyAccountsCache:
|
||||
for w in ldg.AccountsCache.pairsIt():
|
||||
yield w
|
||||
|
||||
of LedgerCache:
|
||||
for w in ldg.AccountsLedgerRef.pairsIt():
|
||||
yield w
|
||||
|
||||
else:
|
||||
raiseAssert: "Missing ledger type label"
|
||||
|
||||
|
||||
iterator storage*(
|
||||
ldg: LedgerRef;
|
||||
eAddr: EthAddress;
|
||||
): (UInt256,UInt256)
|
||||
{.gcsafe, raises: [CoreDbApiError].} =
|
||||
case ldg.ldgType:
|
||||
of LegacyAccountsCache:
|
||||
for w in ldg.AccountsCache.storageIt(eAddr):
|
||||
yield w
|
||||
|
||||
of LedgerCache:
|
||||
for w in ldg.AccountsLedgerRef.storageIt(eAddr):
|
||||
yield w
|
||||
|
||||
else:
|
||||
raiseAssert: "Missing ledger type label"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
1
nimbus/db/ledger/,gitignore
Normal file
1
nimbus/db/ledger/,gitignore
Normal file
@ -0,0 +1 @@
|
||||
*.html
|
52
nimbus/db/ledger/README.md
Normal file
52
nimbus/db/ledger/README.md
Normal file
@ -0,0 +1,52 @@
|
||||
The file `accounts_cache.nim` has been relocated
|
||||
================================================
|
||||
|
||||
Background
|
||||
----------
|
||||
|
||||
The new *LedgerRef* module unifies different implementations of the
|
||||
*accounts_cache*. It is intended to be used as new base method for all of the
|
||||
*AccountsCache* implementations. Only constructors differ, depending on the
|
||||
implementation.
|
||||
|
||||
This was needed to accomodate for different *CoreDb* API paradigms. While the
|
||||
overloaded legacy *AccountsCache* implementation is just a closure based
|
||||
wrapper around the *accounts_cache* module, the overloaded *AccountsLedgerRef*
|
||||
is a closure based wrapper around the *accounts_ledger* module with the new
|
||||
*CoreDb* API returning *Result[]* values and saparating the meaning of trie
|
||||
root hash and trie root reference.
|
||||
|
||||
This allows to use the legacy hexary database (with the new *CoreDb* API) as
|
||||
well as the *Aristo* database (only supported on new API.)
|
||||
|
||||
Instructions
|
||||
------------
|
||||
|
||||
| **Legacy notation** | **LedgerRef replacement** | **Comment**
|
||||
|:-----------------------|:------------------------------|----------------------
|
||||
| | |
|
||||
| import accounts_cache | import ledger | preferred method,
|
||||
| AccountsCache.init(..) | AccountsCache.init(..) | wraps *AccountsCache*
|
||||
| | | methods
|
||||
| | *or* |
|
||||
| | |
|
||||
| | import ledger/accounts_cache | stay with legacy
|
||||
| | AccountsCache.init(..) | version of
|
||||
| | | *AccountsCache*
|
||||
| -- | |
|
||||
| fn(ac: AccountsCache) | fn(ac: LedgerRef) | function example for
|
||||
| | | preferred wrapper
|
||||
| | *or* | method
|
||||
| | |
|
||||
| | fn(ac: AccountsCache) | with legacy version,
|
||||
| | | no change here
|
||||
|
||||
|
||||
### The constructor decides which *CoreDb* API is to be used
|
||||
|
||||
| **Legacy API constructor** | **new API Constructor** |
|
||||
|:-------------------------------|:-----------------------------------|
|
||||
| | |
|
||||
| import ledger | import ledger |
|
||||
| let w = AccountsCache.init(..) | let w = AccountsLedgerRef.init(..) |
|
||||
| | |
|
755
nimbus/db/ledger/accounts_cache.nim
Normal file
755
nimbus/db/ledger/accounts_cache.nim
Normal file
@ -0,0 +1,755 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
import
|
||||
std/[tables, hashes, sets],
|
||||
eth/[common, rlp],
|
||||
../../../stateless/multi_keys,
|
||||
../../constants,
|
||||
../../utils/utils,
|
||||
../access_list as ac_access_list,
|
||||
".."/[core_db, distinct_tries, storage_types, transient_storage]
|
||||
|
||||
const
|
||||
debugAccountsCache = false
|
||||
|
||||
type
|
||||
AccountFlag = enum
|
||||
Alive
|
||||
IsNew
|
||||
Dirty
|
||||
Touched
|
||||
CodeLoaded
|
||||
CodeChanged
|
||||
StorageChanged
|
||||
NewlyCreated # EIP-6780: self destruct only in same transaction
|
||||
|
||||
AccountFlags = set[AccountFlag]
|
||||
|
||||
RefAccount = ref object
|
||||
account: Account
|
||||
flags: AccountFlags
|
||||
code: seq[byte]
|
||||
originalStorage: TableRef[UInt256, UInt256]
|
||||
overlayStorage: Table[UInt256, UInt256]
|
||||
|
||||
WitnessData* = object
|
||||
storageKeys*: HashSet[UInt256]
|
||||
codeTouched*: bool
|
||||
|
||||
AccountsCache* = ref object
|
||||
trie: AccountsTrie
|
||||
savePoint: SavePoint
|
||||
witnessCache: Table[EthAddress, WitnessData]
|
||||
isDirty: bool
|
||||
ripemdSpecial: bool
|
||||
|
||||
ReadOnlyStateDB* = distinct AccountsCache
|
||||
|
||||
TransactionState = enum
|
||||
Pending
|
||||
Committed
|
||||
RolledBack
|
||||
|
||||
SavePoint* = ref object
|
||||
parentSavepoint: SavePoint
|
||||
cache: Table[EthAddress, RefAccount]
|
||||
selfDestruct: HashSet[EthAddress]
|
||||
logEntries: seq[Log]
|
||||
accessList: ac_access_list.AccessList
|
||||
transientStorage: TransientStorage
|
||||
state: TransactionState
|
||||
when debugAccountsCache:
|
||||
depth: int
|
||||
|
||||
const
|
||||
emptyAcc = newAccount()
|
||||
|
||||
resetFlags = {
|
||||
Dirty,
|
||||
IsNew,
|
||||
Touched,
|
||||
CodeChanged,
|
||||
StorageChanged,
|
||||
NewlyCreated
|
||||
}
|
||||
|
||||
when debugAccountsCache:
|
||||
import
|
||||
stew/byteutils
|
||||
|
||||
proc inspectSavePoint(name: string, x: SavePoint) =
|
||||
debugEcho "*** ", name, ": ", x.depth, " ***"
|
||||
var sp = x
|
||||
while sp != nil:
|
||||
for address, acc in sp.cache:
|
||||
debugEcho address.toHex, " ", acc.flags
|
||||
sp = sp.parentSavepoint
|
||||
|
||||
proc beginSavepoint*(ac: AccountsCache): SavePoint {.gcsafe.}
|
||||
|
||||
# FIXME-Adam: this is only necessary because of my sanity checks on the latest rootHash;
|
||||
# take this out once those are gone.
|
||||
proc rawTrie*(ac: AccountsCache): AccountsTrie = ac.trie
|
||||
|
||||
func db(ac: AccountsCache): CoreDbRef = ac.trie.db
|
||||
proc kvt(ac: AccountsCache): CoreDbKvtRef = ac.db.kvt
|
||||
|
||||
# The AccountsCache is modeled after TrieDatabase for it's transaction style
|
||||
proc init*(x: typedesc[AccountsCache], db: CoreDbRef,
|
||||
root: KeccakHash, pruneTrie = true): AccountsCache =
|
||||
new result
|
||||
result.trie = initAccountsTrie(db, root, pruneTrie)
|
||||
result.witnessCache = initTable[EthAddress, WitnessData]()
|
||||
discard result.beginSavepoint
|
||||
|
||||
proc init*(x: typedesc[AccountsCache], db: CoreDbRef, pruneTrie = true): AccountsCache =
|
||||
init(x, db, EMPTY_ROOT_HASH, pruneTrie)
|
||||
|
||||
proc rootHash*(ac: AccountsCache): KeccakHash =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
# make sure all cache already committed
|
||||
doAssert(ac.isDirty == false)
|
||||
ac.trie.rootHash
|
||||
|
||||
proc isTopLevelClean*(ac: AccountsCache): bool =
|
||||
## Getter, returns `true` if all pending data have been commited.
|
||||
not ac.isDirty and ac.savePoint.parentSavepoint.isNil
|
||||
|
||||
proc beginSavepoint*(ac: AccountsCache): SavePoint =
|
||||
new result
|
||||
result.cache = initTable[EthAddress, RefAccount]()
|
||||
result.accessList.init()
|
||||
result.transientStorage.init()
|
||||
result.state = Pending
|
||||
result.parentSavepoint = ac.savePoint
|
||||
ac.savePoint = result
|
||||
|
||||
when debugAccountsCache:
|
||||
if not result.parentSavePoint.isNil:
|
||||
result.depth = result.parentSavePoint.depth + 1
|
||||
inspectSavePoint("snapshot", result)
|
||||
|
||||
proc rollback*(ac: AccountsCache, sp: SavePoint) =
|
||||
# Transactions should be handled in a strictly nested fashion.
|
||||
# Any child transaction must be committed or rolled-back before
|
||||
# its parent transactions:
|
||||
doAssert ac.savePoint == sp and sp.state == Pending
|
||||
ac.savePoint = sp.parentSavepoint
|
||||
sp.state = RolledBack
|
||||
|
||||
when debugAccountsCache:
|
||||
inspectSavePoint("rollback", ac.savePoint)
|
||||
|
||||
proc commit*(ac: AccountsCache, sp: SavePoint) =
|
||||
# Transactions should be handled in a strictly nested fashion.
|
||||
# Any child transaction must be committed or rolled-back before
|
||||
# its parent transactions:
|
||||
doAssert ac.savePoint == sp and sp.state == Pending
|
||||
# cannot commit most inner savepoint
|
||||
doAssert not sp.parentSavepoint.isNil
|
||||
|
||||
ac.savePoint = sp.parentSavepoint
|
||||
for k, v in sp.cache:
|
||||
sp.parentSavepoint.cache[k] = v
|
||||
|
||||
ac.savePoint.transientStorage.merge(sp.transientStorage)
|
||||
ac.savePoint.accessList.merge(sp.accessList)
|
||||
ac.savePoint.selfDestruct.incl sp.selfDestruct
|
||||
ac.savePoint.logEntries.add sp.logEntries
|
||||
sp.state = Committed
|
||||
|
||||
when debugAccountsCache:
|
||||
inspectSavePoint("commit", ac.savePoint)
|
||||
|
||||
proc dispose*(ac: AccountsCache, sp: SavePoint) {.inline.} =
|
||||
if sp.state == Pending:
|
||||
ac.rollback(sp)
|
||||
|
||||
proc safeDispose*(ac: AccountsCache, sp: SavePoint) {.inline.} =
|
||||
if (not isNil(sp)) and (sp.state == Pending):
|
||||
ac.rollback(sp)
|
||||
|
||||
proc getAccount(ac: AccountsCache, address: EthAddress, shouldCreate = true): RefAccount =
|
||||
# search account from layers of cache
|
||||
var sp = ac.savePoint
|
||||
while sp != nil:
|
||||
result = sp.cache.getOrDefault(address)
|
||||
if not result.isNil:
|
||||
return
|
||||
sp = sp.parentSavepoint
|
||||
|
||||
# not found in cache, look into state trie
|
||||
let recordFound =
|
||||
try:
|
||||
ac.trie.getAccountBytes(address)
|
||||
except RlpError:
|
||||
raiseAssert("No RlpError should occur on trie access for an address")
|
||||
if recordFound.len > 0:
|
||||
# we found it
|
||||
try:
|
||||
result = RefAccount(
|
||||
account: rlp.decode(recordFound, Account),
|
||||
flags: {Alive}
|
||||
)
|
||||
except RlpError:
|
||||
raiseAssert("No RlpError should occur on decoding account from trie")
|
||||
else:
|
||||
if not shouldCreate:
|
||||
return
|
||||
# it's a request for new account
|
||||
result = RefAccount(
|
||||
account: newAccount(),
|
||||
flags: {Alive, IsNew}
|
||||
)
|
||||
|
||||
# cache the account
|
||||
ac.savePoint.cache[address] = result
|
||||
|
||||
proc clone(acc: RefAccount, cloneStorage: bool): RefAccount =
|
||||
new(result)
|
||||
result.account = acc.account
|
||||
result.flags = acc.flags
|
||||
result.code = acc.code
|
||||
|
||||
if cloneStorage:
|
||||
result.originalStorage = acc.originalStorage
|
||||
# it's ok to clone a table this way
|
||||
result.overlayStorage = acc.overlayStorage
|
||||
|
||||
proc isEmpty(acc: RefAccount): bool =
|
||||
result = acc.account.codeHash == EMPTY_SHA3 and
|
||||
acc.account.balance.isZero and
|
||||
acc.account.nonce == 0
|
||||
|
||||
template exists(acc: RefAccount): bool =
|
||||
Alive in acc.flags
|
||||
|
||||
template createTrieKeyFromSlot(slot: UInt256): auto =
|
||||
# XXX: This is too expensive. Similar to `createRangeFromAddress`
|
||||
# Converts a number to hex big-endian representation including
|
||||
# prefix and leading zeros:
|
||||
slot.toBytesBE
|
||||
# Original py-evm code:
|
||||
# pad32(int_to_big_endian(slot))
|
||||
# morally equivalent to toByteRange_Unnecessary but with different types
|
||||
|
||||
template getStorageTrie(db: CoreDbRef, acc: RefAccount): auto =
|
||||
# TODO: implement `prefix-db` to solve issue #228 permanently.
|
||||
# the `prefix-db` will automatically insert account address to the
|
||||
# underlying-db key without disturb how the trie works.
|
||||
# it will create virtual container for each account.
|
||||
# see nim-eth#9
|
||||
initStorageTrie(db, acc.account.storageRoot, false)
|
||||
|
||||
proc originalStorageValue(acc: RefAccount, slot: UInt256, db: CoreDbRef): UInt256 =
|
||||
# share the same original storage between multiple
|
||||
# versions of account
|
||||
if acc.originalStorage.isNil:
|
||||
acc.originalStorage = newTable[UInt256, UInt256]()
|
||||
else:
|
||||
acc.originalStorage[].withValue(slot, val) do:
|
||||
return val[]
|
||||
|
||||
# Not in the original values cache - go to the DB.
|
||||
let
|
||||
slotAsKey = createTrieKeyFromSlot slot
|
||||
storageTrie = getStorageTrie(db, acc)
|
||||
foundRecord = storageTrie.getSlotBytes(slotAsKey)
|
||||
|
||||
result = if foundRecord.len > 0:
|
||||
rlp.decode(foundRecord, UInt256)
|
||||
else:
|
||||
UInt256.zero()
|
||||
|
||||
acc.originalStorage[slot] = result
|
||||
|
||||
proc storageValue(acc: RefAccount, slot: UInt256, db: CoreDbRef): UInt256 =
|
||||
acc.overlayStorage.withValue(slot, val) do:
|
||||
return val[]
|
||||
do:
|
||||
result = acc.originalStorageValue(slot, db)
|
||||
|
||||
proc kill(acc: RefAccount) =
|
||||
acc.flags.excl Alive
|
||||
acc.overlayStorage.clear()
|
||||
acc.originalStorage = nil
|
||||
acc.account = newAccount()
|
||||
acc.code = default(seq[byte])
|
||||
|
||||
type
|
||||
PersistMode = enum
|
||||
DoNothing
|
||||
Update
|
||||
Remove
|
||||
|
||||
proc persistMode(acc: RefAccount): PersistMode =
|
||||
result = DoNothing
|
||||
if Alive in acc.flags:
|
||||
if IsNew in acc.flags or Dirty in acc.flags:
|
||||
result = Update
|
||||
else:
|
||||
if IsNew notin acc.flags:
|
||||
result = Remove
|
||||
|
||||
proc persistCode(acc: RefAccount, db: CoreDbRef) =
|
||||
if acc.code.len != 0:
|
||||
when defined(geth):
|
||||
db.kvt.put(acc.account.codeHash.data, acc.code)
|
||||
else:
|
||||
db.kvt.put(contractHashKey(acc.account.codeHash).toOpenArray, acc.code)
|
||||
|
||||
proc persistStorage(acc: RefAccount, db: CoreDbRef, clearCache: bool) =
|
||||
if acc.overlayStorage.len == 0:
|
||||
# TODO: remove the storage too if we figure out
|
||||
# how to create 'virtual' storage room for each account
|
||||
return
|
||||
|
||||
if not clearCache and acc.originalStorage.isNil:
|
||||
acc.originalStorage = newTable[UInt256, UInt256]()
|
||||
|
||||
db.compensateLegacySetup()
|
||||
var storageTrie = getStorageTrie(db, acc)
|
||||
|
||||
for slot, value in acc.overlayStorage:
|
||||
let slotAsKey = createTrieKeyFromSlot slot
|
||||
|
||||
if value > 0:
|
||||
let encodedValue = rlp.encode(value)
|
||||
storageTrie.putSlotBytes(slotAsKey, encodedValue)
|
||||
else:
|
||||
storageTrie.delSlotBytes(slotAsKey)
|
||||
|
||||
# TODO: this can be disabled if we do not perform
|
||||
# accounts tracing
|
||||
# map slothash back to slot value
|
||||
# see iterator storage below
|
||||
# slotHash can be obtained from storageTrie.putSlotBytes?
|
||||
let slotHash = keccakHash(slotAsKey)
|
||||
db.kvt.put(slotHashToSlotKey(slotHash.data).toOpenArray, rlp.encode(slot))
|
||||
|
||||
if not clearCache:
|
||||
# if we preserve cache, move the overlayStorage
|
||||
# to originalStorage, related to EIP2200, EIP1283
|
||||
for slot, value in acc.overlayStorage:
|
||||
if value > 0:
|
||||
acc.originalStorage[slot] = value
|
||||
else:
|
||||
acc.originalStorage.del(slot)
|
||||
acc.overlayStorage.clear()
|
||||
|
||||
acc.account.storageRoot = storageTrie.rootHash
|
||||
|
||||
proc makeDirty(ac: AccountsCache, address: EthAddress, cloneStorage = true): RefAccount =
|
||||
ac.isDirty = true
|
||||
result = ac.getAccount(address)
|
||||
if address in ac.savePoint.cache:
|
||||
# it's already in latest savepoint
|
||||
result.flags.incl Dirty
|
||||
return
|
||||
|
||||
# put a copy into latest savepoint
|
||||
result = result.clone(cloneStorage)
|
||||
result.flags.incl Dirty
|
||||
ac.savePoint.cache[address] = result
|
||||
|
||||
proc getCodeHash*(ac: AccountsCache, address: EthAddress): Hash256 {.inline.} =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil: emptyAcc.codeHash
|
||||
else: acc.account.codeHash
|
||||
|
||||
proc getBalance*(ac: AccountsCache, address: EthAddress): UInt256 {.inline.} =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil: emptyAcc.balance
|
||||
else: acc.account.balance
|
||||
|
||||
proc getNonce*(ac: AccountsCache, address: EthAddress): AccountNonce {.inline.} =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil: emptyAcc.nonce
|
||||
else: acc.account.nonce
|
||||
|
||||
proc getCode*(ac: AccountsCache, address: EthAddress): seq[byte] =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
|
||||
if CodeLoaded in acc.flags or CodeChanged in acc.flags:
|
||||
result = acc.code
|
||||
else:
|
||||
when defined(geth):
|
||||
let data = ac.kvt.get(acc.account.codeHash.data)
|
||||
else:
|
||||
let data = ac.kvt.get(contractHashKey(acc.account.codeHash).toOpenArray)
|
||||
|
||||
acc.code = data
|
||||
acc.flags.incl CodeLoaded
|
||||
result = acc.code
|
||||
|
||||
proc getCodeSize*(ac: AccountsCache, address: EthAddress): int {.inline.} =
|
||||
ac.getCode(address).len
|
||||
|
||||
proc getCommittedStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256): UInt256 {.inline.} =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
acc.originalStorageValue(slot, ac.db)
|
||||
|
||||
proc getStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256): UInt256 {.inline.} =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
acc.storageValue(slot, ac.db)
|
||||
|
||||
proc hasCodeOrNonce*(ac: AccountsCache, address: EthAddress): bool {.inline.} =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
acc.account.nonce != 0 or acc.account.codeHash != EMPTY_SHA3
|
||||
|
||||
proc accountExists*(ac: AccountsCache, address: EthAddress): bool {.inline.} =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
acc.exists()
|
||||
|
||||
proc isEmptyAccount*(ac: AccountsCache, address: EthAddress): bool {.inline.} =
|
||||
let acc = ac.getAccount(address, false)
|
||||
doAssert not acc.isNil
|
||||
doAssert acc.exists()
|
||||
acc.isEmpty()
|
||||
|
||||
proc isDeadAccount*(ac: AccountsCache, address: EthAddress): bool =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return true
|
||||
if not acc.exists():
|
||||
return true
|
||||
acc.isEmpty()
|
||||
|
||||
proc setBalance*(ac: AccountsCache, address: EthAddress, balance: UInt256) =
|
||||
let acc = ac.getAccount(address)
|
||||
acc.flags.incl {Alive}
|
||||
if acc.account.balance != balance:
|
||||
ac.makeDirty(address).account.balance = balance
|
||||
|
||||
proc addBalance*(ac: AccountsCache, address: EthAddress, delta: UInt256) {.inline.} =
|
||||
# EIP161: We must check emptiness for the objects such that the account
|
||||
# clearing (0,0,0 objects) can take effect.
|
||||
if delta.isZero:
|
||||
let acc = ac.getAccount(address)
|
||||
if acc.isEmpty:
|
||||
ac.makeDirty(address).flags.incl Touched
|
||||
return
|
||||
ac.setBalance(address, ac.getBalance(address) + delta)
|
||||
|
||||
proc subBalance*(ac: AccountsCache, address: EthAddress, delta: UInt256) {.inline.} =
|
||||
if delta.isZero:
|
||||
# This zero delta early exit is important as shown in EIP-4788.
|
||||
# If the account is created, it will change the state.
|
||||
# But early exit will prevent the account creation.
|
||||
# In this case, the SystemAddress
|
||||
return
|
||||
ac.setBalance(address, ac.getBalance(address) - delta)
|
||||
|
||||
proc setNonce*(ac: AccountsCache, address: EthAddress, nonce: AccountNonce) =
|
||||
let acc = ac.getAccount(address)
|
||||
acc.flags.incl {Alive}
|
||||
if acc.account.nonce != nonce:
|
||||
ac.makeDirty(address).account.nonce = nonce
|
||||
|
||||
proc incNonce*(ac: AccountsCache, address: EthAddress) {.inline.} =
|
||||
ac.setNonce(address, ac.getNonce(address) + 1)
|
||||
|
||||
proc setCode*(ac: AccountsCache, address: EthAddress, code: seq[byte]) =
|
||||
let acc = ac.getAccount(address)
|
||||
acc.flags.incl {Alive}
|
||||
let codeHash = keccakHash(code)
|
||||
if acc.account.codeHash != codeHash:
|
||||
var acc = ac.makeDirty(address)
|
||||
acc.account.codeHash = codeHash
|
||||
acc.code = code
|
||||
acc.flags.incl CodeChanged
|
||||
|
||||
proc setStorage*(ac: AccountsCache, address: EthAddress, slot, value: UInt256) =
|
||||
let acc = ac.getAccount(address)
|
||||
acc.flags.incl {Alive}
|
||||
let oldValue = acc.storageValue(slot, ac.db)
|
||||
if oldValue != value:
|
||||
var acc = ac.makeDirty(address)
|
||||
acc.overlayStorage[slot] = value
|
||||
acc.flags.incl StorageChanged
|
||||
|
||||
proc clearStorage*(ac: AccountsCache, address: EthAddress) =
|
||||
# a.k.a createStateObject. If there is an existing account with
|
||||
# the given address, it is overwritten.
|
||||
|
||||
let acc = ac.getAccount(address)
|
||||
acc.flags.incl {Alive, NewlyCreated}
|
||||
if acc.account.storageRoot != EMPTY_ROOT_HASH:
|
||||
# there is no point to clone the storage since we want to remove it
|
||||
let acc = ac.makeDirty(address, cloneStorage = false)
|
||||
acc.account.storageRoot = EMPTY_ROOT_HASH
|
||||
if acc.originalStorage.isNil.not:
|
||||
# also clear originalStorage cache, otherwise
|
||||
# both getStorage and getCommittedStorage will
|
||||
# return wrong value
|
||||
acc.originalStorage.clear()
|
||||
|
||||
proc deleteAccount*(ac: AccountsCache, address: EthAddress) =
|
||||
# make sure all savepoints already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
let acc = ac.getAccount(address)
|
||||
acc.kill()
|
||||
|
||||
proc selfDestruct*(ac: AccountsCache, address: EthAddress) =
|
||||
ac.setBalance(address, 0.u256)
|
||||
ac.savePoint.selfDestruct.incl address
|
||||
|
||||
proc selfDestruct6780*(ac: AccountsCache, address: EthAddress) =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
|
||||
if NewlyCreated in acc.flags:
|
||||
ac.selfDestruct(address)
|
||||
|
||||
proc selfDestructLen*(ac: AccountsCache): int =
|
||||
ac.savePoint.selfDestruct.len
|
||||
|
||||
proc addLogEntry*(ac: AccountsCache, log: Log) =
|
||||
ac.savePoint.logEntries.add log
|
||||
|
||||
proc logEntries*(ac: AccountsCache): seq[Log] =
|
||||
ac.savePoint.logEntries
|
||||
|
||||
proc getAndClearLogEntries*(ac: AccountsCache): seq[Log] =
|
||||
result = ac.savePoint.logEntries
|
||||
ac.savePoint.logEntries.setLen(0)
|
||||
|
||||
proc ripemdSpecial*(ac: AccountsCache) =
|
||||
ac.ripemdSpecial = true
|
||||
|
||||
proc deleteEmptyAccount(ac: AccountsCache, address: EthAddress) =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
if not acc.isEmpty:
|
||||
return
|
||||
if not acc.exists:
|
||||
return
|
||||
acc.kill()
|
||||
|
||||
proc clearEmptyAccounts(ac: AccountsCache) =
|
||||
for address, acc in ac.savePoint.cache:
|
||||
if Touched in acc.flags and
|
||||
acc.isEmpty and acc.exists:
|
||||
acc.kill()
|
||||
|
||||
# https://github.com/ethereum/EIPs/issues/716
|
||||
if ac.ripemdSpecial:
|
||||
ac.deleteEmptyAccount(RIPEMD_ADDR)
|
||||
ac.ripemdSpecial = false
|
||||
|
||||
proc persist*(ac: AccountsCache,
|
||||
clearEmptyAccount: bool = false,
|
||||
clearCache: bool = true) =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
var cleanAccounts = initHashSet[EthAddress]()
|
||||
|
||||
if clearEmptyAccount:
|
||||
ac.clearEmptyAccounts()
|
||||
|
||||
for address in ac.savePoint.selfDestruct:
|
||||
ac.deleteAccount(address)
|
||||
|
||||
for address, acc in ac.savePoint.cache:
|
||||
case acc.persistMode()
|
||||
of Update:
|
||||
if CodeChanged in acc.flags:
|
||||
acc.persistCode(ac.db)
|
||||
if StorageChanged in acc.flags:
|
||||
# storageRoot must be updated first
|
||||
# before persisting account into merkle trie
|
||||
acc.persistStorage(ac.db, clearCache)
|
||||
ac.trie.putAccountBytes address, rlp.encode(acc.account)
|
||||
of Remove:
|
||||
ac.trie.delAccountBytes address
|
||||
if not clearCache:
|
||||
cleanAccounts.incl address
|
||||
of DoNothing:
|
||||
# dead man tell no tales
|
||||
# remove touched dead account from cache
|
||||
if not clearCache and Alive notin acc.flags:
|
||||
cleanAccounts.incl address
|
||||
|
||||
acc.flags = acc.flags - resetFlags
|
||||
|
||||
if clearCache:
|
||||
ac.savePoint.cache.clear()
|
||||
else:
|
||||
for x in cleanAccounts:
|
||||
ac.savePoint.cache.del x
|
||||
|
||||
ac.savePoint.selfDestruct.clear()
|
||||
|
||||
# EIP2929
|
||||
ac.savePoint.accessList.clear()
|
||||
|
||||
ac.isDirty = false
|
||||
|
||||
iterator addresses*(ac: AccountsCache): EthAddress =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
for address, _ in ac.savePoint.cache:
|
||||
yield address
|
||||
|
||||
iterator accounts*(ac: AccountsCache): Account =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
for _, account in ac.savePoint.cache:
|
||||
yield account.account
|
||||
|
||||
iterator pairs*(ac: AccountsCache): (EthAddress, Account) =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
for address, account in ac.savePoint.cache:
|
||||
yield (address, account.account)
|
||||
|
||||
iterator storage*(ac: AccountsCache, address: EthAddress): (UInt256, UInt256) =
|
||||
# beware that if the account not persisted,
|
||||
# the storage root will not be updated
|
||||
let acc = ac.getAccount(address, false)
|
||||
if not acc.isNil:
|
||||
let storageRoot = acc.account.storageRoot
|
||||
let trie = ac.db.mptPrune storageRoot
|
||||
|
||||
for slotHash, value in trie:
|
||||
if slotHash.len == 0: continue
|
||||
let keyData = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
|
||||
if keyData.len == 0: continue
|
||||
yield (rlp.decode(keyData, UInt256), rlp.decode(value, UInt256))
|
||||
|
||||
iterator cachedStorage*(ac: AccountsCache, address: EthAddress): (UInt256, UInt256) =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if not acc.isNil:
|
||||
if not acc.originalStorage.isNil:
|
||||
for k, v in acc.originalStorage:
|
||||
yield (k, v)
|
||||
|
||||
proc getStorageRoot*(ac: AccountsCache, address: EthAddress): Hash256 =
|
||||
# beware that if the account not persisted,
|
||||
# the storage root will not be updated
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil: emptyAcc.storageRoot
|
||||
else: acc.account.storageRoot
|
||||
|
||||
func update(wd: var WitnessData, acc: RefAccount) =
|
||||
wd.codeTouched = CodeChanged in acc.flags
|
||||
|
||||
if not acc.originalStorage.isNil:
|
||||
for k, v in acc.originalStorage:
|
||||
if v.isZero: continue
|
||||
wd.storageKeys.incl k
|
||||
|
||||
for k, v in acc.overlayStorage:
|
||||
if v.isZero and k notin wd.storageKeys:
|
||||
continue
|
||||
if v.isZero and k in wd.storageKeys:
|
||||
wd.storageKeys.excl k
|
||||
continue
|
||||
wd.storageKeys.incl k
|
||||
|
||||
func witnessData(acc: RefAccount): WitnessData =
|
||||
result.storageKeys = initHashSet[UInt256]()
|
||||
update(result, acc)
|
||||
|
||||
proc collectWitnessData*(ac: AccountsCache) =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
# usually witness data is collected before we call persist()
|
||||
for address, acc in ac.savePoint.cache:
|
||||
ac.witnessCache.withValue(address, val) do:
|
||||
update(val[], acc)
|
||||
do:
|
||||
ac.witnessCache[address] = witnessData(acc)
|
||||
|
||||
func multiKeys(slots: HashSet[UInt256]): MultikeysRef =
|
||||
if slots.len == 0: return
|
||||
new result
|
||||
for x in slots:
|
||||
result.add x.toBytesBE
|
||||
result.sort()
|
||||
|
||||
proc makeMultiKeys*(ac: AccountsCache): MultikeysRef =
|
||||
# this proc is called after we done executing a block
|
||||
new result
|
||||
for k, v in ac.witnessCache:
|
||||
result.add(k, v.codeTouched, multiKeys(v.storageKeys))
|
||||
result.sort()
|
||||
|
||||
proc accessList*(ac: AccountsCache, address: EthAddress) {.inline.} =
|
||||
ac.savePoint.accessList.add(address)
|
||||
|
||||
proc accessList*(ac: AccountsCache, address: EthAddress, slot: UInt256) {.inline.} =
|
||||
ac.savePoint.accessList.add(address, slot)
|
||||
|
||||
func inAccessList*(ac: AccountsCache, address: EthAddress): bool =
|
||||
var sp = ac.savePoint
|
||||
while sp != nil:
|
||||
result = sp.accessList.contains(address)
|
||||
if result:
|
||||
return
|
||||
sp = sp.parentSavepoint
|
||||
|
||||
func inAccessList*(ac: AccountsCache, address: EthAddress, slot: UInt256): bool =
|
||||
var sp = ac.savePoint
|
||||
while sp != nil:
|
||||
result = sp.accessList.contains(address, slot)
|
||||
if result:
|
||||
return
|
||||
sp = sp.parentSavepoint
|
||||
|
||||
func getTransientStorage*(ac: AccountsCache,
|
||||
address: EthAddress, slot: UInt256): UInt256 =
|
||||
var sp = ac.savePoint
|
||||
while sp != nil:
|
||||
let (ok, res) = sp.transientStorage.getStorage(address, slot)
|
||||
if ok:
|
||||
return res
|
||||
sp = sp.parentSavepoint
|
||||
|
||||
proc setTransientStorage*(ac: AccountsCache,
|
||||
address: EthAddress, slot, val: UInt256) =
|
||||
ac.savePoint.transientStorage.setStorage(address, slot, val)
|
||||
|
||||
proc clearTransientStorage*(ac: AccountsCache) {.inline.} =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
ac.savePoint.transientStorage.clear()
|
||||
|
||||
proc rootHash*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
|
||||
proc getCodeHash*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
|
||||
proc getStorageRoot*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
|
||||
proc getBalance*(db: ReadOnlyStateDB, address: EthAddress): UInt256 {.borrow.}
|
||||
proc getStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
|
||||
proc getNonce*(db: ReadOnlyStateDB, address: EthAddress): AccountNonce {.borrow.}
|
||||
proc getCode*(db: ReadOnlyStateDB, address: EthAddress): seq[byte] {.borrow.}
|
||||
proc getCodeSize*(db: ReadOnlyStateDB, address: EthAddress): int {.borrow.}
|
||||
proc hasCodeOrNonce*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc accountExists*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc isDeadAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc isEmptyAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
|
||||
func inAccessList*(ac: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
func inAccessList*(ac: ReadOnlyStateDB, address: EthAddress, slot: UInt256): bool {.borrow.}
|
||||
func getTransientStorage*(ac: ReadOnlyStateDB,
|
||||
address: EthAddress, slot: UInt256): UInt256 {.borrow.}
|
741
nimbus/db/ledger/accounts_ledger.nim
Normal file
741
nimbus/db/ledger/accounts_ledger.nim
Normal file
@ -0,0 +1,741 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
## Re-write of `accounts_cache.nim` using new database API.
|
||||
##
|
||||
## Many objects and names are kept as in the original ``accounts_cache.nim` so
|
||||
## that a diff against the original file gives useful results (e.g. using
|
||||
## the graphical diff tool `meld`.)
|
||||
|
||||
import
|
||||
std/[tables, hashes, sets],
|
||||
eth/[common, rlp],
|
||||
results,
|
||||
../../../stateless/multi_keys,
|
||||
"../.."/[constants, errors, utils/utils],
|
||||
../access_list as ac_access_list,
|
||||
".."/[core_db, storage_types, transient_storage],
|
||||
./distinct_ledgers
|
||||
|
||||
const
|
||||
debugAccountsLedgerRef = false
|
||||
|
||||
type
|
||||
AccountFlag = enum
|
||||
Alive
|
||||
IsNew
|
||||
Dirty
|
||||
Touched
|
||||
CodeLoaded
|
||||
CodeChanged
|
||||
StorageChanged
|
||||
NewlyCreated # EIP-6780: self destruct only in same transaction
|
||||
|
||||
AccountFlags = set[AccountFlag]
|
||||
|
||||
RefAccount = ref object
|
||||
account: CoreDbAccount
|
||||
flags: AccountFlags
|
||||
code: seq[byte]
|
||||
originalStorage: TableRef[UInt256, UInt256]
|
||||
overlayStorage: Table[UInt256, UInt256]
|
||||
|
||||
WitnessData* = object
|
||||
storageKeys*: HashSet[UInt256]
|
||||
codeTouched*: bool
|
||||
|
||||
AccountsLedgerRef* = ref object
|
||||
kvt: CoreDbKvtRef # Legacy API is god enough here
|
||||
ledger: AccountLedger
|
||||
savePoint: LedgerSavePoint
|
||||
witnessCache: Table[EthAddress, WitnessData]
|
||||
isDirty: bool
|
||||
ripemdSpecial: bool
|
||||
|
||||
ReadOnlyStateDB* = distinct AccountsLedgerRef
|
||||
|
||||
TransactionState = enum
|
||||
Pending
|
||||
Committed
|
||||
RolledBack
|
||||
|
||||
LedgerSavePoint* = ref object
|
||||
parentSavepoint: LedgerSavePoint
|
||||
cache: Table[EthAddress, RefAccount]
|
||||
selfDestruct: HashSet[EthAddress]
|
||||
logEntries: seq[Log]
|
||||
accessList: ac_access_list.AccessList
|
||||
transientStorage: TransientStorage
|
||||
state: TransactionState
|
||||
when debugAccountsLedgerRef:
|
||||
depth: int
|
||||
|
||||
const
|
||||
emptyAcc = newAccount()
|
||||
|
||||
resetFlags = {
|
||||
Dirty,
|
||||
IsNew,
|
||||
Touched,
|
||||
CodeChanged,
|
||||
StorageChanged,
|
||||
NewlyCreated
|
||||
}
|
||||
|
||||
ripemdAddr* = block:
|
||||
proc initAddress(x: int): EthAddress {.compileTime.} =
|
||||
result[19] = x.byte
|
||||
initAddress(3)
|
||||
|
||||
when debugAccountsLedgerRef:
|
||||
import
|
||||
stew/byteutils
|
||||
|
||||
proc inspectSavePoint(name: string, x: LedgerSavePoint) =
|
||||
debugEcho "*** ", name, ": ", x.depth, " ***"
|
||||
var sp = x
|
||||
while sp != nil:
|
||||
for address, acc in sp.cache:
|
||||
debugEcho address.toHex, " ", acc.flags
|
||||
sp = sp.parentSavepoint
|
||||
|
||||
proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint {.gcsafe.}
|
||||
|
||||
# FIXME-Adam: this is only necessary because of my sanity checks on the latest rootHash;
|
||||
# take this out once those are gone.
|
||||
proc rawTrie*(ac: AccountsLedgerRef): AccountLedger = ac.ledger
|
||||
|
||||
func newCoreDbAccount: CoreDbAccount =
|
||||
CoreDbAccount(
|
||||
nonce: emptyAcc.nonce,
|
||||
balance: emptyAcc.balance,
|
||||
codeHash: emptyAcc.codeHash,
|
||||
storageVid: CoreDbVidRef(nil))
|
||||
|
||||
template noRlpException(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except RlpError as e:
|
||||
raiseAssert info & ", name=\"" & $e.name & "\", msg=\"" & e.msg & "\""
|
||||
|
||||
# The AccountsLedgerRef is modeled after TrieDatabase for it's transaction style
|
||||
proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef,
|
||||
root: KeccakHash, pruneTrie = true): AccountsLedgerRef =
|
||||
new result
|
||||
result.kvt = db.kvt
|
||||
result.ledger = AccountLedger.init(db, root, pruneTrie)
|
||||
result.witnessCache = initTable[EthAddress, WitnessData]()
|
||||
discard result.beginSavepoint
|
||||
|
||||
proc init*(x: typedesc[AccountsLedgerRef], db: CoreDbRef, pruneTrie = true): AccountsLedgerRef =
|
||||
init(x, db, EMPTY_ROOT_HASH, pruneTrie)
|
||||
|
||||
proc rootHash*(ac: AccountsLedgerRef): KeccakHash =
|
||||
# make sure all savepoint already committed
|
||||
doAssert(ac.savePoint.parentSavepoint.isNil)
|
||||
# make sure all cache already committed
|
||||
doAssert(ac.isDirty == false)
|
||||
ac.ledger.rootHash
|
||||
|
||||
proc isTopLevelClean*(ac: AccountsLedgerRef): bool =
|
||||
## Getter, returns `true` if all pending data have been commited.
|
||||
not ac.isDirty and ac.savePoint.parentSavepoint.isNil
|
||||
|
||||
proc beginSavepoint*(ac: AccountsLedgerRef): LedgerSavePoint =
|
||||
new result
|
||||
result.cache = initTable[EthAddress, RefAccount]()
|
||||
result.accessList.init()
|
||||
result.transientStorage.init()
|
||||
result.state = Pending
|
||||
result.parentSavepoint = ac.savePoint
|
||||
ac.savePoint = result
|
||||
|
||||
when debugAccountsLedgerRef:
|
||||
if not result.parentSavePoint.isNil:
|
||||
result.depth = result.parentSavePoint.depth + 1
|
||||
inspectSavePoint("snapshot", result)
|
||||
|
||||
proc rollback*(ac: AccountsLedgerRef, sp: LedgerSavePoint) =
|
||||
# Transactions should be handled in a strictly nested fashion.
|
||||
# Any child transaction must be committed or rolled-back before
|
||||
# its parent transactions:
|
||||
doAssert ac.savePoint == sp and sp.state == Pending
|
||||
ac.savePoint = sp.parentSavepoint
|
||||
sp.state = RolledBack
|
||||
|
||||
when debugAccountsLedgerRef:
|
||||
inspectSavePoint("rollback", ac.savePoint)
|
||||
|
||||
proc commit*(ac: AccountsLedgerRef, sp: LedgerSavePoint) =
|
||||
# Transactions should be handled in a strictly nested fashion.
|
||||
# Any child transaction must be committed or rolled-back before
|
||||
# its parent transactions:
|
||||
doAssert ac.savePoint == sp and sp.state == Pending
|
||||
# cannot commit most inner savepoint
|
||||
doAssert not sp.parentSavepoint.isNil
|
||||
|
||||
ac.savePoint = sp.parentSavepoint
|
||||
for k, v in sp.cache:
|
||||
sp.parentSavepoint.cache[k] = v
|
||||
|
||||
ac.savePoint.transientStorage.merge(sp.transientStorage)
|
||||
ac.savePoint.accessList.merge(sp.accessList)
|
||||
ac.savePoint.selfDestruct.incl sp.selfDestruct
|
||||
ac.savePoint.logEntries.add sp.logEntries
|
||||
sp.state = Committed
|
||||
|
||||
when debugAccountsLedgerRef:
|
||||
inspectSavePoint("commit", ac.savePoint)
|
||||
|
||||
proc dispose*(ac: AccountsLedgerRef, sp: LedgerSavePoint) =
|
||||
if sp.state == Pending:
|
||||
ac.rollback(sp)
|
||||
|
||||
proc safeDispose*(ac: AccountsLedgerRef, sp: LedgerSavePoint) =
|
||||
if (not isNil(sp)) and (sp.state == Pending):
|
||||
ac.rollback(sp)
|
||||
|
||||
proc getAccount(ac: AccountsLedgerRef, address: EthAddress, shouldCreate = true): RefAccount =
|
||||
# search account from layers of cache
|
||||
var sp = ac.savePoint
|
||||
while sp != nil:
|
||||
result = sp.cache.getOrDefault(address)
|
||||
if not result.isNil:
|
||||
return
|
||||
sp = sp.parentSavepoint
|
||||
|
||||
# not found in cache, look into state trie
|
||||
let rc = ac.ledger.fetch address
|
||||
if rc.isOk:
|
||||
result = RefAccount(
|
||||
account: rc.value,
|
||||
flags: {Alive})
|
||||
|
||||
elif shouldCreate:
|
||||
result = RefAccount(
|
||||
account: newCoreDbAccount(),
|
||||
flags: {Alive, IsNew})
|
||||
|
||||
else:
|
||||
return # ignore, don't cache
|
||||
|
||||
# cache the account
|
||||
ac.savePoint.cache[address] = result
|
||||
|
||||
proc clone(acc: RefAccount, cloneStorage: bool): RefAccount =
|
||||
new(result)
|
||||
result.account = acc.account
|
||||
result.flags = acc.flags
|
||||
result.code = acc.code
|
||||
|
||||
if cloneStorage:
|
||||
result.originalStorage = acc.originalStorage
|
||||
# it's ok to clone a table this way
|
||||
result.overlayStorage = acc.overlayStorage
|
||||
|
||||
proc isEmpty(acc: RefAccount): bool =
|
||||
result = acc.account.codeHash == EMPTY_SHA3 and
|
||||
acc.account.balance.isZero and
|
||||
acc.account.nonce == 0
|
||||
|
||||
template exists(acc: RefAccount): bool =
|
||||
Alive in acc.flags
|
||||
|
||||
proc originalStorageValue(acc: RefAccount, slot: UInt256, ac: AccountsLedgerRef): UInt256 =
|
||||
# share the same original storage between multiple
|
||||
# versions of account
|
||||
if acc.originalStorage.isNil:
|
||||
acc.originalStorage = newTable[UInt256, UInt256]()
|
||||
else:
|
||||
acc.originalStorage[].withValue(slot, val) do:
|
||||
return val[]
|
||||
|
||||
# Not in the original values cache - go to the DB.
|
||||
let rc = StorageLedger.init(ac.ledger, acc.account).fetch slot
|
||||
if rc.isOk and 0 < rc.value.len:
|
||||
noRlpException "originalStorageValue()":
|
||||
result = rlp.decode(rc.value, UInt256)
|
||||
|
||||
acc.originalStorage[slot] = result
|
||||
|
||||
proc storageValue(acc: RefAccount, slot: UInt256, ac: AccountsLedgerRef): UInt256 =
|
||||
acc.overlayStorage.withValue(slot, val) do:
|
||||
return val[]
|
||||
do:
|
||||
result = acc.originalStorageValue(slot, ac)
|
||||
|
||||
proc kill(acc: RefAccount) =
|
||||
acc.flags.excl Alive
|
||||
acc.overlayStorage.clear()
|
||||
acc.originalStorage = nil
|
||||
acc.account = newCoreDbAccount()
|
||||
acc.code = default(seq[byte])
|
||||
|
||||
type
|
||||
PersistMode = enum
|
||||
DoNothing
|
||||
Update
|
||||
Remove
|
||||
|
||||
proc persistMode(acc: RefAccount): PersistMode =
|
||||
result = DoNothing
|
||||
if Alive in acc.flags:
|
||||
if IsNew in acc.flags or Dirty in acc.flags:
|
||||
result = Update
|
||||
else:
|
||||
if IsNew notin acc.flags:
|
||||
result = Remove
|
||||
|
||||
proc persistCode(acc: RefAccount, ac: AccountsLedgerRef) =
|
||||
if acc.code.len != 0:
|
||||
when defined(geth):
|
||||
ac.kvt.put(acc.account.codeHash.data, acc.code)
|
||||
else:
|
||||
ac.kvt.put(contractHashKey(acc.account.codeHash).toOpenArray, acc.code)
|
||||
|
||||
proc persistStorage(acc: RefAccount, ac: AccountsLedgerRef, clearCache: bool) =
|
||||
if acc.overlayStorage.len == 0:
|
||||
# TODO: remove the storage too if we figure out
|
||||
# how to create 'virtual' storage room for each account
|
||||
return
|
||||
|
||||
if not clearCache and acc.originalStorage.isNil:
|
||||
acc.originalStorage = newTable[UInt256, UInt256]()
|
||||
|
||||
var storageLedger = StorageLedger.init(ac.ledger, acc.account)
|
||||
|
||||
for slot, value in acc.overlayStorage:
|
||||
if value > 0:
|
||||
let encodedValue = rlp.encode(value)
|
||||
storageLedger.merge(slot, encodedValue)
|
||||
else:
|
||||
storageLedger.delete(slot)
|
||||
|
||||
let key = slot.toBytesBE.keccakHash.data.slotHashToSlotKey
|
||||
ac.kvt.put(key.toOpenArray, rlp.encode(slot))
|
||||
|
||||
if not clearCache:
|
||||
# if we preserve cache, move the overlayStorage
|
||||
# to originalStorage, related to EIP2200, EIP1283
|
||||
for slot, value in acc.overlayStorage:
|
||||
if value > 0:
|
||||
acc.originalStorage[slot] = value
|
||||
else:
|
||||
acc.originalStorage.del(slot)
|
||||
acc.overlayStorage.clear()
|
||||
|
||||
acc.account.storageVid = storageLedger.rootVid
|
||||
|
||||
proc makeDirty(ac: AccountsLedgerRef, address: EthAddress, cloneStorage = true): RefAccount =
|
||||
ac.isDirty = true
|
||||
result = ac.getAccount(address)
|
||||
if address in ac.savePoint.cache:
|
||||
# it's already in latest savepoint
|
||||
result.flags.incl Dirty
|
||||
return
|
||||
|
||||
# put a copy into latest savepoint
|
||||
result = result.clone(cloneStorage)
|
||||
result.flags.incl Dirty
|
||||
ac.savePoint.cache[address] = result
|
||||
|
||||
proc getCodeHash*(ac: AccountsLedgerRef, address: EthAddress): Hash256 =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil: emptyAcc.codeHash
|
||||
else: acc.account.codeHash
|
||||
|
||||
proc getBalance*(ac: AccountsLedgerRef, address: EthAddress): UInt256 =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil: emptyAcc.balance
|
||||
else: acc.account.balance
|
||||
|
||||
proc getNonce*(ac: AccountsLedgerRef, address: EthAddress): AccountNonce =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil: emptyAcc.nonce
|
||||
else: acc.account.nonce
|
||||
|
||||
proc getCode*(ac: AccountsLedgerRef, address: EthAddress): seq[byte] =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
|
||||
if CodeLoaded in acc.flags or CodeChanged in acc.flags:
|
||||
result = acc.code
|
||||
else:
|
||||
when defined(geth):
|
||||
let data = ac.kvt.get(acc.account.codeHash.data)
|
||||
else:
|
||||
let data = ac.kvt.get(contractHashKey(acc.account.codeHash).toOpenArray)
|
||||
|
||||
acc.code = data
|
||||
acc.flags.incl CodeLoaded
|
||||
result = acc.code
|
||||
|
||||
proc getCodeSize*(ac: AccountsLedgerRef, address: EthAddress): int =
|
||||
ac.getCode(address).len
|
||||
|
||||
proc getCommittedStorage*(ac: AccountsLedgerRef, address: EthAddress, slot: UInt256): UInt256 =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
acc.originalStorageValue(slot, ac)
|
||||
|
||||
proc getStorage*(ac: AccountsLedgerRef, address: EthAddress, slot: UInt256): UInt256 =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
acc.storageValue(slot, ac)
|
||||
|
||||
proc hasCodeOrNonce*(ac: AccountsLedgerRef, address: EthAddress): bool =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
acc.account.nonce != 0 or acc.account.codeHash != EMPTY_SHA3
|
||||
|
||||
proc accountExists*(ac: AccountsLedgerRef, address: EthAddress): bool =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return
|
||||
acc.exists()
|
||||
|
||||
proc isEmptyAccount*(ac: AccountsLedgerRef, address: EthAddress): bool =
|
||||
let acc = ac.getAccount(address, false)
|
||||
doAssert not acc.isNil
|
||||
doAssert acc.exists()
|
||||
acc.isEmpty()
|
||||
|
||||
proc isDeadAccount*(ac: AccountsLedgerRef, address: EthAddress): bool =
|
||||
let acc = ac.getAccount(address, false)
|
||||
if acc.isNil:
|
||||
return true
|
||||
if not acc.exists():
|
||||
return true
|
||||
acc.isEmpty()
|
||||
|
||||
proc setBalance*(ac: AccountsLedgerRef, address: EthAddress, balance: UInt256) =
|
||||
let acc = ac.getAccount(address)
|
||||
acc.flags.incl {Alive}
|
||||
if acc.account.balance != balance:
|
||||
ac.makeDirty(address).account.balance = balance
|
||||
|
||||
proc addBalance*(ac: AccountsLedgerRef, address: EthAddress, delta: UInt256) =
|
||||
# EIP161: We must check emptiness for the objects such that the account
|
||||
# clearing (0,0,0 objects) can take effect.
|
||||
if delta.isZero:
|
||||
let acc = ac.getAccount(address)
|
||||
if acc.isEmpty:
|
||||
ac.makeDirty(address).flags.incl Touched
|
||||
return
|
||||
ac.setBalance(address, ac.getBalance(address) + delta)
|
||||
|
||||
proc subBalance*(ac: AccountsLedgerRef, address: EthAddress, delta: UInt256) =
  if delta.isZero:
    # This zero delta early exit is important as shown in EIP-4788.
    # If the account is created, it will change the state.
    # But early exit will prevent the account creation.
    # In this case, the SystemAddress
    return
  ac.setBalance(address, ac.getBalance(address) - delta)

proc setNonce*(ac: AccountsLedgerRef, address: EthAddress, nonce: AccountNonce) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  if acc.account.nonce != nonce:
    ac.makeDirty(address).account.nonce = nonce

proc incNonce*(ac: AccountsLedgerRef, address: EthAddress) =
  ac.setNonce(address, ac.getNonce(address) + 1)

proc setCode*(ac: AccountsLedgerRef, address: EthAddress, code: seq[byte]) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  let codeHash = keccakHash(code)
  if acc.account.codeHash != codeHash:
    var acc = ac.makeDirty(address)
    acc.account.codeHash = codeHash
    acc.code = code
    acc.flags.incl CodeChanged

proc setStorage*(ac: AccountsLedgerRef, address: EthAddress, slot, value: UInt256) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  let oldValue = acc.storageValue(slot, ac)
  if oldValue != value:
    var acc = ac.makeDirty(address)
    acc.overlayStorage[slot] = value
    acc.flags.incl StorageChanged

proc clearStorage*(ac: AccountsLedgerRef, address: EthAddress) =
  # a.k.a createStateObject. If there is an existing account with
  # the given address, it is overwritten.

  let acc = ac.getAccount(address)
  acc.flags.incl {Alive, NewlyCreated}
  let accHash = acc.account.storageVid.hash.valueOr: return
  if accHash != EMPTY_ROOT_HASH:
    # there is no point to clone the storage since we want to remove it
    let acc = ac.makeDirty(address, cloneStorage = false)
    acc.account.storageVid = CoreDbVidRef(nil)
    if acc.originalStorage.isNil.not:
      # also clear originalStorage cache, otherwise
      # both getStorage and getCommittedStorage will
      # return wrong value
      acc.originalStorage.clear()

proc deleteAccount*(ac: AccountsLedgerRef, address: EthAddress) =
  # make sure all savepoints already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  let acc = ac.getAccount(address)
  acc.kill()

proc selfDestruct*(ac: AccountsLedgerRef, address: EthAddress) =
  ac.setBalance(address, 0.u256)
  ac.savePoint.selfDestruct.incl address

proc selfDestruct6780*(ac: AccountsLedgerRef, address: EthAddress) =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return

  if NewlyCreated in acc.flags:
    ac.selfDestruct(address)

proc selfDestructLen*(ac: AccountsLedgerRef): int =
  ac.savePoint.selfDestruct.len

proc addLogEntry*(ac: AccountsLedgerRef, log: Log) =
  ac.savePoint.logEntries.add log

proc logEntries*(ac: AccountsLedgerRef): seq[Log] =
  ac.savePoint.logEntries

proc getAndClearLogEntries*(ac: AccountsLedgerRef): seq[Log] =
  result = ac.savePoint.logEntries
  ac.savePoint.logEntries.setLen(0)

proc ripemdSpecial*(ac: AccountsLedgerRef) =
  ac.ripemdSpecial = true

proc deleteEmptyAccount(ac: AccountsLedgerRef, address: EthAddress) =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  if not acc.isEmpty:
    return
  if not acc.exists:
    return
  acc.kill()

proc clearEmptyAccounts(ac: AccountsLedgerRef) =
  for address, acc in ac.savePoint.cache:
    if Touched in acc.flags and
       acc.isEmpty and acc.exists:
      acc.kill()

  # https://github.com/ethereum/EIPs/issues/716
  if ac.ripemdSpecial:
    ac.deleteEmptyAccount(ripemdAddr)
    ac.ripemdSpecial = false

proc persist*(ac: AccountsLedgerRef,
              clearEmptyAccount: bool = false,
              clearCache: bool = true) =
  # make sure all savepoint already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  var cleanAccounts = initHashSet[EthAddress]()

  if clearEmptyAccount:
    ac.clearEmptyAccounts()

  for address in ac.savePoint.selfDestruct:
    ac.deleteAccount(address)

  for address, acc in ac.savePoint.cache:
    case acc.persistMode()
    of Update:
      if CodeChanged in acc.flags:
        acc.persistCode(ac)
      if StorageChanged in acc.flags:
        # storageRoot must be updated first
        # before persisting account into merkle trie
        acc.persistStorage(ac, clearCache)
      ac.ledger.merge(address, acc.account)
    of Remove:
      ac.ledger.delete address
      if not clearCache:
        cleanAccounts.incl address
    of DoNothing:
      # dead man tell no tales
      # remove touched dead account from cache
      if not clearCache and Alive notin acc.flags:
        cleanAccounts.incl address

    acc.flags = acc.flags - resetFlags

  if clearCache:
    ac.savePoint.cache.clear()
  else:
    for x in cleanAccounts:
      ac.savePoint.cache.del x

  ac.savePoint.selfDestruct.clear()

  # EIP2929
  ac.savePoint.accessList.clear()

  ac.isDirty = false

iterator addresses*(ac: AccountsLedgerRef): EthAddress =
  # make sure all savepoint already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for address, _ in ac.savePoint.cache:
    yield address

iterator accounts*(ac: AccountsLedgerRef): Account =
  # make sure all savepoint already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for _, account in ac.savePoint.cache:
    yield account.account.recast.value

iterator pairs*(ac: AccountsLedgerRef): (EthAddress, Account) =
  # make sure all savepoint already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for address, account in ac.savePoint.cache:
    yield (address, account.account.recast.value)

iterator storage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) {.gcsafe, raises: [CoreDbApiError].} =
  # beware that if the account not persisted,
  # the storage root will not be updated
  let acc = ac.getAccount(address, false)
  if not acc.isNil:
    noRlpException "storage()":
      for slotHash, value in ac.ledger.storage acc.account:
        if slotHash.len == 0: continue
        let keyData = ac.kvt.get(slotHashToSlotKey(slotHash).toOpenArray)
        if keyData.len == 0: continue
        yield (rlp.decode(keyData, UInt256), rlp.decode(value, UInt256))

iterator cachedStorage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) =
  let acc = ac.getAccount(address, false)
  if not acc.isNil:
    if not acc.originalStorage.isNil:
      for k, v in acc.originalStorage:
        yield (k, v)

proc getStorageRoot*(ac: AccountsLedgerRef, address: EthAddress): Hash256 =
  # beware that if the account not persisted,
  # the storage root will not be updated
  let acc = ac.getAccount(address, false)
  if acc.isNil: EMPTY_ROOT_HASH
  else: acc.account.storageVid.hash.valueOr: EMPTY_ROOT_HASH

func update(wd: var WitnessData, acc: RefAccount) =
  wd.codeTouched = CodeChanged in acc.flags

  if not acc.originalStorage.isNil:
    for k, v in acc.originalStorage:
      if v.isZero: continue
      wd.storageKeys.incl k

  for k, v in acc.overlayStorage:
    if v.isZero and k notin wd.storageKeys:
      continue
    if v.isZero and k in wd.storageKeys:
      wd.storageKeys.excl k
      continue
    wd.storageKeys.incl k

func witnessData(acc: RefAccount): WitnessData =
  result.storageKeys = initHashSet[UInt256]()
  update(result, acc)

proc collectWitnessData*(ac: AccountsLedgerRef) =
  # make sure all savepoint already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  # usually witness data is collected before we call persist()
  for address, acc in ac.savePoint.cache:
    ac.witnessCache.withValue(address, val) do:
      update(val[], acc)
    do:
      ac.witnessCache[address] = witnessData(acc)

func multiKeys(slots: HashSet[UInt256]): MultikeysRef =
  if slots.len == 0: return
  new result
  for x in slots:
    result.add x.toBytesBE
  result.sort()

proc makeMultiKeys*(ac: AccountsLedgerRef): MultikeysRef =
  # this proc is called after we done executing a block
  new result
  for k, v in ac.witnessCache:
    result.add(k, v.codeTouched, multiKeys(v.storageKeys))
  result.sort()

proc accessList*(ac: AccountsLedgerRef, address: EthAddress) =
  ac.savePoint.accessList.add(address)

proc accessList*(ac: AccountsLedgerRef, address: EthAddress, slot: UInt256) =
  ac.savePoint.accessList.add(address, slot)

func inAccessList*(ac: AccountsLedgerRef, address: EthAddress): bool =
  var sp = ac.savePoint
  while sp != nil:
    result = sp.accessList.contains(address)
    if result:
      return
    sp = sp.parentSavepoint

func inAccessList*(ac: AccountsLedgerRef, address: EthAddress, slot: UInt256): bool =
  var sp = ac.savePoint
  while sp != nil:
    result = sp.accessList.contains(address, slot)
    if result:
      return
    sp = sp.parentSavepoint

func getTransientStorage*(ac: AccountsLedgerRef,
                          address: EthAddress, slot: UInt256): UInt256 =
  var sp = ac.savePoint
  while sp != nil:
    let (ok, res) = sp.transientStorage.getStorage(address, slot)
    if ok:
      return res
    sp = sp.parentSavepoint

proc setTransientStorage*(ac: AccountsLedgerRef,
                          address: EthAddress, slot, val: UInt256) =
  ac.savePoint.transientStorage.setStorage(address, slot, val)

proc clearTransientStorage*(ac: AccountsLedgerRef) =
  # make sure all savepoint already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  ac.savePoint.transientStorage.clear()

proc rootHash*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
proc getCodeHash*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getStorageRoot*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getBalance*(db: ReadOnlyStateDB, address: EthAddress): UInt256 {.borrow.}
proc getStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
proc getNonce*(db: ReadOnlyStateDB, address: EthAddress): AccountNonce {.borrow.}
proc getCode*(db: ReadOnlyStateDB, address: EthAddress): seq[byte] {.borrow.}
proc getCodeSize*(db: ReadOnlyStateDB, address: EthAddress): int {.borrow.}
proc hasCodeOrNonce*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc accountExists*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc isDeadAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc isEmptyAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
func inAccessList*(ac: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
func inAccessList*(ac: ReadOnlyStateDB, address: EthAddress, slot: UInt256): bool {.borrow.}
func getTransientStorage*(ac: ReadOnlyStateDB,
                          address: EthAddress, slot: UInt256): UInt256 {.borrow.}
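
For orientation, a minimal sketch of how the savepoint/persist API above is typically driven; the `db`, `root`, `sender` and `ok` values are placeholders and not part of this diff:

# Sketch only: transaction-style use of the AccountsLedgerRef API above.
let ac = AccountsLedgerRef.init(db, root, pruneTrie = true)

let sp = ac.beginSavepoint()
ac.addBalance(sender, 1.u256)          # state changes are buffered in the savepoint
if ok: ac.commit(sp)                   # fold them into the parent savepoint
else: ac.rollback(sp)                  # or drop them again

ac.persist(clearEmptyAccount = true)   # finally write the base savepoint to the trie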

nimbus/db/ledger/backend/accounts_cache.nim (new file, 237 lines)
@ -0,0 +1,237 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  eth/common,
  ../../../../stateless/multi_keys,
  "../.."/[core_db, distinct_tries],
  ../accounts_cache as impl,
  ".."/[base, base/base_desc],
  ./accounts_cache_desc as wrp

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

template noRlpException(info: static[string]; code: untyped) =
  try:
    code
  except RlpError as e:
    raiseAssert info & ", name=\"" & $e.name & "\", msg=\"" & e.msg & "\""

func savePoint(sp: LedgerSpRef): impl.SavePoint =
  wrp.SavePoint(sp).sp

# ----------------

proc ledgerMethods(lc: impl.AccountsCache): LedgerFns =
  LedgerFns(
    accessListFn: proc(eAddr: EthAddress) =
      lc.accessList(eAddr),

    accessList2Fn: proc(eAddr: EthAddress, slot: UInt256) =
      lc.accessList(eAddr, slot),

    accountExistsFn: proc(eAddr: EthAddress): bool =
      lc.accountExists(eAddr),

    addBalanceFn: proc(eAddr: EthAddress, delta: UInt256) =
      lc.addBalance(eAddr, delta),

    addLogEntryFn: proc(log: Log) =
      lc.addLogEntry(log),

    beginSavepointFn: proc(): LedgerSpRef =
      wrp.SavePoint(sp: lc.beginSavepoint()),

    clearStorageFn: proc(eAddr: EthAddress) =
      lc.clearStorage(eAddr),

    clearTransientStorageFn: proc() =
      lc.clearTransientStorage(),

    collectWitnessDataFn: proc() =
      lc.collectWitnessData(),

    commitFn: proc(sp: LedgerSpRef) =
      lc.commit(sp.savePoint),

    deleteAccountFn: proc(eAddr: EthAddress) =
      lc.deleteAccount(eAddr),

    disposeFn: proc(sp: LedgerSpRef) =
      lc.dispose(sp.savePoint),

    getAndClearLogEntriesFn: proc(): seq[Log] =
      lc.getAndClearLogEntries(),

    getBalanceFn: proc(eAddr: EthAddress): UInt256 =
      lc.getBalance(eAddr),

    getCodeFn: proc(eAddr: EthAddress): Blob =
      lc.getCode(eAddr),

    getCodeHashFn: proc(eAddr: EthAddress): Hash256 =
      lc.getCodeHash(eAddr),

    getCodeSizeFn: proc(eAddr: EthAddress): int =
      lc.getCodeSize(eAddr),

    getCommittedStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
      noRlpException "getCommittedStorage()":
        result = lc.getCommittedStorage(eAddr, slot)
      discard,

    getNonceFn: proc(eAddr: EthAddress): AccountNonce =
      lc.getNonce(eAddr),

    getStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
      noRlpException "getStorageFn()":
        result = lc.getStorage(eAddr, slot)
      discard,

    getStorageRootFn: proc(eAddr: EthAddress): Hash256 =
      lc.getStorageRoot(eAddr),

    getTransientStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
      lc.getTransientStorage(eAddr, slot),

    hasCodeOrNonceFn: proc(eAddr: EthAddress): bool =
      lc.hasCodeOrNonce(eAddr),

    inAccessListFn: proc(eAddr: EthAddress): bool =
      lc.inAccessList(eAddr),

    inAccessList2Fn: proc(eAddr: EthAddress, slot: UInt256): bool =
      lc.inAccessList(eAddr, slot),

    incNonceFn: proc(eAddr: EthAddress) =
      lc.incNonce(eAddr),

    isDeadAccountFn: proc(eAddr: EthAddress): bool =
      lc.isDeadAccount(eAddr),

    isEmptyAccountFn: proc(eAddr: EthAddress): bool =
      lc.isEmptyAccount(eAddr),

    isTopLevelCleanFn: proc(): bool =
      lc.isTopLevelClean(),

    logEntriesFn: proc(): seq[Log] =
      lc.logEntries(),

    makeMultiKeysFn: proc(): MultikeysRef =
      lc.makeMultiKeys(),

    persistFn: proc(clearEmptyAccount: bool, clearCache: bool) =
      lc.persist(clearEmptyAccount, clearCache),

    ripemdSpecialFn: proc() =
      lc.ripemdSpecial(),

    rollbackFn: proc(sp: LedgerSpRef) =
      lc.rollback(sp.savePoint),

    rootHashFn: proc(): Hash256 =
      lc.rootHash(),

    safeDisposeFn: proc(sp: LedgerSpRef) =
      if not sp.isNil:
        lc.safeDispose(sp.savePoint)
      discard,

    selfDestructFn: proc(eAddr: EthAddress) =
      lc.selfDestruct(eAddr),

    selfDestruct6780Fn: proc(eAddr: EthAddress) =
      lc.selfDestruct6780(eAddr),

    selfDestructLenFn: proc(): int =
      lc.selfDestructLen(),

    setBalanceFn: proc(eAddr: EthAddress, balance: UInt256) =
      lc.setBalance(eAddr, balance),

    setCodeFn: proc(eAddr: EthAddress, code: Blob) =
      lc.setCode(eAddr, code),

    setNonceFn: proc(eAddr: EthAddress, nonce: AccountNonce) =
      lc.setNonce(eAddr, nonce),

    setStorageFn: proc(eAddr: EthAddress, slot, val: UInt256) =
      noRlpException "setStorage()":
        lc.setStorage(eAddr, slot, val)
      discard,

    setTransientStorageFn: proc(eAddr: EthAddress, slot, val: UInt256) =
      lc.setTransientStorage(eAddr, slot, val),

    subBalanceFn: proc(eAddr: EthAddress, delta: UInt256) =
      lc.subBalance(eAddr, delta))

proc ledgerExtras(lc: impl.AccountsCache): LedgerExtras =
  LedgerExtras(
    rawRootHashFn: proc(): Hash256 =
      lc.rawTrie.rootHash())

# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------

iterator accountsIt*(lc: wrp.AccountsCache): Account =
  for w in lc.ac.accounts():
    yield w

iterator addressesIt*(lc: wrp.AccountsCache): EthAddress =
  for w in lc.ac.addresses():
    yield w

iterator cachedStorageIt*(
    lc: wrp.AccountsCache;
    eAddr: EthAddress;
      ): (UInt256,UInt256) =
  for w in lc.ac.cachedStorage(eAddr):
    yield w

iterator pairsIt*(lc: wrp.AccountsCache): (EthAddress,Account) =
  for w in lc.ac.pairs():
    yield w

iterator storageIt*(
    lc: wrp.AccountsCache;
    eAddr: EthAddress;
      ): (UInt256,UInt256)
      {.gcsafe, raises: [CoreDbApiError].} =
  noRlpException "storage()":
    for w in lc.ac.storage(eAddr):
      yield w

# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------

proc init*(
    T: type wrp.AccountsCache;
    db: CoreDbRef;
    root: Hash256;
    pruneTrie: bool): LedgerRef =
  let lc = impl.AccountsCache.init(db, root, pruneTrie)
  result = T(
    ldgType: LegacyAccountsCache,
    ac: lc,
    extras: lc.ledgerExtras(),
    methods: lc.ledgerMethods())
  result.validate

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
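
As a rough usage sketch (the `db`, `stateRoot` and `someAddr` names below are assumptions, not part of the diff): the constructor above returns a plain `LedgerRef`, so callers go through the unified `base.nim` API and every call is dispatched via the closure table built by `ledgerMethods`:

# Sketch only: instantiating the legacy backend behind the common LedgerRef.
let ldg: LedgerRef = AccountsCache.init(db, stateRoot, pruneTrie = true)
doAssert ldg.ldgType == LegacyAccountsCache
discard ldg.getBalance(someAddr)     # routed through ldg.methods.getBalanceFn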

nimbus/db/ledger/backend/accounts_cache_desc.nim (new file, 12 lines)
@ -0,0 +1,12 @@
import
  ../accounts_cache as impl,
  ../base/base_desc

type
  AccountsCache* = ref object of LedgerRef
    ac*: impl.AccountsCache

  SavePoint* = ref object of LedgerSpRef
    sp*: impl.SavePoint

# End

nimbus/db/ledger/backend/accounts_ledger.nim (new file, 223 lines)
@ -0,0 +1,223 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.

{.push raises: [].}

import
  eth/common,
  ../../../../stateless/multi_keys,
  ../../core_db,
  ../base/base_desc,
  ../accounts_ledger as impl,
  ".."/[base, distinct_ledgers],
  ./accounts_ledger_desc as wrp

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

func savePoint(sp: LedgerSpRef): impl.LedgerSavePoint =
  wrp.LedgerSavePoint(sp).sp

# ----------------

proc ledgerMethods(lc: impl.AccountsLedgerRef): LedgerFns =
  LedgerFns(
    accessListFn: proc(eAddr: EthAddress) =
      lc.accessList(eAddr),

    accessList2Fn: proc(eAddr: EthAddress, slot: UInt256) =
      lc.accessList(eAddr, slot),

    accountExistsFn: proc(eAddr: EthAddress): bool =
      lc.accountExists(eAddr),

    addBalanceFn: proc(eAddr: EthAddress, delta: UInt256) =
      lc.addBalance(eAddr, delta),

    addLogEntryFn: proc(log: Log) =
      lc.addLogEntry(log),

    beginSavepointFn: proc(): LedgerSpRef =
      wrp.LedgerSavePoint(sp: lc.beginSavepoint()),

    clearStorageFn: proc(eAddr: EthAddress) =
      lc.clearStorage(eAddr),

    clearTransientStorageFn: proc() =
      lc.clearTransientStorage(),

    collectWitnessDataFn: proc() =
      lc.collectWitnessData(),

    commitFn: proc(sp: LedgerSpRef) =
      lc.commit(sp.savePoint),

    deleteAccountFn: proc(eAddr: EthAddress) =
      lc.deleteAccount(eAddr),

    disposeFn: proc(sp: LedgerSpRef) =
      lc.dispose(sp.savePoint),

    getAndClearLogEntriesFn: proc(): seq[Log] =
      lc.getAndClearLogEntries(),

    getBalanceFn: proc(eAddr: EthAddress): UInt256 =
      lc.getBalance(eAddr),

    getCodeFn: proc(eAddr: EthAddress): Blob =
      lc.getCode(eAddr),

    getCodeHashFn: proc(eAddr: EthAddress): Hash256 =
      lc.getCodeHash(eAddr),

    getCodeSizeFn: proc(eAddr: EthAddress): int =
      lc.getCodeSize(eAddr),

    getCommittedStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
      lc.getCommittedStorage(eAddr, slot),

    getNonceFn: proc(eAddr: EthAddress): AccountNonce =
      lc.getNonce(eAddr),

    getStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
      lc.getStorage(eAddr, slot),

    getStorageRootFn: proc(eAddr: EthAddress): Hash256 =
      lc.getStorageRoot(eAddr),

    getTransientStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
      lc.getTransientStorage(eAddr, slot),

    hasCodeOrNonceFn: proc(eAddr: EthAddress): bool =
      lc.hasCodeOrNonce(eAddr),

    inAccessListFn: proc(eAddr: EthAddress): bool =
      lc.inAccessList(eAddr),

    inAccessList2Fn: proc(eAddr: EthAddress, slot: UInt256): bool =
      lc.inAccessList(eAddr, slot),

    incNonceFn: proc(eAddr: EthAddress) =
      lc.incNonce(eAddr),

    isDeadAccountFn: proc(eAddr: EthAddress): bool =
      lc.isDeadAccount(eAddr),

    isEmptyAccountFn: proc(eAddr: EthAddress): bool =
      lc.isEmptyAccount(eAddr),

    isTopLevelCleanFn: proc(): bool =
      lc.isTopLevelClean(),

    logEntriesFn: proc(): seq[Log] =
      lc.logEntries(),

    makeMultiKeysFn: proc(): MultikeysRef =
      lc.makeMultiKeys(),

    persistFn: proc(clearEmptyAccount: bool, clearCache: bool) =
      lc.persist(clearEmptyAccount, clearCache),

    ripemdSpecialFn: proc() =
      lc.ripemdSpecial(),

    rollbackFn: proc(sp: LedgerSpRef) =
      lc.rollback(sp.savePoint),

    rootHashFn: proc(): Hash256 =
      lc.rootHash(),

    safeDisposeFn: proc(sp: LedgerSpRef) =
      lc.safeDispose(sp.savePoint),

    selfDestruct6780Fn: proc(eAddr: EthAddress) =
      lc.selfDestruct6780(eAddr),

    selfDestructFn: proc(eAddr: EthAddress) =
      lc.selfDestruct(eAddr),

    selfDestructLenFn: proc(): int =
      lc.selfDestructLen(),

    setBalanceFn: proc(eAddr: EthAddress, balance: UInt256) =
      lc.setBalance(eAddr, balance),

    setCodeFn: proc(eAddr: EthAddress, code: Blob) =
      lc.setCode(eAddr, code),

    setNonceFn: proc(eAddr: EthAddress, nonce: AccountNonce) =
      lc.setNonce(eAddr, nonce),

    setStorageFn: proc(eAddr: EthAddress, slot, val: UInt256) =
      lc.setStorage(eAddr, slot, val),

    setTransientStorageFn: proc(eAddr: EthAddress, slot, val: UInt256) =
      lc.setTransientStorage(eAddr, slot, val),

    subBalanceFn: proc(eAddr: EthAddress, delta: UInt256) =
      lc.subBalance(eAddr, delta))

proc ledgerExtras(lc: impl.AccountsLedgerRef): LedgerExtras =
  LedgerExtras(
    rawRootHashFn: proc(): Hash256 =
      lc.rawTrie.rootHash())

# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------

iterator accountsIt*(lc: wrp.AccountsLedgerRef): Account =
  for w in lc.ac.accounts():
    yield w

iterator addressesIt*(lc: wrp.AccountsLedgerRef): EthAddress =
  for w in lc.ac.addresses():
    yield w

iterator cachedStorageIt*(
    lc: wrp.AccountsLedgerRef;
    eAddr: EthAddress;
      ): (UInt256,UInt256) =
  for w in lc.ac.cachedStorage(eAddr):
    yield w

iterator pairsIt*(lc: wrp.AccountsLedgerRef): (EthAddress,Account) =
  for w in lc.ac.pairs():
    yield w

iterator storageIt*(
    lc: wrp.AccountsLedgerRef;
    eAddr: EthAddress;
      ): (UInt256,UInt256)
      {.gcsafe, raises: [CoreDbApiError].} =
  for w in lc.ac.storage(eAddr):
    yield w

# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------

proc init*(
    T: type wrp.AccountsLedgerRef;
    db: CoreDbRef;
    root: Hash256;
    pruneTrie: bool): LedgerRef =
  let lc = impl.AccountsLedgerRef.init(db, root, pruneTrie)
  result = T(
    ldgType: LedgerCache,
    ac: lc,
    extras: lc.ledgerExtras(),
    methods: lc.ledgerMethods())
  result.validate

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

nimbus/db/ledger/backend/accounts_ledger_desc.nim (new file, 12 lines)
@ -0,0 +1,12 @@
import
  ../accounts_ledger as impl,
  ../base/base_desc

type
  AccountsLedgerRef* = ref object of LedgerRef
    ac*: impl.AccountsLedgerRef

  LedgerSavePoint* = ref object of LedgerSpRef
    sp*: impl.LedgerSavePoint

# End

nimbus/db/ledger/base.nim (new file, 213 lines)
@ -0,0 +1,213 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Unify different ledger management APIs.

{.push raises: [].}

import
  eth/common,
  ../../../stateless/multi_keys,
  ./base/[base_desc, validate]

type
  ReadOnlyStateDB* = distinct LedgerRef

export
  LedgerType,
  LedgerRef,
  LedgerSpRef

when defined(release):
  const AutoValidateDescriptors = false
else:
  const AutoValidateDescriptors = true

# ------------------------------------------------------------------------------
# Public constructor helper
# ------------------------------------------------------------------------------

when AutoValidateDescriptors:
  proc validate*(ldg: LedgerRef) =
    validate.validate(ldg)
else:
  template validate*(ldg: LedgerRef) =
    discard

# ------------------------------------------------------------------------------
# Public methods
# ------------------------------------------------------------------------------

proc accessList*(ldg: LedgerRef, eAddr: EthAddress) =
  ldg.methods.accessListFn(eAddr)

proc accessList*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256) =
  ldg.methods.accessList2Fn(eAddr, slot)

proc accountExists*(ldg: LedgerRef, eAddr: EthAddress): bool =
  ldg.methods.accountExistsFn(eAddr)

proc addBalance*(ldg: LedgerRef, eAddr: EthAddress, delta: UInt256) =
  ldg.methods.addBalanceFn(eAddr, delta)

proc addLogEntry*(ldg: LedgerRef, log: Log) =
  ldg.methods.addLogEntryFn(log)

proc beginSavepoint*(ldg: LedgerRef): LedgerSpRef =
  ldg.methods.beginSavepointFn()

proc clearStorage*(ldg: LedgerRef, eAddr: EthAddress) =
  ldg.methods.clearStorageFn(eAddr)

proc clearTransientStorage*(ldg: LedgerRef) =
  ldg.methods.clearTransientStorageFn()

proc collectWitnessData*(ldg: LedgerRef) =
  ldg.methods.collectWitnessDataFn()

proc commit*(ldg: LedgerRef, sp: LedgerSpRef) =
  ldg.methods.commitFn(sp)

proc deleteAccount*(ldg: LedgerRef, eAddr: EthAddress) =
  ldg.methods.deleteAccountFn(eAddr)

proc dispose*(ldg: LedgerRef, sp: LedgerSpRef) =
  ldg.methods.disposeFn(sp)

proc getAndClearLogEntries*(ldg: LedgerRef): seq[Log] =
  ldg.methods.getAndClearLogEntriesFn()

proc getBalance*(ldg: LedgerRef, eAddr: EthAddress): UInt256 =
  ldg.methods.getBalanceFn(eAddr)

proc getCode*(ldg: LedgerRef, eAddr: EthAddress): Blob =
  ldg.methods.getCodeFn(eAddr)

proc getCodeHash*(ldg: LedgerRef, eAddr: EthAddress): Hash256 =
  ldg.methods.getCodeHashFn(eAddr)

proc getCodeSize*(ldg: LedgerRef, eAddr: EthAddress): int =
  ldg.methods.getCodeSizeFn(eAddr)

proc getCommittedStorage*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): UInt256 =
  ldg.methods.getCommittedStorageFn(eAddr, slot)

proc getNonce*(ldg: LedgerRef, eAddr: EthAddress): AccountNonce =
  ldg.methods.getNonceFn(eAddr)

proc getStorage*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): UInt256 =
  ldg.methods.getStorageFn(eAddr, slot)

proc getStorageRoot*(ldg: LedgerRef, eAddr: EthAddress): Hash256 =
  ldg.methods.getStorageRootFn(eAddr)

proc getTransientStorage*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): UInt256 =
  ldg.methods.getTransientStorageFn(eAddr, slot)

proc hasCodeOrNonce*(ldg: LedgerRef, eAddr: EthAddress): bool =
  ldg.methods.hasCodeOrNonceFn(eAddr)

proc inAccessList*(ldg: LedgerRef, eAddr: EthAddress): bool =
  ldg.methods.inAccessListFn(eAddr)

proc inAccessList*(ldg: LedgerRef, eAddr: EthAddress, slot: UInt256): bool =
  ldg.methods.inAccessList2Fn(eAddr, slot)

proc incNonce*(ldg: LedgerRef, eAddr: EthAddress) =
  ldg.methods.incNonceFn(eAddr)

proc isDeadAccount*(ldg: LedgerRef, eAddr: EthAddress): bool =
  ldg.methods.isDeadAccountFn(eAddr)

proc isEmptyAccount*(ldg: LedgerRef, eAddr: EthAddress): bool =
  ldg.methods.isEmptyAccountFn(eAddr)

proc isTopLevelClean*(ldg: LedgerRef): bool =
  ldg.methods.isTopLevelCleanFn()

proc logEntries*(ldg: LedgerRef): seq[Log] =
  ldg.methods.logEntriesFn()

proc makeMultiKeys*(ldg: LedgerRef): MultikeysRef =
  ldg.methods.makeMultiKeysFn()

proc persist*(ldg: LedgerRef, clearEmptyAccount = false, clearCache = true) =
  ldg.methods.persistFn(clearEmptyAccount, clearCache)

proc ripemdSpecial*(ldg: LedgerRef) =
  ldg.methods.ripemdSpecialFn()

proc rollback*(ldg: LedgerRef, sp: LedgerSpRef) =
  ldg.methods.rollbackFn(sp)

proc rootHash*(ldg: LedgerRef): Hash256 =
  ldg.methods.rootHashFn()

proc safeDispose*(ldg: LedgerRef, sp: LedgerSpRef) =
  ldg.methods.safeDisposeFn(sp)

proc selfDestruct*(ldg: LedgerRef, eAddr: EthAddress) =
  ldg.methods.selfDestructFn(eAddr)

proc selfDestruct6780*(ldg: LedgerRef, eAddr: EthAddress) =
  ldg.methods.selfDestruct6780Fn(eAddr)

proc selfDestructLen*(ldg: LedgerRef): int =
  ldg.methods.selfDestructLenFn()

proc setBalance*(ldg: LedgerRef, eAddr: EthAddress, balance: UInt256) =
  ldg.methods.setBalanceFn(eAddr, balance)

proc setCode*(ldg: LedgerRef, eAddr: EthAddress, code: Blob) =
  ldg.methods.setCodeFn(eAddr, code)

proc setNonce*(ldg: LedgerRef, eAddr: EthAddress, nonce: AccountNonce) =
  ldg.methods.setNonceFn(eAddr, nonce)

proc setStorage*(ldg: LedgerRef, eAddr: EthAddress, slot, val: UInt256) =
  ldg.methods.setStorageFn(eAddr, slot, val)

proc setTransientStorage*(ldg: LedgerRef, eAddr: EthAddress, slot, val: UInt256) =
  ldg.methods.setTransientStorageFn(eAddr, slot, val)

proc subBalance*(ldg: LedgerRef, eAddr: EthAddress, delta: UInt256) =
  ldg.methods.subBalanceFn(eAddr, delta)

# ------------------------------------------------------------------------------
# Public methods, extensions to go away
# ------------------------------------------------------------------------------

proc rawRootHash*(ldg: LedgerRef): Hash256 =
  ldg.extras.rawRootHashFn()

# ------------------------------------------------------------------------------
# Public virtual read-only methods
# ------------------------------------------------------------------------------

proc rootHash*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
proc getCodeHash*(db: ReadOnlyStateDB, eAddr: EthAddress): Hash256 {.borrow.}
proc getStorageRoot*(db: ReadOnlyStateDB, eAddr: EthAddress): Hash256 {.borrow.}
proc getBalance*(db: ReadOnlyStateDB, eAddr: EthAddress): UInt256 {.borrow.}
proc getStorage*(db: ReadOnlyStateDB, eAddr: EthAddress, slot: UInt256): UInt256 {.borrow.}
proc getNonce*(db: ReadOnlyStateDB, eAddr: EthAddress): AccountNonce {.borrow.}
proc getCode*(db: ReadOnlyStateDB, eAddr: EthAddress): seq[byte] {.borrow.}
proc getCodeSize*(db: ReadOnlyStateDB, eAddr: EthAddress): int {.borrow.}
proc hasCodeOrNonce*(db: ReadOnlyStateDB, eAddr: EthAddress): bool {.borrow.}
proc accountExists*(db: ReadOnlyStateDB, eAddr: EthAddress): bool {.borrow.}
proc isDeadAccount*(db: ReadOnlyStateDB, eAddr: EthAddress): bool {.borrow.}
proc isEmptyAccount*(db: ReadOnlyStateDB, eAddr: EthAddress): bool {.borrow.}
proc getCommittedStorage*(db: ReadOnlyStateDB, eAddr: EthAddress, slot: UInt256): UInt256 {.borrow.}
func inAccessList*(db: ReadOnlyStateDB, eAddr: EthAddress): bool {.borrow.}
func inAccessList*(db: ReadOnlyStateDB, eAddr: EthAddress, slot: UInt256): bool {.borrow.}
func getTransientStorage*(db: ReadOnlyStateDB, eAddr: EthAddress, slot: UInt256): UInt256 {.borrow.}

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
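
Because `ReadOnlyStateDB` is a `distinct LedgerRef`, only the borrowed procs above are reachable through it; a small sketch (assuming `ldg` and `eAddr` come from the caller's context):

# Sketch only: read-only view over an existing ledger handle.
let roState = ReadOnlyStateDB(ldg)
discard roState.getBalance(eAddr)      # fine: borrowed read-only accessor
# roState.setBalance(eAddr, 1.u256)    # would not compile: mutators are not borrowed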

nimbus/db/ledger/base/base_desc.nim (new file, 136 lines)
@ -0,0 +1,136 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  eth/common,
  ../../../../stateless/multi_keys

# Annotation helpers
{.pragma: noRaise, gcsafe, raises: [].}

type
  LedgerType* = enum
    Ooops = 0
    LegacyAccountsCache,
    LedgerCache

  LedgerSpRef* = ref object of RootRef
    ## Object for check point or save point

  LedgerRef* = ref object of RootRef
    ## Root object with closures
    ldgType*: LedgerType    ## For debugging
    extras*: LedgerExtras   ## Support might go away
    methods*: LedgerFns

  RawRootHashFn* = proc(): Hash256 {.noRaise.}

  LedgerExtras* = object
    rawRootHashFn*: RawRootHashFn

  AccessListFn* = proc(eAddr: EthAddress) {.noRaise.}
  AccessList2Fn* = proc(eAddr: EthAddress, slot: UInt256) {.noRaise.}
  AccountExistsFn* = proc(eAddr: EthAddress): bool {.noRaise.}
  AddBalanceFn* = proc(eAddr: EthAddress, delta: UInt256) {.noRaise.}
  AddLogEntryFn* = proc(log: Log) {.noRaise.}
  BeginSavepointFn* = proc(): LedgerSpRef {.noRaise.}
  ClearStorageFn* = proc(eAddr: EthAddress) {.noRaise.}
  ClearTransientStorageFn* = proc() {.noRaise.}
  CollectWitnessDataFn* = proc() {.noRaise.}
  CommitFn* = proc(sp: LedgerSpRef) {.noRaise.}
  DeleteAccountFn* = proc(eAddr: EthAddress) {.noRaise.}
  DisposeFn* = proc(sp: LedgerSpRef) {.noRaise.}
  GetAndClearLogEntriesFn* = proc(): seq[Log] {.noRaise.}
  GetBalanceFn* = proc(eAddr: EthAddress): UInt256 {.noRaise.}
  GetCodeFn* = proc(eAddr: EthAddress): Blob {.noRaise.}
  GetCodeHashFn* = proc(eAddr: EthAddress): Hash256 {.noRaise.}
  GetCodeSizeFn* = proc(eAddr: EthAddress): int {.noRaise.}
  GetCommittedStorageFn* =
    proc(eAddr: EthAddress, slot: UInt256): UInt256 {.noRaise.}
  GetNonceFn* = proc(eAddr: EthAddress): AccountNonce {.noRaise.}
  GetStorageFn* = proc(eAddr: EthAddress, slot: UInt256): UInt256 {.noRaise.}
  GetStorageRootFn* = proc(eAddr: EthAddress): Hash256 {.noRaise.}
  GetTransientStorageFn* =
    proc(eAddr: EthAddress, slot: UInt256): UInt256 {.noRaise.}
  HasCodeOrNonceFn* = proc(eAddr: EthAddress): bool {.noRaise.}
  InAccessListFn* = proc(eAddr: EthAddress): bool {.noRaise.}
  InAccessList2Fn* = proc(eAddr: EthAddress, slot: UInt256): bool {.noRaise.}
  IncNonceFn* = proc(eAddr: EthAddress) {.noRaise.}
  IsDeadAccountFn* = proc(eAddr: EthAddress): bool {.noRaise.}
  IsEmptyAccountFn* = proc(eAddr: EthAddress): bool {.noRaise.}
  IsTopLevelCleanFn* = proc(): bool {.noRaise.}
  LogEntriesFn* = proc(): seq[Log] {.noRaise.}
  MakeMultiKeysFn* = proc(): MultikeysRef {.noRaise.}
  PersistFn* = proc(clearEmptyAccount: bool, clearCache: bool) {.noRaise.}
  RipemdSpecialFn* = proc() {.noRaise.}
  RollbackFn* = proc(sp: LedgerSpRef) {.noRaise.}
  RootHashFn* = proc(): Hash256 {.noRaise.}
  SafeDisposeFn* = proc(sp: LedgerSpRef) {.noRaise.}
  SelfDestructFn* = proc(eAddr: EthAddress) {.noRaise.}
  SelfDestruct6780Fn* = proc(eAddr: EthAddress) {.noRaise.}
  SelfDestructLenFn* = proc(): int {.noRaise.}
  SetBalanceFn* = proc(eAddr: EthAddress, balance: UInt256) {.noRaise.}
  SetCodeFn* = proc(eAddr: EthAddress, code: Blob) {.noRaise.}
  SetNonceFn* = proc(eAddr: EthAddress, nonce: AccountNonce) {.noRaise.}
  SetStorageFn* = proc(eAddr: EthAddress, slot, value: UInt256) {.noRaise.}
  SetTransientStorageFn* =
    proc(eAddr: EthAddress, slot, val: UInt256) {.noRaise.}
  SubBalanceFn* = proc(eAddr: EthAddress, delta: UInt256) {.noRaise.}

  LedgerFns* = object
    accessListFn*: AccessListFn
    accessList2Fn*: AccessList2Fn
    accountExistsFn*: AccountExistsFn
    addBalanceFn*: AddBalanceFn
    addLogEntryFn*: AddLogEntryFn
    beginSavepointFn*: BeginSavepointFn
    clearStorageFn*: ClearStorageFn
    clearTransientStorageFn*: ClearTransientStorageFn
    collectWitnessDataFn*: CollectWitnessDataFn
    commitFn*: CommitFn
    deleteAccountFn*: DeleteAccountFn
    disposeFn*: DisposeFn
    getAndClearLogEntriesFn*: GetAndClearLogEntriesFn
    getBalanceFn*: GetBalanceFn
    getCodeFn*: GetCodeFn
    getCodeHashFn*: GetCodeHashFn
    getCodeSizeFn*: GetCodeSizeFn
    getCommittedStorageFn*: GetCommittedStorageFn
    getNonceFn*: GetNonceFn
    getStorageFn*: GetStorageFn
    getStorageRootFn*: GetStorageRootFn
    getTransientStorageFn*: GetTransientStorageFn
    hasCodeOrNonceFn*: HasCodeOrNonceFn
    inAccessListFn*: InAccessListFn
    inAccessList2Fn*: InAccessList2Fn
    incNonceFn*: IncNonceFn
    isDeadAccountFn*: IsDeadAccountFn
    isEmptyAccountFn*: IsEmptyAccountFn
    isTopLevelCleanFn*: IsTopLevelCleanFn
    logEntriesFn*: LogEntriesFn
    makeMultiKeysFn*: MakeMultiKeysFn
    persistFn*: PersistFn
    ripemdSpecialFn*: RipemdSpecialFn
    rollbackFn*: RollbackFn
    rootHashFn*: RootHashFn
    safeDisposeFn*: SafeDisposeFn
    selfDestruct6780Fn*: SelfDestruct6780Fn
    selfDestructFn*: SelfDestructFn
    selfDestructLenFn*: SelfDestructLenFn
    setBalanceFn*: SetBalanceFn
    setCodeFn*: SetCodeFn
    setNonceFn*: SetNonceFn
    setStorageFn*: SetStorageFn
    setTransientStorageFn*: SetTransientStorageFn
    subBalanceFn*: SubBalanceFn

# End

nimbus/db/ledger/base/validate.nim (new file, 67 lines)
@ -0,0 +1,67 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  ./base_desc

proc validate*(ldg: LedgerRef) =
  doAssert ldg.ldgType != LedgerType(0)

  doAssert not ldg.extras.rawRootHashFn.isNil

  doAssert not ldg.methods.accessListFn.isNil
  doAssert not ldg.methods.accessList2Fn.isNil
  doAssert not ldg.methods.accountExistsFn.isNil
  doAssert not ldg.methods.addBalanceFn.isNil
  doAssert not ldg.methods.addLogEntryFn.isNil
  doAssert not ldg.methods.beginSavepointFn.isNil
  doAssert not ldg.methods.clearStorageFn.isNil
  doAssert not ldg.methods.clearTransientStorageFn.isNil
  doAssert not ldg.methods.collectWitnessDataFn.isNil
  doAssert not ldg.methods.commitFn.isNil
  doAssert not ldg.methods.deleteAccountFn.isNil
  doAssert not ldg.methods.disposeFn.isNil
  doAssert not ldg.methods.getAndClearLogEntriesFn.isNil
  doAssert not ldg.methods.getBalanceFn.isNil
  doAssert not ldg.methods.getCodeFn.isNil
  doAssert not ldg.methods.getCodeHashFn.isNil
  doAssert not ldg.methods.getCodeSizeFn.isNil
  doAssert not ldg.methods.getCommittedStorageFn.isNil
  doAssert not ldg.methods.getNonceFn.isNil
  doAssert not ldg.methods.getStorageFn.isNil
  doAssert not ldg.methods.getStorageRootFn.isNil
  doAssert not ldg.methods.getTransientStorageFn.isNil
  doAssert not ldg.methods.hasCodeOrNonceFn.isNil
  doAssert not ldg.methods.inAccessListFn.isNil
  doAssert not ldg.methods.inAccessList2Fn.isNil
  doAssert not ldg.methods.incNonceFn.isNil
  doAssert not ldg.methods.isDeadAccountFn.isNil
  doAssert not ldg.methods.isEmptyAccountFn.isNil
  doAssert not ldg.methods.isTopLevelCleanFn.isNil
  doAssert not ldg.methods.logEntriesFn.isNil
  doAssert not ldg.methods.makeMultiKeysFn.isNil
  doAssert not ldg.methods.persistFn.isNil
  doAssert not ldg.methods.ripemdSpecialFn.isNil
  doAssert not ldg.methods.rollbackFn.isNil
  doAssert not ldg.methods.rootHashFn.isNil
  doAssert not ldg.methods.safeDisposeFn.isNil
  doAssert not ldg.methods.selfDestruct6780Fn.isNil
  doAssert not ldg.methods.selfDestructFn.isNil
  doAssert not ldg.methods.selfDestructLenFn.isNil
  doAssert not ldg.methods.setBalanceFn.isNil
  doAssert not ldg.methods.setCodeFn.isNil
  doAssert not ldg.methods.setNonceFn.isNil
  doAssert not ldg.methods.setStorageFn.isNil
  doAssert not ldg.methods.setTransientStorageFn.isNil
  doAssert not ldg.methods.subBalanceFn.isNil

# End

nimbus/db/ledger/distinct_ledgers.nim (new file, 93 lines)
@ -0,0 +1,93 @@
# The point of this file is just to give a little more type-safety
# and clarity to our use of SecureHexaryTrie, by having distinct
# types for the big trie containing all the accounts and the little
# tries containing the storage for an individual account.
#
# It's nice to have all the accesses go through "getAccountBytes"
# rather than just "get" (which is hard to search for). Plus we
# may want to put in assertions to make sure that the nodes for
# the account are all present (in stateless mode), etc.

{.push raises: [].}

## Re-write of `distinct_tries.nim` to be imported into `accounts_cache.nim`
## for using new database API.
##

import
  std/typetraits,
  eth/common,
  results,
  ../core_db

type
  AccountLedger* = distinct CoreDxAccRef
  StorageLedger* = distinct CoreDxPhkRef
  SomeLedger* = AccountLedger | StorageLedger


proc rootHash*(t: SomeLedger): Hash256 =
  t.distinctBase.rootVid().hash().expect "SomeLedger/rootHash()"

proc rootVid*(t: SomeLedger): CoreDbVidRef =
  t.distinctBase.rootVid


proc init*(
    T: type AccountLedger;
    db: CoreDbRef;
    rootHash: Hash256;
    isPruning = true;
      ): T =
  let vid = db.getRoot(rootHash).expect "AccountLedger/getRoot()"
  db.newAccMpt(vid, isPruning).T

proc init*(
    T: type AccountLedger;
    db: CoreDbRef;
    isPruning = true;
      ): T =
  db.newAccMpt(CoreDbVidRef(nil), isPruning).AccountLedger

proc fetch*(al: AccountLedger; eAddr: EthAddress): Result[CoreDbAccount,void] =
  ## Using `fetch()` for trie data retrieval
  al.distinctBase.fetch(eAddr).mapErr(proc(ign: CoreDbErrorRef) = discard)

proc merge*(al: AccountLedger; eAddr: EthAddress; account: CoreDbAccount) =
  ## Using `merge()` for trie data storage
  al.distinctBase.merge(eAddr, account).expect "AccountLedger/merge()"

proc delete*(al: AccountLedger, eAddr: EthAddress) =
  al.distinctBase.delete(eAddr).expect "AccountLedger/delete()"


proc init*(
    T: type StorageLedger;
    al: AccountLedger;
    account: CoreDbAccount;
    isPruning = true;
      ): T =
  al.distinctBase.parent.newMpt(account.storageVid, isPruning).toPhk.T

proc init*(T: type StorageLedger; db: CoreDbRef, isPruning = true): T =
  db.newMpt(CoreDbVidRef(nil), isPruning).toPhk.T

proc fetch*(sl: StorageLedger, slot: UInt256): Result[Blob,void] =
  sl.distinctBase.fetch(slot.toBytesBE).mapErr proc(ign: CoreDbErrorRef)=discard

proc merge*(sl: StorageLedger, slot: UInt256, value: openArray[byte]) =
  sl.distinctBase.merge(slot.toBytesBE, value).expect "StorageLedger/merge()"

proc delete*(sl: StorageLedger, slot: UInt256) =
  sl.distinctBase.delete(slot.toBytesBE).expect "StorageLedger/delete()"

iterator storage*(
    al: AccountLedger;
    account: CoreDbAccount;
      ): (Blob,Blob)
      {.gcsafe, raises: [CoreDbApiError].} =
  ## For given account, iterate over storage slots
  for (key,val) in al.distinctBase.parent.newMpt(account.storageVid).pairs:
    yield (key,val)

# End
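
A rough sketch of how these distinct ledger types fit together (the `db`, `stateRoot` and `eAddr` values are assumed to come from the caller; failures simply assert via `expect`, matching the procs above):

# Sketch only: look up an account, then read/write one of its storage slots.
let
  accounts = AccountLedger.init(db, stateRoot)
  accData  = accounts.fetch(eAddr).expect "account must exist"
  slots    = StorageLedger.init(accounts, accData)

slots.merge(1.u256, @[byte 0x2a])        # store a value under slot 1
let slotRlp = slots.fetch(1.u256)        # Result[Blob,void] with the raw payload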

@ -141,17 +141,17 @@ proc coreDbMain*(noisy = defined(debug)) =
when isMainModule:
  const
    noisy = defined(debug) or true
    persDb = true
    persDb = true and false

  setErrorLevel()

  # This one uses the readily available dump: `bulkTest0` and some huge replay
  # dumps `bulkTest2`, `bulkTest3`, .. from the `nimbus-eth1-blobs` package.
  # For specs see `tests/test_coredb/bulk_test_xx.nim`.
  var testList = @[bulkTest0]
  testList = @[bulkTest1] # This test supersedes `bulkTest0`
  when true and false:
    testList = @[bulkTest1, bulkTest2, bulkTest3]
  var testList = @[bulkTest0] # This test is superseded by `bulkTest1` and `2`
  # testList = @[failSample0]
  when true: # and false:
    testList = @[bulkTest2, bulkTest3]

  for n,capture in testList:
    noisy.legacyRunner(capture=capture, persistent=persDb)

@ -44,4 +44,16 @@ const
    file: "mainnet332160.txt.gz",
    numBlocks: high(int))

  failSample0* = CaptureSpecs(
    name: "fail-goerli",
    network: bulkTest0.network,
    file: bulkTest0.file,
    numBlocks: 18004)

  failSample1* = CaptureSpecs(
    name: "fail-main",
    network: bulkTest3.network,
    file: bulkTest3.file,
    numBlocks: 51922)

# End

@ -11,6 +11,7 @@

import
  std/strformat,
  chronicles,
  eth/common,
  results,
  unittest2,
@ -22,6 +23,16 @@ import
# Private helpers
# ------------------------------------------------------------------------------

proc setTraceLevel {.used.} =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)

proc setErrorLevel {.used.} =
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)

# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
@ -30,12 +41,14 @@ proc test_chainSyncLegacyApi*(
    noisy: bool;
    filePath: string;
    com: CommonRef;
    numBlocks: int;
    numBlocks = high(int);
    lastOneExtra = true
      ): bool =
  ## Store persistent blocks from dump into chain DB
  let
    sayBlocks = 900.u256
    chain = com.newChain
    lastBlock = max(1, numBlocks - 1).toBlockNumber

  for w in filePath.undumpBlocks:
    let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
@ -43,13 +56,52 @@ proc test_chainSyncLegacyApi*(
      xCheck w[0][0] == com.db.getBlockHeader(0.u256)
      continue

    # Message if [fromBlock,toBlock] contains a multiple of `sayBlocks`
    if fromBlock + (toBlock mod sayBlocks) <= toBlock:
      noisy.say "***", &"processing ...[#{fromBlock},#{toBlock}]..."
    if toBlock < lastBlock:
      # Message if `[fromBlock,toBlock]` contains a multiple of `sayBlocks`
      if fromBlock + (toBlock mod sayBlocks) <= toBlock:
        noisy.say "***", &"processing ...[#{fromBlock},#{toBlock}]..."
      let runPersistBlocksRc = chain.persistBlocks(w[0], w[1])
      xCheck runPersistBlocksRc == ValidationResult.OK:
        if noisy:
          # Re-run with logging enabled
          setTraceLevel()
          discard chain.persistBlocks(w[0], w[1])
      continue

    xCheck chain.persistBlocks(w[0], w[1]) == ValidationResult.OK
    if numBlocks.toBlockNumber <= w[0][^1].blockNumber:
      break
    # Make sure that the `lastBlock` is the first item of the argument batch,
    # so it might be necessary to split off all blocks smaller than `lastBlock`
    # and execute them first. Then the next batch starts with the `lastBlock`.
    let
      pivot = (lastBlock - fromBlock).truncate(uint)
      headers9 = w[0][pivot .. ^1]
      bodies9 = w[1][pivot .. ^1]
    doAssert lastBlock == headers9[0].blockNumber

    # Process leading batch before `lastBlock` (if any)
    var dotsOrSpace = "..."
    if fromBlock < lastBlock:
      let
        headers1 = w[0][0 ..< pivot]
        bodies1 = w[1][0 ..< pivot]
      noisy.say "***", &"processing {dotsOrSpace}[#{fromBlock},#{lastBlock-1}]"
      let runPersistBlocks1Rc = chain.persistBlocks(headers1, bodies1)
      xCheck runPersistBlocks1Rc == ValidationResult.OK
      dotsOrSpace = " "

    if noisy: setTraceLevel()
    if lastOneExtra:
      let
        headers0 = headers9[0..0]
        bodies0 = bodies9[0..0]
      noisy.say "***", &"processing {dotsOrSpace}[#{lastBlock},#{lastBlock}]"
      let runPersistBlocks0Rc = chain.persistBlocks(headers0, bodies0)
      xCheck runPersistBlocks0Rc == ValidationResult.OK
    else:
      noisy.say "***", &"processing {dotsOrSpace}[#{lastBlock},#{toBlock}]"
      let runPersistBlocks9Rc = chain.persistBlocks(headers9, bodies9)
      xCheck runPersistBlocks9Rc == ValidationResult.OK

    break

  true
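
To make the pivot arithmetic above concrete, a worked example with invented numbers: if a batch starts at `fromBlock = 18000` and `lastBlock = 18004`, the pivot is 4, so the first four headers are persisted as a leading batch and `headers9` starts exactly at `lastBlock`.

# Hypothetical numbers only, mirroring the splitting logic in the test above.
let
  fromBlock = 18000.toBlockNumber
  lastBlock = 18004.toBlockNumber
  pivot     = (lastBlock - fromBlock).truncate(uint)   # 4
# w[0][0 ..< pivot]  -> headers for blocks 18000 .. 18003 (persisted first)
# w[0][pivot .. ^1]  -> headers starting at 18004 == lastBlock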