import
  std/[tables, hashes, sets],
  chronos,
  eth/[common, rlp], eth/trie/[hexary, db, trie_defs, nibbles],
  ../utils/functors/possible_futures,
  ../constants, ../utils/utils, storage_types,
  ../../stateless/multi_keys,
  ../evm/async/speculex,
  ./distinct_tries,
  ./access_list as ac_access_list

const
  debugAccountsCache = false

type
  AccountFlag = enum
    Alive
    IsNew
    Dirty
    Touched
    CodeLoaded
    CodeChanged
    StorageChanged

  AccountFlags = set[AccountFlag]

  StorageCell* = SpeculativeExecutionCell[UInt256]

  RefAccount = ref object
    account: Account
    flags: AccountFlags
    code: seq[byte]
    originalStorage: TableRef[UInt256, UInt256]
    overlayStorage: Table[UInt256, StorageCell]

  WitnessData* = object
    storageKeys*: HashSet[UInt256]
    codeTouched*: bool

  AccountsCache* = ref object
    db: TrieDatabaseRef
    trie: AccountsTrie
    savePoint: SavePoint
    witnessCache: Table[EthAddress, WitnessData]
    isDirty: bool
    ripemdSpecial: bool

  ReadOnlyStateDB* = distinct AccountsCache

  TransactionState = enum
    Pending
    Committed
    RolledBack

  SavePoint* = ref object
    parentSavepoint: SavePoint
    cache: Table[EthAddress, RefAccount]
    selfDestruct: HashSet[EthAddress]
    logEntries: seq[Log]
    accessList: ac_access_list.AccessList
    state: TransactionState
    when debugAccountsCache:
      depth: int
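
# Note (added commentary, not part of the original sources): the cache is
# organized as a stack of `SavePoint`s. Each savepoint has its own `cache`
# table of `RefAccount`s; reads walk from the innermost savepoint outwards and
# finally fall back to the account trie, while writes always land in the
# innermost savepoint. A rough sketch of the layering for a hypothetical
# address `a`:
#
#   ac.savePoint          cache: {a: RefAccount(flags: {Alive, Dirty})}  <- reads hit this first
#     .parentSavepoint    cache: {a: RefAccount(flags: {Alive})}
#       .parentSavepoint  cache: {}            <- miss, keep walking
#         (nil)           -> AccountsTrie backed by TrieDatabaseRef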

const
  emptyAcc = newAccount()

  resetFlags = {
    Dirty,
    IsNew,
    Touched,
    CodeChanged,
    StorageChanged
    }

  ripemdAddr* = block:
    proc initAddress(x: int): EthAddress {.compileTime.} =
      result[19] = x.byte
    initAddress(3)
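
# Note (added commentary): `ripemdAddr` works out to the 20-byte address
# 0x0000000000000000000000000000000000000003, i.e. the RIPEMD-160 precompile.
# It is presumably kept as a constant for the special-case empty-account
# handling around `ripemdSpecial` / `clearEmptyAccounts` further down (see the
# EIP-716 link there).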

when debugAccountsCache:
  import
    stew/byteutils

  proc inspectSavePoint(name: string, x: SavePoint) =
    debugEcho "*** ", name, ": ", x.depth, " ***"
    var sp = x
    while sp != nil:
      for address, acc in sp.cache:
        debugEcho address.toHex, " ", acc.flags
      sp = sp.parentSavepoint

proc beginSavepoint*(ac: var AccountsCache): SavePoint {.gcsafe.}

# FIXME-Adam: this is only necessary because of my sanity checks on the latest rootHash;
# take this out once those are gone.
proc rawTrie*(ac: AccountsCache): AccountsTrie = ac.trie
proc rawDb*(ac: AccountsCache): TrieDatabaseRef = ac.trie.db

# The AccountsCache is modeled after TrieDatabase for its transaction style
proc init*(x: typedesc[AccountsCache], db: TrieDatabaseRef,
           root: KeccakHash, pruneTrie: bool = true): AccountsCache =
  new result
  result.db = db
  result.trie = initAccountsTrie(db, root, pruneTrie)
  result.witnessCache = initTable[EthAddress, WitnessData]()
  discard result.beginSavepoint

proc init*(x: typedesc[AccountsCache], db: TrieDatabaseRef, pruneTrie: bool = true): AccountsCache =
  init(x, db, emptyRlpHash, pruneTrie)

proc rootHash*(ac: AccountsCache): KeccakHash =
  # make sure all savepoints are already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  # make sure all cache changes are already committed
  doAssert(ac.isDirty == false)
  ac.trie.rootHash

proc isTopLevelClean*(ac: AccountsCache): bool =
  ## Getter, returns `true` if all pending data have been committed.
  not ac.isDirty and ac.savePoint.parentSavepoint.isNil

proc beginSavepoint*(ac: var AccountsCache): SavePoint =
  new result
  result.cache = initTable[EthAddress, RefAccount]()
  result.accessList.init()
  result.state = Pending
  result.parentSavepoint = ac.savePoint
  ac.savePoint = result

  when debugAccountsCache:
    if not result.parentSavePoint.isNil:
      result.depth = result.parentSavePoint.depth + 1
    inspectSavePoint("snapshot", result)

proc rollback*(ac: var AccountsCache, sp: SavePoint) =
  # Transactions should be handled in a strictly nested fashion.
  # Any child transaction must be committed or rolled back before
  # its parent transaction:
  doAssert ac.savePoint == sp and sp.state == Pending
  ac.savePoint = sp.parentSavepoint
  sp.state = RolledBack

  when debugAccountsCache:
    inspectSavePoint("rollback", ac.savePoint)

proc commit*(ac: var AccountsCache, sp: SavePoint) =
  # Transactions should be handled in a strictly nested fashion.
  # Any child transaction must be committed or rolled back before
  # its parent transaction:
  doAssert ac.savePoint == sp and sp.state == Pending
  # the bottom-most savepoint (the one created by init) cannot be committed
  doAssert not sp.parentSavepoint.isNil

  ac.savePoint = sp.parentSavepoint
  for k, v in sp.cache:
    sp.parentSavepoint.cache[k] = v

  ac.savePoint.accessList.merge(sp.accessList)
  ac.savePoint.selfDestruct.incl sp.selfDestruct
  ac.savePoint.logEntries.add sp.logEntries
  sp.state = Committed

  when debugAccountsCache:
    inspectSavePoint("commit", ac.savePoint)

proc dispose*(ac: var AccountsCache, sp: SavePoint) {.inline.} =
  if sp.state == Pending:
    ac.rollback(sp)

proc safeDispose*(ac: var AccountsCache, sp: SavePoint) {.inline.} =
  if (not isNil(sp)) and (sp.state == Pending):
    ac.rollback(sp)
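
# Usage sketch (added commentary; `db`, `root`, `addr1` and `ok` are
# hypothetical). Savepoints nest like database transactions and must be
# resolved innermost-first:
#
#   var ac = AccountsCache.init(db, root)
#   let sp = ac.beginSavepoint()
#   ac.addBalance(addr1, 1.u256)
#   if ok: ac.commit(sp)
#   else:  ac.rollback(sp)
#   # `dispose`/`safeDispose` roll back anything still pending, which is
#   # convenient in a `defer:` block.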

proc getAccount(ac: AccountsCache, address: EthAddress, shouldCreate = true): RefAccount =
  # search for the account through the layers of cache
  var sp = ac.savePoint
  while sp != nil:
    result = sp.cache.getOrDefault(address)
    if not result.isNil:
      return
    sp = sp.parentSavepoint

  # not found in the cache, look into the state trie
  let recordFound =
    try:
      ac.trie.getAccountBytes(address)
    except RlpError:
      raiseAssert("No RlpError should occur on trie access for an address")
  if recordFound.len > 0:
    # we found it
    try:
      result = RefAccount(
        account: rlp.decode(recordFound, Account),
        flags: {Alive}
        )
    except RlpError:
      raiseAssert("No RlpError should occur on decoding account from trie")
  else:
    if not shouldCreate:
      return
    # it's a request for a new account
    result = RefAccount(
      account: newAccount(),
      flags: {Alive, IsNew}
      )

  # cache the account
  ac.savePoint.cache[address] = result

proc clone(acc: RefAccount, cloneStorage: bool): RefAccount =
  new(result)
  result.account = acc.account
  result.flags = acc.flags
  result.code = acc.code

  if cloneStorage:
    result.originalStorage = acc.originalStorage
    # it's ok to clone a table this way
    result.overlayStorage = acc.overlayStorage

proc isEmpty(acc: RefAccount): bool =
  result = acc.account.codeHash == EMPTY_SHA3 and
    acc.account.balance.isZero and
    acc.account.nonce == 0

template exists(acc: RefAccount): bool =
  Alive in acc.flags

template createTrieKeyFromSlot(slot: UInt256): auto =
  # XXX: This is too expensive. Similar to `createRangeFromAddress`
  # Converts a number to a hex big-endian representation including
  # prefix and leading zeros:
  slot.toByteArrayBE
  # Original py-evm code:
  # pad32(int_to_big_endian(slot))
  # morally equivalent to toByteRange_Unnecessary but with different types

template getStorageTrie(db: TrieDatabaseRef, acc: RefAccount): auto =
  # TODO: implement `prefix-db` to solve issue #228 permanently.
  # the `prefix-db` will automatically insert the account address into the
  # underlying-db key without disturbing how the trie works.
  # it will create a virtual container for each account.
  # see nim-eth#9
  initStorageTrie(db, acc.account.storageRoot, false)

proc originalStorageValue(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef): UInt256 =
  # share the same original storage between multiple
  # versions of the account
  if acc.originalStorage.isNil:
    acc.originalStorage = newTable[UInt256, UInt256]()
  else:
    acc.originalStorage[].withValue(slot, val) do:
      return val[]

  # Not in the original values cache - go to the DB.
  let
    slotAsKey = createTrieKeyFromSlot slot
    storageTrie = getStorageTrie(db, acc)
    foundRecord = storageTrie.getSlotBytes(slotAsKey)

  result = if foundRecord.len > 0:
    rlp.decode(foundRecord, UInt256)
  else:
    UInt256.zero()

  acc.originalStorage[slot] = result

proc storageCell(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef): StorageCell =
  acc.overlayStorage.withValue(slot, cell) do:
    return cell[]
  do:
    return pureCell(acc.originalStorageValue(slot, db))

# FIXME-removeSynchronousInterface: we use this down below, but I don't think that's okay anymore.
proc storageValue(acc: RefAccount, slot: UInt256, db: TrieDatabaseRef): UInt256 =
  waitForValueOf(storageCell(acc, slot, db))
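
# Note (added commentary): storage reads are layered. `overlayStorage` holds
# pending (possibly still speculative) writes as `StorageCell`s, while
# `originalStorage` caches committed values loaded from the storage trie.
# `storageCell` consults the overlay first and only falls back to
# `originalStorageValue`, which goes to the trie and memoizes the result;
# `storageValue` is just the synchronous wrapper that waits on the cell.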

proc kill(acc: RefAccount) =
  acc.flags.excl Alive
  acc.overlayStorage.clear()
  acc.originalStorage = nil
  acc.account = newAccount()
  acc.code = default(seq[byte])

type
  PersistMode = enum
    DoNothing
    Update
    Remove

proc persistMode(acc: RefAccount): PersistMode =
  result = DoNothing
  if Alive in acc.flags:
    if IsNew in acc.flags or Dirty in acc.flags:
      result = Update
  else:
    if IsNew notin acc.flags:
      result = Remove
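
# Note (added commentary): `persistMode` reduces to this decision table:
#
#   Alive in flags   IsNew or Dirty   result
#   --------------   --------------   ---------
#   yes              yes              Update
#   yes              no               DoNothing
#   no               IsNew in flags   DoNothing  (never written, nothing to delete)
#   no               IsNew notin      Remove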

proc persistCode(acc: RefAccount, db: TrieDatabaseRef) =
  if acc.code.len != 0:
    when defined(geth):
      db.put(acc.account.codeHash.data, acc.code)
    else:
      db.put(contractHashKey(acc.account.codeHash).toOpenArray, acc.code)

proc asyncPersistStorage(acc: RefAccount, db: TrieDatabaseRef, clearCache: bool): Future[void] {.async.} =
  if acc.overlayStorage.len == 0:
    # TODO: remove the storage too if we figure out
    # how to create 'virtual' storage room for each account
    return

  if not clearCache and acc.originalStorage.isNil:
    acc.originalStorage = newTable[UInt256, UInt256]()

  var storageTrie = getStorageTrie(db, acc)

  for slot, valueCell in acc.overlayStorage:
    let slotAsKey = createTrieKeyFromSlot slot

    let value = await valueCell.toFuture
    if value > 0:
      let encodedValue = rlp.encode(value)
      storageTrie.putSlotBytes(slotAsKey, encodedValue)
    else:
      storageTrie.delSlotBytes(slotAsKey)

    # TODO: this can be disabled if we do not perform
    # account tracing
    # map the slot hash back to the slot value
    # see iterator storage below
    # slotHash can be obtained from storageTrie.putSlotBytes?
    let slotHash = keccakHash(slotAsKey)
    db.put(slotHashToSlotKey(slotHash.data).toOpenArray, rlp.encode(slot))

  if not clearCache:
    # if we preserve the cache, move the overlayStorage
    # to originalStorage, related to EIP2200, EIP1283
    for slot, valueCell in acc.overlayStorage:
      let value = unsafeGetAlreadyAvailableValue(valueCell)
      if value > 0:
        acc.originalStorage[slot] = value
      else:
        acc.originalStorage.del(slot)
    acc.overlayStorage.clear()

  acc.account.storageRoot = storageTrie.rootHash

proc makeDirty(ac: AccountsCache, address: EthAddress, cloneStorage = true): RefAccount =
  ac.isDirty = true
  result = ac.getAccount(address)
  if address in ac.savePoint.cache:
    # it's already in the latest savepoint
    result.flags.incl Dirty
    return

  # put a copy into the latest savepoint
  result = result.clone(cloneStorage)
  result.flags.incl Dirty
  ac.savePoint.cache[address] = result

proc getCodeHash*(ac: AccountsCache, address: EthAddress): Hash256 {.inline.} =
  let acc = ac.getAccount(address, false)
  if acc.isNil: emptyAcc.codeHash
  else: acc.account.codeHash

proc getBalance*(ac: AccountsCache, address: EthAddress): UInt256 {.inline.} =
  let acc = ac.getAccount(address, false)
  if acc.isNil: emptyAcc.balance
  else: acc.account.balance

proc getNonce*(ac: AccountsCache, address: EthAddress): AccountNonce {.inline.} =
  let acc = ac.getAccount(address, false)
  if acc.isNil: emptyAcc.nonce
  else: acc.account.nonce

proc getCode*(ac: AccountsCache, address: EthAddress): seq[byte] =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return

  if CodeLoaded in acc.flags or CodeChanged in acc.flags:
    result = acc.code
  else:
    when defined(geth):
      let data = ac.db.get(acc.account.codeHash.data)
    else:
      let data = ac.db.get(contractHashKey(acc.account.codeHash).toOpenArray)

    acc.code = data
    acc.flags.incl CodeLoaded
    result = acc.code

proc getCodeSize*(ac: AccountsCache, address: EthAddress): int {.inline.} =
  ac.getCode(address).len

proc getCommittedStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256): UInt256 {.inline.} =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  acc.originalStorageValue(slot, ac.db)

proc getStorageCell*(ac: AccountsCache, address: EthAddress, slot: UInt256): StorageCell =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return pureCell(UInt256.zero)
  return acc.storageCell(slot, ac.db)

# FIXME-removeSynchronousInterface
proc getStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256): UInt256 =
  waitForValueOf(getStorageCell(ac, address, slot))

proc hasCodeOrNonce*(ac: AccountsCache, address: EthAddress): bool {.inline.} =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  acc.account.nonce != 0 or acc.account.codeHash != EMPTY_SHA3

proc accountExists*(ac: AccountsCache, address: EthAddress): bool {.inline.} =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  acc.exists()

proc isEmptyAccount*(ac: AccountsCache, address: EthAddress): bool {.inline.} =
  let acc = ac.getAccount(address, false)
  doAssert not acc.isNil
  doAssert acc.exists()
  acc.isEmpty()

proc isDeadAccount*(ac: AccountsCache, address: EthAddress): bool =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return true
  if not acc.exists():
    return true
  acc.isEmpty()
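
# Note (added commentary): `isEmptyAccount` matches the EIP-161 notion of an
# "empty" account (no code, zero nonce, zero balance), while `isDeadAccount`
# additionally treats missing or killed accounts as dead.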

proc setBalance*(ac: AccountsCache, address: EthAddress, balance: UInt256) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  if acc.account.balance != balance:
    ac.makeDirty(address).account.balance = balance

proc addBalance*(ac: AccountsCache, address: EthAddress, delta: UInt256) {.inline.} =
  # EIP161: We must check emptiness for the objects such that the account
  # clearing (0,0,0 objects) can take effect.
  if delta == 0.u256:
    let acc = ac.getAccount(address)
    if acc.isEmpty:
      ac.makeDirty(address).flags.incl Touched
    return
  ac.setBalance(address, ac.getBalance(address) + delta)
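
# Usage sketch (added commentary; `emptyAddr` is hypothetical): per EIP-161 a
# zero-value transfer to an empty account still marks it `Touched`, so that
# `clearEmptyAccounts` can delete it when persisting:
#
#   ac.addBalance(emptyAddr, 0.u256)
#   # ... end of transaction ...
#   ac.persist(clearEmptyAccount = true)   # the touched empty account is removed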

proc subBalance*(ac: AccountsCache, address: EthAddress, delta: UInt256) {.inline.} =
  ac.setBalance(address, ac.getBalance(address) - delta)

proc setNonce*(ac: AccountsCache, address: EthAddress, nonce: AccountNonce) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  if acc.account.nonce != nonce:
    ac.makeDirty(address).account.nonce = nonce

proc incNonce*(ac: AccountsCache, address: EthAddress) {.inline.} =
  ac.setNonce(address, ac.getNonce(address) + 1)

proc setCode*(ac: AccountsCache, address: EthAddress, code: seq[byte]) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  let codeHash = keccakHash(code)
  if acc.account.codeHash != codeHash:
    var acc = ac.makeDirty(address)
    acc.account.codeHash = codeHash
    acc.code = code
    acc.flags.incl CodeChanged

proc setStorageCell*(ac: AccountsCache, address: EthAddress, slot: UInt256, cell: StorageCell) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  # FIXME-removeSynchronousInterface: ugh, this seems like a problem (that we
  # need the values to be able to check whether they're equal)
  let oldValue = acc.storageValue(slot, ac.db)
  let value = waitForValueOf(cell)
  if oldValue != value:
    var acc = ac.makeDirty(address)
    acc.overlayStorage[slot] = cell
    acc.flags.incl StorageChanged

# FIXME-removeSynchronousInterface
proc setStorage*(ac: AccountsCache, address: EthAddress, slot: UInt256, value: UInt256) =
  setStorageCell(ac, address, slot, pureCell(value))

proc clearStorage*(ac: AccountsCache, address: EthAddress) =
  let acc = ac.getAccount(address)
  acc.flags.incl {Alive}
  if acc.account.storageRoot != emptyRlpHash:
    # there is no point in cloning the storage since we want to remove it
    let acc = ac.makeDirty(address, cloneStorage = false)
    acc.account.storageRoot = emptyRlpHash
    if acc.originalStorage.isNil.not:
      # also clear the originalStorage cache, otherwise
      # both getStorage and getCommittedStorage will
      # return the wrong value
      acc.originalStorage.clear()

proc deleteAccount*(ac: AccountsCache, address: EthAddress) =
  # make sure all savepoints are already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  let acc = ac.getAccount(address)
  acc.kill()

proc selfDestruct*(ac: AccountsCache, address: EthAddress) =
  ac.savePoint.selfDestruct.incl address

proc selfDestructLen*(ac: AccountsCache): int =
  ac.savePoint.selfDestruct.len

proc addLogEntry*(ac: AccountsCache, log: Log) =
  ac.savePoint.logEntries.add log

proc logEntries*(ac: AccountsCache): seq[Log] =
  ac.savePoint.logEntries

proc getAndClearLogEntries*(ac: AccountsCache): seq[Log] =
  result = ac.savePoint.logEntries
  ac.savePoint.logEntries.setLen(0)

proc ripemdSpecial*(ac: AccountsCache) =
  ac.ripemdSpecial = true

proc deleteEmptyAccount(ac: AccountsCache, address: EthAddress) =
  let acc = ac.getAccount(address, false)
  if acc.isNil:
    return
  if not acc.isEmpty:
    return
  if not acc.exists:
    return
  acc.kill()

proc clearEmptyAccounts(ac: AccountsCache) =
  for address, acc in ac.savePoint.cache:
    if Touched in acc.flags and
       acc.isEmpty and acc.exists:
      acc.kill()

  # https://github.com/ethereum/EIPs/issues/716
  if ac.ripemdSpecial:
    ac.deleteEmptyAccount(ripemdAddr)
    ac.ripemdSpecial = false

type MissingNodesError* = ref object of Defect
  paths*: seq[seq[seq[byte]]]
  nodeHashes*: seq[Hash256]

# FIXME-Adam: Move this elsewhere.
# Also, I imagine there's a more efficient way to do this.
proc padRight[V](s: seq[V], n: int, v: V): seq[V] =
  for sv in s:
    result.add(sv)
  while result.len < n:
    result.add(v)

proc padRightWithZeroes(s: NibblesSeq, n: int): NibblesSeq =
  initNibbleRange(padRight(s.getBytes, (n + 1) div 2, byte(0)))

# FIXME-Adam: Why can I never find the conversion function I need?
func toHash*(value: seq[byte]): Hash256 =
  doAssert(value.len == 32)
  var byteArray: array[32, byte]
  for i, b in value:
    byteArray[i] = b
  result.data = byteArray

func encodePath(path: NibblesSeq): seq[byte] =
  if path.len == 64:
    path.getBytes
  else:
    hexPrefixEncode(path)

proc createMissingNodesErrorForAccount(missingAccountPath: NibblesSeq, nodeHash: Hash256): MissingNodesError =
  MissingNodesError(
    paths: @[@[encodePath(padRightWithZeroes(missingAccountPath, 64))]],
    nodeHashes: @[nodeHash]
  )

proc createMissingNodesErrorForSlot(address: EthAddress, missingSlotPath: NibblesSeq, nodeHash: Hash256): MissingNodesError =
  MissingNodesError(
    paths: @[@[@(address.keccakHash.data), encodePath(padRightWithZeroes(missingSlotPath, 64))]],
    nodeHashes: @[nodeHash]
  )
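
# Note (added commentary): the `paths` carried by a MissingNodesError are
# encoded trie paths. For an account, the (possibly partial) nibble path is
# padded out to 64 nibbles and hex-prefix encoded; for a storage slot, the
# path is additionally prefixed with the keccak hash of the owning address so
# the caller can tell which storage trie the missing node belongs to.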

proc asyncPersist*(ac: AccountsCache,
                   clearEmptyAccount: bool = false,
                   clearCache: bool = true): Future[void] {.async.} =
  # make sure all savepoints are already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  var cleanAccounts = initHashSet[EthAddress]()

  if clearEmptyAccount:
    ac.clearEmptyAccounts()

  for address in ac.savePoint.selfDestruct:
    ac.deleteAccount(address)

  for address, acc in ac.savePoint.cache:
    case acc.persistMode()
    of Update:
      if CodeChanged in acc.flags:
        acc.persistCode(ac.db)
      if StorageChanged in acc.flags:
        # storageRoot must be updated first
        # before persisting the account into the merkle trie
        #
        # Also, see the comment on repeatedlyTryToPersist in
        # process_transaction.nim.
        try:
          await acc.asyncPersistStorage(ac.db, clearCache)
        except MissingNodeError as e:
          raise createMissingNodesErrorForSlot(address, e.path, toHash(e.nodeHashBytes))
      ac.trie.putAccountBytes address, rlp.encode(acc.account)
    of Remove:
      try:
        ac.trie.delAccountBytes address
      except MissingNodeError as e:
        raise createMissingNodesErrorForAccount(e.path, toHash(e.nodeHashBytes))
      if not clearCache:
        cleanAccounts.incl address
    of DoNothing:
      # dead men tell no tales
      # remove touched dead accounts from the cache
      if not clearCache and Alive notin acc.flags:
        cleanAccounts.incl address

    acc.flags = acc.flags - resetFlags

  if clearCache:
    ac.savePoint.cache.clear()
  else:
    for x in cleanAccounts:
      ac.savePoint.cache.del x

  ac.savePoint.selfDestruct.clear()

  # EIP2929
  ac.savePoint.accessList.clear()

  ac.isDirty = false

# FIXME-removeSynchronousInterface
proc persist*(ac: AccountsCache,
              clearEmptyAccount: bool = false,
              clearCache: bool = true) =
  waitFor(asyncPersist(ac, clearEmptyAccount, clearCache))
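
# Usage sketch (added commentary; the fork check is a hypothetical condition):
# at the end of block processing the pending state is flushed in one go, e.g.
#
#   ac.collectWitnessData()            # optional, before persist (see below)
#   ac.persist(clearEmptyAccount = someForkCondition, clearCache = true)
#   let newStateRoot = ac.rootHash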

iterator addresses*(ac: AccountsCache): EthAddress =
  # make sure all savepoints are already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for address, _ in ac.savePoint.cache:
    yield address

iterator accounts*(ac: AccountsCache): Account =
  # make sure all savepoints are already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for _, account in ac.savePoint.cache:
    yield account.account

iterator pairs*(ac: AccountsCache): (EthAddress, Account) =
  # make sure all savepoints are already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  for address, account in ac.savePoint.cache:
    yield (address, account.account)

iterator storage*(ac: AccountsCache, address: EthAddress): (UInt256, UInt256) =
  # beware that if the account is not persisted,
  # the storage root will not be updated
  let acc = ac.getAccount(address, false)
  if not acc.isNil:
    let storageRoot = acc.account.storageRoot
    var trie = initHexaryTrie(ac.db, storageRoot)

    for slotHash, value in trie:
      if slotHash.len == 0: continue
      let keyData = ac.db.get(slotHashToSlotKey(slotHash).toOpenArray)
      if keyData.len == 0: continue
      yield (rlp.decode(keyData, UInt256), rlp.decode(value, UInt256))

iterator cachedStorage*(ac: AccountsCache, address: EthAddress): (UInt256, UInt256) =
  let acc = ac.getAccount(address, false)
  if not acc.isNil:
    if not acc.originalStorage.isNil:
      for k, v in acc.originalStorage:
        yield (k, v)

proc getStorageRoot*(ac: AccountsCache, address: EthAddress): Hash256 =
  # beware that if the account is not persisted,
  # the storage root will not be updated
  let acc = ac.getAccount(address, false)
  if acc.isNil: emptyAcc.storageRoot
  else: acc.account.storageRoot

func update(wd: var WitnessData, acc: RefAccount) =
  wd.codeTouched = CodeChanged in acc.flags

  if not acc.originalStorage.isNil:
    for k, v in acc.originalStorage:
      if v.isZero: continue
      wd.storageKeys.incl k

  for k, cell in acc.overlayStorage:
    let v = unsafeGetAlreadyAvailableValue(cell) # FIXME-Adam: should be resolved by now, I think? wait, maybe not?
    if v.isZero and k notin wd.storageKeys:
      continue
    if v.isZero and k in wd.storageKeys:
      wd.storageKeys.excl k
      continue
    wd.storageKeys.incl k

func witnessData(acc: RefAccount): WitnessData =
  result.storageKeys = initHashSet[UInt256]()
  update(result, acc)

proc collectWitnessData*(ac: var AccountsCache) =
  # make sure all savepoints are already committed
  doAssert(ac.savePoint.parentSavepoint.isNil)
  # usually witness data is collected before we call persist()
  for address, acc in ac.savePoint.cache:
    ac.witnessCache.withValue(address, val) do:
      update(val[], acc)
    do:
      ac.witnessCache[address] = witnessData(acc)

func multiKeys(slots: HashSet[UInt256]): MultikeysRef =
  if slots.len == 0: return
  new result
  for x in slots:
    result.add x.toBytesBE
  result.sort()

proc makeMultiKeys*(ac: AccountsCache): MultikeysRef =
  # this proc is called after we are done executing a block
  new result
  for k, v in ac.witnessCache:
    result.add(k, v.codeTouched, multiKeys(v.storageKeys))
  result.sort()
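
# Note (added commentary): the witness flow appears to be: execute a block,
# call `collectWitnessData` to fold every touched account's storage keys and
# code-touched flag into `witnessCache`, then call `makeMultiKeys` to turn
# that cache into the sorted `MultikeysRef` consumed by the stateless-witness
# code in ../../stateless.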

proc accessList*(ac: var AccountsCache, address: EthAddress) {.inline.} =
  ac.savePoint.accessList.add(address)

proc accessList*(ac: var AccountsCache, address: EthAddress, slot: UInt256) {.inline.} =
  ac.savePoint.accessList.add(address, slot)

func inAccessList*(ac: AccountsCache, address: EthAddress): bool =
  var sp = ac.savePoint
  while sp != nil:
    result = sp.accessList.contains(address)
    if result:
      return
    sp = sp.parentSavepoint

func inAccessList*(ac: AccountsCache, address: EthAddress, slot: UInt256): bool =
  var sp = ac.savePoint
  while sp != nil:
    result = sp.accessList.contains(address, slot)
    if result:
      return
    sp = sp.parentSavepoint
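
# Usage sketch (added commentary; `addr1` and `slot` are hypothetical): the
# access list backs EIP-2929 warm/cold accounting. A typical check-then-warm
# pattern is
#
#   let cold = not ac.inAccessList(addr1, slot)
#   if cold:
#     ac.accessList(addr1, slot)     # warm it for subsequent accesses
#
# Entries merge into the parent savepoint on commit, are discarded on
# rollback, and persist() clears the list for the next transaction.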

proc rootHash*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
proc getCodeHash*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getStorageRoot*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
proc getBalance*(db: ReadOnlyStateDB, address: EthAddress): UInt256 {.borrow.}
proc getStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
proc getNonce*(db: ReadOnlyStateDB, address: EthAddress): AccountNonce {.borrow.}
proc getCode*(db: ReadOnlyStateDB, address: EthAddress): seq[byte] {.borrow.}
proc getCodeSize*(db: ReadOnlyStateDB, address: EthAddress): int {.borrow.}
proc hasCodeOrNonce*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc accountExists*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc isDeadAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc isEmptyAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
func inAccessList*(ac: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
func inAccessList*(ac: ReadOnlyStateDB, address: EthAddress, slot: UInt256): bool {.borrow.}