Optional accounts cache module for creating genesis (#1897)
* Split off `ReadOnlyStateDB` from `AccountStateDB` in `state_db.nim`
  why: Apart from testing, applications use `ReadOnlyStateDB` as an easy way to access the accounts ledger. This is well supported by the `Aristo` db, but writable mode is only partially supported. The writable `AccountStateDB` object for modifying accounts is not used by production code. So, for legacy and testing apps, the full support of the previous `AccountStateDB` is now enabled by `import db/state_db/read_write`, while `import db/state_db` provides read-only mode.

* Encapsulate `AccountStateDB` as `GenesisLedgerRef` for genesis creation
  why: `AccountStateDB` has poor support for `Aristo` and is not widely used in favour of `AccountsLedger` (which will be abstracted as `ledger`.) Currently, using ledgers other than `AccountStateDB` within the `GenesisLedgerRef` wrapper is experimental and test only. Eventually, the wrapper should disappear so that the `Ledger` object (which encapsulates `AccountsCache` and `AccountsLedger`) will prevail.

* For the `Ledger`, provide access to the raw accounts `MPT`
  why: This gives access to the `CoreDbMptRef` descriptor from the `CoreDb` (which is the legacy version of `CoreDxMptRef`.) For the new `ledger` API, the accounts are based on the `CoreDxAccRef` descriptor which uses a particular sub-system for accounts, while legacy applications use the `CoreDbPhkRef` equivalent of the `SecureHexaryTrie`. The only place where this feature is currently used is the `genesis.nim` source file.

* Fix `Aristo` bugs, missing boundary checks, typos, etc.

* Verify root vertex in `MPT` and account constructors
  why: Was missing so far; in particular, the accounts constructor must verify `VertexID(1)`.

* Fix include file
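For context, the new `avoidStateDb` switch is threaded from `CommonRef.new` through `toGenesisHeader` down to `newStateDB`, which now returns a `GenesisLedgerRef` backed either by the legacy `AccountStateDB` or by the experimental `AccountsCache` wrapper. The following is a minimal sketch of how a caller might exercise the flag; the import paths and the top-level script form are assumptions for illustration, while `networkParams`, `newCoreDbRef`, `LegacyDbMemory` and `toGenesisHeader` are taken from this diff.

```nim
import
  ../nimbus/db/core_db,                      # assumed path to `CoreDbRef`/`newCoreDbRef`
  ../nimbus/common/[chain_config, genesis]   # assumed path to `networkParams`/`toGenesisHeader`

let
  params = networkParams(MainNet)            # mainnet chain config and genesis spec
  db     = newCoreDbRef LegacyDbMemory       # throw-away in-memory backend
  # `avoidStateDb = true` builds the genesis state via the `AccountsCache`
  # wrapper (experimental, test only); the default `false` keeps the
  # legacy `AccountStateDB` path.
  header = toGenesisHeader(params, db, avoidStateDb = true)

echo "genesis state root: ", header.stateRoot
```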
Parent: 4825ab1566, commit: 3e88589eb1
@ -137,8 +137,9 @@ proc init(com : CommonRef,
|
|||
pruneTrie: bool,
|
||||
networkId: NetworkId,
|
||||
config : ChainConfig,
|
||||
genesis : Genesis) {.gcsafe, raises: [CatchableError].} =
|
||||
|
||||
genesis : Genesis,
|
||||
avoidStateDb: bool
|
||||
) {.gcsafe, raises: [CatchableError].} =
|
||||
config.daoCheck()
|
||||
|
||||
com.db = db
|
||||
|
@ -172,7 +173,7 @@ proc init(com : CommonRef,
|
|||
time: some(genesis.timestamp)
|
||||
))
|
||||
com.genesisHeader = toGenesisHeader(genesis,
|
||||
com.currentFork, com.db)
|
||||
com.currentFork, com.db, avoidStateDb)
|
||||
com.setForkId(com.genesisHeader)
|
||||
com.pos.timestamp = genesis.timestamp
|
||||
else:
|
||||
|
@ -211,7 +212,9 @@ proc new*(_: type CommonRef,
|
|||
db: CoreDbRef,
|
||||
pruneTrie: bool = true,
|
||||
networkId: NetworkId = MainNet,
|
||||
params = networkParams(MainNet)): CommonRef
|
||||
params = networkParams(MainNet),
|
||||
avoidStateDb = false
|
||||
): CommonRef
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
|
||||
## If genesis data is present, the forkIds will be initialized
|
||||
|
@ -222,13 +225,16 @@ proc new*(_: type CommonRef,
|
|||
pruneTrie,
|
||||
networkId,
|
||||
params.config,
|
||||
params.genesis)
|
||||
params.genesis,
|
||||
avoidStateDb)
|
||||
|
||||
proc new*(_: type CommonRef,
|
||||
db: CoreDbRef,
|
||||
config: ChainConfig,
|
||||
pruneTrie: bool = true,
|
||||
networkId: NetworkId = MainNet): CommonRef
|
||||
networkId: NetworkId = MainNet,
|
||||
avoidStateDb = false
|
||||
): CommonRef
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
|
||||
## There is no genesis data present
|
||||
|
@ -239,7 +245,8 @@ proc new*(_: type CommonRef,
|
|||
pruneTrie,
|
||||
networkId,
|
||||
config,
|
||||
nil)
|
||||
nil,
|
||||
avoidStateDb)
|
||||
|
||||
proc clone*(com: CommonRef, db: CoreDbRef): CommonRef =
|
||||
## clone but replace the db
|
||||
|
|
|
@ -8,29 +8,147 @@
|
|||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/tables,
|
||||
eth/[common, eip1559],
|
||||
eth/trie/trie_defs,
|
||||
../db/[core_db, state_db],
|
||||
../db/[accounts_cache, core_db, distinct_tries, state_db/read_write],
|
||||
../constants,
|
||||
./chain_config
|
||||
|
||||
{.push raises: [].}
|
||||
# Annotation helpers
|
||||
{.pragma: noRaise, gcsafe, raises: [].}
|
||||
{.pragma: rlpRaise, gcsafe, raises: [RlpError].}
|
||||
{.pragma: catchRaise, gcsafe, raises: [CatchableError].}
|
||||
|
||||
type
|
||||
GenesisAddAccountFn = proc(
|
||||
address: EthAddress; nonce: AccountNonce; balance: UInt256;
|
||||
code: openArray[byte]) {.catchRaise.}
|
||||
|
||||
GenesisCompensateLegacySetupFn = proc() {.noRaise.}
|
||||
|
||||
GenesisSetStorageFn = proc(
|
||||
address: EthAddress; slot: UInt256; val: UInt256) {.rlpRaise.}
|
||||
|
||||
GenesisCommitFn = proc() {.noRaise.}
|
||||
|
||||
GenesisRootHashFn = proc: Hash256 {.noRaise.}
|
||||
|
||||
GenesisGetTrieFn = proc: CoreDbMptRef {.noRaise.}
|
||||
|
||||
GenesisLedgerRef* = ref object
|
||||
## Exportable ledger DB just for initialising Genesis. This is needed
|
||||
## when using the `Aristo` backend which is not fully supported by the
|
||||
## `AccountStateDB` object.
|
||||
##
|
||||
## Currently, using a ledger other than `AccountStateDB` is
|
||||
## experimental and test only. Eventually, the `GenesisLedgerRef` wrapper
|
||||
## should disappear so that the `Ledger` object (which encapsulates
|
||||
## `AccountsCache` and `AccountsLedger`) will prevail.
|
||||
##
|
||||
addAccount: GenesisAddAccountFn
|
||||
compensateLegacySetup: GenesisCompensateLegacySetupFn
|
||||
setStorage: GenesisSetStorageFn
|
||||
commit: GenesisCommitFn
|
||||
rootHash: GenesisRootHashFn
|
||||
getTrie: GenesisGetTrieFn
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc initStateDbledgerRef(db: CoreDbRef; pruneTrie: bool): GenesisLedgerRef =
|
||||
let sdb = newAccountStateDB(db, emptyRlpHash, pruneTrie)
|
||||
|
||||
GenesisLedgerRef(
|
||||
addAccount: proc(
|
||||
address: EthAddress;
|
||||
nonce: AccountNonce;
|
||||
balance: UInt256;
|
||||
code: openArray[byte];
|
||||
) {.catchRaise.} =
|
||||
sdb.setAccount(address, newAccount(nonce, balance))
|
||||
sdb.setCode(address, code),
|
||||
|
||||
compensateLegacySetup: proc() =
|
||||
if pruneTrie: db.compensateLegacySetup(),
|
||||
|
||||
setStorage: proc(
|
||||
address: EthAddress;
|
||||
slot: UInt256;
|
||||
val: UInt256;
|
||||
) {.rlpRaise.} =
|
||||
sdb.setStorage(address, slot, val),
|
||||
|
||||
commit: proc() =
|
||||
discard,
|
||||
|
||||
rootHash: proc(): Hash256 =
|
||||
sdb.rootHash(),
|
||||
|
||||
getTrie: proc(): CoreDbMptRef =
|
||||
sdb.getTrie())
|
||||
|
||||
|
||||
proc initAccountsLedgerRef(db: CoreDbRef; pruneTrie: bool): GenesisLedgerRef =
|
||||
let ac = AccountsCache.init(db, emptyRlpHash, pruneTrie)
|
||||
|
||||
GenesisLedgerRef(
|
||||
addAccount: proc(
|
||||
address: EthAddress;
|
||||
nonce: AccountNonce;
|
||||
balance: UInt256;
|
||||
code: openArray[byte];
|
||||
) {.catchRaise.} =
|
||||
ac.setNonce(address, nonce)
|
||||
ac.setBalance(address, balance)
|
||||
ac.setCode(address, @code),
|
||||
|
||||
compensateLegacySetup: proc() =
|
||||
if pruneTrie: db.compensateLegacySetup(),
|
||||
|
||||
setStorage: proc(
|
||||
address: EthAddress;
|
||||
slot: UInt256;
|
||||
val: UInt256;
|
||||
) {.rlpRaise.} =
|
||||
ac.setStorage(address, slot, val),
|
||||
|
||||
commit: proc() =
|
||||
ac.persist(),
|
||||
|
||||
rootHash: proc(): Hash256 =
|
||||
ac.rootHash(),
|
||||
|
||||
getTrie: proc(): CoreDbMptRef =
|
||||
ac.rawTrie.mpt)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc newStateDB*(
|
||||
db: CoreDbRef;
|
||||
pruneTrie: bool;
|
||||
): AccountStateDB
|
||||
{.gcsafe, raises: [].}=
|
||||
newAccountStateDB(db, emptyRlpHash, pruneTrie)
|
||||
avoidStateDb = false;
|
||||
): GenesisLedgerRef =
|
||||
## The flag `avoidStateDb` is set `false` for compatibility with legacy apps
|
||||
## (see `test_state_network`).
|
||||
if avoidStateDb:
|
||||
db.initAccountsLedgerRef pruneTrie
|
||||
else:
|
||||
db.initStateDbledgerRef pruneTrie
|
||||
|
||||
proc getTrie*(sdb: GenesisLedgerRef): CoreDbMptRef =
|
||||
## Getter, used in `test_state_network`
|
||||
sdb.getTrie()
|
||||
|
||||
proc toGenesisHeader*(
|
||||
g: Genesis;
|
||||
sdb: AccountStateDB;
|
||||
sdb: GenesisLedgerRef;
|
||||
fork: HardFork;
|
||||
): BlockHeader
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
|
@ -42,11 +160,10 @@ proc toGenesisHeader*(
|
|||
|
||||
# The following kludge is needed for the `LegacyDbPersistent` type database
|
||||
# when `pruneTrie` is enabled. For other cases, this code is irrelevant.
|
||||
sdb.db.compensateLegacySetup()
|
||||
sdb.compensateLegacySetup()
|
||||
|
||||
for address, account in g.alloc:
|
||||
sdb.setAccount(address, newAccount(account.nonce, account.balance))
|
||||
sdb.setCode(address, account.code)
|
||||
sdb.addAccount(address, account.nonce, account.balance, account.code)
|
||||
|
||||
# Kludge:
|
||||
#
|
||||
|
@ -55,12 +172,13 @@ proc toGenesisHeader*(
|
|||
#
|
||||
# This kludge also fixes the initial crash described in
|
||||
# https://github.com/status-im/nimbus-eth1/issues/932.
|
||||
if sdb.pruneTrie:
|
||||
sdb.db.compensateLegacySetup() # <-- kludge
|
||||
sdb.compensateLegacySetup() # <-- kludge
|
||||
|
||||
for k, v in account.storage:
|
||||
sdb.setStorage(address, k, v)
|
||||
|
||||
sdb.commit()
|
||||
|
||||
result = BlockHeader(
|
||||
nonce: g.nonce,
|
||||
timestamp: g.timestamp,
|
||||
|
@ -69,7 +187,7 @@ proc toGenesisHeader*(
|
|||
difficulty: g.difficulty,
|
||||
mixDigest: g.mixHash,
|
||||
coinbase: g.coinbase,
|
||||
stateRoot: sdb.rootHash,
|
||||
stateRoot: sdb.rootHash(),
|
||||
parentHash: GENESIS_PARENT_HASH,
|
||||
txRoot: EMPTY_ROOT_HASH,
|
||||
receiptRoot: EMPTY_ROOT_HASH,
|
||||
|
@ -99,29 +217,27 @@ proc toGenesisHeader*(
|
|||
genesis: Genesis;
|
||||
fork: HardFork;
|
||||
db = CoreDbRef(nil);
|
||||
avoidStateDb = false;
|
||||
): BlockHeader
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Generate the genesis block header from the `genesis` and `config`
|
||||
## argument value.
|
||||
let
|
||||
db = if db.isNil: newCoreDbRef LegacyDbMemory else: db
|
||||
sdb = newStateDB(db, pruneTrie = true)
|
||||
sdb = newStateDB(db, pruneTrie = true, avoidStateDb)
|
||||
toGenesisHeader(genesis, sdb, fork)
|
||||
|
||||
proc toGenesisHeader*(
|
||||
params: NetworkParams;
|
||||
db = CoreDbRef(nil);
|
||||
avoidStateDb = false;
|
||||
): BlockHeader
|
||||
{.raises: [CatchableError].} =
|
||||
## Generate the genesis block header from the `genesis` and `config`
|
||||
## argument value.
|
||||
let map = toForkTransitionTable(params.config)
|
||||
let fork = map.toHardFork(forkDeterminationInfo(0.toBlockNumber, params.genesis.timestamp))
|
||||
toGenesisHeader(params.genesis, fork, db)
|
||||
|
||||
# End
|
||||
|
||||
|
||||
toGenesisHeader(params.genesis, fork, db, avoidStateDb)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
|
|
|
@ -259,6 +259,8 @@ type
|
|||
# Miscellaneous handy helpers
|
||||
PayloadTypeUnsupported
|
||||
LeafKeyInvalid
|
||||
AccountRootUnacceptable
|
||||
AccountRootCannotCreate
|
||||
AccountRlpDecodingError
|
||||
AccountStorageKeyMissing
|
||||
AccountVtxUnsupported
|
||||
|
|
|
@ -39,6 +39,8 @@ proc fetchPayloadImpl(
|
|||
if rc.error[1] in AcceptableHikeStops:
|
||||
return err((vid, FetchPathNotFound))
|
||||
return err((vid, rc.error[1]))
|
||||
if rc.value.legs.len == 0:
|
||||
return err((VertexID(0), FetchPathNotFound))
|
||||
ok rc.value.legs[^1].wp.vtx.lData
|
||||
|
||||
proc fetchPayloadImpl(
|
||||
|
|
|
@ -173,7 +173,7 @@ proc baseMethods(
|
|||
saveMode: CoreDbSaveFlags;
|
||||
): CoreDbRc[CoreDxAccRef] =
|
||||
db.kdbBase.gc()
|
||||
ok(? db.adbBase.newAccHandler(prune, saveMode, "newAccFn()")),
|
||||
ok(? db.adbBase.newAccHandler(root, prune, saveMode, "newAccFn()")),
|
||||
|
||||
beginFn: proc(): CoreDbRc[CoreDxTxRef] =
|
||||
const info = "beginFn()"
|
||||
|
|
|
@ -302,6 +302,33 @@ proc accDelete(
|
|||
return rc.toVoidRcImpl(cMpt.base.parent, info)
|
||||
ok()
|
||||
|
||||
# -------------------------------
|
||||
|
||||
proc cloneMpt(
|
||||
cMpt: AristoChildDbRef;
|
||||
info: static[string];
|
||||
): CoreDbRc[CoreDxMptRef] =
|
||||
let
|
||||
base = cMpt.base
|
||||
db = base.parent
|
||||
adb = base.adb
|
||||
|
||||
base.gc()
|
||||
|
||||
let
|
||||
cXpt = AristoChildDbRef(
|
||||
base: base,
|
||||
root: cMpt.root,
|
||||
prune: cMpt.prune,
|
||||
mpt: if cMpt.mpt == adb: adb else: ? adb.forkTop.toRcImpl(db, info),
|
||||
saveMode: cMpt.saveMode)
|
||||
|
||||
dsc = AristoCoreDxMptRef(
|
||||
ctx: cXpt,
|
||||
methods: cXpt.mptMethods)
|
||||
|
||||
ok(db.bless dsc)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private database methods function tables
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -326,8 +353,7 @@ proc mptMethods(cMpt: AristoChildDbRef): CoreDbMptFns =
|
|||
cMpt.mpt.hasPath(cMpt.root, k).toRcImpl(db, "hasPathFn()"),
|
||||
|
||||
rootVidFn: proc(): CoreDbVidRef =
|
||||
var w = AristoCoreDbVid(ctx: cMpt.mpt, aVid: cMpt.root)
|
||||
db.bless(w),
|
||||
db.bless(AristoCoreDbVid(ctx: cMpt.mpt, aVid: cMpt.root)),
|
||||
|
||||
isPruningFn: proc(): bool =
|
||||
cMpt.prune,
|
||||
|
@ -351,6 +377,9 @@ proc accMethods(cMpt: AristoChildDbRef): CoreDbAccFns =
|
|||
backendFn: proc(): CoreDbAccBackendRef =
|
||||
db.bless(AristoCoreDbAccBE(adb: cMpt.mpt)),
|
||||
|
||||
newMptFn: proc(): CoreDbRc[CoreDxMptRef] =
|
||||
cMpt.cloneMpt("newMptFn()"),
|
||||
|
||||
fetchFn: proc(address: EthAddress): CoreDbRc[CoreDbAccount] =
|
||||
cMpt.accFetch(address, "fetchFn()"),
|
||||
|
||||
|
@ -471,8 +500,8 @@ proc getVid*(
|
|||
db = base.parent
|
||||
adb = base.adb
|
||||
|
||||
if root == VOID_CODE_HASH:
|
||||
return ok(db.bless AristoCoreDbVid())
|
||||
if root == EMPTY_ROOT_HASH:
|
||||
return ok(db.bless AristoCoreDbVid(createOk: createOk))
|
||||
|
||||
block:
|
||||
base.gc() # update pending changes
|
||||
|
@ -513,15 +542,17 @@ proc newMptHandler*(
|
|||
): CoreDbRc[CoreDxMptRef] =
|
||||
base.gc()
|
||||
|
||||
let db = base.parent
|
||||
|
||||
var rootID = root.to(VertexID)
|
||||
if not rootID.isValid:
|
||||
let rc = base.adb.getKeyRc VertexID(1)
|
||||
if rc.isErr and rc.error == GetKeyNotFound:
|
||||
if rc.isErr:
|
||||
if rc.error != GetKeyNotFound:
|
||||
return err(rc.error.toErrorImpl(db, info, RootNotFound))
|
||||
rootID = VertexID(1)
|
||||
|
||||
let
|
||||
db = base.parent
|
||||
|
||||
(mode, mpt) = block:
|
||||
if saveMode == Companion:
|
||||
(saveMode, ? base.adb.forkTop.toRcImpl(db, info))
|
||||
|
@ -546,15 +577,26 @@ proc newMptHandler*(
|
|||
|
||||
proc newAccHandler*(
|
||||
base: AristoBaseRef;
|
||||
root: CoreDbVidRef;
|
||||
prune: bool;
|
||||
saveMode: CoreDbSaveFlags;
|
||||
info: static[string];
|
||||
): CoreDbRc[CoreDxAccRef] =
|
||||
base.gc()
|
||||
|
||||
let
|
||||
db = base.parent
|
||||
let db = base.parent
|
||||
|
||||
if root.isValid:
|
||||
let vid = root.to(VertexID)
|
||||
if vid.isValid:
|
||||
if vid != VertexID(1):
|
||||
let error = (vid,AccountRootUnacceptable)
|
||||
return err(error.toErrorImpl(db, info, RootUnacceptable))
|
||||
elif root.createOk:
|
||||
let error = AccountRootCannotCreate
|
||||
return err(error.toErrorImpl(db, info, RootCannotCreate))
|
||||
|
||||
let
|
||||
(mode, mpt) = block:
|
||||
if saveMode == Companion:
|
||||
(saveMode, ? base.adb.forkTop.toRcImpl(db, info))
|
||||
|
|
|
@ -264,7 +264,7 @@ proc newKvtHandler*(
|
|||
cKvt = KvtChildDbRef(
|
||||
base: base,
|
||||
kvt: kvt,
|
||||
saveMode: saveMode)
|
||||
saveMode: mode)
|
||||
|
||||
dsc = KvtCoreDxKvtRef(
|
||||
ctx: cKvt,
|
||||
|
|
|
@ -257,6 +257,10 @@ proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
|
|||
backendFn: proc(): CoreDbAccBackendRef =
|
||||
db.bless(LegacyCoreDbAccBE(mpt: mpt.trie)),
|
||||
|
||||
newMptFn: proc(): CoreDbRc[CoreDxMptRef] =
|
||||
let xMpt = HexaryChildDbRef(trie: mpt.trie)
|
||||
ok(db.bless CoreDxMptRef(methods: xMpt.mptMethods db)),
|
||||
|
||||
fetchFn: proc(k: EthAddress): CoreDbRc[CoreDbAccount] =
|
||||
db.mapRlpException "fetchFn()":
|
||||
let data = mpt.trie.get(k.keccakHash.data)
|
||||
|
|
|
@ -548,17 +548,24 @@ proc newMpt*(
|
|||
db.ifTrackNewApi:
|
||||
debug newApiTxt "newMpt()", root=root.toStr, prune, saveMode
|
||||
|
||||
proc newMpt*(
|
||||
db: CoreDbRef;
|
||||
prune = true;
|
||||
saveMode = AutoSave;
|
||||
): CoreDxMptRef =
|
||||
proc newMpt*(db: CoreDbRef; prune = true; saveMode = AutoSave): CoreDxMptRef =
|
||||
## Shortcut for `db.newMpt CoreDbVidRef()`
|
||||
let root = CoreDbVidRef()
|
||||
result = db.methods.newMptFn(root, prune, saveMode).valueOr:
|
||||
raiseAssert $$error
|
||||
db.ifTrackNewApi: debug newApiTxt "newMpt()", root=root.toStr, prune, saveMode
|
||||
|
||||
proc newMpt*(acc: CoreDxAccRef): CoreDxMptRef =
|
||||
## Constructor, will defect on failure. The argument `prune` is currently
|
||||
## effective only for the legacy backend.
|
||||
##
|
||||
## Variant of `newMpt()` where the input arguments are taken from the
|
||||
## current `acc` descriptor settings.
|
||||
##
|
||||
result = acc.methods.newMptFn().valueOr:
|
||||
raiseAssert $$error
|
||||
acc.ifTrackNewApi: debug newApiTxt "acc/toMpt()"
|
||||
|
||||
proc newAccMpt*(
|
||||
db: CoreDbRef;
|
||||
root: CoreDbVidRef;
|
||||
|
|
|
@ -49,6 +49,8 @@ type
|
|||
MptNotFound
|
||||
AccNotFound
|
||||
RootNotFound
|
||||
RootUnacceptable
|
||||
RootCannotCreate
|
||||
HashNotAvailable
|
||||
StorageFailed
|
||||
|
||||
|
@ -172,6 +174,7 @@ type
|
|||
# Sub-descriptor: Mpt/hexary trie methods for accounts
|
||||
# ------------------------------------------------------
|
||||
CoreDbAccBackendFn* = proc(): CoreDbAccBackendRef {.noRaise.}
|
||||
CoreDbAccNewMptFn* = proc(): CoreDbRc[CoreDxMptRef] {.noRaise.}
|
||||
CoreDbAccFetchFn* = proc(k: EthAddress): CoreDbRc[CoreDbAccount] {.noRaise.}
|
||||
CoreDbAccDeleteFn* = proc(k: EthAddress): CoreDbRc[void] {.noRaise.}
|
||||
CoreDbAccMergeFn* =
|
||||
|
@ -185,6 +188,7 @@ type
|
|||
CoreDbAccFns* = object
|
||||
## Methods for trie objects
|
||||
backendFn*: CoreDbAccBackendFn
|
||||
newMptFn*: CoreDbAccNewMptFn
|
||||
fetchFn*: CoreDbAccFetchFn
|
||||
deleteFn*: CoreDbAccDeleteFn
|
||||
mergeFn*: CoreDbAccMergeFn
|
||||
|
|
|
@ -66,6 +66,7 @@ proc validateMethodsDesc(fns: CoreDbMptFns) =
|
|||
|
||||
proc validateMethodsDesc(fns: CoreDbAccFns) =
|
||||
doAssert not fns.backendFn.isNil
|
||||
doAssert not fns.newMptFn.isNil
|
||||
doAssert not fns.fetchFn.isNil
|
||||
doAssert not fns.deleteFn.isNil
|
||||
doAssert not fns.mergeFn.isNil
|
||||
|
|
|
@ -180,6 +180,9 @@ proc ledgerMethods(lc: impl.AccountsCache): LedgerFns =
|
|||
|
||||
proc ledgerExtras(lc: impl.AccountsCache): LedgerExtras =
|
||||
LedgerExtras(
|
||||
getMptFn: proc(): CoreDbMptRef =
|
||||
lc.rawTrie.mpt,
|
||||
|
||||
rawRootHashFn: proc(): Hash256 =
|
||||
lc.rawTrie.rootHash())
|
||||
|
||||
|
|
|
@ -167,6 +167,9 @@ proc ledgerMethods(lc: impl.AccountsLedgerRef): LedgerFns =
|
|||
|
||||
proc ledgerExtras(lc: impl.AccountsLedgerRef): LedgerExtras =
|
||||
LedgerExtras(
|
||||
getMptFn: proc(): CoreDbMptRef =
|
||||
lc.rawTrie.CoreDxAccRef.newMpt.CoreDbMptRef,
|
||||
|
||||
rawRootHashFn: proc(): Hash256 =
|
||||
lc.rawTrie.rootHash())
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Copyright (c) 2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
|
@ -14,7 +14,6 @@
|
|||
|
||||
import
|
||||
eth/common,
|
||||
chronicles,
|
||||
../../../stateless/multi_keys,
|
||||
../core_db,
|
||||
./base/[base_desc, validate]
|
||||
|
@ -42,7 +41,7 @@ const
|
|||
# ------------------------------------------------------------------------------
|
||||
|
||||
when EnableApiTracking:
|
||||
import std/strutils, chronicles, stew/byteutils
|
||||
import std/strutils, chronicles, chronicles, stew/byteutils
|
||||
{.warning: "*** Provided API logging for Ledger (disabled by default)".}
|
||||
|
||||
template apiTxt(info: static[string]): static[string] =
|
||||
|
@ -62,6 +61,9 @@ when EnableApiTracking:
|
|||
proc toStr(w: Hash256): string =
|
||||
w.data.oaToStr
|
||||
|
||||
proc toStr(w: CoreDbMptRef): string =
|
||||
if w.CoreDxMptRef.isNil: "MptRef(nil)" else: "MptRef"
|
||||
|
||||
proc toStr(w: Blob): string =
|
||||
if 0 < w.len and w.len < 5: "<" & w.oaToStr & ">"
|
||||
else: "Blob[" & $w.len & "]"
|
||||
|
@ -279,6 +281,10 @@ proc subBalance*(ldg: LedgerRef, eAddr: EthAddress, delta: UInt256) =
|
|||
# Public methods, extensions to go away
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getMpt*(ldg: LedgerRef): CoreDbMptRef =
|
||||
result = ldg.extras.getMptFn()
|
||||
ldg.ifTrackApi: debug apiTxt "getMpt()", result=result.toStr
|
||||
|
||||
proc rawRootHash*(ldg: LedgerRef): Hash256 =
|
||||
result = ldg.extras.rawRootHashFn()
|
||||
ldg.ifTrackApi: debug apiTxt "rawRootHash()", result=result.toStr
|
||||
|
|
|
@ -12,6 +12,7 @@
|
|||
|
||||
import
|
||||
eth/common,
|
||||
../../core_db,
|
||||
../../../../stateless/multi_keys
|
||||
|
||||
# Annotation helpers
|
||||
|
@ -34,8 +35,10 @@ type
|
|||
methods*: LedgerFns
|
||||
|
||||
RawRootHashFn* = proc(): Hash256 {.noRaise.}
|
||||
GetMptFn* = proc(): CoreDbMptRef {.noRaise.}
|
||||
|
||||
LedgerExtras* = object
|
||||
getMptFn*: GetMptFn
|
||||
rawRootHashFn*: RawRootHashFn
|
||||
|
||||
AccessListFn* = proc(eAddr: EthAddress) {.noRaise.}
|
||||
|
|
|
@ -16,6 +16,7 @@ import
|
|||
proc validate*(ldg: LedgerRef) =
|
||||
doAssert ldg.ldgType != LedgerType(0)
|
||||
|
||||
doAssert not ldg.extras.getMptFn.isNil
|
||||
doAssert not ldg.extras.rawRootHashFn.isNil
|
||||
|
||||
doAssert not ldg.methods.accessListFn.isNil
|
||||
|
|
|
@ -5,290 +5,30 @@
|
|||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
## Read only source, import `state_db/read_write` for full functionality.
|
||||
##
|
||||
## Note that the writable mode is only partially supported by the `Aristo`
|
||||
## backend of `CoreDb` (read-only mode is fully supported.)
|
||||
|
||||
import
|
||||
std/[sets, strformat],
|
||||
chronicles,
|
||||
eth/[common, rlp],
|
||||
../constants,
|
||||
../utils/utils,
|
||||
"."/[core_db, distinct_tries, storage_types]
|
||||
state_db/[base, read_only]
|
||||
|
||||
logScope:
|
||||
topics = "state_db"
|
||||
export
|
||||
AccountStateDB,
|
||||
ReadOnlyStateDB,
|
||||
accountExists,
|
||||
getAccount,
|
||||
getBalance,
|
||||
getCode,
|
||||
getCodeHash,
|
||||
getNonce,
|
||||
getStorage,
|
||||
getStorageRoot,
|
||||
getTrie,
|
||||
hasCodeOrNonce,
|
||||
isDeadAccount,
|
||||
isEmptyAccount,
|
||||
newAccountStateDB,
|
||||
rootHash
|
||||
|
||||
# aleth/geth/parity compatibility mode:
|
||||
#
|
||||
# affected test cases both in GST and BCT:
|
||||
# - stSStoreTest\InitCollision.json
|
||||
# - stRevertTest\RevertInCreateInInit.json
|
||||
# - stCreate2\RevertInCreateInInitCreate2.json
|
||||
#
|
||||
# pyEVM sided with original Nimbus EVM
|
||||
#
|
||||
# implementation difference:
|
||||
# Aleth/geth/parity using accounts cache.
|
||||
# When contract creation happened on an existing
|
||||
# but 'empty' account with non empty storage will
|
||||
# get new empty storage root.
|
||||
# Aleth cs. only clear the storage cache while both pyEVM
|
||||
# and Nimbus will modify the state trie.
|
||||
# During the next SSTORE call, aleth cs. calculate
|
||||
# gas used based on this cached 'original storage value'.
|
||||
# In other hand pyEVM and Nimbus will fetch
|
||||
# 'original storage value' from state trie.
|
||||
#
|
||||
# Both Yellow Paper and EIP2200 are not clear about this
|
||||
# situation but since aleth/geth/and parity implement this
|
||||
# behaviour, we perhaps also need to implement it.
|
||||
#
|
||||
# TODO: should this compatibility mode enabled via
|
||||
# compile time switch, runtime switch, or just hard coded
|
||||
# it?
|
||||
const
|
||||
aleth_compat = true
|
||||
|
||||
type
|
||||
AccountStateDB* = ref object
|
||||
trie: AccountsTrie
|
||||
originalRoot: KeccakHash # will be updated for every transaction
|
||||
#transactionID: CoreDbTxID
|
||||
when aleth_compat:
|
||||
cleared: HashSet[EthAddress]
|
||||
|
||||
ReadOnlyStateDB* = distinct AccountStateDB
|
||||
|
||||
proc pruneTrie*(db: AccountStateDB): bool =
|
||||
db.trie.isPruning
|
||||
|
||||
proc db*(db: AccountStateDB): CoreDbRef =
|
||||
db.trie.db
|
||||
|
||||
proc kvt*(db: AccountStateDB): CoreDbKvtRef =
|
||||
db.trie.db.kvt
|
||||
|
||||
proc rootHash*(db: AccountStateDB): KeccakHash =
|
||||
db.trie.rootHash
|
||||
|
||||
proc `rootHash=`*(db: AccountStateDB, root: KeccakHash) =
|
||||
db.trie = initAccountsTrie(db.trie.db, root, db.trie.isPruning)
|
||||
|
||||
proc newAccountStateDB*(backingStore: CoreDbRef,
|
||||
root: KeccakHash, pruneTrie: bool): AccountStateDB =
|
||||
result.new()
|
||||
result.trie = initAccountsTrie(backingStore, root, pruneTrie)
|
||||
result.originalRoot = root
|
||||
#result.transactionID = backingStore.getTransactionID()
|
||||
when aleth_compat:
|
||||
result.cleared = initHashSet[EthAddress]()
|
||||
|
||||
proc getTrie*(db: AccountStateDB): CoreDbMptRef =
|
||||
db.trie.mpt
|
||||
|
||||
proc getSecureTrie*(db: AccountStateDB): CoreDbPhkRef =
|
||||
db.trie.phk
|
||||
|
||||
proc getAccount*(db: AccountStateDB, address: EthAddress): Account =
|
||||
let recordFound = db.trie.getAccountBytes(address)
|
||||
if recordFound.len > 0:
|
||||
result = rlp.decode(recordFound, Account)
|
||||
else:
|
||||
result = newAccount()
|
||||
|
||||
proc setAccount*(db: AccountStateDB, address: EthAddress, account: Account) =
|
||||
db.trie.putAccountBytes(address, rlp.encode(account))
|
||||
|
||||
proc deleteAccount*(db: AccountStateDB, address: EthAddress) =
|
||||
db.trie.delAccountBytes(address)
|
||||
|
||||
proc getCodeHash*(db: AccountStateDB, address: EthAddress): Hash256 =
|
||||
let account = db.getAccount(address)
|
||||
result = account.codeHash
|
||||
|
||||
proc getBalance*(db: AccountStateDB, address: EthAddress): UInt256 =
|
||||
let account = db.getAccount(address)
|
||||
account.balance
|
||||
|
||||
proc setBalance*(db: AccountStateDB, address: EthAddress, balance: UInt256) =
|
||||
var account = db.getAccount(address)
|
||||
account.balance = balance
|
||||
db.setAccount(address, account)
|
||||
|
||||
proc addBalance*(db: AccountStateDB, address: EthAddress, delta: UInt256) =
|
||||
db.setBalance(address, db.getBalance(address) + delta)
|
||||
|
||||
proc subBalance*(db: AccountStateDB, address: EthAddress, delta: UInt256) =
|
||||
db.setBalance(address, db.getBalance(address) - delta)
|
||||
|
||||
template createTrieKeyFromSlot(slot: UInt256): auto =
|
||||
# Converts a number to hex big-endian representation including
|
||||
# prefix and leading zeros:
|
||||
slot.toBytesBE
|
||||
# Original py-evm code:
|
||||
# pad32(int_to_big_endian(slot))
|
||||
# morally equivalent to toByteRange_Unnecessary but with different types
|
||||
|
||||
template getStorageTrie(db: AccountStateDB, account: Account): auto =
|
||||
storageTrieForAccount(db.trie, account, false)
|
||||
|
||||
proc clearStorage*(db: AccountStateDB, address: EthAddress) =
|
||||
var account = db.getAccount(address)
|
||||
account.storageRoot = EMPTY_ROOT_HASH
|
||||
db.setAccount(address, account)
|
||||
when aleth_compat:
|
||||
db.cleared.incl address
|
||||
|
||||
proc getStorageRoot*(db: AccountStateDB, address: EthAddress): Hash256 =
|
||||
var account = db.getAccount(address)
|
||||
account.storageRoot
|
||||
|
||||
proc setStorage*(db: AccountStateDB,
|
||||
address: EthAddress,
|
||||
slot: UInt256, value: UInt256) =
|
||||
var account = db.getAccount(address)
|
||||
var accountTrie = getStorageTrie(db, account)
|
||||
let slotAsKey = createTrieKeyFromSlot slot
|
||||
|
||||
if value > 0:
|
||||
let encodedValue = rlp.encode(value)
|
||||
accountTrie.putSlotBytes(slotAsKey, encodedValue)
|
||||
else:
|
||||
accountTrie.delSlotBytes(slotAsKey)
|
||||
|
||||
# map slothash back to slot value
|
||||
# see iterator storage below
|
||||
var
|
||||
triedb = db.kvt
|
||||
# slotHash can be obtained from accountTrie.put?
|
||||
slotHash = keccakHash(slot.toBytesBE)
|
||||
triedb.put(slotHashToSlotKey(slotHash.data).toOpenArray, rlp.encode(slot))
|
||||
|
||||
account.storageRoot = accountTrie.rootHash
|
||||
db.setAccount(address, account)
|
||||
|
||||
iterator storage*(db: AccountStateDB, address: EthAddress): (UInt256, UInt256) =
|
||||
let
|
||||
storageRoot = db.getStorageRoot(address)
|
||||
triedb = db.kvt
|
||||
trie = db.db.mptPrune storageRoot
|
||||
|
||||
for key, value in trie:
|
||||
if key.len != 0:
|
||||
var keyData = triedb.get(slotHashToSlotKey(key).toOpenArray)
|
||||
yield (rlp.decode(keyData, UInt256), rlp.decode(value, UInt256))
|
||||
|
||||
proc getStorage*(db: AccountStateDB, address: EthAddress, slot: UInt256): (UInt256, bool) =
|
||||
let
|
||||
account = db.getAccount(address)
|
||||
slotAsKey = createTrieKeyFromSlot slot
|
||||
storageTrie = getStorageTrie(db, account)
|
||||
|
||||
let
|
||||
foundRecord = storageTrie.getSlotBytes(slotAsKey)
|
||||
|
||||
if foundRecord.len > 0:
|
||||
result = (rlp.decode(foundRecord, UInt256), true)
|
||||
else:
|
||||
result = (0.u256, false)
|
||||
|
||||
proc setNonce*(db: AccountStateDB, address: EthAddress, newNonce: AccountNonce) =
|
||||
var account = db.getAccount(address)
|
||||
if newNonce != account.nonce:
|
||||
account.nonce = newNonce
|
||||
db.setAccount(address, account)
|
||||
|
||||
proc getNonce*(db: AccountStateDB, address: EthAddress): AccountNonce =
|
||||
let account = db.getAccount(address)
|
||||
account.nonce
|
||||
|
||||
proc incNonce*(db: AccountStateDB, address: EthAddress) {.inline.} =
|
||||
db.setNonce(address, db.getNonce(address) + 1)
|
||||
|
||||
proc setCode*(db: AccountStateDB, address: EthAddress, code: openArray[byte]) =
|
||||
var account = db.getAccount(address)
|
||||
# TODO: implement JournalDB to store code and storage
|
||||
# also use JournalDB to revert state trie
|
||||
|
||||
let
|
||||
newCodeHash = keccakHash(code)
|
||||
triedb = db.kvt
|
||||
|
||||
if code.len != 0:
|
||||
triedb.put(contractHashKey(newCodeHash).toOpenArray, code)
|
||||
|
||||
account.codeHash = newCodeHash
|
||||
db.setAccount(address, account)
|
||||
|
||||
proc getCode*(db: AccountStateDB, address: EthAddress): seq[byte] =
|
||||
let triedb = db.kvt
|
||||
triedb.get(contractHashKey(db.getCodeHash(address)).toOpenArray)
|
||||
|
||||
proc hasCodeOrNonce*(db: AccountStateDB, address: EthAddress): bool {.inline.} =
|
||||
db.getNonce(address) != 0 or db.getCodeHash(address) != EMPTY_SHA3
|
||||
|
||||
proc dumpAccount*(db: AccountStateDB, addressS: string): string =
|
||||
let address = addressS.parseAddress
|
||||
return fmt"{addressS}: Storage: {db.getStorage(address, 0.u256)}; getAccount: {db.getAccount address}"
|
||||
|
||||
proc accountExists*(db: AccountStateDB, address: EthAddress): bool =
|
||||
db.trie.getAccountBytes(address).len > 0
|
||||
|
||||
proc isEmptyAccount*(db: AccountStateDB, address: EthAddress): bool =
|
||||
let recordFound = db.trie.getAccountBytes(address)
|
||||
assert(recordFound.len > 0)
|
||||
|
||||
let account = rlp.decode(recordFound, Account)
|
||||
result = account.codeHash == EMPTY_SHA3 and
|
||||
account.balance.isZero and
|
||||
account.nonce == 0
|
||||
|
||||
proc isDeadAccount*(db: AccountStateDB, address: EthAddress): bool =
|
||||
let recordFound = db.trie.getAccountBytes(address)
|
||||
if recordFound.len > 0:
|
||||
let account = rlp.decode(recordFound, Account)
|
||||
result = account.codeHash == EMPTY_SHA3 and
|
||||
account.balance.isZero and
|
||||
account.nonce == 0
|
||||
else:
|
||||
result = true
|
||||
|
||||
# Note: `state_db.getCommittedStorage()` is nowhere used.
|
||||
#
|
||||
#proc getCommittedStorage*(db: AccountStateDB, address: EthAddress, slot: UInt256): UInt256 =
|
||||
# let tmpHash = db.rootHash
|
||||
# db.rootHash = db.originalRoot
|
||||
# db.transactionID.shortTimeReadOnly():
|
||||
# when aleth_compat:
|
||||
# if address in db.cleared:
|
||||
# debug "Forced contract creation on existing account detected", address
|
||||
# result = 0.u256
|
||||
# else:
|
||||
# result = db.getStorage(address, slot)[0]
|
||||
# else:
|
||||
# result = db.getStorage(address, slot)[0]
|
||||
# db.rootHash = tmpHash
|
||||
|
||||
# Note: `state_db.updateOriginalRoot()` is nowhere used.
|
||||
#
|
||||
#proc updateOriginalRoot*(db: AccountStateDB) =
|
||||
# ## this proc will be called for every transaction
|
||||
# db.originalRoot = db.rootHash
|
||||
# # no need to rollback or dispose
|
||||
# # transactionID, it will be handled elsewhere
|
||||
# db.transactionID = db.db.getTransactionID()
|
||||
#
|
||||
# when aleth_compat:
|
||||
# db.cleared.clear()
|
||||
|
||||
proc rootHash*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
|
||||
proc getAccount*(db: ReadOnlyStateDB, address: EthAddress): Account {.borrow.}
|
||||
proc getCodeHash*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
|
||||
proc getBalance*(db: ReadOnlyStateDB, address: EthAddress): UInt256 {.borrow.}
|
||||
proc getStorageRoot*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
|
||||
proc getStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): (UInt256, bool) {.borrow.}
|
||||
proc getNonce*(db: ReadOnlyStateDB, address: EthAddress): AccountNonce {.borrow.}
|
||||
proc getCode*(db: ReadOnlyStateDB, address: EthAddress): seq[byte] {.borrow.}
|
||||
proc hasCodeOrNonce*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc accountExists*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc isDeadAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc isEmptyAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
#proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
|
||||
# End
|
||||
|
|
|
@ -0,0 +1,280 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2018 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
import
|
||||
std/[sets, strformat],
|
||||
chronicles,
|
||||
eth/[common, rlp],
|
||||
../../constants,
|
||||
../../utils/utils,
|
||||
".."/[core_db, distinct_tries, storage_types]
|
||||
|
||||
logScope:
|
||||
topics = "state_db"
|
||||
|
||||
# aleth/geth/parity compatibility mode:
|
||||
#
|
||||
# affected test cases both in GST and BCT:
|
||||
# - stSStoreTest\InitCollision.json
|
||||
# - stRevertTest\RevertInCreateInInit.json
|
||||
# - stCreate2\RevertInCreateInInitCreate2.json
|
||||
#
|
||||
# pyEVM sided with original Nimbus EVM
|
||||
#
|
||||
# implementation difference:
|
||||
# Aleth/geth/parity using accounts cache.
|
||||
# When contract creation happened on an existing
|
||||
# but 'empty' account with non empty storage will
|
||||
# get new empty storage root.
|
||||
# Aleth cs. only clear the storage cache while both pyEVM
|
||||
# and Nimbus will modify the state trie.
|
||||
# During the next SSTORE call, aleth cs. calculate
|
||||
# gas used based on this cached 'original storage value'.
|
||||
# In other hand pyEVM and Nimbus will fetch
|
||||
# 'original storage value' from state trie.
|
||||
#
|
||||
# Both Yellow Paper and EIP2200 are not clear about this
|
||||
# situation but since aleth/geth/and parity implement this
|
||||
# behaviour, we perhaps also need to implement it.
|
||||
#
|
||||
# TODO: should this compatibility mode enabled via
|
||||
# compile time switch, runtime switch, or just hard coded
|
||||
# it?
|
||||
const
|
||||
aleth_compat = true
|
||||
|
||||
type
|
||||
AccountStateDB* = ref object
|
||||
trie: AccountsTrie
|
||||
originalRoot: KeccakHash # will be updated for every transaction
|
||||
#transactionID: CoreDbTxID
|
||||
when aleth_compat:
|
||||
cleared: HashSet[EthAddress]
|
||||
|
||||
proc pruneTrie*(db: AccountStateDB): bool =
|
||||
db.trie.isPruning
|
||||
|
||||
proc db*(db: AccountStateDB): CoreDbRef =
|
||||
db.trie.db
|
||||
|
||||
proc kvt*(db: AccountStateDB): CoreDbKvtRef =
|
||||
db.trie.db.kvt
|
||||
|
||||
proc rootHash*(db: AccountStateDB): KeccakHash =
|
||||
db.trie.rootHash
|
||||
|
||||
proc `rootHash=`*(db: AccountStateDB, root: KeccakHash) =
|
||||
db.trie = initAccountsTrie(db.trie.db, root, db.trie.isPruning)
|
||||
|
||||
proc newAccountStateDB*(backingStore: CoreDbRef,
|
||||
root: KeccakHash, pruneTrie: bool): AccountStateDB =
|
||||
result.new()
|
||||
result.trie = initAccountsTrie(backingStore, root, pruneTrie)
|
||||
result.originalRoot = root
|
||||
#result.transactionID = backingStore.getTransactionID()
|
||||
when aleth_compat:
|
||||
result.cleared = initHashSet[EthAddress]()
|
||||
|
||||
proc getTrie*(db: AccountStateDB): CoreDbMptRef =
|
||||
db.trie.mpt
|
||||
|
||||
proc getSecureTrie*(db: AccountStateDB): CoreDbPhkRef =
|
||||
db.trie.phk
|
||||
|
||||
proc getAccount*(db: AccountStateDB, address: EthAddress): Account =
|
||||
let recordFound = db.trie.getAccountBytes(address)
|
||||
if recordFound.len > 0:
|
||||
result = rlp.decode(recordFound, Account)
|
||||
else:
|
||||
result = newAccount()
|
||||
|
||||
proc setAccount*(db: AccountStateDB, address: EthAddress, account: Account) =
|
||||
db.trie.putAccountBytes(address, rlp.encode(account))
|
||||
|
||||
proc deleteAccount*(db: AccountStateDB, address: EthAddress) =
|
||||
db.trie.delAccountBytes(address)
|
||||
|
||||
proc getCodeHash*(db: AccountStateDB, address: EthAddress): Hash256 =
|
||||
let account = db.getAccount(address)
|
||||
result = account.codeHash
|
||||
|
||||
proc getBalance*(db: AccountStateDB, address: EthAddress): UInt256 =
|
||||
let account = db.getAccount(address)
|
||||
account.balance
|
||||
|
||||
proc setBalance*(db: AccountStateDB, address: EthAddress, balance: UInt256) =
|
||||
var account = db.getAccount(address)
|
||||
account.balance = balance
|
||||
db.setAccount(address, account)
|
||||
|
||||
proc addBalance*(db: AccountStateDB, address: EthAddress, delta: UInt256) =
|
||||
db.setBalance(address, db.getBalance(address) + delta)
|
||||
|
||||
proc subBalance*(db: AccountStateDB, address: EthAddress, delta: UInt256) =
|
||||
db.setBalance(address, db.getBalance(address) - delta)
|
||||
|
||||
template createTrieKeyFromSlot(slot: UInt256): auto =
|
||||
# Converts a number to hex big-endian representation including
|
||||
# prefix and leading zeros:
|
||||
slot.toBytesBE
|
||||
# Original py-evm code:
|
||||
# pad32(int_to_big_endian(slot))
|
||||
# morally equivalent to toByteRange_Unnecessary but with different types
|
||||
|
||||
template getStorageTrie(db: AccountStateDB, account: Account): auto =
|
||||
storageTrieForAccount(db.trie, account, false)
|
||||
|
||||
proc clearStorage*(db: AccountStateDB, address: EthAddress) =
|
||||
var account = db.getAccount(address)
|
||||
account.storageRoot = EMPTY_ROOT_HASH
|
||||
db.setAccount(address, account)
|
||||
when aleth_compat:
|
||||
db.cleared.incl address
|
||||
|
||||
proc getStorageRoot*(db: AccountStateDB, address: EthAddress): Hash256 =
|
||||
var account = db.getAccount(address)
|
||||
account.storageRoot
|
||||
|
||||
proc setStorage*(db: AccountStateDB,
|
||||
address: EthAddress,
|
||||
slot: UInt256, value: UInt256) =
|
||||
var account = db.getAccount(address)
|
||||
var accountTrie = getStorageTrie(db, account)
|
||||
let slotAsKey = createTrieKeyFromSlot slot
|
||||
|
||||
if value > 0:
|
||||
let encodedValue = rlp.encode(value)
|
||||
accountTrie.putSlotBytes(slotAsKey, encodedValue)
|
||||
else:
|
||||
accountTrie.delSlotBytes(slotAsKey)
|
||||
|
||||
# map slothash back to slot value
|
||||
# see iterator storage below
|
||||
var
|
||||
triedb = db.kvt
|
||||
# slotHash can be obtained from accountTrie.put?
|
||||
slotHash = keccakHash(slot.toBytesBE)
|
||||
triedb.put(slotHashToSlotKey(slotHash.data).toOpenArray, rlp.encode(slot))
|
||||
|
||||
account.storageRoot = accountTrie.rootHash
|
||||
db.setAccount(address, account)
|
||||
|
||||
iterator storage*(db: AccountStateDB, address: EthAddress): (UInt256, UInt256) =
|
||||
let
|
||||
storageRoot = db.getStorageRoot(address)
|
||||
triedb = db.kvt
|
||||
trie = db.db.mptPrune storageRoot
|
||||
|
||||
for key, value in trie:
|
||||
if key.len != 0:
|
||||
var keyData = triedb.get(slotHashToSlotKey(key).toOpenArray)
|
||||
yield (rlp.decode(keyData, UInt256), rlp.decode(value, UInt256))
|
||||
|
||||
proc getStorage*(db: AccountStateDB, address: EthAddress, slot: UInt256): (UInt256, bool) =
|
||||
let
|
||||
account = db.getAccount(address)
|
||||
slotAsKey = createTrieKeyFromSlot slot
|
||||
storageTrie = getStorageTrie(db, account)
|
||||
|
||||
let
|
||||
foundRecord = storageTrie.getSlotBytes(slotAsKey)
|
||||
|
||||
if foundRecord.len > 0:
|
||||
result = (rlp.decode(foundRecord, UInt256), true)
|
||||
else:
|
||||
result = (0.u256, false)
|
||||
|
||||
proc setNonce*(db: AccountStateDB, address: EthAddress, newNonce: AccountNonce) =
|
||||
var account = db.getAccount(address)
|
||||
if newNonce != account.nonce:
|
||||
account.nonce = newNonce
|
||||
db.setAccount(address, account)
|
||||
|
||||
proc getNonce*(db: AccountStateDB, address: EthAddress): AccountNonce =
|
||||
let account = db.getAccount(address)
|
||||
account.nonce
|
||||
|
||||
proc incNonce*(db: AccountStateDB, address: EthAddress) {.inline.} =
|
||||
db.setNonce(address, db.getNonce(address) + 1)
|
||||
|
||||
proc setCode*(db: AccountStateDB, address: EthAddress, code: openArray[byte]) =
|
||||
var account = db.getAccount(address)
|
||||
# TODO: implement JournalDB to store code and storage
|
||||
# also use JournalDB to revert state trie
|
||||
|
||||
let
|
||||
newCodeHash = keccakHash(code)
|
||||
triedb = db.kvt
|
||||
|
||||
if code.len != 0:
|
||||
triedb.put(contractHashKey(newCodeHash).toOpenArray, code)
|
||||
|
||||
account.codeHash = newCodeHash
|
||||
db.setAccount(address, account)
|
||||
|
||||
proc getCode*(db: AccountStateDB, address: EthAddress): seq[byte] =
|
||||
let triedb = db.kvt
|
||||
triedb.get(contractHashKey(db.getCodeHash(address)).toOpenArray)
|
||||
|
||||
proc hasCodeOrNonce*(db: AccountStateDB, address: EthAddress): bool {.inline.} =
|
||||
db.getNonce(address) != 0 or db.getCodeHash(address) != EMPTY_SHA3
|
||||
|
||||
proc dumpAccount*(db: AccountStateDB, addressS: string): string =
|
||||
let address = addressS.parseAddress
|
||||
return fmt"{addressS}: Storage: {db.getStorage(address, 0.u256)}; getAccount: {db.getAccount address}"
|
||||
|
||||
proc accountExists*(db: AccountStateDB, address: EthAddress): bool =
|
||||
db.trie.getAccountBytes(address).len > 0
|
||||
|
||||
proc isEmptyAccount*(db: AccountStateDB, address: EthAddress): bool =
|
||||
let recordFound = db.trie.getAccountBytes(address)
|
||||
assert(recordFound.len > 0)
|
||||
|
||||
let account = rlp.decode(recordFound, Account)
|
||||
result = account.codeHash == EMPTY_SHA3 and
|
||||
account.balance.isZero and
|
||||
account.nonce == 0
|
||||
|
||||
proc isDeadAccount*(db: AccountStateDB, address: EthAddress): bool =
|
||||
let recordFound = db.trie.getAccountBytes(address)
|
||||
if recordFound.len > 0:
|
||||
let account = rlp.decode(recordFound, Account)
|
||||
result = account.codeHash == EMPTY_SHA3 and
|
||||
account.balance.isZero and
|
||||
account.nonce == 0
|
||||
else:
|
||||
result = true
|
||||
|
||||
# Note: `state_db.getCommittedStorage()` is nowhere used.
|
||||
#
|
||||
#proc getCommittedStorage*(db: AccountStateDB, address: EthAddress, slot: UInt256): UInt256 =
|
||||
# let tmpHash = db.rootHash
|
||||
# db.rootHash = db.originalRoot
|
||||
# db.transactionID.shortTimeReadOnly():
|
||||
# when aleth_compat:
|
||||
# if address in db.cleared:
|
||||
# debug "Forced contract creation on existing account detected", address
|
||||
# result = 0.u256
|
||||
# else:
|
||||
# result = db.getStorage(address, slot)[0]
|
||||
# else:
|
||||
# result = db.getStorage(address, slot)[0]
|
||||
# db.rootHash = tmpHash
|
||||
|
||||
# Note: `state_db.updateOriginalRoot()` is nowhere used.
|
||||
#
|
||||
#proc updateOriginalRoot*(db: AccountStateDB) =
|
||||
# ## this proc will be called for every transaction
|
||||
# db.originalRoot = db.rootHash
|
||||
# # no need to rollback or dispose
|
||||
# # transactionID, it will be handled elsewhere
|
||||
# db.transactionID = db.db.getTransactionID()
|
||||
#
|
||||
# when aleth_compat:
|
||||
# db.cleared.clear()
|
||||
|
||||
# End
|
|
@ -0,0 +1,33 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2018-2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)
|
||||
# or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT)
|
||||
# or http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
import
|
||||
../core_db,
|
||||
./base
|
||||
|
||||
type
|
||||
ReadOnlyStateDB* = distinct AccountStateDB
|
||||
|
||||
proc getTrie*(db: ReadOnlyStateDB): CoreDbMptRef {.borrow.}
|
||||
proc rootHash*(db: ReadOnlyStateDB): KeccakHash {.borrow.}
|
||||
proc getAccount*(db: ReadOnlyStateDB, address: EthAddress): Account {.borrow.}
|
||||
proc getCodeHash*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
|
||||
proc getBalance*(db: ReadOnlyStateDB, address: EthAddress): UInt256 {.borrow.}
|
||||
proc getStorageRoot*(db: ReadOnlyStateDB, address: EthAddress): Hash256 {.borrow.}
|
||||
proc getStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): (UInt256, bool) {.borrow.}
|
||||
proc getNonce*(db: ReadOnlyStateDB, address: EthAddress): AccountNonce {.borrow.}
|
||||
proc getCode*(db: ReadOnlyStateDB, address: EthAddress): seq[byte] {.borrow.}
|
||||
proc hasCodeOrNonce*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc accountExists*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc isDeadAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
proc isEmptyAccount*(db: ReadOnlyStateDB, address: EthAddress): bool {.borrow.}
|
||||
#proc getCommittedStorage*(db: ReadOnlyStateDB, address: EthAddress, slot: UInt256): UInt256 {.borrow.}
|
||||
|
||||
# End
|
|
@ -0,0 +1,17 @@
|
|||
# Nimbus
|
||||
# Copyright (c) 2018-2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE)
|
||||
# or http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT)
|
||||
# or http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
import
|
||||
"."/[base, read_only]
|
||||
|
||||
export
|
||||
base, read_only
|
||||
|
||||
# End
|
|
@ -15,7 +15,7 @@ import
|
|||
../nimbus/[vm_state, vm_types],
|
||||
../nimbus/utils/utils,
|
||||
../nimbus/tracer,
|
||||
../nimbus/db/[core_db, state_db],
|
||||
../nimbus/db/[core_db, state_db/read_write],
|
||||
../nimbus/core/executor,
|
||||
../nimbus/common/common,
|
||||
"."/[configuration, downloader, parser, premixcore]
|
||||
|
|
|
@ -95,7 +95,8 @@ proc openLegacyDB(
|
|||
result = CommonRef.new(
|
||||
db = coreDB,
|
||||
networkId = network,
|
||||
params = network.networkParams)
|
||||
params = network.networkParams,
|
||||
avoidStateDb = true)
|
||||
result.initializeEmptyDb
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
|
|
|
@ -9,7 +9,7 @@ import
|
|||
eth/trie/trie_defs,
|
||||
stew/[byteutils, endians2],
|
||||
unittest2,
|
||||
../nimbus/db/state_db
|
||||
../nimbus/db/state_db/read_write
|
||||
|
||||
include ../nimbus/db/ledger/accounts_cache
|
||||
|
||||
|
|