Culling legacy DB and accounts cache (#2197)

details:
+ Compiles nimbus all_tests
+ Failing tests have been commented out
This commit is contained in:
Jordan Hrycaj 2024-05-20 10:17:51 +00:00 committed by GitHub
parent 38eaebc5c7
commit ee9aea171d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
134 changed files with 257 additions and 2232 deletions

View File

@ -22,7 +22,6 @@ proc processChainData(cd: ChainData): TestStatus =
let let
networkId = NetworkId(cd.params.config.chainId) networkId = NetworkId(cd.params.config.chainId)
com = CommonRef.new(newCoreDbRef DefaultDbMemory, com = CommonRef.new(newCoreDbRef DefaultDbMemory,
pruneTrie = false,
networkId, networkId,
cd.params cd.params
) )

View File

@ -61,8 +61,7 @@ const
proc makeCom*(conf: NimbusConf): CommonRef = proc makeCom*(conf: NimbusConf): CommonRef =
CommonRef.new( CommonRef.new(
newCoreDbRef LegacyDbMemory, newCoreDbRef DefaultDbMemory,
conf.chainDbMode == ChainDbMode.Prune,
conf.networkId, conf.networkId,
conf.networkParams conf.networkParams
) )

View File

@ -78,8 +78,7 @@ proc main() =
conf = makeConfig(@["--custom-network:" & genesisFile]) conf = makeConfig(@["--custom-network:" & genesisFile])
ethCtx = newEthContext() ethCtx = newEthContext()
ethNode = setupEthNode(conf, ethCtx, eth) ethNode = setupEthNode(conf, ethCtx, eth)
com = CommonRef.new(newCoreDbRef LegacyDbMemory, com = CommonRef.new(newCoreDbRef DefaultDbMemory,
pruneTrie = false,
conf.networkId, conf.networkId,
conf.networkParams conf.networkParams
) )

View File

@ -47,17 +47,16 @@ proc genesisHeader(node: JsonNode): BlockHeader =
rlp.decode(genesisRLP, EthBlock).header rlp.decode(genesisRLP, EthBlock).header
proc setupELClient*(t: TestEnv, conf: ChainConfig, node: JsonNode) = proc setupELClient*(t: TestEnv, conf: ChainConfig, node: JsonNode) =
let memDB = newCoreDbRef LegacyDbMemory let memDB = newCoreDbRef DefaultDbMemory
t.ctx = newEthContext() t.ctx = newEthContext()
t.ethNode = setupEthNode(t.conf, t.ctx, eth) t.ethNode = setupEthNode(t.conf, t.ctx, eth)
t.com = CommonRef.new( t.com = CommonRef.new(
memDB, memDB,
conf, conf
t.conf.chainDbMode == ChainDbMode.Prune
) )
t.chainRef = newChain(t.com, extraValidation = true) t.chainRef = newChain(t.com, extraValidation = true)
let let
stateDB = AccountsCache.init(memDB, emptyRlpHash, t.conf.chainDbMode == ChainDbMode.Prune) stateDB = LedgerCache.init(memDB, emptyRlpHash)
genesisHeader = node.genesisHeader genesisHeader = node.genesisHeader
setupStateDB(node["pre"], stateDB) setupStateDB(node["pre"], stateDB)

View File

@ -76,8 +76,7 @@ proc setupEnv*(): TestEnv =
let let
ethCtx = newEthContext() ethCtx = newEthContext()
ethNode = setupEthNode(conf, ethCtx, eth) ethNode = setupEthNode(conf, ethCtx, eth)
com = CommonRef.new(newCoreDbRef LegacyDbMemory, com = CommonRef.new(newCoreDbRef DefaultDbMemory,
conf.chainDbMode == ChainDbMode.Prune,
conf.networkId, conf.networkId,
conf.networkParams conf.networkParams
) )

View File

@ -46,9 +46,6 @@ type
# all purpose storage # all purpose storage
db: CoreDbRef db: CoreDbRef
# prune underlying state db?
pruneTrie: bool
# block chain config # block chain config
config: ChainConfig config: ChainConfig
@ -103,10 +100,8 @@ type
ldgType: LedgerType ldgType: LedgerType
## Optional suggestion for the ledger cache to be used as state DB ## Optional suggestion for the ledger cache to be used as state DB
const pruneHistory: bool
CommonLegacyDbLedgerTypeDefault = LegacyAccountsCache ## Must not not set for a full node, might go away some time
## Default ledger type to use, see `ldgType` above. This default will be
## superseded by `LedgerCache` as default for `Aristo` type deb backend.
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Forward declarations # Forward declarations
@ -147,31 +142,24 @@ proc daoCheck(conf: ChainConfig) =
if conf.daoForkSupport and conf.daoForkBlock.isNone: if conf.daoForkSupport and conf.daoForkBlock.isNone:
conf.daoForkBlock = conf.homesteadBlock conf.daoForkBlock = conf.homesteadBlock
proc init(com : CommonRef, proc init(com : CommonRef,
db : CoreDbRef, db : CoreDbRef,
pruneTrie: bool, networkId : NetworkId,
networkId: NetworkId, config : ChainConfig,
config : ChainConfig, genesis : Genesis,
genesis : Genesis, ldgType : LedgerType,
ldgType : LedgerType, pruneHistory: bool,
) {.gcsafe, raises: [CatchableError].} = ) {.gcsafe, raises: [CatchableError].} =
config.daoCheck() config.daoCheck()
com.db = db com.db = db
com.pruneTrie = pruneTrie
com.config = config com.config = config
com.forkTransitionTable = config.toForkTransitionTable() com.forkTransitionTable = config.toForkTransitionTable()
com.networkId = networkId com.networkId = networkId
com.syncProgress= SyncProgress() com.syncProgress= SyncProgress()
com.ldgType = block: com.ldgType = LedgerCache
if ldgType != LedgerType(0): com.pruneHistory= pruneHistory
ldgType
elif db.dbType in {AristoDbMemory,AristoDbRocks,AristoDbVoid}:
# The `Aristo` backend does not work well with the `LegacyAccountsCache`
LedgerCache
else:
CommonLegacyDbLedgerTypeDefault
# Initalise the PoA state regardless of whether it is needed on the current # Initalise the PoA state regardless of whether it is needed on the current
# network. For non-PoA networks this descriptor is ignored. # network. For non-PoA networks this descriptor is ignored.
@ -235,10 +223,10 @@ proc getTdIfNecessary(com: CommonRef, blockHash: Hash256): Option[DifficultyInt]
proc new*( proc new*(
_: type CommonRef; _: type CommonRef;
db: CoreDbRef; db: CoreDbRef;
pruneTrie: bool = true;
networkId: NetworkId = MainNet; networkId: NetworkId = MainNet;
params = networkParams(MainNet); params = networkParams(MainNet);
ldgType = LedgerType(0); ldgType = LedgerType(0);
pruneHistory = false;
): CommonRef ): CommonRef
{.gcsafe, raises: [CatchableError].} = {.gcsafe, raises: [CatchableError].} =
@ -247,19 +235,19 @@ proc new*(
new(result) new(result)
result.init( result.init(
db, db,
pruneTrie,
networkId, networkId,
params.config, params.config,
params.genesis, params.genesis,
ldgType) ldgType,
pruneHistory)
proc new*( proc new*(
_: type CommonRef; _: type CommonRef;
db: CoreDbRef; db: CoreDbRef;
config: ChainConfig; config: ChainConfig;
pruneTrie: bool = true;
networkId: NetworkId = MainNet; networkId: NetworkId = MainNet;
ldgType = LedgerType(0); ldgType = LedgerType(0);
pruneHistory = false;
): CommonRef ): CommonRef
{.gcsafe, raises: [CatchableError].} = {.gcsafe, raises: [CatchableError].} =
@ -268,18 +256,17 @@ proc new*(
new(result) new(result)
result.init( result.init(
db, db,
pruneTrie,
networkId, networkId,
config, config,
nil, nil,
ldgType) ldgType,
pruneHistory)
proc clone*(com: CommonRef, db: CoreDbRef): CommonRef = proc clone*(com: CommonRef, db: CoreDbRef): CommonRef =
## clone but replace the db ## clone but replace the db
## used in EVM tracer whose db is CaptureDB ## used in EVM tracer whose db is CaptureDB
CommonRef( CommonRef(
db : db, db : db,
pruneTrie : com.pruneTrie,
config : com.config, config : com.config,
forkTransitionTable: com.forkTransitionTable, forkTransitionTable: com.forkTransitionTable,
forkIdCalculator: com.forkIdCalculator, forkIdCalculator: com.forkIdCalculator,
@ -292,8 +279,8 @@ proc clone*(com: CommonRef, db: CoreDbRef): CommonRef =
pow : com.pow, pow : com.pow,
poa : com.poa, poa : com.poa,
pos : com.pos, pos : com.pos,
ldgType : com.ldgType ldgType : com.ldgType,
) pruneHistory : com.pruneHistory)
proc clone*(com: CommonRef): CommonRef = proc clone*(com: CommonRef): CommonRef =
com.clone(com.db) com.clone(com.db)
@ -492,8 +479,8 @@ func cliqueEpoch*(com: CommonRef): int =
if com.config.clique.epoch.isSome: if com.config.clique.epoch.isSome:
return com.config.clique.epoch.get() return com.config.clique.epoch.get()
func pruneTrie*(com: CommonRef): bool = func pruneHistory*(com: CommonRef): bool =
com.pruneTrie com.pruneHistory
# always remember ChainId and NetworkId # always remember ChainId and NetworkId
# are two distinct things that often got mixed # are two distinct things that often got mixed

View File

@ -1,5 +1,5 @@
# Nimbus # Nimbus
# Copyright (c) 2018-2023 Status Research & Development GmbH # Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -13,8 +13,7 @@
import import
std/tables, std/tables,
eth/[common, eip1559], eth/[common, eip1559],
eth/trie/trie_defs, ../db/[ledger, core_db],
../db/[ledger, core_db, state_db/read_write],
../constants, ../constants,
./chain_config ./chain_config
@ -28,8 +27,6 @@ type
address: EthAddress; nonce: AccountNonce; balance: UInt256; address: EthAddress; nonce: AccountNonce; balance: UInt256;
code: openArray[byte]) {.catchRaise.} code: openArray[byte]) {.catchRaise.}
GenesisCompensateLegacySetupFn = proc() {.noRaise.}
GenesisSetStorageFn = proc( GenesisSetStorageFn = proc(
address: EthAddress; slot: UInt256; val: UInt256) {.rlpRaise.} address: EthAddress; slot: UInt256; val: UInt256) {.rlpRaise.}
@ -40,70 +37,23 @@ type
GenesisGetTrieFn = proc: CoreDbMptRef {.noRaise.} GenesisGetTrieFn = proc: CoreDbMptRef {.noRaise.}
GenesisLedgerRef* = ref object GenesisLedgerRef* = ref object
## Exportable ledger DB just for initialising Genesis. This is needed ## Exportable ledger DB just for initialising Genesis.
## when using the `Aristo` backend which is not fully supported by the
## `AccountStateDB` object.
##
## Currently, using other than the `AccountStateDB` ledgers are
## experimental and test only. Eventually, the `GenesisLedgerRef` wrapper
## should disappear so that the `Ledger` object (which encapsulates
## `AccountsCache` and `AccountsLedger`) will prevail.
## ##
addAccount: GenesisAddAccountFn addAccount: GenesisAddAccountFn
compensateLegacySetup: GenesisCompensateLegacySetupFn
setStorage: GenesisSetStorageFn setStorage: GenesisSetStorageFn
commit: GenesisCommitFn commit: GenesisCommitFn
rootHash: GenesisRootHashFn rootHash: GenesisRootHashFn
getTrie: GenesisGetTrieFn getTrie: GenesisGetTrieFn
const
GenesisLedgerTypeDefault* = LedgerType(0)
## Default ledger type to use, `LedgerType(0)` uses `AccountStateDB`
## rather than a `Ledger` variant.
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc initStateDbledgerRef(db: CoreDbRef; pruneTrie: bool): GenesisLedgerRef =
let sdb = newAccountStateDB(db, emptyRlpHash, pruneTrie)
GenesisLedgerRef(
addAccount: proc(
address: EthAddress;
nonce: AccountNonce;
balance: UInt256;
code: openArray[byte];
) {.catchRaise.} =
sdb.setAccount(address, newAccount(nonce, balance))
sdb.setCode(address, code),
compensateLegacySetup: proc() =
if pruneTrie: db.compensateLegacySetup(),
setStorage: proc(
address: EthAddress;
slot: UInt256;
val: UInt256;
) {.rlpRaise.} =
sdb.setStorage(address, slot, val),
commit: proc() =
discard,
rootHash: proc(): Hash256 =
sdb.rootHash(),
getTrie: proc(): CoreDbMptRef =
sdb.getTrie())
proc initAccountsLedgerRef( proc initAccountsLedgerRef(
db: CoreDbRef; db: CoreDbRef;
pruneTrie: bool;
ledgerType: LedgerType;
): GenesisLedgerRef = ): GenesisLedgerRef =
let ac = ledgerType.init(db, emptyRlpHash, pruneTrie) ## Methods jump table
let ac = LedgerCache.init(db, EMPTY_ROOT_HASH)
GenesisLedgerRef( GenesisLedgerRef(
addAccount: proc( addAccount: proc(
@ -116,9 +66,6 @@ proc initAccountsLedgerRef(
ac.setBalance(address, balance) ac.setBalance(address, balance)
ac.setCode(address, @code), ac.setCode(address, @code),
compensateLegacySetup: proc() =
if pruneTrie: db.compensateLegacySetup(),
setStorage: proc( setStorage: proc(
address: EthAddress; address: EthAddress;
slot: UInt256; slot: UInt256;
@ -141,15 +88,11 @@ proc initAccountsLedgerRef(
proc newStateDB*( proc newStateDB*(
db: CoreDbRef; db: CoreDbRef;
pruneTrie: bool; ledgerType: LedgerType;
ledgerType = LedgerType(0);
): GenesisLedgerRef = ): GenesisLedgerRef =
## The flag `ledgerType` is set to zero for compatibility with legacy apps ## Currently only `LedgerCache` supported for `ledgerType`.
## (see `test_state_network`). doAssert ledgerType == LedgerCache
if ledgerType != LedgerType(0): db.initAccountsLedgerRef()
db.initAccountsLedgerRef(pruneTrie, ledgerType)
else:
db.initStateDbledgerRef pruneTrie
proc getTrie*(sdb: GenesisLedgerRef): CoreDbMptRef = proc getTrie*(sdb: GenesisLedgerRef): CoreDbMptRef =
## Getter, used in `test_state_network` ## Getter, used in `test_state_network`
@ -167,22 +110,9 @@ proc toGenesisHeader*(
## The function returns the `Genesis` block header. ## The function returns the `Genesis` block header.
## ##
# The following kludge is needed for the `LegacyDbPersistent` type database
# when `pruneTrie` is enabled. For other cases, this code is irrelevant.
sdb.compensateLegacySetup()
for address, account in g.alloc: for address, account in g.alloc:
sdb.addAccount(address, account.nonce, account.balance, account.code) sdb.addAccount(address, account.nonce, account.balance, account.code)
# Kludge:
#
# See https://github.com/status-im/nim-eth/issues/9 where other,
# probably related debilities are discussed.
#
# This kludge also fixes the initial crash described in
# https://github.com/status-im/nimbus-eth1/issues/932.
sdb.compensateLegacySetup() # <-- kludge
for k, v in account.storage: for k, v in account.storage:
sdb.setStorage(address, k, v) sdb.setStorage(address, k, v)
@ -226,20 +156,20 @@ proc toGenesisHeader*(
genesis: Genesis; genesis: Genesis;
fork: HardFork; fork: HardFork;
db = CoreDbRef(nil); db = CoreDbRef(nil);
ledgerType = GenesisLedgerTypeDefault; ledgerType = LedgerCache;
): BlockHeader ): BlockHeader
{.gcsafe, raises: [CatchableError].} = {.gcsafe, raises: [CatchableError].} =
## Generate the genesis block header from the `genesis` and `config` ## Generate the genesis block header from the `genesis` and `config`
## argument value. ## argument value.
let let
db = if db.isNil: newCoreDbRef LegacyDbMemory else: db db = if db.isNil: AristoDbMemory.newCoreDbRef() else: db
sdb = newStateDB(db, pruneTrie = true, ledgerType) sdb = db.newStateDB(ledgerType)
toGenesisHeader(genesis, sdb, fork) toGenesisHeader(genesis, sdb, fork)
proc toGenesisHeader*( proc toGenesisHeader*(
params: NetworkParams; params: NetworkParams;
db = CoreDbRef(nil); db = CoreDbRef(nil);
ledgerType = GenesisLedgerTypeDefault; ledgerType = LedgerCache;
): BlockHeader ): BlockHeader
{.raises: [CatchableError].} = {.raises: [CatchableError].} =
## Generate the genesis block header from the `genesis` and `config` ## Generate the genesis block header from the `genesis` and `config`

View File

@ -106,9 +106,8 @@ const sharedLibText = if defined(linux): " (*.so, *.so.N)"
type type
ChainDbMode* {.pure.} = enum ChainDbMode* {.pure.} = enum
Prune
Archive
Aristo Aristo
AriPrune
NimbusCmd* {.pure.} = enum NimbusCmd* {.pure.} = enum
noCommand noCommand
@ -117,7 +116,7 @@ type
ProtocolFlag* {.pure.} = enum ProtocolFlag* {.pure.} = enum
## Protocol flags ## Protocol flags
Eth ## enable eth subprotocol Eth ## enable eth subprotocol
Snap ## enable snap sub-protocol #Snap ## enable snap sub-protocol
Les ## enable les subprotocol Les ## enable les subprotocol
RpcFlag* {.pure.} = enum RpcFlag* {.pure.} = enum
@ -134,7 +133,7 @@ type
SyncMode* {.pure.} = enum SyncMode* {.pure.} = enum
Default Default
Full ## Beware, experimental Full ## Beware, experimental
Snap ## Beware, experimental #Snap ## Beware, experimental
Stateless ## Beware, experimental Stateless ## Beware, experimental
NimbusConf* = object of RootObj NimbusConf* = object of RootObj
@ -158,12 +157,11 @@ type
chainDbMode* {. chainDbMode* {.
desc: "Blockchain database" desc: "Blockchain database"
longDesc: longDesc:
"- Prune -- Legacy/reference database, full pruning\n" & "- Aristo -- Single state DB, full node\n" &
"- Archive -- Legacy/reference database without pruning\n" & "- AriPrune -- Aristo with curbed block history (for testing)\n" &
"- Aristo -- Experimental single state DB\n" &
"" ""
defaultValue: ChainDbMode.Prune defaultValue: ChainDbMode.Aristo
defaultValueDesc: $ChainDbMode.Prune defaultValueDesc: $ChainDbMode.Aristo
abbr : "p" abbr : "p"
name: "chaindb" }: ChainDbMode name: "chaindb" }: ChainDbMode
@ -172,7 +170,7 @@ type
longDesc: longDesc:
"- default -- legacy sync mode\n" & "- default -- legacy sync mode\n" &
"- full -- full blockchain archive\n" & "- full -- full blockchain archive\n" &
"- snap -- experimental snap mode (development only)\n" & # "- snap -- experimental snap mode (development only)\n" &
"- stateless -- experimental stateless mode (development only)" "- stateless -- experimental stateless mode (development only)"
defaultValue: SyncMode.Default defaultValue: SyncMode.Default
defaultValueDesc: $SyncMode.Default defaultValueDesc: $SyncMode.Default
@ -376,7 +374,8 @@ type
protocols {. protocols {.
desc: "Enable specific set of server protocols (available: Eth, " & desc: "Enable specific set of server protocols (available: Eth, " &
" Snap, Les, None.) This will not affect the sync mode" " Les, None.) This will not affect the sync mode"
# " Snap, Les, None.) This will not affect the sync mode"
defaultValue: @[] defaultValue: @[]
defaultValueDesc: $ProtocolFlag.Eth defaultValueDesc: $ProtocolFlag.Eth
name: "protocols" .}: seq[string] name: "protocols" .}: seq[string]
@ -643,7 +642,7 @@ proc getProtocolFlags*(conf: NimbusConf): set[ProtocolFlag] =
case item.toLowerAscii() case item.toLowerAscii()
of "eth": result.incl ProtocolFlag.Eth of "eth": result.incl ProtocolFlag.Eth
of "les": result.incl ProtocolFlag.Les of "les": result.incl ProtocolFlag.Les
of "snap": result.incl ProtocolFlag.Snap # of "snap": result.incl ProtocolFlag.Snap
of "none": noneOk = true of "none": noneOk = true
else: else:
error "Unknown protocol", name=item error "Unknown protocol", name=item

View File

@ -23,10 +23,6 @@ proc importRlpBlock*(blocksRlp: openArray[byte]; com: CommonRef; importFile: str
header: BlockHeader header: BlockHeader
body: BlockBody body: BlockBody
# The following kludge is needed for the `LegacyDbPersistent` type database
# when `pruneTrie` is enabled. For other cases, this code is irrelevant.
com.db.compensateLegacySetup()
# even though the new imported blocks have block number # even though the new imported blocks have block number
# smaller than head, we keep importing it. # smaller than head, we keep importing it.
# it maybe a side chain. # it maybe a side chain.

View File

@ -85,9 +85,6 @@ proc persistBlocksImpl(c: ChainRef; headers: openArray[BlockHeader];
let vmState = c.getVmState(headers[0]).valueOr: let vmState = c.getVmState(headers[0]).valueOr:
return ValidationResult.Error return ValidationResult.Error
# Check point
let stateRootChpt = vmState.parent.stateRoot
# Needed for figuring out whether KVT cleanup is due (see at the end) # Needed for figuring out whether KVT cleanup is due (see at the end)
let (fromBlock, toBlock) = (headers[0].blockNumber, headers[^1].blockNumber) let (fromBlock, toBlock) = (headers[0].blockNumber, headers[^1].blockNumber)

View File

@ -157,7 +157,7 @@ proc update(dh: TxChainRef; parent: BlockHeader)
let let
timestamp = dh.getTimestamp(parent) timestamp = dh.getTimestamp(parent)
db = dh.com.db db = dh.com.db
acc = dh.com.ledgerType.init(db, parent.stateRoot, dh.com.pruneTrie) acc = dh.com.ledgerType.init(db, parent.stateRoot)
fee = if dh.com.isLondon(parent.blockNumber + 1, timestamp): fee = if dh.com.isLondon(parent.blockNumber + 1, timestamp):
some(dh.com.baseFeeGet(parent).uint64.u256) some(dh.com.baseFeeGet(parent).uint64.u256)
else: else:

View File

@ -175,7 +175,7 @@ proc vmExecInit(xp: TxPoolRef): Result[TxPackerStateRef, string]
let packer = TxPackerStateRef( # return value let packer = TxPackerStateRef( # return value
xp: xp, xp: xp,
tr: newCoreDbRef(LegacyDbMemory).mptPrune, tr: AristoDbMemory.newCoreDbRef().mptPrune,
balance: xp.chain.vmState.readOnlyStateDB.getBalance(xp.chain.feeRecipient), balance: xp.chain.vmState.readOnlyStateDB.getBalance(xp.chain.feeRecipient),
numBlobPerBlock: 0, numBlobPerBlock: 0,
) )

View File

@ -145,7 +145,6 @@ proc findTx*(
else: else:
# Find `(vid,key)` on transaction layers # Find `(vid,key)` on transaction layers
var n = 0
for (n,tx,layer,error) in db.txRef.txFrameWalk: for (n,tx,layer,error) in db.txRef.txFrameWalk:
if error != AristoError(0): if error != AristoError(0):
return err(error) return err(error)

View File

@ -32,13 +32,7 @@ export
# setting up DB agnostic unit/integration tests. # setting up DB agnostic unit/integration tests.
# #
# Uncomment the below symbols in order to activate the `Aristo` database. # Uncomment the below symbols in order to activate the `Aristo` database.
#const DefaultDbMemory* = AristoDbMemory const DefaultDbMemory* = AristoDbMemory
#const DefaultDbPersistent* = AristoDbRocks const DefaultDbPersistent* = AristoDbRocks
# Catch undefined symbols and set them to the legacy database.
when not declared(DefaultDbMemory):
const DefaultDbMemory* = LegacyDbMemory
when not declared(DefaultDbPersistent):
const DefaultDbPersistent* = LegacyDbPersistent
# End # End

View File

@ -1,587 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
{.push raises: [].}
import
std/tables,
eth/[common, rlp, trie/db, trie/hexary],
stew/byteutils,
results,
../../../errors,
".."/[base, base/base_desc]
type
LegacyApiRlpError* = object of CoreDbApiError
## For re-routing exceptions in iterator closure
# -----------
LegacyDbRef* = ref object of CoreDbRef
kvt: CoreDxKvtRef ## Cache, no need to rebuild methods descriptor
tdb: TrieDatabaseRef ## Descriptor reference copy captured with closures
top: LegacyCoreDxTxRef ## Top transaction (if any)
ctx: LegacyCoreDbCtxRef ## Cache, there is only one context here
level: int ## Debugging
LegacyDbClose* = proc() {.gcsafe, raises: [].}
## Custom destructor
HexaryChildDbRef = ref object
trie: HexaryTrie ## For closure descriptor for capturing
when CoreDbEnableApiTracking:
colType: CoreDbColType ## Current sub-trie
address: Option[EthAddress] ## For storage tree debugging
accPath: Blob ## For storage tree debugging
LegacyCoreDbCtxRef = ref object of CoreDbCtxRef
## Context (there is only one context here)
base: LegacyDbRef
LegacyCoreDxTxRef = ref object of CoreDxTxRef
ltx: DbTransaction ## Legacy transaction descriptor
back: LegacyCoreDxTxRef ## Previous transaction
level: int ## Transaction level when positive
RecorderRef = ref object of RootRef
flags: set[CoreDbCaptFlags]
parent: TrieDatabaseRef
logger: TableRef[Blob,Blob]
appDb: LegacyDbRef
LegacyColRef* = ref object of CoreDbColRef
root: Hash256 ## Hash key
when CoreDbEnableApiTracking:
colType: CoreDbColType ## Current sub-trie
address: Option[EthAddress] ## For storage tree debugging
accPath: Blob ## For storage tree debugging
LegacyCoreDbError = ref object of CoreDbErrorRef
ctx: string ## Exception or error context info
name: string ## name of exception
msg: string ## Exception info
# ------------
LegacyCoreDbKvtBE = ref object of CoreDbKvtBackendRef
tdb: TrieDatabaseRef
LegacyCoreDbMptBE = ref object of CoreDbMptBackendRef
mpt: HexaryTrie
proc init*(
db: LegacyDbRef;
dbType: CoreDbType;
tdb: TrieDatabaseRef;
closeDb = LegacyDbClose(nil);
): CoreDbRef
{.gcsafe.}
# ------------------------------------------------------------------------------
# Private helpers, exception management
# ------------------------------------------------------------------------------
template mapRlpException(db: LegacyDbRef; info: static[string]; code: untyped) =
try:
code
except RlpError as e:
return err(db.bless(RlpException, LegacyCoreDbError(
ctx: info,
name: $e.name,
msg: e.msg)))
template reraiseRlpException(info: static[string]; code: untyped) =
try:
code
except RlpError as e:
let msg = info & ", name=" & $e.name & ", msg=\"" & e.msg & "\""
raise (ref LegacyApiRlpError)(msg: msg)
# ------------------------------------------------------------------------------
# Private helpers, other functions
# ------------------------------------------------------------------------------
func errorPrint(e: CoreDbErrorRef): string =
if not e.isNil:
let e = e.LegacyCoreDbError
result &= "ctx=" & $e.ctx
if e.name != "":
result &= ", name=\"" & $e.name & "\""
if e.msg != "":
result &= ", msg=\"" & $e.msg & "\""
func colPrint(col: CoreDbColRef): string =
if not col.isNil:
if not col.ready:
result = "$?"
else:
var col = LegacyColRef(col)
when CoreDbEnableApiTracking:
result = "(" & $col.colType & ","
if col.address.isSome:
result &= "@"
if col.accPath.len == 0:
result &= "ø"
else:
result &= col.accPath.toHex & ","
result &= "%" & col.address.unsafeGet.toHex & ","
if col.root != EMPTY_ROOT_HASH:
result &= "£" & col.root.data.toHex
else:
result &= "£ø"
when CoreDbEnableApiTracking:
result &= ")"
func txLevel(db: LegacyDbRef): int =
if not db.top.isNil:
return db.top.level
func lroot(col: CoreDbColRef): Hash256 =
if not col.isNil and col.ready:
return col.LegacyColRef.root
EMPTY_ROOT_HASH
proc toCoreDbAccount(
db: LegacyDbRef;
data: Blob;
address: EthAddress;
): CoreDbAccount
{.gcsafe, raises: [RlpError].} =
let acc = rlp.decode(data, Account)
result = CoreDbAccount(
address: address,
nonce: acc.nonce,
balance: acc.balance,
codeHash: acc.codeHash)
if acc.storageRoot != EMPTY_ROOT_HASH:
result.storage = db.bless LegacyColRef(root: acc.storageRoot)
when CoreDbEnableApiTracking:
result.storage.LegacyColRef.colType = CtStorage # redundant, ord() = 0
result.storage.LegacyColRef.address = some(address)
result.storage.LegacyColRef.accPath = @(address.keccakHash.data)
proc toAccount(
acc: CoreDbAccount;
): Account =
## Fast rewrite of `recast()`
Account(
nonce: acc.nonce,
balance: acc.balance,
codeHash: acc.codeHash,
storageRoot: acc.storage.lroot)
# ------------------------------------------------------------------------------
# Private mixin methods for `trieDB` (backport from capturedb/tracer sources)
# ------------------------------------------------------------------------------
proc get(db: RecorderRef, key: openArray[byte]): Blob =
## Mixin for `trieDB()`
result = db.logger.getOrDefault @key
if result.len == 0:
result = db.parent.get(key)
if result.len != 0:
db.logger[@key] = result
proc put(db: RecorderRef, key, value: openArray[byte]) =
## Mixin for `trieDB()`
db.logger[@key] = @value
if PersistPut in db.flags:
db.parent.put(key, value)
proc contains(db: RecorderRef, key: openArray[byte]): bool =
## Mixin for `trieDB()`
if db.logger.hasKey @key:
return true
if db.parent.contains key:
return true
proc del(db: RecorderRef, key: openArray[byte]) =
## Mixin for `trieDB()`
db.logger.del @key
if PersistDel in db.flags:
db.parent.del key
proc newRecorderRef(
db: LegacyDbRef;
flags: set[CoreDbCaptFlags];
): RecorderRef =
## Capture constuctor, uses `mixin` values from above
result = RecorderRef(
flags: flags,
parent: db.tdb,
logger: newTable[Blob,Blob]())
let newDb = LegacyDbRef(
level: db.level+1,
trackLegaApi: db.trackLegaApi,
trackNewApi: db.trackNewApi,
trackLedgerApi: db.trackLedgerApi,
localDbOnly: db.localDbOnly,
profTab: db.profTab,
ledgerHook: db.ledgerHook)
# Note: the **mixin** magic happens in `trieDB()`
result.appDb = newDb.init(db.dbType, trieDB result).LegacyDbRef
# ------------------------------------------------------------------------------
# Private database method function tables
# ------------------------------------------------------------------------------
proc kvtMethods(db: LegacyDbRef): CoreDbKvtFns =
## Key-value database table handlers
let tdb = db.tdb
CoreDbKvtFns(
backendFn: proc(): CoreDbKvtBackendRef =
db.bless(LegacyCoreDbKvtBE(tdb: tdb)),
getFn: proc(k: openArray[byte]): CoreDbRc[Blob] =
let data = tdb.get(k)
if 0 < data.len:
return ok(data)
err(db.bless(KvtNotFound, LegacyCoreDbError(ctx: "getFn()"))),
delFn: proc(k: openArray[byte]): CoreDbRc[void] =
tdb.del(k)
ok(),
putFn: proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
tdb.put(k,v)
ok(),
hasKeyFn: proc(k: openArray[byte]): CoreDbRc[bool] =
ok(tdb.contains(k)),
saveOffSiteFn: proc(): CoreDbRc[void] =
# Emulate `Kvt` behaviour
if 0 < db.txLevel():
const info = "saveOffSiteFn()"
return err(db.bless(TxPending, LegacyCoreDbError(ctx: info)))
ok(),
forgetFn: proc(): CoreDbRc[void] =
ok())
proc mptMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbMptFns =
## Hexary trie database handlers
CoreDbMptFns(
backendFn: proc(): CoreDbMptBackendRef =
db.bless(LegacyCoreDbMptBE(mpt: mpt.trie)),
fetchFn: proc(k: openArray[byte]): CoreDbRc[Blob] =
db.mapRlpException("fetchFn()"):
let data = mpt.trie.get(k)
if 0 < data.len:
return ok(data)
err(db.bless(MptNotFound, LegacyCoreDbError(ctx: "fetchFn()"))),
deleteFn: proc(k: openArray[byte]): CoreDbRc[void] =
db.mapRlpException("deleteFn()"):
mpt.trie.del(k)
ok(),
mergeFn: proc(k: openArray[byte]; v: openArray[byte]): CoreDbRc[void] =
db.mapRlpException("mergeFn()"):
mpt.trie.put(k,v)
ok(),
hasPathFn: proc(k: openArray[byte]): CoreDbRc[bool] =
db.mapRlpException("hasPathFn()"):
return ok(mpt.trie.contains(k)),
getColFn: proc(): CoreDbColRef =
var col = LegacyColRef(root: mpt.trie.rootHash)
when CoreDbEnableApiTracking:
col.colType = mpt.colType
col.address = mpt.address
col.accPath = mpt.accPath
db.bless(col),
isPruningFn: proc(): bool =
mpt.trie.isPruning)
proc accMethods(mpt: HexaryChildDbRef; db: LegacyDbRef): CoreDbAccFns =
## Hexary trie database handlers
CoreDbAccFns(
getMptFn: proc(): CoreDbRc[CoreDxMptRef] =
let xMpt = HexaryChildDbRef(trie: mpt.trie)
ok(db.bless CoreDxMptRef(methods: xMpt.mptMethods db)),
fetchFn: proc(k: EthAddress): CoreDbRc[CoreDbAccount] =
db.mapRlpException "fetchFn()":
let data = mpt.trie.get(k.keccakHash.data)
if 0 < data.len:
return ok db.toCoreDbAccount(data,k)
err(db.bless(AccNotFound, LegacyCoreDbError(ctx: "fetchFn()"))),
deleteFn: proc(k: EthAddress): CoreDbRc[void] =
db.mapRlpException("deleteFn()"):
mpt.trie.del(k.keccakHash.data)
ok(),
stoFlushFn: proc(k: EthAddress): CoreDbRc[void] =
ok(),
mergeFn: proc(v: CoreDbAccount): CoreDbRc[void] =
db.mapRlpException("mergeFn()"):
mpt.trie.put(v.address.keccakHash.data, rlp.encode v.toAccount)
ok(),
hasPathFn: proc(k: EthAddress): CoreDbRc[bool] =
db.mapRlpException("hasPath()"):
return ok(mpt.trie.contains k.keccakHash.data),
getColFn: proc(): CoreDbColRef =
var col = LegacyColRef(root: mpt.trie.rootHash)
when CoreDbEnableApiTracking:
col.colType = mpt.colType
col.address = mpt.address
col.accPath = mpt.accPath
db.bless(col),
isPruningFn: proc(): bool =
mpt.trie.isPruning)
proc ctxMethods(ctx: LegacyCoreDbCtxRef): CoreDbCtxFns =
  ## Context handler table: column creation plus MPT/accounts sub-trie
  ## accessors on top of the legacy trie database.
  let
    db = ctx.base      # legacy base descriptor (for `bless` and errors)
    tdb = db.tdb       # underlying `TrieDatabaseRef`

  CoreDbCtxFns(
    newColFn: proc(
        colType: CoreDbColType;
        root: Hash256;
        address: Option[EthAddress];
          ): CoreDbRc[CoreDbColRef] =
      # Columns are light-weight root-hash wrappers on the legacy backend
      var col = LegacyColRef(root: root)
      when CoreDbEnableApiTracking:
        col.colType = colType
        col.address = address
        if address.isSome:
          col.accPath = @(address.unsafeGet.keccakHash.data)
      ok(db.bless col),

    getMptFn: proc(col: CoreDbColRef, prune: bool): CoreDbRc[CoreDxMptRef] =
      var mpt = HexaryChildDbRef(trie: initHexaryTrie(tdb, col.lroot, prune))
      when CoreDbEnableApiTracking:
        if not col.isNil and col.ready:
          let col = col.LegacyColRef
          mpt.colType = col.colType
          mpt.address = col.address
          mpt.accPath = col.accPath
      ok(db.bless CoreDxMptRef(methods: mpt.mptMethods db)),

    getAccFn: proc(col: CoreDbColRef, prune: bool): CoreDbRc[CoreDxAccRef] =
      var mpt = HexaryChildDbRef(trie: initHexaryTrie(tdb, col.lroot, prune))
      when CoreDbEnableApiTracking:
        if not col.isNil and col.ready:
          if col.LegacyColRef.colType != CtAccounts:
            # Only the accounts column may be accessed via the `Acc` API.
            # Fix: the error context previously read "newAccFn()" which
            # does not match this handler's name.
            let ctx = LegacyCoreDbError(
              ctx: "getAccFn()",
              msg: "got " & $col.LegacyColRef.colType)
            return err(db.bless(RootUnacceptable, ctx))
          mpt.colType = CtAccounts
      ok(db.bless CoreDxAccRef(methods: mpt.accMethods db)),

    forgetFn: proc() =
      # Nothing to release: the legacy backend has a single shared context
      discard)
proc txMethods(tx: CoreDxTxRef): CoreDbTxFns =
  ## Transaction frame handlers. All four finalisers (`commit`, `rollback`,
  ## `dispose`, `safeDispose`) first end the frame on the underlying trie
  ## database, then unlink it from the descriptor's transaction stack.
  let tx = tx.LegacyCoreDxTxRef

  proc pop(tx: LegacyCoreDxTxRef) =
    # Unlink `tx` from the parent's stack of open transaction frames and
    # mark it dead (level -1) so a repeated finalisation becomes a no-op.
    if 0 < tx.level:
      tx.parent.LegacyDbRef.top = tx.back
      tx.back = LegacyCoreDxTxRef(nil)
      tx.level = -1

  CoreDbTxFns(
    levelFn: proc(): int =
      tx.level,

    commitFn: proc(applyDeletes: bool): CoreDbRc[void] =
      tx.ltx.commit(applyDeletes)
      tx.pop()
      ok(),

    rollbackFn: proc(): CoreDbRc[void] =
      tx.ltx.rollback()
      tx.pop()
      ok(),

    disposeFn: proc(): CoreDbRc[void] =
      tx.ltx.dispose()
      tx.pop()
      ok(),

    safeDisposeFn: proc(): CoreDbRc[void] =
      tx.ltx.safeDispose()
      tx.pop()
      ok())
proc cptMethods(cpt: RecorderRef; db: LegacyDbRef): CoreDbCaptFns =
  ## Capture/tracer handler table for the recorder `cpt`. (The `db`
  ## argument is unused here; it is kept for signature parity with the
  ## other handler-table constructors.)
  CoreDbCaptFns(
    recorderFn: proc(): CoreDbRef = cpt.appDb,
    logDbFn: proc(): TableRef[Blob,Blob] = cpt.logger,
    getFlagsFn: proc(): set[CoreDbCaptFlags] = cpt.flags,
    forgetFn: proc() = discard)
# ------------------------------------------------------------------------------
# Private base methods (including constructors)
# ------------------------------------------------------------------------------
proc baseMethods(
    db: LegacyDbRef;
    dbType: CoreDbType;
    closeDb: LegacyDbClose;
      ): CoreDbBaseFns =
  ## Base descriptor handler table.
  ##
  ## `dbType` is accepted for interface symmetry but not referenced by any
  ## closure (the value is stored on the descriptor by `init()`); `closeDb`
  ## is an optional backend shutdown hook invoked by `destroyFn`.
  let db = db    # fresh binding so the closures capture an immutable ref
  CoreDbBaseFns(
    levelFn: proc(): int =
      db.txLevel(),

    destroyFn: proc(ignore: bool) =
      if not closeDb.isNil:
        closeDb(),

    colStateFn: proc(col: CoreDbColRef): CoreDbRc[Hash256] =
      ok(col.lroot),

    colPrintFn: proc(col: CoreDbColRef): string =
      col.colPrint(),

    errorPrintFn: proc(e: CoreDbErrorRef): string =
      e.errorPrint(),

    legacySetupFn: proc() =
      # Pre-store the RLP of an empty list under the empty root hash
      db.tdb.put(EMPTY_ROOT_HASH.data, @[0x80u8]),

    newKvtFn: proc(sharedTable = true): CoreDbRc[CoreDxKvtRef] =
      # There is only one (shared) KVT on the legacy backend
      ok(db.kvt),

    newCtxFn: proc(): CoreDbCtxRef =
      db.ctx,

    swapCtxFn: proc(ctx: CoreDbCtxRef): CoreDbCtxRef =
      # Single-context backend: swapping must hand back the same context
      doAssert CoreDbCtxRef(db.ctx) == ctx
      ctx,

    newCtxFromTxFn: proc(
        root: Hash256;
        colType: CoreDbColType;
          ): CoreDbRc[CoreDbCtxRef] =
      ok(db.ctx),

    beginFn: proc(): CoreDbRc[CoreDxTxRef] =
      # Push a new transaction frame onto the stack anchored at `db.top`
      db.top = LegacyCoreDxTxRef(
        ltx: db.tdb.beginTransaction,
        level: (if db.top.isNil: 1 else: db.top.level + 1),
        back: db.top)
      db.top.methods = db.top.txMethods()
      ok(db.bless db.top),

    newCaptureFn: proc(flgs: set[CoreDbCaptFlags]): CoreDbRc[CoreDxCaptRef] =
      let fns = db.newRecorderRef(flgs).cptMethods(db)
      ok(db.bless CoreDxCaptRef(methods: fns)),

    persistentFn: proc(bn: Option[BlockNumber]): CoreDbRc[void] =
      # Emulate `Aristo` behaviour: persisting with open transactions
      # is rejected.
      if 0 < db.txLevel():
        const info = "persistentFn()"
        return err(db.bless(TxPending, LegacyCoreDbError(ctx: info)))
      ok())
# ------------------------------------------------------------------------------
# Public constructor helpers
# ------------------------------------------------------------------------------
proc init*(
    db: LegacyDbRef;
    dbType: CoreDbType;
    tdb: TrieDatabaseRef;
    closeDb = LegacyDbClose(nil);
      ): CoreDbRef =
  ## Constructor helper: wires `db` up as a fully functional `CoreDbRef`
  ## on top of the trie database `tdb`. The optional `closeDb` hook is
  ## called when the descriptor is destroyed.
  ##
  ## The order below matters: handler tables must be installed before the
  ## descriptor and its sub-objects are `bless`-ed.
  # Local extensions
  db.tdb = tdb
  db.kvt = db.bless CoreDxKvtRef(methods: db.kvtMethods())

  # Base descriptor
  db.dbType = dbType
  db.methods = db.baseMethods(dbType, closeDb)

  # Blind context layer (the legacy backend has exactly one context)
  db.ctx = db.bless LegacyCoreDbCtxRef(base: db).tap
  # -- note: kept literal below, see original wiring
  db.bless
# ------------------------------------------------------------------------------
# Public constructor and low level data retrieval, storage & transation frame
# ------------------------------------------------------------------------------
proc newLegacyPersistentCoreDbRef*(db: TrieDatabaseRef): CoreDbRef =
  ## Wrap an externally created trie database `db` as a persistent
  ## legacy `CoreDbRef`.
  let desc = LegacyDbRef()
  desc.init(LegacyDbPersistent, db)
proc newLegacyMemoryCoreDbRef*(): CoreDbRef =
  ## Construct a purely in-memory legacy `CoreDbRef` (testing, scratch use.)
  let desc = LegacyDbRef()
  desc.init(LegacyDbMemory, newMemoryDB())
# ------------------------------------------------------------------------------
# Public legacy helpers
# ------------------------------------------------------------------------------
func isLegacy*(be: CoreDbRef): bool =
  ## True for either flavour (memory or persistent) of the legacy backend.
  be.dbType == LegacyDbMemory or be.dbType == LegacyDbPersistent
func toLegacy*(be: CoreDbKvtBackendRef): TrieDatabaseRef =
  ## Recover the trie database behind a KVT backend handle. Yields the
  ## default (nil) reference when the parent is not a legacy descriptor.
  if be.parent.isLegacy:
    result = be.LegacyCoreDbKvtBE.tdb
func toLegacy*(be: CoreDbMptBackendRef): HexaryTrie =
  ## Recover the hexary trie behind an MPT backend handle. Yields a
  ## default-initialised `HexaryTrie` when the parent is not legacy.
  if be.parent.isLegacy:
    result = be.LegacyCoreDbMptBE.mpt
# ------------------------------------------------------------------------------
# Public legacy iterators
# ------------------------------------------------------------------------------
iterator legaKvtPairs*(kvt: CoreDxKvtRef): (Blob, Blob) =
  ## Walk all key/value pairs of the legacy KVT.
  ## NOTE(review): uses `pairsInMemoryDB`, so this presumably only works
  ## for a `LegacyDbMemory` descriptor — confirm against call sites.
  for k,v in kvt.parent.LegacyDbRef.tdb.pairsInMemoryDB:
    yield (k,v)
iterator legaMptPairs*(
    mpt: CoreDxMptRef;
      ): (Blob,Blob)
      {.gcsafe, raises: [LegacyApiRlpError].} =
  ## Trie traversal over leaf key/value pairs. RLP decoding problems in
  ## the underlying walker are re-raised as `LegacyApiRlpError`.
  reraiseRlpException("legaMptPairs()"):
    for k,v in mpt.methods.backendFn().LegacyCoreDbMptBE.mpt.pairs():
      yield (k,v)
iterator legaReplicate*(
    mpt: CoreDxMptRef;
      ): (Blob,Blob)
      {.gcsafe, raises: [LegacyApiRlpError].} =
  ## Full database replication traversal using the trie's `replicate()`
  ## walker (as opposed to `legaMptPairs` which uses `pairs()`.) RLP
  ## problems are re-raised as `LegacyApiRlpError`.
  reraiseRlpException("legaReplicate()"):
    for k,v in mpt.methods.backendFn().LegacyCoreDbMptBE.mpt.replicate():
      yield (k,v)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,79 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
{.push raises: [].}
import
eth/trie/db,
eth/db/kvstore,
rocksdb,
../base,
./legacy_db,
../../kvstore_rocksdb
type
  LegaPersDbRef = ref object of LegacyDbRef
    rdb: RocksStoreRef       # for backend access with legacy mode

  ChainDB = ref object of RootObj
    # Adapter marrying a `KvStoreRef` view and the raw RocksDB store
    kv: KvStoreRef           # generic key/value interface over `rdb`
    rdb: RocksStoreRef       # the RocksDB store itself (for `close()` etc.)
# TODO KvStore is a virtual interface and TrieDB is a virtual interface - one
# will be enough eventually - unless the TrieDB interface gains operations
# that are not typical to KvStores
proc get(db: ChainDB, key: openArray[byte]): seq[byte] =
  ## Fetch the value stored under `key`; an absent key yields an empty
  ## seq. A failing backend is treated as fatal (`expect`).
  var buf: seq[byte]
  proc capture(data: openArray[byte]) =
    buf = @data
  if db.kv.get(key, capture).expect("working database"):
    result = buf
proc put(db: ChainDB, key, value: openArray[byte]) =
  ## Store `value` under `key`; a failing backend is treated as fatal.
  let rc = db.kv.put(key, value)
  rc.expect("working database")
proc contains(db: ChainDB, key: openArray[byte]): bool =
  ## Membership test for `key`; a failing backend is treated as fatal.
  result = db.kv.contains(key).expect("working database")
proc del(db: ChainDB, key: openArray[byte]): bool =
  ## Delete `key`, returning the backend's deletion flag; a failing
  ## backend is treated as fatal.
  result = db.kv.del(key).expect("working database")
proc newChainDB(path: string): KvResult[ChainDB] =
  ## Open the RocksDB store at `path` (column family "nimbus") and wrap
  ## it in a `ChainDB` adapter; backend errors are passed through.
  let store = RocksStoreRef.init(path, "nimbus").valueOr:
    return err(error)
  ok(ChainDB(kv: kvStore store, rdb: store))
# ------------------------------------------------------------------------------
# Public constructor and low level data retrieval, storage & transation frame
# ------------------------------------------------------------------------------
proc newLegacyPersistentCoreDbRef*(path: string): CoreDbRef =
  ## Constructor for a RocksDB-backed persistent legacy `CoreDbRef`.
  ##
  ## Any backend error from `newChainDB(path)` is converted to a
  ## `ResultDefect` below, i.e. initialisation failure is fatal.
  let backend = newChainDB(path).valueOr:
    let msg = "DB initialisation : " & error
    raise (ref ResultDefect)(msg: msg)

  proc done() =
    # shutdown hook handed to `init()`: closes the RocksDB store
    backend.rdb.close()

  LegaPersDbRef(rdb: backend.rdb).init(LegacyDbPersistent, backend.trieDB, done)
# ------------------------------------------------------------------------------
# Public helper for direct backend access
# ------------------------------------------------------------------------------
proc toRocksStoreRef*(
    db: CoreDbKvtBackendRef | CoreDbMptBackendRef
      ): RocksStoreRef =
  ## Direct access to the underlying RocksDB store. Yields nil unless the
  ## parent descriptor is a persistent legacy DB.
  if LegacyDbPersistent == db.parent.dbType:
    result = LegaPersDbRef(db.parent).rdb
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -26,14 +26,12 @@ from ../../aristo
type type
CoreDbType* = enum CoreDbType* = enum
Ooops Ooops
LegacyDbMemory
LegacyDbPersistent
AristoDbMemory ## Memory backend emulator AristoDbMemory ## Memory backend emulator
AristoDbRocks ## RocksDB backend AristoDbRocks ## RocksDB backend
AristoDbVoid ## No backend AristoDbVoid ## No backend
const const
CoreDbPersistentTypes* = {LegacyDbPersistent, AristoDbRocks} CoreDbPersistentTypes* = {AristoDbRocks}
type type
CoreDbKvtRef* = distinct CoreDxKvtRef # Legacy descriptor CoreDbKvtRef* = distinct CoreDxKvtRef # Legacy descriptor

View File

@ -13,7 +13,7 @@
import import
std/typetraits, std/typetraits,
eth/common, eth/common,
./backend/[aristo_db, legacy_db], ./backend/aristo_db,
./base/[api_tracking, base_desc], ./base/[api_tracking, base_desc],
./base ./base
@ -41,9 +41,6 @@ iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
## ##
kvt.setTrackNewApi KvtPairsIt kvt.setTrackNewApi KvtPairsIt
case kvt.parent.dbType: case kvt.parent.dbType:
of LegacyDbMemory:
for k,v in kvt.legaKvtPairs():
yield (k,v)
of AristoDbMemory: of AristoDbMemory:
for k,v in kvt.aristoKvtPairsMem(): for k,v in kvt.aristoKvtPairsMem():
yield (k,v) yield (k,v)
@ -54,14 +51,11 @@ iterator pairs*(kvt: CoreDxKvtRef): (Blob, Blob) {.apiRaise.} =
raiseAssert: "Unsupported database type: " & $kvt.parent.dbType raiseAssert: "Unsupported database type: " & $kvt.parent.dbType
kvt.ifTrackNewApi: debug newApiTxt, api, elapsed kvt.ifTrackNewApi: debug newApiTxt, api, elapsed
iterator pairs*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} = iterator pairs*(mpt: CoreDxMptRef): (Blob, Blob) =
## Trie traversal, only supported for `CoreDxMptRef` (not `Phk`) ## Trie traversal, only supported for `CoreDxMptRef` (not `Phk`)
## ##
mpt.setTrackNewApi MptPairsIt mpt.setTrackNewApi MptPairsIt
case mpt.parent.dbType: case mpt.parent.dbType:
of LegacyDbMemory, LegacyDbPersistent:
for k,v in mpt.legaMptPairs():
yield (k,v)
of AristoDbMemory, AristoDbRocks, AristoDbVoid: of AristoDbMemory, AristoDbRocks, AristoDbVoid:
for k,v in mpt.aristoMptPairs(): for k,v in mpt.aristoMptPairs():
yield (k,v) yield (k,v)
@ -76,9 +70,6 @@ iterator replicate*(mpt: CoreDxMptRef): (Blob, Blob) {.apiRaise.} =
## ##
mpt.setTrackNewApi MptReplicateIt mpt.setTrackNewApi MptReplicateIt
case mpt.parent.dbType: case mpt.parent.dbType:
of LegacyDbMemory, LegacyDbPersistent:
for k,v in mpt.legaReplicate():
yield (k,v)
of AristoDbMemory: of AristoDbMemory:
for k,v in aristoReplicateMem(mpt): for k,v in aristoReplicateMem(mpt):
yield (k,v) yield (k,v)
@ -98,7 +89,7 @@ when ProvideLegacyAPI:
for k,v in kvt.distinctBase.pairs(): yield (k,v) for k,v in kvt.distinctBase.pairs(): yield (k,v)
kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed kvt.ifTrackLegaApi: debug legaApiTxt, api, elapsed
iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) {.apiRaise.} = iterator pairs*(mpt: CoreDbMptRef): (Blob, Blob) =
## Trie traversal, not supported for `CoreDbPhkRef` ## Trie traversal, not supported for `CoreDbPhkRef`
mpt.setTrackLegaApi LegaMptPairsIt mpt.setTrackLegaApi LegaMptPairsIt
for k,v in mpt.distinctBase.pairs(): yield (k,v) for k,v in mpt.distinctBase.pairs(): yield (k,v)

View File

@ -13,7 +13,7 @@
import import
std/typetraits, std/typetraits,
eth/common, eth/common,
./backend/[aristo_db, aristo_rocksdb, legacy_db], ./backend/[aristo_db, aristo_rocksdb],
./base/[api_tracking, base_desc], ./base/[api_tracking, base_desc],
./base ./base
@ -30,7 +30,7 @@ when ProvideLegacyAPI and CoreDbEnableApiTracking:
newApiTxt = logTxt & "API" newApiTxt = logTxt & "API"
# Annotation helper(s) # Annotation helper(s)
{.pragma: rlpRaise, gcsafe, raises: [AristoApiRlpError, LegacyApiRlpError].} {.pragma: rlpRaise, gcsafe, raises: [AristoApiRlpError].}
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public iterators # Public iterators
@ -41,9 +41,6 @@ iterator replicatePersistent*(mpt: CoreDxMptRef): (Blob, Blob) {.rlpRaise.} =
## ##
mpt.setTrackNewApi MptReplicateIt mpt.setTrackNewApi MptReplicateIt
case mpt.parent.dbType: case mpt.parent.dbType:
of LegacyDbMemory, LegacyDbPersistent:
for k,v in mpt.legaReplicate():
yield (k,v)
of AristoDbMemory: of AristoDbMemory:
for k,v in aristoReplicateMem(mpt): for k,v in aristoReplicateMem(mpt):
yield (k,v) yield (k,v)

View File

@ -1,745 +0,0 @@
# Nimbus
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
## This file was renamed from `core_apps.nim`.
{.push raises: [].}
import
std/[algorithm, options, sequtils],
chronicles,
eth/[common, rlp],
stew/byteutils,
"../.."/[errors, constants],
../storage_types,
"."/base
logScope:
topics = "core_db-apps"
type
TransactionKey = tuple
blockNumber: BlockNumber
index: int
# ------------------------------------------------------------------------------
# Forward declarations
# ------------------------------------------------------------------------------
proc getBlockHeader*(
db: CoreDbRef;
n: BlockNumber;
output: var BlockHeader;
): bool
{.gcsafe, raises: [RlpError].}
proc getBlockHeader*(
db: CoreDbRef,
blockHash: Hash256;
): BlockHeader
{.gcsafe, raises: [BlockNotFound].}
proc getBlockHash*(
db: CoreDbRef;
n: BlockNumber;
output: var Hash256;
): bool
{.gcsafe, raises: [RlpError].}
proc addBlockNumberToHashLookup*(
db: CoreDbRef;
header: BlockHeader;
) {.gcsafe.}
proc getBlockHeader*(
db: CoreDbRef;
blockHash: Hash256;
output: var BlockHeader;
): bool
{.gcsafe.}
# Copied from `utils/utils` which cannot be imported here in order to
# avoid circular imports.
func hash(b: BlockHeader): Hash256
# ------------------------------------------------------------------------------
# Private iterators
# ------------------------------------------------------------------------------
iterator findNewAncestors(
    db: CoreDbRef;
    header: BlockHeader;
      ): BlockHeader
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Returns the chain leading up from the given header until the first
  ## ancestor it has in common with our canonical chain.
  ##
  ## Headers are yielded newest-first; the common ancestor itself is not
  ## yielded. Raises `BlockNotFound` when a parent header is missing.
  var h = header
  var orig: BlockHeader
  while true:
    if db.getBlockHeader(h.blockNumber, orig) and orig.hash == h.hash:
      break    # `h` is already canonical: common ancestor reached
    yield h
    if h.parentHash == GENESIS_PARENT_HASH:
      break    # walked back to genesis: nothing more in common
    else:
      h = db.getBlockHeader(h.parentHash)
# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------
iterator getBlockTransactionData*(
    db: CoreDbRef;
    transactionRoot: Hash256;
      ): seq[byte]
      {.gcsafe, raises: [RlpError].} =
  ## Yield the raw RLP-encoded transactions stored under
  ## `transactionRoot`, in index order. Entries are keyed by
  ## `rlp.encode(index)`; the walk stops at the first missing index.
  var transactionDb = db.mptPrune transactionRoot
  var transactionIdx = 0
  while true:
    let transactionKey = rlp.encode(transactionIdx)
    if transactionKey in transactionDb:
      yield transactionDb.get(transactionKey)
    else:
      break
    inc transactionIdx
iterator getBlockTransactions*(
db: CoreDbRef;
header: BlockHeader;
): Transaction
{.gcsafe, raises: [RlpError].} =
for encodedTx in db.getBlockTransactionData(header.txRoot):
yield rlp.decode(encodedTx, Transaction)
iterator getBlockTransactionHashes*(
db: CoreDbRef;
blockHeader: BlockHeader;
): Hash256
{.gcsafe, raises: [RlpError].} =
## Returns an iterable of the transaction hashes from th block specified
## by the given block header.
for encodedTx in db.getBlockTransactionData(blockHeader.txRoot):
let tx = rlp.decode(encodedTx, Transaction)
yield rlpHash(tx) # beware EIP-4844
iterator getWithdrawalsData*(
db: CoreDbRef;
withdrawalsRoot: Hash256;
): seq[byte]
{.gcsafe, raises: [RlpError].} =
var wddb = db.mptPrune withdrawalsRoot
var idx = 0
while true:
let wdKey = rlp.encode(idx)
if wdKey in wddb:
yield wddb.get(wdKey)
else:
break
inc idx
iterator getReceipts*(
db: CoreDbRef;
receiptRoot: Hash256;
): Receipt
{.gcsafe, raises: [RlpError].} =
var receiptDb = db.mptPrune receiptRoot
var receiptIdx = 0
while true:
let receiptKey = rlp.encode(receiptIdx)
if receiptKey in receiptDb:
let receiptData = receiptDb.get(receiptKey)
yield rlp.decode(receiptData, Receipt)
else:
break
inc receiptIdx
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
func hash(b: BlockHeader): Hash256 =
  ## Header hash (RLP encode + keccak). Local copy of `utils/utils.hash`
  ## to avoid a circular import.
  result = rlpHash(b)
proc removeTransactionFromCanonicalChain(
    db: CoreDbRef;
    transactionHash: Hash256;
      ) =
  ## Removes the transaction specified by the given hash from the
  ## canonical chain by deleting its hash->(block,index) lookup entry.
  let lookupKey = transactionHashToBlockKey(transactionHash)
  db.kvt.del(lookupKey.toOpenArray)
proc setAsCanonicalChainHead(
    db: CoreDbRef;
    headerHash: Hash256;
      ): seq[BlockHeader]
      {.gcsafe, raises: [RlpError,BlockNotFound].} =
  ## Sets the header as the canonical chain HEAD.
  ##
  ## Returns the headers (oldest first) that became canonical through this
  ## re-org. Transactions of displaced blocks are removed from the
  ## hash->block lookup.
  let header = db.getBlockHeader(headerHash)

  var newCanonicalHeaders = sequtils.toSeq(db.findNewAncestors(header))
  reverse(newCanonicalHeaders)    # findNewAncestors yields newest-first
  for h in newCanonicalHeaders:
    var oldHash: Hash256
    if not db.getBlockHash(h.blockNumber, oldHash):
      break    # no old block at this height, nothing to displace

    let oldHeader = db.getBlockHeader(oldHash)
    for txHash in db.getBlockTransactionHashes(oldHeader):
      db.removeTransactionFromCanonicalChain(txHash)
      # TODO re-add txn to internal pending pool (only if local sender)

  for h in newCanonicalHeaders:
    db.addBlockNumberToHashLookup(h)

  db.kvt.put(canonicalHeadHashKey().toOpenArray, rlp.encode(headerHash))

  return newCanonicalHeaders
proc markCanonicalChain(
    db: CoreDbRef;
    header: BlockHeader;
    headerHash: Hash256;
      ): bool
      {.gcsafe, raises: [RlpError].} =
  ## Mark this chain as canonical by writing number->hash lookup entries
  ## down to the forking point. Returns false when an ancestor header is
  ## missing from the database.
  var
    currHash = headerHash
    currHeader = header

  # mark current header as canonical
  let key = blockNumberToHashKey(currHeader.blockNumber)
  db.kvt.put(key.toOpenArray, rlp.encode(currHash))

  # it is a genesis block, done
  if currHeader.parentHash == Hash256():
    return true

  # mark ancestor blocks as canonical too
  currHash = currHeader.parentHash
  if not db.getBlockHeader(currHeader.parentHash, currHeader):
    return false

  while currHash != Hash256():
    let key = blockNumberToHashKey(currHeader.blockNumber)
    let data = db.kvt.get(key.toOpenArray)
    if data.len == 0:
      # not marked, mark it
      db.kvt.put(key.toOpenArray, rlp.encode(currHash))
    elif rlp.decode(data, Hash256) != currHash:
      # replace prev chain
      db.kvt.put(key.toOpenArray, rlp.encode(currHash))
    else:
      # forking point, done
      break

    if currHeader.parentHash == Hash256():
      break    # reached genesis while re-marking

    currHash = currHeader.parentHash
    if not db.getBlockHeader(currHeader.parentHash, currHeader):
      return false

  return true
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc exists*(db: CoreDbRef, hash: Hash256): bool =
db.kvt.contains(hash.data)
proc getBlockHeader*(
db: CoreDbRef;
blockHash: Hash256;
output: var BlockHeader;
): bool =
let data = db.kvt.get(genericHashKey(blockHash).toOpenArray)
if data.len != 0:
try:
output = rlp.decode(data, BlockHeader)
true
except RlpError:
false
else:
false
proc getBlockHeader*(
db: CoreDbRef,
blockHash: Hash256;
): BlockHeader =
## Returns the requested block header as specified by block hash.
##
## Raises BlockNotFound if it is not present in the db.
if not db.getBlockHeader(blockHash, result):
raise newException(
BlockNotFound, "No block with hash " & blockHash.data.toHex)
proc getHash(
db: CoreDbRef;
key: DbKey;
output: var Hash256;
): bool
{.gcsafe, raises: [RlpError].} =
let data = db.kvt.get(key.toOpenArray)
if data.len != 0:
output = rlp.decode(data, Hash256)
result = true
proc getCanonicalHead*(
db: CoreDbRef;
): BlockHeader
{.gcsafe, raises: [RlpError,EVMError].} =
var headHash: Hash256
if not db.getHash(canonicalHeadHashKey(), headHash) or
not db.getBlockHeader(headHash, result):
raise newException(
CanonicalHeadNotFound, "No canonical head set for this chain")
proc getCanonicalHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].}=
discard db.getHash(canonicalHeadHashKey(), result)
proc getBlockHash*(
db: CoreDbRef;
n: BlockNumber;
output: var Hash256;
): bool =
## Return the block hash for the given block number.
db.getHash(blockNumberToHashKey(n), output)
proc getBlockHash*(
db: CoreDbRef;
n: BlockNumber;
): Hash256
{.gcsafe, raises: [RlpError,BlockNotFound].} =
## Return the block hash for the given block number.
if not db.getHash(blockNumberToHashKey(n), result):
raise newException(BlockNotFound, "No block hash for number " & $n)
proc getHeadBlockHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
if not db.getHash(canonicalHeadHashKey(), result):
result = Hash256()
proc getBlockHeader*(
db: CoreDbRef;
n: BlockNumber;
output: var BlockHeader;
): bool =
## Returns the block header with the given number in the canonical chain.
var blockHash: Hash256
if db.getBlockHash(n, blockHash):
result = db.getBlockHeader(blockHash, output)
proc getBlockHeaderWithHash*(
    db: CoreDbRef;
    n: BlockNumber;
      ): Option[(BlockHeader, Hash256)]
      {.gcsafe, raises: [RlpError].} =
  ## Returns the block header and its hash, with the given number in the
  ## canonical chain. The hash is returned to avoid recomputing it.
  var hash: Hash256
  if db.getBlockHash(n, hash):
    # Note: this will throw if header is not present.
    var header: BlockHeader
    if db.getBlockHeader(hash, header):
      return some((header, hash))
    else:
      # This should not happen; if it does, fail loudly because it means
      # the database is corrupted (number->hash mapping without header).
      raiseAssert("Corrupted database. Mapping number->hash present, without header in database")
  else:
    return none[(BlockHeader, Hash256)]()
proc getBlockHeader*(
db: CoreDbRef;
n: BlockNumber;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
## Returns the block header with the given number in the canonical chain.
## Raises BlockNotFound error if the block is not in the DB.
db.getBlockHeader(db.getBlockHash(n))
proc getScore*(
db: CoreDbRef;
blockHash: Hash256;
): UInt256
{.gcsafe, raises: [RlpError].} =
rlp.decode(db.kvt.get(blockHashToScoreKey(blockHash).toOpenArray), UInt256)
proc setScore*(db: CoreDbRef; blockHash: Hash256, score: UInt256) =
## for testing purpose
db.kvt.put(blockHashToScoreKey(blockHash).toOpenArray, rlp.encode(score))
proc getTd*(db: CoreDbRef; blockHash: Hash256, td: var UInt256): bool =
let bytes = db.kvt.get(blockHashToScoreKey(blockHash).toOpenArray)
if bytes.len == 0: return false
try:
td = rlp.decode(bytes, UInt256)
except RlpError:
return false
return true
proc headTotalDifficulty*(
db: CoreDbRef;
): UInt256
{.gcsafe, raises: [RlpError].} =
# this is actually a combination of `getHash` and `getScore`
const key = canonicalHeadHashKey()
let data = db.kvt.get(key.toOpenArray)
if data.len == 0:
return 0.u256
let blockHash = rlp.decode(data, Hash256)
rlp.decode(db.kvt.get(blockHashToScoreKey(blockHash).toOpenArray), UInt256)
proc getAncestorsHashes*(
db: CoreDbRef;
limit: UInt256;
header: BlockHeader;
): seq[Hash256]
{.gcsafe, raises: [BlockNotFound].} =
var ancestorCount = min(header.blockNumber, limit).truncate(int)
var h = header
result = newSeq[Hash256](ancestorCount)
while ancestorCount > 0:
h = db.getBlockHeader(h.parentHash)
result[ancestorCount - 1] = h.hash
dec ancestorCount
proc addBlockNumberToHashLookup*(db: CoreDbRef; header: BlockHeader) =
db.kvt.put(
blockNumberToHashKey(header.blockNumber).toOpenArray,
rlp.encode(header.hash))
proc persistTransactions*(
db: CoreDbRef;
blockNumber: BlockNumber;
transactions: openArray[Transaction];
): Hash256
{.gcsafe, raises: [CatchableError].} =
var trie = db.mptPrune()
for idx, tx in transactions:
let
encodedTx = rlp.encode(tx.removeNetworkPayload)
txHash = rlpHash(tx) # beware EIP-4844
txKey: TransactionKey = (blockNumber, idx)
trie.put(rlp.encode(idx), encodedTx)
db.kvt.put(transactionHashToBlockKey(txHash).toOpenArray, rlp.encode(txKey))
trie.rootHash
proc getTransaction*(
db: CoreDbRef;
txRoot: Hash256;
txIndex: int;
res: var Transaction;
): bool
{.gcsafe, raises: [RlpError].} =
var db = db.mptPrune txRoot
let txData = db.get(rlp.encode(txIndex))
if txData.len > 0:
res = rlp.decode(txData, Transaction)
result = true
proc getTransactionCount*(
    db: CoreDbRef;
    txRoot: Hash256;
      ): int
    {.gcsafe, raises: [RlpError].} =
  ## Count the consecutive transaction indices present in the trie rooted
  ## at `txRoot` (entries are keyed by `rlp.encode(index)` from 0 up.)
  var trie = db.mptPrune txRoot
  var n = 0
  while rlp.encode(n) in trie:
    inc n
  result = n
proc getUnclesCount*(
db: CoreDbRef;
ommersHash: Hash256;
): int
{.gcsafe, raises: [RlpError].} =
if ommersHash != EMPTY_UNCLE_HASH:
let encodedUncles = db.kvt.get(genericHashKey(ommersHash).toOpenArray)
if encodedUncles.len != 0:
let r = rlpFromBytes(encodedUncles)
result = r.listLen
proc getUncles*(
db: CoreDbRef;
ommersHash: Hash256;
): seq[BlockHeader]
{.gcsafe, raises: [RlpError].} =
if ommersHash != EMPTY_UNCLE_HASH:
let encodedUncles = db.kvt.get(genericHashKey(ommersHash).toOpenArray)
if encodedUncles.len != 0:
result = rlp.decode(encodedUncles, seq[BlockHeader])
proc persistWithdrawals*(
db: CoreDbRef;
withdrawals: openArray[Withdrawal];
): Hash256
{.gcsafe, raises: [CatchableError].} =
var trie = db.mptPrune()
for idx, wd in withdrawals:
let encodedWd = rlp.encode(wd)
trie.put(rlp.encode(idx), encodedWd)
trie.rootHash
proc getWithdrawals*(
db: CoreDbRef;
withdrawalsRoot: Hash256;
): seq[Withdrawal]
{.gcsafe, raises: [RlpError].} =
for encodedWd in db.getWithdrawalsData(withdrawalsRoot):
result.add(rlp.decode(encodedWd, Withdrawal))
proc getBlockBody*(
db: CoreDbRef;
header: BlockHeader;
output: var BlockBody;
): bool
{.gcsafe, raises: [RlpError].} =
result = true
output.transactions = @[]
output.uncles = @[]
for encodedTx in db.getBlockTransactionData(header.txRoot):
output.transactions.add(rlp.decode(encodedTx, Transaction))
if header.ommersHash != EMPTY_UNCLE_HASH:
let encodedUncles = db.kvt.get(genericHashKey(header.ommersHash).toOpenArray)
if encodedUncles.len != 0:
output.uncles = rlp.decode(encodedUncles, seq[BlockHeader])
else:
result = false
if header.withdrawalsRoot.isSome:
output.withdrawals = some(db.getWithdrawals(header.withdrawalsRoot.get))
proc getBlockBody*(
db: CoreDbRef;
blockHash: Hash256;
output: var BlockBody;
): bool
{.gcsafe, raises: [RlpError].} =
var header: BlockHeader
if db.getBlockHeader(blockHash, header):
return db.getBlockBody(header, output)
proc getBlockBody*(
db: CoreDbRef;
hash: Hash256;
): BlockBody
{.gcsafe, raises: [RlpError,ValueError].} =
if not db.getBlockBody(hash, result):
raise newException(ValueError, "Error when retrieving block body")
proc getUncleHashes*(
db: CoreDbRef;
blockHashes: openArray[Hash256];
): seq[Hash256]
{.gcsafe, raises: [RlpError,ValueError].} =
for blockHash in blockHashes:
var blockBody = db.getBlockBody(blockHash)
for uncle in blockBody.uncles:
result.add uncle.hash
proc getUncleHashes*(
db: CoreDbRef;
header: BlockHeader;
): seq[Hash256]
{.gcsafe, raises: [RlpError].} =
if header.ommersHash != EMPTY_UNCLE_HASH:
let encodedUncles = db.kvt.get(genericHashKey(header.ommersHash).toOpenArray)
if encodedUncles.len != 0:
let uncles = rlp.decode(encodedUncles, seq[BlockHeader])
for x in uncles:
result.add x.hash
proc getTransactionKey*(
db: CoreDbRef;
transactionHash: Hash256;
): tuple[blockNumber: BlockNumber, index: int]
{.gcsafe, raises: [RlpError].} =
let tx = db.kvt.get(transactionHashToBlockKey(transactionHash).toOpenArray)
if tx.len > 0:
let key = rlp.decode(tx, TransactionKey)
result = (key.blockNumber, key.index)
else:
result = (0.toBlockNumber, -1)
proc headerExists*(db: CoreDbRef; blockHash: Hash256): bool =
## Returns True if the header with the given block hash is in our DB.
db.kvt.contains(genericHashKey(blockHash).toOpenArray)
proc setHead*(
db: CoreDbRef;
blockHash: Hash256;
): bool
{.gcsafe, raises: [RlpError].} =
var header: BlockHeader
if not db.getBlockHeader(blockHash, header):
return false
if not db.markCanonicalChain(header, blockHash):
return false
db.kvt.put(canonicalHeadHashKey().toOpenArray, rlp.encode(blockHash))
return true
proc setHead*(
db: CoreDbRef;
header: BlockHeader;
writeHeader = false;
): bool
{.gcsafe, raises: [RlpError].} =
var headerHash = rlpHash(header)
if writeHeader:
db.kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header))
if not db.markCanonicalChain(header, headerHash):
return false
db.kvt.put(canonicalHeadHashKey().toOpenArray, rlp.encode(headerHash))
return true
proc persistReceipts*(
db: CoreDbRef;
receipts: openArray[Receipt];
): Hash256
{.gcsafe, raises: [CatchableError].} =
var trie = db.mptPrune()
for idx, rec in receipts:
trie.put(rlp.encode(idx), rlp.encode(rec))
trie.rootHash
proc getReceipts*(
db: CoreDbRef;
receiptRoot: Hash256;
): seq[Receipt]
{.gcsafe, raises: [RlpError].} =
var receipts = newSeq[Receipt]()
for r in db.getReceipts(receiptRoot):
receipts.add(r)
return receipts
proc persistHeaderToDb*(
    db: CoreDbRef;
    header: BlockHeader;
    forceCanonical: bool;
    startOfHistory = GENESIS_PARENT_HASH;
      ): seq[BlockHeader]
      {.gcsafe, raises: [RlpError,EVMError].} =
  ## Store `header`, its total-difficulty score, and the number->hash
  ## lookup. When the resulting chain outscores the current head (or
  ## `forceCanonical` is set), the canonical head is moved and the headers
  ## that became canonical are returned; otherwise the result is empty.
  ##
  ## Raises `ParentNotFound` when the parent header is unknown, unless
  ## `header` starts the history as per `startOfHistory`.
  let isStartOfHistory = header.parentHash == startOfHistory
  let headerHash = header.blockHash
  if not isStartOfHistory and not db.headerExists(header.parentHash):
    raise newException(ParentNotFound, "Cannot persist block header " &
      $headerHash & " with unknown parent " & $header.parentHash)
  db.kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header))

  # Total difficulty accumulates along the chain
  let score = if isStartOfHistory: header.difficulty
              else: db.getScore(header.parentHash) + header.difficulty
  db.kvt.put(blockHashToScoreKey(headerHash).toOpenArray, rlp.encode(score))

  db.addBlockNumberToHashLookup(header)

  var headScore: UInt256
  try:
    headScore = db.getScore(db.getCanonicalHead().hash)
  except CanonicalHeadNotFound:
    # No head yet: this header becomes the canonical head unconditionally
    return db.setAsCanonicalChainHead(headerHash)

  if score > headScore or forceCanonical:
    return db.setAsCanonicalChainHead(headerHash)
proc persistHeaderToDbWithoutSetHead*(
db: CoreDbRef;
header: BlockHeader;
startOfHistory = GENESIS_PARENT_HASH;
) {.gcsafe, raises: [RlpError].} =
let isStartOfHistory = header.parentHash == startOfHistory
let headerHash = header.blockHash
let score = if isStartOfHistory: header.difficulty
else: db.getScore(header.parentHash) + header.difficulty
db.kvt.put(blockHashToScoreKey(headerHash).toOpenArray, rlp.encode(score))
db.kvt.put(genericHashKey(headerHash).toOpenArray, rlp.encode(header))
# FIXME-Adam: This seems like a bad idea. I don't see a way to get the score
# in stateless mode, but it seems dangerous to just shove the header into
# the DB *without* also storing the score.
proc persistHeaderToDbWithoutSetHeadOrScore*(db: CoreDbRef; header: BlockHeader) =
db.addBlockNumberToHashLookup(header)
db.kvt.put(genericHashKey(header.blockHash).toOpenArray, rlp.encode(header))
proc persistUncles*(db: CoreDbRef, uncles: openArray[BlockHeader]): Hash256 =
## Persists the list of uncles to the database.
## Returns the uncles hash.
let enc = rlp.encode(uncles)
result = keccakHash(enc)
db.kvt.put(genericHashKey(result).toOpenArray, enc)
proc safeHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
discard db.getHash(safeHashKey(), result)
proc safeHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
db.kvt.put(safeHashKey().toOpenArray, rlp.encode(headerHash))
proc finalizedHeaderHash*(
db: CoreDbRef;
): Hash256
{.gcsafe, raises: [RlpError].} =
discard db.getHash(finalizedHashKey(), result)
proc finalizedHeaderHash*(db: CoreDbRef, headerHash: Hash256) =
db.kvt.put(finalizedHashKey().toOpenArray, rlp.encode(headerHash))
proc safeHeader*(
db: CoreDbRef;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
db.getBlockHeader(db.safeHeaderHash)
proc finalizedHeader*(
db: CoreDbRef;
): BlockHeader
{.gcsafe, raises: [RlpError,BlockNotFound].} =
db.getBlockHeader(db.finalizedHeaderHash)
proc haveBlockAndState*(db: CoreDbRef, headerHash: Hash256): bool =
  ## True iff the header for `headerHash` is stored AND its state root is
  ## present in the database.
  var header: BlockHeader
  if db.getBlockHeader(headerHash, header):
    # header found — now check that the state root exists
    db.exists(header.stateRoot)
  else:
    false
proc getBlockWitness*(db: CoreDbRef, blockHash: Hash256): seq[byte] {.gcsafe.} =
  ## Raw block-witness bytes stored for `blockHash`.
  ## NOTE(review): behavior when no witness is stored depends on `kvt.get` —
  ## presumably an empty seq; confirm against the KVT layer.
  let witnessKey = blockHashToBlockWitnessKey(blockHash)
  db.kvt.get(witnessKey.toOpenArray)
proc setBlockWitness*(db: CoreDbRef, blockHash: Hash256, witness: seq[byte]) =
  ## Store the raw block-witness bytes for `blockHash`.
  let witnessKey = blockHashToBlockWitnessKey(blockHash)
  db.kvt.put(witnessKey.toOpenArray, witness)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -12,12 +12,11 @@
import import
std/options, std/options,
eth/[common, trie/db], eth/common,
../aristo, ../aristo,
./backend/[aristo_db, legacy_db] ./backend/aristo_db
import import
#./core_apps_legacy as core_apps -- avoid
./core_apps_newapi as core_apps ./core_apps_newapi as core_apps
import import
./base except bless ./base except bless
@ -36,10 +35,6 @@ export
toAristoProfData, toAristoProfData,
toAristoOldestState, toAristoOldestState,
# see `legacy_db`
isLegacy,
toLegacy,
# Standard interface for calculating merkle hash signatures (see `aristo`) # Standard interface for calculating merkle hash signatures (see `aristo`)
MerkleSignRef, MerkleSignRef,
merkleSignBegin, merkleSignBegin,
@ -51,17 +46,6 @@ export
# Public constructors # Public constructors
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc newCoreDbRef*(
db: TrieDatabaseRef;
): CoreDbRef
{.gcsafe, deprecated: "use newCoreDbRef(LegacyDbPersistent,<path>)".} =
## Legacy constructor.
##
## Note: Using legacy notation `newCoreDbRef()` rather than
## `CoreDbRef.init()` because of compiler coughing.
##
db.newLegacyPersistentCoreDbRef()
proc newCoreDbRef*( proc newCoreDbRef*(
dbType: static[CoreDbType]; # Database type symbol dbType: static[CoreDbType]; # Database type symbol
): CoreDbRef = ): CoreDbRef =
@ -70,10 +54,7 @@ proc newCoreDbRef*(
## Note: Using legacy notation `newCoreDbRef()` rather than ## Note: Using legacy notation `newCoreDbRef()` rather than
## `CoreDbRef.init()` because of compiler coughing. ## `CoreDbRef.init()` because of compiler coughing.
## ##
when dbType == LegacyDbMemory: when dbType == AristoDbMemory:
newLegacyMemoryCoreDbRef()
elif dbType == AristoDbMemory:
newAristoMemoryCoreDbRef() newAristoMemoryCoreDbRef()
elif dbType == AristoDbVoid: elif dbType == AristoDbVoid:

View File

@ -25,12 +25,11 @@ import
../aristo, ../aristo,
./memory_only, ./memory_only,
base_iterators_persistent, base_iterators_persistent,
./backend/[aristo_rocksdb, legacy_rocksdb] ./backend/aristo_rocksdb
export export
memory_only, memory_only,
base_iterators_persistent, base_iterators_persistent
toRocksStoreRef
proc newCoreDbRef*( proc newCoreDbRef*(
dbType: static[CoreDbType]; # Database type symbol dbType: static[CoreDbType]; # Database type symbol
@ -40,10 +39,7 @@ proc newCoreDbRef*(
## ##
## Note: Using legacy notation `newCoreDbRef()` rather than ## Note: Using legacy notation `newCoreDbRef()` rather than
## `CoreDbRef.init()` because of compiler coughing. ## `CoreDbRef.init()` because of compiler coughing.
when dbType == LegacyDbPersistent: when dbType == AristoDbRocks:
newLegacyPersistentCoreDbRef path
elif dbType == AristoDbRocks:
newAristoRocksDbCoreDbRef path newAristoRocksDbCoreDbRef path
else: else:

View File

@ -96,12 +96,8 @@ template initAccountsTrie*(db: DB, isPruning = true): AccountsTrie =
proc getAccountBytes*(trie: AccountsTrie, address: EthAddress): seq[byte] = proc getAccountBytes*(trie: AccountsTrie, address: EthAddress): seq[byte] =
CoreDbPhkRef(trie).get(address) CoreDbPhkRef(trie).get(address)
proc maybeGetAccountBytes*(trie: AccountsTrie, address: EthAddress): Option[Blob] {.gcsafe, raises: [RlpError].} = proc maybeGetAccountBytes*(trie: AccountsTrie, address: EthAddress): Option[Blob] =
let phk = CoreDbPhkRef(trie) some(CoreDbPhkRef(trie).get(address))
if phk.parent.isLegacy:
phk.toMpt.distinctBase.backend.toLegacy.SecureHexaryTrie.maybeGet(address)
else:
some(phk.get(address))
proc putAccountBytes*(trie: var AccountsTrie, address: EthAddress, value: openArray[byte]) = proc putAccountBytes*(trie: var AccountsTrie, address: EthAddress, value: openArray[byte]) =
CoreDbPhkRef(trie).put(address, value) CoreDbPhkRef(trie).put(address, value)
@ -131,12 +127,8 @@ template createTrieKeyFromSlot*(slot: UInt256): auto =
proc getSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): seq[byte] = proc getSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): seq[byte] =
CoreDbPhkRef(trie).get(slotAsKey) CoreDbPhkRef(trie).get(slotAsKey)
proc maybeGetSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): Option[Blob] {.gcsafe, raises: [RlpError].} = proc maybeGetSlotBytes*(trie: StorageTrie, slotAsKey: openArray[byte]): Option[Blob] =
let phk = CoreDbPhkRef(trie) some(CoreDbPhkRef(trie).get(slotAsKey))
if phk.parent.isLegacy:
phk.toMpt.distinctBase.backend.toLegacy.SecureHexaryTrie.maybeGet(slotAsKey)
else:
some(phk.get(slotAsKey))
proc putSlotBytes*(trie: var StorageTrie, slotAsKey: openArray[byte], value: openArray[byte]) = proc putSlotBytes*(trie: var StorageTrie, slotAsKey: openArray[byte], value: openArray[byte]) =
CoreDbPhkRef(trie).put(slotAsKey, value) CoreDbPhkRef(trie).put(slotAsKey, value)

View File

@ -21,7 +21,7 @@ The points of these two files are:
import import
chronicles, chronicles,
eth/[common, trie/db], eth/common,
"."/[core_db, distinct_tries, storage_types, values_from_bytes] "."/[core_db, distinct_tries, storage_types, values_from_bytes]
@ -55,10 +55,7 @@ proc ifNodesExistGetAccount*(trie: AccountsTrie, address: EthAddress): Option[Ac
ifNodesExistGetAccountBytes(trie, address).map(accountFromBytes) ifNodesExistGetAccountBytes(trie, address).map(accountFromBytes)
proc maybeGetCode*(db: CoreDbRef, codeHash: Hash256): Option[seq[byte]] = proc maybeGetCode*(db: CoreDbRef, codeHash: Hash256): Option[seq[byte]] =
if db.isLegacy: some(db.kvt.get(contractHashKey(codeHash).toOpenArray))
db.newKvt.backend.toLegacy.maybeGet(contractHashKey(codeHash).toOpenArray)
else:
some(db.kvt.get(contractHashKey(codeHash).toOpenArray))
proc maybeGetCode*(trie: AccountsTrie, address: EthAddress): Option[seq[byte]] = proc maybeGetCode*(trie: AccountsTrie, address: EthAddress): Option[seq[byte]] =
let maybeAcc = trie.ifNodesExistGetAccount(address) let maybeAcc = trie.ifNodesExistGetAccount(address)

View File

@ -22,20 +22,18 @@
import import
eth/common, eth/common,
./core_db, ./core_db,
./ledger/backend/[ ./ledger/backend/[accounts_ledger, accounts_ledger_desc],
accounts_cache, accounts_cache_desc, ./ledger/[base_iterators, distinct_ledgers]
accounts_ledger, accounts_ledger_desc],
./ledger/base_iterators
import import
./ledger/base except LedgerApiTxt, beginTrackApi, bless, ifTrackApi ./ledger/base except LedgerApiTxt, beginTrackApi, bless, ifTrackApi
export export
AccountsCache,
AccountsLedgerRef, AccountsLedgerRef,
LedgerType, LedgerType,
base, base,
base_iterators, base_iterators,
distinct_ledgers,
init init
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -46,14 +44,10 @@ proc init*(
ldgType: LedgerType; ldgType: LedgerType;
db: CoreDbRef; db: CoreDbRef;
root: Hash256; root: Hash256;
pruneTrie: bool;
): LedgerRef = ): LedgerRef =
case ldgType: case ldgType:
of LegacyAccountsCache:
result = AccountsCache.init(db, root, pruneTrie)
of LedgerCache: of LedgerCache:
result = AccountsLedgerRef.init(db, root, pruneTrie) AccountsLedgerRef.init(db, root)
else: else:
raiseAssert: "Missing ledger type label" raiseAssert: "Missing ledger type label"

View File

@ -34,7 +34,7 @@ import
eth/[common, rlp], eth/[common, rlp],
results, results,
../../../stateless/multi_keys, ../../../stateless/multi_keys,
"../.."/[constants, errors, utils/utils], "../.."/[constants, utils/utils],
../access_list as ac_access_list, ../access_list as ac_access_list,
".."/[core_db, storage_types, transient_storage], ".."/[core_db, storage_types, transient_storage],
./distinct_ledgers ./distinct_ledgers
@ -650,7 +650,7 @@ iterator pairs*(ac: AccountsLedgerRef): (EthAddress, Account) =
for address, account in ac.savePoint.cache: for address, account in ac.savePoint.cache:
yield (address, account.statement.recast().value) yield (address, account.statement.recast().value)
iterator storage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) {.gcsafe, raises: [CoreDbApiError].} = iterator storage*(ac: AccountsLedgerRef, address: EthAddress): (UInt256, UInt256) =
# beware that if the account not persisted, # beware that if the account not persisted,
# the storage root will not be updated # the storage root will not be updated
let acc = ac.getAccount(address, false) let acc = ac.getAccount(address, false)

View File

@ -1,251 +0,0 @@
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
{.push raises: [].}
import
eth/common,
../../../../stateless/multi_keys,
"../.."/[core_db, distinct_tries],
../accounts_cache as impl,
".."/[base, base/base_desc],
./accounts_cache_desc as wrp
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
template noRlpException(info: static[string]; code: untyped) =
try:
code
except RlpError as e:
raiseAssert info & ", name=\"" & $e.name & "\", msg=\"" & e.msg & "\""
func savePoint(sp: LedgerSpRef): impl.SavePoint =
wrp.SavePoint(sp).sp
# ----------------
proc ledgerMethods(lc: impl.AccountsCache): LedgerFns =
LedgerFns(
accessListFn: proc(eAddr: EthAddress) =
lc.accessList(eAddr),
accessList2Fn: proc(eAddr: EthAddress, slot: UInt256) =
lc.accessList(eAddr, slot),
accountExistsFn: proc(eAddr: EthAddress): bool =
lc.accountExists(eAddr),
addBalanceFn: proc(eAddr: EthAddress, delta: UInt256) =
lc.addBalance(eAddr, delta),
addLogEntryFn: proc(log: Log) =
lc.addLogEntry(log),
beginSavepointFn: proc(): LedgerSpRef =
wrp.SavePoint(sp: lc.beginSavepoint()),
clearStorageFn: proc(eAddr: EthAddress) =
lc.clearStorage(eAddr),
clearTransientStorageFn: proc() =
lc.clearTransientStorage(),
collectWitnessDataFn: proc() =
lc.collectWitnessData(),
commitFn: proc(sp: LedgerSpRef) =
lc.commit(sp.savePoint),
deleteAccountFn: proc(eAddr: EthAddress) =
lc.deleteAccount(eAddr),
disposeFn: proc(sp: LedgerSpRef) =
lc.dispose(sp.savePoint),
getAndClearLogEntriesFn: proc(): seq[Log] =
lc.getAndClearLogEntries(),
getBalanceFn: proc(eAddr: EthAddress): UInt256 =
lc.getBalance(eAddr),
getCodeFn: proc(eAddr: EthAddress): Blob =
lc.getCode(eAddr),
getCodeHashFn: proc(eAddr: EthAddress): Hash256 =
lc.getCodeHash(eAddr),
getCodeSizeFn: proc(eAddr: EthAddress): int =
lc.getCodeSize(eAddr),
getCommittedStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
noRlpException "getCommittedStorage()":
result = lc.getCommittedStorage(eAddr, slot)
discard,
getNonceFn: proc(eAddr: EthAddress): AccountNonce =
lc.getNonce(eAddr),
getStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
noRlpException "getStorageFn()":
result = lc.getStorage(eAddr, slot)
discard,
getStorageRootFn: proc(eAddr: EthAddress): Hash256 =
lc.getStorageRoot(eAddr),
getTransientStorageFn: proc(eAddr: EthAddress, slot: UInt256): UInt256 =
lc.getTransientStorage(eAddr, slot),
contractCollisionFn: proc(eAddr: EthAddress): bool =
lc.contractCollision(eAddr),
inAccessListFn: proc(eAddr: EthAddress): bool =
lc.inAccessList(eAddr),
inAccessList2Fn: proc(eAddr: EthAddress, slot: UInt256): bool =
lc.inAccessList(eAddr, slot),
incNonceFn: proc(eAddr: EthAddress) =
lc.incNonce(eAddr),
isDeadAccountFn: proc(eAddr: EthAddress): bool =
lc.isDeadAccount(eAddr),
isEmptyAccountFn: proc(eAddr: EthAddress): bool =
lc.isEmptyAccount(eAddr),
isTopLevelCleanFn: proc(): bool =
lc.isTopLevelClean(),
logEntriesFn: proc(): seq[Log] =
lc.logEntries(),
makeMultiKeysFn: proc(): MultiKeysRef =
lc.makeMultiKeys(),
persistFn: proc(clearEmptyAccount: bool, clearCache: bool) =
lc.persist(clearEmptyAccount, clearCache),
ripemdSpecialFn: proc() =
lc.ripemdSpecial(),
rollbackFn: proc(sp: LedgerSpRef) =
lc.rollback(sp.savePoint),
safeDisposeFn: proc(sp: LedgerSpRef) =
if not sp.isNil:
lc.safeDispose(sp.savePoint)
discard,
selfDestructFn: proc(eAddr: EthAddress) =
lc.selfDestruct(eAddr),
selfDestruct6780Fn: proc(eAddr: EthAddress) =
lc.selfDestruct6780(eAddr),
selfDestructLenFn: proc(): int =
lc.selfDestructLen(),
setBalanceFn: proc(eAddr: EthAddress, balance: UInt256) =
lc.setBalance(eAddr, balance),
setCodeFn: proc(eAddr: EthAddress, code: Blob) =
lc.setCode(eAddr, code),
setNonceFn: proc(eAddr: EthAddress, nonce: AccountNonce) =
lc.setNonce(eAddr, nonce),
setStorageFn: proc(eAddr: EthAddress, slot, val: UInt256) =
noRlpException "setStorage()":
lc.setStorage(eAddr, slot, val)
discard,
setTransientStorageFn: proc(eAddr: EthAddress, slot, val: UInt256) =
lc.setTransientStorage(eAddr, slot, val),
# Renamed from `rootHashFn`
stateFn: proc(): Hash256 =
lc.rootHash(),
subBalanceFn: proc(eAddr: EthAddress, delta: UInt256) =
lc.subBalance(eAddr, delta),
getAccessListFn: proc(): common.AccessList =
lc.getAccessList())
proc ledgerExtras(lc: impl.AccountsCache): LedgerExtras =
LedgerExtras(
getMptFn: proc(): CoreDbMptRef =
lc.rawTrie.mpt,
rawRootHashFn: proc(): Hash256 =
lc.rawTrie.rootHash())
proc newLegacyAccountsCache(
db: CoreDbRef;
root: Hash256;
pruneTrie: bool): LedgerRef =
## Constructor
let lc = impl.AccountsCache.init(db, root, pruneTrie)
wrp.AccountsCache(
ldgType: LegacyAccountsCache,
ac: lc,
extras: lc.ledgerExtras(),
methods: lc.ledgerMethods()).bless db
# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------
iterator accountsIt*(lc: wrp.AccountsCache): Account =
for w in lc.ac.accounts():
yield w
iterator addressesIt*(lc: wrp.AccountsCache): EthAddress =
for w in lc.ac.addresses():
yield w
iterator cachedStorageIt*(
lc: wrp.AccountsCache;
eAddr: EthAddress;
): (UInt256,UInt256) =
for w in lc.ac.cachedStorage(eAddr):
yield w
iterator pairsIt*(lc: wrp.AccountsCache): (EthAddress,Account) =
for w in lc.ac.pairs():
yield w
iterator storageIt*(
lc: wrp.AccountsCache;
eAddr: EthAddress;
): (UInt256,UInt256)
{.gcsafe, raises: [CoreDbApiError].} =
noRlpException "storage()":
for w in lc.ac.storage(eAddr):
yield w
# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------
proc init*(
T: type wrp.AccountsCache;
db: CoreDbRef;
root: Hash256;
pruneTrie: bool): LedgerRef =
db.newLegacyAccountsCache(root, pruneTrie)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,22 +0,0 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import
../accounts_cache as impl,
../base/base_desc
type
AccountsCache* = ref object of LedgerRef
ac*: impl.AccountsCache
SavePoint* = ref object of LedgerSpRef
sp*: impl.SavePoint
# End

View File

@ -180,8 +180,8 @@ proc ledgerExtras(lc: impl.AccountsLedgerRef): LedgerExtras =
proc newAccountsLedgerRef( proc newAccountsLedgerRef(
db: CoreDbRef; db: CoreDbRef;
root: Hash256; root: Hash256;
pruneTrie: bool): LedgerRef = ): LedgerRef =
let lc = impl.AccountsLedgerRef.init(db, root, pruneTrie) let lc = impl.AccountsLedgerRef.init(db, root)
wrp.AccountsLedgerRef( wrp.AccountsLedgerRef(
ldgType: LedgerCache, ldgType: LedgerCache,
ac: lc, ac: lc,
@ -214,8 +214,7 @@ iterator pairsIt*(lc: wrp.AccountsLedgerRef): (EthAddress,Account) =
iterator storageIt*( iterator storageIt*(
lc: wrp.AccountsLedgerRef; lc: wrp.AccountsLedgerRef;
eAddr: EthAddress; eAddr: EthAddress;
): (UInt256,UInt256) ): (UInt256,UInt256) =
{.gcsafe, raises: [CoreDbApiError].} =
for w in lc.ac.storage(eAddr): for w in lc.ac.storage(eAddr):
yield w yield w
@ -227,8 +226,9 @@ proc init*(
T: type wrp.AccountsLedgerRef; T: type wrp.AccountsLedgerRef;
db: CoreDbRef; db: CoreDbRef;
root: Hash256; root: Hash256;
pruneTrie: bool): LedgerRef = pruneTrie = false;
db.newAccountsLedgerRef(root, pruneTrie) ): LedgerRef =
db.newAccountsLedgerRef root
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -38,6 +38,7 @@ type
LdgDeleteAccountFn = "deleteAccount" LdgDeleteAccountFn = "deleteAccount"
LdgDisposeFn = "dispose" LdgDisposeFn = "dispose"
LdgGetAccessListFn = "getAcessList" LdgGetAccessListFn = "getAcessList"
LdgGetAccountFn = "getAccount"
LdgGetAndClearLogEntriesFn = "getAndClearLogEntries" LdgGetAndClearLogEntriesFn = "getAndClearLogEntries"
LdgGetBalanceFn = "getBalance" LdgGetBalanceFn = "getBalance"
LdgGetCodeFn = "getCode" LdgGetCodeFn = "getCode"

View File

@ -28,7 +28,6 @@ type
LedgerType* = enum LedgerType* = enum
Ooops = 0 Ooops = 0
LegacyAccountsCache,
LedgerCache LedgerCache
LedgerSpRef* = ref object of RootRef LedgerSpRef* = ref object of RootRef

View File

@ -31,6 +31,7 @@ proc validate*(ldg: LedgerRef) =
doAssert not ldg.methods.commitFn.isNil doAssert not ldg.methods.commitFn.isNil
doAssert not ldg.methods.deleteAccountFn.isNil doAssert not ldg.methods.deleteAccountFn.isNil
doAssert not ldg.methods.disposeFn.isNil doAssert not ldg.methods.disposeFn.isNil
doAssert not ldg.methods.getAccessListFn.isNil
doAssert not ldg.methods.getAndClearLogEntriesFn.isNil doAssert not ldg.methods.getAndClearLogEntriesFn.isNil
doAssert not ldg.methods.getBalanceFn.isNil doAssert not ldg.methods.getBalanceFn.isNil
doAssert not ldg.methods.getCodeFn.isNil doAssert not ldg.methods.getCodeFn.isNil

View File

@ -13,8 +13,7 @@
import import
eth/common, eth/common,
../core_db, ../core_db,
./backend/[accounts_cache, accounts_cache_desc, ./backend/[accounts_ledger, accounts_ledger_desc],
accounts_ledger, accounts_ledger_desc],
./base/api_tracking, ./base/api_tracking,
./base ./base
@ -39,9 +38,6 @@ when LedgerEnableApiTracking:
iterator accounts*(ldg: LedgerRef): Account = iterator accounts*(ldg: LedgerRef): Account =
ldg.beginTrackApi LdgAccountsIt ldg.beginTrackApi LdgAccountsIt
case ldg.ldgType: case ldg.ldgType:
of LegacyAccountsCache:
for w in ldg.AccountsCache.accountsIt():
yield w
of LedgerCache: of LedgerCache:
for w in ldg.AccountsLedgerRef.accountsIt(): for w in ldg.AccountsLedgerRef.accountsIt():
yield w yield w
@ -53,9 +49,6 @@ iterator accounts*(ldg: LedgerRef): Account =
iterator addresses*(ldg: LedgerRef): EthAddress = iterator addresses*(ldg: LedgerRef): EthAddress =
ldg.beginTrackApi LdgAdressesIt ldg.beginTrackApi LdgAdressesIt
case ldg.ldgType: case ldg.ldgType:
of LegacyAccountsCache:
for w in ldg.AccountsCache.addressesIt():
yield w
of LedgerCache: of LedgerCache:
for w in ldg.AccountsLedgerRef.addressesIt(): for w in ldg.AccountsLedgerRef.addressesIt():
yield w yield w
@ -67,9 +60,6 @@ iterator addresses*(ldg: LedgerRef): EthAddress =
iterator cachedStorage*(ldg: LedgerRef, eAddr: EthAddress): (UInt256,UInt256) = iterator cachedStorage*(ldg: LedgerRef, eAddr: EthAddress): (UInt256,UInt256) =
ldg.beginTrackApi LdgCachedStorageIt ldg.beginTrackApi LdgCachedStorageIt
case ldg.ldgType: case ldg.ldgType:
of LegacyAccountsCache:
for w in ldg.AccountsCache.cachedStorageIt(eAddr):
yield w
of LedgerCache: of LedgerCache:
for w in ldg.AccountsLedgerRef.cachedStorageIt(eAddr): for w in ldg.AccountsLedgerRef.cachedStorageIt(eAddr):
yield w yield w
@ -81,9 +71,6 @@ iterator cachedStorage*(ldg: LedgerRef, eAddr: EthAddress): (UInt256,UInt256) =
iterator pairs*(ldg: LedgerRef): (EthAddress,Account) = iterator pairs*(ldg: LedgerRef): (EthAddress,Account) =
ldg.beginTrackApi LdgPairsIt ldg.beginTrackApi LdgPairsIt
case ldg.ldgType: case ldg.ldgType:
of LegacyAccountsCache:
for w in ldg.AccountsCache.pairsIt():
yield w
of LedgerCache: of LedgerCache:
for w in ldg.AccountsLedgerRef.pairsIt(): for w in ldg.AccountsLedgerRef.pairsIt():
yield w yield w
@ -95,13 +82,9 @@ iterator pairs*(ldg: LedgerRef): (EthAddress,Account) =
iterator storage*( iterator storage*(
ldg: LedgerRef; ldg: LedgerRef;
eAddr: EthAddress; eAddr: EthAddress;
): (UInt256,UInt256) ): (UInt256,UInt256) =
{.gcsafe, raises: [CoreDbApiError].} =
ldg.beginTrackApi LdgStorageIt ldg.beginTrackApi LdgStorageIt
case ldg.ldgType: case ldg.ldgType:
of LegacyAccountsCache:
for w in ldg.AccountsCache.storageIt(eAddr):
yield w
of LedgerCache: of LedgerCache:
for w in ldg.AccountsLedgerRef.storageIt(eAddr): for w in ldg.AccountsLedgerRef.storageIt(eAddr):
yield w yield w

View File

@ -236,8 +236,7 @@ proc delete*(sl: StorageLedger, slot: UInt256) =
iterator storage*( iterator storage*(
al: AccountLedger; al: AccountLedger;
account: CoreDbAccount; account: CoreDbAccount;
): (Blob,Blob) ): (Blob,Blob) =
{.gcsafe, raises: [CoreDbApiError].} =
## For given account, iterate over storage slots ## For given account, iterate over storage slots
const const
info = "storage(): " info = "storage(): "

View File

@ -1,50 +0,0 @@
# Nimbus
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import eth/trie/db
type
CaptureFlags* {.pure.} = enum
PersistPut
PersistDel
DB = TrieDatabaseRef
CaptureDB* = ref object of RootObj
srcDb: DB
dstDb: DB
flags: set[CaptureFlags]
proc get*(db: CaptureDB, key: openArray[byte]): seq[byte] =
result = db.dstDb.get(key)
if result.len != 0: return
result = db.srcDb.get(key)
if result.len != 0:
db.dstDb.put(key, result)
proc put*(db: CaptureDB, key, value: openArray[byte]) =
db.dstDb.put(key, value)
if CaptureFlags.PersistPut in db.flags:
db.srcDb.put(key, value)
proc contains*(db: CaptureDB, key: openArray[byte]): bool =
result = db.srcDb.contains(key)
doAssert(db.dstDb.contains(key) == result)
proc del*(db: CaptureDB, key: openArray[byte]) =
db.dstDb.del(key)
if CaptureFlags.PersistDel in db.flags:
db.srcDb.del(key)
proc newCaptureDB*(srcDb, dstDb: DB, flags: set[CaptureFlags] = {}): CaptureDB =
result.new()
result.srcDb = srcDb
result.dstDb = dstDb
result.flags = flags

View File

@ -59,9 +59,6 @@ type
AccountProof* = seq[MptNodeRlpBytes] AccountProof* = seq[MptNodeRlpBytes]
SlotProof* = seq[MptNodeRlpBytes] SlotProof* = seq[MptNodeRlpBytes]
proc pruneTrie*(db: AccountStateDB): bool =
db.trie.isPruning
proc db*(db: AccountStateDB): CoreDbRef = proc db*(db: AccountStateDB): CoreDbRef =
db.trie.db db.trie.db
@ -75,9 +72,9 @@ proc `rootHash=`*(db: AccountStateDB, root: KeccakHash) =
db.trie = initAccountsTrie(db.trie.db, root, db.trie.isPruning) db.trie = initAccountsTrie(db.trie.db, root, db.trie.isPruning)
proc newAccountStateDB*(backingStore: CoreDbRef, proc newAccountStateDB*(backingStore: CoreDbRef,
root: KeccakHash, pruneTrie: bool): AccountStateDB = root: KeccakHash): AccountStateDB =
result.new() result.new()
result.trie = initAccountsTrie(backingStore, root, pruneTrie) result.trie = initAccountsTrie(backingStore, root)
result.originalRoot = root result.originalRoot = root
#result.transactionID = backingStore.getTransactionID() #result.transactionID = backingStore.getTransactionID()
when aleth_compat: when aleth_compat:

View File

@ -78,7 +78,7 @@ proc new*(
## with the `parent` block header. ## with the `parent` block header.
new result new result
result.init( result.init(
ac = com.ledgerType.init(com.db, parent.stateRoot, com.pruneTrie), ac = com.ledgerType.init(com.db, parent.stateRoot),
parent = parent, parent = parent,
blockCtx = blockCtx, blockCtx = blockCtx,
com = com, com = com,
@ -103,7 +103,7 @@ proc reinit*(self: BaseVMState; ## Object descriptor
com = self.com com = self.com
db = com.db db = com.db
ac = if self.stateDB.rootHash == parent.stateRoot: self.stateDB ac = if self.stateDB.rootHash == parent.stateRoot: self.stateDB
else: com.ledgerType.init(db, parent.stateRoot, com.pruneTrie) else: com.ledgerType.init(db, parent.stateRoot)
flags = self.flags flags = self.flags
self[].reset self[].reset
self.init( self.init(
@ -160,7 +160,7 @@ proc init*(
## It requires the `header` argument properly initalised so that for PoA ## It requires the `header` argument properly initalised so that for PoA
## networks, the miner address is retrievable via `ecRecover()`. ## networks, the miner address is retrievable via `ecRecover()`.
self.init( self.init(
ac = com.ledgerType.init(com.db, parent.stateRoot, com.pruneTrie), ac = com.ledgerType.init(com.db, parent.stateRoot),
parent = parent, parent = parent,
blockCtx = com.blockCtx(header), blockCtx = com.blockCtx(header),
com = com, com = com,
@ -227,7 +227,7 @@ proc statelessInit*(
tracer: TracerRef = nil): bool tracer: TracerRef = nil): bool
{.gcsafe, raises: [CatchableError].} = {.gcsafe, raises: [CatchableError].} =
vmState.init( vmState.init(
ac = com.ledgerType.init(com.db, parent.stateRoot, com.pruneTrie), ac = com.ledgerType.init(com.db, parent.stateRoot),
parent = parent, parent = parent,
blockCtx = com.blockCtx(header), blockCtx = com.blockCtx(header),
com = com, com = com,

View File

@ -148,7 +148,7 @@ proc wdNode(ctx: GraphqlContextRef, wd: Withdrawal): Node =
proc getStateDB(com: CommonRef, header: common.BlockHeader): ReadOnlyStateDB = proc getStateDB(com: CommonRef, header: common.BlockHeader): ReadOnlyStateDB =
## Retrieves the account db from canonical head ## Retrieves the account db from canonical head
## we don't use accounst_cache here because it's read only operations ## we don't use accounst_cache here because it's read only operations
let ac = newAccountStateDB(com.db, header.stateRoot, com.pruneTrie) let ac = newAccountStateDB(com.db, header.stateRoot)
ReadOnlyStateDB(ac) ReadOnlyStateDB(ac)
proc getBlockByNumber(ctx: GraphqlContextRef, number: Node): RespResult = proc getBlockByNumber(ctx: GraphqlContextRef, number: Node): RespResult =

View File

@ -135,10 +135,10 @@ proc setupP2P(nimbus: NimbusNode, conf: NimbusConf,
nimbus.txPool) nimbus.txPool)
of ProtocolFlag.Les: of ProtocolFlag.Les:
nimbus.ethNode.addCapability les nimbus.ethNode.addCapability les
of ProtocolFlag.Snap: #of ProtocolFlag.Snap:
nimbus.ethNode.addSnapHandlerCapability( # nimbus.ethNode.addSnapHandlerCapability(
nimbus.ethNode.peerPool, # nimbus.ethNode.peerPool,
nimbus.chainRef) # nimbus.chainRef)
# Cannot do without minimal `eth` capability # Cannot do without minimal `eth` capability
if ProtocolFlag.Eth notin protocols: if ProtocolFlag.Eth notin protocols:
nimbus.ethNode.addEthHandlerCapability( nimbus.ethNode.addEthHandlerCapability(
@ -157,14 +157,14 @@ proc setupP2P(nimbus: NimbusNode, conf: NimbusConf,
nimbus.fullSyncRef = FullSyncRef.init( nimbus.fullSyncRef = FullSyncRef.init(
nimbus.ethNode, nimbus.chainRef, nimbus.ctx.rng, conf.maxPeers, nimbus.ethNode, nimbus.chainRef, nimbus.ctx.rng, conf.maxPeers,
tickerOK, exCtrlFile) tickerOK, exCtrlFile)
of SyncMode.Snap: #of SyncMode.Snap:
# Minimal capability needed for sync only # # Minimal capability needed for sync only
if ProtocolFlag.Snap notin protocols: # if ProtocolFlag.Snap notin protocols:
nimbus.ethNode.addSnapHandlerCapability( # nimbus.ethNode.addSnapHandlerCapability(
nimbus.ethNode.peerPool) # nimbus.ethNode.peerPool)
nimbus.snapSyncRef = SnapSyncRef.init( # nimbus.snapSyncRef = SnapSyncRef.init(
nimbus.ethNode, nimbus.chainRef, nimbus.ctx.rng, conf.maxPeers, # nimbus.ethNode, nimbus.chainRef, nimbus.ctx.rng, conf.maxPeers,
tickerOK, exCtrlFile) # tickerOK, exCtrlFile)
of SyncMode.Stateless: of SyncMode.Stateless:
# FIXME-Adam: what needs to go here? # FIXME-Adam: what needs to go here?
nimbus.statelessSyncRef = StatelessSyncRef.init() nimbus.statelessSyncRef = StatelessSyncRef.init()
@ -192,7 +192,9 @@ proc setupP2P(nimbus: NimbusNode, conf: NimbusConf,
if conf.maxPeers > 0: if conf.maxPeers > 0:
var waitForPeers = true var waitForPeers = true
case conf.syncMode: case conf.syncMode:
of SyncMode.Snap, SyncMode.Stateless: #of SyncMode.Snap:
# waitForPeers = false
of SyncMode.Stateless:
waitForPeers = false waitForPeers = false
of SyncMode.Full, SyncMode.Default: of SyncMode.Full, SyncMode.Default:
discard discard
@ -283,11 +285,11 @@ proc start(nimbus: NimbusNode, conf: NimbusConf) =
let coreDB = let coreDB =
# Resolve statically for database type # Resolve statically for database type
case conf.chainDbMode: case conf.chainDbMode:
of Prune,Archive: LegacyDbPersistent.newCoreDbRef(string conf.dataDir) of Aristo,AriPrune:
of Aristo: AristoDbRocks.newCoreDbRef(string conf.dataDir) AristoDbRocks.newCoreDbRef(string conf.dataDir)
let com = CommonRef.new( let com = CommonRef.new(
db = coreDB, db = coreDB,
pruneTrie = (conf.chainDbMode == ChainDbMode.Prune), pruneHistory = (conf.chainDbMode == AriPrune),
networkId = conf.networkId, networkId = conf.networkId,
params = conf.networkParams) params = conf.networkParams)
@ -332,8 +334,8 @@ proc start(nimbus: NimbusNode, conf: NimbusConf) =
nimbus.fullSyncRef.start nimbus.fullSyncRef.start
of SyncMode.Stateless: of SyncMode.Stateless:
nimbus.statelessSyncRef.start nimbus.statelessSyncRef.start
of SyncMode.Snap: #of SyncMode.Snap:
nimbus.snapSyncRef.start # nimbus.snapSyncRef.start
if nimbus.state == NimbusState.Starting: if nimbus.state == NimbusState.Starting:
# it might have been set to "Stopping" with Ctrl+C # it might have been set to "Stopping" with Ctrl+C

View File

@ -18,7 +18,7 @@ import
./sync/peers, ./sync/peers,
./sync/beacon, ./sync/beacon,
./sync/legacy, ./sync/legacy,
./sync/snap, # ./sync/snap, # -- todo
./sync/stateless, ./sync/stateless,
./sync/full, ./sync/full,
./beacon/beacon_engine, ./beacon/beacon_engine,
@ -36,7 +36,7 @@ export
peers, peers,
beacon, beacon,
legacy, legacy,
snap, #snap,
stateless, stateless,
full, full,
beacon_engine, beacon_engine,
@ -59,7 +59,7 @@ type
networkLoop*: Future[void] networkLoop*: Future[void]
peerManager*: PeerManagerRef peerManager*: PeerManagerRef
legaSyncRef*: LegacySyncRef legaSyncRef*: LegacySyncRef
snapSyncRef*: SnapSyncRef # snapSyncRef*: SnapSyncRef # -- todo
fullSyncRef*: FullSyncRef fullSyncRef*: FullSyncRef
beaconSyncRef*: BeaconSyncRef beaconSyncRef*: BeaconSyncRef
statelessSyncRef*: StatelessSyncRef statelessSyncRef*: StatelessSyncRef
@ -82,8 +82,8 @@ proc stop*(nimbus: NimbusNode, conf: NimbusConf) {.async, gcsafe.} =
await nimbus.peerManager.stop() await nimbus.peerManager.stop()
if nimbus.statelessSyncRef.isNil.not: if nimbus.statelessSyncRef.isNil.not:
nimbus.statelessSyncRef.stop() nimbus.statelessSyncRef.stop()
if nimbus.snapSyncRef.isNil.not: #if nimbus.snapSyncRef.isNil.not:
nimbus.snapSyncRef.stop() # nimbus.snapSyncRef.stop()
if nimbus.fullSyncRef.isNil.not: if nimbus.fullSyncRef.isNil.not:
nimbus.fullSyncRef.stop() nimbus.fullSyncRef.stop()
if nimbus.beaconSyncRef.isNil.not: if nimbus.beaconSyncRef.isNil.not:

View File

@ -90,7 +90,7 @@ proc setupExpRpc*(com: CommonRef, server: RpcServer) =
proc getStateDB(header: BlockHeader): ReadOnlyStateDB = proc getStateDB(header: BlockHeader): ReadOnlyStateDB =
## Retrieves the account db from canonical head ## Retrieves the account db from canonical head
# we don't use accounst_cache here because it's only read operations # we don't use accounst_cache here because it's only read operations
let ac = newAccountStateDB(chainDB, header.stateRoot, com.pruneTrie) let ac = newAccountStateDB(chainDB, header.stateRoot)
result = ReadOnlyStateDB(ac) result = ReadOnlyStateDB(ac)
server.rpc("exp_getWitnessByBlockNumber") do(quantityTag: BlockTag, statePostExecution: bool) -> seq[byte]: server.rpc("exp_getWitnessByBlockNumber") do(quantityTag: BlockTag, statePostExecution: bool) -> seq[byte]:

View File

@ -72,7 +72,7 @@ proc setupEthRpc*(
proc getStateDB(header: BlockHeader): ReadOnlyStateDB = proc getStateDB(header: BlockHeader): ReadOnlyStateDB =
## Retrieves the account db from canonical head ## Retrieves the account db from canonical head
# we don't use accounst_cache here because it's only read operations # we don't use accounst_cache here because it's only read operations
let ac = newAccountStateDB(chainDB, header.stateRoot, com.pruneTrie) let ac = newAccountStateDB(chainDB, header.stateRoot)
result = ReadOnlyStateDB(ac) result = ReadOnlyStateDB(ac)
proc stateDBFromTag(quantityTag: BlockTag, readOnly = true): ReadOnlyStateDB proc stateDBFromTag(quantityTag: BlockTag, readOnly = true): ReadOnlyStateDB

View File

@ -10,12 +10,12 @@
import import
./handlers/eth as handlers_eth, ./handlers/eth as handlers_eth,
./handlers/setup as handlers_setup, ./handlers/setup as handlers_setup
./handlers/snap as handlers_snap #./handlers/snap as handlers_snap # -- todo
export export
handlers_eth, handlers_setup, handlers_eth, handlers_setup
handlers_snap #handlers_snap
static: static:
type type

View File

@ -46,20 +46,21 @@ proc addEthHandlerCapability*(
# Public functions: convenience mappings for `snap` # Public functions: convenience mappings for `snap`
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
import when false: # needs to be updated
./snap as handlers_snap import
./snap as handlers_snap
proc addSnapHandlerCapability*( proc addSnapHandlerCapability*(
node: EthereumNode; node: EthereumNode;
peerPool: PeerPool; peerPool: PeerPool;
chain = ChainRef(nil); chain = ChainRef(nil);
) = ) =
## Install `snap` handlers,Passing `chein` as `nil` installs the handler ## Install `snap` handlers,Passing `chein` as `nil` installs the handler
## in minimal/outbound mode. ## in minimal/outbound mode.
if chain.isNil: if chain.isNil:
node.addCapability protocol.snap node.addCapability protocol.snap
else: else:
node.addCapability(protocol.snap, SnapWireRef.init(chain, peerPool)) node.addCapability(protocol.snap, SnapWireRef.init(chain, peerPool))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -169,7 +169,7 @@ proc traceTransaction*(com: CommonRef, header: BlockHeader,
# internal transactions: # internal transactions:
let let
saveCtxBefore = setCtx beforeCtx saveCtxBefore = setCtx beforeCtx
stateBefore = AccountsLedgerRef.init(capture.recorder, beforeRoot, com.pruneTrie) stateBefore = AccountsLedgerRef.init(capture.recorder, beforeRoot)
defer: defer:
saveCtxBefore.setCtx().ctx.forget() saveCtxBefore.setCtx().ctx.forget()
@ -208,7 +208,7 @@ proc dumpBlockState*(com: CommonRef, header: BlockHeader, body: BlockBody, dumpS
var var
before = newJArray() before = newJArray()
after = newJArray() after = newJArray()
stateBefore = AccountsLedgerRef.init(capture.recorder, parent.stateRoot, com.pruneTrie) stateBefore = AccountsLedgerRef.init(capture.recorder, parent.stateRoot)
for idx, tx in body.transactions: for idx, tx in body.transactions:
let sender = tx.getSender let sender = tx.getSender

View File

@ -89,7 +89,7 @@ proc calculateTransactionData(
## - root of transactions trie ## - root of transactions trie
## - list of transactions hashes ## - list of transactions hashes
## - total size of transactions in block ## - total size of transactions in block
var tr = newCoreDbRef(LegacyDbMemory).mptPrune var tr = newCoreDbRef(DefaultDbMemory).mptPrune
var txHashes: seq[TxOrHash] var txHashes: seq[TxOrHash]
var txSize: uint64 var txSize: uint64
for i, t in items: for i, t in items:

View File

@ -69,7 +69,7 @@ proc main() =
let let
blockEnv = json.parseFile(paramStr(1)) blockEnv = json.parseFile(paramStr(1))
memoryDB = newCoreDbRef(LegacyDbMemory) memoryDB = newCoreDbRef(DefaultDbMemory)
blockNumber = UInt256.fromHex(blockEnv["blockNumber"].getStr()) blockNumber = UInt256.fromHex(blockEnv["blockNumber"].getStr())
prepareBlockEnv(blockEnv, memoryDB) prepareBlockEnv(blockEnv, memoryDB)

View File

@ -47,7 +47,7 @@ proc dumpDebug(com: CommonRef, blockNumber: UInt256) =
proc main() {.used.} = proc main() {.used.} =
let conf = getConfiguration() let conf = getConfiguration()
let com = CommonRef.new(newCoreDbRef(LegacyDbPersistent, conf.dataDir), false) let com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, conf.dataDir))
if conf.head != 0.u256: if conf.head != 0.u256:
dumpDebug(com, conf.head) dumpDebug(com, conf.head)

View File

@ -38,8 +38,8 @@ proc parseU256(val: string): UInt256 =
proc prepareBlockEnv(parent: BlockHeader, thisBlock: Block): CoreDbRef = proc prepareBlockEnv(parent: BlockHeader, thisBlock: Block): CoreDbRef =
var var
accounts = requestPostState(thisBlock) accounts = requestPostState(thisBlock)
memoryDB = newCoreDbRef LegacyDbMemory memoryDB = newCoreDbRef DefaultDbMemory
accountDB = newAccountStateDB(memoryDB, parent.stateRoot, false) accountDB = newAccountStateDB(memoryDB, parent.stateRoot)
parentNumber = %(parent.blockNumber.prefixHex) parentNumber = %(parent.blockNumber.prefixHex)
for address, account in accounts: for address, account in accounts:
@ -104,7 +104,7 @@ proc huntProblematicBlock(blockNumber: UInt256): ValidationResult =
memoryDB = prepareBlockEnv(parentBlock.header, thisBlock) memoryDB = prepareBlockEnv(parentBlock.header, thisBlock)
# try to execute current block # try to execute current block
com = CommonRef.new(memoryDB, false) com = CommonRef.new(memoryDB)
discard com.db.setHead(parentBlock.header, true) discard com.db.setHead(parentBlock.header, true)

View File

@ -54,8 +54,8 @@ proc main() {.used.} =
let conf = configuration.getConfiguration() let conf = configuration.getConfiguration()
let com = CommonRef.new( let com = CommonRef.new(
newCoreDbRef(LegacyDbPersistent, conf.dataDir), newCoreDbRef(DefaultDbPersistent, conf.dataDir),
false, conf.netId, networkParams(conf.netId)) conf.netId, networkParams(conf.netId))
# move head to block number ... # move head to block number ...
if conf.head != 0.u256: if conf.head != 0.u256:

View File

@ -1,5 +1,5 @@
# Nimbus # Nimbus
# Copyright (c) 2020-2023 Status Research & Development GmbH # Copyright (c) 2020-2024 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -19,7 +19,7 @@ proc generatePrestate*(nimbus, geth: JsonNode, blockNumber: UInt256, parent, hea
headerHash = rlpHash(header) headerHash = rlpHash(header)
var var
chainDB = newCoreDbRef(LegacyDbMemory) chainDB = newCoreDbRef(DefaultDbMemory)
discard chainDB.setHead(parent, true) discard chainDB.setHead(parent, true)
discard chainDB.persistTransactions(blockNumber, body.transactions) discard chainDB.persistTransactions(blockNumber, body.transactions)

View File

@ -52,7 +52,7 @@ proc validateBlock(com: CommonRef, blockNumber: BlockNumber): BlockNumber =
proc main() {.used.} = proc main() {.used.} =
let let
conf = getConfiguration() conf = getConfiguration()
com = CommonRef.new(newCoreDbRef(LegacyDbPersistent, conf.dataDir), false) com = CommonRef.new(newCoreDbRef(DefaultDbPersistent, conf.dataDir))
# move head to block number ... # move head to block number ...
if conf.head == 0.u256: if conf.head == 0.u256:

View File

@ -30,7 +30,7 @@ proc testGetBranch(tester: Tester, rootHash: KeccakHash, testStatusIMPL: var Tes
var wb = initWitnessBuilder(tester.memDB, rootHash, flags) var wb = initWitnessBuilder(tester.memDB, rootHash, flags)
var witness = wb.buildWitness(tester.keys) var witness = wb.buildWitness(tester.keys)
var db = newCoreDbRef(LegacyDbMemory) var db = newCoreDbRef(DefaultDbMemory)
when defined(useInputStream): when defined(useInputStream):
var input = memoryInput(witness) var input = memoryInput(witness)
var tb = initTreeBuilder(input, db, flags) var tb = initTreeBuilder(input, db, flags)
@ -87,8 +87,8 @@ proc setupStateDB(tester: var Tester, wantedState: JsonNode, stateDB: LedgerRef)
proc testBlockWitness(node: JsonNode, rootHash: Hash256, testStatusIMPL: var TestStatus) = proc testBlockWitness(node: JsonNode, rootHash: Hash256, testStatusIMPL: var TestStatus) =
var var
tester = Tester(memDB: newCoreDbRef(LegacyDbMemory)) tester = Tester(memDB: newCoreDbRef(DefaultDbMemory))
ac = AccountsCache.init(tester.memDB, emptyRlpHash, true) ac = LedgerCache.init(tester.memDB, emptyRlpHash)
let root = tester.setupStateDB(node, ac) let root = tester.setupStateDB(node, ac)
if rootHash != emptyRlpHash: if rootHash != emptyRlpHash:

Some files were not shown because too many files have changed in this diff Show More