Remove obsolete select_backend and fix simulators CI (#2007)

* Remove obsolete select_backend

* Fix copyright year
andri lim 2024-02-04 21:28:20 +07:00 committed by GitHub
parent 9c53c73173
commit a441ec3cb1
21 changed files with 228 additions and 348 deletions

View File

@ -27,7 +27,7 @@ jobs:
id: versions
run: |
sudo apt-get -q update
sudo apt-get install -y libpcre3-dev
sudo apt-get install -y librocksdb-dev libpcre3-dev
getHash() {
git ls-remote "https://github.com/$1" "${2:-HEAD}" | cut -f 1
}
@ -66,6 +66,20 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Restore rocksdb from cache
id: rocksdb-cache
uses: actions/cache@v4
with:
path: rocks-db-cache
key: 'rocksdb-v2'
- name: Build and install rocksdb
run: |
HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1 brew install ccache
echo "/usr/local/opt/ccache/libexec" >> ${GITHUB_PATH}
curl -O -L -s -S https://raw.githubusercontent.com/status-im/nimbus-build-system/master/scripts/build_rocksdb.sh
bash build_rocksdb.sh rocks-db-cache
- name: Get latest nimbus-build-system commit hash
id: versions
run: |
@ -110,18 +124,37 @@ jobs:
- name: Checkout code
uses: actions/checkout@v4
- name: Restore llvm-mingw (Windows) from cache
if: runner.os == 'Windows'
- name: Restore Nim DLLs dependencies from cache
id: windows-dlls-cache
uses: actions/cache@v4
with:
path: external/dlls
# according to docu, idle caches are kept for up to 7 days
# so change dlls# to force new cache contents (for some number #)
key: dlls0
- name: Install DLLs dependencies
if: steps.windows-dlls-cache.outputs.cache-hit != 'true'
run: |
ROCKSDBSUB=x64
DLLPATH=external/dlls
mkdir -p external
curl -L "https://nim-lang.org/download/windeps.zip" -o external/windeps.zip
7z x -y external/windeps.zip -o"$DLLPATH"
# ROCKSDB
curl -L "https://github.com/status-im/nimbus-deps/releases/download/nimbus-deps/nimbus-deps.zip" -o external/nimbus-deps.zip
7z x -y external/nimbus-deps.zip
cp "./$ROCKSDBSUB/librocksdb.dll" "$DLLPATH/librocksdb.dll"
- name: Restore llvm-mingw from cache
id: windows-mingw-cache
uses: actions/cache@v4
with:
path: external/mingw-amd64
key: 'mingw-llvm-17-sim'
- name: Install llvm-mingw dependency (Windows)
if: >
steps.windows-mingw-cache.outputs.cache-hit != 'true' &&
runner.os == 'Windows'
- name: Install llvm-mingw dependency
if: steps.windows-mingw-cache.outputs.cache-hit != 'true'
run: |
mkdir -p external
MINGW_BASE="https://github.com/mstorsjo/llvm-mingw/releases/download/20230905"
@ -130,11 +163,10 @@ jobs:
7z x -y "external/mingw-amd64.zip" -oexternal/mingw-amd64/
mv external/mingw-amd64/**/* ./external/mingw-amd64
- name: Path to cached dependencies (Windows)
if: >
runner.os == 'Windows'
- name: Path to cached dependencies
run: |
echo '${{ github.workspace }}'"/external/mingw-amd64/bin" >> $GITHUB_PATH
echo '${{ github.workspace }}'"/external/dlls" >> $GITHUB_PATH
- name: Get latest nimbus-build-system commit hash
id: versions

View File

@ -17,7 +17,7 @@ USE_SYSTEM_NIM=1
ENV_SCRIPT="vendor/nimbus-build-system/scripts/env.sh"
# nimbus_db_backend:none -> we only use memory db in simulators
NIM_FLAGS="c -d:release -d:nimbus_db_backend:none"
NIM_FLAGS="c -d:release"
${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/engine/engine_sim
${ENV_SCRIPT} nim ${NIM_FLAGS} ${SIM_DIR}/consensus/consensus_sim

View File

@ -25,9 +25,7 @@ import
confutils/std/net
],
eth/[common, net/utils, net/nat, p2p/bootnodes, p2p/enode, p2p/discoveryv5/enr],
"."/[db/select_backend,
constants, vm_compile_info, version
],
"."/[constants, vm_compile_info, version],
common/chain_config
export net
@ -35,12 +33,11 @@ export net
const
# TODO: fix this agent-string format to match other
# eth clients' format
NimbusIdent* = "$# v$# [$#: $#, $#, $#, $#]" % [
NimbusIdent* = "$# v$# [$#: $#, $#, $#]" % [
NimbusName,
NimbusVersion,
hostOS,
hostCPU,
nimbus_db_backend,
VmName,
GitRevision
]
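
For illustration, a minimal Nim sketch of the new six-field agent string (the db-backend slot is gone); all values other than hostOS/hostCPU are hypothetical stand-ins, not the project's real constants:

import std/strutils

# Stand-ins for NimbusName, NimbusVersion, VmName and GitRevision;
# hostOS and hostCPU are the usual system constants.
let ident = "$# v$# [$#: $#, $#, $#]" % [
  "nimbus-eth1", "0.1.0", hostOS, hostCPU, "evmc", "0000000"]
echo ident  # e.g. "nimbus-eth1 v0.1.0 [linux: amd64, evmc, 0000000]"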

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE))
# * MIT license ([LICENSE-MIT](LICENSE-MIT))
@ -11,7 +11,6 @@ import
chronicles,
eth/rlp, stew/io2,
./chain,
../db/select_backend,
../common/common,
../utils/utils

View File

@ -12,39 +12,51 @@
import
eth/trie/db,
eth/db/kvstore,
rocksdb,
"../.."/select_backend,
../base,
./legacy_db
./legacy_db,
../../kvstore_rocksdb
type
LegaPersDbRef = ref object of LegacyDbRef
rdb: RocksStoreRef # for backend access with legacy mode
# No other backend supported
doAssert nimbus_db_backend == "rocksdb"
ChainDB = ref object of RootObj
kv: KvStoreRef
rdb: RocksStoreRef
# TODO KvStore is a virtual interface and TrieDB is a virtual interface - one
# will be enough eventually - unless the TrieDB interface gains operations
# that are not typical to KvStores
proc get(db: ChainDB, key: openArray[byte]): seq[byte] =
var res: seq[byte]
proc onData(data: openArray[byte]) = res = @data
if db.kv.get(key, onData).expect("working database"):
return res
proc put(db: ChainDB, key, value: openArray[byte]) =
db.kv.put(key, value).expect("working database")
proc contains(db: ChainDB, key: openArray[byte]): bool =
db.kv.contains(key).expect("working database")
proc del(db: ChainDB, key: openArray[byte]): bool =
db.kv.del(key).expect("working database")
proc newChainDB(path: string): KvResult[ChainDB] =
let rdb = RocksStoreRef.init(path, "nimbus").valueOr:
return err(error)
ok(ChainDB(kv: kvStore rdb, rdb: rdb))
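
For orientation, a hedged sketch of how these helpers compose; note that newChainDB and the get/put/contains/del wrappers are private to this backend module, so the snippet assumes same-module access and a hypothetical writable path:

# Sketch only; the error string comes from the KvResult returned by newChainDB.
let db = newChainDB("/tmp/nimbus-legacy-test").valueOr:
  raiseAssert "cannot open rocksdb: " & error
db.put([byte 1, 2, 3], [byte 4, 5, 6])
doAssert db.contains([byte 1, 2, 3])
doAssert db.get([byte 1, 2, 3]) == @[byte 4, 5, 6]
discard db.del([byte 1, 2, 3])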
# ------------------------------------------------------------------------------
# Public constructor and low level data retrieval, storage & transaction frame
# ------------------------------------------------------------------------------
proc newLegacyPersistentCoreDbRef*(path: string): CoreDbRef =
# Kludge: Compiler bails out on `results.tryGet()` with
# ::
# fatal.nim(54) sysFatal
# Error: unhandled exception: types.nim(1251, 10) \
# `b.kind in {tyObject} + skipPtrs` [AssertionDefect]
#
# when running `select_backend.newChainDB(path)`. The culprit seems to be
# the `ResultError` exception (or any other `CatchableError`). So this is
# converted to a `Defect`.
var backend: ChainDB
try:
{.push warning[Deprecated]: off.}
backend = newChainDB path
{.pop.}
except CatchableError as e:
let msg = "DB initialisation error(" & $e.name & "): " & e.msg
# when running `newChainDB(path)`; the error is converted to a `Defect`.
let backend = newChainDB(path).valueOr:
let msg = "DB initialisation : " & error
raise (ref ResultDefect)(msg: msg)
proc done() =

View File

@ -24,29 +24,13 @@
import
../aristo,
./memory_only,
../select_backend
base_iterators_persistent,
./backend/[aristo_rocksdb, legacy_rocksdb]
export
memory_only
# This file is currently inconsistent due to the `dbBackend == rocksdb` hack
# which will be removed, soon (must go to the test base where such a compile
# time flag induced mechanism might be useful.)
#
# The `Aristo` backend has no business with `dbBackend` and will be extended
# in future.
{.warning: "Inconsistent API file needs to be de-uglified".}
# Allow hive sim to compile with dbBackend == none
when dbBackend == rocksdb:
import
base_iterators_persistent,
./backend/[aristo_rocksdb, legacy_rocksdb]
export
base_iterators_persistent,
toRocksStoreRef
memory_only,
base_iterators_persistent,
toRocksStoreRef
proc newCoreDbRef*(
dbType: static[CoreDbType]; # Database type symbol
@ -56,17 +40,14 @@ proc newCoreDbRef*(
##
## Note: Using legacy notation `newCoreDbRef()` rather than
## `CoreDbRef.init()` because of compiler coughing.
when dbBackend == rocksdb:
when dbType == LegacyDbPersistent:
newLegacyPersistentCoreDbRef path
when dbType == LegacyDbPersistent:
newLegacyPersistentCoreDbRef path
elif dbType == AristoDbRocks:
newAristoRocksDbCoreDbRef path
elif dbType == AristoDbRocks:
newAristoRocksDbCoreDbRef path
else:
{.error: "Unsupported dbType for persistent newCoreDbRef()".}
else:
{.error: "Unsupported dbBackend setting for persistent newCoreDbRef()".}
{.error: "Unsupported dbType for persistent newCoreDbRef()".}
proc newCoreDbRef*(
dbType: static[CoreDbType]; # Database type symbol
@ -77,15 +58,11 @@ proc newCoreDbRef*(
##
## Note: Using legacy notation `newCoreDbRef()` rather than
## `CoreDbRef.init()` because of compiler coughing.
when dbBackend == rocksdb:
when dbType == AristoDbRocks:
newAristoRocksDbCoreDbRef(path, qlr)
when dbType == AristoDbRocks:
newAristoRocksDbCoreDbRef(path, qlr)
else:
{.error: "Unsupported dbType for persistent newCoreDbRef()" &
" with qidLayout argument".}
else:
{.error: "Unsupported dbBackend setting for persistent newCoreDbRef()" &
{.error: "Unsupported dbType for persistent newCoreDbRef()" &
" with qidLayout argument".}
# End
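
A short usage sketch of the simplified persistent constructors; the paths are hypothetical and the snippet assumes a module that imports db/core_db/persistent:

let
  legacyDb = newCoreDbRef(LegacyDbPersistent, "/tmp/nimbus-legacy")  # hypothetical path
  aristoDb = newCoreDbRef(AristoDbRocks, "/tmp/nimbus-aristo")       # hypothetical path
# An unsupported dbType now fails with a single compile-time error instead of
# also depending on the removed dbBackend define.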

View File

@ -1,80 +0,0 @@
# Nimbus
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except
# according to those terms.
import strutils, eth/db/kvstore
export kvstore
# Database access layer that turns errors in the database into Defects as the
# code that uses it isn't equipped to handle errors of that kind - this should
# be reconsidered when making more changes here.
type DbBackend* = enum
none,
sqlite,
rocksdb,
lmdb
const
nimbus_db_backend* {.strdefine.} = "rocksdb"
dbBackend* = parseEnum[DbBackend](nimbus_db_backend)
when dbBackend == sqlite:
import eth/db/kvstore_sqlite3 as database_backend
elif dbBackend == rocksdb:
import ./kvstore_rocksdb as database_backend
type
ChainDB* = ref object of RootObj
kv*: KvStoreRef
when dbBackend == rocksdb:
rdb*: RocksStoreRef
# TODO KvStore is a virtual interface and TrieDB is a virtual interface - one
# will be enough eventually - unless the TrieDB interface gains operations
# that are not typical to KvStores
proc get*(db: ChainDB, key: openArray[byte]): seq[byte] =
var res: seq[byte]
proc onData(data: openArray[byte]) = res = @data
if db.kv.get(key, onData).expect("working database"):
return res
proc put*(db: ChainDB, key, value: openArray[byte]) =
db.kv.put(key, value).expect("working database")
proc contains*(db: ChainDB, key: openArray[byte]): bool =
db.kv.contains(key).expect("working database")
proc del*(db: ChainDB, key: openArray[byte]): bool =
db.kv.del(key).expect("working database")
when dbBackend == sqlite:
proc newChainDB*(path: string): ChainDB =
let db = SqStoreRef.init(path, "nimbus").expect("working database")
ChainDB(kv: kvStore db.openKvStore().expect("working database"))
elif dbBackend == rocksdb:
proc newChainDB*(path: string): ChainDB
{.gcsafe, deprecated: "use newCoreDbRef(LegacyDbPersistent,<path>)".} =
let rdb = RocksStoreRef.init(path, "nimbus").tryGet()
ChainDB(kv: kvStore rdb, rdb: rdb)
elif dbBackend == lmdb:
# TODO This implementation has several issues on restricted platforms, possibly
# due to mmap restrictions - see:
# https://github.com/status-im/nim-beacon-chain/issues/732
# https://github.com/status-im/nim-beacon-chain/issues/688
# It also has other issues, including exception safety:
# https://github.com/status-im/nim-beacon-chain/pull/809
{.error: "lmdb deprecated, needs reimplementing".}
elif dbBackend == none:
discard
when dbBackend != none:
export database_backend
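
For context, the removed file's whole mechanism reduces to a compile-time string define; a condensed sketch of that pattern with generic names (not the project's identifiers):

import std/strutils

type Backend = enum none, sqlite, rocksdb, lmdb

const
  backendName {.strdefine.} = "rocksdb"      # overridden with -d:backendName:sqlite
  backend = parseEnum[Backend](backendName)  # resolved entirely at compile time

when backend == rocksdb:
  {.hint: "building with the rocksdb backend".}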

View File

@ -24,7 +24,6 @@ import
./nimbus_desc,
./core/eip4844,
./core/block_import,
./db/select_backend,
./db/core_db/persistent,
./core/clique/clique_desc,
./core/clique/clique_sealer,

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -18,7 +18,7 @@ import
eth/[common/eth_types, p2p],
./core/chain/chain_desc,
./core/executor/process_block,
./db/[core_db, select_backend, ledger],
./db/[core_db, ledger],
./evm/async/[data_sources, operations, data_sources/json_rpc_data_source],
"."/[vm_state, vm_types]

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -9,19 +9,13 @@
# except according to those terms.
import
../db/select_backend,
./handlers/eth as handlers_eth,
./handlers/setup as handlers_setup
./handlers/setup as handlers_setup,
./handlers/snap as handlers_snap
export
handlers_eth, handlers_setup
when dbBackend != select_backend.none:
import
./handlers/snap as handlers_snap
export
handlers_snap
handlers_eth, handlers_setup,
handlers_snap
static:
type

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -12,7 +12,6 @@
import
eth/p2p,
../../db/select_backend,
../../core/[chain, tx_pool],
../protocol,
./eth as handlers_eth
@ -47,21 +46,20 @@ proc addEthHandlerCapability*(
# Public functions: convenience mappings for `snap`
# ------------------------------------------------------------------------------
when dbBackend != select_backend.none:
import
./snap as handlers_snap
import
./snap as handlers_snap
proc addSnapHandlerCapability*(
node: EthereumNode;
peerPool: PeerPool;
chain = ChainRef(nil);
) =
## Install `snap` handlers. Passing `chain` as `nil` installs the handler
## in minimal/outbound mode.
if chain.isNil:
node.addCapability protocol.snap
else:
node.addCapability(protocol.snap, SnapWireRef.init(chain, peerPool))
proc addSnapHandlerCapability*(
node: EthereumNode;
peerPool: PeerPool;
chain = ChainRef(nil);
) =
## Install `snap` handlers. Passing `chain` as `nil` installs the handler
## in minimal/outbound mode.
if chain.isNil:
node.addCapability protocol.snap
else:
node.addCapability(protocol.snap, SnapWireRef.init(chain, peerPool))
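
A brief usage sketch, assuming `node`, `peerPool` and `chain` already exist in the caller:

# Minimal/outbound mode: no chain reference.
node.addSnapHandlerCapability(peerPool)
# Full mode: serve snap requests from the given chain.
node.addSnapHandlerCapability(peerPool, chain)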
# ------------------------------------------------------------------------------
# End

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -14,7 +14,6 @@ import
chronicles,
chronos,
eth/p2p,
../db/select_backend,
../core/chain,
./snap/[worker, worker_desc],
"."/[protocol, sync_sched]

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -14,18 +14,17 @@ import
std/os, # std/[sequtils, strutils],
eth/common/eth_types,
rocksdb,
../../../../db/[kvstore_rocksdb, select_backend]
../../../../db/kvstore_rocksdb
{.push raises: [].}
type
RockyBulkLoadRef* = ref object of RootObj
when select_backend.dbBackend == select_backend.rocksdb:
db: RocksStoreRef
envOption: rocksdb_envoptions_t
importOption: rocksdb_ingestexternalfileoptions_t
writer: rocksdb_sstfilewriter_t
filePath: string
db: RocksStoreRef
envOption: rocksdb_envoptions_t
importOption: rocksdb_ingestexternalfileoptions_t
writer: rocksdb_sstfilewriter_t
filePath: string
csError: string
# ------------------------------------------------------------------------------
@ -38,16 +37,13 @@ proc init*(
envOption: rocksdb_envoptions_t
): T =
## Create a new bulk load descriptor.
when select_backend.dbBackend == select_backend.rocksdb:
result = T(
db: db,
envOption: envOption,
importOption: rocksdb_ingestexternalfileoptions_create())
result = T(
db: db,
envOption: envOption,
importOption: rocksdb_ingestexternalfileoptions_create())
doAssert not result.importOption.isNil
doAssert not envOption.isNil
else:
T(csError: "rocksdb is unsupported")
doAssert not result.importOption.isNil
doAssert not envOption.isNil
proc init*(T: type RockyBulkLoadRef; db: RocksStoreRef): T =
## Variant of `init()`
@ -57,12 +53,10 @@ proc clearCacheFile*(db: RocksStoreRef; fileName: string): bool
{.gcsafe, raises: [OSError].} =
## Remove left-over cache file from an incomplete previous session. The
## return value `true` indicates that a cache file was detected.
discard
when select_backend.dbBackend == select_backend.rocksdb:
let filePath = db.tmpDir / fileName
if filePath.fileExists:
filePath.removeFile
return true
let filePath = db.tmpDir / fileName
if filePath.fileExists:
filePath.removeFile
return true
proc destroy*(rbl: RockyBulkLoadRef) {.gcsafe, raises: [OSError].} =
## Destructor, free memory resources and delete temporary file. This function
@ -73,17 +67,15 @@ proc destroy*(rbl: RockyBulkLoadRef) {.gcsafe, raises: [OSError].} =
## reset and must not be used anymore with any function (different from
## `destroy()`.)
##
discard
when select_backend.dbBackend == select_backend.rocksdb:
if not rbl.writer.isNil:
rbl.writer.rocksdb_sstfilewriter_destroy()
if not rbl.envOption.isNil:
rbl.envOption.rocksdb_envoptions_destroy()
if not rbl.importOption.isNil:
rbl.importOption.rocksdb_ingestexternalfileoptions_destroy()
if 0 < rbl.filePath.len:
rbl.filePath.removeFile
rbl[].reset
if not rbl.writer.isNil:
rbl.writer.rocksdb_sstfilewriter_destroy()
if not rbl.envOption.isNil:
rbl.envOption.rocksdb_envoptions_destroy()
if not rbl.importOption.isNil:
rbl.importOption.rocksdb_ingestexternalfileoptions_destroy()
if 0 < rbl.filePath.len:
rbl.filePath.removeFile
rbl[].reset
# ------------------------------------------------------------------------------
# Public functions, getters
@ -95,17 +87,7 @@ proc lastError*(rbl: RockyBulkLoadRef): string =
proc store*(rbl: RockyBulkLoadRef): RocksDBInstance =
## Provide the descriptor for backend functions as defined in `rocksdb`.
discard
when select_backend.dbBackend == select_backend.rocksdb:
rbl.db.store
proc rocksStoreRef*(db: ChainDb): RocksStoreRef =
## Pull out the underlying rocksdb backend descriptor (if any)
# Current architecture allows only one globally defined persistent type
discard
when select_backend.dbBackend == select_backend.rocksdb:
if not db.isNil:
return db.rdb
rbl.db.store
# ------------------------------------------------------------------------------
# Public functions
@ -115,24 +97,22 @@ proc begin*(rbl: RockyBulkLoadRef; fileName: string): bool =
## Begin a new bulk load session storing data into a temporary cache file
## `fileName`. When finished, this file will be directly imported into the
## database.
discard
when select_backend.dbBackend == select_backend.rocksdb:
rbl.writer = rocksdb_sstfilewriter_create(
rbl.envOption, rbl.db.store.options)
if rbl.writer.isNil:
rbl.csError = "Cannot create sst writer session"
return false
rbl.writer = rocksdb_sstfilewriter_create(
rbl.envOption, rbl.db.store.options)
if rbl.writer.isNil:
rbl.csError = "Cannot create sst writer session"
return false
rbl.csError = ""
let filePath = rbl.db.tmpDir / fileName
var csError: cstring
rbl.writer.rocksdb_sstfilewriter_open(fileName, addr csError)
if not csError.isNil:
rbl.csError = $csError
return false
rbl.csError = ""
let filePath = rbl.db.tmpDir / fileName
var csError: cstring
rbl.writer.rocksdb_sstfilewriter_open(fileName, addr csError)
if not csError.isNil:
rbl.csError = $csError
return false
rbl.filePath = filePath
return true
rbl.filePath = filePath
return true
proc add*(
rbl: RockyBulkLoadRef;
@ -145,16 +125,14 @@ proc add*(
## This function is a wrapper around `rocksdb_sstfilewriter_add()` or
## `rocksdb_sstfilewriter_put()` (strangely enough, there are two functions
## with exactly the same implementation code.)
discard
when select_backend.dbBackend == select_backend.rocksdb:
var csError: cstring
rbl.writer.rocksdb_sstfilewriter_add(
cast[cstring](unsafeAddr key[0]), csize_t(key.len),
cast[cstring](unsafeAddr val[0]), csize_t(val.len),
addr csError)
if csError.isNil:
return true
rbl.csError = $csError
var csError: cstring
rbl.writer.rocksdb_sstfilewriter_add(
cast[cstring](unsafeAddr key[0]), csize_t(key.len),
cast[cstring](unsafeAddr val[0]), csize_t(val.len),
addr csError)
if csError.isNil:
return true
rbl.csError = $csError
proc finish*(
rbl: RockyBulkLoadRef
@ -166,29 +144,26 @@ proc finish*(
##
## If successful, the return value is the size of the SST file used if
## that value is available. Otherwise, `0` is returned.
when select_backend.dbBackend == select_backend.rocksdb:
var csError: cstring
rbl.writer.rocksdb_sstfilewriter_finish(addr csError)
var csError: cstring
rbl.writer.rocksdb_sstfilewriter_finish(addr csError)
if csError.isNil:
rbl.db.store.db.rocksdb_ingest_external_file(
[rbl.filePath].allocCStringArray, 1,
rbl.importOption,
addr csError)
if csError.isNil:
rbl.db.store.db.rocksdb_ingest_external_file(
[rbl.filePath].allocCStringArray, 1,
rbl.importOption,
addr csError)
var
size: int64
f: File
if f.open(rbl.filePath):
size = f.getFileSize
f.close
rbl.destroy()
return ok(size)
if csError.isNil:
var
size: int64
f: File
if f.open(rbl.filePath):
size = f.getFileSize
f.close
rbl.destroy()
return ok(size)
rbl.csError = $csError
err()
rbl.csError = $csError
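
Putting the pieces together, a hedged sketch of the intended begin/add/finish sequence, assuming an already opened `RocksStoreRef` named `rdb` and a hypothetical cache file name:

let rbl = RockyBulkLoadRef.init(rdb)
if rbl.begin("bulk-load.sst"):
  # ... call rbl.add(key, val) for every record to be ingested ...
  if rbl.finish().isOk:
    echo "SST file ingested into rocksdb"
  else:
    echo "bulk load failed: ", rbl.lastError
else:
  echo "cannot start bulk load: ", rbl.lastError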
# ------------------------------------------------------------------------------
# End

View File

@ -20,9 +20,6 @@ import
hexary_interpolate, hexary_inspect, hexary_paths, snapdb_desc,
snapdb_persistent]
import
../../../../db/select_backend
logScope:
topics = "snap-db"
@ -66,14 +63,13 @@ proc persistentAccounts(
): Result[void,HexaryError]
{.gcsafe, raises: [OSError,IOError,KeyError].} =
## Store the accounts trie table in the database
when dbBackend == rocksdb:
if ps.rockDb.isNil:
let rc = db.persistentAccountsPut(ps.kvDb)
if rc.isErr: return rc
else:
let rc = db.persistentAccountsPut(ps.rockDb)
if rc.isErr: return rc
ok()
if ps.rockDb.isNil:
let rc = db.persistentAccountsPut(ps.kvDb)
if rc.isErr: return rc
else:
let rc = db.persistentAccountsPut(ps.rockDb)
if rc.isErr: return rc
ok()
proc collectAccounts(

View File

@ -16,9 +16,6 @@ import
../../range_desc,
"."/[hexary_desc, hexary_error, snapdb_desc, snapdb_persistent]
import
../../../../db/select_backend
logScope:
topics = "snap-db"
@ -49,16 +46,15 @@ proc persistentContracts(
): Result[void,HexaryError]
{.gcsafe, raises: [OSError,IOError,KeyError].} =
## Store contract codes in the permanent database
when dbBackend == rocksdb:
if ps.rockDb.isNil:
let rc = data.persistentContractPut ps.kvDb
if rc.isErr:
return rc
else:
let rc = data.persistentContractPut ps.rockDb
if rc.isErr:
return rc
ok()
if ps.rockDb.isNil:
let rc = data.persistentContractPut ps.kvDb
if rc.isErr:
return rc
else:
let rc = data.persistentContractPut ps.rockDb
if rc.isErr:
return rc
ok()
# ------------------------------------------------------------------------------
# Public constructor

View File

@ -15,7 +15,7 @@ import
chronicles,
eth/[common, p2p, trie/nibbles],
../../../../db/core_db/persistent,
../../../../db/[core_db, select_backend, storage_types],
../../../../db/[core_db, storage_types, kvstore_rocksdb],
../../../protocol,
../../range_desc,
"."/[hexary_desc, hexary_error, hexary_import, hexary_nearby, hexary_paths,
@ -35,9 +35,7 @@ type
## Global, re-usable descriptor
keyMap: Table[RepairKey,uint] ## For debugging only (will go away)
db: CoreDbRef ## General database
# Allow hive sim to compile with dbBackend == none
when dbBackend == rocksdb:
rocky: RocksStoreRef ## Set if rocksdb is available
rocky: RocksStoreRef ## Set if rocksdb is available
SnapDbBaseRef* = ref object of RootRef
## Session descriptor
@ -71,15 +69,14 @@ proc keyPp(a: RepairKey; pv: SnapDbRef): string =
# Private helper
# ------------------------------------------------------------------------------
when dbBackend == rocksdb:
proc clearRockyCacheFile(rocky: RocksStoreRef): bool =
if not rocky.isNil:
# A cache file might hang about from a previous crash
try:
discard rocky.clearCacheFile(RockyBulkCache)
return true
except OSError as e:
error "Cannot clear rocksdb cache", exception=($e.name), msg=e.msg
proc clearRockyCacheFile(rocky: RocksStoreRef): bool =
if not rocky.isNil:
# A cache file might hang about from a previous crash
try:
discard rocky.clearCacheFile(RockyBulkCache)
return true
except OSError as e:
error "Cannot clear rocksdb cache", exception=($e.name), msg=e.msg
# ------------------------------------------------------------------------------
# Public constructor
@ -90,8 +87,7 @@ proc init*(
db: CoreDbRef
): T =
## Main object constructor
when dbBackend == rocksdb:
T(db: db, rocky: db.backend.toRocksStoreRef)
T(db: db, rocky: db.backend.toRocksStoreRef)
proc init*(
T: type HexaryTreeDbRef;
@ -138,14 +134,13 @@ proc hexaDb*(ps: SnapDbBaseRef): HexaryTreeDbRef =
## Getter, low level access to underlying session DB
ps.xDb
when dbBackend == rocksdb:
proc rockDb*(ps: SnapDbBaseRef): RocksStoreRef =
## Getter, low level access to underlying persistent rock DB interface
ps.base.rocky
proc rockDb*(ps: SnapDbBaseRef): RocksStoreRef =
## Getter, low level access to underlying persistent rock DB interface
ps.base.rocky
proc rockDb*(pv: SnapDbRef): RocksStoreRef =
## Getter variant
pv.rocky
proc rockDb*(pv: SnapDbRef): RocksStoreRef =
## Getter variant
pv.rocky
proc kvDb*(ps: SnapDbBaseRef): CoreDbRef =
## Getter, low level access to underlying persistent key-value DB
@ -198,13 +193,11 @@ template toOpenArray*(k: ByteArray33): openArray[byte] =
proc dbBackendRocksDb*(pv: SnapDbRef): bool =
## Returns `true` if rocksdb features are available
when dbBackend == rocksdb:
not pv.rocky.isNil
not pv.rocky.isNil
proc dbBackendRocksDb*(ps: SnapDbBaseRef): bool =
## Returns `true` if rocksdb features are available
when dbBackend == rocksdb:
not ps.base.rocky.isNil
not ps.base.rocky.isNil
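
A small sketch of how callers are expected to use this probe, with `ps` an assumed `SnapDbBaseRef`:

if ps.dbBackendRocksDb():
  discard  # fast path: SST bulk import straight into ps.rockDb
else:
  discard  # generic path: ordinary key-value writes via ps.kvDb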
proc mergeProofs*(
xDb: HexaryTreeDbRef; ## Session database

View File

@ -21,9 +21,6 @@ import
hexary_inspect, hexary_interpolate, hexary_paths, snapdb_desc,
snapdb_persistent]
import
../../../../db/select_backend
logScope:
topics = "snap-db"
@ -63,14 +60,13 @@ proc persistentStorageSlots(
): Result[void,HexaryError]
{.gcsafe, raises: [OSError,IOError,KeyError].} =
## Store the accounts trie table in the database
when dbBackend == rocksdb:
if ps.rockDb.isNil:
let rc = db.persistentStorageSlotsPut(ps.kvDb)
if rc.isErr: return rc
else:
let rc = db.persistentStorageSlotsPut(ps.rockDb)
if rc.isErr: return rc
ok()
if ps.rockDb.isNil:
let rc = db.persistentStorageSlotsPut(ps.kvDb)
if rc.isErr: return rc
else:
let rc = db.persistentStorageSlotsPut(ps.rockDb)
if rc.isErr: return rc
ok()
proc collectStorageSlots(

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -12,7 +12,6 @@
import
eth/[common, p2p],
../../db/select_backend,
../misc/ticker,
../sync_desc,
./worker/get/get_error,

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -60,7 +60,6 @@ import
std/algorithm,
eth/[common, p2p],
unittest2,
../../nimbus/db/select_backend,
../../nimbus/sync/protocol,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[

View File

@ -1,5 +1,5 @@
# Nimbus
# Copyright (c) 2022-2023 Status Research & Development GmbH
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -14,7 +14,6 @@ import
std/[sequtils, tables],
eth/[common, p2p],
unittest2,
../../nimbus/db/select_backend,
../../nimbus/sync/snap/range_desc,
../../nimbus/sync/snap/worker/db/[
hexary_desc, hexary_error, hexary_inspect,