Jordan Hrycaj 5a5cc6295e
Triggered write event for kvt (#2351)
* bump rocksdb

* Rename `KVT` objects related to filters according to `Aristo` naming

details:
  filter* => delta*
  roFilter => balancer

* Compulsory error handling if `persistent()` fails

* Add return code to `reCentre()`

why:
  Might eventually fail if re-centring is blocked. Some logic will be
  added in subsequent patch sets.

* Add column families from earlier session to rocksdb in opening procedure

why:
  All previously used CFs must be declared when re-opening an existing
  database.

* Update `init()` and add rocksdb `reinit()` methods for changing parameters

why:
  Opening a set of column families (with different open options) must span
  at least the ones that are already on disk.

* Provide write-trigger-event interface into `Aristo` backend

why:
  This allows data from a guest application (think `KVT`) to get synced
  with the write cycle so the guest and `Aristo` save all data
  atomically.

* Use `KVT` with new column family interface from `Aristo`

* Remove obsolete guest interface

* Implement `KVT` piggyback on `Aristo` backend

* CoreDb: Add separate `KVT`/`Aristo` backend mode for debugging

* Remove `rocks_db` import from `persist()` function

why:
  Some systems (in particular `fluffy` and friends) use the `Aristo` memory
  backend emulation and do not link against rocksdb when building the
  application. So this should fix that problem.
2024-06-13 18:15:11 +00:00

88 lines
3.2 KiB
Nim

# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Rocks DB internal driver descriptor
## ===================================
{.push raises: [].}
import
std/os,
eth/common,
rocksdb,
stew/[endians2, keyed_queue],
../../../opts,
../../aristo_desc,
../init_common
type
  RdbWriteEventCb* =
    proc(session: WriteBatchRef): bool {.gcsafe, raises: [].}
      ## Call back closure function that passes the write session handle
      ## to a guest peer right after it was opened. The guest may store any
      ## data on its own column family and return `true` if that worked
      ## all right. Then the `Aristo` handler will store its own columns and
      ## finalise the write session.
      ##
      ## In case of an error when `false` is returned, `Aristo` will abort the
      ## write session and return a session error.

  RdbInst* = object
    admCol*: ColFamilyReadWrite              ## Admin column family handler
    vtxCol*: ColFamilyReadWrite              ## Vertex column family handler
    keyCol*: ColFamilyReadWrite              ## Hash key column family handler
    session*: WriteBatchRef                  ## For batched `put()`
    rdKeyLru*: KeyedQueue[VertexID,HashKey]  ## Read cache
    rdVtxLru*: KeyedQueue[VertexID,VertexRef] ## Read cache
    basePath*: string                        ## Database directory
    opts*: DbOptions                         ## Just a copy here for re-opening
    trgWriteEvent*: RdbWriteEventCb          ## Database piggyback call back handler

  AristoCFs* = enum
    ## Column family symbols/handles and names used on the database
    AdmCF = "AriAdm"                         ## Admin column family name
    VtxCF = "AriVtx"                         ## Vertex column family name
    KeyCF = "AriKey"                         ## Hash key column family name
const
  BaseFolder* = "nimbus"                     ## Same as for Legacy DB
  DataFolder* = "aristo"                     ## Legacy DB has "data"
  RdKeyLruMaxSize* = 4096                    ## Max size of read cache for keys
  RdVtxLruMaxSize* = 2048                    ## Max size of read cache for vertex IDs
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
template logTxt*(info: static[string]): static[string] =
  ## Compile-time helper, prefixes the static string `info` with the
  ## "RocksDB/" module tag for uniform log messages.
  "RocksDB/" & info
template baseDb*(rdb: RdbInst): RocksDbReadWriteRef =
  ## Getter, returns the underlying rocksdb database handle as kept on the
  ## admin column family descriptor.
  rdb.admCol.db
func baseDir*(rdb: RdbInst): string =
  ## Getter, top level database directory, i.e. `<basePath>/nimbus`.
  result = rdb.basePath / BaseFolder
func dataDir*(rdb: RdbInst): string =
  ## Getter, `Aristo` data sub-directory below `baseDir()`,
  ## i.e. `<basePath>/nimbus/aristo`.
  result = baseDir(rdb) / DataFolder
template toOpenArray*(xid: AdminTabID): openArray[byte] =
  ## View the admin table ID as its 8 byte big-endian serialisation,
  ## suitable as a database key.
  toOpenArray(toBytesBE(uint64(xid)), 0, 7)
template toOpenArray*(vid: VertexID): openArray[byte] =
  ## View the vertex ID as its 8 byte big-endian serialisation,
  ## suitable as a database key.
  toOpenArray(toBytesBE(uint64(vid)), 0, 7)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------