
# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## Rocksdb backend for Aristo DB
## =============================
##
## The iterators provided here are currently available only by direct
## backend access
## ::
## import
## aristo/aristo_init,
## aristo/aristo_init/aristo_rocksdb
##
## let rc = AristoDb.init(BackendRocksDB, "/var/tmp")
## if rc.isOk:
## let be = rc.value.to(RdbBackendRef)
## for (n, key, vtx) in be.walkVtx:
## ...
##
{.push raises: [].}
import
eth/common,
rocksdb,
results,
../aristo_desc,
../aristo_desc/desc_backend,
../aristo_blobify,
./init_common,
./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk],
../../opts
const
  extraTraceMessages = false
    ## Enabled additional logging noise

type
  RdbBackendRef* = ref object of TypedBackendRef
    ## Backend descriptor, extends the typed base with the RocksDB instance.
    rdb: RdbInst                      ## Allows low level access to database

  RdbPutHdlRef = ref object of TypedPutHdlRef
    ## Write-session handle; carries no extra state beyond the typed base.

# Logging machinery is only pulled in when the extra noise is enabled
when extraTraceMessages:
  import chronicles
  logScope:
    topics = "aristo-backend"
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc newSession(db: RdbBackendRef): RdbPutHdlRef =
  ## Create a fresh write-session handle and register it with `db`.
  result = RdbPutHdlRef()
  beginSession(result.TypedPutHdlRef, db)
proc getSession(hdl: PutHdlRef; db: RdbBackendRef): RdbPutHdlRef =
  ## Verify that `hdl` belongs to `db` and down-cast it to the RocksDB
  ## session handle type.
  verifySession(hdl.TypedPutHdlRef, db)
  result = hdl.RdbPutHdlRef
proc endSession(hdl: PutHdlRef; db: RdbBackendRef): RdbPutHdlRef =
  ## Close the session bookkeeping on `hdl` and down-cast it to the RocksDB
  ## session handle type.
  finishSession(hdl.TypedPutHdlRef, db)
  result = hdl.RdbPutHdlRef
# ------------------------------------------------------------------------------
# Private functions: interface
# ------------------------------------------------------------------------------
proc getVtxFn(db: RdbBackendRef): GetVtxFn =
  ## Closure fetching a vertex record from the RocksDB backend.
  result =
    proc(rvid: RootedVertexID): Result[VertexRef,AristoError] =
      # Look up the serialised vertex record on the database
      let vtx = db.rdb.getVtx(rvid).valueOr:
        when extraTraceMessages:
          trace logTxt "getVtxFn() failed", rvid, error=error[0], info=error[1]
        return err(error[0])
      # An invalid record counts as not-found
      if not vtx.isValid:
        return err(GetVtxNotFound)
      ok(vtx)
proc getKeyFn(db: RdbBackendRef): GetKeyFn =
  ## Closure fetching a Merkle hash key from the RocksDB backend.
  result =
    proc(rvid: RootedVertexID): Result[HashKey,AristoError] =
      # Look up the serialised key record on the database
      let key = db.rdb.getKey(rvid).valueOr:
        when extraTraceMessages:
          trace logTxt "getKeyFn: failed", rvid, error=error[0], info=error[1]
        return err(error[0])
      # An invalid record counts as not-found
      if not key.isValid:
        return err(GetKeyNotFound)
      ok(key)
proc getTuvFn(db: RdbBackendRef): GetTuvFn =
  ## Closure fetching the top-used vertex ID from the admin sub-table.
  result =
    proc(): Result[VertexID,AristoError]=
      # Fetch serialised data record.
      let data = db.rdb.getAdm(AdmTabIdTuv).valueOr:
        when extraTraceMessages:
          trace logTxt "getTuvFn: failed", error=error[0], info=error[1]
        return err(error[0])

      # An empty record reads back as the default `VertexID(0)` (see
      # `putTuvFn()` which does not store invalid IDs)
      if data.len == 0:
        return ok VertexID(0)

      # Decode data record
      result = data.deblobify VertexID
proc getLstFn(db: RdbBackendRef): GetLstFn =
  ## Closure fetching the last-saved-state record from the admin sub-table.
  result =
    proc(): Result[SavedState,AristoError]=
      # Read the serialised state record from the admin column family
      let data = db.rdb.getAdm(AdmTabIdLst).valueOr:
        when extraTraceMessages:
          trace logTxt "getLstFn: failed", error=error[0], info=error[1]
        return err(error[0])
      # Turn the raw blob back into a `SavedState`
      deblobify(data, SavedState)
# -------------
proc putBegFn(db: RdbBackendRef): PutBegFn =
  ## Closure starting a new write session on the backend.
  result =
    proc(): Result[PutHdlRef,AristoError] =
      # Open a RocksDB transaction and hand out a typed session handle
      db.rdb.begin()
      ok(db.newSession())
proc putVtxFn(db: RdbBackendRef): PutVtxFn =
  ## Closure queueing a vertex record for storage within a write session.
  result =
    proc(hdl: PutHdlRef; rvid: RootedVertexID; vtx: VertexRef) =
      let session = hdl.getSession db
      # Only the first error of a session is kept
      if not session.error.isNil:
        return
      db.rdb.putVtx(rvid, vtx).isOkOr:
        # Record the failure; `putEndFn()` will roll back the session
        session.error = TypedPutHdlErrRef(
          pfx:  VtxPfx,
          vid:  error[0],
          code: error[1],
          info: error[2])
proc putKeyFn(db: RdbBackendRef): PutKeyFn =
  ## Closure queueing a Merkle hash key for storage within a write session.
  result =
    proc(hdl: PutHdlRef; rvid: RootedVertexID, key: HashKey) =
      let session = hdl.getSession db
      # Only the first error of a session is kept
      if not session.error.isNil:
        return
      db.rdb.putKey(rvid, key).isOkOr:
        # Record the failure; `putEndFn()` will roll back the session
        session.error = TypedPutHdlErrRef(
          pfx:  KeyPfx,
          vid:  error[0],
          code: error[1],
          info: error[2])
proc putTuvFn(db: RdbBackendRef): PutTuvFn =
  ## Closure storing the top-used vertex ID within a write session.
  result =
    proc(hdl: PutHdlRef; vs: VertexID) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        # Invalid (zero) IDs are not stored; an absent/empty record reads
        # back as `VertexID(0)` in `getTuvFn()`
        if vs.isValid:
          db.rdb.putAdm(AdmTabIdTuv, vs.blobify.data()).isOkOr:
            hdl.error = TypedPutHdlErrRef(
              pfx:  AdmPfx,
              aid:  AdmTabIdTuv,
              code: error[1],
              info: error[2])
            return
proc putLstFn(db: RdbBackendRef): PutLstFn =
  ## Closure storing the last-saved-state record within a write session.
  result =
    proc(hdl: PutHdlRef; lst: SavedState) =
      let session = hdl.getSession db
      # Only the first error of a session is kept
      if not session.error.isNil:
        return
      # Serialise first; a failure here aborts before touching the database
      let data = lst.blobify.valueOr:
        session.error = TypedPutHdlErrRef(
          pfx:  AdmPfx,
          aid:  AdmTabIdLst,
          code: error)
        return
      db.rdb.putAdm(AdmTabIdLst, data).isOkOr:
        session.error = TypedPutHdlErrRef(
          pfx:  AdmPfx,
          aid:  AdmTabIdLst,
          code: error[1],
          info: error[2])
proc putEndFn(db: RdbBackendRef): PutEndFn =
  ## Closure closing a write session: commits the queued changes, or rolls
  ## the transaction back if any error was recorded during the session.
  result =
    proc(hdl: PutHdlRef): Result[void,AristoError] =
      let session = hdl.endSession db

      if not session.error.isNil:
        when extraTraceMessages:
          case session.error.pfx:
          of VtxPfx, KeyPfx: trace logTxt "putEndFn: vtx/key failed",
            pfx=session.error.pfx, vid=session.error.vid,
            error=session.error.code
          of AdmPfx: trace logTxt "putEndFn: admin failed",
            pfx=AdmPfx, aid=session.error.aid.uint64, error=session.error.code
          of Oops: trace logTxt "putEndFn: oops",
            pfx=session.error.pfx, error=session.error.code
        # Discard everything queued during this session
        db.rdb.rollback()
        return err(session.error.code)

      # No error recorded -- try to persist the changes
      db.rdb.commit().isOkOr:
        when extraTraceMessages:
          trace logTxt "putEndFn: failed", error=($error[0]), info=error[1]
        return err(error[0])
      ok()
proc closeFn(db: RdbBackendRef): CloseFn =
  ## Closure shutting down the RocksDB instance.
  result =
    proc(eradicate: bool) =
      # `eradicate` is forwarded to `destroy()` (presumably removing the
      # on-disk data as well -- see `rdb_init`)
      db.rdb.destroy(eradicate)
# ------------------------------------------------------------------------------
# Private functions: hosting interface changes
# ------------------------------------------------------------------------------
proc putBegHostingFn(db: RdbBackendRef): PutBegFn =
  ## Variant of `putBegFn()` for a hosting backend: the guest write-event
  ## trigger may veto the session right after the transaction is opened.
  result =
    proc(): Result[PutHdlRef,AristoError] =
      db.rdb.begin()
      if not db.rdb.trgWriteEvent(db.rdb.session):
        # Guest refused -- undo the transaction and report failure
        when extraTraceMessages:
          trace logTxt "putBegFn: guest trigger aborted session"
        db.rdb.rollback()
        return err(RdbGuestInstanceAborted)
      ok db.newSession()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc rocksDbBackend*(
    path: string;
    opts: DbOptions;
    dbOpts: DbOptionsRef;
    cfOpts: ColFamilyOptionsRef;
    guestCFs: openArray[ColFamilyDescriptor];
      ): Result[(BackendRef, seq[ColFamilyReadWrite]),AristoError] =
  ## Constructor for a RocksDB backed `Aristo` backend descriptor. On
  ## success, the descriptor is returned together with the read-write
  ## handles for the guest column families passed in via `guestCFs`.
  let db = RdbBackendRef(
    beKind: BackendRocksDB)

  # Initialise RocksDB
  let oCfs = block:
    let rc = db.rdb.init(path, opts, dbOpts, cfOpts, guestCFs)
    if rc.isErr:
      when extraTraceMessages:
        trace logTxt "constructor failed",
          error=rc.error[0], info=rc.error[1]
      return err(rc.error[0])
    rc.value()

  # Wire up the getter closures
  db.getVtxFn = getVtxFn db
  db.getKeyFn = getKeyFn db
  db.getTuvFn = getTuvFn db
  db.getLstFn = getLstFn db

  # Wire up the session/putter closures
  db.putBegFn = putBegFn db
  db.putVtxFn = putVtxFn db
  db.putKeyFn = putKeyFn db
  db.putTuvFn = putTuvFn db
  db.putLstFn = putLstFn db
  db.putEndFn = putEndFn db

  db.closeFn = closeFn db
  ok((db, oCfs))
proc rocksDbSetEventTrigger*(
    be: BackendRef;
    hdl: RdbWriteEventCb;
      ): Result[void,AristoError] =
  ## Store event trigger. This also changes the backend type.
  if hdl.isNil:
    return err(RdbBeWrTriggerNilFn)
  # Install the trigger and switch to the hosting session starter
  let db = RdbBackendRef(be)
  db.rdb.trgWriteEvent = hdl
  db.beKind = BackendRdbHosting
  db.putBegFn = putBegHostingFn db
  ok()
proc dup*(db: RdbBackendRef): RdbBackendRef =
  ## Duplicate descriptor shell as needed for API debugging. The underlying
  ## `rdb` instance is shared, not copied.
  result = RdbBackendRef()
  init_common.init(result[], db[])
  result.rdb = db.rdb
# ------------------------------------------------------------------------------
# Public iterators (needs direct backend access)
# ------------------------------------------------------------------------------
iterator walkVtx*(
    be: RdbBackendRef;
      ): tuple[evid: RootedVertexID, vtx: VertexRef] =
  ## Variant of `walk()` iteration over the vertex sub-table.
  for (vid, rec) in be.rdb.walkVtx:
    yield (vid, rec)
iterator walkKey*(
    be: RdbBackendRef;
      ): tuple[rvid: RootedVertexID, key: HashKey] =
  ## Variant of `walk()` iteration over the Merkle hash sub-table.
  for (rvid, data) in be.rdb.walkKey:
    # Skip records that do not decode to a valid `HashKey`
    let lid = HashKey.fromBytes(data).valueOr:
      continue
    yield (rvid, lid)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------