mirror of https://github.com/status-im/nimbus-eth1.git
synced 2025-02-24 09:48:24 +00:00
Triggered write event for kvt (#2351)
* Bump rocksdb

* Rename `KVT` objects related to filters according to `Aristo` naming
  details:
    filter* => delta*
    roFilter => balancer

* Compulsory error handling if `persistent()` fails

* Add return code to `reCentre()`
  why:
    Might eventually fail if re-centring is blocked. Some logic will be
    added in subsequent patch sets.

* Add column families from an earlier session to rocksdb in the opening
  procedure
  why:
    All previously used CFs must be declared when re-opening an existing
    database.

* Update `init()` and add rocksdb `reinit()` methods for changing parameters
  why:
    Opening a set of column families (with different open options) must
    span at least the ones that are already on disk.

* Provide write-trigger-event interface into `Aristo` backend
  why:
    This allows data from a guest application (think `KVT`) to be saved in
    sync with the write cycle, so the guest and `Aristo` save atomically.

* Use `KVT` with the new column family interface from `Aristo`

* Remove obsolete guest interface

* Implement `KVT` piggyback on `Aristo` backend

* CoreDb: Add separate `KVT`/`Aristo` backend mode for debugging

* Remove `rocks_db` import from `persist()` function
  why:
    Some systems (e.g. `fluffy` and friends) use the `Aristo` memory
    backend emulation and do not link against rocksdb when building the
    application, so this should fix that problem.
This commit is contained in:
parent 23027baf30
commit 5a5cc6295e
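The core of the change is the new write-trigger hook on the `Aristo` RocksDB backend, which `KVT` uses so both stores commit in one write batch. A minimal sketch of the intended wiring, assuming the `reinit()`/`activateWrTrigger()` API introduced by this commit (the guest column family name "KvtGen" is illustrative only):

    # Sketch only: hook a guest column family into the Aristo write cycle.
    import results, rocksdb

    const GuestCF = "KvtGen"           # made-up guest CF name

    proc hookGuest(adb: AristoDbRef): Result[void,AristoError] =
      # Reserve one guest column family next to Aristo's own "Ari*" columns
      discard ? adb.reinit(@[initColFamilyDescriptor GuestCF])
      # Register a handler that rides on Aristo's write batch
      ? adb.activateWrTrigger(proc(session: WriteBatchRef): bool =
        session.put([1u8], [2u8], GuestCF).isOk)
      ok()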
@@ -140,7 +140,8 @@ proc persistBlocksImpl(
   dbTx.commit()

   # Save and record the block number before the last saved block state.
-  c.db.persistent(toBlock)
+  c.db.persistent(toBlock).isOkOr:
+    return err("Failed to save state: " & $error)

   if c.com.pruneHistory:
     # There is a feature for test systems to regularly clean up older blocks
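The `isOkOr:` form adopted above comes from the `results` library: it unwraps a `Result` in place and runs the attached block, with the failure value injected as `error`, when the call did not succeed. A self-contained illustration (the `save` helper is hypothetical):

    import results

    proc save(flag: bool): Result[void,string] =
      if flag: ok() else: err("disk full")

    proc demo(): Result[void,string] =
      save(true).isOkOr:
        # `error` is injected by the template when save() fails
        return err("Failed to save state: " & error)
      ok()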
@@ -300,7 +300,8 @@ type

   AristoApiReCentreFn* =
     proc(db: AristoDbRef;
-        ) {.noRaise.}
+        ): Result[void,AristoError]
+          {.noRaise.}
       ## Re-focus the `db` argument descriptor so that it becomes the centre.
       ## Nothing is done if the `db` descriptor is the centre, already.
       ##

@@ -494,7 +495,7 @@ proc dup(be: BackendRef): BackendRef =
   of BackendMemory:
     return MemBackendRef(be).dup

-  of BackendRocksDB:
+  of BackendRocksDB, BackendRdbHosting:
     when AristoPersistentBackendOk:
       return RdbBackendRef(be).dup

@@ -705,9 +706,9 @@ func init*(
       result = api.persist(a, b, c)

   profApi.reCentre =
-    proc(a: AristoDbRef) =
+    proc(a: AristoDbRef): auto =
       AristoApiProfReCentreFn.profileRunner:
-        api.reCentre(a)
+        result = api.reCentre(a)

   profApi.rollback =
     proc(a: AristoTxRef): auto =

@@ -74,7 +74,7 @@ proc checkBE*(
   case db.backend.kind:
   of BackendMemory:
     return MemBackendRef.checkBE(db, cache=cache, relax=relax)
-  of BackendRocksDB:
+  of BackendRocksDB, BackendRdbHosting:
     return RdbBackendRef.checkBE(db, cache=cache, relax=relax)
   of BackendVoid:
     return VoidBackendRef.checkBE(db, cache=cache, relax=relax)

@@ -733,7 +733,7 @@ proc pp*(
   case be.kind:
   of BackendMemory:
     result &= be.MemBackendRef.ppBe(db, limit, indent+1)
-  of BackendRocksDB:
+  of BackendRocksDB, BackendRdbHosting:
     result &= be.RdbBackendRef.ppBe(db, limit, indent+1)
   of BackendVoid:
     result &= "<NoBackend>"
@@ -60,9 +60,9 @@ proc deltaPersistent*(
   if db != parent:
     if not reCentreOk:
       return err(FilBackendRoMode)
-    db.reCentre
+    ? db.reCentre()
     # Always re-centre to `parent` (in case `reCentreOk` was set)
-    defer: parent.reCentre
+    defer: discard parent.reCentre()

   # Initialise peer filter balancer.
   let updateSiblings = ? UpdateSiblingsRef.init db

@@ -74,7 +74,7 @@ proc deltaPersistent*(
     serial: nxtFid)

   # Store structural single trie entries
-  let writeBatch = be.putBegFn()
+  let writeBatch = ? be.putBegFn()
   be.putVtxFn(writeBatch, db.balancer.sTab.pairs.toSeq)
   be.putKeyFn(writeBatch, db.balancer.kMap.pairs.toSeq)
   be.putTuvFn(writeBatch, db.balancer.vTop)
@@ -58,8 +58,7 @@ type
     centre: AristoDbRef               ## Link to peer with write permission
     peers: HashSet[AristoDbRef]       ## List of all peers

-  AristoDbRef* = ref AristoDbObj
-  AristoDbObj* = object
+  AristoDbRef* = ref object
     ## Three tier database object supporting distributed instances.
     top*: LayerRef                    ## Database working layer, mutable
     stack*: seq[LayerRef]             ## Stashed immutable parent layers

@@ -150,7 +149,7 @@ func getCentre*(db: AristoDbRef): AristoDbRef =
   ##
   if db.dudes.isNil: db else: db.dudes.centre

-proc reCentre*(db: AristoDbRef) =
+proc reCentre*(db: AristoDbRef): Result[void,AristoError] =
   ## Re-focus the `db` argument descriptor so that it becomes the centre.
   ## Nothing is done if the `db` descriptor is the centre, already.
   ##
@@ -166,6 +165,7 @@ proc reCentre*(db: AristoDbRef) =
   ##
   if not db.dudes.isNil:
     db.dudes.centre = db
+  ok()

 proc fork*(
     db: AristoDbRef;
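With `reCentre()` now returning `Result[void,AristoError]`, call sites either propagate the failure with `?` or must discard it explicitly, as in the `deltaPersistent()` hunk above. A condensed sketch of that caller pattern (the proc name is hypothetical):

    proc withCentre(db, parent: AristoDbRef): Result[void,AristoError] =
      ? db.reCentre()                   # propagate the AristoError on failure
      defer: discard parent.reCentre()  # a defer cannot return, so discard
      ok()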
@@ -48,7 +48,7 @@ type
     ## by any library function using the backend.

   PutBegFn* =
-    proc(): PutHdlRef {.gcsafe, raises: [].}
+    proc(): Result[PutHdlRef,AristoError] {.gcsafe, raises: [].}
       ## Generic transaction initialisation function

   PutVtxFn* =

@@ -81,20 +81,6 @@ type

   # -------------

-  GuestDbFn* =
-    proc(instance: int): Result[RootRef,AristoError] {.gcsafe, raises: [].}
-      ## Generic function that returns a compartmentalised database handle that
-      ## can be used by another application. If non-nil, this handle allows to
-      ## use a previously allocated database. It is separated from the `Aristo`
-      ## columns.
-      ##
-      ## A successful return value might be `nil` if this feature is
-      ## unsupported.
-      ##
-      ## Caveat:
-      ##   The guest database is closed automatically when closing the `Aristo`
-      ##   database.
-
   CloseFn* =
     proc(flush: bool) {.gcsafe, raises: [].}
       ## Generic destructor for the `Aristo DB` backend. The argument `flush`

@@ -119,7 +105,6 @@ type
     putLstFn*: PutLstFn               ## Store saved state
     putEndFn*: PutEndFn               ## Commit bulk store session

-    guestDbFn*: GuestDbFn             ## Piggyback DB for another application
     closeFn*: CloseFn                 ## Generic destructor

 proc init*(trg: var BackendObj; src: BackendObj) =

@@ -135,7 +120,6 @@ proc init*(trg: var BackendObj; src: BackendObj) =
   trg.putLstFn = src.putLstFn
   trg.putEndFn = src.putEndFn

-  trg.guestDbFn = src.guestDbFn
   trg.closeFn = src.closeFn

 # ------------------------------------------------------------------------------
@@ -249,9 +249,14 @@ type
   RdbBeDriverGetVtxError
   RdbBeDriverGuestError
   RdbBeDriverPutAdmError
-  RdbBeDriverPutVtxError
   RdbBeDriverPutKeyError
+  RdbBeDriverPutVtxError
   RdbBeDriverWriteError
+  RdbBeTypeUnsupported
+  RdbBeWrSessionUnfinished
+  RdbBeWrTriggerActiveAlready
+  RdbBeWrTriggerNilFn
+  RdbGuestInstanceAborted
   RdbGuestInstanceUnsupported
   RdbHashKeyExpected

@@ -22,6 +22,7 @@ type
     BackendVoid = 0                   ## For providing backend-less constructor
     BackendMemory
     BackendRocksDB
+    BackendRdbHosting                 ## Allowed piggybacking write session

   StorageType* = enum
     ## Storage types, key prefix
@@ -126,8 +126,8 @@ proc getLstFn(db: MemBackendRef): GetLstFn =

 proc putBegFn(db: MemBackendRef): PutBegFn =
   result =
-    proc(): PutHdlRef =
-      db.newSession()
+    proc(): Result[PutHdlRef,AristoError] =
+      ok db.newSession()


 proc putVtxFn(db: MemBackendRef): PutVtxFn =

@@ -215,11 +215,6 @@ proc putEndFn(db: MemBackendRef): PutEndFn =

 # -------------

-proc guestDbFn(db: MemBackendRef): GuestDbFn =
-  result =
-    proc(instance: int): Result[RootRef,AristoError] =
-      ok(RootRef nil)
-
 proc closeFn(db: MemBackendRef): CloseFn =
   result =
     proc(ignore: bool) =

@@ -246,7 +241,6 @@ proc memoryBackend*(): BackendRef =
   db.putLstFn = putLstFn db
   db.putEndFn = putEndFn db

-  db.guestDbFn = guestDbFn db
   db.closeFn = closeFn db
   db
@@ -14,7 +14,6 @@
 {.push raises: [].}

 import
-  results,
   ../aristo_desc,
   ../aristo_desc/desc_backend,
   "."/[init_common, memory_db]

@@ -85,14 +84,6 @@ proc init*(
   ## Shortcut for `AristoDbRef.init(VoidBackendRef)`
   AristoDbRef.init VoidBackendRef

-proc guestDb*(db: AristoDbRef; instance = 0): Result[GuestDbRef,AristoError] =
-  ## Database pigiback feature
-  if db.backend.isNil:
-    ok(GuestDbRef(nil))
-  else:
-    let gdb = db.backend.guestDbFn(instance).valueOr:
-      return err(error)
-    ok(gdb.GuestDbRef)

 proc finish*(db: AristoDbRef; flush = false) =
   ## Backend destructor. The argument `flush` indicates that a full database
@@ -21,13 +21,15 @@
 import
   results,
   rocksdb,
+  ../../opts,
   ../aristo_desc,
   ./rocks_db/rdb_desc,
-  "."/[rocks_db, memory_only],
-  ../../opts
+  "."/[rocks_db, memory_only]

 export
+  AristoDbRef,
   RdbBackendRef,
+  RdbWriteEventCb,
   memory_only

 # ------------------------------------------------------------------------------
@@ -56,26 +58,68 @@ proc newAristoRdbDbRef(
 # Public database constuctors, destructor
 # ------------------------------------------------------------------------------

-proc init*[W: RdbBackendRef](
+proc init*(
     T: type AristoDbRef;
-    B: type W;
+    B: type RdbBackendRef;
     basePath: string;
     opts: DbOptions
       ): Result[T, AristoError] =
   ## Generic constructor, `basePath` argument is ignored for memory backend
   ## databases (which also unconditionally succeed initialising.)
   ##
-  when B is RdbBackendRef:
-    basePath.newAristoRdbDbRef opts
+  basePath.newAristoRdbDbRef opts

-proc getRocksDbFamily*(
-    gdb: GuestDbRef;
-    instance = 0;
-      ): Result[ColFamilyReadWrite,void] =
-  ## Database pigiback feature
-  if not gdb.isNil and gdb.beKind == BackendRocksDB:
-    return ok RdbGuestDbRef(gdb).guestDb
-  err()
+proc reinit*(
+    db: AristoDbRef;
+    cfs: openArray[ColFamilyDescriptor];
+      ): Result[seq[ColFamilyReadWrite],AristoError] =
+  ## Re-initialise the `RocksDb` backend database with additional or changed
+  ## column family settings. This can be used to make space for guest use of
+  ## the backend used by `Aristo`. The function returns a list of column family
+  ## descriptors in the same order as the `cfs` argument.
+  ##
+  ## The argument `cfs` list replaces and extends the CFs already on disk by
+  ## its options except for the ones defined for use with `Aristo`.
+  ##
+  ## Even though tx layers and filters might not be affected by this function,
+  ## it is prudent to have them clean and saved on the backend database before
+  ## changing it. On error conditions, data might get lost.
+  ##
+  case db.backend.kind:
+  of BackendRocksDB:
+    db.backend.rocksDbUpdateCfs cfs
+  of BackendRdbHosting:
+    err(RdbBeWrTriggerActiveAlready)
+  else:
+    return err(RdbBeTypeUnsupported)
+
+proc activateWrTrigger*(
+    db: AristoDbRef;
+    hdl: RdbWriteEventCb;
+      ): Result[void,AristoError] =
+  ## This function allows to link an application to the `Aristo` storage event
+  ## for the `RocksDb` backend via call back argument function `hdl`.
+  ##
+  ## The argument handler `hdl` of type
+  ## ::
+  ##    proc(session: WriteBatchRef): bool
+  ##
+  ## will be invoked when a write batch for the `Aristo` database is opened in
+  ## order to save current changes to the backend. The `session` argument passed
+  ## to the handler in conjunction with a list of `ColFamilyReadWrite` items
+  ## (as returned from `reinit()`) might be used to store additional items
+  ## to the database with the same write batch.
+  ##
+  ## If the handler returns `true` upon return from running, the write batch
+  ## will proceed saving. Otherwise it is aborted and no data are saved at all.
+  ##
+  case db.backend.kind:
+  of BackendRocksDB:
+    db.backend.rocksDbSetEventTrigger hdl
+  of BackendRdbHosting:
+    err(RdbBeWrTriggerActiveAlready)
+  else:
+    err(RdbBeTypeUnsupported)

 # ------------------------------------------------------------------------------
 # End
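The two new entry points form a small state machine: `activateWrTrigger()` flips the backend from `BackendRocksDB` to `BackendRdbHosting`, after which both a second activation and a `reinit()` are refused. A sketch of that contract (assuming `db` was opened on the RocksDB backend):

    let cb: RdbWriteEventCb = proc(session: WriteBatchRef): bool = true
    let noCfs = newSeq[ColFamilyDescriptor]()

    doAssert db.activateWrTrigger(cb).isOk   # RocksDB -> RdbHosting
    doAssert db.activateWrTrigger(cb).error == RdbBeWrTriggerActiveAlready
    doAssert db.reinit(noCfs).error == RdbBeWrTriggerActiveAlready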
@@ -137,9 +137,9 @@ proc getLstFn(db: RdbBackendRef): GetLstFn =

 proc putBegFn(db: RdbBackendRef): PutBegFn =
   result =
-    proc(): PutHdlRef =
+    proc(): Result[PutHdlRef,AristoError] =
       db.rdb.begin()
-      db.newSession()
+      ok db.newSession()

 proc putVtxFn(db: RdbBackendRef): PutVtxFn =
   result =

@@ -212,7 +212,7 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =
         of AdmPfx: trace logTxt "putEndFn: admin failed",
           pfx=AdmPfx, aid=hdl.error.aid.uint64, error=hdl.error.code
         of Oops: trace logTxt "putEndFn: oops",
-          error=hdl.error.code
+          pfx=hdl.error.pfx, error=hdl.error.code
         db.rdb.rollback()
         return err(hdl.error.code)
@@ -223,20 +223,27 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =
           return err(error[0])
       ok()

-proc guestDbFn(db: RdbBackendRef): GuestDbFn =
-  result =
-    proc(instance: int): Result[RootRef,AristoError] =
-      let gdb = db.rdb.initGuestDb(instance).valueOr:
-        when extraTraceMessages:
-          trace logTxt "guestDbFn", error=error[0], info=error[1]
-        return err(error[0])
-      ok gdb
-
 proc closeFn(db: RdbBackendRef): CloseFn =
   result =
     proc(flush: bool) =
       db.rdb.destroy(flush)

+# ------------------------------------------------------------------------------
+# Private functions: hosting interface changes
+# ------------------------------------------------------------------------------
+
+proc putBegHostingFn(db: RdbBackendRef): PutBegFn =
+  result =
+    proc(): Result[PutHdlRef,AristoError] =
+      db.rdb.begin()
+      if db.rdb.trgWriteEvent(db.rdb.session):
+        ok db.newSession()
+      else:
+        when extraTraceMessages:
+          trace logTxt "putBegFn: guest trigger aborted session"
+        db.rdb.rollback()
+        err(RdbGuestInstanceAborted)
+
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------
@@ -269,10 +276,37 @@ proc rocksDbBackend*(
   db.putLstFn = putLstFn db
   db.putEndFn = putEndFn db

-  db.guestDbFn = guestDbFn db
   db.closeFn = closeFn db
   ok db

+
+proc rocksDbUpdateCfs*(
+    be: BackendRef;
+    cfs: openArray[ColFamilyDescriptor];
+      ): Result[seq[ColFamilyReadWrite],AristoError] =
+  ## Reopen with extended column families given as argument.
+  let
+    db = RdbBackendRef(be)
+    rCfs = db.rdb.reinit(cfs).valueOr:
+      return err(error[0])
+  ok rCfs
+
+
+proc rocksDbSetEventTrigger*(
+    be: BackendRef;
+    hdl: RdbWriteEventCb;
+      ): Result[void,AristoError] =
+  ## Store event trigger. This also changes the backend type.
+  if hdl.isNil:
+    err(RdbBeWrTriggerNilFn)
+  else:
+    let db = RdbBackendRef(be)
+    db.rdb.trgWriteEvent = hdl
+    db.beKind = BackendRdbHosting
+    db.putBegFn = putBegHostingFn db
+    ok()
+
+
 proc dup*(db: RdbBackendRef): RdbBackendRef =
   ## Duplicate descriptor shell as needed for API debugging
   new result
@@ -18,10 +18,22 @@ import
   eth/common,
   rocksdb,
   stew/[endians2, keyed_queue],
+  ../../../opts,
   ../../aristo_desc,
   ../init_common

 type
+  RdbWriteEventCb* =
+    proc(session: WriteBatchRef): bool {.gcsafe, raises: [].}
+      ## Call back closure function that passes the the write session handle
+      ## to a guest peer right after it was opened. The guest may store any
+      ## data on its own column family and return `true` if that worked
+      ## all right. Then the `Aristo` handler will stor its own columns and
+      ## finalise the write session.
+      ##
+      ## In case of an error when `false` is returned, `Aristo` will abort the
+      ## write session and return a session error.
+
   RdbInst* = object
     admCol*: ColFamilyReadWrite        ## Admin column family handler
     vtxCol*: ColFamilyReadWrite        ## Vertex column family handler
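A conforming handler writes its records into the shared batch and reports success; returning `false` makes `Aristo` roll the whole session back. Minimal sketch (the guest column family name is illustrative):

    const GuestCF = "KvtGen"          # illustrative guest CF name

    let hdl: RdbWriteEventCb = proc(session: WriteBatchRef): bool =
      # one piggybacked record; `false` would abort the whole write session
      session.put([0xAAu8], [0x55u8], GuestCF).isOk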
@@ -29,26 +41,18 @@ type
     session*: WriteBatchRef            ## For batched `put()`
     rdKeyLru*: KeyedQueue[VertexID,HashKey]   ## Read cache
     rdVtxLru*: KeyedQueue[VertexID,VertexRef] ## Read cache

     basePath*: string                  ## Database directory
-    noFq*: bool                        ## No filter queues available
+    opts*: DbOptions                   ## Just a copy here for re-opening
+    trgWriteEvent*: RdbWriteEventCb    ## Database piggiback call back handler

-  # Alien interface
-  RdbGuest* = enum
-    ## The guest CF was worth a try, but there are better solutions and this
-    ## item will be removed in future.
-    GuestFamily0 = "Guest0"            ## Guest family (e.g. for Kvt)
-    GuestFamily1 = "Guest1"            ## Ditto
-    GuestFamily2 = "Guest2"            ## Ditto
-
-  RdbGuestDbRef* = ref object of GuestDbRef
-    ## The guest CF was worth a try, but there are better solutions and this
-    ## item will be removed in future.
-    guestDb*: ColFamilyReadWrite       ## Pigiback feature references
+  AristoCFs* = enum
+    ## Column family symbols/handles and names used on the database
+    AdmCF = "AriAdm"                   ## Admin column family name
+    VtxCF = "AriVtx"                   ## Vertex column family name
+    KeyCF = "AriKey"                   ## Hash key column family name

 const
-  AdmCF* = "AdmAri"                    ## Admin column family name
-  VtxCF* = "VtxAri"                    ## Vertex column family name
-  KeyCF* = "KeyAri"                    ## Hash key column family name
   BaseFolder* = "nimbus"               ## Same as for Legacy DB
   DataFolder* = "aristo"               ## Legacy DB has "data"
   RdKeyLruMaxSize* = 4096              ## Max size of read cache for keys
@@ -14,7 +14,7 @@
 {.push raises: [].}

 import
-  std/[sequtils, os],
+  std/[sets, sequtils, os],
   rocksdb,
   results,
   ../../aristo_desc,

@@ -22,27 +22,12 @@ import
   ../../../opts

 # ------------------------------------------------------------------------------
-# Public constructor
+# Private constructor
 # ------------------------------------------------------------------------------

-proc init*(
-    rdb: var RdbInst;
-    basePath: string;
+proc getInitOptions(
     opts: DbOptions;
-      ): Result[void,(AristoError,string)] =
-  ## Constructor code inspired by `RocksStoreRef.init()` from
-  ## kvstore_rocksdb.nim
-  const initFailed = "RocksDB/init() failed"
-
-  rdb.basePath = basePath
-
-  let
-    dataDir = rdb.dataDir
-  try:
-    dataDir.createDir
-  except OSError, IOError:
-    return err((RdbBeCantCreateDataDir, ""))
-
+      ): tuple[cfOpts: ColFamilyOptionsRef, dbOpts: DbOptionsRef] =
   # TODO the configuration options below have not been tuned but are rather
   # based on gut feeling, guesses and by looking at other clients - it
   # would make sense to test different settings and combinations once the
@@ -86,13 +71,7 @@ proc init*(
   # https://github.com/facebook/rocksdb/wiki/Compression
   cfOpts.setBottommostCompression(Compression.lz4Compression)

-  let
-    cfs = @[initColFamilyDescriptor(AdmCF, cfOpts),
-            initColFamilyDescriptor(VtxCF, cfOpts),
-            initColFamilyDescriptor(KeyCF, cfOpts)] &
-          RdbGuest.mapIt(initColFamilyDescriptor($it, cfOpts))
-    dbOpts = defaultDbOptions()
-
+  let dbOpts = defaultDbOptions()
   dbOpts.setMaxOpenFiles(opts.maxOpenFiles)
   dbOpts.setMaxBytesForLevelBase(opts.writeBufferSize)

@@ -110,6 +89,7 @@ proc init*(
   # https://github.com/facebook/rocksdb/blob/af50823069818fc127438e39fef91d2486d6e76c/include/rocksdb/advanced_options.h#L696
   dbOpts.setOptimizeFiltersForHits(true)

+
   let tableOpts = defaultTableOptions()
   # This bloom filter helps avoid having to read multiple SST files when looking
   # for a value.
@@ -141,39 +121,114 @@ proc init*(

   dbOpts.setBlockBasedTableFactory(tableOpts)

-  # Reserve a family corner for `Aristo` on the database
+  (cfOpts,dbOpts)
+
+
+proc initImpl(
+    rdb: var RdbInst;
+    basePath: string;
+    opts: DbOptions;
+    guestCFs: openArray[ColFamilyDescriptor] = [];
+      ): Result[void,(AristoError,string)] =
+  ## Database backend constructor
+  const initFailed = "RocksDB/init() failed"
+
+  rdb.basePath = basePath
+  rdb.opts = opts
+
+  let
+    dataDir = rdb.dataDir
+  try:
+    dataDir.createDir
+  except OSError, IOError:
+    return err((RdbBeCantCreateDataDir, ""))
+
+  # Expand argument `opts` to rocksdb options
+  let (cfOpts, dbOpts) = opts.getInitOptions()
+
+  # Column familiy names to allocate when opening the database. This list
+  # might be extended below.
+  var useCFs = AristoCFs.mapIt($it).toHashSet
+
+  # The `guestCFs` list must not overwrite `AristoCFs` options
+  let guestCFs = guestCFs.filterIt(it.name notin useCFs)
+
+  # If the database exists already, check for missing column families and
+  # allocate them for opening. Otherwise rocksdb might reject the peristent
+  # database.
+  if (dataDir / "CURRENT").fileExists:
+    let hdCFs = dataDir.listColumnFamilies.valueOr:
+      raiseAssert initFailed & " cannot read existing CFs: " & error
+    # Update list of column families for opener.
+    useCFs = useCFs + hdCFs.toHashSet
+
+  # The `guestCFs` list might come with a different set of options. So it is
+  # temporarily removed from `useCFs` and will be re-added with appropriate
+  # options.
+  let guestCFq = @guestCFs
+  useCFs = useCFs - guestCFs.mapIt(it.name).toHashSet
+
+  # Finalise list of column families
+  let cfs = useCFs.toSeq.mapIt(it.initColFamilyDescriptor cfOpts) & guestCFq
+
+  # Open database for the extended family :)
   let baseDb = openRocksDb(dataDir, dbOpts, columnFamilies=cfs).valueOr:
     raiseAssert initFailed & " cannot create base descriptor: " & error

   # Initialise column handlers (this stores implicitely `baseDb`)
-  rdb.admCol = baseDb.withColFamily(AdmCF).valueOr:
+  rdb.admCol = baseDb.withColFamily($AdmCF).valueOr:
     raiseAssert initFailed & " cannot initialise AdmCF descriptor: " & error
-  rdb.vtxCol = baseDb.withColFamily(VtxCF).valueOr:
+  rdb.vtxCol = baseDb.withColFamily($VtxCF).valueOr:
     raiseAssert initFailed & " cannot initialise VtxCF descriptor: " & error
-  rdb.keyCol = baseDb.withColFamily(KeyCF).valueOr:
+  rdb.keyCol = baseDb.withColFamily($KeyCF).valueOr:
     raiseAssert initFailed & " cannot initialise KeyCF descriptor: " & error

   ok()

-proc initGuestDb*(
-    rdb: RdbInst;
-    instance: int;
-      ): Result[RootRef,(AristoError,string)] =
-  ## Initialise `Guest` family
-  ##
-  ## Thus was a worth a try, but there are better solutions and this item
-  ## will be removed in future.
-  ##
-  if high(RdbGuest).ord < instance:
-    return err((RdbGuestInstanceUnsupported,""))
-  let
-    guestSym = $RdbGuest(instance)
-    guestDb = rdb.baseDb.withColFamily(guestSym).valueOr:
-      raiseAssert "RocksDb/initGuestDb() failed: " & error
-
-  ok RdbGuestDbRef(
-    beKind: BackendRocksDB,
-    guestDb: guestDb)
+# ------------------------------------------------------------------------------
+# Public constructor
+# ------------------------------------------------------------------------------
+
+proc init*(
+    rdb: var RdbInst;
+    basePath: string;
+    opts: DbOptions;
+      ): Result[void,(AristoError,string)] =
+  ## Temporarily define a guest CF list here.
+  rdb.initImpl(basePath, opts)
+
+proc reinit*(
+    rdb: var RdbInst;
+    cfs: openArray[ColFamilyDescriptor];
+      ): Result[seq[ColFamilyReadWrite],(AristoError,string)] =
+  ## Re-open database with changed parameters. Even though tx layers and
+  ## filters might not be affected it is prudent to have them clean and
+  ## saved on the backend database before changing it.
+  ##
+  ## The function returns a list of column family descriptors in the same
+  ## order as the `cfs` argument.
+  ##
+  ## The `cfs` list replaces and extends the CFs already on disk by its
+  ## options except for the ones defined with `AristoCFs`.
+  ##
+  const initFailed = "RocksDB/reinit() failed"
+
+  if not rdb.session.isNil:
+    return err((RdbBeWrSessionUnfinished,""))
+  if not rdb.baseDb.isClosed():
+    rdb.baseDb.close()
+
+  rdb.initImpl(rdb.basePath, rdb.opts, cfs).isOkOr:
+    return err(error)
+
+  # Assemble list of column family descriptors
+  var guestCols = newSeq[ColFamilyReadWrite](cfs.len)
+  for n,col in cfs:
+    guestCols[n] = rdb.baseDb.withColFamily(col.name).valueOr:
+      raiseAssert initFailed & " cannot initialise " &
+        col.name & " descriptor: " & error
+
+  ok guestCols
+
+
 proc destroy*(rdb: var RdbInst; flush: bool) =
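Worth noting about `reinit()` above: the write session must be idle, the base descriptor is closed before re-opening, and the returned handles line up one-to-one with the requested descriptors. A usage sketch (error handling abbreviated, CF name illustrative):

    # Ask for one guest column family and keep its read/write handle.
    let cols = rdb.reinit(@[initColFamilyDescriptor "KvtGen"]).valueOr:
      raiseAssert "reinit failed: " & error[1]
    doAssert cols.len == 1            # one handle per requested descriptor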
@@ -72,13 +72,13 @@ proc putAdm*(
       ): Result[void,(AdminTabID,AristoError,string)] =
   let dsc = rdb.session
   if data.len == 0:
-    dsc.delete(xid.toOpenArray, AdmCF).isOkOr:
+    dsc.delete(xid.toOpenArray, $AdmCF).isOkOr:
       const errSym = RdbBeDriverDelAdmError
       when extraTraceMessages:
         trace logTxt "putAdm()", xid, error=errSym, info=error
       return err((xid,errSym,error))
   else:
-    dsc.put(xid.toOpenArray, data, AdmCF).isOkOr:
+    dsc.put(xid.toOpenArray, data, $AdmCF).isOkOr:
       const errSym = RdbBeDriverPutAdmError
       when extraTraceMessages:
         trace logTxt "putAdm()", xid, error=errSym, info=error

@@ -94,7 +94,7 @@ proc putKey*(
   for (vid,key) in data:

     if key.isValid:
-      dsc.put(vid.toOpenArray, key.data, KeyCF).isOkOr:
+      dsc.put(vid.toOpenArray, key.data, $KeyCF).isOkOr:
        # Caller must `rollback()` which will flush the `rdKeyLru` cache
        const errSym = RdbBeDriverPutKeyError
        when extraTraceMessages:

@@ -106,7 +106,7 @@ proc putKey*(
       discard rdb.rdKeyLru.lruAppend(vid, key, RdKeyLruMaxSize)

     else:
-      dsc.delete(vid.toOpenArray, KeyCF).isOkOr:
+      dsc.delete(vid.toOpenArray, $KeyCF).isOkOr:
        # Caller must `rollback()` which will flush the `rdKeyLru` cache
        const errSym = RdbBeDriverDelKeyError
        when extraTraceMessages:

@@ -132,7 +132,7 @@ proc putVtx*(
        # Caller must `rollback()` which will flush the `rdVtxLru` cache
        return err((vid,rc.error,""))

-      dsc.put(vid.toOpenArray, rc.value, VtxCF).isOkOr:
+      dsc.put(vid.toOpenArray, rc.value, $VtxCF).isOkOr:
        # Caller must `rollback()` which will flush the `rdVtxLru` cache
        const errSym = RdbBeDriverPutVtxError
        when extraTraceMessages:

@@ -144,7 +144,7 @@ proc putVtx*(
       discard rdb.rdVtxLru.lruAppend(vid, vtx, RdVtxLruMaxSize)

     else:
-      dsc.delete(vid.toOpenArray, VtxCF).isOkOr:
+      dsc.delete(vid.toOpenArray, $VtxCF).isOkOr:
        # Caller must `rollback()` which will flush the `rdVtxLru` cache
        const errSym = RdbBeDriverDelVtxError
        when extraTraceMessages:
@@ -679,7 +679,8 @@ proc swapCtx*(base: AristoBaseRef; ctx: CoreDbCtxRef): CoreDbCtxRef =

   # Set read-write access and install
   base.ctx = AristoCoreDbCtxRef(ctx)
-  base.api.reCentre(base.ctx.mpt)
+  base.api.reCentre(base.ctx.mpt).isOkOr:
+    raiseAssert "swapCtx() failed: " & $error


 proc persistent*(
@@ -204,10 +204,13 @@ proc persistent*(
     rc = api.persist(kvt)
   if rc.isOk:
     ok()
-  elif api.level(kvt) == 0:
-    err(rc.error.toError(base, info))
-  else:
+  elif api.level(kvt) != 0:
     err(rc.error.toError(base, info, TxPending))
+  elif rc.error == TxPersistDelayed:
+    # This is OK: Piggybacking on `Aristo` backend
+    ok()
+  else:
+    err(rc.error.toError(base, info))

 # ------------------------------------------------------------------------------
 # Public constructors and related
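The decision ladder above treats `TxPersistDelayed` as success because, in piggyback mode, the `KVT` data are flushed with the next `Aristo` write batch rather than by `persist()` itself. Restated as a hypothetical classifier:

    proc classify(rc: Result[void,KvtError]; level: int): string =
      if rc.isOk: "saved"
      elif level != 0: "rejected: transaction still pending"
      elif rc.error == TxPersistDelayed: "deferred to the Aristo write batch"
      else: "failed"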
@@ -39,10 +39,22 @@ const
 # ------------------------------------------------------------------------------

 proc newAristoRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef =
+  ## This funcion piggybacks the `KVT` on the `Aristo` backend.
   let
-    adb = AristoDbRef.init(use_ari.RdbBackendRef, path, opts).expect aristoFail
-    gdb = adb.guestDb().valueOr: GuestDbRef(nil)
-    kdb = KvtDbRef.init(use_kvt.RdbBackendRef, path, gdb).expect kvtFail
+    adb = AristoDbRef.init(use_ari.RdbBackendRef, path, opts).valueOr:
+      raiseAssert aristoFail & ": " & $error
+    kdb = KvtDbRef.init(use_kvt.RdbBackendRef, adb, opts).valueOr:
+      raiseAssert kvtFail & ": " & $error
+  AristoDbRocks.create(kdb, adb)
+
+proc newAristoDualRocksDbCoreDbRef*(path: string, opts: DbOptions): CoreDbRef =
+  ## This is mainly for debugging. The KVT is run on a completely separate
+  ## database backend.
+  let
+    adb = AristoDbRef.init(use_ari.RdbBackendRef, path, opts).valueOr:
+      raiseAssert aristoFail & ": " & $error
+    kdb = KvtDbRef.init(use_kvt.RdbBackendRef, path, opts).valueOr:
+      raiseAssert kvtFail & ": " & $error
   AristoDbRocks.create(kdb, adb)

 # ------------------------------------------------------------------------------
@@ -820,7 +820,7 @@ proc level*(db: CoreDbRef): int =

 proc persistent*(
     db: CoreDbRef;
-      ): CoreDbRc[void] {.discardable.} =
+      ): CoreDbRc[void] =
   ## For the legacy database, this function has no effect and succeeds always.
   ## It will nevertheless return a discardable error if there is a pending
   ## transaction (i.e. `db.level() == 0`.)

@@ -28,6 +28,7 @@ type
   Ooops
   AristoDbMemory            ## Memory backend emulator
   AristoDbRocks             ## RocksDB backend
+  AristoDbDualRocks         ## Dual RocksDB backends for `Kvt` and `Aristo`
   AristoDbVoid              ## No backend

 const
@@ -39,11 +39,20 @@ proc newCoreDbRef*(
       ): CoreDbRef =
   ## Constructor for persistent type DB
   ##
-  ## Note: Using legacy notation `newCoreDbRef()` rather than
-  ## `CoreDbRef.init()` because of compiler coughing.
+  ## The production database type is `AristoDbRocks` which uses a single
+  ## `RocksDb` backend for both, `Aristo` and `KVT`.
+  ##
+  ## For debugging, there is the `AristoDbDualRocks` database with split
+  ## backends for `Aristo` and `KVT`. This database is not compatible with
+  ## `AristoDbRocks` so it cannot be reliably switched between both versions
+  ## with consecutive sessions.
+  ##
   when dbType == AristoDbRocks:
     newAristoRocksDbCoreDbRef path, opts

+  elif dbType == AristoDbDualRocks:
+    newAristoDualRocksDbCoreDbRef path, opts
+
   else:
     {.error: "Unsupported dbType for persistent newCoreDbRef()".}
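Usage follows directly from the `when` dispatch above, with the backend chosen by the compile-time `dbType` argument. A sketch, assuming the `(dbType, path, opts)` argument order shown in the hunk (paths are illustrative):

    let
      prodDb  = newCoreDbRef(AristoDbRocks, "data/nimbus", opts)     # KVT piggybacked
      debugDb = newCoreDbRef(AristoDbDualRocks, "data/debug", opts)  # separate KVT store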
@@ -58,7 +58,7 @@ type
   KvtApiNForkedFn* = proc(db: KvtDbRef): int {.noRaise.}
   KvtApiPutFn* = proc(db: KvtDbRef,
     key, data: openArray[byte]): Result[void,KvtError] {.noRaise.}
-  KvtApiReCentreFn* = proc(db: KvtDbRef) {.noRaise.}
+  KvtApiReCentreFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
   KvtApiRollbackFn* = proc(tx: KvtTxRef): Result[void,KvtError] {.noRaise.}
   KvtApiPersistFn* = proc(db: KvtDbRef): Result[void,KvtError] {.noRaise.}
   KvtApiToKvtDbRefFn* = proc(tx: KvtTxRef): KvtDbRef {.noRaise.}

@@ -156,7 +156,7 @@ proc dup(be: BackendRef): BackendRef =
   of BackendMemory:
     return MemBackendRef(be).dup

-  of BackendRocksDB:
+  of BackendRocksDB, BackendRdbTriggered:
     when KvtPersistentBackendOk:
       return RdbBackendRef(be).dup

@@ -306,9 +306,9 @@ func init*(
       result = api.put(a, b, c)

   profApi.reCentre =
-    proc(a: KvtDbRef) =
+    proc(a: KvtDbRef): auto =
       KvtApiProfReCentreFn.profileRunner:
-        api.reCentre(a)
+        result = api.reCentre(a)

   profApi.rollback =
     proc(a: KvtTxRef): auto =
@@ -17,29 +17,29 @@ import
   results,
   ./kvt_desc,
   ./kvt_desc/desc_backend,
-  ./kvt_filter/[filter_merge, filter_reverse]
+  ./kvt_delta/[delta_merge, delta_reverse]

 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

-proc filterMerge*(
+proc deltaMerge*(
     db: KvtDbRef;                      # Database
-    filter: LayerDeltaRef;             # Filter to apply to database
+    delta: LayerDeltaRef;              # Filter to apply to database
       ) =
-  ## Merge the argument `filter` into the read-only filter layer. Note that
+  ## Merge the argument `delta` into the balancer filter layer. Note that
   ## this function has no control of the filter source. Having merged the
-  ## argument `filter`, all the `top` and `stack` layers should be cleared.
+  ## argument `delta`, all the `top` and `stack` layers should be cleared.
   ##
-  db.merge(filter, db.roFilter)
+  db.merge(delta, db.balancer)


-proc filterUpdateOk*(db: KvtDbRef): bool =
-  ## Check whether the read-only filter can be merged into the backend
+proc deltaUpdateOk*(db: KvtDbRef): bool =
+  ## Check whether the balancer filter can be merged into the backend
   not db.backend.isNil and db.isCentre


-proc filterUpdate*(
+proc deltaUpdate*(
     db: KvtDbRef;                      # Database
     reCentreOk = false;
       ): Result[void,KvtError] =
|
|||||||
return err(FilBackendMissing)
|
return err(FilBackendMissing)
|
||||||
|
|
||||||
# Blind or missing filter
|
# Blind or missing filter
|
||||||
if db.roFilter.isNil:
|
if db.balancer.isNil:
|
||||||
return ok()
|
return ok()
|
||||||
|
|
||||||
# Make sure that the argument `db` is at the centre so the backend is in
|
# Make sure that the argument `db` is at the centre so the backend is in
|
||||||
@ -67,21 +67,21 @@ proc filterUpdate*(
|
|||||||
if db != parent:
|
if db != parent:
|
||||||
if not reCentreOk:
|
if not reCentreOk:
|
||||||
return err(FilBackendRoMode)
|
return err(FilBackendRoMode)
|
||||||
db.reCentre
|
? db.reCentre()
|
||||||
# Always re-centre to `parent` (in case `reCentreOk` was set)
|
# Always re-centre to `parent` (in case `reCentreOk` was set)
|
||||||
defer: parent.reCentre
|
defer: discard parent.reCentre()
|
||||||
|
|
||||||
# Store structural single trie entries
|
# Store structural single trie entries
|
||||||
let writeBatch = be.putBegFn()
|
let writeBatch = ? be.putBegFn()
|
||||||
be.putKvpFn(writeBatch, db.roFilter.sTab.pairs.toSeq)
|
be.putKvpFn(writeBatch, db.balancer.sTab.pairs.toSeq)
|
||||||
? be.putEndFn writeBatch
|
? be.putEndFn writeBatch
|
||||||
|
|
||||||
# Update peer filter balance.
|
# Update peer filter balance.
|
||||||
let rev = db.filterReverse db.roFilter
|
let rev = db.deltaReverse db.balancer
|
||||||
for w in db.forked:
|
for w in db.forked:
|
||||||
db.merge(rev, w.roFilter)
|
db.merge(rev, w.balancer)
|
||||||
|
|
||||||
db.roFilter = LayerDeltaRef(nil)
|
db.balancer = LayerDeltaRef(nil)
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
@@ -17,21 +17,21 @@ import
 # Public functions
 # ------------------------------------------------------------------------------

-proc filterReverse*(
+proc deltaReverse*(
     db: KvtDbRef;                      # Database
-    filter: LayerDeltaRef;             # Filter to revert
+    delta: LayerDeltaRef;              # Filter to revert
       ): LayerDeltaRef =
-  ## Assemble reverse filter for the `filter` argument, i.e. changes to the
-  ## backend that reverse the effect of applying the this read-only filter.
+  ## Assemble a reverse filter for the `delta` argument, i.e. changes to the
+  ## backend that reverse the effect of applying this to the balancer filter.
   ## The resulting filter is calculated against the current *unfiltered*
-  ## backend (excluding optionally installed read-only filter.)
+  ## backend (excluding optionally installed balancer filters.)
   ##
-  ## If `filter` is `nil`, the result will be `nil` as well.
-  if not filter.isNil:
+  ## If `delta` is `nil`, the result will be `nil` as well.
+  if not delta.isNil:
     result = LayerDeltaRef()

     # Calculate reverse changes for the `sTab[]` structural table
-    for key in filter.sTab.keys:
+    for key in delta.sTab.keys:
       let rc = db.getUbe key
       if rc.isOk:
         result.sTab[key] = rc.value
@@ -42,12 +42,11 @@ type
     centre: KvtDbRef                  ## Link to peer with write permission
     peers: HashSet[KvtDbRef]          ## List of all peers

-  KvtDbRef* = ref KvtDbObj
-  KvtDbObj* = object
+  KvtDbRef* = ref object of RootRef
     ## Three tier database object supporting distributed instances.
     top*: LayerRef                    ## Database working layer, mutable
     stack*: seq[LayerRef]             ## Stashed immutable parent layers
-    roFilter*: LayerDeltaRef          ## Apply read filter (locks writing)
+    balancer*: LayerDeltaRef          ## Apply read filter (locks writing)
     backend*: BackendRef              ## Backend database (may well be `nil`)

     txRef*: KvtTxRef                  ## Latest active transaction
@@ -62,6 +61,17 @@ type
   KvtDbAction* = proc(db: KvtDbRef) {.gcsafe, raises: [].}
     ## Generic call back function/closure.

+# ------------------------------------------------------------------------------
+# Private helpers
+# ------------------------------------------------------------------------------
+
+proc canMod(db: KvtDbRef): Result[void,KvtError] =
+  ## Ask for permission before doing nasty stuff
+  if db.backend.isNil:
+    ok()
+  else:
+    db.backend.canModFn()
+
 # ------------------------------------------------------------------------------
 # Public helpers
 # ------------------------------------------------------------------------------
@@ -97,7 +107,7 @@ func getCentre*(db: KvtDbRef): KvtDbRef =
   ##
   if db.dudes.isNil: db else: db.dudes.centre

-proc reCentre*(db: KvtDbRef) =
+proc reCentre*(db: KvtDbRef): Result[void,KvtError] =
   ## Re-focus the `db` argument descriptor so that it becomes the centre.
   ## Nothing is done if the `db` descriptor is the centre, already.
   ##

@@ -111,8 +121,10 @@ proc reCentre*(db: KvtDbRef) =
   ## accessing the same backend database. Descriptors where `isCentre()`
   ## returns `false` must be single destructed with `forget()`.
   ##
-  if not db.dudes.isNil:
+  if not db.dudes.isNil and db.dudes.centre != db:
+    ? db.canMod()
     db.dudes.centre = db
+  ok()

 proc fork*(
     db: KvtDbRef;
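Since the `KVT` `reCentre()` is now gated by the backend's `canModFn()`, a peer cannot grab the centre while the hosting write trigger forbids it. A caller-side sketch (the fork/echo scaffolding is illustrative):

    let peer = db.fork().expect "forked peer"
    peer.reCentre().isOkOr:
      # e.g. the backend's canModFn() refused while piggybacking on Aristo
      echo "cannot re-centre: ", error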
@ -140,7 +152,7 @@ proc fork*(
|
|||||||
dudes: db.dudes)
|
dudes: db.dudes)
|
||||||
|
|
||||||
if not noFilter:
|
if not noFilter:
|
||||||
clone.roFilter = db.roFilter # Ref is ok here (filters are immutable)
|
clone.balancer = db.balancer # Ref is ok here (filters are immutable)
|
||||||
|
|
||||||
if not noTopLayer:
|
if not noTopLayer:
|
||||||
clone.top = LayerRef.init()
|
clone.top = LayerRef.init()
|
||||||
@ -177,6 +189,7 @@ proc forget*(db: KvtDbRef): Result[void,KvtError] =
|
|||||||
elif db notin db.dudes.peers:
|
elif db notin db.dudes.peers:
|
||||||
err(StaleDescriptor)
|
err(StaleDescriptor)
|
||||||
else:
|
else:
|
||||||
|
? db.canMod()
|
||||||
db.dudes.peers.excl db # Unlink argument `db` from peers list
|
db.dudes.peers.excl db # Unlink argument `db` from peers list
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
@ -187,7 +200,7 @@ proc forgetOthers*(db: KvtDbRef): Result[void,KvtError] =
|
|||||||
if not db.dudes.isNil:
|
if not db.dudes.isNil:
|
||||||
if db.dudes.centre != db:
|
if db.dudes.centre != db:
|
||||||
return err(MustBeOnCentre)
|
return err(MustBeOnCentre)
|
||||||
|
? db.canMod()
|
||||||
db.dudes = DudesRef(nil)
|
db.dudes = DudesRef(nil)
|
||||||
ok()
|
ok()
|
||||||
|
|
||||||
|
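Since `reCentre()` now returns a `Result`, call sites have to check it. A minimal caller sketch, assuming a previously forked peer descriptor (the `fork()` call and the logging are illustrative, not part of the diff):

    let peer = db.fork().expect("fork")
    peer.reCentre().isOkOr:
      echo "re-centre refused: ", error  # e.g. locked while a write request is pending
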
@@ -33,7 +33,7 @@ type
       ## by any library function using the backend.

   PutBegFn* =
-    proc(): PutHdlRef {.gcsafe, raises: [].}
+    proc(): Result[PutHdlRef,KvtError] {.gcsafe, raises: [].}
       ## Generic transaction initialisation function

   PutKvpFn* =

@@ -53,6 +53,21 @@ type
       ## `false` the outcome might differ depending on the type of backend
       ## (e.g. in-memory backends would flush on close.)

+  CanModFn* =
+    proc(): Result[void,KvtError] {.gcsafe, raises: [].}
+      ## This function returns OK if there is nothing to prevent the main
+      ## `KVT` descriptors being modified (e.g. by `reCentre()`) or by
+      ## adding/removing a new peer (e.g. by `fork()` or `forget()`.)
+
+  SetWrReqFn* =
+    proc(db: RootRef): Result[void,KvtError] {.gcsafe, raises: [].}
+      ## This function stores a request function for the piggyback mode
+      ## writing to the `Aristo` set of column families.
+      ##
+      ## If used at all, this function would run `rocks_db.setWrReqTriggeredFn()()`
+      ## with a `KvtDbRef` type argument for `db`. This allows running the `Kvt`
+      ## without linking to the rocksdb interface unless it is really needed.
+
   # -------------

   BackendRef* = ref BackendObj

@@ -66,6 +81,9 @@ type
     putEndFn*: PutEndFn              ## Commit bulk store session

     closeFn*: CloseFn                ## Generic destructor
+    canModFn*: CanModFn              ## Lock-alike
+
+    setWrReqFn*: SetWrReqFn          ## Register main descr for write request

 proc init*(trg: var BackendObj; src: BackendObj) =
   trg.getKvpFn = src.getKvpFn

@@ -73,6 +91,7 @@ proc init*(trg: var BackendObj; src: BackendObj) =
   trg.putKvpFn = src.putKvpFn
   trg.putEndFn = src.putEndFn
   trg.closeFn = src.closeFn
+  trg.canModFn = src.canModFn

 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------

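A backend without write-trigger support wires the two new callbacks trivially; a sketch matching what the memory backend below installs:

    let canMod: CanModFn = proc(): Result[void,KvtError] =
      ok()                          # nothing ever blocks descriptor changes
    let setWrReq: SetWrReqFn = proc(db: RootRef): Result[void,KvtError] =
      err(RdbBeHostNotApplicable)   # no host backend to queue a request on
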
@@ -19,11 +19,16 @@ type

     # RocksDB backend
     RdbBeCantCreateDataDir
+    RdbBeDelayedAlreadyRegistered
+    RdbBeDelayedLocked
+    RdbBeDelayedNotReady
     RdbBeDriverDelError
     RdbBeDriverGetError
     RdbBeDriverInitError
     RdbBeDriverPutError
     RdbBeDriverWriteError
+    RdbBeHostError
+    RdbBeHostNotApplicable

     # Transaction wrappers
     TxArgStaleTx

@@ -33,6 +38,7 @@ type
     TxNoPendingTx
     TxNotTopTx
     TxPendingTx
+    TxPersistDelayed
     TxStackGarbled
     TxStackUnderflow

@@ -11,18 +11,20 @@
 {.push raises: [].}

 import
-  ../../aristo/aristo_init/init_common,
   ../kvt_desc,
   ../kvt_desc/desc_backend

-export
-  BackendType # borrowed from Aristo
-
 const
   verifyIxId = true # and false
     ## Enforce session tracking

 type
+  BackendType* = enum
+    BackendVoid = 0                  ## For providing backend-less constructor
+    BackendMemory                    ## Same as Aristo
+    BackendRocksDB                   ## Same as Aristo
+    BackendRdbTriggered              ## Piggybacked on remote write session
+
   TypedBackendRef* = ref TypedBackendObj
   TypedBackendObj* = object of BackendObj
     beKind*: BackendType             ## Backend type identifier

@@ -86,8 +86,8 @@ proc getKvpFn(db: MemBackendRef): GetKvpFn =

 proc putBegFn(db: MemBackendRef): PutBegFn =
   result =
-    proc(): PutHdlRef =
-      db.newSession()
+    proc(): Result[PutHdlRef,KvtError] =
+      ok db.newSession()

 proc putKvpFn(db: MemBackendRef): PutKvpFn =
   result =

@@ -120,6 +120,16 @@ proc closeFn(db: MemBackendRef): CloseFn =
     proc(ignore: bool) =
       discard

+proc canModFn(db: MemBackendRef): CanModFn =
+  result =
+    proc(): Result[void,KvtError] =
+      ok()
+
+proc setWrReqFn(db: MemBackendRef): SetWrReqFn =
+  result =
+    proc(kvt: RootRef): Result[void,KvtError] =
+      err(RdbBeHostNotApplicable)
+
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

@@ -136,7 +146,8 @@ proc memoryBackend*: BackendRef =
   db.putEndFn = putEndFn db

   db.closeFn = closeFn db
+  db.canModFn = canModFn db
+  db.setWrReqFn = setWrReqFn db
   db

 proc dup*(db: MemBackendRef): MemBackendRef =

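With `PutBegFn` now returning a `Result`, a bulk store session against a backend `be` reads as below; the `putKvpFn` parameter list is assumed from the surrounding interface rather than spelled out in this hunk:

    proc storeOne(be: BackendRef; key, val: Blob): Result[void,KvtError] =
      let hdl = ? be.putBegFn()     # may fail now, e.g. RdbBeDelayedNotReady
      be.putKvpFn(hdl, key, val)    # errors are latched into the handle
      be.putEndFn(hdl)              # commit; reports any latched error
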
@@ -32,7 +32,7 @@ export
 # Public helpers
 # -----------------------------------------------------------------------------

-proc kind*(
+func kind*(
     be: BackendRef;
       ): BackendType =
   ## Retrieves the backend type symbol for a `be` backend database argument

@@ -20,43 +20,70 @@

 import
   results,
+  ../../aristo,
+  ../../opts,
   ../kvt_desc,
   "."/[rocks_db, memory_only]

-from ../../aristo/aristo_persistent
-  import GuestDbRef, getRocksDbFamily
-
 export
   RdbBackendRef,
   memory_only

+# ------------------------------------------------------------------------------
+# Private helpers
+# ------------------------------------------------------------------------------
+
+func toErr0(err: (KvtError,string)): KvtError =
+  err[0]
+
 # ------------------------------------------------------------------------------
 # Public database constructors, destructor
 # ------------------------------------------------------------------------------

-proc init*[W: MemOnlyBackend|RdbBackendRef](
+proc init*(
     T: type KvtDbRef;
-    B: type W;
+    B: type RdbBackendRef;
     basePath: string;
-    guestDb = GuestDbRef(nil);
+    opts: DbOptions;
       ): Result[KvtDbRef,KvtError] =
-  ## Generic constructor, `basePath` argument is ignored for `BackendNone` and
-  ## `BackendMemory` type backend database. Also, both of these backends
-  ## always succeed initialising.
+  ## Generic constructor for `RocksDb` backend
   ##
-  ## If the argument `guestDb` is set and is a RocksDB column family, the
-  ## `Kvt` database is built upon this column family. Otherwise it is newly
-  ## created with `basePath` as storage location.
-  ##
-  when B is RdbBackendRef:
-    let rc = guestDb.getRocksDbFamily()
-    if rc.isOk:
-      ok KvtDbRef(top: LayerRef.init(), backend: ? rocksDbKvtBackend rc.value)
-    else:
-      ok KvtDbRef(top: LayerRef.init(), backend: ? rocksDbKvtBackend basePath)
-
-  else:
-    ok KvtDbRef.init B
+  ok KvtDbRef(
+    top: LayerRef.init(),
+    backend: ? rocksDbKvtBackend(basePath, opts).mapErr toErr0)
+
+proc init*(
+    T: type KvtDbRef;
+    B: type RdbBackendRef;
+    adb: AristoDbRef;
+    opts: DbOptions;
+      ): Result[KvtDbRef,KvtError] =
+  ## Constructor for `RocksDb` backend which piggybacks on the `Aristo`
+  ## backend. The following changes will occur after successful instantiation:
+  ##
+  ## * When invoked, the function `kvt_tx.persistent()` will always return an
+  ##   error. If everything is all right (e.g. saving is possible), the error
+  ##   returned will be `TxPersistDelayed`. This indicates that the save
+  ##   request was queued, waiting for being picked up by an event handler.
+  ##
+  ## * There should be an invocation of `aristo_tx.persistent()` immediately
+  ##   following the `kvt_tx.persistent()` call (some `KVT` functions might
+  ##   return `RdbBeDelayedLocked` or similar errors while the save request
+  ##   is pending.) Once successful, the `aristo_tx.persistent()` function will
+  ##   also have committed the pending save request mentioned above.
+  ##
+  ## * The function `kvt_init/memory_only.finish()` does nothing.
+  ##
+  ## * The function `aristo_init/memory_only.finish()` will close both
+  ##   sessions, the one for `KVT` and the other for `Aristo`.
+  ##
+  ## * The functions `kvt_delta.deltaUpdate()` and `tx_stow.txStow()` should
+  ##   not be invoked directly (they will stop with an error most of the time,
+  ##   anyway.)
+  ##
+  ok KvtDbRef(
+    top: LayerRef.init(),
+    backend: ? rocksDbKvtTriggeredBackend(adb, opts).mapErr toErr0)

 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------

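The doc comment above implies the following save cycle in piggyback mode; a caller-side sketch where `kvtDb` and `aristoDb` are placeholder descriptors for the paired databases:

    let rc = kvtDb.persist()               # only queues a KVT write request
    doAssert rc.error == TxPersistDelayed  # "queued", not a failure as such
    aristoDb.persist().isOkOr:             # Aristo write cycle commits both
      echo "save failed: ", error
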
@@ -27,18 +27,19 @@
 {.push raises: [].}

 import
+  chronicles,
   eth/common,
   rocksdb,
   results,
+  ../../aristo/aristo_init/persistent,
+  ../../opts,
   ../kvt_desc,
   ../kvt_desc/desc_backend,
+  ../kvt_tx/tx_stow,
   ./init_common,
   ./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk]

 const
-  maxOpenFiles = 512          ## Rocks DB setup, open files limit
-
   extraTraceMessages = false or true
     ## Enabled additional logging noise

@@ -48,11 +49,8 @@ type

   RdbPutHdlRef = ref object of TypedPutHdlRef

-when extraTraceMessages:
-  import chronicles
-
-  logScope:
-    topics = "aristo-backend"
+logScope:
+  topics = "kvt-backend"

 # ------------------------------------------------------------------------------
 # Private helpers

@@ -75,7 +73,7 @@ proc endSession(hdl: PutHdlRef; db: RdbBackendRef): RdbPutHdlRef =
   hdl.RdbPutHdlRef

 # ------------------------------------------------------------------------------
-# Private functions: interface
+# Private functions: standard interface
 # ------------------------------------------------------------------------------

 proc getKvpFn(db: RdbBackendRef): GetKvpFn =

@@ -98,9 +96,10 @@ proc getKvpFn(db: RdbBackendRef): GetKvpFn =

 proc putBegFn(db: RdbBackendRef): PutBegFn =
   result =
-    proc(): PutHdlRef =
+    proc(): Result[PutHdlRef,KvtError] =
       db.rdb.begin()
-      db.newSession()
+      ok db.newSession()

 proc putKvpFn(db: RdbBackendRef): PutKvpFn =
   result =

@@ -114,6 +113,7 @@ proc putKvpFn(db: RdbBackendRef): PutKvpFn =
           hdl.info = error[2]
           return
+

 proc putEndFn(db: RdbBackendRef): PutEndFn =
   result =
     proc(hdl: PutHdlRef): Result[void,KvtError] =

@@ -121,6 +121,7 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =
       if hdl.error != KvtError(0):
         when extraTraceMessages:
           debug logTxt "putEndFn: failed", error=hdl.error, info=hdl.info
+        db.rdb.rollback()
         return err(hdl.error)

       # Commit session

@@ -136,16 +137,117 @@ proc closeFn(db: RdbBackendRef): CloseFn =
     proc(flush: bool) =
       db.rdb.destroy(flush)

-# --------------
+proc canModFn(db: RdbBackendRef): CanModFn =
+  result =
+    proc(): Result[void,KvtError] =
+      ok()

-proc setup(db: RdbBackendRef) =
-  db.getKvpFn = getKvpFn db
+proc setWrReqFn(db: RdbBackendRef): SetWrReqFn =
+  result =
+    proc(kvt: RootRef): Result[void,KvtError] =
+      err(RdbBeHostNotApplicable)

-  db.putBegFn = putBegFn db
-  db.putKvpFn = putKvpFn db
-  db.putEndFn = putEndFn db
+# ------------------------------------------------------------------------------
+# Private functions: triggered interface changes
+# ------------------------------------------------------------------------------

-  db.closeFn = closeFn db
+proc putBegTriggeredFn(db: RdbBackendRef): PutBegFn =
+  ## Variant of `putBegFn()` for piggyback write batch
+  result =
+    proc(): Result[PutHdlRef,KvtError] =
+      # Check whether somebody else initiated the rocksdb write batch/session
+      if db.rdb.session.isNil:
+        const error = RdbBeDelayedNotReady
+        when extraTraceMessages:
+          debug logTxt "putBegTriggeredFn: failed", error
+        return err(error)
+      ok db.newSession()
+
+proc putEndTriggeredFn(db: RdbBackendRef): PutEndFn =
+  ## Variant of `putEndFn()` for piggyback write batch
+  result =
+    proc(hdl: PutHdlRef): Result[void,KvtError] =
+      # There is no commit()/rollback() here as we do not own the backend.
+      let hdl = hdl.endSession db
+
+      if hdl.error != KvtError(0):
+        when extraTraceMessages:
+          debug logTxt "putEndTriggeredFn: failed",
+            error=hdl.error, info=hdl.info
+        # The error return code will signal a problem to the `txStow()`
+        # function which was called by `writeEvCb()` below.
+        return err(hdl.error)
+
+      # Commit the session. This will be acknowledged by the `txStow()`
+      # function which was called by `writeEvCb()` below.
+      ok()
+
+proc closeTriggeredFn(db: RdbBackendRef): CloseFn =
+  ## Variant of `closeFn()` for piggyback write batch
+  result =
+    proc(flush: bool) =
+      # Nothing to do here as we do not own the backend
+      discard
+
+proc canModTriggeredFn(db: RdbBackendRef): CanModFn =
+  ## Variant of `canModFn()` for piggyback write batch
+  result =
+    proc(): Result[void,KvtError] =
+      # Deny modifications/changes if there is a pending write request
+      if not db.rdb.delayedPersist.isNil:
+        return err(RdbBeDelayedLocked)
+      ok()
+
+proc setWrReqTriggeredFn(db: RdbBackendRef): SetWrReqFn =
+  result =
+    proc(kvt: RootRef): Result[void,KvtError] =
+      if db.rdb.delayedPersist.isNil:
+        db.rdb.delayedPersist = KvtDbRef(kvt)
+        ok()
+      else:
+        err(RdbBeDelayedAlreadyRegistered)
+
+# ------------------------------------------------------------------------------
+# Private function: trigger handler
+# ------------------------------------------------------------------------------
+
+proc writeEvCb(db: RdbBackendRef): RdbWriteEventCb =
+  ## Write session event handler
+  result =
+    proc(ws: WriteBatchRef): bool =
+      # Only do something if a write session request was queued
+      if not db.rdb.delayedPersist.isNil:
+        defer:
+          # Clear session environment when leaving. This makes sure that the
+          # same session can only be run once.
+          db.rdb.session = WriteBatchRef(nil)
+          db.rdb.delayedPersist = KvtDbRef(nil)
+
+        # Publish session argument
+        db.rdb.session = ws
+
+        # Execute delayed session. Note that the `txStow()` function is located
+        # in `tx_stow.nim`. This module `tx_stow.nim` is also imported by
+        # `kvt_tx.nim` which contains `persist()`. So the logic goes:
+        # ::
+        #   kvt_tx.persist() --> registers a delayed write request rather
+        #                        than executing tx_stow.txStow()
+        #
+        #   // the backend owner (i.e. Aristo) will start a write cycle and
+        #   // invoke the event handler rocks_db.writeEvCb()
+        #   rocks_db.writeEvCb() --> calls tx_stow.txStow()
+        #
+        #   tx_stow.txStow() --> calls rocks_db.putBegTriggeredFn()
+        #                        calls rocks_db.putKvpFn()
+        #                        calls rocks_db.putEndTriggeredFn()
+        #
+        let rc = db.rdb.delayedPersist.txStow(persistent=true)
+        if rc.isErr:
+          error "writeEventCb(): persist() failed", error=rc.error
+          return false
+      true

 # ------------------------------------------------------------------------------
 # Public functions

@@ -153,28 +255,58 @@ proc setup(db: RdbBackendRef) =

 proc rocksDbKvtBackend*(
     path: string;
-      ): Result[BackendRef,KvtError] =
+    opts: DbOptions;
+      ): Result[BackendRef,(KvtError,string)] =
   let db = RdbBackendRef(
     beKind: BackendRocksDB)

   # Initialise RocksDB
-  db.rdb.init(path, maxOpenFiles).isOkOr:
+  db.rdb.init(path, opts).isOkOr:
     when extraTraceMessages:
       trace logTxt "constructor failed", error=error[0], info=error[1]
-    return err(error[0])
+    return err(error)

-  db.setup()
+  db.getKvpFn = getKvpFn db
+
+  db.putBegFn = putBegFn db
+  db.putKvpFn = putKvpFn db
+  db.putEndFn = putEndFn db
+
+  db.closeFn = closeFn db
+  db.canModFn = canModFn db
+  db.setWrReqFn = setWrReqFn db
   ok db

-proc rocksDbKvtBackend*(
-    store: ColFamilyReadWrite;
-      ): Result[BackendRef,KvtError] =
+proc rocksDbKvtTriggeredBackend*(
+    adb: AristoDbRef;
+    opts: DbOptions;
+      ): Result[BackendRef,(KvtError,string)] =
   let db = RdbBackendRef(
-    beKind: BackendRocksDB)
-  db.rdb.init(store)
-  db.setup()
+    beKind: BackendRdbTriggered)
+
+  # Initialise RocksDB piggy-backed on `Aristo` backend.
+  db.rdb.init(adb, opts).isOkOr:
+    when extraTraceMessages:
+      trace logTxt "constructor failed", error=error[0], info=error[1]
+    return err(error)
+
+  # Register write session event handler
+  adb.activateWrTrigger(db.writeEvCb()).isOkOr:
+    return err((RdbBeHostError,$error))
+
+  db.getKvpFn = getKvpFn db
+
+  db.putBegFn = putBegTriggeredFn db
+  db.putKvpFn = putKvpFn db
+  db.putEndFn = putEndTriggeredFn db
+
+  db.closeFn = closeTriggeredFn db
+  db.canModFn = canModTriggeredFn db
+  db.setWrReqFn = setWrReqTriggeredFn db
   ok db

 proc dup*(db: RdbBackendRef): RdbBackendRef =
   new result
   init_common.init(result[], db[])

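The trigger contract is small: `Aristo` hands the guest its open `WriteBatchRef` and expects `true` back on success. A stripped-down handler sketch (the real `writeEvCb()` above additionally runs the queued `txStow()`):

    let cb: RdbWriteEventCb = proc(ws: WriteBatchRef): bool =
      # write the guest data into `ws` here; the host commits the batch afterwards
      true
    adb.activateWrTrigger(cb).isOkOr:
      return err((RdbBeHostError, $error))
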
@@ -15,16 +15,25 @@

 import
   std/os,
+  ../../kvt_desc,
   rocksdb

 type
   RdbInst* = object
-    store*: ColFamilyReadWrite       ## Rocks DB database handler
+    store*: KvtCfStore               ## Rocks DB database handler
     session*: WriteBatchRef          ## For batched `put()`

     basePath*: string                ## Database directory
+    delayedPersist*: KvtDbRef        ## Enable next piggyback write session
+
+  KvtCFs* = enum
+    ## Column family symbols/handles and names used on the database
+    KvtGeneric = "KvtGen"            ## Generic column family
+
+  KvtCfStore* = array[KvtCFs,ColFamilyReadWrite]
+    ## List of column family handlers

 const
-  KvtFamily* = "Kvt"                 ## RocksDB column family
   BaseFolder* = "nimbus"             ## Same as for Legacy DB
   DataFolder* = "kvt"                ## Legacy DB has "data"

@@ -32,6 +41,10 @@ const
 # Public functions
 # ------------------------------------------------------------------------------

+template logTxt*(info: static[string]): static[string] =
+  "RocksDB/" & info
+
+
 func baseDir*(rdb: RdbInst): string =
   rdb.basePath / BaseFolder

@@ -39,8 +52,8 @@ func dataDir*(rdb: RdbInst): string =
   rdb.baseDir / DataFolder


-template logTxt*(info: static[string]): static[string] =
-  "RocksDB/" & info
+template baseDb*(rdb: RdbInst): RocksDbReadWriteRef =
+  rdb.store[KvtGeneric].db

 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------

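The `KvtCFs` enum values double as the on-disk column family names, so the stringified symbol is what the driver calls below pass around; given an `RdbInst` instance `rdb`:

    doAssert $KvtGeneric == "KvtGen"   # family name as opened on the database
    let cf = rdb.store[KvtGeneric]     # its ColFamilyReadWrite handle
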
@@ -43,7 +43,7 @@ proc get*(
   let onData: DataProc = proc(data: openArray[byte]) =
     res = @data

-  let gotData = rdb.store.get(key, onData).valueOr:
+  let gotData = rdb.store[KvtGeneric].get(key, onData).valueOr:
     const errSym = RdbBeDriverGetError
     when extraTraceMessages:
       trace logTxt "get", error=errSym, info=error

@@ -14,21 +14,37 @@
 {.push raises: [].}

 import
-  std/os,
+  std/[sequtils, os],
   rocksdb,
   results,
+  ../../../aristo/aristo_init/persistent,
+  ../../../opts,
   ../../kvt_desc,
+  ../../kvt_desc/desc_error as kdb,
   ./rdb_desc

-const
-  extraTraceMessages = false
-    ## Enabled additional logging noise
+# ------------------------------------------------------------------------------
+# Private helpers
+# ------------------------------------------------------------------------------

-when extraTraceMessages:
-  import chronicles
+proc getCFInitOptions(opts: DbOptions): ColFamilyOptionsRef =
+  result = defaultColFamilyOptions()
+  if opts.writeBufferSize > 0:
+    result.setWriteBufferSize(opts.writeBufferSize)

-  logScope:
-    topics = "kvt-backend"
+proc getDbInitOptions(opts: DbOptions): DbOptionsRef =
+  result = defaultDbOptions()
+  result.setMaxOpenFiles(opts.maxOpenFiles)
+  result.setMaxBytesForLevelBase(opts.writeBufferSize)
+
+  if opts.rowCacheSize > 0:
+    result.setRowCache(cacheCreateLRU(opts.rowCacheSize))
+
+  if opts.blockCacheSize > 0:
+    let tableOpts = defaultTableOptions()
+    tableOpts.setBlockCache(cacheCreateLRU(opts.blockCacheSize))
+    result.setBlockBasedTableFactory(tableOpts)

 # ------------------------------------------------------------------------------
 # Public constructor

@@ -37,51 +53,64 @@ when extraTraceMessages:
 proc init*(
     rdb: var RdbInst;
     basePath: string;
-    openMax: int;
+    opts: DbOptions;
       ): Result[void,(KvtError,string)] =
-  ## Constructor code inspired by `RocksStoreRef.init()` from
-  ## kvstore_rocksdb.nim
+  ## Database backend constructor for stand-alone version
+  ##
+  const initFailed = "RocksDB/init() failed"
+
   rdb.basePath = basePath

   let
     dataDir = rdb.dataDir

   try:
     dataDir.createDir
   except OSError, IOError:
-    return err((RdbBeCantCreateDataDir, ""))
+    return err((kdb.RdbBeCantCreateDataDir, ""))

-  let
-    cfs = @[initColFamilyDescriptor KvtFamily]
-    opts = defaultDbOptions()
-  opts.setMaxOpenFiles(openMax)
+  # Expand argument `opts` to rocksdb options
+  let (cfOpts, dbOpts) = (opts.getCFInitOptions, opts.getDbInitOptions)

-  # Reserve a family corner for `Kvt` on the database
-  let baseDb = openRocksDb(dataDir, opts, columnFamilies=cfs).valueOr:
-    let errSym = RdbBeDriverInitError
-    when extraTraceMessages:
-      debug logTxt "init failed", dataDir, openMax, error=errSym, info=error
-    return err((errSym, error))
+  # Column family names to allocate when opening the database.
+  let cfs = KvtCFs.mapIt(($it).initColFamilyDescriptor cfOpts)

-  # Initialise `Kvt` family
-  rdb.store = baseDb.withColFamily(KvtFamily).valueOr:
-    let errSym = RdbBeDriverInitError
-    when extraTraceMessages:
-      debug logTxt "init failed", dataDir, openMax, error=errSym, info=error
-    return err((errSym, error))
+  # Open database for the extended family :)
+  let baseDb = openRocksDb(dataDir, dbOpts, columnFamilies=cfs).valueOr:
+    raiseAssert initFailed & " cannot create base descriptor: " & error
+
+  # Initialise column handlers (this stores implicitly `baseDb`)
+  for col in KvtCFs:
+    rdb.store[col] = baseDb.withColFamily($col).valueOr:
+      raiseAssert initFailed & " cannot initialise " &
+        $col & " descriptor: " & error
   ok()

 proc init*(
     rdb: var RdbInst;
-    store: ColFamilyReadWrite;
-      ) =
-  ## Piggyback on other database
-  rdb.store = store # that's it
+    adb: AristoDbRef;
+    opts: DbOptions;
+      ): Result[void,(KvtError,string)] =
+  ## Initialise column handlers piggy-backing on the `Aristo` backend.
+  ##
+  let
+    cfOpts = opts.getCFInitOptions()
+    iCfs = KvtCFs.toSeq.mapIt(initColFamilyDescriptor($it, cfOpts))
+    oCfs = adb.reinit(iCfs).valueOr:
+      return err((RdbBeHostError,$error))
+
+  # Collect column family descriptors (this stores implicitly `baseDb`)
+  for n in KvtCFs:
+    assert oCfs[n.ord].name != "" # debugging only
+    rdb.store[n] = oCfs[n.ord]
+
+  ok()

 proc destroy*(rdb: var RdbInst; flush: bool) =
   ## Destructor (no need to do anything if piggybacked)
   if 0 < rdb.basePath.len:
-    rdb.store.db.close()
+    rdb.baseDb.close()

     if flush:
       try:

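Seen in isolation, the option expansion maps the shared `DbOptions` record onto rocksdb settings; a sketch (both helpers are private to this module, and `DbOptions.init()` defaults are assumed as in the tests below):

    let opts = DbOptions.init()            # nimbus-level settings
    let cfOpts = opts.getCFInitOptions()   # per column family: write buffer
    let dbOpts = opts.getDbInitOptions()   # database-wide: open files, caches
    # both then feed openRocksDb(dataDir, dbOpts, columnFamilies=...)
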
@@ -50,7 +50,7 @@ when extraTraceMessages:

 proc begin*(rdb: var RdbInst) =
   if rdb.session.isNil:
-    rdb.session = rdb.store.openWriteBatch()
+    rdb.session = rdb.baseDb.openWriteBatch()

 proc rollback*(rdb: var RdbInst) =
   if not rdb.session.isClosed():

@@ -59,7 +59,7 @@ proc rollback*(rdb: var RdbInst) =
 proc commit*(rdb: var RdbInst): Result[void,(KvtError,string)] =
   if not rdb.session.isClosed():
     defer: rdb.disposeSession()
-    rdb.store.write(rdb.session).isOkOr:
+    rdb.baseDb.write(rdb.session).isOkOr:
       const errSym = RdbBeDriverWriteError
       when extraTraceMessages:
        trace logTxt "commit", error=errSym, info=error

@@ -70,16 +70,15 @@ proc put*(
     rdb: RdbInst;
     data: openArray[(Blob,Blob)];
       ): Result[void,(Blob,KvtError,string)] =
-  let dsc = rdb.session
   for (key,val) in data:
     if val.len == 0:
-      dsc.delete(key, rdb.store.name).isOkOr:
+      rdb.session.delete(key, $KvtGeneric).isOkOr:
         const errSym = RdbBeDriverDelError
         when extraTraceMessages:
           trace logTxt "del", key, error=errSym, info=error
         return err((key,errSym,error))
     else:
-      dsc.put(key, val, rdb.store.name).isOkOr:
+      rdb.session.put(key, val, $KvtGeneric).isOkOr:
         const errSym = RdbBeDriverPutError
         when extraTraceMessages:
           trace logTxt "put", key, error=errSym, info=error

@@ -38,7 +38,7 @@ iterator walk*(rdb: RdbInst): tuple[key: Blob, data: Blob] =
   ##
   ## Non-decodable entries are stepped over and ignored.
   block walkBody:
-    let rit = rdb.store.openIterator().valueOr:
+    let rit = rdb.store[KvtGeneric].openIterator().valueOr:
       when extraTraceMessages:
         trace logTxt "walk", pfx="all", error
       break walkBody

@@ -16,6 +16,7 @@
 import
   results,
   ./kvt_tx/[tx_fork, tx_frame, tx_stow],
+  ./kvt_init/memory_only,
   ./kvt_desc

 # ------------------------------------------------------------------------------

@@ -177,6 +178,12 @@ proc persist*(
   ## and the staged data area is cleared. While performing this last step,
   ## the recovery journal is updated (if available.)
   ##
+  # Register for saving if piggybacked on remote database
+  if db.backend.kind == BackendRdbTriggered:
+    ? db.txStowOk(persistent=true)
+    ? db.backend.setWrReqFn db
+    return err(TxPersistDelayed)
+
   db.txStow(persistent=true)

 proc stow*(

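Callers of `persist()` therefore have to treat `TxPersistDelayed` as success-with-deferral. A hypothetical wrapper sketch (`persistOrQueue` is not part of the diff):

    proc persistOrQueue(db: KvtDbRef): Result[void,KvtError] =
      db.persist().isOkOr:
        if error != TxPersistDelayed:  # a genuine failure
          return err(error)
      ok()                             # saved, or queued for the write cycle
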
@@ -16,12 +16,27 @@
 import
   std/tables,
   results,
-  ".."/[kvt_desc, kvt_filter]
+  ".."/[kvt_desc, kvt_delta]

 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

+proc txStowOk*(
+    db: KvtDbRef;                     # Database
+    persistent: bool;                 # Stage only unless `true`
+      ): Result[void,KvtError] =
+  ## Verify that `txStow()` can go ahead
+  if not db.txRef.isNil:
+    return err(TxPendingTx)
+  if 0 < db.stack.len:
+    return err(TxStackGarbled)
+
+  if persistent and not db.deltaUpdateOk():
+    return err(TxBackendNotWritable)
+
+  ok()
+
 proc txStow*(
     db: KvtDbRef;                     # Database
     persistent: bool;                 # Stage only unless `true`

@@ -32,21 +47,15 @@ proc txStow*(
   ## If there is no backend the function returns immediately with an error.
   ## The same happens if there is a pending transaction.
   ##
-  if not db.txRef.isNil:
-    return err(TxPendingTx)
-  if 0 < db.stack.len:
-    return err(TxStackGarbled)
-
-  if persistent and not db.filterUpdateOk():
-    return err(TxBackendNotWritable)
+  ? db.txStowOk persistent

   if 0 < db.top.delta.sTab.len:
-    db.filterMerge db.top.delta
+    db.deltaMerge db.top.delta
     db.top.delta = LayerDeltaRef()

   if persistent:
-    # Move `roFilter` data into persistent tables
-    ? db.filterUpdate()
+    # Move `balancer` data into persistent tables
+    ? db.deltaUpdate()

   ok()

@@ -41,8 +41,8 @@ proc getBe*(
     key: openArray[byte];             # Key of database record
       ): Result[Blob,KvtError] =
   ## Get the vertex from the (filtered) backend if available.
-  if not db.roFilter.isNil:
-    db.roFilter.sTab.withValue(@key, w):
+  if not db.balancer.isNil:
+    db.balancer.sTab.withValue(@key, w):
       if w[].len == 0:
         return err(GetNotFound)
       return ok(w[])

@@ -20,6 +20,7 @@ iterator undumpBlocksEra1*(
     dir: string,
     least = low(uint64),       # First block to extract
     stopAfter = high(uint64),  # Last block to extract
+    doAssertOk = false;
       ): seq[EthBlock] =
   let db = Era1DbRef.init(dir, "mainnet").expect("Era files present")
   defer:

@@ -32,6 +33,7 @@ iterator undumpBlocksEra1*(

   for i in 0 ..< stopAfter:
     var bck = db.getEthBlock(least + i).valueOr:
-      doAssert i > 0, "expected at least one block"
+      if doAssertOk:
+        doAssert i > 0, "expected at least one block"
       break

@@ -288,7 +288,7 @@ proc testDistributedAccess*(
   xCheck db2.balancer != db3.balancer

   # Clause (11) from `aristo/README.md` example
-  db2.reCentre()
+  discard db2.reCentre()
   block:
     let rc = db2.persist()
     xCheckRc rc.error == 0

@@ -321,7 +321,7 @@ proc testDistributedAccess*(
   dy.cleanUp()

   # Build clause (12) from `aristo/README.md` example
-  db2.reCentre()
+  discard db2.reCentre()
   block:
     let rc = db2.persist()
     xCheckRc rc.error == 0

@@ -159,8 +159,9 @@ proc initRunnerDB(
     case dbType:
     of AristoDbMemory: AristoDbMemory.newCoreDbRef()
     of AristoDbRocks: AristoDbRocks.newCoreDbRef(path, DbOptions.init())
+    of AristoDbDualRocks: AristoDbDualRocks.newCoreDbRef(path, DbOptions.init())
     of AristoDbVoid: AristoDbVoid.newCoreDbRef()
-    else: raiseAssert "Oops"
+    of Ooops: raiseAssert "Ooops"

   when false: # or true:
     setDebugLevel()

@@ -336,7 +337,7 @@ when isMainModule:

   setErrorLevel()

-  when true: # and false:
+  when true and false:
     false.coreDbMain()

   # This one uses the readily available dump: `bulkTest0` and some huge replay

@@ -353,6 +354,7 @@ when isMainModule:
   for n,capture in sampleList:
     noisy.profileSection("@sample #" & $n, state):
       noisy.chainSyncRunner(
+        #dbType = AristoDbDualRocks,
         capture = capture,
         pruneHistory = true,
         #profilingOk = true,

vendor/nim-rocksdb (vendored submodule)
@@ -1 +1 @@
-Subproject commit a84cf5b8960756acb9027a80ba2b1e7cd059e206
+Subproject commit c5bbf831145d7dd4d60420d69ce9eb601ccd5be2