Aristo use rocksdb cf instead of key pfx (#2332)
* Use RocksDb column families instead of a prefixed single column
  why: Better performance
* Use structural objects `VertexRef` and `HashKey` in LRU cache for RocksDb
  why: Avoids repeated de/serialisation
This commit is contained in:
parent
c72d6aa5d6
commit
a347291413
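The change replaces the former single-column key scheme, where every record key
was a 9-byte blob of a one-byte table prefix followed by the 8-byte big-endian
record id, with one RocksDB column family per table, so the prefix byte
disappears entirely. A minimal sketch of the two key layouts, assuming only
`stew/endians2` (the helper names `oldKey`/`newKey` are illustrative; the
9-byte layout matches the removed `toRdbKey` further down):

    import stew/endians2

    # Old scheme: one column, 9-byte keys <pfx-byte> & <id.toBytesBE>
    type RdbKey = array[9, byte]

    func oldKey(id: uint64; pfx: byte): RdbKey =
      result[0] = pfx
      let idBytes = id.toBytesBE
      for i in 0 .. 7:
        result[i + 1] = idBytes[i]

    # New scheme: one column family per table, plain 8-byte keys
    func newKey(id: uint64): array[8, byte] =
      id.toBytesBE

With column families, RocksDB keeps each table in its own physical structure,
so point lookups and iteration no longer pay for the shared key space.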
@ -318,8 +318,8 @@ data, for RLP encoded or for unstructured data as defined below.
   where each bitmask(2)-word array entry defines the length of
   the preceeding data fields:
   00 -- field is missing
-  01 -- field lengthh is 8 bytes
-  10 -- field lengthh is 32 bytes
+  01 -- field length is 8 bytes
+  10 -- field length is 32 bytes

   Apparently, entries 0 and and 2 of the *4 x bitmask(2)* word array cannot have
   the two bit value *10* as they refer to the nonce and the storage ID data

@ -130,10 +130,6 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
     # been deleted.
     for vid in max(topVidBe + 1, VertexID(LEAST_FREE_VID)) .. vidTuvBe:
       if db.getVtxBE(vid).isOk or db.getKeyBE(vid).isOk:
-        echo ">>>",
-          " topVidBe=", topVidBe,
-          " vidTuvBe=", vidTuvBe,
-          " vid=", vid
         return err((vid,CheckBeGarbledVTop))

   # Check layer cache against backend

@ -195,10 +191,6 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
     for vid in max(db.vTop + 1, VertexID(LEAST_FREE_VID)) .. topVidCache:
       if db.layersGetVtxOrVoid(vid).isValid or
          db.layersGetKeyOrVoid(vid).isValid:
-        echo ">>>",
-          " topVidCache=", topVidCache,
-          " vTop=", db.vTop,
-          " vid=", vid
         return err((db.vTop,CheckBeCacheGarbledVTop))

   ok()

@ -241,11 +241,16 @@ type
     # RocksDB backend
     RdbBeCantCreateDataDir
     RdbBeCantCreateTmpDir
-    RdbBeDriverDelError
-    RdbBeDriverGetError
+    RdbBeDriverDelAdmError
+    RdbBeDriverDelKeyError
+    RdbBeDriverDelVtxError
+    RdbBeDriverGetAdmError
+    RdbBeDriverGetKeyError
+    RdbBeDriverGetVtxError
     RdbBeDriverGuestError
-    RdbBeDriverInitError
-    RdbBeDriverPutError
+    RdbBeDriverPutAdmError
+    RdbBeDriverPutVtxError
+    RdbBeDriverPutKeyError
     RdbBeDriverWriteError
     RdbGuestInstanceUnsupported
     RdbHashKeyExpected

@ -79,14 +79,13 @@ proc getVtxFn(db: RdbBackendRef): GetVtxFn =
     proc(vid: VertexID): Result[VertexRef,AristoError] =

       # Fetch serialised data record
-      let data = db.rdb.getVtx(vid.uint64).valueOr:
+      let vtx = db.rdb.getVtx(vid).valueOr:
         when extraTraceMessages:
           trace logTxt "getVtxFn() failed", vid, error=error[0], info=error[1]
         return err(error[0])

-      # Decode data record
-      if 0 < data.len:
-        return data.deblobify VertexRef
+      if vtx.isValid:
+        return ok(vtx)

       err(GetVtxNotFound)

@ -95,16 +94,13 @@ proc getKeyFn(db: RdbBackendRef): GetKeyFn =
     proc(vid: VertexID): Result[HashKey,AristoError] =

       # Fetch serialised data record
-      let data = db.rdb.getKey(vid.uint64).valueOr:
+      let key = db.rdb.getKey(vid).valueOr:
         when extraTraceMessages:
           trace logTxt "getKeyFn: failed", vid, error=error[0], info=error[1]
         return err(error[0])

-      # Decode data record
-      if 0 < data.len:
-        let lid = HashKey.fromBytes(data).valueOr:
-          return err(RdbHashKeyExpected)
-        return ok lid
+      if key.isValid:
+        return ok(key)

       err(GetKeyNotFound)

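Both getters now receive ready-made structural objects (`VertexRef`,
`HashKey`) from the cache layer below, so the de/serialisation step drops out
of this module; only the not-found case remains to be mapped. A reduced sketch
of the `results.valueOr` pattern these procs rely on (the `fetch` proc and the
error names are made up for illustration):

    import results

    type MyErr = enum NotFound, Garbled

    proc fetch(good: bool): Result[int,MyErr] =
      if good: ok(42) else: err(Garbled)

    proc use(): Result[int,MyErr] =
      let v = fetch(true).valueOr:
        return err(error)   # `error` names the captured error value
      ok(v)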
@ -113,7 +109,7 @@ proc getTuvFn(db: RdbBackendRef): GetTuvFn =
     proc(): Result[VertexID,AristoError]=

       # Fetch serialised data record.
-      let data = db.rdb.getByPfx(AdmPfx, AdmTabIdTuv.uint64).valueOr:
+      let data = db.rdb.getAdm(AdmTabIdTuv).valueOr:
         when extraTraceMessages:
           trace logTxt "getTuvFn: failed", error=error[0], info=error[1]
         return err(error[0])

@ -123,14 +119,14 @@ proc getTuvFn(db: RdbBackendRef): GetTuvFn =
         return ok VertexID(0)

       # Decode data record
-      data.deblobify VertexID
+      result = data.deblobify VertexID

 proc getLstFn(db: RdbBackendRef): GetLstFn =
   result =
     proc(): Result[SavedState,AristoError]=

       # Fetch serialised data record.
-      let data = db.rdb.getByPfx(AdmPfx, AdmTabIdLst.uint64).valueOr:
+      let data = db.rdb.getAdm(AdmTabIdLst).valueOr:
         when extraTraceMessages:
           trace logTxt "getLstFn: failed", error=error[0], info=error[1]
         return err(error[0])

@ -151,27 +147,10 @@ proc putVtxFn(db: RdbBackendRef): PutVtxFn =
     proc(hdl: PutHdlRef; vrps: openArray[(VertexID,VertexRef)]) =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
-        # Collect batch session arguments
-        var batch: seq[(uint64,Blob)]
-        for (vid,vtx) in vrps:
-          if vtx.isValid:
-            let rc = vtx.blobify()
-            if rc.isErr:
-              hdl.error = TypedPutHdlErrRef(
-                pfx: VtxPfx,
-                vid: vid,
-                code: rc.error)
-              return
-            batch.add (vid.uint64, rc.value)
-          else:
-            batch.add (vid.uint64, EmptyBlob)
-
-        # Stash batch session data via LRU cache
-        db.rdb.putVtx(batch).isOkOr:
+        db.rdb.putVtx(vrps).isOkOr:
           hdl.error = TypedPutHdlErrRef(
             pfx: VtxPfx,
-            vid: VertexID(error[0]),
+            vid: error[0],
             code: error[1],
             info: error[2])

@ -180,20 +159,10 @@ proc putKeyFn(db: RdbBackendRef): PutKeyFn =
     proc(hdl: PutHdlRef; vkps: openArray[(VertexID,HashKey)]) =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
-        # Collect batch session arguments
-        var batch: seq[(uint64,Blob)]
-        for (vid,key) in vkps:
-          if key.isValid:
-            batch.add (vid.uint64, @(key.data))
-          else:
-            batch.add (vid.uint64, EmptyBlob)
-
-        # Stash batch session data via LRU cache
-        db.rdb.putKey(batch).isOkOr:
+        db.rdb.putKey(vkps).isOkOr:
           hdl.error = TypedPutHdlErrRef(
             pfx: KeyPfx,
-            vid: VertexID(error[0]),
+            vid: error[0],
             code: error[1],
             info: error[2])

@ -203,12 +172,14 @@ proc putTuvFn(db: RdbBackendRef): PutTuvFn =
       let hdl = hdl.getSession db
       if hdl.error.isNil:
         if vs.isValid:
-          db.rdb.putByPfx(AdmPfx, @[(AdmTabIdTuv.uint64, vs.blobify)]).isOkOr:
+          db.rdb.putAdm(AdmTabIdTuv, vs.blobify).isOkOr:
             hdl.error = TypedPutHdlErrRef(
               pfx: AdmPfx,
               aid: AdmTabIdTuv,
               code: error[1],
               info: error[2])
+            return

 proc putLstFn(db: RdbBackendRef): PutLstFn =
   result =

@ -221,7 +192,7 @@ proc putLstFn(db: RdbBackendRef): PutLstFn =
             aid: AdmTabIdLst,
             code: error)
           return
-        db.rdb.putByPfx(AdmPfx, @[(AdmTabIdLst.uint64, data)]).isOkOr:
+        db.rdb.putAdm(AdmTabIdLst, data).isOkOr:
           hdl.error = TypedPutHdlErrRef(
             pfx: AdmPfx,
             aid: AdmTabIdLst,

@ -243,6 +214,7 @@ proc putEndFn(db: RdbBackendRef): PutEndFn =
           pfx=AdmPfx, aid=hdl.error.aid.uint64, error=hdl.error.code
         of Oops: trace logTxt "putEndFn: oops",
           error=hdl.error.code
+      db.rdb.rollback()
       return err(hdl.error.code)

     # Commit session

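The added `db.rdb.rollback()` matters because the lower-level `putVtx`/`putKey`
drivers update the read caches optimistically while queueing the write batch;
when a session is abandoned on error, rolling back drops the batch together
with the now stale LRU entries (see the "Caller must `rollback()`" comments in
the driver code further down).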
@ -317,28 +289,32 @@ iterator walk*(
     ): tuple[pfx: StorageType, xid: uint64, data: Blob] =
   ## Walk over all key-value pairs of the database.
   ##
-  ## Non-decodable entries are stepped over while the counter `n` of the
-  ## yield record is still incremented.
-  for w in be.rdb.walk:
-    yield w
+  ## Non-decodable entries are ignored
+  ##
+  for (xid, data) in be.rdb.walkAdm:
+    yield (AdmPfx, xid, data)
+  for (vid, data) in be.rdb.walkVtx:
+    yield (VtxPfx, vid, data)
+  for (vid, data) in be.rdb.walkKey:
+    yield (KeyPfx, vid, data)

 iterator walkVtx*(
     be: RdbBackendRef;
     ): tuple[vid: VertexID, vtx: VertexRef] =
   ## Variant of `walk()` iteration over the vertex sub-table.
-  for (xid, data) in be.rdb.walk VtxPfx:
+  for (vid, data) in be.rdb.walkVtx:
     let rc = data.deblobify VertexRef
     if rc.isOk:
-      yield (VertexID(xid), rc.value)
+      yield (VertexID(vid), rc.value)

 iterator walkKey*(
     be: RdbBackendRef;
     ): tuple[vid: VertexID, key: HashKey] =
   ## Variant of `walk()` iteration over the Markle hash sub-table.
-  for (xid, data) in be.rdb.walk KeyPfx:
+  for (vid, data) in be.rdb.walkKey:
     let lid = HashKey.fromBytes(data).valueOr:
       continue
-    yield (VertexID(xid), lid)
+    yield (VertexID(vid), lid)

 # ------------------------------------------------------------------------------
 # End

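With the per-table iterators in place, the combined `walk()` becomes a plain
concatenation of the three column walkers: records are now delivered grouped
by table (admin, vertex, hash key) and ordered by id within each group, rather
than interleaved by the old prefixed key order.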
@ -23,10 +23,12 @@ import

 type
   RdbInst* = object
-    store*: ColFamilyReadWrite                 ## Rocks DB database handler
+    admCol*: ColFamilyReadWrite                ## Admin column family handler
+    vtxCol*: ColFamilyReadWrite                ## Vertex column family handler
+    keyCol*: ColFamilyReadWrite                ## Hash key column family handler
     session*: WriteBatchRef                    ## For batched `put()`
-    rdKeyLru*: KeyedQueue[RdbKey,Blob]         ## Read cache
-    rdVtxLru*: KeyedQueue[RdbKey,Blob]         ## Read cache
+    rdKeyLru*: KeyedQueue[VertexID,HashKey]    ## Read cache
+    rdVtxLru*: KeyedQueue[VertexID,VertexRef]  ## Read cache
     basePath*: string                          ## Database directory
     noFq*: bool                                ## No filter queues available

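Switching the `KeyedQueue` value types from raw `Blob` to `HashKey` and
`VertexRef` means a cache hit now returns an already decoded object; before,
every hit still had to run `deblobify`/`fromBytes`. A small usage sketch of
the LRU helpers from `stew/keyed_queue` (the string payload stands in for the
decoded object):

    import stew/keyed_queue

    var lru: KeyedQueue[uint64, string]
    const maxSize = 3

    proc cachedGet(id: uint64): string =
      var rc = lru.lruFetch(id)          # hit: re-queues entry as most recently used
      if rc.isOk:
        return rc.value
      let value = "decoded-" & $id       # stand-in for fetch + deblobify
      lru.lruAppend(id, value, maxSize)  # returns the value, evicts LRU when full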
@ -35,15 +37,21 @@ type

   # Alien interface
   RdbGuest* = enum
+    ## The guest CF was worth a try, but there are better solutions and this
+    ## item will be removed in future.
     GuestFamily0 = "Guest0"              ## Guest family (e.g. for Kvt)
     GuestFamily1 = "Guest1"              ## Ditto
     GuestFamily2 = "Guest2"              ## Ditto

   RdbGuestDbRef* = ref object of GuestDbRef
+    ## The guest CF was worth a try, but there are better solutions and this
+    ## item will be removed in future.
     guestDb*: ColFamilyReadWrite         ## Pigiback feature references

 const
-  AristoFamily* = "Aristo"               ## RocksDB column family
+  AdmCF* = "AdmAri"                      ## Admin column family name
+  VtxCF* = "VtxAri"                      ## Vertex column family name
+  KeyCF* = "KeyAri"                      ## Hash key column family name
   BaseFolder* = "nimbus"                 ## Same as for Legacy DB
   DataFolder* = "aristo"                 ## Legacy DB has "data"
   RdKeyLruMaxSize* = 4096                ## Max size of read cache for keys

@ -56,6 +64,9 @@ const
 template logTxt*(info: static[string]): static[string] =
   "RocksDB/" & info

+template baseDb*(rdb: RdbInst): RocksDbReadWriteRef =
+  rdb.admCol.db
+
 func baseDir*(rdb: RdbInst): string =
   rdb.basePath / BaseFolder

@ -63,10 +74,12 @@ func baseDir*(rdb: RdbInst): string =
 func dataDir*(rdb: RdbInst): string =
   rdb.baseDir / DataFolder

-func toRdbKey*(id: uint64; pfx: StorageType): RdbKey =
-  let idKey = id.toBytesBE
-  result[0] = pfx.ord.byte
-  copyMem(addr result[1], unsafeAddr idKey, sizeof idKey)
+template toOpenArray*(xid: AdminTabID): openArray[byte] =
+  xid.uint64.toBytesBE.toOpenArray(0,7)
+
+template toOpenArray*(vid: VertexID): openArray[byte] =
+  vid.uint64.toBytesBE.toOpenArray(0,7)

 # ------------------------------------------------------------------------------
 # End

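The two `toOpenArray` templates replace the prefixed 9-byte `RdbKey`: each
column family now keys records by the bare 8-byte big-endian id. Big-endian
matters because RocksDB orders keys lexicographically, which then coincides
with numeric id order; a quick check, assuming only `stew/endians2`:

    import stew/endians2

    assert 0x0102030405060708'u64.toBytesBE == [1'u8, 2, 3, 4, 5, 6, 7, 8]
    # lexicographic byte order == numeric order:
    assert @(255'u64.toBytesBE) < @(256'u64.toBytesBE)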
@ -18,7 +18,7 @@ import
   rocksdb,
   results,
   stew/keyed_queue,
-  ../../aristo_desc,
+  ../../[aristo_blobify, aristo_desc],
   ../init_common,
   ./rdb_desc

@ -33,15 +33,19 @@ when extraTraceMessages:
   logScope:
     topics = "aristo-rocksdb"

-proc getImpl(rdb: RdbInst; key: RdbKey): Result[Blob,(AristoError,string)] =
+# ------------------------------------------------------------------------------
+# Public functions
+# ------------------------------------------------------------------------------
+
+proc getAdm*(rdb: RdbInst; xid: AdminTabID): Result[Blob,(AristoError,string)] =
   var res: Blob
   let onData = proc(data: openArray[byte]) =
     res = @data

-  let gotData = rdb.store.get(key, onData).valueOr:
-    const errSym = RdbBeDriverGetError
+  let gotData = rdb.admCol.get(xid.toOpenArray, onData).valueOr:
+    const errSym = RdbBeDriverGetAdmError
     when extraTraceMessages:
-      trace logTxt "get", pfx=key[0], error=errSym, info=error
+      trace logTxt "getAdm", xid, error=errSym, info=error
     return err((errSym,error))

   # Correct result if needed

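`getAdm` shows the read shape shared by all three getters: a callback copies
the value out of RocksDB's internal buffer, and the returned flag
distinguishes a missing entry from an empty one. Condensed into a generic
sketch (the `getCol` helper is hypothetical; the `get` callback signature is
as used above):

    import rocksdb, results

    type Blob = seq[byte]

    proc getCol(cf: ColFamilyReadWrite;
                key: openArray[byte]): Result[Blob,string] =
      var res: Blob
      let onData = proc(data: openArray[byte]) =
        res = @data                  # copy out of RocksDB's buffer
      let gotData = cf.get(key, onData).valueOr:
        return err(error)            # driver error string
      if not gotData:
        res = @[]                    # missing entry -> empty blob
      ok move(res)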
@ -49,46 +53,68 @@ proc getAdm*(rdb: RdbInst; xid: AdminTabID): Result[Blob,(AristoError,string)] =
     res = EmptyBlob
   ok move(res)

-# ------------------------------------------------------------------------------
-# Public functions
-# ------------------------------------------------------------------------------
-
-proc getByPfx*(
-    rdb: RdbInst;
-    pfx: StorageType;
-    xid: uint64,
-    ): Result[Blob,(AristoError,string)] =
-  rdb.getImpl(xid.toRdbKey pfx)
-
-proc getKey*(rdb: var RdbInst; xid: uint64): Result[Blob,(AristoError,string)] =
+proc getKey*(
+    rdb: var RdbInst;
+    vid: VertexID;
+    ): Result[HashKey,(AristoError,string)] =
   # Try LRU cache first
-  let
-    key = xid.toRdbKey KeyPfx
-  var
-    rc = rdb.rdKeyLru.lruFetch(key)
+  var rc = rdb.rdKeyLru.lruFetch(vid)
   if rc.isOK:
     return ok(move(rc.value))

   # Otherwise fetch from backend database
-  let res = ? rdb.getImpl(key)
+  var res: Blob
+  let onData = proc(data: openArray[byte]) =
+    res = @data
+
+  let gotData = rdb.keyCol.get(vid.toOpenArray, onData).valueOr:
+    const errSym = RdbBeDriverGetKeyError
+    when extraTraceMessages:
+      trace logTxt "getKey", vid, error=errSym, info=error
+    return err((errSym,error))
+
+  # Correct result if needed
+  let key = block:
+    if gotData:
+      HashKey.fromBytes(res).valueOr:
+        return err((RdbHashKeyExpected,""))
+    else:
+      VOID_HASH_KEY

   # Update cache and return
-  ok rdb.rdKeyLru.lruAppend(key, res, RdKeyLruMaxSize)
+  ok rdb.rdKeyLru.lruAppend(vid, key, RdKeyLruMaxSize)

-proc getVtx*(rdb: var RdbInst; xid: uint64): Result[Blob,(AristoError,string)] =
+proc getVtx*(
+    rdb: var RdbInst;
+    vid: VertexID;
+    ): Result[VertexRef,(AristoError,string)] =
   # Try LRU cache first
-  let
-    key = xid.toRdbKey VtxPfx
-  var
-    rc = rdb.rdVtxLru.lruFetch(key)
+  var rc = rdb.rdVtxLru.lruFetch(vid)
   if rc.isOK:
     return ok(move(rc.value))

   # Otherwise fetch from backend database
-  let res = ? rdb.getImpl(key)
+  var res: Blob
+  let onData = proc(data: openArray[byte]) =
+    res = @data
+
+  let gotData = rdb.vtxCol.get(vid.toOpenArray, onData).valueOr:
+    const errSym = RdbBeDriverGetVtxError
+    when extraTraceMessages:
+      trace logTxt "getVtx", vid, error=errSym, info=error
+    return err((errSym,error))
+
+  var vtx = VertexRef(nil)
+  if gotData:
+    let rc = res.deblobify VertexRef
+    if rc.isErr:
+      return err((rc.error,""))
+    vtx = rc.value

   # Update cache and return
-  ok rdb.rdVtxLru.lruAppend(key, res, RdVtxLruMaxSize)
+  ok rdb.rdVtxLru.lruAppend(vid, vtx, RdVtxLruMaxSize)

 # ------------------------------------------------------------------------------
 # End

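Note the negative caching here: a miss from the database is stored in the LRU
as `VOID_HASH_KEY` respectively `VertexRef(nil)`, so repeated lookups of an
absent id are answered from the cache without touching RocksDB, and
`getVtxFn`/`getKeyFn` above translate that sentinel into
`GetVtxNotFound`/`GetKeyNotFound`.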
@ -21,17 +21,6 @@ import
   ./rdb_desc,
   ../../../opts

-const
-  extraTraceMessages = false
-    ## Enable additional logging noise
-
-when extraTraceMessages:
-  import
-    chronicles
-
-  logScope:
-    topics = "aristo-rocksdb"
-
 # ------------------------------------------------------------------------------
 # Public constructor
 # ------------------------------------------------------------------------------

@ -43,11 +32,12 @@ proc init*(
     ): Result[void,(AristoError,string)] =
   ## Constructor c ode inspired by `RocksStoreRef.init()` from
   ## kvstore_rocksdb.nim
+  const initFailed = "RocksDB/init() failed"
+
   rdb.basePath = basePath

   let
     dataDir = rdb.dataDir

   try:
     dataDir.createDir
   except OSError, IOError:

@ -60,7 +50,9 @@ proc init*(
   cfOpts.setWriteBufferSize(opts.writeBufferSize)

   let
-    cfs = @[initColFamilyDescriptor(AristoFamily, cfOpts)] &
+    cfs = @[initColFamilyDescriptor(AdmCF, cfOpts),
+            initColFamilyDescriptor(VtxCF, cfOpts),
+            initColFamilyDescriptor(KeyCF, cfOpts)] &
           RdbGuest.mapIt(initColFamilyDescriptor($it, cfOpts))
     dbOpts = defaultDbOptions()

@ -77,17 +69,15 @@ proc init*(

   # Reserve a family corner for `Aristo` on the database
   let baseDb = openRocksDb(dataDir, dbOpts, columnFamilies=cfs).valueOr:
-    let errSym = RdbBeDriverInitError
-    when extraTraceMessages:
-      trace logTxt "init failed", dataDir, openMax, error=errSym, info=error
-    return err((errSym, error))
+    raiseAssert initFailed & " cannot create base descriptor: " & error

-  # Initialise `Aristo` family
-  rdb.store = baseDb.withColFamily(AristoFamily).valueOr:
-    let errSym = RdbBeDriverInitError
-    when extraTraceMessages:
-      trace logTxt "init failed", dataDir, openMax, error=errSym, info=error
-    return err((errSym, error))
+  # Initialise column handlers (this stores implicitely `baseDb`)
+  rdb.admCol = baseDb.withColFamily(AdmCF).valueOr:
+    raiseAssert initFailed & " cannot initialise AdmCF descriptor: " & error
+  rdb.vtxCol = baseDb.withColFamily(VtxCF).valueOr:
+    raiseAssert initFailed & " cannot initialise VtxCF descriptor: " & error
+  rdb.keyCol = baseDb.withColFamily(KeyCF).valueOr:
+    raiseAssert initFailed & " cannot initialise KeyCF descriptor: " & error

   ok()

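Opening the database now registers every column family up front (RocksDB
requires all existing families to be listed at open time) and then pulls one
`ColFamilyReadWrite` handle per table from the shared base descriptor. A
reduced sketch of that sequence using the same nim-rocksdb calls as above
(the path and the `defaultColFamilyOptions` choice are made up for the
example):

    import std/sequtils, rocksdb

    let
      cfOpts = defaultColFamilyOptions()
      cfs = @["AdmAri", "VtxAri", "KeyAri"]
        .mapIt(initColFamilyDescriptor(it, cfOpts))
      baseDb = openRocksDb("/tmp/aristo-example", defaultDbOptions(),
                           columnFamilies = cfs)
        .expect "open with column families"
      admCol = baseDb.withColFamily("AdmAri").expect "admin handle"
      vtxCol = baseDb.withColFamily("VtxAri").expect "vertex handle"
      keyCol = baseDb.withColFamily("KeyAri").expect "hash key handle"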
@ -95,16 +85,17 @@ proc initGuestDb*(
     rdb: RdbInst;
     instance: int;
     ): Result[RootRef,(AristoError,string)] =
-  # Initialise `Guest` family
+  ## Initialise `Guest` family
+  ##
+  ## Thus was a worth a try, but there are better solutions and this item
+  ## will be removed in future.
+  ##
   if high(RdbGuest).ord < instance:
     return err((RdbGuestInstanceUnsupported,""))
   let
     guestSym = $RdbGuest(instance)
-    guestDb = rdb.store.db.withColFamily(guestSym).valueOr:
-      let errSym = RdbBeDriverGuestError
-      when extraTraceMessages:
-        trace logTxt "guestDb failed", error=errSym, info=error
-      return err((errSym, error))
+    guestDb = rdb.baseDb.withColFamily(guestSym).valueOr:
+      raiseAssert "RocksDb/initGuestDb() failed: " & error

   ok RdbGuestDbRef(
     beKind: BackendRocksDB,

@ -113,7 +104,7 @@ proc initGuestDb*(

 proc destroy*(rdb: var RdbInst; flush: bool) =
   ## Destructor
-  rdb.store.db.close()
+  rdb.baseDb.close()

   if flush:
     try:

@ -17,8 +17,8 @@ import
   eth/common,
   rocksdb,
   results,
-  stew/[endians2, keyed_queue],
-  ../../aristo_desc,
+  stew/keyed_queue,
+  ../../[aristo_blobify, aristo_desc],
   ../init_common,
   ./rdb_desc

@ -40,37 +40,13 @@ proc disposeSession(rdb: var RdbInst) =
   rdb.session.close()
   rdb.session = WriteBatchRef(nil)

-proc putImpl(
-    dsc: WriteBatchRef;
-    name: string;
-    key: RdbKey;
-    val: Blob;
-    ): Result[void,(uint64,AristoError,string)] =
-  if val.len == 0:
-    dsc.delete(key, name).isOkOr:
-      const errSym = RdbBeDriverDelError
-      let xid = uint64.fromBytesBE key[1 .. 8]
-      when extraTraceMessages:
-        trace logTxt "del",
-          pfx=StorageType(key[0]), xid, error=errSym, info=error
-      return err((xid,errSym,error))
-  else:
-    dsc.put(key, val, name).isOkOr:
-      const errSym = RdbBeDriverPutError
-      let xid = uint64.fromBytesBE key[1 .. 8]
-      when extraTraceMessages:
-        trace logTxt "put",
-          pfx=StorageType(key[0]), xid, error=errSym, info=error
-      return err((xid,errSym,error))
-  ok()
-
 # ------------------------------------------------------------------------------
 # Public functions
 # ------------------------------------------------------------------------------

 proc begin*(rdb: var RdbInst) =
   if rdb.session.isNil:
-    rdb.session = rdb.store.openWriteBatch()
+    rdb.session = rdb.baseDb.openWriteBatch()

 proc rollback*(rdb: var RdbInst) =
   if not rdb.session.isClosed():

@ -81,62 +57,103 @@ proc rollback*(rdb: var RdbInst) =
 proc commit*(rdb: var RdbInst): Result[void,(AristoError,string)] =
   if not rdb.session.isClosed():
     defer: rdb.disposeSession()
-    rdb.store.write(rdb.session).isOkOr:
+    rdb.baseDb.write(rdb.session).isOkOr:
       const errSym = RdbBeDriverWriteError
       when extraTraceMessages:
        trace logTxt "commit", error=errSym, info=error
       return err((errSym,error))
   ok()

-proc putByPfx*(
+proc putAdm*(
     rdb: var RdbInst;
-    pfx: StorageType;
-    data: openArray[(uint64,Blob)];
-    ): Result[void,(uint64,AristoError,string)] =
-  let
-    dsc = rdb.session
-    name = rdb.store.name
-  for (xid,val) in data:
-    dsc.putImpl(name, xid.toRdbKey pfx, val).isOkOr:
-      return err(error)
+    xid: AdminTabID;
+    data: Blob;
+    ): Result[void,(AdminTabID,AristoError,string)] =
+  let dsc = rdb.session
+  if data.len == 0:
+    dsc.delete(xid.toOpenArray, AdmCF).isOkOr:
+      const errSym = RdbBeDriverDelAdmError
+      when extraTraceMessages:
+        trace logTxt "putAdm()", xid, error=errSym, info=error
+      return err((xid,errSym,error))
+  else:
+    dsc.put(xid.toOpenArray, data, AdmCF).isOkOr:
+      const errSym = RdbBeDriverPutAdmError
+      when extraTraceMessages:
+        trace logTxt "putAdm()", xid, error=errSym, info=error
+      return err((xid,errSym,error))
   ok()

 proc putKey*(
     rdb: var RdbInst;
-    data: openArray[(uint64,Blob)];
-    ): Result[void,(uint64,AristoError,string)] =
-  let
-    dsc = rdb.session
-    name = rdb.store.name
-  for (xid,val) in data:
-    let key = xid.toRdbKey KeyPfx
-
-    # Update cache
-    if not rdb.rdKeyLru.lruUpdate(key, val):
-      discard rdb.rdKeyLru.lruAppend(key, val, RdKeyLruMaxSize)
-
-    # Store on write batch queue
-    dsc.putImpl(name, key, val).isOkOr:
-      return err(error)
+    data: openArray[(VertexID,HashKey)];
+    ): Result[void,(VertexID,AristoError,string)] =
+  let dsc = rdb.session
+  for (vid,key) in data:
+
+    if key.isValid:
+      dsc.put(vid.toOpenArray, key.data, KeyCF).isOkOr:
+        # Caller must `rollback()` which will flush the `rdKeyLru` cache
+        const errSym = RdbBeDriverPutKeyError
+        when extraTraceMessages:
+          trace logTxt "putKey()", vid, error=errSym, info=error
+        return err((vid,errSym,error))
+
+      # Update cache
+      if not rdb.rdKeyLru.lruUpdate(vid, key):
+        discard rdb.rdKeyLru.lruAppend(vid, key, RdKeyLruMaxSize)
+
+    else:
+      dsc.delete(vid.toOpenArray, KeyCF).isOkOr:
+        # Caller must `rollback()` which will flush the `rdKeyLru` cache
+        const errSym = RdbBeDriverDelKeyError
+        when extraTraceMessages:
+          trace logTxt "putKey()", vid, error=errSym, info=error
+        return err((vid,errSym,error))
+
+      # Update cache, vertex will most probably never be visited anymore
+      rdb.rdKeyLru.del vid
   ok()

 proc putVtx*(
     rdb: var RdbInst;
-    data: openArray[(uint64,Blob)];
-    ): Result[void,(uint64,AristoError,string)] =
-  let
-    dsc = rdb.session
-    name = rdb.store.name
-  for (xid,val) in data:
-    let key = xid.toRdbKey VtxPfx
-
-    # Update cache
-    if not rdb.rdVtxLru.lruUpdate(key, val):
-      discard rdb.rdVtxLru.lruAppend(key, val, RdVtxLruMaxSize)
-
-    # Store on write batch queue
-    dsc.putImpl(name, key, val).isOkOr:
-      return err(error)
+    data: openArray[(VertexID,VertexRef)];
+    ): Result[void,(VertexID,AristoError,string)] =
+  let dsc = rdb.session
+  for (vid,vtx) in data:
+
+    if vtx.isValid:
+      let rc = vtx.blobify()
+      if rc.isErr:
+        # Caller must `rollback()` which will flush the `rdVtxLru` cache
+        return err((vid,rc.error,""))
+
+      dsc.put(vid.toOpenArray, rc.value, VtxCF).isOkOr:
+        # Caller must `rollback()` which will flush the `rdVtxLru` cache
+        const errSym = RdbBeDriverPutVtxError
+        when extraTraceMessages:
+          trace logTxt "putVtx()", vid, error=errSym, info=error
+        return err((vid,errSym,error))
+
+      # Update cache
+      if not rdb.rdVtxLru.lruUpdate(vid, vtx):
+        discard rdb.rdVtxLru.lruAppend(vid, vtx, RdVtxLruMaxSize)
+
+    else:
+      dsc.delete(vid.toOpenArray, VtxCF).isOkOr:
+        # Caller must `rollback()` which will flush the `rdVtxLru` cache
+        const errSym = RdbBeDriverDelVtxError
+        when extraTraceMessages:
+          trace logTxt "putVtx()", vid, error=errSym, info=error
+        return err((vid,errSym,error))
+
+      # Update cache, vertex will most probably never be visited anymore
+      rdb.rdVtxLru.del vid
   ok()

 # ------------------------------------------------------------------------------

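Taken together, the write side follows a small session protocol: a batch is
opened lazily, the per-table `put*` procs queue typed records (serialising
vertices on the way in and mirroring every change into the LRUs), and only
`commit()` makes the batch visible atomically. A usage fragment, assuming an
enclosing proc and a `blob` value (both hypothetical):

    rdb.begin()                           # lazily opens the WriteBatchRef
    rdb.putAdm(AdmTabIdTuv, blob).isOkOr: # queued, not yet visible
      rdb.rollback()                      # drop the batch; LRU caches are stale now
      return
    rdb.commit().isOkOr:                  # one atomic RocksDB write
      return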
@ -17,7 +17,6 @@ import
   eth/common,
   stew/endians2,
   rocksdb,
-  ../init_common,
   ./rdb_desc

 const

@ -35,63 +34,53 @@ when extraTraceMessages:
 # Public iterators
 # ------------------------------------------------------------------------------

-iterator walk*(
-    rdb: RdbInst;
-    ): tuple[pfx: StorageType, xid: uint64, data: Blob] =
-  ## Walk over all key-value pairs of the database.
-  ##
-  ## Non-decodable entries are stepped over and ignored.
+iterator walkAdm*(rdb: RdbInst): tuple[xid: uint64, data: Blob] =
+  ## Walk over key-value pairs of the admin column of the database.
+  ##
+  ## Non-decodable entries are are ignored.
+  ##
   block walkBody:
-    let rit = rdb.store.openIterator().valueOr:
+    let rit = rdb.admCol.openIterator().valueOr:
       when extraTraceMessages:
-        trace logTxt "walk", pfx="all", error
+        trace logTxt "walkAdm()", error
       break walkBody
     defer: rit.close()

     for (key,val) in rit.pairs:
-      if key.len == 9:
-        if StorageType.high.ord < key[0]:
-          break walkBody
-        let
-          pfx = StorageType(key[0])
-          id = uint64.fromBytesBE key.toOpenArray(1, key.len - 1)
-        yield (pfx, id, val)
+      if key.len == 8 and val.len != 0:
+        yield (uint64.fromBytesBE key, val)

-iterator walk*(
-    rdb: RdbInst;
-    pfx: StorageType;
-    ): tuple[xid: uint64, data: Blob] =
-  ## Walk over key-value pairs of the table referted to by the argument `pfx`
-  ## whic must be different from `Oops` and `AdmPfx`.
+iterator walkKey*(rdb: RdbInst): tuple[vid: uint64, data: Blob] =
+  ## Walk over key-value pairs of the hash key column of the database.
   ##
-  ## Non-decodable entries are stepped over and ignored.
+  ## Non-decodable entries are are ignored.
   ##
   block walkBody:
-    let rit = rdb.store.openIterator().valueOr:
+    let rit = rdb.keyCol.openIterator().valueOr:
       when extraTraceMessages:
-        echo ">>> walk (2) oops",
-          " pfx=", pfx
-        trace logTxt "walk", pfx, error
+        trace logTxt "walkKey()", error
       break walkBody
     defer: rit.close()

-    # Start at first entry not less than `<pfx> & 1`
-    rit.seekToKey 1u64.toRdbKey pfx
-
-    # Fetch sub-table data as long as the current key is acceptable
-    while rit.isValid():
-      let key = rit.key()
-      if key.len == 9:
-        if key[0] != pfx.ord.uint:
-          break walkBody # done
-
-        let val = rit.value()
-        if val.len != 0:
-          yield (uint64.fromBytesBE key.toOpenArray(1, key.high()), val)
-
-      # Update Iterator
-      rit.next()
+    for (key,val) in rit.pairs:
+      if key.len == 8 and val.len != 0:
+        yield (uint64.fromBytesBE key, val)
+
+iterator walkVtx*(rdb: RdbInst): tuple[vid: uint64, data: Blob] =
+  ## Walk over key-value pairs of the hash key column of the database.
+  ##
+  ## Non-decodable entries are are ignored.
+  ##
+  block walkBody:
+    let rit = rdb.vtxCol.openIterator().valueOr:
+      when extraTraceMessages:
+        trace logTxt "walkVtx()", error
+      break walkBody
+    defer: rit.close()
+
+    for (key,val) in rit.pairs:
+      if key.len == 8 and val.len != 0:
+        yield (uint64.fromBytesBE key, val)

 # ------------------------------------------------------------------------------
 # End

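Since each table now lives in its own column family, a consumer can scan one
table without any prefix filtering: the walkers simply iterate their family
and skip records whose key is not the expected 8 bytes. For instance, counting
vertex records (fragment; `rdb` as initialised above):

    var nVtx = 0
    for (vid, data) in rdb.walkVtx:
      inc nVtx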
@ -172,7 +172,7 @@ when isMainModule:
     noisy.accountsRunner(sam, resetDb=true)

   when true: # and false:
-    let persistent = false # or true
+    let persistent = false or true
     noisy.showElapsed("@snap_test_list"):
       for n,sam in snapTestList:
         noisy.accountsRunner(sam, persistent=persistent)

@ -134,7 +134,7 @@ proc dbTriplet(w: LeafQuartet; rdbPath: string): Result[DbTriplet,AristoError] =
       check (n, report.error) == (n,0)
       return err(report.error)

-  return ok dx
+  ok dx

 # ----------------------

@ -338,7 +338,6 @@ proc testDistributedAccess*(
     let c11Fil1_eq_db1RoFilter = c11Filter1.isDbEq(db1.balancer, db1, noisy)
     xCheck c11Fil1_eq_db1RoFilter:
       noisy.say "*** testDistributedAccess (7)", "n=", n,
-        "\n c11Filter1\n ", c11Filter1.pp(db1),
         "db1".dump(db1),
         ""

@ -346,10 +345,8 @@ proc testDistributedAccess*(
     let c11Fil3_eq_db3RoFilter = c11Filter3.isDbEq(db3.balancer, db3, noisy)
     xCheck c11Fil3_eq_db3RoFilter:
       noisy.say "*** testDistributedAccess (8)", "n=", n,
-        "\n c11Filter3\n ", c11Filter3.pp(db3),
         "db3".dump(db3),
         ""

     # Check/verify backends
     block:
       let ok = dy.checkBeOk(noisy=noisy,fifos=true)