Core db and aristo updates for destructor and tx logic (#1894)
* Disable `TransactionID` related functions from `state_db.nim`
  why:
    The functions `getCommittedStorage()` and `updateOriginalRoot()` from
    the `state_db` module are not used anywhere. Emulating the legacy
    `TransactionID` type functionality is administratively expensive to
    provide by `Aristo` (the legacy DB version is only partially
    implemented, anyway.)
    As there is no other place where `TransactionID`s are used, they will
    not be provided by the `Aristo` variant of the `CoreDb`. For the
    legacy DB API, nothing will change.

* Fix copyright headers in source code

* Get rid of compiler warning

* Update Aristo code, remove unused `merge()` variant, export `hashify()`
  why:
    Adapt to upcoming `CoreDb` wrapper

* Remove synced tx feature from `Aristo`
  why:
    + This feature allowed synchronising transaction methods like begin,
      commit, and rollback for a group of descriptors.
    + The feature is over-engineered and not needed for `CoreDb`, nor is
      it complete (some convergence features are missing.)

* Add debugging helpers to `Kvt`
  also:
    Update the database iterator, add a count variable yield argument
    similar to `Aristo`.

* Provide optional destructors for the `CoreDb` API
  why:
    For the upcoming Aristo wrapper, this allows controlling when certain
    smart destruction and update steps take place. The auto destructor
    works fine in general when the storage/cache strategy is known and
    acceptable at the time the descriptor is created.

* Add an `update` option to the `CoreDb` API function `hash()`
  why:
    The hash function is typically used to get the state root of the MPT.
    Due to lazy hashing, this might not be available on the `Aristo` DB.
    So the `update` option asks for re-hashing the current state changes
    if needed (see the sketch below this list.)

* Update API tracking log mode: `info` => `debug`

* Use a shared `Kvt` descriptor in the new Ledger API
  why:
    No need to create a new descriptor all the time
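
  Sketch of the intended `hash(update=..)` call pattern (illustrative only;
  the accessor and variable names below are made up and may differ from the
  actual `CoreDb` signatures):

    let mpt = db.someMptHandle()        # hypothetical column/MPT accessor
    let root = mpt.hash(update = true)  # re-hash pending changes, then
                                        # return the Merkle state root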
# Nimbus
# Copyright (c) 2022-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.

## Snap sync components tester and TDD environment

import
  std/[algorithm, math, sequtils, strformat, times],
  stew/byteutils,
  rocksdb/lib/librocksdb,
  rocksdb,
  unittest2,
  ../../nimbus/core/chain,
  ../../nimbus/db/kvstore_rocksdb,
  ../../nimbus/db/core_db,
  ../../nimbus/db/core_db/persistent,
  ../../nimbus/sync/snap/range_desc,
  ../../nimbus/sync/snap/worker/db/[hexary_desc, rocky_bulk_load],
  ../../nimbus/utils/prettify,
  ../replay/[pp, undump_blocks]

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc to*(b: openArray[byte]; T: type ByteArray32): T =
  ## Convert to other representation (or exception)
  if b.len == 32:
    (addr result[0]).copyMem(unsafeAddr b[0], 32)
  else:
    doAssert b.len == 32

proc to*(b: openArray[byte]; T: type ByteArray33): T =
  ## Convert to other representation (or exception)
  if b.len == 33:
    (addr result[0]).copyMem(unsafeAddr b[0], 33)
  else:
    doAssert b.len == 33

proc to*(b: ByteArray32|ByteArray33; T: type Blob): T =
  b.toSeq

proc to*(b: openArray[byte]; T: type NodeTag): T =
  ## Convert from serialised equivalent
  UInt256.fromBytesBE(b).T

proc to*(w: (byte, NodeTag); T: type Blob): T =
  let (b,t) = w
  @[b] & toSeq(t.UInt256.toBytesBE)

proc to*(t: NodeTag; T: type Blob): T =
  toSeq(t.UInt256.toBytesBE)

# ----------------
proc thisRecord(r: ptr rocksdb_iterator_t): (Blob,Blob) =
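  ## Return the key/value pair under the current iterator position as
  ## `Blob`s (empty blobs if the entry is unavailable.)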
  var kLen, vLen: csize_t
  let
    kData = r.rocksdb_iter_key(addr kLen)
    vData = r.rocksdb_iter_value(addr vLen)
  if not kData.isNil and not vData.isNil:
    let
      key = string.fromBytes(toOpenArrayByte(kData,0,int(kLen)-1))
      value = string.fromBytes(toOpenArrayByte(vData,0,int(vLen)-1))
    return (key.mapIt(it.byte),value.mapIt(it.byte))

proc meanStdDev(sum, sqSum: float; length: int): (float,float) =
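  ## Mean and (population) standard deviation recovered from the running
  ## sum and sum of squares: `stdDev = sqrt(sqSum/n - mean^2)`.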
  if 0 < length:
    result[0] = sum / length.float
    result[1] = sqrt(sqSum / length.float - result[0] * result[0])

# ------------------------------------------------------------------------------
# Public functions, pretty printing
# ------------------------------------------------------------------------------

proc pp*(d: Duration): string =
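  ## Pretty print a duration, picking the time unit according to magnitude.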
  if 40 < d.inSeconds:
    d.ppMins
  elif 200 < d.inMilliseconds:
    d.ppSecs
  elif 200 < d.inMicroseconds:
    d.ppMs
  else:
    d.ppUs

proc ppKvPc*(w: openArray[(string,int)]): string =
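  ## Pretty print a list of (name,percentage) pairs as `name=nn%` items.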
  w.mapIt(&"{it[0]}={it[1]}%").join(", ")

proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  if noisy:
    if args.len == 0:
      echo "*** ", pfx
    elif 0 < pfx.len and pfx[^1] != ' ':
      echo pfx, " ", args.toSeq.join
    else:
      echo pfx, args.toSeq.join

# ------------------------------------------------------------------------------
# Public test function: setup
# ------------------------------------------------------------------------------

proc test_dbTimingUndumpBlocks*(
    noisy: bool;
    filePath: string;
    com: CommonRef;
    numBlocks: int;
    loadNoise = false;
      ) =
  ## Store persistent blocks from dump into chain DB
  let chain = com.newChain

  for w in filePath.undumpBlocks:
    let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
    if fromBlock == 0.u256:
      doAssert w[0][0] == com.db.getBlockHeader(0.u256)
      continue
    # Message if [fromBlock,toBlock] contains a multiple of 900
    if fromBlock + (toBlock mod 900) <= toBlock:
      loadNoise.say "***", &"processing ...[#{fromBlock},#{toBlock}]..."
    check chain.persistBlocks(w[0], w[1]) == ValidationResult.OK
    if numBlocks.toBlockNumber <= w[0][^1].blockNumber:
      break

proc test_dbTimingRockySetup*(
    noisy: bool;
    t32: var Table[ByteArray32,Blob],
    t33: var Table[ByteArray33,Blob],
    cdb: CoreDbRef;
      ) =
  ## Extract key-value records into memory tables via rocksdb iterator
  let
    rdb = cdb.newKvt.backend.toRocksStoreRef
    rop = rocksdb_readoptions_create()
    rit = rdb.rocksDb.cPtr.rocksdb_create_iterator(rop)
  check not rit.isNil

  var
    v32Sum, v32SqSum: float   # statistics
    v33Sum, v33SqSum: float

  t32.clear
  t33.clear

  rit.rocksdb_iter_seek_to_first()
  while rit.rocksdb_iter_valid() != 0:
    let (key,val) = rit.thisRecord()
    rit.rocksdb_iter_next()
    if key.len == 32:
      t32[key.to(ByteArray32)] = val
      v32Sum += val.len.float
      v32SqSum += val.len.float * val.len.float
      check key.to(ByteArray32).to(Blob) == key
    elif key.len == 33:
      t33[key.to(ByteArray33)] = val
      v33Sum += val.len.float
      v33SqSum += val.len.float * val.len.float
      check key.to(ByteArray33).to(Blob) == key
    else:
      noisy.say "***", "ignoring key=", key.toHex

  rit.rocksdb_iter_destroy()
  rop.rocksdb_readoptions_destroy()

  var
    (mean32, stdv32) = meanStdDev(v32Sum, v32SqSum, t32.len)
    (mean33, stdv33) = meanStdDev(v33Sum, v33SqSum, t33.len)
  noisy.say "***",
    "key 32 table: ",
    &"size={t32.len} valLen={(mean32+0.5).int}({(stdv32+0.5).int})",
    ", key 33 table: ",
    &"size={t33.len} valLen={(mean33+0.5).int}({(stdv33+0.5).int})"

# ------------------------------------------------------------------------------
# Public test function: timing
# ------------------------------------------------------------------------------

proc test_dbTimingStoreDirect32*(
    noisy: bool;
    t32: Table[ByteArray32,Blob];
    cdb: CoreDbRef;
      ) =
  ## Direct db, key length 32, no transaction
  var ela: Duration
  let tdb = cdb.kvt

  if noisy: echo ""
  noisy.showElapsed("Standard db loader(keyLen 32)", ela):
    for (key,val) in t32.pairs:
      tdb.put(key, val)

  if ela.inNanoseconds != 0:
    let
      elaNs = ela.inNanoseconds.float
      perRec = ((elaNs / t32.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t32.len, ", ",
      "perRecord=", perRec.pp

proc test_dbTimingStoreDirectly32as33*(
    noisy: bool;
    t32: Table[ByteArray32,Blob],
    cdb: CoreDbRef;
      ) =
  ## Direct db, key length 32 as 33, no transaction
  var ela = initDuration()
  let tdb = cdb.kvt

  if noisy: echo ""
  noisy.showElapsed("Standard db loader(keyLen 32 as 33)", ela):
    for (key,val) in t32.pairs:
      tdb.put(@[99.byte] & key.toSeq, val)

  if ela.inNanoseconds != 0:
    let
      elaNs = ela.inNanoseconds.float
      perRec = ((elaNs / t32.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t32.len, ", ",
      "perRecord=", perRec.pp

proc test_dbTimingStoreTx32*(
    noisy: bool;
    t32: Table[ByteArray32,Blob],
    cdb: CoreDbRef;
      ) =
  ## Direct db, key length 32, transaction based
  var ela: Duration
  let tdb = cdb.kvt

  if noisy: echo ""
  noisy.showElapsed("Standard db loader(tx,keyLen 32)", ela):
    let dbTx = cdb.beginTransaction
    defer: dbTx.commit

    for (key,val) in t32.pairs:
      tdb.put(key, val)

  if ela.inNanoseconds != 0:
    let
      elaNs = ela.inNanoseconds.float
      perRec = ((elaNs / t32.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t32.len, ", ",
      "perRecord=", perRec.pp

proc test_dbTimingStoreTx32as33*(
    noisy: bool;
    t32: Table[ByteArray32,Blob],
    cdb: CoreDbRef;
      ) =
  ## Direct db, key length 32 as 33, transaction based
  var ela: Duration
  let tdb = cdb.kvt

  if noisy: echo ""
  noisy.showElapsed("Standard db loader(tx,keyLen 32 as 33)", ela):
    let dbTx = cdb.beginTransaction
    defer: dbTx.commit

    for (key,val) in t32.pairs:
      tdb.put(@[99.byte] & key.toSeq, val)

  if ela.inNanoseconds != 0:
    let
      elaNs = ela.inNanoseconds.float
      perRec = ((elaNs / t32.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t32.len, ", ",
      "perRecord=", perRec.pp

proc test_dbTimingDirect33*(
    noisy: bool;
    t33: Table[ByteArray33,Blob],
    cdb: CoreDbRef;
      ) =
  ## Direct db, key length 33, no transaction
  var ela: Duration
  let tdb = cdb.kvt

  if noisy: echo ""
  noisy.showElapsed("Standard db loader(keyLen 33)", ela):
    for (key,val) in t33.pairs:
      tdb.put(key, val)

  if ela.inNanoseconds != 0:
    let
      elaNs = ela.inNanoseconds.float
      perRec = ((elaNs / t33.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t33.len, ", ",
      "perRecord=", perRec.pp

proc test_dbTimingTx33*(
    noisy: bool;
    t33: Table[ByteArray33,Blob],
    cdb: CoreDbRef;
      ) =
  ## Direct db, key length 33, transaction based
  var ela: Duration
  let tdb = cdb.kvt

  if noisy: echo ""
  noisy.showElapsed("Standard db loader(tx,keyLen 33)", ela):
    let dbTx = cdb.beginTransaction
    defer: dbTx.commit

    for (key,val) in t33.pairs:
      tdb.put(key, val)

  if ela.inNanoseconds != 0:
    let
      elaNs = ela.inNanoseconds.float
      perRec = ((elaNs / t33.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t33.len, ", ",
      "perRecord=", perRec.pp

proc test_dbTimingRocky32*(
    noisy: bool;
    t32: Table[ByteArray32,Blob],
    cdb: CoreDbRef;
    fullNoise = false;
      ) =
  ## Rocksdb, key length 32
  var
    ela: array[4,Duration]
    size: int64
  let
    rdb = cdb.newKvt.backend.toRocksStoreRef

  # Note that 32 and 33 size keys cannot be usefully merged into the same SST
  # file. The keys must be added in a sorted mode. So playing safe, key sizes
  # should be of equal length.

  if noisy: echo ""
  noisy.showElapsed("Rocky bulk loader(keyLen 32)", ela[0]):
    let bulker = RockyBulkLoadRef.init(rdb)
    defer: bulker.destroy()
    check bulker.begin("rocky-bulk-cache")

    var
      keyList = newSeq[NodeTag](t32.len)

    fullNoise.showElapsed("Rocky bulk loader/32, sorter", ela[1]):
      var inx = 0
      for key in t32.keys:
        keyList[inx] = key.to(NodeTag)
        inx.inc
      keyList.sort(cmp)

    fullNoise.showElapsed("Rocky bulk loader/32, append", ela[2]):
      for n,nodeTag in keyList:
        let key = nodeTag.to(Blob)
        check bulker.add(key, t32[key.to(ByteArray32)])

    fullNoise.showElapsed("Rocky bulk loader/32, slurp", ela[3]):
      let rc = bulker.finish()
      if rc.isOk:
        size = rc.value
      else:
        check bulker.lastError == "" # force printing error

  fullNoise.say "***", " ela[]=", $ela.toSeq.mapIt(it.pp)
  if ela[0].inNanoseconds != 0:
    let
      elaNs = ela.toSeq.mapIt(it.inNanoseconds.float)
      elaPc = elaNs.mapIt(((it / elaNs[0]) * 100 + 0.5).int)
      perRec = ((elaNs[0] / t32.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t32.len, ", ",
      "perRecord=", perRec.pp, ", ",
      "sstSize=", size.uint64.toSI, ", ",
      "perRecord=", ((size.float / t32.len.float) + 0.5).int, ", ",
      ["Total","Sorter","Append","Ingest"].zip(elaPc).ppKvPc

proc test_dbTimingRocky32as33*(
    noisy: bool;
    t32: Table[ByteArray32,Blob],
    cdb: CoreDbRef;
    fullNoise = false;
      ) =
  ## Rocksdb, key length 32 as 33
  var
    ela: array[4,Duration]
    size: int64
  let
    rdb = cdb.newKvt.backend.toRocksStoreRef

  # Note that 32 and 33 size keys cannot be usefully merged into the same SST
  # file. The keys must be added in a sorted mode. So playing safe, key sizes
  # should be of equal length.

  if noisy: echo ""
  noisy.showElapsed("Rocky bulk loader(keyLen 32 as 33)", ela[0]):
    let bulker = RockyBulkLoadRef.init(rdb)
    defer: bulker.destroy()
    check bulker.begin("rocky-bulk-cache")

    var
      keyList = newSeq[NodeTag](t32.len)

    fullNoise.showElapsed("Rocky bulk loader/32 as 33, sorter", ela[1]):
      var inx = 0
      for key in t32.keys:
        keyList[inx] = key.to(NodeTag)
        inx.inc
      keyList.sort(cmp)

    fullNoise.showElapsed("Rocky bulk loader/32 as 33, append", ela[2]):
      for n,nodeTag in keyList:
        let key = nodeTag.to(Blob)
        check bulker.add(@[99.byte] & key, t32[key.to(ByteArray32)])

    fullNoise.showElapsed("Rocky bulk loader/32 as 33, slurp", ela[3]):
      let rc = bulker.finish()
      if rc.isOk:
        size = rc.value
      else:
        check bulker.lastError == "" # force printing error

  fullNoise.say "***", " ela[]=", $ela.toSeq.mapIt(it.pp)
  if ela[0].inNanoseconds != 0:
    let
      elaNs = ela.toSeq.mapIt(it.inNanoseconds.float)
      elaPc = elaNs.mapIt(((it / elaNs[0]) * 100 + 0.5).int)
      perRec = ((elaNs[0] / t32.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t32.len, ", ",
      "perRecord=", perRec.pp, ", ",
      "sstSize=", size.uint64.toSI, ", ",
      "perRecord=", ((size.float / t32.len.float) + 0.5).int, ", ",
      ["Total","Sorter","Append","Ingest"].zip(elaPc).ppKvPc

proc test_dbTimingRocky33*(
    noisy: bool;
    t33: Table[ByteArray33,Blob],
    cdb: CoreDbRef;
    fullNoise = false;
      ) =
  ## Rocksdb, key length 33
  var
    ela: array[4,Duration]
    size: int64
  let rdb = cdb.newKvt.backend.toRocksStoreRef

  # Note that 32 and 33 size keys cannot be usefully merged into the same SST
  # file. The keys must be added in a sorted mode. So playing safe, key sizes
  # should be of equal length.

  if noisy: echo ""
  noisy.showElapsed("Rocky bulk loader(keyLen 33)", ela[0]):
    let bulker = RockyBulkLoadRef.init(rdb)
    defer: bulker.destroy()
    check bulker.begin("rocky-bulk-cache")

    var
      kKeys: seq[byte] # need to cascade
      kTab: Table[byte,seq[NodeTag]]
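    # Cascaded sort: bucket the 33 byte keys by their leading byte, then sort
    # each bucket by the remaining 32 bytes (as `NodeTag`) so that records are
    # appended to the SST file in ascending key order.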

    fullNoise.showElapsed("Rocky bulk loader/33, sorter", ela[1]):
      for key in t33.keys:
        if kTab.hasKey(key[0]):
          kTab[key[0]].add key.toOpenArray(1,32).to(NodeTag)
        else:
          kTab[key[0]] = @[key.toOpenArray(1,32).to(NodeTag)]

      kKeys = toSeq(kTab.keys).sorted
      for w in kKeys:
        kTab[w].sort(cmp)

    fullNoise.showElapsed("Rocky bulk loader/33, append", ela[2]):
      for w in kKeys:
        fullNoise.say "***", " prefix=", w, " entries=", kTab[w].len
        for n,nodeTag in kTab[w]:
          let key = (w,nodeTag).to(Blob)
          check bulker.add(key, t33[key.to(ByteArray33)])

    fullNoise.showElapsed("Rocky bulk loader/33, slurp", ela[3]):
      let rc = bulker.finish()
      if rc.isOk:
        size = rc.value
      else:
        check bulker.lastError == "" # force printing error

  fullNoise.say "***", " ela[]=", $ela.toSeq.mapIt(it.pp)
  if ela[0].inNanoseconds != 0:
    let
      elaNs = ela.toSeq.mapIt(it.inNanoseconds.float)
      elaPc = elaNs.mapIt(((it / elaNs[0]) * 100 + 0.5).int)
      perRec = ((elaNs[0] / t33.len.float) + 0.5).int.initDuration
    noisy.say "***",
      "nRecords=", t33.len, ", ",
      "perRecord=", perRec.pp, ", ",
      "sstSize=", size.uint64.toSI, ", ",
      "perRecord=", ((size.float / t33.len.float) + 0.5).int, ", ",
      ["Total","Cascaded-Sorter","Append","Ingest"].zip(elaPc).ppKvPc

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------