From 84a72c8658464311b5d606768aed80d5757b528c Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Fri, 30 Aug 2024 17:32:13 +0200
Subject: [PATCH] Use zstd compression in bottommost layer (#2582)

Tested up to block ~14m, zstd uses ~12% less space which seems to
result in a smallish (2-4%) performance improvement on block import
speed - this seems like a better baseline for more extensive testing
in the future.

Pre: 57383308 kb
Post: 50831236 kb
---
 nimbus/db/core_db/backend/aristo_rocksdb.nim | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/nimbus/db/core_db/backend/aristo_rocksdb.nim b/nimbus/db/core_db/backend/aristo_rocksdb.nim
index 8e327422b..579e1d726 100644
--- a/nimbus/db/core_db/backend/aristo_rocksdb.nim
+++ b/nimbus/db/core_db/backend/aristo_rocksdb.nim
@@ -93,13 +93,16 @@ proc toRocksDb*(
   cfOpts.memtableWholeKeyFiltering = true
   cfOpts.memtablePrefixBloomSizeRatio = 0.1
 
-  # LZ4 seems to cut database size to 2/3 roughly, at the time of writing
+  # ZSTD seems to cut database size to 2/3 roughly, at the time of writing
   # Using it for the bottom-most level means it applies to 90% of data but
   # delays compression until data has settled a bit, which seems like a
   # reasonable tradeoff.
-  # TODO evaluate zstd compression with a trained dictionary
-  # https://github.com/facebook/rocksdb/wiki/Compression
-  cfOpts.bottommostCompression = Compression.lz4Compression
+  # Compared to LZ4 that was tested earlier, the default ZSTD config results
+  # in 10% less space and similar or slightly better performance in some
+  # simple tests around mainnet block 14M.
+  # TODO evaluate zstd dictionary compression
+  # https://github.com/facebook/rocksdb/wiki/Dictionary-Compression
+  cfOpts.bottommostCompression = Compression.zstdCompression
 
   # TODO In the AriVtx table, we don't do lookups that are expected to result
   # in misses thus we could avoid the filter cost - this does not apply to