Aristo and core db updates (#1800)

* Aristo: remove obsolete functions

* Aristo: Fix error code for non-available hash keys

why:
  Must not return `not-found` when the key is not available (i.e. the
  current changes have not been hashified yet.)

* CoreDB: Provide TDD and test framework
This commit is contained in:
Jordan Hrycaj 2023-10-03 12:56:13 +01:00 committed by GitHub
parent e7d82417d4
commit 395580ff9d
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 340 additions and 59 deletions

View File

@ -208,6 +208,7 @@ type
GetLeafNotFound
GetVtxNotFound
GetKeyNotFound
GetKeyTempLocked
GetFilNotFound
GetIdgNotFound
GetFqsNotFound

View File

@ -167,7 +167,7 @@ proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
let key = db.top.kMap.getOrVoid(vid).key
if key.isValid:
return ok(key)
return err(GetKeyNotFound)
return err(GetKeyTempLocked)
db.getKeyBE vid
proc getKey*(db: AristoDbRef; vid: VertexID): HashKey =

View File

@ -840,7 +840,9 @@ proc merge*(
# Create and assign a new root key
if not rootVid.isValid:
return ok db.vidRoot(rootKey)
let vid = db.vidFetch
db.vidAttach(HashLabel(root: vid, key: rootKey), vid)
return ok vid
err(MergeRootKeyDiffersForVid)

View File

@ -65,7 +65,7 @@ proc serialise(
if not vid.isValid:
VOID_HASH_KEY
else:
let rc = vid.getkey
let rc = vid.getKey
if rc.isErr:
return err((vid,rc.error))
rc.value

View File

@ -15,18 +15,9 @@
import
eth/common,
stint,
results,
"."/[aristo_desc, aristo_get]
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc rootHash*(db: AristoDbRef; root = VertexID(1)): Hash256 =
## Shortcut
db.getKey(root).to(Hash256)
# ------------------------------------------------------------------------------
# Public functions, converters
# ------------------------------------------------------------------------------
@ -51,10 +42,7 @@ proc toAccount*(
codeHash: payload.account.codehash,
storageRoot: EMPTY_ROOT_HASH)
if payload.account.storageID.isValid:
let key = db.getKey payload.account.storageID
if not key.isValid:
return err(AccountStorageKeyMissing)
acc.storageRoot = key.to(Hash256)
acc.storageRoot = (? db.getKeyRc payload.account.storageID).to(Hash256)
return ok(acc)
else:
discard

View File

@ -106,11 +106,6 @@ proc vidAttach*(db: AristoDbRef; lbl: HashLabel): VertexID {.discardable.} =
result = db.vidFetch
db.vidAttach(lbl, result)
proc vidRoot*(db: AristoDbRef; key: HashKey): VertexID {.discardable.} =
## Variant of `vidAttach()` for generating a sub-trie root
result = db.vidFetch
db.vidAttach(HashLabel(root: result, key: key), result)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -225,44 +225,6 @@ template xCheckErr*(expr: untyped; ifFalse: untyped): untyped =
if rc.isOk:
xCheck(expr, ifFalse)
# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------
iterator walkAllDb*(rocky: RocksStoreRef): (int,Blob,Blob) =
## Walk over all key-value pairs of the database (`RocksDB` only.)
let
rop = rocky.store.readOptions
rit = rocky.store.db.rocksdb_create_iterator(rop)
defer:
rit.rocksdb_iter_destroy()
rit.rocksdb_iter_seek_to_first()
var count = -1
while rit.rocksdb_iter_valid() != 0:
count .inc
# Read key-value pair
var
kLen, vLen: csize_t
let
kData = rit.rocksdb_iter_key(addr kLen)
vData = rit.rocksdb_iter_value(addr vLen)
# Fetch data
let
key = if kData.isNil: EmptyBlob
else: kData.toOpenArrayByte(0,int(kLen)-1).toSeq
value = if vData.isNil: EmptyBlob
else: vData.toOpenArrayByte(0,int(vLen)-1).toSeq
yield (count, key, value)
# Update Iterator (might overwrite kData/vdata)
rit.rocksdb_iter_next()
# End while
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

151
tests/test_coredb.nim Normal file
View File

@ -0,0 +1,151 @@
# Nimbus - Testing the `CoreDB` wrapper implementation
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
## Testing `CoreDB` wrapper implementation
import
std/[os, strformat, strutils],
chronicles,
eth/common,
results,
unittest2,
../../nimbus/[db/core_db/persistent, core/chain],
./replay/pp,
./test_coredb/[coredb_test_xx, test_legacy]
const
  # Directory search grid for `findFilePath()`: candidate paths are built
  # as <baseDir>/<repoDir>/<subDir>/<file>.
  baseDir = [".", "..", ".."/"..", $DirSep]
  repoDir = [".", "tests", "nimbus-eth1-blobs"]
  subDir = ["replay", "test_coredb"]

  # Reference file for finding some database directory base
  sampleDirRefFile = "coredb_test_xx.nim"

  # Standard test sample
  bChainCapture = bulkTest0
# ------------------------------------------------------------------------------
# Helpers
# ------------------------------------------------------------------------------
proc findFilePath(
    file: string;
    baseDir: openArray[string] = baseDir;
    repoDir: openArray[string] = repoDir;
    subDir: openArray[string] = subDir;
      ): Result[string,void] =
  ## Scan the directory grid `baseDir` x `repoDir` x `subDir` and return
  ## the first existing full path of `file`. On failure a message is
  ## printed and an error result returned.
  for top in baseDir:
    if not top.dirExists:
      continue
    for repo in repoDir:
      let repoPath = top / repo
      if not repoPath.dirExists:
        continue
      for sub in subDir:
        let subPath = repoPath / sub
        if not subPath.dirExists:
          continue
        let candidate = subPath / file
        if candidate.fileExists:
          return ok(candidate)
  echo "*** File not found \"", file, "\"."
  err()
proc getTmpDir(sampleDir = sampleDirRefFile): string =
  ## Locate `sampleDir` via `findFilePath()` and return the directory it
  ## lives in, used as the base for scratch databases.  Raises (via
  ## `Result.value`) when the reference file cannot be found.
  sampleDir.findFilePath.value.splitFile.dir
proc flushDbDir(s: string) =
  ## Best-effort cleanup of the database scratch area `s`: wipe the
  ## `nimbus` sub-directory if it holds a `data` folder, then remove `s`
  ## itself, but only when it ended up empty.  Removal errors are ignored.
  if s == "":
    return
  let dataDir = s / "nimbus"
  if dirExists(dataDir / "data"):
    # Typically under Windows: there might be stale file locks.
    try:
      dataDir.removeDir
    except CatchableError:
      discard
  # Only drop the top directory when nothing is left inside it.
  var isEmpty = true
  for _ in s.walkDir:
    isEmpty = false
    break
  if isEmpty:
    try:
      s.removeDir
    except CatchableError:
      discard
# ----------------
proc setTraceLevel {.used.} =
  ## Crank `chronicles` logging up to TRACE.  A no-op unless runtime
  ## log filtering was compiled in.
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.TRACE)
proc setErrorLevel {.used.} =
  ## Restrict `chronicles` logging to ERROR and above.  A no-op unless
  ## runtime log filtering was compiled in.
  discard
  when defined(chronicles_runtime_filtering) and loggingEnabled:
    setLogLevel(LogLevel.ERROR)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc openLegacyDB(
    persistent: bool;
    path: string;
    network: NetworkId;
      ): CommonRef =
  ## Open a legacy core database -- on disk under `path` when `persistent`
  ## is set, otherwise in memory -- and wrap it into an initialised
  ## `CommonRef` descriptor for `network`.
  let backend =
    if persistent: newCoreDbRef(LegacyDbPersistent, path)
    else: newCoreDbRef LegacyDbMemory
  result = CommonRef.new(
    db = backend,
    networkId = network,
    params = network.networkParams)
  result.initializeEmptyDb
# ------------------------------------------------------------------------------
# Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------
proc legacyRunner(
    noisy = true;
    capture = bChainCapture;
    persistent = true;
      ) =
  ## Run the chain-sync test against the legacy backend database.
  ##
  ## `capture` names the gzipped block dump to replay; with `persistent`
  ## set, a throw-away on-disk database is used (and flushed afterwards),
  ## otherwise everything stays in memory.
  let
    fileInfo = capture.file.splitFile.name.split(".")[0]
    # Nb. this line still sees the module-level `baseDir`/`repoDir`
    # constants; the local `baseDir` below is not yet in scope here.
    filePath = capture.file.findFilePath(baseDir,repoDir).value
    baseDir = getTmpDir() / capture.name & "-legacy"
    dbDir = if persistent: baseDir / "tmp" else: ""
    sayPersistent = if persistent: "persistent DB" else: "mem DB only"
    numBlocks = capture.numBlocks
    numBlocksInfo = if numBlocks == high(int): "" else: $numBlocks & " "

  defer:
    if persistent: baseDir.flushDbDir

  suite "Legacy DB: test Core API interfaces"&
      &", capture={fileInfo}, {sayPersistent}":

    # Fixed typo in the visible test name: "Legaci" -> "Legacy"
    test &"Legacy API, {numBlocksInfo} blocks":
      let
        com = openLegacyDB(persistent, dbDir, capture.network)
      defer:
        com.db.finish(flush = true)
        if persistent: dbDir.flushDbDir

      check noisy.testChainSyncLegacyApi(filePath, com, numBlocks)
# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------
proc coreDbMain*(noisy = defined(debug)) =
  ## External entry point for the all-test-suites runner: execute the
  ## legacy backend tests with default capture settings.
  noisy.legacyRunner()
when isMainModule:
  const
    # NOTE(review): `or true` forces noisy output even in release builds --
    # presumably a development convenience; confirm before shipping.
    noisy = defined(debug) or true

  setErrorLevel()
  noisy.legacyRunner()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,47 @@
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
eth/common,
../../nimbus/common/chain_config
type
  CaptureSpecs* = object
    ## Description of a block-chain capture replay sample.
    name*: string ## sample name, also used as sub-directory for db separation
    network*: NetworkId ## network the capture was recorded on
    file*: string ## name of capture file
    numBlocks*: int ## Number of blocks to load

# Pre-canned capture samples.  The gzipped capture files are expected in
# one of the test-data search directories (see `findFilePath()` in the
# test driver.)
const
  bulkTest0* = CaptureSpecs(
    name: "some-goerli",
    network: GoerliNet,
    file: "goerli68161.txt.gz",
    numBlocks: 1_000)

  # Same capture as `bulkTest0` but replaying all available blocks
  bulkTest1* = CaptureSpecs(
    name: "full-goerli",
    network: bulkTest0.network,
    file: bulkTest0.file,
    numBlocks: high(int))

  bulkTest2* = CaptureSpecs(
    name: "more-goerli",
    network: GoerliNet,
    file: "goerli482304.txt.gz",
    numBlocks: high(int))

  bulkTest3* = CaptureSpecs(
    name: "mainnet",
    network: MainNet,
    file: "mainnet332160.txt.gz",
    numBlocks: high(int))
# End

View File

@ -0,0 +1,77 @@
# Nimbus - Shared helpers for the `CoreDB` test suite
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
std/[sequtils],
eth/common,
#../test_sync_snap/test_types,
../replay/[pp]
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc toPfx(indent: int): string =
  ## Return a newline followed by an `indent`-wide blank margin, handy
  ## as a continuation-line prefix when pretty printing.
  let margin = " ".repeat(indent)
  result = "\n" & margin
# ------------------------------------------------------------------------------
# Public pretty printing
# ------------------------------------------------------------------------------
proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
  ## Conditional console print: when `noisy` is set, emit `pfx` followed
  ## by the stringified `args`.  A blank is inserted after `pfx` unless
  ## it is empty or already ends in one; with no `args` at all the prefix
  ## itself is announced.
  if not noisy:
    return
  let body = args.toSeq.join
  if args.len == 0:
    echo "*** ", pfx
  elif pfx.len > 0 and pfx[^1] != ' ':
    echo pfx, " ", body
  else:
    echo pfx, body
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Public workflow helpers
# ------------------------------------------------------------------------------
template xCheck*(expr: untyped): untyped =
  ## Note: this check will invoke `expr` twice
  ## On failure the condition is reported via `unittest2.check` and the
  ## enclosing proc is left (so a test body stops at the first failure.)
  if not (expr):
    check expr
    return

template xCheck*(expr: untyped; ifFalse: untyped): untyped =
  ## Note: this check will invoke `expr` twice
  ## Variant of `xCheck()` executing the `ifFalse` statements (e.g. some
  ## debug printing) before the failure is reported.
  if not (expr):
    ifFalse
    check expr
    return
template xCheckRc*(expr: untyped): untyped =
  ## Deliberately unhygienic: expects a `Result` variable named `rc` in
  ## the caller's scope; `expr` is only checked when `rc` holds an error.
  if rc.isErr:
    xCheck(expr)

template xCheckRc*(expr: untyped; ifFalse: untyped): untyped =
  ## As `xCheckRc()` but running `ifFalse` before reporting a failure.
  if rc.isErr:
    xCheck(expr, ifFalse)

template xCheckErr*(expr: untyped): untyped =
  ## Mirror image of `xCheckRc()`: expects `rc` in the caller's scope and
  ## checks `expr` when `rc` unexpectedly holds a value.
  if rc.isOk:
    xCheck(expr)

template xCheckErr*(expr: untyped; ifFalse: untyped): untyped =
  ## As `xCheckErr()` but running `ifFalse` before reporting a failure.
  if rc.isOk:
    xCheck(expr, ifFalse)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,58 @@
# Nimbus - Legacy backend tests for the `CoreDB` wrapper
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or
# distributed except according to those terms.
import
std/strformat,
eth/common,
results,
unittest2,
../../nimbus/[core/chain],
../replay/undump_blocks,
./test_helpers
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc test_chainSyncLegacyApi*(
    noisy: bool;
    filePath: string;
    com: CommonRef;
    numBlocks: int;
      ): bool =
  ## Store persistent blocks from dump into chain DB
  ##
  ## Replays the block dump `filePath` into `com`'s database in batches
  ## via `persistBlocks()`, stopping once `numBlocks` blocks have been
  ## processed.  Returns `false` (after a `check` report) on the first
  ## failing batch, `true` otherwise.
  let
    sayBlocks = 900.u256
    chain = com.newChain

  for w in filePath.undumpBlocks:
    let (fromBlock, toBlock) = (w[0][0].blockNumber, w[0][^1].blockNumber)
    if fromBlock == 0.u256:
      # Genesis batch: the dumped header must match the one the database
      # was initialised with; nothing to persist here.
      xCheck w[0][0] == com.db.getBlockHeader(0.u256)
      continue

    # Message if [fromBlock,toBlock] contains a multiple of `sayBlocks`
    # (progress is reported roughly every `sayBlocks` blocks.)
    if fromBlock + (toBlock mod sayBlocks) <= toBlock:
      noisy.say "***", &"processing ...[#{fromBlock},#{toBlock}]..."

    xCheck chain.persistBlocks(w[0], w[1]) == ValidationResult.OK
    if numBlocks.toBlockNumber <= w[0][^1].blockNumber:
      break

  true
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------