Moved db backends to eth_trie
parent a2a11ed7aa
commit 58ad0e472c
caching backend (file deleted)
@@ -1,46 +0,0 @@
import
  ranges, tables, sets,
  eth_trie/db,
  ../storage_types

type
  CachingDB* = ref object of RootObj
    backing: TrieDatabaseRef
    changed: Table[seq[byte], seq[byte]]
    deleted: HashSet[seq[byte]]

proc newCachingDB*(backing: TrieDatabaseRef): CachingDB =
  result.new()
  result.backing = backing
  result.changed = initTable[seq[byte], seq[byte]]()
  result.deleted = initSet[seq[byte]]()

proc get*(db: CachingDB, key: openarray[byte]): seq[byte] =
  let key = @key
  result = db.changed.getOrDefault(key)
  if result.len == 0 and key notin db.deleted:
    result = db.backing.get(key)

proc put*(db: CachingDB, key, value: openarray[byte]) =
  let key = @key
  db.deleted.excl(key)
  db.changed[key] = @value

proc contains*(db: CachingDB, key: openarray[byte]): bool =
  let key = @key
  result = key in db.changed
  if not result and key notin db.deleted:
    result = db.backing.contains(key)

proc del*(db: CachingDB, key: openarray[byte]) =
  let key = @key
  db.changed.del(key)
  db.deleted.incl(key)

proc commit*(db: CachingDB) =
  for k in db.deleted:
    db.backing.del(k)

  for k, v in db.changed:
    db.backing.put(k, v)
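For context on what this deleted layer did: reads fall through to the backing trie database unless the key is cached or marked deleted, writes and deletions are buffered, and nothing touches the backing store until commit. A minimal usage sketch (key and value literals are made up; newMemDB and trieDB come from eth_trie/db, as in the deleted test further down):

import eth_trie/db
# plus the caching_backend module deleted above

let mdb = newMemDB()
let cdb = newCachingDB(trieDB(mdb))

cdb.put([0.byte, 0, 1], [1.byte, 0, 1])   # buffered in `changed`, nothing written to mdb yet
cdb.del([0.byte, 0, 2])                   # recorded in `deleted`, mdb untouched
cdb.commit()                              # deletions applied, then buffered writes flushed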
LMDB backend (file deleted)
@@ -1,160 +0,0 @@
import os, ranges, eth_trie/[defs, db_tracing]
import ../storage_types

when defined(windows):
  const Lib = "lmdb.dll"
elif defined(macos):
  const Lib = "liblmdb.dylib"
else:
  const Lib = "liblmdb.so"

const
  MDB_NOSUBDIR = 0x4000
  MDB_NOTFOUND = -30798
  LMDB_MAP_SIZE = 1024 * 1024 * 1024 * 10 # 10 GiB; enough?

type
  MDB_Env = distinct pointer
  MDB_Txn = distinct pointer
  MDB_Dbi = distinct cuint

  MDB_val = object
    mv_size: csize
    mv_data: pointer

# this is only the subset of the LMDB API needed in nimbus
proc mdb_env_create(env: var MDB_Env): cint {.cdecl, dynlib: Lib, importc: "mdb_env_create".}
proc mdb_env_open(env: MDB_Env, path: cstring, flags: cuint, mode: cint): cint {.cdecl, dynlib: Lib, importc: "mdb_env_open".}
proc mdb_txn_begin(env: MDB_Env, parent: MDB_Txn, flags: cuint, txn: var MDB_Txn): cint {.cdecl, dynlib: Lib, importc: "mdb_txn_begin".}
proc mdb_txn_commit(txn: MDB_Txn): cint {.cdecl, dynlib: Lib, importc: "mdb_txn_commit".}
proc mdb_dbi_open(txn: MDB_Txn, name: cstring, flags: cuint, dbi: var MDB_Dbi): cint {.cdecl, dynlib: Lib, importc: "mdb_dbi_open".}
proc mdb_dbi_close(env: MDB_Env, dbi: MDB_Dbi) {.cdecl, dynlib: Lib, importc: "mdb_dbi_close".}
proc mdb_env_close(env: MDB_Env) {.cdecl, dynlib: Lib, importc: "mdb_env_close".}

proc mdb_get(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: var MDB_val): cint {.cdecl, dynlib: Lib, importc: "mdb_get".}
proc mdb_del(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: ptr MDB_val): cint {.cdecl, dynlib: Lib, importc: "mdb_del".}
proc mdb_put(txn: MDB_Txn, dbi: MDB_Dbi, key: var MDB_val, data: var MDB_val, flags: cuint): cint {.cdecl, dynlib: Lib, importc: "mdb_put".}

proc mdb_env_set_mapsize(env: MDB_Env, size: uint64): cint {.cdecl, dynlib: Lib, importc: "mdb_env_set_mapsize".}

type
  LmdbChainDB* = ref object of RootObj
    env: MDB_Env
    txn: MDB_Txn
    dbi: MDB_Dbi
    manualCommit: bool

  ChainDB* = LmdbChainDB

# call txBegin and txCommit if you want to disable auto-commit
proc txBegin*(db: ChainDB, manualCommit = true): bool =
  result = true
  if manualCommit:
    db.manualCommit = true
  else:
    if db.manualCommit: return
  result = mdb_txn_begin(db.env, MDB_Txn(nil), 0, db.txn) == 0
  result = result and mdb_dbi_open(db.txn, nil, 0, db.dbi) == 0

proc txCommit*(db: ChainDB, manualCommit = true): bool =
  result = true
  if manualCommit:
    db.manualCommit = false
  else:
    if db.manualCommit: return
  result = mdb_txn_commit(db.txn) == 0
  mdb_dbi_close(db.env, db.dbi)

proc toMdbVal(val: openarray[byte]): MDB_val =
  result.mv_size = val.len
  result.mv_data = unsafeAddr val[0]

proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
  if key.len == 0: return
  var
    dbKey = toMdbVal(key)
    dbVal: MDB_val

  if not db.txBegin(false):
    raiseKeyReadError(key)

  var errCode = mdb_get(db.txn, db.dbi, dbKey, dbVal)

  if not(errCode == 0 or errCode == MDB_NOTFOUND):
    raiseKeyReadError(key)

  if dbVal.mv_size > 0 and errCode == 0:
    result = newSeq[byte](dbVal.mv_size.int)
    copyMem(result[0].addr, dbVal.mv_data, result.len)
  else:
    result = @[]

  traceGet key, result
  if not db.txCommit(false):
    raiseKeyReadError(key)

proc put*(db: ChainDB, key, value: openarray[byte]) =
  tracePut key, value
  if key.len == 0 or value.len == 0: return
  var
    dbKey = toMdbVal(key)
    dbVal = toMdbVal(value)

  if not db.txBegin(false):
    raiseKeyWriteError(key)

  var ok = mdb_put(db.txn, db.dbi, dbKey, dbVal, 0) == 0
  if not ok:
    raiseKeyWriteError(key)

  if not db.txCommit(false):
    raiseKeyWriteError(key)

proc contains*(db: ChainDB, key: openarray[byte]): bool =
  if key.len == 0: return
  var
    dbKey = toMdbVal(key)
    dbVal: MDB_val

  if not db.txBegin(false):
    raiseKeySearchError(key)

  result = mdb_get(db.txn, db.dbi, dbKey, dbVal) == 0

  if not db.txCommit(false):
    raiseKeySearchError(key)

proc del*(db: ChainDB, key: openarray[byte]) =
  traceDel key
  if key.len == 0: return
  var
    dbKey = toMdbVal(key)

  if not db.txBegin(false):
    raiseKeyDeletionError(key)

  var errCode = mdb_del(db.txn, db.dbi, dbKey, nil)
  if not(errCode == 0 or errCode == MDB_NOTFOUND):
    raiseKeyDeletionError(key)

  if not db.txCommit(false):
    raiseKeyDeletionError(key)

proc close*(db: ChainDB) =
  mdb_env_close(db.env)

proc newChainDB*(basePath: string): ChainDB =
  result.new()

  let dataDir = basePath / "nimbus.db"
  var ok = mdb_env_create(result.env) == 0
  if not ok: raiseStorageInitError()

  ok = mdb_env_set_mapsize(result.env, LMDB_MAP_SIZE) == 0
  if not ok: raiseStorageInitError()

  # the file mode is ignored on windows
  ok = mdb_env_open(result.env, dataDir, MDB_NOSUBDIR, 0o664) == 0
  if not ok: raiseStorageInitError()

  result.put(emptyRlpHash.data, emptyRlp)
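The txBegin/txCommit pair above implements an auto-commit scheme: every get, put, contains and del opens and commits its own LMDB transaction via txBegin(false)/txCommit(false), unless a manual transaction was started first, in which case the per-call begin/commit become no-ops. A rough usage sketch of the deleted API (path and key literals are made up):

let db = newChainDB("/tmp/nimbus-lmdb")    # opens <basePath>/nimbus.db with MDB_NOSUBDIR

db.put([1.byte, 2, 3], [4.byte, 5, 6])     # runs inside its own transaction
doAssert db.contains([1.byte, 2, 3])
doAssert db.get([1.byte, 2, 3]) == @[4.byte, 5, 6]

doAssert db.txBegin()                      # switch to a manual transaction
db.put([7.byte, 8], [9.byte])              # no per-call commit while manualCommit is set
db.del([1.byte, 2, 3])
doAssert db.txCommit()                     # commit everything at once

db.close()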
in-memory backend (file deleted)
@@ -1,30 +0,0 @@
# Nimbus
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import tables
import ranges
import ../storage_types

type
  MemoryDB* = ref object
    kvStore*: Table[DbKey, ByteRange]

proc newMemoryDB*: MemoryDB =
  MemoryDB(kvStore: initTable[DbKey, ByteRange]())

proc get*(db: MemoryDB, key: DbKey): ByteRange =
  db.kvStore[key]

proc set*(db: var MemoryDB, key: DbKey, value: ByteRange) =
  db.kvStore[key] = value

proc contains*(db: MemoryDB, key: DbKey): bool =
  db.kvStore.hasKey(key)

proc delete*(db: var MemoryDB, key: DbKey) =
  db.kvStore.del(key)
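This wrapper simply stores DbKey to ByteRange pairs in an ordinary Table. A small sketch of the intended use, assuming toRange from the ranges package converts a byte seq into a ByteRange (an assumption, not shown in this diff) and genericHashKey from storage_types builds the key:

import tables, ranges, nimcrypto/[keccak, hash]
# plus storage_types (for genericHashKey) and the memory backend deleted above

var db = newMemoryDB()
let key = genericHashKey(keccak256.digest("hello"))

db.set(key, @[1.byte, 2, 3].toRange)   # assumption: toRange builds a ByteRange from seq[byte]
doAssert db.contains(key)
db.delete(key)
doAssert not db.contains(key)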
RocksDB backend (file deleted)
@@ -1,51 +0,0 @@
import os, rocksdb, ranges, eth_trie/[defs, db_tracing]
import ../storage_types

type
  RocksChainDB* = ref object of RootObj
    store: RocksDBInstance

  ChainDB* = RocksChainDB

proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
  let s = db.store.getBytes(key)
  if s.ok:
    result = s.value
    traceGet key, result
  elif s.error.len == 0:
    discard
  else:
    raiseKeyReadError(key)

proc put*(db: ChainDB, key, value: openarray[byte]) =
  tracePut key, value
  let s = db.store.put(key, value)
  if not s.ok: raiseKeyWriteError(key)

proc contains*(db: ChainDB, key: openarray[byte]): bool =
  let s = db.store.contains(key)
  if not s.ok: raiseKeySearchError(key)
  return s.value

proc del*(db: ChainDB, key: openarray[byte]) =
  traceDel key
  let s = db.store.del(key)
  if not s.ok: raiseKeyDeletionError(key)

proc close*(db: ChainDB) =
  db.store.close

proc newChainDB*(basePath: string): ChainDB =
  result.new()
  let
    dataDir = basePath / "data"
    backupsDir = basePath / "backups"

  createDir(dataDir)
  createDir(backupsDir)

  let s = result.store.init(dataDir, backupsDir)
  if not s.ok: raiseStorageInitError()

  put(result, emptyRlpHash.data, emptyRlp)
SQLite backend (file deleted)
@@ -1,131 +0,0 @@
import
  os, sqlite3, ranges, ranges/ptr_arith, eth_trie/[db_tracing, defs],
  ../storage_types

type
  SqliteChainDB* = ref object of RootObj
    store: PSqlite3
    selectStmt, insertStmt, deleteStmt: PStmt

  ChainDB* = SqliteChainDB

proc put*(db: ChainDB, key, value: openarray[byte])

proc newChainDB*(basePath: string, inMemory = false): ChainDB =
  result.new()
  let dbPath = if inMemory: ":memory:" else: basePath / "nimbus.db"
  var s = sqlite3.open(dbPath, result.store)
  if s != SQLITE_OK:
    raiseStorageInitError()

  template execQuery(q: string) =
    var s: PStmt
    if prepare_v2(result.store, q, q.len.int32, s, nil) == SQLITE_OK:
      if step(s) != SQLITE_DONE or finalize(s) != SQLITE_OK:
        raiseStorageInitError()
    else:
      raiseStorageInitError()

  # TODO: check current version and implement schema versioning
  execQuery "PRAGMA user_version = 1;"

  execQuery """
    CREATE TABLE IF NOT EXISTS trie_nodes(
      key BLOB PRIMARY KEY,
      value BLOB
    );
  """

  template prepare(q: string): PStmt =
    var s: PStmt
    if prepare_v2(result.store, q, q.len.int32, s, nil) != SQLITE_OK:
      raiseStorageInitError()
    s

  result.selectStmt = prepare "SELECT value FROM trie_nodes WHERE key = ?;"

  if sqlite3.libversion_number() < 3024000:
    result.insertStmt = prepare """
      INSERT OR REPLACE INTO trie_nodes(key, value) VALUES (?, ?);
    """
  else:
    result.insertStmt = prepare """
      INSERT INTO trie_nodes(key, value) VALUES (?, ?)
        ON CONFLICT(key) DO UPDATE SET value = excluded.value;
    """

  result.deleteStmt = prepare "DELETE FROM trie_nodes WHERE key = ?;"

  put(result, emptyRlpHash.data, emptyRlp)

proc bindBlob(s: PStmt, n: int, blob: openarray[byte]): int32 =
  sqlite3.bind_blob(s, n.int32, blob.baseAddr, blob.len.int32, nil)

proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
  template check(op) =
    let status = op
    if status != SQLITE_OK: raiseKeyReadError(key)

  check reset(db.selectStmt)
  check clearBindings(db.selectStmt)
  check bindBlob(db.selectStmt, 1, key)

  case step(db.selectStmt)
  of SQLITE_ROW:
    var
      resStart = columnBlob(db.selectStmt, 0)
      resLen = columnBytes(db.selectStmt, 0)
    result = newSeq[byte](resLen)
    copyMem(result.baseAddr, resStart, resLen)
    traceGet key, result
  of SQLITE_DONE:
    discard
  else:
    raiseKeyReadError(key)

proc put*(db: ChainDB, key, value: openarray[byte]) =
  tracePut key, value

  template check(op) =
    let status = op
    if status != SQLITE_OK: raiseKeyWriteError(key)

  check reset(db.insertStmt)
  check clearBindings(db.insertStmt)
  check bindBlob(db.insertStmt, 1, key)
  check bindBlob(db.insertStmt, 2, value)

  if step(db.insertStmt) != SQLITE_DONE:
    raiseKeyWriteError(key)

proc contains*(db: ChainDB, key: openarray[byte]): bool =
  template check(op) =
    let status = op
    if status != SQLITE_OK: raiseKeySearchError(key)

  check reset(db.selectStmt)
  check clearBindings(db.selectStmt)
  check bindBlob(db.selectStmt, 1, key)

  case step(db.selectStmt)
  of SQLITE_ROW: result = true
  of SQLITE_DONE: result = false
  else: raiseKeySearchError(key)

proc del*(db: ChainDB, key: openarray[byte]) =
  traceDel key

  template check(op) =
    let status = op
    if status != SQLITE_OK: raiseKeyDeletionError(key)

  check reset(db.deleteStmt)
  check clearBindings(db.deleteStmt)
  check bindBlob(db.deleteStmt, 1, key)

  if step(db.deleteStmt) != SQLITE_DONE:
    raiseKeyDeletionError(key)

proc close*(db: ChainDB) =
  discard sqlite3.close(db.store)
  reset(db[])
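One detail worth noting in the constructor above: the INSERT ... ON CONFLICT(key) DO UPDATE upsert syntax only exists since SQLite 3.24.0, which is why older library versions fall back to INSERT OR REPLACE. A standalone sketch of the same version check, outside the backend:

import sqlite3

# SQLite encodes its version as major*1000000 + minor*1000 + patch,
# so 3.24.0 (the release that introduced upsert) is 3024000
let hasUpsert = sqlite3.libversion_number() >= 3024000
let mode = if hasUpsert: "INSERT ... ON CONFLICT DO UPDATE"
           else: "INSERT OR REPLACE"
echo "upsert strategy: ", mode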
database backend selection (modified)
@@ -10,10 +10,10 @@ const
   dbBackend = parseEnum[DbBackend](nimbus_db_backend)
 
 when dbBackend == sqlite:
-  import ./backends/sqlite_backend as database_backend
+  import eth_trie/backends/sqlite_backend as database_backend
 elif dbBackend == rocksdb:
-  import ./backends/rocksdb_backend as database_backend
+  import eth_trie/backends/rocksdb_backend as database_backend
 elif dbBackend == lmdb:
-  import ./backends/lmdb_backend as database_backend
+  import eth_trie/backends/lmdb_backend as database_backend
 
 export database_backend
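The when branches above pick exactly one backend module at compile time based on the nimbus_db_backend string; after this commit the modules come from eth_trie instead of the local backends directory. A minimal sketch of the selection pattern, assuming nimbus_db_backend is a compile-time string define (e.g. overridable with -d:nimbus_db_backend=rocksdb); this illustrates the mechanism, not the exact surrounding file:

import strutils

type
  DbBackend = enum
    sqlite, rocksdb, lmdb

const
  nimbus_db_backend {.strdefine.} = "rocksdb"   # assumption: set via -d: at compile time
  dbBackend = parseEnum[DbBackend](nimbus_db_backend)

when dbBackend == rocksdb:
  echo "compiled against the RocksDB backend"    # stands in for the real import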
storage_types (modified)
@@ -16,8 +16,6 @@ type
     data*: array[33, byte]
     dataEndPos*: uint8 # the last populated position in the data
 
-  StorageError* = object of Exception
-
 proc genericHashKey*(h: Hash256): DbKey {.inline.} =
   result.data[0] = byte ord(genericHash)
   result.data[1 .. 32] = h.data
@@ -76,18 +74,3 @@ proc `==`*[T](lhs, rhs: openarray[T]): bool =
 proc `==`*(a, b: DbKey): bool {.inline.} =
   a.toOpenArray == b.toOpenArray
 
-template raiseStorageInitError* =
-  raise newException(StorageError, "failure to initialize storage")
-
-template raiseKeyReadError*(key: auto) =
-  raise newException(StorageError, "failed to read key " & $key)
-
-template raiseKeyWriteError*(key: auto) =
-  raise newException(StorageError, "failed to write key " & $key)
-
-template raiseKeySearchError*(key: auto) =
-  raise newException(StorageError, "failure during search for key " & $key)
-
-template raiseKeyDeletionError*(key: auto) =
-  raise newException(StorageError, "failure to delete key " & $key)
-
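A DbKey packs a one-byte key-type tag plus up to 32 bytes of payload into a fixed 33-byte array, and the == above compares only the populated slice via toOpenArray. A small sketch using the helpers that remain (the same calls appear in the deleted tests below; it also assumes the storage_types module shown above is imported):

import nimcrypto/[keccak, hash], eth_common/eth_types
# plus the storage_types module, for genericHashKey and blockNumberToHashKey

let
  keyA = genericHashKey(keccak256.digest("A"))     # one tag byte followed by the 32-byte hash
  keyB = blockNumberToHashKey(100.toBlockNumber)

doAssert keyA == keyA
doAssert not (keyA == keyB)
doAssert keyA.data[0] == byte ord(genericHash)     # the first byte identifies the key type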
test runner imports (modified)
@@ -10,8 +10,6 @@ import ./test_code_stream,
   ./test_memory,
   ./test_stack,
   ./test_opcode,
-  ./test_storage_backends,
-  ./test_caching_db_backend,
   ./test_genesis,
   ./test_vm_json,
   ./test_precompiles,
caching backend test (file deleted)
@@ -1,48 +0,0 @@
#[
import
  unittest,
  eth_trie/db,
  ../nimbus/db/backends/caching_backend

let
  key1 = [0.byte, 0, 1]
  key2 = [0.byte, 0, 2]
  key3 = [0.byte, 0, 3]
  key4 = [0.byte, 0, 4]
  value1 = [1.byte, 0, 1]
  value2 = [1.byte, 0, 2]
  value3 = [1.byte, 0, 3]
  value4 = [1.byte, 0, 4]

suite "Caching DB backend":
  test "Basic test":
    let mdb = newMemDB()
    mdb.put(key1, value1)
    mdb.put(key2, value2)
    let cdb = newCachingDB(trieDB(mdb))
    check:
      cdb.get(key1) == @value1
      cdb.get(key2) == @value2

    cdb.del(key1)
    check:
      key1 notin cdb
      mdb.get(key1) == @value1

    cdb.put(key3, value3)
    check:
      cdb.get(key3) == @value3
      key3 notin mdb

    cdb.put(key4, value4)
    cdb.del(key4)
    check(key4 notin cdb)

    cdb.commit()

    check:
      key1 notin mdb
      mdb.get(key2) == @value2
      mdb.get(key3) == @value3
      key4 notin mdb
]#
storage backend tests (file deleted)
@@ -1,59 +0,0 @@
import
  unittest, macros, os,
  nimcrypto/[keccak, hash], ranges, eth_common/eth_types,
  ../nimbus/db/[storage_types],
  ../nimbus/db/backends/[sqlite_backend, rocksdb_backend]

template dummyInstance(T: type SqliteChainDB): auto =
  sqlite_backend.newChainDB(getTempDir(), inMemory = true)

template dummyInstance(T: type RocksChainDB): auto =
  let tmp = getTempDir() / "nimbus-test-db"
  removeDir(tmp)
  rocksdb_backend.newChainDB(tmp)

template backendTests(DB) =
  suite("storage tests: " & astToStr(DB)):
    setup:
      var db = dummyInstance(DB)

    teardown:
      close(db)

    test "basic insertions and deletions":
      var keyA = @(genericHashKey(keccak256.digest("A")).toOpenArray)
      var keyB = @(blockNumberToHashKey(100.toBlockNumber).toOpenArray)
      var value1 = @[1.byte, 2, 3, 4, 5]
      var value2 = @[7.byte, 8, 9, 10]

      db.put(keyA, value1)

      check:
        keyA in db
        keyB notin db

      db.put(keyB, value2)

      check:
        keyA in db
        keyB in db

      check:
        db.get(keyA) == value1
        db.get(keyB) == value2

      db.del(keyA)
      db.put(keyB, value1)

      check:
        keyA notin db
        keyB in db

      check db.get(keyA) == @[]

      check db.get(keyB) == value1
      db.del(keyA)

backendTests(RocksChainDB)
backendTests(SqliteChainDB)