Merge pull request #87 from status-im/db
Align DB backend interfaces with eth_trie. Init BaseChainDB in nimbus.
commit 4944fef3ae
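The diff below renames the backend constructors from initChainDB to newChainDB, turns the backend handles into ref objects, and replaces the DbKey/ByteRange accessors with plain openarray[byte] keys and seq[byte] values, which is the shape eth_trie's trieDB wrapper can consume. As a rough sketch of the resulting interface, here is an illustrative in-memory stand-in (hypothetical helper, not code from this commit):

# Illustrative in-memory stand-in with the same interface shape the two
# backends now expose (hypothetical; the real backends use RocksDB/SQLite).
import tables

type
  MemChainDB = ref object of RootObj
    store: Table[seq[byte], seq[byte]]

proc newChainDB(): MemChainDB =
  result.new()
  result.store = initTable[seq[byte], seq[byte]]()

proc get(db: MemChainDB, key: openarray[byte]): seq[byte] =
  db.store.getOrDefault(@key)

proc put(db: MemChainDB, key, value: openarray[byte]) =
  db.store[@key] = @value

proc contains(db: MemChainDB, key: openarray[byte]): bool =
  @key in db.store

proc del(db: MemChainDB, key: openarray[byte]) =
  db.store.del(@key)

proc close(db: MemChainDB) =
  db.store.clear()

Because the handles are now ref objects, put, del and close no longer need var parameters, which is why the `var ChainDB` parameters disappear throughout the diff.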
@@ -2,12 +2,13 @@ import os, rocksdb, ranges
 import ../storage_types

 type
-  RocksChainDB* = object
+  RocksChainDB* = ref object of RootObj
     store: RocksDBInstance

   ChainDB* = RocksChainDB

-proc initChainDB*(basePath: string): ChainDB =
+proc newChainDB*(basePath: string): ChainDB =
+  result.new()
   let
     dataDir = basePath / "data"
     backupsDir = basePath / "backups"
@@ -18,24 +19,23 @@ proc initChainDB*(basePath: string): ChainDB =
   let s = result.store.init(dataDir, backupsDir)
   if not s.ok: raiseStorageInitError()

-proc get*(db: ChainDB, key: DbKey): ByteRange =
-  let s = db.store.getBytes(key.toOpenArray)
+proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
+  let s = db.store.getBytes(key)
   if not s.ok: raiseKeyReadError(key)
-  return s.value.toRange
+  return s.value

-proc put*(db: var ChainDB, key: DbKey, value: ByteRange) =
-  let s = db.store.put(key.toOpenArray, value.toOpenArray)
+proc put*(db: ChainDB, key, value: openarray[byte]) =
+  let s = db.store.put(key, value)
   if not s.ok: raiseKeyWriteError(key)

-proc contains*(db: ChainDB, key: DbKey): bool =
-  let s = db.store.contains(key.toOpenArray)
+proc contains*(db: ChainDB, key: openarray[byte]): bool =
+  let s = db.store.contains(key)
   if not s.ok: raiseKeySearchError(key)
   return s.value

-proc del*(db: var ChainDB, key: DbKey) =
-  let s = db.store.del(key.toOpenArray)
+proc del*(db: ChainDB, key: openarray[byte]) =
+  let s = db.store.del(key)
   if not s.ok: raiseKeyDeletionError(key)

-proc close*(db: var ChainDB) =
+proc close*(db: ChainDB) =
   db.store.close
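With the RocksDB backend reworked as above, typical usage would look roughly like this (a sketch only; the directory name and byte values are made up, and the module path is assumed from the test imports further down):

# Hypothetical usage of the reworked RocksDB backend (sketch only).
import os
import ../nimbus/db/backends/rocksdb_backend   # assumed path, as used by the tests

let db = newChainDB(getTempDir() / "nimbus-example-db")  # was initChainDB
let key = @[1.byte, 2, 3]
db.put(key, @[42.byte])                # openarray[byte] key and value
doAssert db.contains(key)
doAssert db.get(key) == @[42.byte]     # get now returns seq[byte]
db.del(key)
doAssert not db.contains(key)
db.close()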
@@ -2,13 +2,14 @@ import
   sqlite3, ranges, ranges/ptr_arith, ../storage_types

 type
-  SqliteChainDB* = object
+  SqliteChainDB* = ref object of RootObj
     store: PSqlite3
     selectStmt, insertStmt, deleteStmt: PStmt

   ChainDB* = SqliteChainDB

-proc initChainDB*(dbPath: string): ChainDB =
+proc newChainDB*(dbPath: string): ChainDB =
+  result.new()
   var s = sqlite3.open(dbPath, result.store)
   if s != SQLITE_OK:
     raiseStorageInitError()
@@ -54,14 +55,14 @@ proc initChainDB*(dbPath: string): ChainDB =
 proc bindBlob(s: Pstmt, n: int, blob: openarray[byte]): int32 =
   sqlite3.bind_blob(s, n.int32, blob.baseAddr, blob.len.int32, nil)

-proc get*(db: ChainDB, key: DbKey): ByteRange =
+proc get*(db: ChainDB, key: openarray[byte]): seq[byte] =
   template check(op) =
     let status = op
     if status != SQLITE_OK: raiseKeyReadError(key)

   check reset(db.selectStmt)
   check clearBindings(db.selectStmt)
-  check bindBlob(db.selectStmt, 1, key.toOpenArray)
+  check bindBlob(db.selectStmt, 1, key)

   case step(db.selectStmt)
   of SQLITE_ROW:
@@ -70,51 +71,50 @@ proc get*(db: ChainDB, key: DbKey): ByteRange =
       resLen = columnBytes(db.selectStmt, 0)
       resSeq = newSeq[byte](resLen)
     copyMem(resSeq.baseAddr, resStart, resLen)
-    return resSeq.toRange
+    return resSeq
   of SQLITE_DONE:
-    return ByteRange()
+    return @[]
   else: raiseKeySearchError(key)

-proc put*(db: var ChainDB, key: DbKey, value: ByteRange) =
+proc put*(db: ChainDB, key, value: openarray[byte]) =
   template check(op) =
     let status = op
     if status != SQLITE_OK: raiseKeyWriteError(key)

   check reset(db.insertStmt)
   check clearBindings(db.insertStmt)
-  check bindBlob(db.insertStmt, 1, key.toOpenArray)
-  check bindBlob(db.insertStmt, 2, value.toOpenArray)
+  check bindBlob(db.insertStmt, 1, key)
+  check bindBlob(db.insertStmt, 2, value)

   if step(db.insertStmt) != SQLITE_DONE:
     raiseKeyWriteError(key)

-proc contains*(db: ChainDB, key: DbKey): bool =
+proc contains*(db: ChainDB, key: openarray[byte]): bool =
   template check(op) =
     let status = op
     if status != SQLITE_OK: raiseKeySearchError(key)

   check reset(db.selectStmt)
   check clearBindings(db.selectStmt)
-  check bindBlob(db.selectStmt, 1, key.toOpenArray)
+  check bindBlob(db.selectStmt, 1, key)

   case step(db.selectStmt)
   of SQLITE_ROW: result = true
   of SQLITE_DONE: result = false
   else: raiseKeySearchError(key)

-proc del*(db: var ChainDB, key: DbKey) =
+proc del*(db: ChainDB, key: openarray[byte]) =
   template check(op) =
     let status = op
     if status != SQLITE_OK: raiseKeyDeletionError(key)

   check reset(db.deleteStmt)
   check clearBindings(db.deleteStmt)
-  check bindBlob(db.deleteStmt, 1, key.toOpenArray)
+  check bindBlob(db.deleteStmt, 1, key)

   if step(db.deleteStmt) != SQLITE_DONE:
     raiseKeyDeletionError(key)

-proc close*(db: var ChainDB) =
+proc close*(db: ChainDB) =
   discard sqlite3.close(db.store)
-  reset(db)
+  reset(db[])
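One subtle consequence of SqliteChainDB becoming a ref object shows up in close: reset(db) would now only clear the local reference, so the code resets the pointed-to object with reset(db[]) instead. In isolation (toy type, not the real backend):

# Toy illustration of reset on a ref vs. the referenced object.
type Handle = ref object
  fd: int

var h = Handle(fd: 42)
let alias = h
reset(h[])                 # zeroes the shared object itself
doAssert alias.fd == 0     # every other reference sees the cleared state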
@@ -11,7 +11,7 @@ import
   ../errors, ../block_types, ../utils/header, ../constants, ./storage_types.nim

 type
-  BaseChainDB* = ref object
+  BaseChainDB* = ref object of AbstractChainDB
     db*: TrieDatabaseRef
     # TODO db*: JournalDB

@@ -229,3 +229,7 @@ proc persistBlockToDb*(self: BaseChainDB; blk: Block) =

 proc getStateDb*(self: BaseChainDB; stateRoot: Hash256; readOnly: bool = false): AccountStateDB =
   result = newAccountStateDB(self.db, stateRoot)
+
+method getBestBlockHeader*(self: BaseChainDB): BlockHeaderRef =
+  result.new()
+  result[] = self.getCanonicalHead()
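BaseChainDB now derives from AbstractChainDB (presumably the reason nimbus.nim below adds eth_common to its imports), and getBestBlockHeader is declared as a method, so callers that only hold the abstract type dispatch to this implementation at run time. The pattern, with toy stand-ins for the real types:

# Toy stand-ins illustrating the method dispatch used above
# (not the real AbstractChainDB/BaseChainDB definitions).
type
  HeaderRef = ref object
    number: int
  AbstractDB = ref object of RootObj
  ConcreteDB = ref object of AbstractDB
    head: int

method getBestBlockHeader(db: AbstractDB): HeaderRef {.base.} =
  doAssert false, "not implemented"

method getBestBlockHeader(db: ConcreteDB): HeaderRef =
  result.new()
  result.number = db.head

let db: AbstractDB = ConcreteDB(head: 100)
doAssert db.getBestBlockHeader().number == 100   # dispatches to the ConcreteDB override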
@@ -8,10 +8,18 @@
 # those terms.

 import
-  os, strutils, net,
+  os, strutils, net, eth_common, db/[storage_types, db_chain],
   asyncdispatch2, json_rpc/rpcserver, eth_keys,
   eth_p2p, eth_p2p/rlpx_protocols/[eth, les],
-  config, rpc/[common, p2p]
+  config, rpc/[common, p2p],
+  eth_trie

+const UseSqlite = true
+
+when UseSqlite:
+  import db/backends/sqlite_backend
+else:
+  import db/backends/rocksdb_backend
+
 ## TODO:
 ## * No IPv6 support
@@ -33,6 +41,13 @@ type
     ethNode*: EthereumNode
     state*: NimbusState

+proc newTrieDb(): TrieDatabaseRef =
+  # XXX: Setup db storage location according to config
+  result = trieDB(newChainDb(":memory:"))
+
+proc initializeEmptyDb(db: BaseChainDB) =
+  echo "Initializing empty DB (TODO)"
+
 proc start(): NimbusObject =
   var nimbus = NimbusObject()
   var conf = getConfiguration()
@@ -55,9 +70,17 @@ proc start(): NimbusObject =
   address.tcpPort = Port(conf.net.bindPort)
   address.udpPort = Port(conf.net.discPort)

+  let trieDB = newTrieDb()
+  let chainDB = newBaseChainDB(trieDB)
+
+  if canonicalHeadHashKey().toOpenArray notin trieDB:
+    initializeEmptyDb(chainDb)
+
   nimbus.ethNode = newEthereumNode(keypair, address, conf.net.networkId,
                                    nil, nimbusClientId)

+  nimbus.ethNode.chain = chainDB
+
   if RpcFlags.Enabled in conf.rpc.flags:
     setupP2PRpc(nimbus.ethNode, nimbus.rpcServer)
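The `when UseSqlite` switch works because both backend modules deliberately export the same names (ChainDB, newChainDB, get, put, contains, del, close), so everything after the import is backend-agnostic and the choice is made entirely at compile time. A toy equivalent of the pattern:

# Toy equivalent of the compile-time backend switch (stand-in branches
# inlined here; the real code imports sqlite_backend or rocksdb_backend).
const UseSqlite = true

when UseSqlite:
  type ChainDB = object
    label: string
  proc newChainDB(path: string): ChainDB = ChainDB(label: "sqlite: " & path)
else:
  type ChainDB = object
    label: string
  proc newChainDB(path: string): ChainDB = ChainDB(label: "rocksdb: " & path)

echo newChainDB(":memory:").label   # only one branch is ever compiled

The ":memory:" path and the empty-DB initialization are still placeholders, as the XXX and TODO notes in the diff say.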
@@ -11,7 +11,7 @@ import
   ../nimbus/[constants, vm_types, logging, vm_state],
   ../nimbus/vm/interpreter,
   ../nimbus/utils/header,
-  ../nimbus/db/[db_chain, state_db, backends/memory_backend],
+  ../nimbus/db/[db_chain, state_db],
   ./test_helpers

 from eth_common import GasInt

@@ -1,14 +1,16 @@
 import
-  unittest, macros, ospaths,
+  unittest, macros, os,
   nimcrypto/[keccak, hash], ranges, eth_common/eth_types,
   ../nimbus/db/[storage_types],
   ../nimbus/db/backends/[sqlite_backend, rocksdb_backend]

 template dummyInstance(T: type SqliteChainDB): auto =
-  sqlite_backend.initChainDB ":memory:"
+  sqlite_backend.newChainDB ":memory:"

 template dummyInstance(T: type RocksChainDB): auto =
-  rocksdb_backend.initChainDB getTempDir() / "nimbus-test-db"
+  let tmp = getTempDir() / "nimbus-test-db"
+  removeDir(tmp)
+  rocksdb_backend.newChainDB(tmp)

 template backendTests(DB) =
   suite("storage tests: " & astToStr(DB)):
@@ -19,10 +21,10 @@ template backendTests(DB) =
       close(db)

     test "basic insertions and deletions":
-      var keyA = genericHashKey(keccak256.digest("A"))
-      var keyB = blockNumberToHashKey(100.toBlockNumber)
-      var value1 = @[1.byte, 2, 3, 4, 5].toRange
-      var value2 = @[7.byte, 8, 9, 10].toRange
+      var keyA = @(genericHashKey(keccak256.digest("A")).toOpenArray)
+      var keyB = @(blockNumberToHashKey(100.toBlockNumber).toOpenArray)
+      var value1 = @[1.byte, 2, 3, 4, 5]
+      var value2 = @[7.byte, 8, 9, 10]

       db.put(keyA, value1)
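The storage test relies on template overloading: dummyInstance is resolved per backend type, and backendTests(DB) stamps out one unittest suite per backend so both run the identical test body; presumably it is instantiated for SqliteChainDB and RocksChainDB further down the file (not shown in this hunk). The pattern in miniature, with toy types:

# Miniature version of the template-driven suite generation (toy types;
# the real file passes SqliteChainDB and RocksChainDB).
import unittest, macros

type
  ToyA = object
  ToyB = object

template dummyInstance(T: type ToyA): auto = ToyA()
template dummyInstance(T: type ToyB): auto = ToyB()

template backendTests(DB) =
  suite("storage tests: " & astToStr(DB)):
    test "can construct a dummy instance":
      discard dummyInstance(DB)

backendTests(ToyA)
backendTests(ToyB)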
@@ -14,7 +14,7 @@ import
   ../nimbus/[vm_state, vm_types],
   ../nimbus/utils/header,
   ../nimbus/vm/interpreter,
-  ../nimbus/db/[db_chain, state_db, backends/memory_backend]
+  ../nimbus/db/[db_chain, state_db]

 proc testFixture(fixtures: JsonNode, testStatusIMPL: var TestStatus)