diff --git a/nimbus.nimble b/nimbus.nimble
index 88c50f634..9acd73a7b 100644
--- a/nimbus.nimble
+++ b/nimbus.nimble
@@ -12,6 +12,8 @@ requires "nim >= 0.18.1",
          "nimcrypto",
          "rlp",
          "stint",
+         "rocksdb",
+         "eth_trie",
          "https://github.com/status-im/nim-eth-common",
          "https://github.com/status-im/nim-eth-rpc",
          "https://github.com/status-im/nim-asyncdispatch2",
diff --git a/nimbus.nix b/nimbus.nix
index 9141bbcd4..1d7f7ffff 100644
--- a/nimbus.nix
+++ b/nimbus.nix
@@ -3,6 +3,7 @@ let
   stdenv = pkgs.stdenv;
   nim = pkgs.callPackage ./nim.nix {};
+  makeLibraryPath = stdenv.lib.makeLibraryPath;

 in

@@ -19,5 +20,6 @@ stdenv.mkDerivation rec {
   src = ./.;

   buildInputs = [pkgs.clang nim pkgs.rocksdb_lite];
+  LD_LIBRARY_PATH = "${makeLibraryPath buildInputs}";
 }
diff --git a/nimbus/db/backends/memory_backend.nim b/nimbus/db/backends/memory_backend.nim
index 289ae43a1..3b47ecd73 100644
--- a/nimbus/db/backends/memory_backend.nim
+++ b/nimbus/db/backends/memory_backend.nim
@@ -5,58 +5,21 @@
 # * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
 #   at your option. This file may not be copied, modified, or distributed except according to those terms.

-import tables, hashes, eth_common
+import tables
+import ranges
+import ../storage_types

 type
-  DBKeyKind = enum
-    genericHash
-    blockNumberToHash
-    blockHashToScore
-    transactionHashToBlock
-    canonicalHeadHash
-
-  DbKey* = object
-    case kind: DBKeyKind
-    of genericHash, blockHashToScore, transactionHashToBlock:
-      h: Hash256
-    of blockNumberToHash:
-      u: BlockNumber
-    of canonicalHeadHash:
-      discard
-
   MemoryDB* = ref object
-    kvStore*: Table[DbKey, seq[byte]]
-
-proc genericHashKey*(h: Hash256): DbKey {.inline.} = DbKey(kind: genericHash, h: h)
-proc blockHashToScoreKey*(h: Hash256): DbKey {.inline.} = DbKey(kind: blockHashToScore, h: h)
-proc transactionHashToBlockKey*(h: Hash256): DbKey {.inline.} = DbKey(kind: transactionHashToBlock, h: h)
-proc blockNumberToHashKey*(u: BlockNumber): DbKey {.inline.} = DbKey(kind: blockNumberToHash, u: u)
-proc canonicalHeadHashKey*(): DbKey {.inline.} = DbKey(kind: canonicalHeadHash)
-
-proc hash(k: DbKey): Hash =
-  result = result !& hash(k.kind)
-  case k.kind
-  of genericHash, blockHashToScore, transactionHashToBlock:
-    result = result !& hash(k.h)
-  of blockNumberToHash:
-    result = result !& hashData(unsafeAddr k.u, sizeof(k.u))
-  of canonicalHeadHash:
-    discard
-  result = result
-
-proc `==`(a, b: DbKey): bool {.inline.} =
-  equalMem(unsafeAddr a, unsafeAddr b, sizeof(a))
-
-proc newMemoryDB*(kvStore: Table[DbKey, seq[byte]]): MemoryDB =
-  MemoryDB(kvStore: kvStore)
+    kvStore*: Table[DbKey, ByteRange]

 proc newMemoryDB*: MemoryDB =
-  MemoryDB(kvStore: initTable[DbKey, seq[byte]]())
+  MemoryDB(kvStore: initTable[DbKey, ByteRange]())

-proc get*(db: MemoryDB, key: DbKey): seq[byte] =
+proc get*(db: MemoryDB, key: DbKey): ByteRange =
   db.kvStore[key]

-proc set*(db: var MemoryDB, key: DbKey, value: seq[byte]) =
+proc set*(db: var MemoryDB, key: DbKey, value: ByteRange) =
   db.kvStore[key] = value

 proc contains*(db: MemoryDB, key: DbKey): bool =
@@ -64,3 +27,4 @@ proc contains*(db: MemoryDB, key: DbKey): bool =

 proc delete*(db: var MemoryDB, key: DbKey) =
   db.kvStore.del(key)
+
diff --git a/nimbus/db/backends/rocksdb_backend.nim b/nimbus/db/backends/rocksdb_backend.nim
new file mode 100644
index 000000000..1de292ee6
--- /dev/null
+++ b/nimbus/db/backends/rocksdb_backend.nim
@@ -0,0 +1,41 @@
+import os, rocksdb, ranges
+import ../storage_types
+
+type
+  RocksChainDB* = object
+    store: RocksDBInstance
+
+  ChainDB* = RocksChainDB
+
+proc initChainDB*(basePath: string): ChainDB =
+  let
+    dataDir = basePath / "data"
+    backupsDir = basePath / "backups"
+
+  createDir(dataDir)
+  createDir(backupsDir)
+
+  let s = result.store.init(dataDir, backupsDir)
+  if not s.ok: raiseStorageInitError()
+
+proc get*(db: ChainDB, key: DbKey): ByteRange =
+  let s = db.store.getBytes(key.toOpenArray)
+  if not s.ok: raiseKeyReadError(key)
+  return s.value.toRange
+
+proc put*(db: var ChainDB, key: DbKey, value: ByteRange) =
+  let s = db.store.put(key.toOpenArray, value.toOpenArray)
+  if not s.ok: raiseKeyWriteError(key)
+
+proc contains*(db: ChainDB, key: DbKey): bool =
+  let s = db.store.contains(key.toOpenArray)
+  if not s.ok: raiseKeySearchError(key)
+  return s.value
+
+proc del*(db: var ChainDB, key: DbKey) =
+  let s = db.store.del(key.toOpenArray)
+  if not s.ok: raiseKeyDeletionError(key)
+
+proc close*(db: var ChainDB) =
+  db.store.close
+
diff --git a/nimbus/db/backends/sqlite_backend.nim b/nimbus/db/backends/sqlite_backend.nim
new file mode 100644
index 000000000..3fba36da3
--- /dev/null
+++ b/nimbus/db/backends/sqlite_backend.nim
@@ -0,0 +1,120 @@
+import
+  sqlite3, ranges, ranges/ptr_arith, ../storage_types
+
+type
+  SqliteChainDB* = object
+    store: PSqlite3
+    selectStmt, insertStmt, deleteStmt: PStmt
+
+  ChainDB* = SqliteChainDB
+
+proc initChainDB*(dbPath: string): ChainDB =
+  var s = sqlite3.open(dbPath, result.store)
+  if s != SQLITE_OK:
+    raiseStorageInitError()
+
+  template execQuery(q: string) =
+    var s: Pstmt
+    if prepare_v2(result.store, q, q.len.int32, s, nil) == SQLITE_OK:
+      if step(s) != SQLITE_DONE or finalize(s) != SQLITE_OK:
+        raiseStorageInitError()
+    else:
+      raiseStorageInitError()
+
+  # TODO: check current version and implement schema versioning
+  execQuery "PRAGMA user_version = 1;"
+
+  execQuery """
+    CREATE TABLE IF NOT EXISTS trie_nodes(
+       key BLOB PRIMARY KEY,
+       value BLOB
+    );
+  """
+
+  template prepare(q: string): PStmt =
+    var s: Pstmt
+    if prepare_v2(result.store, q, q.len.int32, s, nil) != SQLITE_OK:
+      raiseStorageInitError()
+    s
+
+  result.selectStmt = prepare "SELECT value FROM trie_nodes WHERE key = ?;"
+
+  if sqlite3.libversion_number() < 3024000:
+    result.insertStmt = prepare """
+      INSERT OR REPLACE INTO trie_nodes(key, value) VALUES (?, ?);
+    """
+  else:
+    result.insertStmt = prepare """
+      INSERT INTO trie_nodes(key, value) VALUES (?, ?)
+        ON CONFLICT(key) DO UPDATE SET value = excluded.value;
+    """
+
+  result.deleteStmt = prepare "DELETE FROM trie_nodes WHERE key = ?;"
+
+proc bindBlob(s: Pstmt, n: int, blob: openarray[byte]): int32 =
+  sqlite3.bind_blob(s, n.int32, blob.baseAddr, blob.len.int32, nil)
+
+proc get*(db: ChainDB, key: DbKey): ByteRange =
+  template check(op) =
+    let status = op
+    if status != SQLITE_OK: raiseKeyReadError(key)
+
+  check reset(db.selectStmt)
+  check clearBindings(db.selectStmt)
+  check bindBlob(db.selectStmt, 1, key.toOpenArray)
+
+  case step(db.selectStmt)
+  of SQLITE_ROW:
+    var
+      resStart = columnBlob(db.selectStmt, 0)
+      resLen = columnBytes(db.selectStmt, 0)
+      resSeq = newSeq[byte](resLen)
+    copyMem(resSeq.baseAddr, resStart, resLen)
+    return resSeq.toRange
+  of SQLITE_DONE:
+    return ByteRange()
+  else: raiseKeySearchError(key)
+
+proc put*(db: var ChainDB, key: DbKey, value: ByteRange) =
+  template check(op) =
+    let status = op
+    if status != SQLITE_OK: raiseKeyWriteError(key)
+
+  check reset(db.insertStmt)
+  check clearBindings(db.insertStmt)
+  check bindBlob(db.insertStmt, 1, key.toOpenArray)
+  check bindBlob(db.insertStmt, 2, value.toOpenArray)
+
+  if step(db.insertStmt) != SQLITE_DONE:
+    raiseKeyWriteError(key)
+
+proc contains*(db: ChainDB, key: DbKey): bool =
+  template check(op) =
+    let status = op
+    if status != SQLITE_OK: raiseKeySearchError(key)
+
+  check reset(db.selectStmt)
+  check clearBindings(db.selectStmt)
+  check bindBlob(db.selectStmt, 1, key.toOpenArray)
+
+  case step(db.selectStmt)
+  of SQLITE_ROW: result = true
+  of SQLITE_DONE: result = false
+  else: raiseKeySearchError(key)
+
+proc del*(db: var ChainDB, key: DbKey) =
+  template check(op) =
+    let status = op
+    if status != SQLITE_OK: raiseKeyDeletionError(key)
+
+  check reset(db.deleteStmt)
+  check clearBindings(db.deleteStmt)
+  check bindBlob(db.deleteStmt, 1, key.toOpenArray)
+
+  if step(db.deleteStmt) != SQLITE_DONE:
+    raiseKeyDeletionError(key)
+
+proc close*(db: var ChainDB) =
+  discard sqlite3.close(db.store)
+  reset(db)
+
diff --git a/nimbus/db/db_chain.nim b/nimbus/db/db_chain.nim
index 9d732d346..f314afbd9 100644
--- a/nimbus/db/db_chain.nim
+++ b/nimbus/db/db_chain.nim
@@ -6,8 +6,8 @@
 #   at your option. This file may not be copied, modified, or distributed except according to those terms.

 import stint, tables, sequtils, algorithm, rlp, ranges, state_db, nimcrypto,
-  backends / memory_backend,
-  ../errors, ../block_types, ../utils/header, ../constants, eth_common, byteutils
+  ../errors, ../block_types, ../utils/header, ../constants, eth_common, byteutils,
+  ./storage_types.nim, backends/memory_backend

 type
   BaseChainDB* = ref object
@@ -36,13 +36,11 @@ proc getBlockHeaderByHash*(self: BaseChainDB; blockHash: Hash256): BlockHeader =
   ## Returns the requested block header as specified by block hash.
   ##
   ## Raises BlockNotFound if it is not present in the db.
-  var blk: seq[byte]
   try:
-    blk = self.db.get(genericHashKey(blockHash))
+    let blk = self.db.get(genericHashKey(blockHash))
+    return decode(blk, BlockHeader)
   except KeyError:
     raise newException(BlockNotFound, "No block with hash " & blockHash.data.toHex)
-  let rng = blk.toRange
-  return decode(rng, BlockHeader)

 proc getHash(self: BaseChainDB, key: DbKey): Hash256 {.inline.} =
   rlp.decode(self.db.get(key).toRange, Hash256)
@@ -88,7 +86,7 @@ iterator findNewAncestors(self: BaseChainDB; header: BlockHeader): BlockHeader =
       h = self.getBlockHeaderByHash(h.parentHash)

 proc addBlockNumberToHashLookup(self: BaseChainDB; header: BlockHeader) =
-  self.db.set(blockNumberToHashKey(header.blockNumber), rlp.encode(header.hash).toSeq())
+  self.db.set(blockNumberToHashKey(header.blockNumber), rlp.encode(header.hash))

 iterator getBlockTransactionHashes(self: BaseChainDB, blockHeader: BlockHeader): Hash256 =
   ## Returns an iterable of the transaction hashes from th block specified
@@ -127,7 +125,7 @@ proc setAsCanonicalChainHead(self: BaseChainDB; headerHash: Hash256): seq[BlockH
   for h in newCanonicalHeaders:
     self.addBlockNumberToHashLookup(h)

-  self.db.set(canonicalHeadHashKey(), rlp.encode(header.hash).toSeq())
+  self.db.set(canonicalHeadHashKey(), rlp.encode(header.hash))

   return newCanonicalHeaders

 proc headerExists*(self: BaseChainDB; blockHash: Hash256): bool =
@@ -168,10 +166,10 @@ proc persistHeaderToDb*(self: BaseChainDB; header: BlockHeader): seq[BlockHeader
   if not isGenesis and not self.headerExists(header.parentHash):
     raise newException(ParentNotFound, "Cannot persist block header " &
       $header.hash & " with unknown parent " & $header.parentHash)
-  self.db.set(genericHashKey(header.hash), rlp.encode(header).toSeq())
+  self.db.set(genericHashKey(header.hash), rlp.encode(header))

   let score = if isGenesis: header.difficulty
               else: self.getScore(header.parentHash).u256 + header.difficulty
-  self.db.set(blockHashToScoreKey(header.hash), rlp.encode(score).toSeq())
+  self.db.set(blockHashToScoreKey(header.hash), rlp.encode(score))

   var headScore: int
   try:
     headScore = self.getScore(self.getCanonicalHead().hash)
@@ -185,14 +183,14 @@ proc persistHeaderToDb*(self: BaseChainDB; header: BlockHeader): seq[BlockHeader

 proc addTransactionToCanonicalChain(self: BaseChainDB, txHash: Hash256,
     blockHeader: BlockHeader, index: int) =
   let k: TransactionKey = (blockHeader.blockNumber, index)
-  self.db.set(transactionHashToBlockKey(txHash), rlp.encode(k).toSeq())
+  self.db.set(transactionHashToBlockKey(txHash), rlp.encode(k))

 proc persistUncles*(self: BaseChainDB, uncles: openarray[BlockHeader]): Hash256 =
   ## Persists the list of uncles to the database.
   ## Returns the uncles hash.
   let enc = rlp.encode(uncles)
   result = keccak256.digest(enc.toOpenArray())
-  self.db.set(genericHashKey(result), enc.toSeq())
+  self.db.set(genericHashKey(result), enc)

 proc persistBlockToDb*(self: BaseChainDB; blk: Block) =
   ## Persist the given block's header and uncles.
diff --git a/nimbus/db/storage_types.nim b/nimbus/db/storage_types.nim
new file mode 100644
index 000000000..3783e6bdc
--- /dev/null
+++ b/nimbus/db/storage_types.nim
@@ -0,0 +1,69 @@
+import
+  hashes, eth_common
+
+type
+  DBKeyKind* = enum
+    genericHash
+    blockNumberToHash
+    blockHashToScore
+    transactionHashToBlock
+    canonicalHeadHash
+
+  DbKey* = object
+    # The first byte stores the key type. The rest are key-specific values
+    data: array[33, byte]
+    usedBytes: uint8
+
+  StorageError* = object of Exception
+
+proc genericHashKey*(h: Hash256): DbKey {.inline.} =
+  result.data[0] = byte ord(genericHash)
+  result.data[1 .. 32] = h.data
+  result.usedBytes = uint8 32
+
+proc blockHashToScoreKey*(h: Hash256): DbKey {.inline.} =
+  result.data[0] = byte ord(blockHashToScore)
+  result.data[1 .. 32] = h.data
+  result.usedBytes = uint8 32
+
+proc transactionHashToBlockKey*(h: Hash256): DbKey {.inline.} =
+  result.data[0] = byte ord(transactionHashToBlock)
+  result.data[1 .. 32] = h.data
+  result.usedBytes = uint8 32
+
+proc blockNumberToHashKey*(u: BlockNumber): DbKey {.inline.} =
+  result.data[0] = byte ord(blockNumberToHash)
+  assert sizeof(u) <= 32
+  copyMem(addr result.data[1], unsafeAddr u, sizeof(u))
+  result.usedBytes = uint8 sizeof(u)
+
+proc canonicalHeadHashKey*(): DbKey {.inline.} =
+  result.data[0] = byte ord(canonicalHeadHash)
+  result.usedBytes = 32
+
+const hashHolderKinds = {genericHash, blockHashToScore, transactionHashToBlock}
+
+template toOpenArray*(k: DbKey): openarray[byte] =
+  k.data.toOpenArray(0, int k.usedBytes)
+
+proc hash*(k: DbKey): Hash =
+  result = hash(k.toOpenArray)
+
+proc `==`*(a, b: DbKey): bool {.inline.} =
+  equalMem(unsafeAddr a, unsafeAddr b, sizeof(a))
+
+template raiseStorageInitError* =
+  raise newException(StorageError, "failure to initialize storage")
+
+template raiseKeyReadError*(key: auto) =
+  raise newException(StorageError, "failed to read key " & $key)
+
+template raiseKeyWriteError*(key: auto) =
+  raise newException(StorageError, "failed to write key " & $key)
+
+template raiseKeySearchError*(key: auto) =
+  raise newException(StorageError, "failure during search for key " & $key)
+
+template raiseKeyDeletionError*(key: auto) =
+  raise newException(StorageError, "failure to delete key " & $key)
+
diff --git a/tests/all_tests.nim b/tests/all_tests.nim
index 514013e21..461037de7 100644
--- a/tests/all_tests.nim
+++ b/tests/all_tests.nim
@@ -9,5 +9,6 @@ import ./test_code_stream,
   ./test_gas_meter,
   ./test_memory,
   ./test_stack,
-  ./test_opcode
+  ./test_opcode,
+  ./test_storage_backends
 # ./test_vm_json
diff --git a/tests/test_storage_backends.nim b/tests/test_storage_backends.nim
new file mode 100644
index 000000000..e8de17b66
--- /dev/null
+++ b/tests/test_storage_backends.nim
@@ -0,0 +1,55 @@
+import
+  unittest, macros,
+  nimcrypto/[keccak, hash], ranges, eth_common/eth_types,
+  ../nimbus/db/[storage_types],
+  ../nimbus/db/backends/[sqlite_backend, rocksdb_backend]
+
+template dummyInstance(T: type SqliteChainDB): auto =
+  sqlite_backend.initChainDB ":memory:"
+
+template dummyInstance(T: type RocksChainDB): auto =
+  rocksdb_backend.initChainDB "/tmp/nimbus-test-db"
+
+template backendTests(DB) =
+  suite("storage tests: " & astToStr(DB)):
+    setup:
+      var db = dummyInstance(DB)
+
+    teardown:
+      close(db)
+
+    test "basic insertions and deletions":
+      var keyA = genericHashKey(keccak256.digest("A"))
+      var keyB = blockNumberToHashKey(100.toBlockNumber)
+      var value1 = @[1.byte, 2, 3, 4, 5].toRange
+      var value2 = @[7.byte, 8, 9, 10].toRange
+
+      db.put(keyA, value1)
+
+      check:
+        keyA in db
+        keyB notin db
+
+      db.put(keyB, value2)
+
+      check:
+        keyA in db
+        keyB in db
+
+      check:
+        db.get(keyA) == value1
+        db.get(keyB) == value2
+
+      db.del(keyA)
+      db.put(keyB, value1)
+
+      check:
+        keyA notin db
+        keyB in db
+
+      check db.get(keyB) == value1
+      db.del(keyA)
+
+backendTests(RocksChainDB)
+backendTests(SqliteChainDB)
+
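
For reference, a minimal usage sketch of the storage API this patch introduces (the typed DbKey constructors from storage_types.nim plus the ChainDB put/get/contains/del/close surface). It is not part of the diff; the relative import paths and the in-memory SQLite database simply mirror the conventions of tests/test_storage_backends.nim above, and the assertions restate what that test checks.

# Usage sketch only, not part of the patch. Assumes compilation from the
# tests/ directory, so the same relative imports as test_storage_backends.nim apply.
import ranges, nimcrypto/[keccak, hash], eth_common/eth_types
import ../nimbus/db/storage_types
import ../nimbus/db/backends/sqlite_backend

var db = initChainDB(":memory:")                    # SqliteChainDB, exported as ChainDB
let keyA = genericHashKey(keccak256.digest("A"))    # 1 tag byte + 32-byte hash payload
let keyB = blockNumberToHashKey(100.toBlockNumber)
let value = @[1.byte, 2, 3, 4, 5].toRange

db.put(keyA, value)                                 # stores the ByteRange under the typed key
assert keyA in db and keyB notin db                 # `in`/`notin` resolve to the exported `contains`
assert db.get(keyA) == value
db.del(keyA)
db.close()

The RocksDB backend exposes the same ChainDB procs, so the body stays the same apart from importing rocksdb_backend and passing a directory path to initChainDB.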