# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Rocks DB store data record
## ==========================

{.push raises: [].}

import
  eth/common,
  rocksdb,
  results,
  stew/[endians2, keyed_queue],
  ../../aristo_desc,
  ../init_common,
  ./rdb_desc

const
  extraTraceMessages = false
    ## Enable additional logging noise

when extraTraceMessages:
  import chronicles

  logScope:
    topics = "aristo-rocksdb"

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc disposeSession(rdb: var RdbInst) =
  ## Close the pending write batch (if any) and reset the session handle
  ## so that a subsequent `begin()` will open a fresh batch.
  rdb.session.close()
  rdb.session = WriteBatchRef(nil)

proc putImpl(
    dsc: WriteBatchRef;
    name: string;
    key: RdbKey;
    val: Blob;
      ): Result[void,(uint64,AristoError,string)] =
  ## Queue a single record on the write batch `dsc` for column family `name`.
  ## A non-empty `val` is stored under `key`; an empty `val` requests deletion
  ## of the entry. On failure the error tuple carries the 64 bit id encoded
  ## in the key (big endian, bytes 1..8.)
  if 0 < val.len:
    dsc.put(key, val, name).isOkOr:
      const errSym = RdbBeDriverPutError
      let xid = uint64.fromBytesBE key[1 .. 8]
      when extraTraceMessages:
        trace logTxt "put",
          pfx=StorageType(key[0]), xid, error=errSym, info=error
      return err((xid,errSym,error))
  else:
    # Empty value => remove the record from the backend.
    dsc.delete(key, name).isOkOr:
      const errSym = RdbBeDriverDelError
      let xid = uint64.fromBytesBE key[1 .. 8]
      when extraTraceMessages:
        trace logTxt "del",
          pfx=StorageType(key[0]), xid, error=errSym, info=error
      return err((xid,errSym,error))
  ok()

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc begin*(rdb: var RdbInst) =
  ## Open a new write batch session unless one is already in flight.
  if not rdb.session.isNil:
    return
  rdb.session = rdb.store.openWriteBatch()

proc rollback*(rdb: var RdbInst) =
  ## Drop the pending write batch without applying it to the backend.
  if rdb.session.isClosed():
    return
  # The read caches may hold entries queued by the abandoned batch, so
  # they must be invalidated along with the session.
  rdb.rdKeyLru.clear()
  rdb.rdVtxLru.clear()
  rdb.disposeSession()

proc commit*(rdb: var RdbInst): Result[void,(AristoError,string)] =
  ## Apply the pending write batch to the backend store. The session is
  ## disposed regardless of whether the write succeeds. A no-op when no
  ## session is open.
  if rdb.session.isClosed():
    return ok()
  defer: rdb.disposeSession()
  rdb.store.write(rdb.session).isOkOr:
    const errSym = RdbBeDriverWriteError
    when extraTraceMessages:
      trace logTxt "commit", error=errSym, info=error
    return err((errSym,error))
  ok()

proc putByPfx*(
    rdb: var RdbInst;
    pfx: StorageType;
    data: openArray[(uint64,Blob)];
      ): Result[void,(uint64,AristoError,string)] =
  ## Queue the argument `(id,blob)` pairs on the current write batch, keyed
  ## under the storage prefix `pfx`. An empty blob deletes the entry.
  let
    batch = rdb.session
    cfName = rdb.store.name
  for (id, blob) in data:
    batch.putImpl(cfName, id.toRdbKey pfx, blob).isOkOr:
      return err(error)
  ok()

proc putKey*(
    rdb: var RdbInst;
    data: openArray[(uint64,Blob)];
      ): Result[void,(uint64,AristoError,string)] =
  ## Queue hash key records on the current write batch (prefix `KeyPfx`),
  ## keeping the key read cache in sync with the new values.
  let
    batch = rdb.session
    cfName = rdb.store.name
  for (id, blob) in data:
    let cacheKey = id.toRdbKey KeyPfx

    # Refresh the read cache first so lookups see the queued value.
    if not rdb.rdKeyLru.lruUpdate(cacheKey, blob):
      discard rdb.rdKeyLru.lruAppend(cacheKey, blob, RdKeyLruMaxSize)

    # Queue the record on the write batch.
    batch.putImpl(cfName, cacheKey, blob).isOkOr:
      return err(error)
  ok()

proc putVtx*(
    rdb: var RdbInst;
    data: openArray[(uint64,Blob)];
      ): Result[void,(uint64,AristoError,string)] =
  ## Queue vertex records on the current write batch (prefix `VtxPfx`),
  ## keeping the vertex read cache in sync with the new values.
  let
    batch = rdb.session
    cfName = rdb.store.name
  for (id, blob) in data:
    let cacheKey = id.toRdbKey VtxPfx

    # Refresh the read cache first so lookups see the queued value.
    if not rdb.rdVtxLru.lruUpdate(cacheKey, blob):
      discard rdb.rdVtxLru.lruAppend(cacheKey, blob, RdVtxLruMaxSize)

    # Queue the record on the write batch.
    batch.putImpl(cfName, cacheKey, blob).isOkOr:
      return err(error)
  ok()

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------