# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  std/[algorithm, tables],
  chronicles,
  eth/[common, trie/db],
  ../../../../db/[core_db, core_db/legacy_db, kvstore_rocksdb],
  ../../range_desc,
  "."/[hexary_desc, hexary_error, rocky_bulk_load, snapdb_desc]

logScope:
  topics = "snap-db"

type
  AccountsGetFn* = proc(key: openArray[byte]): Blob
    {.gcsafe, raises: [].}
      ## The `get()` function for the accounts trie

  ContractsGetFn* = proc(key: openArray[byte]): Blob
    {.gcsafe, raises: [].}
      ## The `get()` function for the contracts table

  StorageSlotsGetFn* = proc(acc: NodeKey; key: openArray[byte]): Blob
    {.gcsafe, raises: [].}
      ## The `get()` function for the storage tries depends on the current
      ## account

  StateRootRegistry* = object
    ## State root record. A table of records of this kind is organised as
    ## follows.
    ## ::
    ##   zero -> (n/a) -------+
    ##                        |
    ##      ...               |
    ##       ^                |
    ##       |                |
    ##     (data)             |
    ##       ^                |
    ##       |                |
    ##     (data)             |
    ##       ^                |
    ##       |                |
    ##     (data) <-----------+
    ##
    key*: NodeKey    ## Top reference for base entry, back reference otherwise
    data*: Blob      ## Some data

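# Concrete instance of the diagram above (illustrative comment, not from the
# original source): after registering state roots `r1` and then `r2` via
# `persistentStateRootPut()` below, the table holds
#
#   zero -> StateRootRegistry(key: r2)             # base entry, top is r2
#   r2   -> StateRootRegistry(key: r1, data: d2)   # back reference to r1
#   r1   -> StateRootRegistry(key: zero, data: d1) # oldest entry
#
# so the base entry always names the most recently registered root.
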
const
  extraTraceMessages = false # or true
    ## Enable additional logging noise

# ------------------------------------------------------------------------------
# Private helpers, logging
# ------------------------------------------------------------------------------

template logTxt(info: static[string]): static[string] =
  "Persistent db " & info

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc convertTo(key: RepairKey; T: type NodeKey): T =
  ## Might be lossy, check before use
  discard result.init(key.ByteArray33[1 .. 32])

proc convertTo(key: RepairKey; T: type NodeTag): T =
  ## Might be lossy, check before use
  UInt256.fromBytesBE(key.ByteArray33[1 .. 32]).T

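# Added note (illustration, not from the original source): the conversions
# above are "lossy" because a `RepairKey` is a 33 byte value and only the
# trailing 32 bytes are copied, e.g.
#
#   discard result.init(key.ByteArray33[1 .. 32])   # byte [0] is dropped
#
# so distinct repair keys can collapse to the same `NodeKey`. Hence the
# callers below check `isNodeKey` before converting.
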
proc toAccountsKey(a: RepairKey): auto =
  a.convertTo(NodeKey).toAccountsKey

proc toStorageSlotsKey(a: RepairKey): auto =
  a.convertTo(NodeKey).toStorageSlotsKey

proc stateRootGet*(db: CoreDbRef; nodeKey: NodeKey): Blob =
  if db.isLegacy:
    return db.kvt.backend.toLegacy.get(nodeKey.toStateRootKey.toOpenArray)

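# Added note: on a non-legacy `CoreDbRef` backend the proc above falls
# through and returns the default empty `Blob`, which callers such as
# `persistentStateRootGet()` below treat as "not found".
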
# ------------------------------------------------------------------------------
# Public functions: get
# ------------------------------------------------------------------------------

proc persistentAccountsGetFn*(db: CoreDbRef): AccountsGetFn =
  ## Returns a `get()` function for retrieving accounts data
  return proc(key: openArray[byte]): Blob =
    var nodeKey: NodeKey
    if nodeKey.init(key):
      if db.isLegacy:
        return db.kvt.backend.toLegacy.get(
          nodeKey.toAccountsKey.toOpenArray)

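# Usage sketch (illustrative comment; `db` and `keyBytes` are assumed to be
# an initialised `CoreDbRef` and a 32 byte key, respectively):
#
#   let getFn = db.persistentAccountsGetFn()
#   let rlpNode = getFn(keyBytes)   # empty `Blob` unless the key resolves
#                                   # on a legacy backend
#
# The closure captures `db`, so it can be handed to the hexary trie code
# without exposing the database handle.
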
proc persistentContractsGetFn*(db: CoreDbRef): ContractsGetFn =
  ## Returns a `get()` function for retrieving contracts data
  return proc(key: openArray[byte]): Blob =
    var nodeKey: NodeKey
    if nodeKey.init(key):
      if db.isLegacy:
        return db.kvt.backend.toLegacy.get(
          nodeKey.toContractHashKey.toOpenArray)

proc persistentStorageSlotsGetFn*(db: CoreDbRef): StorageSlotsGetFn =
  ## Returns a `get()` function for retrieving storage slots data
  return proc(accKey: NodeKey; key: openArray[byte]): Blob =
    var nodeKey: NodeKey
    if nodeKey.init(key):
      if db.isLegacy:
        return db.kvt.backend.toLegacy.get(
          nodeKey.toStorageSlotsKey.toOpenArray)

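# Added note: on the legacy backend the `accKey` argument above is ignored;
# storage slot nodes are addressed by `nodeKey` alone. The parameter exists
# because, per the `StorageSlotsGetFn` doc comment, the storage tries depend
# on the current account.
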
proc persistentStateRootGet*(
    db: CoreDbRef;
    root: NodeKey;
      ): Result[StateRootRegistry,HexaryError] =
  ## Implements a `get()` function for returning state root registry data.
  let rlpBlob = db.stateRootGet(root)
  if 0 < rlpBlob.len:
    try:
      return ok(rlp.decode(rlpBlob, StateRootRegistry))
    except RlpError:
      return err(RlpEncoding)
  err(StateRootNotFound)

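# Walk sketch (illustrative comment, assuming `db` is an initialised
# `CoreDbRef` and `NodeKey` supports `==`): the registry can be enumerated
# newest first by following the back references from the base entry:
#
#   var key = NodeKey.default                  # `zero` base entry
#   while true:
#     let rc = db.persistentStateRootGet(key)
#     if rc.isErr or rc.value.key == NodeKey.default:
#       break                                  # end of chain
#     echo rc.value.key                        # visit this state root
#     key = rc.value.key                       # follow back reference
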
# ------------------------------------------------------------------------------
# Public functions: store/put
# ------------------------------------------------------------------------------

proc persistentBlockHeaderPut*(
    db: CoreDbRef;
    hdr: BlockHeader;
      ) =
  ## Store a single header. This function is intended to finalise snap sync
  ## by storing a universal pivot header, not unlike genesis.
  let hashKey = hdr.blockHash
  db.kvt.put(hashKey.toBlockHeaderKey.toOpenArray, rlp.encode(hdr))
  db.kvt.put(hdr.blockNumber.toBlockNumberKey.toOpenArray, rlp.encode(hashKey))
  when extraTraceMessages:
    trace logTxt "stored block header", hashKey,
      blockNumber=hdr.blockNumber.toStr,
      dbVerify=(0 < db.kvt.get(hashKey.toBlockHeaderKey.toOpenArray).len)

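# Retrieval sketch (illustrative comment, assuming the `kvt` table also
# provides a matching `get()`): the two `put()` calls above allow a header
# lookup by block number in two hops, number -> hash -> header:
#
#   let hashKey = rlp.decode(
#     db.kvt.get(num.toBlockNumberKey.toOpenArray), Hash256)
#   let hdr = rlp.decode(
#     db.kvt.get(hashKey.toBlockHeaderKey.toOpenArray), BlockHeader)
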
proc persistentAccountsPut*(
    db: HexaryTreeDbRef;
    base: CoreDbRef;
      ): Result[void,HexaryError] =
  ## Bulk store using transactional `put()`
  let dbTx = base.beginTransaction
  defer: dbTx.commit

  for (key,value) in db.tab.pairs:
    if not key.isNodeKey:
      let error = UnresolvedRepairNode
      when extraTraceMessages:
        trace logTxt "unresolved node in repair table", error
      return err(error)
    base.kvt.put(key.toAccountsKey.toOpenArray, value.convertTo(Blob))
  ok()

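# Usage sketch (illustrative comment; `hexDb` and `db` are assumptions): all
# resolved nodes of a repair table are flushed within a single transaction
# frame, and any leftover temporary key aborts the bulk store:
#
#   let rc = hexDb.persistentAccountsPut(db)
#   if rc.isErr:
#     doAssert rc.error == UnresolvedRepairNode
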
proc persistentStorageSlotsPut*(
    db: HexaryTreeDbRef;
    base: CoreDbRef;
      ): Result[void,HexaryError] =
  ## Bulk store using transactional `put()`
  let dbTx = base.beginTransaction
  defer: dbTx.commit

  for (key,value) in db.tab.pairs:
    if not key.isNodeKey:
      let error = UnresolvedRepairNode
      when extraTraceMessages:
        trace logTxt "unresolved node in repair table", error
      return err(error)
    base.kvt.put(key.toStorageSlotsKey.toOpenArray, value.convertTo(Blob))
  ok()

proc persistentContractPut*(
    data: seq[(NodeKey,Blob)];
    base: CoreDbRef;
      ): Result[void,HexaryError]
      {.gcsafe, raises: [OSError,IOError,KeyError].} =
  ## Bulk store using transactional `put()`
  let dbTx = base.beginTransaction
  defer: dbTx.commit

  for (key,val) in data:
    base.kvt.put(key.toContractHashKey.toOpenArray, val)
  ok()

proc persistentStateRootPut*(
    db: CoreDbRef;
    root: NodeKey;
    data: Blob;
      ) {.gcsafe, raises: [RlpError].} =
  ## Save or update state root registry data.
  const
    zeroKey = NodeKey.default
  let
    rlpData = db.stateRootGet(root)

  if rlpData.len == 0:
    var backKey: NodeKey

    let baseBlob = db.stateRootGet(zeroKey)
    if 0 < baseBlob.len:
      backKey = rlp.decode(baseBlob, StateRootRegistry).key

    # No need for a transaction frame. If the system crashes in between,
    # so be it :). All that can happen is storing redundant top entries.
    let
      rootEntryData = rlp.encode StateRootRegistry(key: backKey, data: data)
      zeroEntryData = rlp.encode StateRootRegistry(key: root)

    # Store a new top entry
    db.kvt.put(root.toStateRootKey.toOpenArray, rootEntryData)

    # Store updated base record pointing to top entry
    db.kvt.put(zeroKey.toStateRootKey.toOpenArray, zeroEntryData)

  else:
    let record = rlp.decode(rlpData, StateRootRegistry)
    if record.data != data:

      let rootEntryData =
        rlp.encode StateRootRegistry(key: record.key, data: data)

      db.kvt.put(root.toStateRootKey.toOpenArray, rootEntryData)

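# Behaviour note (added comment): re-registering an already known root takes
# the `else:` branch above, i.e. it only rewrites the `data` payload while
# preserving the back reference `record.key`, so the chain never grows by
# duplicate entries.
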
proc persistentAccountsPut*(
    db: HexaryTreeDbRef;
    rocky: RocksStoreRef
      ): Result[void,HexaryError]
      {.gcsafe, raises: [OSError,IOError,KeyError].} =
  ## SST based bulk load on `rocksdb`.
  if rocky.isNil:
    return err(NoRocksDbBackend)
  let bulker = RockyBulkLoadRef.init(rocky)
  defer: bulker.destroy()
  if not bulker.begin(RockyBulkCache):
    let error = CannotOpenRocksDbBulkSession
    when extraTraceMessages:
      trace logTxt "rocksdb session initiation failed",
        error, info=bulker.lastError()
    return err(error)

  #let keyList = toSeq(db.tab.keys)
  #              .filterIt(it.isNodeKey)
  #              .mapIt(it.convertTo(NodeTag))
  #              .sorted(cmp)
  var
    keyList = newSeq[NodeTag](db.tab.len)
    inx = 0
  for repairKey in db.tab.keys:
    if repairKey.isNodeKey:
      keyList[inx] = repairKey.convertTo(NodeTag)
      inx.inc
  if inx < db.tab.len:
    return err(UnresolvedRepairNode)
  keyList.sort(cmp)

  for n,nodeTag in keyList:
    let
      nodeKey = nodeTag.to(NodeKey)
      data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
    if not bulker.add(nodeKey.toAccountsKey.toOpenArray, data):
      let error = AddBulkItemFailed
      when extraTraceMessages:
        trace logTxt "rocksdb bulk stash failure",
          n, len=db.tab.len, error, info=bulker.lastError()
      return err(error)

  if bulker.finish().isErr:
    let error = CommitBulkItemsFailed
    when extraTraceMessages:
      trace logTxt "rocksdb commit failure",
        len=db.tab.len, error, info=bulker.lastError()
    return err(error)
  ok()

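# Added note: the `sort(cmp)` above is essential, not cosmetic. An SST based
# bulk load writes a RocksDB table file whose keys must arrive in ascending
# order, so the repair table keys are sequenced via `keyList` rather than
# taken in `db.tab` hash order.
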
proc persistentStorageSlotsPut*(
    db: HexaryTreeDbRef;
    rocky: RocksStoreRef
      ): Result[void,HexaryError]
      {.gcsafe, raises: [OSError,IOError,KeyError].} =
  ## SST based bulk load on `rocksdb`.
  if rocky.isNil:
    return err(NoRocksDbBackend)
  let bulker = RockyBulkLoadRef.init(rocky)
  defer: bulker.destroy()
  if not bulker.begin(RockyBulkCache):
    let error = CannotOpenRocksDbBulkSession
    when extraTraceMessages:
      trace logTxt "rocksdb session initiation failed",
        error, info=bulker.lastError()
    return err(error)

  #let keyList = toSeq(db.tab.keys)
  #              .filterIt(it.isNodeKey)
  #              .mapIt(it.convertTo(NodeTag))
  #              .sorted(cmp)
  var
    keyList = newSeq[NodeTag](db.tab.len)
    inx = 0
  for repairKey in db.tab.keys:
    if repairKey.isNodeKey:
      keyList[inx] = repairKey.convertTo(NodeTag)
      inx.inc
  if inx < db.tab.len:
    return err(UnresolvedRepairNode)
  keyList.sort(cmp)

  for n,nodeTag in keyList:
    let
      nodeKey = nodeTag.to(NodeKey)
      data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
    if not bulker.add(nodeKey.toStorageSlotsKey.toOpenArray, data):
      let error = AddBulkItemFailed
      when extraTraceMessages:
        trace logTxt "rocksdb bulk stash failure",
          n, len=db.tab.len, error, info=bulker.lastError()
      return err(error)

  if bulker.finish().isErr:
    let error = CommitBulkItemsFailed
    when extraTraceMessages:
      trace logTxt "rocksdb commit failure",
        len=db.tab.len, error, info=bulker.lastError()
    return err(error)
  ok()

proc persistentContractPut*(
    data: seq[(NodeKey,Blob)];
    rocky: RocksStoreRef
      ): Result[void,HexaryError]
      {.gcsafe, raises: [OSError,IOError,KeyError].} =
  ## SST based bulk load on `rocksdb`.
  if rocky.isNil:
    return err(NoRocksDbBackend)
  let bulker = RockyBulkLoadRef.init(rocky)
  defer: bulker.destroy()
  if not bulker.begin(RockyBulkCache):
    let error = CannotOpenRocksDbBulkSession
    when extraTraceMessages:
      trace logTxt "rocksdb session initiation failed",
        error, info=bulker.lastError()
    return err(error)

  var
    lookup: Table[NodeKey,Blob]
    keyList = newSeq[NodeTag](data.len)
    inx = 0
  for (key,val) in data:
    if not lookup.hasKey key:
      lookup[key] = val
      keyList[inx] = key.to(NodeTag)
      inx.inc
  if lookup.len < data.len:
    keyList.setLen(inx)
  keyList.sort(cmp)

  for n,nodeTag in keyList:
    let
      nodeKey = nodeTag.to(NodeKey)
      data = lookup[nodeKey]
    if not bulker.add(nodeKey.toContractHashKey.toOpenArray, data):
      let error = AddBulkItemFailed
      when extraTraceMessages:
        trace logTxt "rocksdb bulk load failure",
          n, dataLen=data.len, error, info=bulker.lastError()
      return err(error)

  if bulker.finish().isErr:
    let error = CommitBulkItemsFailed
    when extraTraceMessages:
      trace logTxt "rocksdb commit failure",
        dataLen=data.len, error, info=bulker.lastError()
    return err(error)
  ok()

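# Added note: unlike the accounts and storage slots variants above, the
# `data` argument may contain duplicate contract keys. The proc therefore
# deduplicates through the `lookup` table and shrinks `keyList` accordingly
# before sorting and bulk loading.
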
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------