# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
## In-memory backend for Aristo DB
## ===============================
##
## The iterators provided here are currently available only by direct
## backend access
## ::
##   import
##     aristo/aristo_init,
##     aristo/aristo_init/memory_db
##
##   let rc = newAristoDbRef(BackendMemory)
##   if rc.isOk:
##     let be = rc.value.to(MemBackendRef)
##     for (vid, vtx) in be.walkVtx:
##       ...
##
{.push raises: [].}

import
  std/[algorithm, options, sequtils, tables],
  eth/common,
  results,
  ../aristo_constants,
  ../aristo_desc,
  ../aristo_desc/desc_backend,
  ../aristo_blobify,
  ./init_common

const
  extraTraceMessages = false or true
    ## Enabled additional logging noise

type
  MemDbRef = ref object
    ## Database
    sTab: Table[VertexID,Blob]         ## Structural vertex table making up a trie
    kMap: Table[VertexID,HashKey]      ## Merkle hash key mapping
    tUvi: Option[VertexID]             ## Top used vertex ID
    lSst: Opt[SavedState]              ## Last saved state

  MemBackendRef* = ref object of TypedBackendRef
    ## Inheriting table so access can be extended for debugging purposes
    mdb: MemDbRef                      ## Database

  MemPutHdlRef = ref object of TypedPutHdlRef
    sTab: Table[VertexID,Blob]
    kMap: Table[VertexID,HashKey]
    tUvi: Option[VertexID]
    lSst: Opt[SavedState]

when extraTraceMessages:
  import chronicles

  logScope:
    topics = "aristo-backend"

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template logTxt(info: static[string]): static[string] =
"MemoryDB/" & info
proc newSession(db: MemBackendRef): MemPutHdlRef =
  new result
  result.TypedPutHdlRef.beginSession db

proc getSession(hdl: PutHdlRef; db: MemBackendRef): MemPutHdlRef =
  hdl.TypedPutHdlRef.verifySession db
  hdl.MemPutHdlRef

proc endSession(hdl: PutHdlRef; db: MemBackendRef): MemPutHdlRef =
  hdl.TypedPutHdlRef.finishSession db
  hdl.MemPutHdlRef

# ------------------------------------------------------------------------------
# Private functions: interface
# ------------------------------------------------------------------------------
proc getVtxFn(db: MemBackendRef): GetVtxFn =
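  ## Generate the vertex getter closure. The stored record is
  ## de-serialised, failing with `GetVtxNotFound` if there is no entry.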
  result =
    proc(vid: VertexID): Result[VertexRef,AristoError] =
      # Fetch serialised data record
      let data = db.mdb.sTab.getOrDefault(vid, EmptyBlob)
      if 0 < data.len:
        let rc = data.deblobify(VertexRef)
        when extraTraceMessages:
          if rc.isErr:
            trace logTxt "getVtxFn() failed", vid, error=rc.error
        return rc
      err(GetVtxNotFound)

proc getKeyFn(db: MemBackendRef): GetKeyFn =
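  ## Generate the Merkle hash key getter closure, failing with
  ## `GetKeyNotFound` if there is no valid key for the vertex ID.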
  result =
    proc(vid: VertexID): Result[HashKey,AristoError] =
      let key = db.mdb.kMap.getOrVoid vid
      if key.isValid:
        return ok key
      err(GetKeyNotFound)

proc getTuvFn(db: MemBackendRef): GetTuvFn =
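  ## Generate the getter closure for the top used vertex ID.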
  result =
    proc(): Result[VertexID,AristoError] =
      if db.mdb.tUvi.isSome:
        return ok db.mdb.tUvi.unsafeGet
      err(GetTuvNotFound)

proc getLstFn(db: MemBackendRef): GetLstFn =
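  ## Generate the getter closure for the last saved state record.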
  result =
    proc(): Result[SavedState,AristoError] =
      if db.mdb.lSst.isSome:
        return ok db.mdb.lSst.unsafeGet
      err(GetLstNotFound)

# -------------
proc putBegFn(db: MemBackendRef): PutBegFn =
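  ## Generate the closure that opens a put session and returns a new
  ## transaction handle.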
  result =
    proc(): PutHdlRef =
      db.newSession()

proc putVtxFn(db: MemBackendRef): PutVtxFn =
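  ## Generate the closure that serialises and stages vertex records on
  ## the session handle. An invalid vertex is staged as an empty blob,
  ## which deletes the corresponding table entry on commit.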
  result =
    proc(hdl: PutHdlRef; vrps: openArray[(VertexID,VertexRef)]) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        for (vid,vtx) in vrps:
          if vtx.isValid:
            let rc = vtx.blobify()
            if rc.isErr:
              hdl.error = TypedPutHdlErrRef(
                pfx:  VtxPfx,
                vid:  vid,
                code: rc.error)
              return
            hdl.sTab[vid] = rc.value
          else:
            hdl.sTab[vid] = EmptyBlob

proc putKeyFn(db: MemBackendRef): PutKeyFn =
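  ## Generate the closure that stages Merkle hash keys on the session
  ## handle. An invalid key deletes the corresponding table entry on commit.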
  result =
    proc(hdl: PutHdlRef; vkps: openArray[(VertexID,HashKey)]) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        for (vid,key) in vkps:
          hdl.kMap[vid] = key

proc putTuvFn(db: MemBackendRef): PutTuvFn =
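  ## Generate the closure that stages the top used vertex ID on the
  ## session handle.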
  result =
    proc(hdl: PutHdlRef; vs: VertexID) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        hdl.tUvi = some(vs)

proc putLstFn(db: MemBackendRef): PutLstFn =
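  ## Generate the closure that stages the last saved state. A failing
  ## serialisation test is recorded as error on the session handle.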
  result =
    proc(hdl: PutHdlRef; lst: SavedState) =
      let hdl = hdl.getSession db
      if hdl.error.isNil:
        let rc = lst.blobify # test
        if rc.isOk:
          hdl.lSst = Opt.some(lst)
        else:
          hdl.error = TypedPutHdlErrRef(
            pfx:  AdmPfx,
            aid:  AdmTabIdLst,
            code: rc.error)

proc putEndFn(db: MemBackendRef): PutEndFn =
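  ## Generate the commit closure. It applies the staged data to the
  ## database tables, or returns the error recorded on the session handle.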
  result =
    proc(hdl: PutHdlRef): Result[void,AristoError] =
      let hdl = hdl.endSession db
      if not hdl.error.isNil:
        when extraTraceMessages:
          case hdl.error.pfx:
          of VtxPfx, KeyPfx: trace logTxt "putEndFn: vtx/key failed",
            pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
          of AdmPfx: trace logTxt "putEndFn: admin failed",
            pfx=AdmPfx, aid=hdl.error.aid.uint64, error=hdl.error.code
          of Oops: trace logTxt "putEndFn: failed",
            pfx=hdl.error.pfx, error=hdl.error.code
        return err(hdl.error.code)

      for (vid,data) in hdl.sTab.pairs:
        if 0 < data.len:
          db.mdb.sTab[vid] = data
        else:
          db.mdb.sTab.del vid

      for (vid,key) in hdl.kMap.pairs:
        if key.isValid:
          db.mdb.kMap[vid] = key
        else:
          db.mdb.kMap.del vid
      let tuv = hdl.tUvi.get(otherwise = VertexID(0))
      if tuv.isValid:
        db.mdb.tUvi = some(tuv)

      if hdl.lSst.isSome:
        db.mdb.lSst = hdl.lSst

      ok()

# -------------
proc guestDbFn(db: MemBackendRef): GuestDbFn =
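  ## Stub, this backend does not support a guest database.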
  result =
    proc(instance: int): Result[RootRef,AristoError] =
      ok(RootRef nil)

proc closeFn(db: MemBackendRef): CloseFn =
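  ## Stub, there is nothing to close with the in-memory backend.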
  result =
    proc(ignore: bool) =
      discard

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc memoryBackend*(): BackendRef =
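  ## Initialise a new in-memory backend descriptor with all interface
  ## closures set up.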
  let db = MemBackendRef(
    beKind: BackendMemory,
    mdb:    MemDbRef())

  db.getVtxFn = getVtxFn db
  db.getKeyFn = getKeyFn db
  db.getTuvFn = getTuvFn db
  db.getLstFn = getLstFn db

  db.putBegFn = putBegFn db
  db.putVtxFn = putVtxFn db
  db.putKeyFn = putKeyFn db
  db.putTuvFn = putTuvFn db
  db.putLstFn = putLstFn db
  db.putEndFn = putEndFn db

  db.guestDbFn = guestDbFn db
  db.closeFn = closeFn db
  db
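
# Sketch of a put/commit round trip through the closure table, assuming
# hypothetical `someVtx`/`someKey` values:
#
#   let be  = memoryBackend()
#   let hdl = be.putBegFn()
#   be.putVtxFn(hdl, @[(VertexID(1), someVtx)])
#   be.putKeyFn(hdl, @[(VertexID(1), someKey)])
#   doAssert be.putEndFn(hdl).isOk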
proc dup*(db: MemBackendRef): MemBackendRef =
  ## Duplicate descriptor shell as needed for API debugging
  new result
  init_common.init(result[], db[])
  result.mdb = db.mdb

# ------------------------------------------------------------------------------
# Public iterators (needs direct backend access)
# ------------------------------------------------------------------------------
iterator walkVtx*(
    be: MemBackendRef;
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Iteration over the vertex sub-table.
  for n,vid in be.mdb.sTab.keys.toSeq.sorted:
    let data = be.mdb.sTab.getOrDefault(vid, EmptyBlob)
    if 0 < data.len:
      let rc = data.deblobify VertexRef
      if rc.isErr:
        when extraTraceMessages:
          debug logTxt "walkVtxFn() skip", n, vid, error=rc.error
      else:
        yield (vid, rc.value)

iterator walkKey*(
    be: MemBackendRef;
      ): tuple[vid: VertexID, key: HashKey] =
  ## Iteration over the Merkle hash sub-table.
  for vid in be.mdb.kMap.keys.toSeq.sorted:
    let key = be.mdb.kMap.getOrVoid vid
    if key.isValid:
      yield (vid, key)

iterator walk*(
    be: MemBackendRef;
      ): tuple[pfx: StorageType, xid: uint64, data: Blob] =
  ## Walk over all key-value pairs of the database.
  ##
  ## Non-decodable entries are skipped.
  if be.mdb.tUvi.isSome:
    yield(AdmPfx, AdmTabIdTuv.uint64, be.mdb.tUvi.unsafeGet.blobify)
  if be.mdb.lSst.isSome:
    yield(AdmPfx, AdmTabIdLst.uint64, be.mdb.lSst.unsafeGet.blobify.value)

  for vid in be.mdb.sTab.keys.toSeq.sorted:
    let data = be.mdb.sTab.getOrDefault(vid, EmptyBlob)
    if 0 < data.len:
      yield (VtxPfx, vid.uint64, data)

  for (vid,key) in be.walkKey:
    yield (KeyPfx, vid.uint64, @(key.data))

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------