nimbus-eth1/nimbus/db/aristo/aristo_fetch.nim
Jordan Hrycaj a1161b537b
Core db update storage root management for sub tries (#1964)
* Aristo: Re-phrase `LayerDelta` and `LayerFinal` as object references

why:
  Avoids copying in some cases
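  As a rough illustration of the difference (hypothetical stand-in type,
  not the real `LayerDelta`): assigning a plain Nim `object` copies all of
  its fields, while assigning a `ref object` only copies the reference.

    # Hypothetical stand-in, not the actual `LayerDelta` definition.
    type
      DeltaByValue = object          # assignment copies the whole payload
        sTab: seq[int]
      DeltaByRef = ref object        # assignment copies just a reference
        sTab: seq[int]

    var a = DeltaByValue(sTab: @[1, 2, 3])
    var b = a                        # deep copy of `sTab`
    b.sTab[0] = 9
    doAssert a.sTab[0] == 1          # `a` is unaffected

    let x = DeltaByRef(sTab: @[1, 2, 3])
    let y = x                        # `x` and `y` share one object
    y.sTab[0] = 9
    doAssert x.sTab[0] == 9          # change visible through both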

* Fix copyright header

* Aristo: Verify `leafTie.root` function argument for `merge()` proc

why:
  A zero root would lead to an inconsistent DB entry
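  A minimal self-contained sketch of such a guard, using stand-in types
  rather than the real `LeafTie`/`AristoError` (the error name below is
  made up):

    # Stand-in types only; the real `merge()` works on Aristo descriptors.
    type
      VtxNo = uint64                 # stand-in for `VertexID`; 0 = invalid
      TieLike = object
        root: VtxNo
      CheckError = enum
        CheckOk, CheckRootMissing    # hypothetical error names

    proc checkMergeRoot(lty: TieLike): CheckError =
      if lty.root == 0:              # a zero root must never reach the DB
        return CheckRootMissing
      CheckOk

    doAssert checkMergeRoot(TieLike(root: 0)) == CheckRootMissing
    doAssert checkMergeRoot(TieLike(root: 1)) == CheckOk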

* Aristo: Update failure condition for hash labels compiler `hashify()`

why:
  A node need not be rejected as long as its links are on the schedule. In
  that case, `redo[]` is to become `wff.base[]` at a later stage.

  This amends an earlier fix (part of #1952) by also testing against
  the target nodes of the `wff.base[]` sets.

* Aristo: Add storage root glue record to `hashify()` schedule

why:
  An account leaf node might refer to a non-resolvable storage root ID.
  Storage root node chains will end up at the storage root. So the link
  `storage-root->account-leaf` needs an extra item in the schedule.

* Aristo: fix error code returned by `fetchPayload()`

details:
  Final error code is implied by the error code from the `hikeUp()`
  function.

* CoreDb: Discard `createOk` argument in API `getRoot()` function

why:
  Not needed for the legacy DB. For the `Aristo` DB, a lazy approach is
  implemented where a storage root node is created on-the-fly.

* CoreDb: Prevent `$$` logging in some cases

why:
  Logging the function `$$` is not useful when it is used internally,
  i.e. for retrieving an error text for logging.

* CoreDb: Add `tryHashFn()` to API for pretty printing

why:
  Pretty printing must not change the hashification status for the
  `Aristo` DB. So there is an independent API wrapper for getting the
  node hash which never updates the hashes.
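  The distinction can be pictured with a stand-alone stand-in (hypothetical
  names, not the CoreDb API): one accessor computes and caches a missing
  hash, the other only reports what is already cached.

    # Hypothetical sketch only.
    import std/[hashes, options]

    type HNode = ref object
      data: string
      cachedHash: Option[Hash]

    proc hashFn(n: HNode): Hash =
      ## May update the cache, i.e. change the hashification status.
      if n.cachedHash.isNone:
        n.cachedHash = some n.data.hash
      n.cachedHash.get

    proc tryHashFn(n: HNode): Option[Hash] =
      ## Read-only: safe to call from a pretty printer.
      n.cachedHash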

* CoreDb: Discard `update` argument in API `hash()` function

why:
  When calling the API function `hash()`, the latest state is always
  wanted. For a version that uses the current state as-is without checking,
  the function `tryHash()` was added to the backend.

* CoreDb: Update opaque vertex ID objects for the `Aristo` backend

why:
  For `Aristo`, vID objects encapsulate a numeric `VertexID`
  referencing a vertex (rather than a node hash as used on the
  legacy backend). For storage sub-tries, there might be no initial
  vertex known when the descriptor is created. So opaque vertex ID
  objects are supported without a valid `VertexID`, which will be
  initialised on-the-fly when the first item is merged.
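  A stand-alone sketch of that lazy initialisation (hypothetical names; the
  real CoreDb/Aristo objects differ):

    # Hypothetical sketch only.
    type
      VtxNo = uint64                 # stand-in for `VertexID`; 0 = unset
      OpaqueVid = ref object
        vid: VtxNo                   # no vertex allocated yet while 0

    proc ensureVid(w: OpaqueVid; alloc: proc(): VtxNo): VtxNo =
      ## Allocate the numeric ID on first use, e.g. when the first item
      ## is merged into the storage sub-trie.
      if w.vid == 0:
        w.vid = alloc()
      w.vid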

* CoreDb: Add pretty printer for opaque vertex ID objects

* Cosmetics, printing profiling data

* CoreDb: Fix segfault in `Aristo` backend when creating MPT descriptor

why:
  A missing initialisation caused the error

* CoreDb: Allow MPT to inherit shared context on `Aristo` backend

why:
  Creates descriptors with different storage roots for the same
  shared `Aristo` DB descriptor.
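  Roughly the relationship described, with hypothetical types:

    # Stand-alone sketch: two trie views sharing one DB context.
    type
      SharedCtx = ref object         # stands in for the shared `Aristo` descriptor
        name: string
      MptDesc = object
        ctx: SharedCtx               # inherited, shared context
        root: uint64                 # storage root, one per descriptor

    let ctx = SharedCtx(name: "shared")
    let accounts = MptDesc(ctx: ctx, root: 1)
    let storage  = MptDesc(ctx: ctx, root: 2)   # same context, different root
    doAssert accounts.ctx == storage.ctx        # reference identity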

* Cosmetics, update diagnostic message items for `Aristo` backend

* Fix Copyright year
2024-01-11 19:11:38 +00:00

# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Objects Retrieval Via Traversal Path
## =================================================
##
{.push raises: [].}

import
  eth/trie/nibbles,
  results,
  "."/[aristo_desc, aristo_hike]

const
  AcceptableHikeStops = {
    HikeBranchTailEmpty,
    HikeBranchBlindEdge,
    HikeExtTailEmpty,
    HikeExtTailMismatch,
    HikeLeafUnexpected}

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc fetchPayloadImpl(
    rc: Result[Hike,(Hike,AristoError)];
      ): Result[PayloadRef,(VertexID,AristoError)] =
  ## Map the result of a `hikeUp()` traversal to a payload lookup result.
  ## Hike stops listed in `AcceptableHikeStops` merely mean that the path
  ## does not exist and are reported as `FetchPathNotFound`.
  if rc.isErr:
    let vid =
      if rc.error[0].legs.len == 0: VertexID(0)
      else: rc.error[0].legs[^1].wp.vid
    if rc.error[1] in AcceptableHikeStops:
      return err((vid, FetchPathNotFound))
    return err((vid, rc.error[1]))
  if rc.value.legs.len == 0:
    return err((VertexID(0), FetchPathNotFound))
  ok rc.value.legs[^1].wp.vtx.lData

proc fetchPayloadImpl(
    db: AristoDbRef;
    root: VertexID;
    path: openArray[byte];
      ): Result[PayloadRef,(VertexID,AristoError)] =
  ## Traverse the trie from `root` along `path` and extract the payload.
  path.initNibbleRange.hikeUp(root, db).fetchPayloadImpl

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc fetchPayload*(
    db: AristoDbRef;
    key: LeafTie;
      ): Result[PayloadRef,(VertexID,AristoError)] =
  ## Cascaded attempt to traverse the `Aristo Trie` and fetch the value of a
  ## leaf vertex. This function is complementary to `merge()`.
  ##
  key.hikeUp(db).fetchPayloadImpl

proc fetchPayload*(
    db: AristoDbRef;
    root: VertexID;
    path: openArray[byte];
      ): Result[PayloadRef,(VertexID,AristoError)] =
  ## Variant of `fetchPayload()`
  ##
  if path.len == 0:
    return err((VertexID(0),LeafKeyInvalid))
  db.fetchPayloadImpl(root, path)

proc hasPath*(
    db: AristoDbRef;                  # Database
    root: VertexID;
    path: openArray[byte];            # Key of database record
      ): Result[bool,(VertexID,AristoError)] =
  ## Variant of `fetchPayload()` that only checks whether a leaf exists.
  ##
  if path.len == 0:
    return err((VertexID(0),LeafKeyInvalid))
  let rc = db.fetchPayloadImpl(root, path)
  if rc.isOk:
    return ok(true)
  if rc.error[1] == FetchPathNotFound:
    return ok(false)
  err(rc.error)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
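
For orientation, a hedged usage sketch of the two public entry points above.
It assumes an already initialised `AristoDbRef` named `db` and a populated
sub-trie under `root`; the error handling follows the signatures in this file.

  # Usage sketch only; `db` is assumed to exist and be set up elsewhere.
  let root = VertexID(1)
  let path = @[byte 0x12, 0x34]

  # Existence check without materialising the payload
  let rcHas = db.hasPath(root, path)
  if rcHas.isOk:
    echo "path present: ", rcHas.value

  # Fetch the leaf payload, distinguishing "not found" from real errors
  let rcPay = db.fetchPayload(root, path)
  if rcPay.isOk:
    discard rcPay.value              # `PayloadRef` of the leaf vertex
  elif rcPay.error[1] == FetchPathNotFound:
    discard                          # no leaf stored under this path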