2023-08-25 22:53:59 +00:00
|
|
|
# nimbus-eth1
|
Core db update storage root management for sub tries (#1964)
* Aristo: Re-phrase `LayerDelta` and `LayerFinal` as object references
why:
Avoids copying in some cases
* Fix copyright header
* Aristo: Verify `leafTie.root` function argument for `merge()` proc
why:
Zero root will lead to inconsistent DB entry
* Aristo: Update failure condition for hash labels compiler `hashify()`
why:
Node need not be rejected as long as links are on the schedule. In
that case, `redo[]` is to become `wff.base[]` at a later stage.
This amends an earlier fix, part of #1952 by also testing against
the target nodes of the `wff.base[]` sets.
* Aristo: Add storage root glue record to `hashify()` schedule
why:
An account leaf node might refer to a non-resolvable storage root ID.
Storage root node chains will end up at the storage root. So the link
`storage-root->account-leaf` needs an extra item in the schedule.
* Aristo: fix error code returned by `fetchPayload()`
details:
Final error code is implied by the error code from the `hikeUp()`
function.
* CoreDb: Discard `createOk` argument in API `getRoot()` function
why:
Not needed for the legacy DB. For the `Aristo` DB, a lazy approach is
implemented where a storage root node is created on-the-fly.
* CoreDb: Prevent `$$` logging in some cases
why:
Logging the function `$$` is not useful when it is used for internal
use, i.e. retrieving an error text for logging.
* CoreDb: Add `tryHashFn()` to API for pretty printing
why:
Pretty printing must not change the hashification status for the
`Aristo` DB. So there is an independent API wrapper for getting the
node hash which never updates the hashes.
* CoreDb: Discard `update` argument in API `hash()` function
why:
When calling the API function `hash()`, the latest state is always
wanted. For a version that uses the current state as-is without checking,
the function `tryHash()` was added to the backend.
* CoreDb: Update opaque vertex ID objects for the `Aristo` backend
why:
For `Aristo`, vID objects encapsulate a numeric `VertexID`
referencing a vertex (rather than a node hash as used on the
legacy backend.) For storage sub-tries, there might be no initial
vertex known when the descriptor is created. So opaque vertex ID
objects are supported without a valid `VertexID` which will be
initialised on-the-fly when the first item is merged.
* CoreDb: Add pretty printer for opaque vertex ID objects
* Cosmetics, printing profiling data
* CoreDb: Fix segfault in `Aristo` backend when creating MPT descriptor
why:
Missing initialisation error
* CoreDb: Allow MPT to inherit shared context on `Aristo` backend
why:
Creates descriptors with different storage roots for the same
shared `Aristo` DB descriptor.
* Cosmetics, update diagnostic message items for `Aristo` backend
* Fix Copyright year
2024-01-11 19:11:38 +00:00
|
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
2023-08-25 22:53:59 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or distributed
|
|
|
|
# except according to those terms.
|
|
|
|
|
|
|
|
import
|
2024-05-03 17:38:17 +00:00
|
|
|
std/options,
|
2023-11-08 12:18:32 +00:00
|
|
|
eth/common,
|
2023-08-25 22:53:59 +00:00
|
|
|
results,
|
2024-05-03 17:38:17 +00:00
|
|
|
".."/[aristo_desc, aristo_desc/desc_backend],
|
|
|
|
./journal_scheduler
|
2023-09-11 20:38:49 +00:00
|
|
|
|
2023-08-25 22:53:59 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# Public functions
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2024-05-03 17:38:17 +00:00
|
|
|
proc journalGetInx*(
    be: BackendRef;
    fid = none(FilterID);
    earlierOK = false;
      ): Result[JournalInx,AristoError] =
  ## If there is some argument `fid`, find the filter on the journal with ID
  ## not larger than `fid` (i.e. the resulting filter must not be more recent.)
  ##
  ## If the argument `earlierOK` is passed `false`, the function succeeds only
  ## if the filter ID of the returned filter is equal to the argument `fid`.
  ##
  ## In case that there is no argument `fid`, the filter with the smallest
  ## filter ID (i.e. the oldest filter) is returned. Here, the argument
  ## `earlierOK` is ignored.
  ##
  if be.journal.isNil:
    return err(FilQuSchedDisabled)

  # Single-entry memoisation: the journal search below may probe the same
  # queue ID twice (once while scanning, once when fetching the result.)
  var cache = (QueueID(0),FilterRef(nil)) # Avoids double lookup for last entry

  proc qid2fid(qid: QueueID): Result[FilterID,void] =
    ## Resolve a queue ID to its filter ID, going through the cache first.
    if qid == cache[0]: # Avoids double lookup for last entry
      return ok cache[1].fid
    let fil = be.getFilFn(qid).valueOr:
      return err()
    cache = (qid,fil)
    ok fil.fid

  let qid = block:
    if fid.isNone:
      # Get oldest filter
      be.journal[^1]
    else:
      # Find filter with ID not smaller than `fid`
      be.journal.le(fid.unsafeGet, qid2fid, forceEQ = not earlierOK)

  if not qid.isValid:
    return err(FilFilterNotFound)

  var fip: JournalInx
  fip.fil = block:
    # Re-use the cached filter if the search already fetched it.
    if cache[0] == qid:
      cache[1]
    else:
      be.getFilFn(qid).valueOr:
        return err(error)

  # Translate the queue ID back into a journal slot index.
  fip.inx = be.journal[qid]
  if fip.inx < 0:
    return err(FilInxByQidFailed)

  ok fip
|
|
|
|
|
|
|
|
|
2024-05-03 17:38:17 +00:00
|
|
|
proc journalGetOverlap*(
    be: BackendRef;
    filter: FilterRef;
      ): int =
  ## This function will find the overlap of an argument `filter` which is
  ## composed by some recent filter slots from the journal.
  ##
  ## The function returns the number of most recent journal filters that are
  ## reverted by the argument `filter`. This requires that `src`, `trg`, and
  ## `fid` of the argument `filter` is properly calculated (e.g. using
  ## `journalOpsFetchSlots()`.)
  ##
  ## A return value of `0` means no overlap could be established.
  ##
  # Check against the top-fifo entry.
  let qid = be.journal[0]
  if not qid.isValid:
    return 0

  let top = be.getFilFn(qid).valueOr:
    return 0

  # The `filter` must match the `top`
  if filter.src != top.src:
    return 0

  # Does the filter revert the first entry?
  if filter.trg == top.trg:
    return 1

  # Check against some stored filter IDs
  if filter.isValid:
    let fp = be.journalGetInx(some(filter.fid), earlierOK=true).valueOr:
      return 0
    if filter.trg == fp.fil.trg:
      return 1 + fp.inx
|
2023-08-25 22:53:59 +00:00
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|