2023-05-30 21:21:15 +00:00
|
|
|
# nimbus-eth1
|
Core db update storage root management for sub tries (#1964)
* Aristo: Re-phrase `LayerDelta` and `LayerFinal` as object references
why:
Avoids copying in some cases
* Fix copyright header
* Aristo: Verify `leafTie.root` function argument for `merge()` proc
why:
Zero root will lead to inconsistent DB entry
* Aristo: Update failure condition for hash labels compiler `hashify()`
why:
Node need not be rejected as long as links are on the schedule. In
that case, `redo[]` is to become `wff.base[]` at a later stage.
This amends an earlier fix, part of #1952 by also testing against
the target nodes of the `wff.base[]` sets.
* Aristo: Add storage root glue record to `hashify()` schedule
why:
An account leaf node might refer to a non-resolvable storage root ID.
Storage root node chains will end up at the storage root. So the link
`storage-root->account-leaf` needs an extra item in the schedule.
* Aristo: fix error code returned by `fetchPayload()`
details:
Final error code is implied by the error code from the `hikeUp()`
function.
* CoreDb: Discard `createOk` argument in API `getRoot()` function
why:
Not needed for the legacy DB. For the `Aristo` DB, a lazy approach is
implemented where a storage root node is created on-the-fly.
* CoreDb: Prevent `$$` logging in some cases
why:
Logging the function `$$` is not useful when it is used for internal
use, i.e. retrieving an error text for logging.
* CoreDb: Add `tryHashFn()` to API for pretty printing
why:
Pretty printing must not change the hashification status for the
`Aristo` DB. So there is an independent API wrapper for getting the
node hash which never updated the hashes.
* CoreDb: Discard `update` argument in API `hash()` function
why:
When calling the API function `hash()`, the latest state is always
wanted. For a version that uses the current state as-is without checking,
the function `tryHash()` was added to the backend.
* CoreDb: Update opaque vertex ID objects for the `Aristo` backend
why:
For `Aristo`, vID objects encapsulate a numeric `VertexID`
referencing a vertex (rather than a node hash as used on the
legacy backend.) For storage sub-tries, there might be no initial
vertex known when the descriptor is created. So opaque vertex ID
objects are supported without a valid `VertexID` which will be
initialised on-the-fly when the first item is merged.
* CoreDb: Add pretty printer for opaque vertex ID objects
* Cosmetics, printing profiling data
* CoreDb: Fix segfault in `Aristo` backend when creating MPT descriptor
why:
Missing initialisation error
* CoreDb: Allow MPT to inherit shared context on `Aristo` backend
why:
Creates descriptors with different storage roots for the same
shared `Aristo` DB descriptor.
* Cosmetics, update diagnostic message items for `Aristo` backend
* Fix Copyright year
2024-01-11 19:11:38 +00:00
|
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
2023-05-30 21:21:15 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or distributed
|
|
|
|
# except according to those terms.
|
|
|
|
|
|
|
|
{.push raises: [].}
|
|
|
|
|
|
|
|
import
|
|
|
|
eth/common,
|
2023-09-15 15:23:53 +00:00
|
|
|
results,
|
2024-06-28 15:03:12 +00:00
|
|
|
"."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise]
|
2023-08-17 13:42:01 +00:00
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
|
2024-06-28 15:03:12 +00:00
|
|
|
proc computeKey*(
    db: AristoDbRef;                   # Database, top layer
    vid: VertexID;                     # Vertex to convert
      ): Result[HashKey, AristoError] =
  ## Compute the Merkle hash key for the vertex `vid`, recursively resolving
  ## any child vertices whose keys are not yet available.
  ##
  ## This is a variation on `getKeyRc` which computes the key instead of
  ## returning an error when it cannot be fetched directly.
  ##
  ## TODO it should not always write the key to the persistent storage

  proc getKey(db: AristoDbRef; vid: VertexID): HashKey =
    ## Best-effort lookup of a cached key for `vid`: first in the layers,
    ## then on the backend. Returns `VOID_HASH_KEY` when nothing usable
    ## is found.
    block body:
      let key = db.layersGetKey(vid).valueOr:
        break body                     # not cached in any layer, try backend
      if key.isValid:
        return key
      else:
        return VOID_HASH_KEY
    let rc = db.getKeyBE vid
    if rc.isOk:
      return rc.value
    VOID_HASH_KEY

  # Fast path: the key is already known.
  let key = getKey(db, vid)
  if key.isValid():
    # debugEcho "ok ", vid, " ", key
    return ok key

  let vtx = ? db.getVtxRc vid

  # TODO this is the same code as when serializing NodeRef, without the NodeRef
  var rlp = initRlpWriter()

  case vtx.vType:
  of Leaf:
    rlp.startList(2)
    rlp.append(vtx.lPfx.toHexPrefix(isLeaf = true))

    # Need to resolve storage root for account leaf
    case vtx.lData.pType
    of AccountData:
      let
        stoID = vtx.lData.stoID
        # An invalid storage ID means the account has an empty storage trie.
        stoKey =
          if stoID.isValid:
            ?db.computeKey(stoID)
          else:
            VOID_HASH_KEY

      rlp.append(encode Account(
        nonce: vtx.lData.account.nonce,
        balance: vtx.lData.account.balance,
        storageRoot: stoKey.to(Hash256),
        codeHash: vtx.lData.account.codeHash)
      )
    of RawData:
      rlp.append(vtx.lData.rawBlob)

  of Branch:
    rlp.startList(17)
    for n in 0..15:
      let bVid = vtx.bVid[n]
      if bVid.isValid:
        rlp.append(?db.computeKey(bVid))
      else:
        rlp.append(VOID_HASH_KEY)
    rlp.append EmptyBlob               # 17th slot (value) is always empty here

  of Extension:
    rlp.startList(2)
    rlp.append(vtx.ePfx.toHexPrefix(isLeaf = false))
    rlp.append(?db.computeKey(vtx.eVid))

  let h = rlp.finish().digestTo(HashKey)

  # TODO This shouldn't necessarily go into the database if we're just
  # computing a key ephemerally - it should however be cached for some time
  # since deep hash computations are expensive
  # debugEcho "putkey ", vtx.vType, " ", vid, " ", h, " ", toHex(rlp.finish)
  db.layersPutKey(VertexID(1), vid, h)
  ok h
|
2023-05-30 21:21:15 +00:00
|
|
|
|
2024-02-08 16:32:16 +00:00
|
|
|
|
2023-05-30 21:21:15 +00:00
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|