2023-05-30 21:21:15 +00:00
|
|
|
# nimbus-eth1
|
Core db update storage root management for sub tries (#1964)
* Aristo: Re-phrase `LayerDelta` and `LayerFinal` as object references
why:
Avoids copying in some cases
* Fix copyright header
* Aristo: Verify `leafTie.root` function argument for `merge()` proc
why:
Zero root will lead to inconsistent DB entry
* Aristo: Update failure condition for hash labels compiler `hashify()`
why:
Node need not be rejected as long as links are on the schedule. In
that case, `redo[]` is to become `wff.base[]` at a later stage.
This amends an earlier fix, part of #1952 by also testing against
the target nodes of the `wff.base[]` sets.
* Aristo: Add storage root glue record to `hashify()` schedule
why:
An account leaf node might refer to a non-resolvable storage root ID.
Storage root node chains will end up at the storage root. So the link
`storage-root->account-leaf` needs an extra item in the schedule.
* Aristo: fix error code returned by `fetchPayload()`
details:
Final error code is implied by the error code form the `hikeUp()`
function.
* CoreDb: Discard `createOk` argument in API `getRoot()` function
why:
Not needed for the legacy DB. For the `Arsto` DB, a lazy approach is
implemented where a stprage root node is created on-the-fly.
* CoreDb: Prevent `$$` logging in some cases
why:
Logging the function `$$` is not useful when it is used for internal
use, i.e. retrieving an an error text for logging.
* CoreDb: Add `tryHashFn()` to API for pretty printing
why:
Pretty printing must not change the hashification status for the
`Aristo` DB. So there is an independent API wrapper for getting the
node hash which never updated the hashes.
* CoreDb: Discard `update` argument in API `hash()` function
why:
When calling the API function `hash()`, the latest state is always
wanted. For a version that uses the current state as-is without checking,
the function `tryHash()` was added to the backend.
* CoreDb: Update opaque vertex ID objects for the `Aristo` backend
why:
For `Aristo`, vID objects encapsulate a numeric `VertexID`
referencing a vertex (rather than a node hash as used on the
legacy backend.) For storage sub-tries, there might be no initial
vertex known when the descriptor is created. So opaque vertex ID
objects are supported without a valid `VertexID` which will be
initalised on-the-fly when the first item is merged.
* CoreDb: Add pretty printer for opaque vertex ID objects
* Cosmetics, printing profiling data
* CoreDb: Fix segfault in `Aristo` backend when creating MPT descriptor
why:
Missing initialisation error
* CoreDb: Allow MPT to inherit shared context on `Aristo` backend
why:
Creates descriptors with different storage roots for the same
shared `Aristo` DB descriptor.
* Cosmetics, update diagnostic message items for `Aristo` backend
* Fix Copyright year
2024-01-11 19:11:38 +00:00
|
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
2023-05-30 21:21:15 +00:00
|
|
|
# Licensed under either of
|
|
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
|
|
# http://opensource.org/licenses/MIT)
|
|
|
|
# at your option. This file may not be copied, modified, or distributed
|
|
|
|
# except according to those terms.
|
|
|
|
|
|
|
|
{.push raises: [].}
|
|
|
|
|
|
|
|
import
|
|
|
|
eth/common,
|
2023-09-15 15:23:53 +00:00
|
|
|
results,
|
2024-07-18 07:13:56 +00:00
|
|
|
"."/[aristo_desc, aristo_get, aristo_serialise]
|
|
|
|
|
|
|
|
proc putKeyAtLevel(
    db: AristoDbRef, rvid: RootedVertexID, key: HashKey, level: int
): Result[void, AristoError] =
  ## Persist a freshly computed hash key for `rvid`, either into the delta
  ## layer identified by `level` or - when `level == -2` - straight to the
  ## underlying database backend. Writing backend-sourced keys back to the
  ## backend keeps memory usage proportional to the pending change set
  ## (vertex data may have been committed to disk without computing the
  ## corresponding hash!)
  if level != -2:
    # The key belongs to an in-memory delta layer
    db.deltaAtLevel(level).kMap[rvid] = key
    ok()
  else:
    # The vertex came from the backend, so store the key there as well
    let backend = db.backend
    doAssert backend != nil, "source data is from the backend"

    # TODO long-running batch here?
    let batch = ?backend.putBegFn()
    backend.putKeyFn(batch, rvid, key)
    ?backend.putEndFn batch

    ok()
|
|
|
|
|
|
|
|
func maxLevel(cur, other: int): int =
  ## Compare two layer levels and return the one that is topmost in the
  ## stack, taking into account the odd reversal of order around the zero
  ## point: negative levels (backend side) always rank below non-negative
  ## ones, while among non-negative levels the smaller number is on top.
  if cur >= 0:
    if other >= 0:
      min(cur, other) # order is reversed here: 0 is the top layer
    else:
      cur             # only `cur` is >= 0, hence topmost
  else:
    max(cur, other)   # >= 0 is always more topmost than < 0
|
|
|
|
|
|
|
|
proc computeKeyImpl(
    db: AristoDbRef;                   # Database, top layer
    rvid: RootedVertexID;              # Vertex to convert
      ): Result[(HashKey, int), AristoError] =
  ## Compute the key for an arbitrary vertex ID. If successful, the length of
  ## the resulting key might be smaller than 32. If it is used as a root vertex
  ## state/hash, it must be converted to a `Hash256` (using (`.to(Hash256)`) as
  ## in `db.computeKey(rvid).value.to(Hash256)` which always results in a
  ## 32 byte value.
  ##
  ## Along with the key, the layer level of the top-most vertex this
  ## computation depended on is returned (`-2` for backend-sourced data.)

  db.getKeyRc(rvid).isErrOr:
    # Value cached either in layers or database
    return ok value

  let (vtx, vl) = ? db.getVtxRc rvid

  # Top-most level of all the vertices this hash computation depends on
  var level = vl

  # TODO this is the same code as when serializing NodeRef, without the NodeRef
  var writer = initRlpWriter()

  case vtx.vType:
  of Leaf:
    writer.startList(2)
    writer.append(vtx.pfx.toHexPrefix(isLeaf = true).data())

    case vtx.lData.pType
    of AccountData:
      let
        stoID = vtx.lData.stoID
        skey =
          if stoID.isValid:
            # Recurse into the storage sub-trie rooted at `stoID.vid`
            let (skey, sl) = ?db.computeKeyImpl((stoID.vid, stoID.vid))
            level = maxLevel(level, sl)
            skey
          else:
            VOID_HASH_KEY

      writer.append(encode Account(
        nonce: vtx.lData.account.nonce,
        balance: vtx.lData.account.balance,
        storageRoot: skey.to(Hash256),
        codeHash: vtx.lData.account.codeHash)
      )
    of RawData:
      writer.append(vtx.lData.rawBlob)
    of StoData:
      # TODO avoid memory allocation when encoding storage data
      writer.append(rlp.encode(vtx.lData.stoData))

  of Branch:
    # Serialise the 16 child links followed by the (empty) 17th value slot
    template writeBranch(w: var RlpWriter) =
      w.startList(17)

      for n in 0..15:
        let vid = vtx.bVid[n]
        if vid.isValid:
          let (bkey, bl) = ?db.computeKeyImpl((rvid.root, vid))
          level = maxLevel(level, bl)
          w.append(bkey)
        else:
          w.append(VOID_HASH_KEY)

      w.append EmptyBlob

    if vtx.pfx.len > 0: # Extension node
      # Extension nodes are stored together with their branch: serialise the
      # branch separately, then wrap its hash in the extension record
      var bwriter = initRlpWriter()
      writeBranch(bwriter)

      writer.startList(2)
      # NOTE: spelled `isLeaf` (not `isleaf`) for `--styleCheck` consistency
      # with the Leaf case above
      writer.append(vtx.pfx.toHexPrefix(isLeaf = false).data())
      writer.append(bwriter.finish().digestTo(HashKey))
    else:
      writeBranch(writer)

  let h = writer.finish().digestTo(HashKey)

  # Cache the hash in the same storage layer as the top-most value that it
  # depends on (recursively) - this could be an ephemeral in-memory layer or the
  # underlying database backend - typically, values closer to the root are more
  # likely to live in an in-memory layer since any leaf change will lead to the
  # root key also changing while leaves that have never been hashed will see
  # their hash being saved directly to the backend.
  ? db.putKeyAtLevel(rvid, h, level)

  ok (h, level)
|
2024-02-08 16:32:16 +00:00
|
|
|
|
2024-07-18 07:13:56 +00:00
|
|
|
proc computeKey*(
    db: AristoDbRef;                   # Database, top layer
    rvid: RootedVertexID;              # Vertex to convert
      ): Result[HashKey, AristoError] =
  ## Public wrapper around `computeKeyImpl()` that returns the computed hash
  ## key only, discarding the dependency-level bookkeeping.
  let (key, _) = ?computeKeyImpl(db, rvid)
  ok key
|
2023-05-30 21:21:15 +00:00
|
|
|
|
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
# End
|
|
|
|
# ------------------------------------------------------------------------------
|