# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Handy Helpers
## ==========================
##
{.push raises: [].}

import
  eth/common,
  results,
  "."/[aristo_constants, aristo_desc, aristo_get, aristo_layers]

# ------------------------------------------------------------------------------
# Public functions, converters
# ------------------------------------------------------------------------------

proc toNode*(
    vtx: VertexRef;                    # Vertex to convert
    root: VertexID;                    # Sub-tree root the `vtx` belongs to
    db: AristoDbRef;                   # Database
    stopEarly = true;                  # Full list of missing links if `false`
    beKeyOk = true;                    # Allow fetching DB backend keys
      ): Result[NodeRef,seq[VertexID]] =
  ## Convert the argument vertex `vtx` to a node type. Missing Merkle hash
  ## keys are searched for on the argument database `db`.
  ##
  ## On error, at least the vertex ID of the first missing Merkle hash key is
  ## returned. If the argument `stopEarly` is set `false`, all missing Merkle
  ## hash keys are returned.
  ##
  ## If the argument `beKeyOk` is set `false`, keys for node links are accepted
  ## only from the cache layer. This does not affect a link key for a payload
  ## storage root.
  ##
  proc getKey(db: AristoDbRef; rvid: RootedVertexID; beOk: bool): HashKey =
    ## Resolve the Merkle key for `rvid`: first via the cache layers, then --
    ## only if `beOk` is set -- via the DB backend. Returns `VOID_HASH_KEY`
    ## when no valid key can be found.
    block body:
      let key = db.layersGetKey(rvid).valueOr:
        break body                     # not cached => try backend (if allowed)
      if key[0].isValid:
        return key[0]
      else:
        # Cached but explicitly void -- do not fall back to the backend.
        return VOID_HASH_KEY
    if beOk:
      let rc = db.getKeyBE rvid
      if rc.isOk:
        return rc.value[0]
    VOID_HASH_KEY

  case vtx.vType:
  of Leaf:
    let node = NodeRef(vtx: vtx.dup())
    # Need to resolve storage root for account leaf
    if vtx.lData.pType == AccountData:
      let stoID = vtx.lData.stoID
      if stoID.isValid:
        # A storage root link key may always be fetched from the backend,
        # regardless of `beKeyOk` (see doc comment above.) Note that the
        # original call was missing the mandatory `beOk` argument.
        let key = db.getKey((stoID.vid, stoID.vid), beOk=true)
        if not key.isValid:
          return err(@[stoID.vid])
        node.key[0] = key
    return ok node

  of Branch:
    let node = NodeRef(vtx: vtx.dup())
    var missing: seq[VertexID]         # collected only when `stopEarly` unset
    for n in 0 .. 15:
      let vid = vtx.bVid[n]
      if vid.isValid:
        let key = db.getKey((root, vid), beOk=beKeyOk)
        if key.isValid:
          node.key[n] = key
        elif stopEarly:
          return err(@[vid])
        else:
          missing.add vid
    if 0 < missing.len:
      return err(missing)
    return ok node
iterator subVids*(vtx: VertexRef): VertexID =
  ## Yields every sub-vertex ID referenced by the argument `vtx`.
  case vtx.vType:
  of Leaf:
    # An account leaf may carry a link to its storage sub-trie root.
    if vtx.lData.pType == AccountData and vtx.lData.stoID.isValid:
      yield vtx.lData.stoID.vid
  of Branch:
    # Forward each occupied child slot of the branch.
    for child in vtx.bVid:
      if child.isValid:
        yield child
|
2023-12-04 20:39:26 +00:00
|
|
|
|
2024-07-29 20:15:17 +00:00
|
|
|
iterator subVidKeys*(node: NodeRef): (VertexID,HashKey) =
  ## Similar to `subVids()` but for nodes: additionally yields the Merkle
  ## key cached on the node for each sub-vertex ID.
  case node.vtx.vType:
  of Leaf:
    # An account leaf may carry a storage sub-trie root link; its key is
    # stored in slot 0.
    if node.vtx.lData.pType == AccountData:
      let stoID = node.vtx.lData.stoID
      if stoID.isValid:
        yield (stoID.vid, node.key[0])
  of Branch:
    # Each occupied child slot pairs with the key of the same index.
    for n in 0 .. 15:
      let vid = node.vtx.bVid[n]
      if vid.isValid:
        yield (vid, node.key[n])

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------