mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-01-29 05:25:34 +00:00
0f430c70fd
* Update TDD suite logger output format choices why: New format is not practical for TDD as it just dumps data across a wide range (considerably larger than 80 columns.) So the new format can be turned on by function argument. * Update unit tests samples configuration why: Slightly changed the way to find the `era1` directory * Remove compiler warnings (fix deprecated expressions and phrases) * Update `Aristo` debugging tools * Always update the `storageID` field of account leaf vertices why: Storage tries are weakly linked to an account leaf object in that the `storageID` field is updated by the application. Previously, `Aristo` verified that leaf objects make sense when passed to the database. As a consequence * the database was inconsistent for a short while * the burden for correctness was all on the application which led to delayed error handling which is hard to debug. So `Aristo` will internally update the account leaf objects so that there are no race conditions due to the storage trie handling * Aristo: Let `stow()`/`persist()` bail out unless there is a `VertexID(1)` why: The journal and filter logic depends on the hash of the `VertexID(1)` which is commonly known as the state root. This implies that all changes to the database are somehow related to that. * Make sure that a `Ledger` account does not overwrite the storage trie reference why: Due to the abstraction of a sub-trie (now referred to as column with a hash describing its state) there was a weakness in the `Aristo` handler where an account leaf could be overwritten though changing the validity of the database. This has been changed and the database will now reject such changes. This patch fixes the behaviour on the application layer. In particular, the column handle returned by the `CoreDb` needs to be updated by the `Aristo` database state. This mitigates the problem that a storage trie might have vanished or re-appeared with a different vertex ID. 
* Fix sub-trie deletion test why: Was originally hinged on `VertexID(1)` which cannot be wholesale deleted anymore after the last Aristo update. Also, running with `VertexID(2)` needs an artificial `VertexID(1)` for making `stow()` or `persist()` work. * Cosmetics * Activate `test_generalstate_json` * Temporarily `deactivate test_tracer_json` * Fix copyright header --------- Co-authored-by: jordan <jordan@dry.pudding> Co-authored-by: Jacek Sieka <jacek@status.im>
248 lines
7.7 KiB
Nim
248 lines
7.7 KiB
Nim
# nimbus-eth1
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
|
# Licensed under either of
|
|
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
|
# http://www.apache.org/licenses/LICENSE-2.0)
|
|
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
|
# http://opensource.org/licenses/MIT)
|
|
# at your option. This file may not be copied, modified, or distributed
|
|
# except according to those terms.
|
|
|
|
## Aristo DB -- Handy Helpers
|
|
## ==========================
|
|
##
|
|
{.push raises: [].}
|
|
|
|
import
|
|
std/[sequtils, sets, typetraits],
|
|
eth/common,
|
|
results,
|
|
"."/[aristo_constants, aristo_desc, aristo_get, aristo_hike, aristo_layers]
|
|
|
|
# ------------------------------------------------------------------------------
|
|
# Public functions, converters
|
|
# ------------------------------------------------------------------------------
|
|
|
|
proc toAccount*(
    payload: PayloadRef;                 # Payload to convert
    db: AristoDbRef;                     # Database, needed for storage root
      ): Result[Account,AristoError] =
  ## Converts the argument `payload` to an `Account` type. If the implied
  ## account has a storage slots system associated, the database `db` must
  ## contain the Merkle hash key of the root vertex.
  case payload.pType:
  of RlpData:
    # RLP-encoded blob: decode directly into an `Account` object.
    try:
      return ok(rlp.decode(payload.rlpBlob, Account))
    except RlpError:
      return err(AccRlpDecodingError)
  of AccountData:
    # Structured account payload: copy fields, then resolve the storage
    # root hash (if any) from the database.
    var acc = Account(
      nonce:       payload.account.nonce,
      balance:     payload.account.balance,
      codeHash:    payload.account.codeHash,
      storageRoot: EMPTY_ROOT_HASH)      # default for accounts w/o storage
    if payload.account.storageID.isValid:
      # Bail out (via `?`) if the Merkle key for the storage root vertex
      # is not available on `db`.
      acc.storageRoot = (? db.getKeyRc payload.account.storageID).to(Hash256)
    return ok(acc)
  else:
    discard

  # Any other payload type cannot be represented as an `Account`.
  err PayloadTypeUnsupported
|
|
|
|
proc toAccount*(
    vtx: VertexRef;                      # Leaf vertex to convert
    db: AristoDbRef;                     # Database, needed for storage root
      ): Result[Account,AristoError] =
  ## Variant of `toAccount()` for a `Leaf` vertex.
  # Guard clause: anything other than a valid leaf vertex is rejected.
  if not vtx.isValid or vtx.vType != Leaf:
    return err(AccVtxUnsupported)
  # Delegate to the payload based converter.
  vtx.lData.toAccount db
|
|
|
|
proc toAccount*(
    node: NodeRef;                       # Leaf node to convert
      ): Result[Account,AristoError] =
  ## Variant of `toAccount()` for a `Leaf` node which must be complete (i.e.
  ## a potential Merkle hash key must have been initialised.)
  if node.isValid and node.vType == Leaf:
    case node.lData.pType:
    of RlpData:
      # RLP-encoded blob: decode directly into an `Account` object.
      try:
        return ok(rlp.decode(node.lData.rlpBlob, Account))
      except RlpError:
        return err(AccRlpDecodingError)
    of AccountData:
      # Structured account payload: copy fields, then take the storage root
      # from the node's own key slot (no database lookup here, unlike the
      # `PayloadRef` variant.)
      var acc = Account(
        nonce:       node.lData.account.nonce,
        balance:     node.lData.account.balance,
        codeHash:    node.lData.account.codeHash,
        storageRoot: EMPTY_ROOT_HASH)    # default for accounts w/o storage
      if node.lData.account.storageID.isValid:
        # The node must have been completed, i.e. `key[0]` holds the Merkle
        # key of the storage root vertex.
        if not node.key[0].isValid:
          return err(AccStorageKeyMissing)
        acc.storageRoot = node.key[0].to(Hash256)
      return ok(acc)
    else:
      return err(PayloadTypeUnsupported)

  # Not a (valid) leaf node at all.
  err AccNodeUnsupported
|
|
|
|
# ---------------------
|
|
|
|
proc toNode*(
    vtx: VertexRef;                      # Vertex to convert
    db: AristoDbRef;                     # Database, top layer
    stopEarly = true;                    # Full list of missing links if `false`
    beKeyOk = true;                      # Allow fetching DB backend keys
      ): Result[NodeRef,seq[VertexID]] =
  ## Convert the argument vertex `vtx` to a node type. Missing Merkle hash
  ## keys are searched for on the argument database `db`.
  ##
  ## On error, at least the vertex ID of the first missing Merkle hash key is
  ## returned. If the argument `stopEarly` is set `false`, all missing Merkle
  ## hash keys are returned.
  ##
  ## If the argument `beKeyOk` is set `false`, keys for node links are accepted
  ## only from the cache layer. This does not affect a link key for a payload
  ## storage root.
  ##
  proc getKey(db: AristoDbRef; vid: VertexID; beOk: bool): HashKey =
    ## Fetch the Merkle key for `vid`: cache layers first, then (if `beOk`)
    ## the database backend. Returns `VOID_HASH_KEY` if unavailable.
    block body:
      let key = db.layersGetKey(vid).valueOr:
        break body                       # not cached => maybe try backend
      if key.isValid:
        return key
      else:
        # Cached as explicitly void => do not fall through to the backend.
        return VOID_HASH_KEY
    if beOk:
      let rc = db.getKeyBE vid
      if rc.isOk:
        return rc.value
    VOID_HASH_KEY

  case vtx.vType:
  of Leaf:
    let node = NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
    # Need to resolve storage root for account leaf
    if vtx.lData.pType == AccountData:
      let vid = vtx.lData.account.storageID
      if vid.isValid:
        # NOTE(review): two-argument call, i.e. not the local three-argument
        # `getKey()` helper above -- presumably resolves via an imported
        # overload that may consult the backend; confirm against `aristo_get`.
        let key = db.getKey vid
        if not key.isValid:
          block looseCoupling:
            when LOOSE_STORAGE_TRIE_COUPLING:
              # Stale storage trie?
              if LEAST_FREE_VID <= vid.distinctBase and
                 not db.getVtx(vid).isValid:
                # Detach the dangling storage reference and carry on.
                node.lData.account.storageID = VertexID(0)
                break looseCoupling
            # Otherwise this is a stale storage trie.
            return err(@[vid])
        node.key[0] = key
    return ok node

  of Branch:
    let node = NodeRef(vType: Branch, bVid: vtx.bVid)
    var missing: seq[VertexID]           # collected only if not `stopEarly`
    for n in 0 .. 15:
      let vid = vtx.bVid[n]
      if vid.isValid:
        let key = db.getKey(vid, beOk=beKeyOk)
        if key.isValid:
          node.key[n] = key
        elif stopEarly:
          return err(@[vid])
        else:
          missing.add vid
    if 0 < missing.len:
      return err(missing)
    return ok node

  of Extension:
    let
      vid = vtx.eVid
      key = db.getKey(vid, beOk=beKeyOk)
    if not key.isValid:
      # An extension has exactly one link, so this is the full missing list.
      return err(@[vid])
    let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)
    node.key[0] = key
    return ok node
|
|
|
|
|
|
proc subVids*(vtx: VertexRef): seq[VertexID] =
  ## Returns the list of all sub-vertex IDs for the argument `vtx`, i.e.
  ## the storage root of an account leaf, the valid links of a branch, or
  ## the single target of an extension.
  var found: seq[VertexID]
  case vtx.vType:
  of Leaf:
    # Only an account payload can refer to a sub-trie (its storage root.)
    if vtx.lData.pType == AccountData and
       vtx.lData.account.storageID.isValid:
      found.add vtx.lData.account.storageID
  of Branch:
    # Keep the valid entries among the 16 child links.
    for child in vtx.bVid:
      if child.isValid:
        found.add child
  of Extension:
    # An extension vertex always carries exactly one target.
    found.add vtx.eVid
  found
|
|
|
|
# ---------------------
|
|
|
|
proc registerAccount*(
    db: AristoDbRef;                     # Database, top layer
    stoRoot: VertexID;                   # Storage root ID
    accPath: PathID;                     # Needed for accounts payload
      ): Result[VidVtxPair,AristoError] =
  ## Verify that the `stoRoot` argument is properly referred to by the
  ## account data (if any) implied by the `accPath` argument.
  ##
  ## The function will return an account leaf node if there was any, or an
  ## empty `VidVtxPair()` object.
  ##
  # Verify storage root and account path
  if not stoRoot.isValid:
    return err(UtilsStoRootMissing)
  if not accPath.isValid:
    return err(UtilsAccPathMissing)

  # Get account leaf with account data
  let hike = LeafTie(root: VertexID(1), path: accPath).hikeUp(db).valueOr:
    return err(UtilsAccUnaccessible)

  # The last leg of the hike is the account leaf candidate.
  let wp = hike.legs[^1].wp
  if wp.vtx.vType != Leaf:
    return err(UtilsAccPathWithoutLeaf)
  if wp.vtx.lData.pType != AccountData:
    return ok(VidVtxPair()) # nothing to do

  # Check whether the `stoRoot` exists on the database
  let stoVtx = block:
    let rc = db.getVtxRc stoRoot
    if rc.isOk:
      rc.value
    elif rc.error == GetVtxNotFound:
      # Missing vertex is acceptable here; it is validated against the
      # account's `storageID` below.
      VertexRef(nil)
    else:
      return err(rc.error)

  # Verify `stoVtx` against storage root: the storage trie (if present on
  # the database) and the account's `storageID` reference must agree.
  let stoID = wp.vtx.lData.account.storageID
  if stoVtx.isValid:
    if stoID != stoRoot:
      return err(UtilsAccWrongStorageRoot)
  else:
    if stoID.isValid:
      return err(UtilsAccWrongStorageRoot)

  # Clear Merkle keys so that `hashify()` can calculate the re-hash forest/tree
  for w in hike.legs.mapIt(it.wp.vid):
    db.layersResKey(hike.root, w)

  # Signal to `hashify()` where to start rebuilding Merkle hashes
  db.top.final.dirty.incl hike.root
  db.top.final.dirty.incl wp.vid

  ok(wp)
|
|
|
|
# ------------------------------------------------------------------------------
|
|
# End
|
|
# ------------------------------------------------------------------------------
|