# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#   http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#   http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Objects Retrieval Via Traversal Path
## =================================================
##

{.push raises: [].}

import
  std/typetraits,
  eth/common,
  results,
  "."/[aristo_compute, aristo_desc, aristo_get, aristo_layers, aristo_hike]

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc retrieveLeaf(
    db: AristoDbRef;
    root: VertexID;
    path: Hash32;
      ): Result[VertexRef,AristoError] =
  ## Traverse the trie under `root` along `path` and return the leaf vertex,
  ## or `FetchPathNotFound` if the path does not resolve to a leaf.
  for step in stepUp(NibblesBuf.fromBytes(path.data), root, db):
    let vtx = step.valueOr:
      if error in HikeAcceptableStopsNotFound:
        return err(FetchPathNotFound)
      return err(error)

    if vtx.vType == Leaf:
      return ok vtx

  return err(FetchPathNotFound)

proc cachedAccLeaf*(db: AristoDbRef; accPath: Hash32): Opt[VertexRef] =
  # Return vertex from layers or cache, `nil` if it's known to not exist and
  # none otherwise
  db.layersGetAccLeaf(accPath) or
    db.accLeaves.get(accPath) or
    Opt.none(VertexRef)

proc cachedStoLeaf*(db: AristoDbRef; mixPath: Hash32): Opt[VertexRef] =
  # Return vertex from layers or cache, `nil` if it's known to not exist and
  # none otherwise
  db.layersGetStoLeaf(mixPath) or
    db.stoLeaves.get(mixPath) or
    Opt.none(VertexRef)

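# The two cache helpers above return a three-state answer. The hypothetical
# helper below is only an illustrative sketch of how a caller might interpret
# it (it is not used elsewhere in this module; `db` is assumed to be an
# initialised `AristoDbRef`):
#   * `Opt.none(..)` -- nothing cached, the database must be consulted
#   * some `nil`     -- cached as known-missing, no database lookup needed
#   * some vertex    -- cached leaf available
proc exampleAccCacheState(db: AristoDbRef; accPath: Hash32): string =
  let cached = db.cachedAccLeaf(accPath)
  if cached.isNone():
    "not cached"
  elif not cached[].isValid():
    "cached as missing"
  else:
    "cached leaf"
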
proc retrieveAccountLeaf(
    db: AristoDbRef;
    accPath: Hash32;
      ): Result[VertexRef,AristoError] =
  if (let leafVtx = db.cachedAccLeaf(accPath); leafVtx.isSome()):
    if not leafVtx[].isValid():
      return err(FetchPathNotFound)
    return ok leafVtx[]

  # Updated payloads are stored in the layers so if we didn't find the leaf
  # there, it must come from the database
  let
    leafVtx = db.retrieveLeaf(VertexID(1), accPath).valueOr:
      if error == FetchPathNotFound:
        db.accLeaves.put(accPath, nil)
      return err(error)

  db.accLeaves.put(accPath, leafVtx)

  ok leafVtx

proc retrieveMerkleHash(
    db: AristoDbRef;
    root: VertexID;
      ): Result[Hash32,AristoError] =
  let key =
    db.computeKey((root, root)).valueOr:
      if error in [GetVtxNotFound, GetKeyNotFound]:
        return ok(EMPTY_ROOT_HASH)
      return err(error)

  ok key.to(Hash32)

proc hasAccountPayload(
    db: AristoDbRef;
    accPath: Hash32;
      ): Result[bool,AristoError] =
  let error = db.retrieveAccountLeaf(accPath).errorOr:
    return ok(true)

  if error == FetchPathNotFound:
    return ok(false)
  err(error)

proc fetchStorageIdImpl(
    db: AristoDbRef;
    accPath: Hash32;
    enaStoRootMissing = false;
      ): Result[VertexID,AristoError] =
  ## Helper function for retrieving a storage (vertex) ID for a given account.
  let
    leafVtx = ?db.retrieveAccountLeaf(accPath)
    stoID = leafVtx[].lData.stoID

  if stoID.isValid:
    ok stoID.vid
  elif enaStoRootMissing:
    err(FetchPathStoRootMissing)
  else:
    err(FetchPathNotFound)

# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------

proc fetchAccountHike*(
    db: AristoDbRef;                   # Database
    accPath: Hash32;                   # Implies a storage ID (if any)
    accHike: var Hike
      ): Result[void,AristoError] =
  ## Expand account path to account leaf or return failure

  # Prefer the leaf cache so as not to burden the lower layers
  let leaf = db.cachedAccLeaf(accPath)
  if leaf == Opt.some(VertexRef(nil)):
    return err(FetchAccInaccessible)

  accPath.hikeUp(VertexID(1), db, leaf, accHike).isOkOr:
    return err(FetchAccInaccessible)

  # Extract the account payload from the leaf
  if accHike.legs.len == 0 or accHike.legs[^1].wp.vtx.vType != Leaf:
    return err(FetchAccPathWithoutLeaf)

  assert accHike.legs[^1].wp.vtx.lData.pType == AccountData

  ok()

proc fetchStorageID*(
    db: AristoDbRef;
    accPath: Hash32;
      ): Result[VertexID,AristoError] =
  ## Public helper function for retrieving a storage (vertex) ID for a given
  ## account. This function returns the distinct error `FetchPathStoRootMissing`
  ## (rather than `FetchPathNotFound`) if the account for the argument path
  ## `accPath` exists but has no storage root.
  ##
  db.fetchStorageIdImpl(accPath, enaStoRootMissing=true)

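# Illustrative sketch only (the proc below is hypothetical and assumes `db` is
# an initialised `AristoDbRef`): how a caller can tell an account without a
# storage tree apart from a missing account when using `fetchStorageID`.
proc exampleStorageIdState(db: AristoDbRef; accPath: Hash32): string =
  let rc = db.fetchStorageID(accPath)
  if rc.isOk:
    "account has a storage tree"           # rc.value is its root vertex ID
  elif rc.error == FetchPathStoRootMissing:
    "account exists but has no storage tree"
  else:
    "account not found or backend error"   # e.g. FetchPathNotFound
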
proc retrieveStoragePayload(
    db: AristoDbRef;
    accPath: Hash32;
    stoPath: Hash32;
      ): Result[UInt256,AristoError] =
  let mixPath = mixUp(accPath, stoPath)

  if (let leafVtx = db.cachedStoLeaf(mixPath); leafVtx.isSome()):
    if not leafVtx[].isValid():
      return err(FetchPathNotFound)
    return ok leafVtx[].lData.stoData

  # Updated payloads are stored in the layers so if we didn't find the leaf
  # there, it must come from the database
  let leafVtx = db.retrieveLeaf(? db.fetchStorageIdImpl(accPath), stoPath).valueOr:
    if error == FetchPathNotFound:
      db.stoLeaves.put(mixPath, nil)
    return err(error)

  db.stoLeaves.put(mixPath, leafVtx)

  ok leafVtx.lData.stoData

proc hasStoragePayload(
    db: AristoDbRef;
    accPath: Hash32;
    stoPath: Hash32;
      ): Result[bool,AristoError] =
  let error = db.retrieveStoragePayload(accPath, stoPath).errorOr:
    return ok(true)

  if error == FetchPathNotFound:
    return ok(false)
  err(error)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc fetchLastSavedState*(
    db: AristoDbRef;
      ): Result[SavedState,AristoError] =
  ## Wrapper around `getLstUbe()`. The function returns the last saved state,
  ## i.e. a Merkle hash tag for the vertex with ID 1 together with a bespoke
  ## `uint64` identifier (which may be interpreted as a block number.)
  db.getLstUbe()

proc fetchAccountRecord*(
    db: AristoDbRef;
    accPath: Hash32;
      ): Result[AristoAccount,AristoError] =
  ## Fetch an account record from the database indexed by `accPath`.
  ##
  let leafVtx = ? db.retrieveAccountLeaf(accPath)
  assert leafVtx.lData.pType == AccountData # debugging only

  ok leafVtx.lData.account

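# Usage sketch (hypothetical helper; assumes `db` is an initialised
# `AristoDbRef`): read an account record, mapping "not found" to a default
# record and propagating any other error to the caller.
proc exampleAccountOrDefault(
    db: AristoDbRef;
    accPath: Hash32;
      ): Result[AristoAccount,AristoError] =
  let acc = db.fetchAccountRecord(accPath).valueOr:
    if error == FetchPathNotFound:
      return ok(default(AristoAccount))  # unknown account reads as empty
    return err(error)                    # propagate backend errors
  ok(acc)
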
proc fetchStateRoot*(
    db: AristoDbRef;
      ): Result[Hash32,AristoError] =
  ## Fetch the Merkle hash of the account root.
  db.retrieveMerkleHash(VertexID(1))

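# Usage sketch (hypothetical helper; assumes `db` is an initialised
# `AristoDbRef`): per `retrieveMerkleHash` above, a database without an
# account trie (no vertex with ID 1) reports `EMPTY_ROOT_HASH` rather than an
# error, so an "empty state" check can be written like this.
proc exampleHasEmptyState(db: AristoDbRef): bool =
  let root = db.fetchStateRoot().valueOr:
    return false                       # backend error: not known to be empty
  root == EMPTY_ROOT_HASH
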
proc hasPathAccount*(
    db: AristoDbRef;
    accPath: Hash32;
      ): Result[bool,AristoError] =
  ## For an account record indexed by `accPath` query whether this record
  ## exists on the database.
  ##
  db.hasAccountPayload(accPath)

proc fetchStorageData*(
    db: AristoDbRef;
    accPath: Hash32;
    stoPath: Hash32;
      ): Result[UInt256,AristoError] =
  ## For a storage tree related to account `accPath`, fetch the data record
  ## from the database indexed by `stoPath`.
  ##
  db.retrieveStoragePayload(accPath, stoPath)

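# Usage sketch (hypothetical helper; assumes `db` is an initialised
# `AristoDbRef`): read one storage slot, treating both a missing account and
# an unset slot as the customary zero value.
proc exampleSlotOrZero(db: AristoDbRef; accPath, stoPath: Hash32): UInt256 =
  db.fetchStorageData(accPath, stoPath).valueOr:
    # `FetchPathNotFound` covers both a missing account and an unset slot;
    # this sketch maps any error to zero.
    default(UInt256)
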
proc fetchStorageRoot*(
    db: AristoDbRef;
    accPath: Hash32;
      ): Result[Hash32,AristoError] =
  ## Fetch the Merkle hash of the storage root related to `accPath`.
  let stoID = db.fetchStorageIdImpl(accPath).valueOr:
    if error == FetchPathNotFound:
      return ok(EMPTY_ROOT_HASH) # no sub-tree
    return err(error)
  db.retrieveMerkleHash(stoID)

proc hasPathStorage*(
    db: AristoDbRef;
    accPath: Hash32;
    stoPath: Hash32;
      ): Result[bool,AristoError] =
  ## For a storage tree related to account `accPath`, query whether the data
  ## record indexed by `stoPath` exists on the database.
  ##
  db.hasStoragePayload(accPath, stoPath)

proc hasStorageData*(
    db: AristoDbRef;
    accPath: Hash32;
      ): Result[bool,AristoError] =
  ## For a storage tree related to account `accPath`, query whether there
  ## is a non-empty data storage area at all.
  ##
  let stoID = db.fetchStorageIdImpl(accPath).valueOr:
    if error == FetchPathNotFound:
      return ok(false) # no sub-tree
    return err(error)
  ok stoID.isValid

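# Usage sketch (hypothetical helper; assumes `db` is an initialised
# `AristoDbRef`): `hasStorageData` answers whether an account owns a storage
# tree at all, while `fetchStorageRoot` folds the "no tree" case into
# `EMPTY_ROOT_HASH`; the two are used consistently as sketched here.
proc exampleStorageRootOrEmpty(db: AristoDbRef; accPath: Hash32): Hash32 =
  let hasStorage = db.hasStorageData(accPath).valueOr:
    return EMPTY_ROOT_HASH             # backend error mapped to empty root
  if hasStorage:
    db.fetchStorageRoot(accPath).valueOr:
      return EMPTY_ROOT_HASH           # ditto
  else:
    EMPTY_ROOT_HASH                    # no storage tree for this account
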
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------