Mirror of https://github.com/status-im/nimbus-eth1.git, commit 01ca415721
Currently, computed hash keys are stored in a separate column family from the MPT data they're generated from - this has several disadvantages:

* A lot of space is wasted because the lookup key (`RootedVertexID`) is repeated in both tables - this is 30% of the `AriKey` content!
* rocksdb must maintain in-memory bloom filters and LRU caches for said keys, doubling its "minimal efficient cache size"
* An extra disk traversal must be made to check for the existence of a cached hash key
* The number of files on disk doubles, since each column family is its own set of files

Here, the two CFs are joined such that both key and data are stored in `AriVtx`. This means:

* we save ~30% disk space on repeated lookup keys
* we save ~2gb of memory overhead that can be used to cache data instead of indices
* we can skip storing hash keys for MPT leaf nodes - these are trivial to compute and waste a lot of space - previously they had to be present in the `AriKey` CF to avoid having to look in two tables on the happy path
* there is a small increase in write amplification because when a hash value is updated for a branch node, we must write both key and branch data - previously we would write only the key
* there's a small shift in CPU usage - instead of performing lookups in the database, hashes for leaf nodes are (re)computed on the fly
* we can return to slightly smaller on-disk SST files since there are fewer of them, which should reduce disk traffic a bit

Internally, there are also other advantages:

* when clearing keys, we no longer have to store a zero hash in memory - instead, we deduce staleness of the cached key from the presence of an updated VertexRef - this saves ~1gb of memory overhead during import
* the hash key cache becomes dedicated to branch keys since leaf keys are no longer stored in memory, reducing churn
* key computation is a lot faster thanks to the skipped second disk traversal - a key computation for mainnet can be completed in 11 hours instead of ~2 days (!) thanks to better cache usage and less read amplification - with additional improvements to the on-disk format, we can probably get rid of the initial full-traversal method of seeding the key cache on first start after import

All in all, this PR reduces the size of a mainnet database from 160gb to 110gb and the peak memory footprint during import by ~1-2gb.
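
As a minimal sketch of the layout change: the `RootedVertexID` key and the `AriVtx`/`AriKey` column family names are from the description above, while the record type, field names and encoding below are hypothetical, not the actual on-disk format. The point is that the serialised vertex and its optional cached hash key now share one `AriVtx` entry, with the key slot simply left empty for leaves; staleness is then detected from the presence of an updated VertexRef rather than by writing a zero hash.

import results # provides `Opt`

type
  # Hypothetical stand-in for the real RootedVertexID lookup key
  RootedVertexID = tuple[root: uint64, vid: uint64]

  # Old layout - two column families, the lookup key repeated in both:
  #   AriVtx: RootedVertexID -> serialised vertex
  #   AriKey: RootedVertexID -> 32-byte Merkle hash key
  #
  # New layout - a single AriVtx column family; the hash key rides along
  # with the vertex and is omitted for leaf nodes, whose hashes are cheap
  # to recompute on the fly (hypothetical encoding, for illustration only):
  AriVtxEntry = object
    vertex: seq[byte]               # serialised MPT vertex data
    hashKey: Opt[array[32, byte]]   # cached Merkle key; kept for branches only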

264 lines · 9.6 KiB · Nim
# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Patricia Trie builder, raw node insertion
## ======================================================
##
## This module merges `PathID` values as hexary lookup paths into the
## `Patricia Trie`. When changing vertices (aka nodes without Merkle hashes),
## associated (but separately stored) Merkle hashes will be deleted unless
## locked. Instead of deleting locked hashes, error handling is applied.
##
## Also, nodes (vertices plus Merkle hashes) can be added, which is needed
## for boundary proofing after `snap/1` download. The vertices are split from
## the nodes and stored as-is on the table holding `Patricia Trie` entries.
## The hashes are stored in a separate table and the vertices are labelled
## `locked`.

{.push raises: [].}

import
  std/typetraits,
  eth/common,
  results,
  "."/[aristo_desc, aristo_fetch, aristo_get, aristo_layers, aristo_vid]

proc layersPutLeaf(
    db: AristoDbRef, rvid: RootedVertexID, path: NibblesBuf, payload: LeafPayload
): VertexRef =
  ## Create a leaf vertex from the given path and payload, store it in the
  ## top layer and return it.
  let vtx = VertexRef(vType: Leaf, pfx: path, lData: payload)
  db.layersPutVtx(rvid, vtx)
  vtx

proc mergePayloadImpl(
    db: AristoDbRef,        # Database, top layer
    root: VertexID,         # MPT state root
    path: openArray[byte],  # Leaf item to add to the database
    leaf: Opt[VertexRef],   # Cached leaf vertex for this path, if any
    payload: LeafPayload,   # Payload value
): Result[(VertexRef, VertexRef, VertexRef), AristoError] =
  ## Merge the argument `(root,path)` key-value-pair into the top level vertex
  ## table of the database `db`. The `path` argument is used to address the
  ## leaf vertex with the payload. It is stored or updated on the database
  ## accordingly.
  ##
  ## Returns the merged leaf vertex together with, when an existing leaf had
  ## to be split into a branch, the displaced leaf and its relocated copy
  ## (`nil` otherwise).
  ##
  var
    path = NibblesBuf.fromBytes(path)
    cur = root
    (vtx, _) = db.getVtxRc((root, cur)).valueOr:
      if error != GetVtxNotFound:
        return err(error)

      # We're at the root vertex and there is no data - this must be a fresh
      # VertexID!
      return ok (db.layersPutLeaf((root, cur), path, payload), nil, nil)
    vids: ArrayBuf[NibblesBuf.high + 1, VertexID]
    vtxs: ArrayBuf[NibblesBuf.high + 1, VertexRef]

  template resetKeys() =
    # Reset cached hashes of touched vertices - their keys are stale now that
    # the vertices along the path have changed
    for i in 2..vids.len:
      db.layersResKey((root, vids[^i]), vtxs[^i])

  while path.len > 0:
    # Clear existing merkle keys along the traversal path
    vids.add cur
    vtxs.add vtx

    let n = path.sharedPrefixLen(vtx.pfx)
    case vtx.vType
    of Leaf:
      let res =
        if n == vtx.pfx.len:
          # Same path - replace the current vertex with a new payload

          if vtx.lData == payload:
            return err(MergeNoAction)

          let leafVtx = if root == VertexID(1):
            var payload = payload.dup()
            # TODO can we avoid this hack? it feels like the caller should
            #      already have set an appropriate stoID - this "fixup" feels
            #      risky, especially from a caching point of view
            payload.stoID = vtx.lData.stoID
            db.layersPutLeaf((root, cur), path, payload)
          else:
            db.layersPutLeaf((root, cur), path, payload)
          (leafVtx, nil, nil)
        else:
          # Turn leaf into a branch (or extension) then insert the two leaves
          # into the branch
          let branch = VertexRef(vType: Branch, pfx: path.slice(0, n))
          let other = block: # Copy of existing leaf node, now one level deeper
            let local = db.vidFetch()
            branch.bVid[vtx.pfx[n]] = local
            db.layersPutLeaf((root, local), vtx.pfx.slice(n + 1), vtx.lData)

          let leafVtx = block: # Newly inserted leaf node
            let local = db.vidFetch()
            branch.bVid[path[n]] = local
            db.layersPutLeaf((root, local), path.slice(n + 1), payload)

          # Put the branch at the vid where the leaf was
          db.layersPutVtx((root, cur), branch)

          # We need to return vtx here because its pfx member hasn't yet been
          # sliced off and is therefore shared with the hike
          (leafVtx, vtx, other)

      resetKeys()
      return ok(res)
    of Branch:
      if vtx.pfx.len == n:
        # The existing branch is a prefix of the new entry
        let
          nibble = path[vtx.pfx.len]
          next = vtx.bVid[nibble]

        if next.isValid:
          cur = next
          path = path.slice(n + 1)
          vtx =
            if leaf.isSome and leaf[].isValid and leaf[].pfx == path:
              leaf[]
            else:
              (?db.getVtxRc((root, next)))[0]
        else:
          # There's no vertex at the branch point - insert the payload as a new
          # leaf and update the existing branch
          let
            local = db.vidFetch()
            leafVtx = db.layersPutLeaf((root, local), path.slice(n + 1), payload)
            brDup = vtx.dup()

          brDup.bVid[nibble] = local
          db.layersPutVtx((root, cur), brDup)

          resetKeys()
          return ok((leafVtx, nil, nil))
      else:
        # Partial path match - we need to split the existing branch at
        # the point of divergence, inserting a new branch
        let branch = VertexRef(vType: Branch, pfx: path.slice(0, n))
        block: # Copy the existing vertex and add it to the new branch
          let local = db.vidFetch()
          branch.bVid[vtx.pfx[n]] = local

          db.layersPutVtx(
            (root, local),
            VertexRef(vType: Branch, pfx: vtx.pfx.slice(n + 1), bVid: vtx.bVid),
          )

        let leafVtx = block: # add the new entry
          let local = db.vidFetch()
          branch.bVid[path[n]] = local
          db.layersPutLeaf((root, local), path.slice(n + 1), payload)

        db.layersPutVtx((root, cur), branch)

        resetKeys()
        return ok((leafVtx, nil, nil))

  err(MergeHikeFailed)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc mergeAccountRecord*(
    db: AristoDbRef;          # Database, top layer
    accPath: Hash32;          # Even nibbled byte path
    accRec: AristoAccount;    # Account data
      ): Result[bool,AristoError] =
  ## Merge the key-value-pair argument `(accPath,accRec)` as an account
  ## ledger value, i.e. the sub-tree starting at `VertexID(1)`.
  ##
  ## On success, the function returns `true` if the `accRec` argument was
  ## not yet on the database or differed from the stored value, and `false`
  ## otherwise.
  ##
  let
    pyl = LeafPayload(pType: AccountData, account: accRec)
    updated = db.mergePayloadImpl(
        VertexID(1), accPath.data, db.cachedAccLeaf(accPath), pyl).valueOr:
      if error == MergeNoAction:
        return ok false
      return err(error)

  # Update leaf cache both of the merged value and potentially the displaced
  # leaf resulting from splitting a leaf into a branch with two leaves
  db.layersPutAccLeaf(accPath, updated[0])
  if updated[1].isValid:
    let otherPath = Hash32(getBytes(
      NibblesBuf.fromBytes(accPath.data).replaceSuffix(updated[1].pfx)))
    db.layersPutAccLeaf(otherPath, updated[2])

  ok true

proc mergeStorageData*(
    db: AristoDbRef;          # Database, top layer
    accPath: Hash32;          # Needed for accounts payload
    stoPath: Hash32;          # Storage data path (aka key)
    stoData: UInt256;         # Storage data payload value
      ): Result[void,AristoError] =
  ## Store the `stoData` data argument on the storage area addressed by
  ## `(accPath,stoPath)` where `accPath` is the account key (into the MPT)
  ## and `stoPath` is the slot path of the corresponding storage area.
  ##
  var accHike: Hike
  db.fetchAccountHike(accPath,accHike).isOkOr:
    return err(MergeStoAccMissing)

  let
    stoID = accHike.legs[^1].wp.vtx.lData.stoID

    # Provide new storage ID when needed
    useID =
      if stoID.isValid: stoID                     # Use as is
      elif stoID.vid.isValid: (true, stoID.vid)   # Re-use previous vid
      else: (true, db.vidFetch())                 # Create new vid

    mixPath = mixUp(accPath, stoPath)

    # Call merge
    pyl = LeafPayload(pType: StoData, stoData: stoData)
    updated = db.mergePayloadImpl(
        useID.vid, stoPath.data, db.cachedStoLeaf(mixPath), pyl).valueOr:
      if error == MergeNoAction:
        assert stoID.isValid # debugging only
        return ok()

      return err(error)

  # Mark account path Merkle keys for update
  db.layersResKeys(accHike)

  # Update leaf cache both of the merged value and potentially the displaced
  # leaf resulting from splitting a leaf into a branch with two leaves
  db.layersPutStoLeaf(mixPath, updated[0])

  if updated[1].isValid:
    let otherPath = Hash32(getBytes(
      NibblesBuf.fromBytes(stoPath.data).replaceSuffix(updated[1].pfx)))
    db.layersPutStoLeaf(mixUp(accPath, otherPath), updated[2])

  if not stoID.isValid:
    # Make sure that there is an account that refers to that storage trie
    let leaf = accHike.legs[^1].wp.vtx.dup # Dup on modify
    leaf.lData.stoID = useID
    db.layersPutAccLeaf(accPath, leaf)
    db.layersPutVtx((VertexID(1), accHike.legs[^1].wp.vid), leaf)

  ok()

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
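
For orientation, here is a hedged usage sketch of the two public entry points above. It assumes an already initialised `AristoDbRef` (database setup and the import of this module are elided); only `mergeAccountRecord` and `mergeStorageData` come from this module, the surrounding proc and its parameters are illustrative.

import eth/common, results
# ... plus this module and its database constructor, elided here

proc exampleMerge(db: AristoDbRef, accPath, stoPath: Hash32,
                  balance, slotVal: UInt256) =
  # Insert or update an account record under VertexID(1); `true` means
  # the database actually changed (illustrative record, codeHash left zero)
  let accRec = AristoAccount(nonce: 1, balance: balance)
  let changed = db.mergeAccountRecord(accPath, accRec).valueOr:
    echo "account merge failed: ", error
    return
  if not changed:
    echo "account was already up to date"

  # Store a slot value in the account's storage trie; a missing storage ID
  # is created on first use, as handled inside mergeStorageData above
  db.mergeStorageData(accPath, stoPath, slotVal).isOkOr:
    echo "storage merge failed: ", error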