Turn payload into value type (#2483)

The Vertex type unifies branches, extensions and leaves into a single
memory area where the largest member is the branch (128 bytes + overhead) -
the payloads we have are all smaller than 128 bytes, thus wrapping them in an
extra layer of `ref` is wasteful from a memory usage perspective.

Further, the `ref`s must be visited during the M&S phase of garbage
collection - since we keep millions of these, many of them
short-lived, this takes up significant CPU time.

```
Function	CPU Time: Total	CPU Time: Self	Module	Function (Full)	Source File	Start Address
system::markStackAndRegisters	10.0%	4.922s	nimbus	system::markStackAndRegisters(var<system::GcHeap>).constprop.0	gc.nim	0x701230`
```
This commit is contained in:
Jacek Sieka 2024-07-14 12:02:05 +02:00 committed by GitHub
parent 72947b3647
commit f3a56002ca
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
14 changed files with 126 additions and 139 deletions

View File

@ -121,9 +121,7 @@ proc load256(data: openArray[byte]; start: var int, len: int): Result[UInt256,Ar
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc blobifyTo*(pyl: PayloadRef, data: var Blob) = proc blobifyTo*(pyl: LeafPayload, data: var Blob) =
if pyl.isNil:
return
case pyl.pType case pyl.pType
of RawData: of RawData:
data &= pyl.rawBlob data &= pyl.rawBlob
@ -248,22 +246,22 @@ proc blobify*(lSst: SavedState): Result[Blob,AristoError] =
# ------------- # -------------
proc deblobify( proc deblobify(
data: openArray[byte]; data: openArray[byte];
T: type PayloadRef; T: type LeafPayload;
): Result[PayloadRef,AristoError] = ): Result[LeafPayload,AristoError] =
if data.len == 0: if data.len == 0:
return ok PayloadRef(pType: RawData) return ok LeafPayload(pType: RawData)
let mask = data[^1] let mask = data[^1]
if (mask and 0x10) > 0: # unstructured payload if (mask and 0x10) > 0: # unstructured payload
return ok PayloadRef(pType: RawData, rawBlob: data[0 .. ^2]) return ok LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])
if (mask and 0x20) > 0: # Slot storage data if (mask and 0x20) > 0: # Slot storage data
return ok PayloadRef( return ok LeafPayload(
pType: StoData, pType: StoData,
stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256)) stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))
var var
pAcc = PayloadRef(pType: AccountData) pAcc = LeafPayload(pType: AccountData)
start = 0 start = 0
lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2)) lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))
@ -352,7 +350,7 @@ proc deblobify*(
NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen-1) NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen-1)
if not isLeaf: if not isLeaf:
return err(DeblobLeafGotExtPrefix) return err(DeblobLeafGotExtPrefix)
let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(PayloadRef) let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(LeafPayload)
VertexRef( VertexRef(
vType: Leaf, vType: Leaf,
lPfx: pathSegment, lPfx: pathSegment,

View File

@ -180,21 +180,18 @@ proc ppPathPfx(pfx: NibblesBuf): string =
proc ppNibble(n: int8): string = proc ppNibble(n: int8): string =
if n < 0: "ø" elif n < 10: $n else: n.toHexLsb if n < 0: "ø" elif n < 10: $n else: n.toHexLsb
proc ppPayload(p: PayloadRef, db: AristoDbRef): string = proc ppPayload(p: LeafPayload, db: AristoDbRef): string =
if p.isNil: case p.pType:
result = "n/a" of RawData:
else: result &= p.rawBlob.toHex.squeeze(hex=true)
case p.pType: of AccountData:
of RawData: result = "("
result &= p.rawBlob.toHex.squeeze(hex=true) result &= ($p.account.nonce).stripZeros(toExp=true) & ","
of AccountData: result &= ($p.account.balance).stripZeros(toExp=true) & ","
result = "(" result &= p.stoID.ppVid & ","
result &= ($p.account.nonce).stripZeros(toExp=true) & "," result &= p.account.codeHash.ppCodeHash & ")"
result &= ($p.account.balance).stripZeros(toExp=true) & "," of StoData:
result &= p.stoID.ppVid & "," result = $p.stoData
result &= p.account.codeHash.ppCodeHash & ")"
of StoData:
result = $p.stoData
proc ppVtx(nd: VertexRef, db: AristoDbRef, rvid: RootedVertexID): string = proc ppVtx(nd: VertexRef, db: AristoDbRef, rvid: RootedVertexID): string =
if not nd.isValid: if not nd.isValid:
@ -493,7 +490,7 @@ proc pp*(vid: VertexID): string =
proc pp*(vLst: openArray[VertexID]): string = proc pp*(vLst: openArray[VertexID]): string =
vLst.ppVidList vLst.ppVidList
proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string = proc pp*(p: LeafPayload, db = AristoDbRef(nil)): string =
p.ppPayload(db.orDefault) p.ppPayload(db.orDefault)
proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string = proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string =

View File

@ -344,7 +344,7 @@ proc deleteAccountRecord*(
db.deleteImpl(hike).isOkOr: db.deleteImpl(hike).isOkOr:
return err(error[1]) return err(error[1])
db.layersPutAccPayload(accPath, nil) db.layersPutAccLeaf(accPath, nil)
ok() ok()
@ -438,7 +438,7 @@ proc deleteStorageData*(
# De-register the deleted storage tree from the account record # De-register the deleted storage tree from the account record
let leaf = wpAcc.vtx.dup # Dup on modify let leaf = wpAcc.vtx.dup # Dup on modify
leaf.lData.stoID = VertexID(0) leaf.lData.stoID = VertexID(0)
db.layersPutAccPayload(accPath, leaf.lData) db.layersPutAccLeaf(accPath, leaf)
db.layersPutVtx((accHike.root, wpAcc.vid), leaf) db.layersPutVtx((accHike.root, wpAcc.vid), leaf)
db.layersResKey((accHike.root, wpAcc.vid)) db.layersResKey((accHike.root, wpAcc.vid))
ok(true) ok(true)
@ -469,7 +469,7 @@ proc deleteStorageTree*(
# De-register the deleted storage tree from the accounts record # De-register the deleted storage tree from the accounts record
let leaf = wpAcc.vtx.dup # Dup on modify let leaf = wpAcc.vtx.dup # Dup on modify
leaf.lData.stoID = VertexID(0) leaf.lData.stoID = VertexID(0)
db.layersPutAccPayload(accPath, leaf.lData) db.layersPutAccLeaf(accPath, leaf)
db.layersPutVtx((accHike.root, wpAcc.vid), leaf) db.layersPutVtx((accHike.root, wpAcc.vid), leaf)
db.layersResKey((accHike.root, wpAcc.vid)) db.layersResKey((accHike.root, wpAcc.vid))
ok() ok()

View File

@ -83,10 +83,10 @@ proc deltaPersistent*(
? be.putEndFn writeBatch # Finalise write batch ? be.putEndFn writeBatch # Finalise write batch
# Copy back updated payloads # Copy back updated payloads
for accPath, pyl in db.balancer.accPyls: for accPath, pyl in db.balancer.accLeaves:
let accKey = accPath.to(AccountKey) let accKey = accPath.to(AccountKey)
if not db.accPyls.lruUpdate(accKey, pyl): if not db.accLeaves.lruUpdate(accKey, pyl):
discard db.accPyls.lruAppend(accKey, pyl, accLruSize) discard db.accLeaves.lruAppend(accKey, pyl, accLruSize)
# Update dudes and this descriptor # Update dudes and this descriptor
? updateSiblings.update().commit() ? updateSiblings.update().commit()

View File

@ -84,7 +84,7 @@ type
# Debugging data below, might go away in future # Debugging data below, might go away in future
xMap*: Table[HashKey,HashSet[RootedVertexID]] ## For pretty printing/debugging xMap*: Table[HashKey,HashSet[RootedVertexID]] ## For pretty printing/debugging
accPyls*: KeyedQueue[AccountKey, PayloadRef] accLeaves*: KeyedQueue[AccountKey, VertexRef]
## Account path to payload cache - accounts are frequently accessed by ## Account path to payload cache - accounts are frequently accessed by
## account path when contracts interact with them - this cache ensures ## account path when contracts interact with them - this cache ensures
## that we don't have to re-traverse the storage trie for every such ## that we don't have to re-traverse the storage trie for every such
@ -133,9 +133,6 @@ func isValid*(vtx: VertexRef): bool =
func isValid*(nd: NodeRef): bool = func isValid*(nd: NodeRef): bool =
nd != NodeRef(nil) nd != NodeRef(nil)
func isValid*(pld: PayloadRef): bool =
pld != PayloadRef(nil)
func isValid*(pid: PathID): bool = func isValid*(pid: PathID): bool =
pid != VOID_PATH_ID pid != VOID_PATH_ID

View File

@ -24,7 +24,7 @@ type
## Generalised key-value pair for a sub-trie. The main trie is the ## Generalised key-value pair for a sub-trie. The main trie is the
## sub-trie with `root=VertexID(1)`. ## sub-trie with `root=VertexID(1)`.
leafTie*: LeafTie ## Full `Patricia Trie` path root-to-leaf leafTie*: LeafTie ## Full `Patricia Trie` path root-to-leaf
payload*: PayloadRef ## Leaf data payload (see below) payload*: LeafPayload ## Leaf data payload (see below)
VertexType* = enum VertexType* = enum
## Type of `Aristo Trie` vertex ## Type of `Aristo Trie` vertex
@ -34,7 +34,7 @@ type
AristoAccount* = object AristoAccount* = object
## Application relevant part of an Ethereum account. Note that the storage ## Application relevant part of an Ethereum account. Note that the storage
## data/tree reference is not part of the account (see `PayloadRef` below.) ## data/tree reference is not part of the account (see `LeafPayload` below.)
nonce*: AccountNonce ## Some `uint64` type nonce*: AccountNonce ## Some `uint64` type
balance*: UInt256 balance*: UInt256
codeHash*: Hash256 codeHash*: Hash256
@ -45,7 +45,7 @@ type
AccountData ## `Aristo account` with vertex IDs links AccountData ## `Aristo account` with vertex IDs links
StoData ## Slot storage data StoData ## Slot storage data
PayloadRef* = ref object of RootRef LeafPayload* = object
## The payload type depends on the sub-tree used. The `VertexID(1)` rooted ## The payload type depends on the sub-tree used. The `VertexID(1)` rooted
## sub-tree only has `AccountData` type payload, stoID-based have StoData ## sub-tree only has `AccountData` type payload, stoID-based have StoData
## while generic have RawData ## while generic have RawData
@ -63,7 +63,7 @@ type
case vType*: VertexType case vType*: VertexType
of Leaf: of Leaf:
lPfx*: NibblesBuf ## Portion of path segment lPfx*: NibblesBuf ## Portion of path segment
lData*: PayloadRef ## Reference to data payload lData*: LeafPayload ## Reference to data payload
of Extension: of Extension:
ePfx*: NibblesBuf ## Portion of path segment ePfx*: NibblesBuf ## Portion of path segment
eVid*: VertexID ## Edge to vertex with ID `eVid` eVid*: VertexID ## Edge to vertex with ID `eVid`
@ -115,7 +115,7 @@ type
kMap*: Table[RootedVertexID,HashKey] ## Merkle hash key mapping kMap*: Table[RootedVertexID,HashKey] ## Merkle hash key mapping
vTop*: VertexID ## Last used vertex ID vTop*: VertexID ## Last used vertex ID
accPyls*: Table[Hash256, PayloadRef] ## Account path -> VertexRef accLeaves*: Table[Hash256, VertexRef] ## Account path -> VertexRef
LayerRef* = ref LayerObj LayerRef* = ref LayerObj
LayerObj* = object LayerObj* = object
@ -137,15 +137,11 @@ func hash*(node: NodeRef): Hash =
cast[pointer](node).hash cast[pointer](node).hash
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef` # Public helpers: `NodeRef` and `LeafPayload`
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc `==`*(a, b: PayloadRef): bool = proc `==`*(a, b: LeafPayload): bool =
## Beware, potential deep comparison ## Beware, potential deep comparison
if a.isNil:
return b.isNil
if b.isNil:
return false
if unsafeAddr(a) != unsafeAddr(b): if unsafeAddr(a) != unsafeAddr(b):
if a.pType != b.pType: if a.pType != b.pType:
return false return false
@ -204,20 +200,20 @@ proc `==`*(a, b: NodeRef): bool =
# Public helpers, miscellaneous functions # Public helpers, miscellaneous functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func dup*(pld: PayloadRef): PayloadRef = func dup*(pld: LeafPayload): LeafPayload =
## Duplicate payload. ## Duplicate payload.
case pld.pType: case pld.pType:
of RawData: of RawData:
PayloadRef( LeafPayload(
pType: RawData, pType: RawData,
rawBlob: pld.rawBlob) rawBlob: pld.rawBlob)
of AccountData: of AccountData:
PayloadRef( LeafPayload(
pType: AccountData, pType: AccountData,
account: pld.account, account: pld.account,
stoID: pld.stoID) stoID: pld.stoID)
of StoData: of StoData:
PayloadRef( LeafPayload(
pType: StoData, pType: StoData,
stoData: pld.stoData stoData: pld.stoData
) )

View File

@ -36,11 +36,11 @@ func mustBeGeneric(
ok() ok()
proc retrievePayload( proc retrieveLeaf(
db: AristoDbRef; db: AristoDbRef;
root: VertexID; root: VertexID;
path: openArray[byte]; path: openArray[byte];
): Result[PayloadRef,AristoError] = ): Result[VertexRef,AristoError] =
if path.len == 0: if path.len == 0:
return err(FetchPathInvalid) return err(FetchPathInvalid)
@ -51,35 +51,35 @@ proc retrievePayload(
return err(error) return err(error)
if vtx.vType == Leaf: if vtx.vType == Leaf:
return ok vtx.lData return ok vtx
return err(FetchPathNotFound) return err(FetchPathNotFound)
proc retrieveAccountPayload( proc retrieveAccountPayload(
db: AristoDbRef; db: AristoDbRef;
accPath: Hash256; accPath: Hash256;
): Result[PayloadRef,AristoError] = ): Result[LeafPayload,AristoError] =
if (let pyl = db.layersGetAccPayload(accPath); pyl.isSome()): if (let pyl = db.layersGetAccLeaf(accPath); pyl.isSome()):
if not pyl[].isValid(): if not pyl[].isValid():
return err(FetchPathNotFound) return err(FetchPathNotFound)
return ok pyl[] return ok pyl[].lData
let accKey = accPath.to(AccountKey) let accKey = accPath.to(AccountKey)
if (let pyl = db.accPyls.lruFetch(accKey); pyl.isSome()): if (let pyl = db.accLeaves.lruFetch(accKey); pyl.isSome()):
if not pyl[].isValid(): if not pyl[].isValid():
return err(FetchPathNotFound) return err(FetchPathNotFound)
return ok pyl[] return ok pyl[].lData
# Updated payloads are stored in the layers so if we didn't find them there, # Updated payloads are stored in the layers so if we didn't find them there,
# it must have been in the database # it must have been in the database
let let
payload = db.retrievePayload(VertexID(1), accPath.data).valueOr: payload = db.retrieveLeaf(VertexID(1), accPath.data).valueOr:
if error == FetchAccInaccessible: if error == FetchAccInaccessible:
discard db.accPyls.lruAppend(accKey, nil, accLruSize) discard db.accLeaves.lruAppend(accKey, nil, accLruSize)
return err(FetchPathNotFound) return err(FetchPathNotFound)
return err(error) return err(error)
ok db.accPyls.lruAppend(accKey, payload, accLruSize) ok db.accLeaves.lruAppend(accKey, payload, accLruSize).lData
proc retrieveMerkleHash( proc retrieveMerkleHash(
db: AristoDbRef; db: AristoDbRef;
@ -105,7 +105,7 @@ proc hasPayload(
root: VertexID; root: VertexID;
path: openArray[byte]; path: openArray[byte];
): Result[bool,AristoError] = ): Result[bool,AristoError] =
let error = db.retrievePayload(root, path).errorOr: let error = db.retrieveLeaf(root, path).errorOr:
return ok(true) return ok(true)
if error == FetchPathNotFound: if error == FetchPathNotFound:
@ -218,9 +218,9 @@ proc fetchGenericData*(
## indexed by `path`. ## indexed by `path`.
## ##
? root.mustBeGeneric() ? root.mustBeGeneric()
let pyl = ? db.retrievePayload(root, path) let pyl = ? db.retrieveLeaf(root, path)
assert pyl.pType == RawData # debugging only assert pyl.lData.pType == RawData # debugging only
ok pyl.rawBlob ok pyl.lData.rawBlob
proc fetchGenericState*( proc fetchGenericState*(
db: AristoDbRef; db: AristoDbRef;
@ -249,9 +249,9 @@ proc fetchStorageData*(
## For a storage tree related to account `accPath`, fetch the data record ## For a storage tree related to account `accPath`, fetch the data record
## from the database indexed by `path`. ## from the database indexed by `path`.
## ##
let pyl = ? db.retrievePayload(? db.fetchStorageID accPath, stoPath.data) let pyl = ? db.retrieveLeaf(? db.fetchStorageID accPath, stoPath.data)
assert pyl.pType == StoData # debugging only assert pyl.lData.pType == StoData # debugging only
ok pyl.stoData ok pyl.lData.stoData
proc fetchStorageState*( proc fetchStorageState*(
db: AristoDbRef; db: AristoDbRef;

View File

@ -91,15 +91,15 @@ func layersGetKeyOrVoid*(db: AristoDbRef; rvid: RootedVertexID): HashKey =
## Simplified version of `layersGetKey()` ## Simplified version of `layersGetKey()`
db.layersGetKey(rvid).valueOr: VOID_HASH_KEY db.layersGetKey(rvid).valueOr: VOID_HASH_KEY
func layersGetAccPayload*(db: AristoDbRef; accPath: Hash256): Opt[PayloadRef] = func layersGetAccLeaf*(db: AristoDbRef; accPath: Hash256): Opt[VertexRef] =
db.top.delta.accPyls.withValue(accPath, item): db.top.delta.accLeaves.withValue(accPath, item):
return Opt.some(item[]) return Opt.some(item[])
for w in db.rstack: for w in db.rstack:
w.delta.accPyls.withValue(accPath, item): w.delta.accLeaves.withValue(accPath, item):
return Opt.some(item[]) return Opt.some(item[])
Opt.none(PayloadRef) Opt.none(VertexRef)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -147,8 +147,8 @@ proc layersUpdateVtx*(
db.layersResKey(rvid) db.layersResKey(rvid)
func layersPutAccPayload*(db: AristoDbRef; accPath: Hash256; pyl: PayloadRef) = func layersPutAccLeaf*(db: AristoDbRef; accPath: Hash256; pyl: VertexRef) =
db.top.delta.accPyls[accPath] = pyl db.top.delta.accLeaves[accPath] = pyl
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
@ -165,8 +165,8 @@ func layersMergeOnto*(src: LayerRef; trg: var LayerObj) =
for (vid,key) in src.delta.kMap.pairs: for (vid,key) in src.delta.kMap.pairs:
trg.delta.kMap[vid] = key trg.delta.kMap[vid] = key
trg.delta.vTop = src.delta.vTop trg.delta.vTop = src.delta.vTop
for (accPath,pyl) in src.delta.accPyls.pairs: for (accPath,pyl) in src.delta.accLeaves.pairs:
trg.delta.accPyls[accPath] = pyl trg.delta.accLeaves[accPath] = pyl
func layersCc*(db: AristoDbRef; level = high(int)): LayerRef = func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
## Provide a collapsed copy of layers up to a particular transaction level. ## Provide a collapsed copy of layers up to a particular transaction level.
@ -182,7 +182,7 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
sTab: layers[0].delta.sTab.dup, # explicit dup for ref values sTab: layers[0].delta.sTab.dup, # explicit dup for ref values
kMap: layers[0].delta.kMap, kMap: layers[0].delta.kMap,
vTop: layers[^1].delta.vTop, vTop: layers[^1].delta.vTop,
accPyls: layers[0].delta.accPyls, accLeaves: layers[0].delta.accLeaves,
)) ))
# Consecutively merge other layers on top # Consecutively merge other layers on top
@ -191,8 +191,8 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
result.delta.sTab[vid] = vtx result.delta.sTab[vid] = vtx
for (vid,key) in layers[n].delta.kMap.pairs: for (vid,key) in layers[n].delta.kMap.pairs:
result.delta.kMap[vid] = key result.delta.kMap[vid] = key
for (accPath,pyl) in layers[n].delta.accPyls.pairs: for (accPath,pyl) in layers[n].delta.accLeaves.pairs:
result.delta.accPyls[accPath] = pyl result.delta.accLeaves[accPath] = pyl
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public iterators # Public iterators

View File

@ -51,10 +51,10 @@ proc mergeAccountRecord*(
## otherwise. ## otherwise.
## ##
let let
pyl = PayloadRef(pType: AccountData, account: accRec) pyl = LeafPayload(pType: AccountData, account: accRec)
rc = db.mergePayloadImpl(VertexID(1), accPath.data, pyl) rc = db.mergePayloadImpl(VertexID(1), accPath.data, pyl)
if rc.isOk: if rc.isOk:
db.layersPutAccPayload(accPath, pyl) db.layersPutAccLeaf(accPath, rc.value)
ok true ok true
elif rc.error in MergeNoAction: elif rc.error in MergeNoAction:
ok false ok false
@ -84,7 +84,7 @@ proc mergeGenericData*(
return err(MergeStoRootNotAccepted) return err(MergeStoRootNotAccepted)
let let
pyl = PayloadRef(pType: RawData, rawBlob: @data) pyl = LeafPayload(pType: RawData, rawBlob: @data)
rc = db.mergePayloadImpl(root, path, pyl) rc = db.mergePayloadImpl(root, path, pyl)
if rc.isOk: if rc.isOk:
ok true ok true
@ -130,19 +130,18 @@ proc mergeStorageData*(
useID = if stoID.isValid: stoID else: db.vidFetch() useID = if stoID.isValid: stoID else: db.vidFetch()
# Call merge # Call merge
pyl = PayloadRef(pType: StoData, stoData: stoData) pyl = LeafPayload(pType: StoData, stoData: stoData)
rc = db.mergePayloadImpl(useID, stoPath.data, pyl) rc = db.mergePayloadImpl(useID, stoPath.data, pyl)
if rc.isOk: if rc.isOk:
# Mark account path Merkle keys for update # Mark account path Merkle keys for update
resetKeys() resetKeys()
if not stoID.isValid: if not stoID.isValid:
# Make sure that there is an account that refers to that storage trie # Make sure that there is an account that refers to that storage trie
let leaf = vtx.dup # Dup on modify let leaf = vtx.dup # Dup on modify
leaf.lData.stoID = useID leaf.lData.stoID = useID
db.layersPutAccPayload(accPath, leaf.lData) db.layersPutAccLeaf(accPath, leaf)
db.layersPutVtx((VertexID(1), touched[pos - 1]), leaf) db.layersPutVtx((VertexID(1), touched[pos - 1]), leaf)
return ok() return ok()

View File

@ -28,18 +28,19 @@ proc xPfx(vtx: VertexRef): NibblesBuf =
# ----------- # -----------
proc layersPutLeaf( proc layersPutLeaf(
db: AristoDbRef, rvid: RootedVertexID, path: NibblesBuf, payload: PayloadRef db: AristoDbRef, rvid: RootedVertexID, path: NibblesBuf, payload: LeafPayload
) = ): VertexRef =
let vtx = VertexRef(vType: Leaf, lPfx: path, lData: payload) let vtx = VertexRef(vType: Leaf, lPfx: path, lData: payload)
db.layersPutVtx(rvid, vtx) db.layersPutVtx(rvid, vtx)
vtx
proc insertBranch( proc insertBranch(
db: AristoDbRef, # Database, top layer db: AristoDbRef, # Database, top layer
linkID: RootedVertexID, # Vertex ID to insert linkID: RootedVertexID, # Vertex ID to insert
linkVtx: VertexRef, # Vertex to insert linkVtx: VertexRef, # Vertex to insert
path: NibblesBuf, path: NibblesBuf,
payload: PayloadRef, # Leaf data payload payload: LeafPayload, # Leaf data payload
): Result[void, AristoError] = ): Result[VertexRef, AristoError] =
## ##
## Insert `Extension->Branch` vertex chain or just a `Branch` vertex ## Insert `Extension->Branch` vertex chain or just a `Branch` vertex
## ##
@ -97,7 +98,7 @@ proc insertBranch(
forkVtx.bVid[linkInx] = local forkVtx.bVid[linkInx] = local
db.layersPutVtx((linkID.root, local), linkDup) db.layersPutVtx((linkID.root, local), linkDup)
block: let leafVtx = block:
let local = db.vidFetch(pristine = true) let local = db.vidFetch(pristine = true)
forkVtx.bVid[leafInx] = local forkVtx.bVid[leafInx] = local
db.layersPutLeaf((linkID.root, local), path.slice(1 + n), payload) db.layersPutLeaf((linkID.root, local), path.slice(1 + n), payload)
@ -112,15 +113,15 @@ proc insertBranch(
else: else:
db.layersPutVtx(linkID, forkVtx) db.layersPutVtx(linkID, forkVtx)
ok() ok(leafVtx)
proc concatBranchAndLeaf( proc concatBranchAndLeaf(
db: AristoDbRef, # Database, top layer db: AristoDbRef, # Database, top layer
brVid: RootedVertexID, # Branch vertex ID from from `Hike` top brVid: RootedVertexID, # Branch vertex ID from from `Hike` top
brVtx: VertexRef, # Branch vertex, linked to from `Hike` brVtx: VertexRef, # Branch vertex, linked to from `Hike`
path: NibblesBuf, path: NibblesBuf,
payload: PayloadRef, # Leaf data payload payload: LeafPayload, # Leaf data payload
): Result[void, AristoError] = ): Result[VertexRef, AristoError] =
## Append argument branch vertex passed as argument `(brID,brVtx)` and then ## Append argument branch vertex passed as argument `(brID,brVtx)` and then
## a `Leaf` vertex derived from the argument `payload`. ## a `Leaf` vertex derived from the argument `payload`.
## ##
@ -137,9 +138,7 @@ proc concatBranchAndLeaf(
brDup.bVid[nibble] = vid brDup.bVid[nibble] = vid
db.layersPutVtx(brVid, brDup) db.layersPutVtx(brVid, brDup)
db.layersPutLeaf((brVid.root, vid), path.slice(1), payload) ok db.layersPutLeaf((brVid.root, vid), path.slice(1), payload)
ok()
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
@ -149,8 +148,8 @@ proc mergePayloadImpl*(
db: AristoDbRef, # Database, top layer db: AristoDbRef, # Database, top layer
root: VertexID, # MPT state root root: VertexID, # MPT state root
path: openArray[byte], # Leaf item to add to the database path: openArray[byte], # Leaf item to add to the database
payload: PayloadRef, # Payload value payload: LeafPayload, # Payload value
): Result[void, AristoError] = ): Result[VertexRef, AristoError] =
## Merge the argument `(root,path)` key-value-pair into the top level vertex ## Merge the argument `(root,path)` key-value-pair into the top level vertex
## table of the database `db`. The `path` argument is used to address the ## table of the database `db`. The `path` argument is used to address the
## leaf vertex with the payload. It is stored or updated on the database ## leaf vertex with the payload. It is stored or updated on the database
@ -167,8 +166,7 @@ proc mergePayloadImpl*(
# We're at the root vertex and there is no data - this must be a fresh # We're at the root vertex and there is no data - this must be a fresh
# VertexID! # VertexID!
db.layersPutLeaf((root, cur), path, payload) return ok db.layersPutLeaf((root, cur), path, payload)
return ok()
template resetKeys() = template resetKeys() =
# Reset cached hashes of touched verticies # Reset cached hashes of touched verticies
@ -182,28 +180,30 @@ proc mergePayloadImpl*(
case vtx.vType case vtx.vType
of Leaf: of Leaf:
if path == vtx.lPfx: let leafVtx =
# Replace the current vertex with a new payload if path == vtx.lPfx:
# Replace the current vertex with a new payload
if vtx.lData == payload: if vtx.lData == payload:
# TODO is this still needed? Higher levels should already be doing # TODO is this still needed? Higher levels should already be doing
# these checks # these checks
return err(MergeLeafPathCachedAlready) return err(MergeLeafPathCachedAlready)
if root == VertexID(1): var payload = payload
# TODO can we avoid this hack? it feels like the caller should already if root == VertexID(1):
# have set an appropriate stoID - this "fixup" feels risky, # TODO can we avoid this hack? it feels like the caller should already
# specially from a caching point of view # have set an appropriate stoID - this "fixup" feels risky,
payload.stoID = vtx.lData.stoID # specially from a caching point of view
payload.stoID = vtx.lData.stoID
db.layersPutLeaf((root, cur), path, payload) db.layersPutLeaf((root, cur), path, payload)
else: else:
# Turn leaf into branch, leaves with possible ext prefix # Turn leaf into branch, leaves with possible ext prefix
? db.insertBranch((root, cur), vtx, path, payload) ? db.insertBranch((root, cur), vtx, path, payload)
resetKeys() resetKeys()
return ok() return ok(leafVtx)
of Extension: of Extension:
if vtx.ePfx.len == path.sharedPrefixLen(vtx.ePfx): if vtx.ePfx.len == path.sharedPrefixLen(vtx.ePfx):
@ -211,10 +211,10 @@ proc mergePayloadImpl*(
path = path.slice(vtx.ePfx.len) path = path.slice(vtx.ePfx.len)
vtx = ?db.getVtxRc((root, cur)) vtx = ?db.getVtxRc((root, cur))
else: else:
? db.insertBranch((root, cur), vtx, path, payload) let leafVtx = ? db.insertBranch((root, cur), vtx, path, payload)
resetKeys() resetKeys()
return ok() return ok(leafVtx)
of Branch: of Branch:
let let
nibble = path[0] nibble = path[0]
@ -225,9 +225,9 @@ proc mergePayloadImpl*(
path = path.slice(1) path = path.slice(1)
vtx = ?db.getVtxRc((root, next)) vtx = ?db.getVtxRc((root, next))
else: else:
? db.concatBranchAndLeaf((root, cur), vtx, path, payload) let leafVtx = ? db.concatBranchAndLeaf((root, cur), vtx, path, payload)
resetKeys() resetKeys()
return ok() return ok(leafVtx)
err(MergeHikeFailed) err(MergeHikeFailed)

View File

@ -72,7 +72,7 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 =
# ------------------ # ------------------
proc toLeafTiePayload(hike: Hike): (LeafTie,PayloadRef) = proc toLeafTiePayload(hike: Hike): (LeafTie,LeafPayload) =
## Shortcut for iterators. This function will gloriously crash unless the ## Shortcut for iterators. This function will gloriously crash unless the
## `hike` argument is complete. ## `hike` argument is complete.
(LeafTie(root: hike.root, path: hike.to(NibblesBuf).pathToTag.value), (LeafTie(root: hike.root, path: hike.to(NibblesBuf).pathToTag.value),
@ -414,7 +414,7 @@ proc right*(
iterator rightPairs*( iterator rightPairs*(
db: AristoDbRef; # Database layer db: AristoDbRef; # Database layer
start = low(LeafTie); # Before or at first value start = low(LeafTie); # Before or at first value
): (LeafTie,PayloadRef) = ): (LeafTie,LeafPayload) =
## Traverse the sub-trie implied by the argument `start` with increasing ## Traverse the sub-trie implied by the argument `start` with increasing
## order. ## order.
var var
@ -507,7 +507,7 @@ proc left*(
iterator leftPairs*( iterator leftPairs*(
db: AristoDbRef; # Database layer db: AristoDbRef; # Database layer
start = high(LeafTie); # Before or at first value start = high(LeafTie); # Before or at first value
): (LeafTie,PayloadRef) = ): (LeafTie,LeafPayload) =
## Traverse the sub-trie implied by the argument `start` with decreasing ## Traverse the sub-trie implied by the argument `start` with decreasing
## order. It will stop at any error. In order to reproduce an error, one ## order. It will stop at any error. In order to reproduce an error, one
## can run the function `left()` on the last returned `LiefTie` item with ## can run the function `left()` on the last returned `LiefTie` item with

View File

@ -32,7 +32,7 @@ proc aristoError(error: AristoError): NodeRef =
NodeRef(vType: Leaf, error: error) NodeRef(vType: Leaf, error: error)
proc serialise( proc serialise(
pyl: PayloadRef; pyl: LeafPayload;
getKey: ResolveVidFn; getKey: ResolveVidFn;
): Result[Blob,(VertexID,AristoError)] = ): Result[Blob,(VertexID,AristoError)] =
## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of ## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
@ -105,7 +105,7 @@ proc read*(rlp: var Rlp; T: type NodeRef): T {.gcsafe, raises: [RlpError].} =
return NodeRef( return NodeRef(
vType: Leaf, vType: Leaf,
lPfx: pathSegment, lPfx: pathSegment,
lData: PayloadRef( lData: LeafPayload(
pType: RawData, pType: RawData,
rawBlob: blobs[1])) rawBlob: blobs[1]))
else: else:
@ -169,7 +169,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
proc serialise*( proc serialise*(
db: AristoDbRef; db: AristoDbRef;
root: VertexID; root: VertexID;
pyl: PayloadRef; pyl: LeafPayload;
): Result[Blob,(VertexID,AristoError)] = ): Result[Blob,(VertexID,AristoError)] =
## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of ## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
## account type, otherwise pass the data as is. ## account type, otherwise pass the data as is.

View File

@ -58,8 +58,8 @@ type
## `Aristo` journal entry ## `Aristo` journal entry
blind: bool ## Marked `true` for `fetch()` logs blind: bool ## Marked `true` for `fetch()` logs
accPath: PathID ## Account path needed for storage data accPath: PathID ## Account path needed for storage data
old: PayloadRef ## Deleted or just cached payload version old: LeafPayload ## Deleted or just cached payload version
cur: PayloadRef ## Updated/current or just cached cur: LeafPayload ## Updated/current or just cached
curBlob: Blob ## Serialised version for `cur` accounts data curBlob: Blob ## Serialised version for `cur` accounts data
TracerBlobTabRef* = TracerBlobTabRef* =
@ -116,7 +116,7 @@ when EnableDebugLog:
else: else:
"@" & q "@" & q
func `$`(pyl: PayloadRef): string = func `$`(pyl: LeafPayload): string =
case pyl.pType: case pyl.pType:
of RawData: of RawData:
pyl.rawBlob.toStr pyl.rawBlob.toStr
@ -172,7 +172,7 @@ func leafTie(
ok LeafTie(root: root, path: tag) ok LeafTie(root: root, path: tag)
proc blobify( proc blobify(
pyl: PayloadRef; pyl: LeafPayload;
api: AristoApiRef; api: AristoApiRef;
mpt: AristoDbRef; mpt: AristoDbRef;
): Result[Blob,(VertexID,AristoError)] = ): Result[Blob,(VertexID,AristoError)] =
@ -598,7 +598,7 @@ proc traceRecorder(
proc(mpt: AristoDbRef; proc(mpt: AristoDbRef;
root: VertexID; root: VertexID;
path: openArray[byte]; path: openArray[byte];
): Result[PayloadRef,(VertexID,AristoError)] = ): Result[LeafPayload,(VertexID,AristoError)] =
when EnableDebugLog: when EnableDebugLog:
const const
logTxt = "trace fetchPayload" logTxt = "trace fetchPayload"
@ -714,7 +714,7 @@ proc traceRecorder(
flags = tr.inst[^1].flags flags = tr.inst[^1].flags
# TODO: collect all paths on this tree # TODO: collect all paths on this tree
var deletedRows: seq[(LeafTie,PayloadRef)] var deletedRows: seq[(LeafTie,LeafPayload)]
# Delete from DB # Delete from DB
api.delTree(mpt, root, accPath).isOkOr: api.delTree(mpt, root, accPath).isOkOr:
@ -769,7 +769,7 @@ proc traceRecorder(
# Create journal entry, `pType` same as generated by `merge()` # Create journal entry, `pType` same as generated by `merge()`
tpl = TracerPylRef( tpl = TracerPylRef(
accPath: accPath, accPath: accPath,
cur: PayloadRef(pType: RawData, rawBlob: @data)) cur: LeafPayload(pType: RawData, rawBlob: @data))
# Update journal # Update journal
let jrn = tr.mptJournalGet(mpt, key) let jrn = tr.mptJournalGet(mpt, key)
@ -805,7 +805,7 @@ proc traceRecorder(
proc(mpt: AristoDbRef; proc(mpt: AristoDbRef;
root: VertexID; root: VertexID;
path: openArray[byte]; path: openArray[byte];
pyl: PayloadRef; pyl: LeafPayload;
accPath = VOID_PATH_ID; accPath = VOID_PATH_ID;
): Result[bool,AristoError] = ): Result[bool,AristoError] =
when EnableDebugLog: when EnableDebugLog:
@ -888,9 +888,9 @@ func kLog*(inst: TracerLogInstRef): TableRef[Blob,Blob] =
if tbl.cur.len != 0: if tbl.cur.len != 0:
result[key] = tbl.cur result[key] = tbl.cur
func mLog*(inst: TracerLogInstRef): TableRef[LeafTie,PayloadRef] = func mLog*(inst: TracerLogInstRef): TableRef[LeafTie,LeafPayload] =
## Export `mpt` journal ## Export `mpt` journal
result = newTable[LeafTie,PayloadRef]() result = newTable[LeafTie,LeafPayload]()
for (_,mptTab) in inst.mptJournal.pairs: for (_,mptTab) in inst.mptJournal.pairs:
for (key,tpl) in mptTab.pairs: for (key,tpl) in mptTab.pairs:
if not tpl.cur.isNil: if not tpl.cur.isNil:

View File

@ -168,7 +168,7 @@ func to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
leafTie: LeafTie( leafTie: LeafTie(
root: rootVid, root: rootVid,
path: it.accKey.to(PathID)), path: it.accKey.to(PathID)),
payload: PayloadRef(pType: RawData, rawBlob: it.accBlob)))) payload: LeafPayload(pType: RawData, rawBlob: it.accBlob))))
func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T = func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
var (rootKey, rootVid) = (Hash256(), VertexID(0)) var (rootKey, rootVid) = (Hash256(), VertexID(0))
@ -185,7 +185,7 @@ func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
leafTie: LeafTie( leafTie: LeafTie(
root: rootVid, root: rootVid,
path: it.slotHash.to(PathID)), path: it.slotHash.to(PathID)),
payload: PayloadRef(pType: RawData, rawBlob: it.slotData)))) payload: LeafPayload(pType: RawData, rawBlob: it.slotData))))
if 0 < result.len: if 0 < result.len:
result[^1].proof = s.data.proof result[^1].proof = s.data.proof