Turn payload into value type (#2483)
The Vertex type unifies branches, extensions and leaves into a single memory area where the largest member is the branch (128 bytes + overhead). The payloads we have are all smaller than 128 bytes, so wrapping them in an extra layer of `ref` is wasteful from a memory usage perspective. Further, the refs must be visited during the mark-and-sweep (M&S) phase of garbage collection - since we keep millions of these, many of them short-lived, this takes up significant CPU time.

```
Function                       CPU Time: Total  CPU Time: Self  Module  Function (Full)                                                 Source File  Start Address
system::markStackAndRegisters  10.0%            4.922s          nimbus  system::markStackAndRegisters(var<system::GcHeap>).constprop.0  gc.nim       0x701230
```
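In rough terms, the change looks like the sketch below. This is a simplified illustration, not the actual Aristo definitions: the enum and the variant fields are abbreviated placeholders, and the real code is in the diff further down.

```nim
type
  PayloadType = enum
    RawData, AccountData, StoData

  # Before: every leaf payload was a separately allocated ref object that the
  # GC has to trace during its mark-and-sweep phase.
  PayloadRef = ref object of RootRef
    case pType: PayloadType
    of RawData:
      rawBlob: seq[byte]          # placeholder fields, simplified
    of AccountData:
      stoID: uint64
    of StoData:
      stoData: array[32, byte]

  # After: a plain value object embedded directly in the leaf vertex, so it
  # shares the vertex allocation and adds no extra GC-visible ref.
  LeafPayload = object
    case pType: PayloadType
    of RawData:
      rawBlob: seq[byte]
    of AccountData:
      stoID: uint64
    of StoData:
      stoData: array[32, byte]

  VertexRef = ref object
    lData: LeafPayload            # was `lData*: PayloadRef`
```

Because the payload now lives inside the vertex, code that used to pass `PayloadRef` around (and cache it per account path) passes or caches the enclosing `VertexRef` instead, which is what the `accPyls` to `accLeaves` renames in the diff reflect.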
parent 72947b3647
commit f3a56002ca
```diff
@@ -121,9 +121,7 @@ proc load256(data: openArray[byte]; start: var int, len: int): Result[UInt256,Ar
 # Public functions
 # ------------------------------------------------------------------------------

-proc blobifyTo*(pyl: PayloadRef, data: var Blob) =
-  if pyl.isNil:
-    return
+proc blobifyTo*(pyl: LeafPayload, data: var Blob) =
   case pyl.pType
   of RawData:
     data &= pyl.rawBlob
@@ -248,22 +246,22 @@ proc blobify*(lSst: SavedState): Result[Blob,AristoError] =
 # -------------
 proc deblobify(
     data: openArray[byte];
-    T: type PayloadRef;
-    ): Result[PayloadRef,AristoError] =
+    T: type LeafPayload;
+    ): Result[LeafPayload,AristoError] =
   if data.len == 0:
-    return ok PayloadRef(pType: RawData)
+    return ok LeafPayload(pType: RawData)

   let mask = data[^1]
   if (mask and 0x10) > 0: # unstructured payload
-    return ok PayloadRef(pType: RawData, rawBlob: data[0 .. ^2])
+    return ok LeafPayload(pType: RawData, rawBlob: data[0 .. ^2])

   if (mask and 0x20) > 0: # Slot storage data
-    return ok PayloadRef(
+    return ok LeafPayload(
       pType: StoData,
       stoData: ?deblobify(data.toOpenArray(0, data.len - 2), UInt256))

   var
-    pAcc = PayloadRef(pType: AccountData)
+    pAcc = LeafPayload(pType: AccountData)
     start = 0
     lens = uint16.fromBytesBE(data.toOpenArray(data.len - 3, data.len - 2))
@@ -352,7 +350,7 @@ proc deblobify*(
       NibblesBuf.fromHexPrefix record.toOpenArray(pLen, rLen-1)
     if not isLeaf:
       return err(DeblobLeafGotExtPrefix)
-    let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(PayloadRef)
+    let pyl = ? record.toOpenArray(0, pLen - 1).deblobify(LeafPayload)
     VertexRef(
       vType: Leaf,
       lPfx: pathSegment,
```
```diff
@@ -180,10 +180,7 @@ proc ppPathPfx(pfx: NibblesBuf): string =
 proc ppNibble(n: int8): string =
   if n < 0: "ø" elif n < 10: $n else: n.toHexLsb

-proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
-  if p.isNil:
-    result = "n/a"
-  else:
+proc ppPayload(p: LeafPayload, db: AristoDbRef): string =
   case p.pType:
   of RawData:
     result &= p.rawBlob.toHex.squeeze(hex=true)
@@ -493,7 +490,7 @@ proc pp*(vid: VertexID): string =
 proc pp*(vLst: openArray[VertexID]): string =
   vLst.ppVidList

-proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string =
+proc pp*(p: LeafPayload, db = AristoDbRef(nil)): string =
   p.ppPayload(db.orDefault)

 proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string =
```
```diff
@@ -344,7 +344,7 @@ proc deleteAccountRecord*(
   db.deleteImpl(hike).isOkOr:
     return err(error[1])

-  db.layersPutAccPayload(accPath, nil)
+  db.layersPutAccLeaf(accPath, nil)

   ok()

@@ -438,7 +438,7 @@ proc deleteStorageData*(
   # De-register the deleted storage tree from the account record
   let leaf = wpAcc.vtx.dup # Dup on modify
   leaf.lData.stoID = VertexID(0)
-  db.layersPutAccPayload(accPath, leaf.lData)
+  db.layersPutAccLeaf(accPath, leaf)
   db.layersPutVtx((accHike.root, wpAcc.vid), leaf)
   db.layersResKey((accHike.root, wpAcc.vid))
   ok(true)
@@ -469,7 +469,7 @@ proc deleteStorageTree*(
   # De-register the deleted storage tree from the accounts record
   let leaf = wpAcc.vtx.dup # Dup on modify
   leaf.lData.stoID = VertexID(0)
-  db.layersPutAccPayload(accPath, leaf.lData)
+  db.layersPutAccLeaf(accPath, leaf)
   db.layersPutVtx((accHike.root, wpAcc.vid), leaf)
   db.layersResKey((accHike.root, wpAcc.vid))
   ok()
```
```diff
@@ -83,10 +83,10 @@ proc deltaPersistent*(
   ? be.putEndFn writeBatch # Finalise write batch

   # Copy back updated payloads
-  for accPath, pyl in db.balancer.accPyls:
+  for accPath, pyl in db.balancer.accLeaves:
     let accKey = accPath.to(AccountKey)
-    if not db.accPyls.lruUpdate(accKey, pyl):
-      discard db.accPyls.lruAppend(accKey, pyl, accLruSize)
+    if not db.accLeaves.lruUpdate(accKey, pyl):
+      discard db.accLeaves.lruAppend(accKey, pyl, accLruSize)

   # Update dudes and this descriptor
   ? updateSiblings.update().commit()
```
```diff
@@ -84,7 +84,7 @@ type
     # Debugging data below, might go away in future
     xMap*: Table[HashKey,HashSet[RootedVertexID]] ## For pretty printing/debugging

-    accPyls*: KeyedQueue[AccountKey, PayloadRef]
+    accLeaves*: KeyedQueue[AccountKey, VertexRef]
      ## Account path to payload cache - accounts are frequently accessed by
      ## account path when contracts interact with them - this cache ensures
      ## that we don't have to re-traverse the storage trie for every such
@@ -133,9 +133,6 @@ func isValid*(vtx: VertexRef): bool =
 func isValid*(nd: NodeRef): bool =
   nd != NodeRef(nil)

-func isValid*(pld: PayloadRef): bool =
-  pld != PayloadRef(nil)
-
 func isValid*(pid: PathID): bool =
   pid != VOID_PATH_ID

```
```diff
@@ -24,7 +24,7 @@ type
     ## Generalised key-value pair for a sub-trie. The main trie is the
     ## sub-trie with `root=VertexID(1)`.
     leafTie*: LeafTie                  ## Full `Patricia Trie` path root-to-leaf
-    payload*: PayloadRef               ## Leaf data payload (see below)
+    payload*: LeafPayload              ## Leaf data payload (see below)

   VertexType* = enum
     ## Type of `Aristo Trie` vertex
@@ -34,7 +34,7 @@ type

   AristoAccount* = object
     ## Application relevant part of an Ethereum account. Note that the storage
-    ## data/tree reference is not part of the account (see `PayloadRef` below.)
+    ## data/tree reference is not part of the account (see `LeafPayload` below.)
     nonce*: AccountNonce               ## Some `uint64` type
     balance*: UInt256
     codeHash*: Hash256
@@ -45,7 +45,7 @@ type
     AccountData                        ## `Aristo account` with vertex IDs links
     StoData                            ## Slot storage data

-  PayloadRef* = ref object of RootRef
+  LeafPayload* = object
     ## The payload type depends on the sub-tree used. The `VertexID(1)` rooted
     ## sub-tree only has `AccountData` type payload, stoID-based have StoData
     ## while generic have RawData
@@ -63,7 +63,7 @@ type
     case vType*: VertexType
     of Leaf:
       lPfx*: NibblesBuf                ## Portion of path segment
-      lData*: PayloadRef               ## Reference to data payload
+      lData*: LeafPayload              ## Reference to data payload
     of Extension:
       ePfx*: NibblesBuf                ## Portion of path segment
       eVid*: VertexID                  ## Edge to vertex with ID `eVid`
@@ -115,7 +115,7 @@ type
     kMap*: Table[RootedVertexID,HashKey]   ## Merkle hash key mapping
     vTop*: VertexID                        ## Last used vertex ID

-    accPyls*: Table[Hash256, PayloadRef]   ## Account path -> VertexRef
+    accLeaves*: Table[Hash256, VertexRef]  ## Account path -> VertexRef

   LayerRef* = ref LayerObj
   LayerObj* = object
@@ -137,15 +137,11 @@ func hash*(node: NodeRef): Hash =
   cast[pointer](node).hash

 # ------------------------------------------------------------------------------
-# Public helpers: `NodeRef` and `PayloadRef`
+# Public helpers: `NodeRef` and `LeafPayload`
 # ------------------------------------------------------------------------------

-proc `==`*(a, b: PayloadRef): bool =
+proc `==`*(a, b: LeafPayload): bool =
   ## Beware, potential deep comparison
-  if a.isNil:
-    return b.isNil
-  if b.isNil:
-    return false
   if unsafeAddr(a) != unsafeAddr(b):
     if a.pType != b.pType:
       return false
@@ -204,20 +200,20 @@ proc `==`*(a, b: NodeRef): bool =
 # Public helpers, miscellaneous functions
 # ------------------------------------------------------------------------------

-func dup*(pld: PayloadRef): PayloadRef =
+func dup*(pld: LeafPayload): LeafPayload =
   ## Duplicate payload.
   case pld.pType:
   of RawData:
-    PayloadRef(
+    LeafPayload(
       pType: RawData,
       rawBlob: pld.rawBlob)
   of AccountData:
-    PayloadRef(
+    LeafPayload(
       pType: AccountData,
       account: pld.account,
       stoID: pld.stoID)
   of StoData:
-    PayloadRef(
+    LeafPayload(
       pType: StoData,
       stoData: pld.stoData
     )
```
```diff
@@ -36,11 +36,11 @@ func mustBeGeneric(
   ok()


-proc retrievePayload(
+proc retrieveLeaf(
     db: AristoDbRef;
     root: VertexID;
     path: openArray[byte];
-    ): Result[PayloadRef,AristoError] =
+    ): Result[VertexRef,AristoError] =
   if path.len == 0:
     return err(FetchPathInvalid)

@@ -51,35 +51,35 @@ proc retrievePayload(
       return err(error)

     if vtx.vType == Leaf:
-      return ok vtx.lData
+      return ok vtx

   return err(FetchPathNotFound)

 proc retrieveAccountPayload(
     db: AristoDbRef;
     accPath: Hash256;
-    ): Result[PayloadRef,AristoError] =
-  if (let pyl = db.layersGetAccPayload(accPath); pyl.isSome()):
+    ): Result[LeafPayload,AristoError] =
+  if (let pyl = db.layersGetAccLeaf(accPath); pyl.isSome()):
     if not pyl[].isValid():
       return err(FetchPathNotFound)
-    return ok pyl[]
+    return ok pyl[].lData

   let accKey = accPath.to(AccountKey)
-  if (let pyl = db.accPyls.lruFetch(accKey); pyl.isSome()):
+  if (let pyl = db.accLeaves.lruFetch(accKey); pyl.isSome()):
     if not pyl[].isValid():
       return err(FetchPathNotFound)
-    return ok pyl[]
+    return ok pyl[].lData

   # Updated payloads are stored in the layers so if we didn't find them there,
   # it must have been in the database
   let
-    payload = db.retrievePayload(VertexID(1), accPath.data).valueOr:
+    payload = db.retrieveLeaf(VertexID(1), accPath.data).valueOr:
       if error == FetchAccInaccessible:
-        discard db.accPyls.lruAppend(accKey, nil, accLruSize)
+        discard db.accLeaves.lruAppend(accKey, nil, accLruSize)
        return err(FetchPathNotFound)
      return err(error)

-  ok db.accPyls.lruAppend(accKey, payload, accLruSize)
+  ok db.accLeaves.lruAppend(accKey, payload, accLruSize).lData

 proc retrieveMerkleHash(
     db: AristoDbRef;
@@ -105,7 +105,7 @@ proc hasPayload(
     root: VertexID;
     path: openArray[byte];
     ): Result[bool,AristoError] =
-  let error = db.retrievePayload(root, path).errorOr:
+  let error = db.retrieveLeaf(root, path).errorOr:
     return ok(true)

   if error == FetchPathNotFound:
@@ -218,9 +218,9 @@ proc fetchGenericData*(
   ## indexed by `path`.
   ##
   ? root.mustBeGeneric()
-  let pyl = ? db.retrievePayload(root, path)
-  assert pyl.pType == RawData   # debugging only
-  ok pyl.rawBlob
+  let pyl = ? db.retrieveLeaf(root, path)
+  assert pyl.lData.pType == RawData   # debugging only
+  ok pyl.lData.rawBlob

 proc fetchGenericState*(
     db: AristoDbRef;
@@ -249,9 +249,9 @@ proc fetchStorageData*(
   ## For a storage tree related to account `accPath`, fetch the data record
   ## from the database indexed by `path`.
   ##
-  let pyl = ? db.retrievePayload(? db.fetchStorageID accPath, stoPath.data)
-  assert pyl.pType == StoData   # debugging only
-  ok pyl.stoData
+  let pyl = ? db.retrieveLeaf(? db.fetchStorageID accPath, stoPath.data)
+  assert pyl.lData.pType == StoData   # debugging only
+  ok pyl.lData.stoData

 proc fetchStorageState*(
     db: AristoDbRef;
```
```diff
@@ -91,15 +91,15 @@ func layersGetKeyOrVoid*(db: AristoDbRef; rvid: RootedVertexID): HashKey =
   ## Simplified version of `layersGetKey()`
   db.layersGetKey(rvid).valueOr: VOID_HASH_KEY

-func layersGetAccPayload*(db: AristoDbRef; accPath: Hash256): Opt[PayloadRef] =
-  db.top.delta.accPyls.withValue(accPath, item):
+func layersGetAccLeaf*(db: AristoDbRef; accPath: Hash256): Opt[VertexRef] =
+  db.top.delta.accLeaves.withValue(accPath, item):
     return Opt.some(item[])

   for w in db.rstack:
-    w.delta.accPyls.withValue(accPath, item):
+    w.delta.accLeaves.withValue(accPath, item):
       return Opt.some(item[])

-  Opt.none(PayloadRef)
+  Opt.none(VertexRef)


 # ------------------------------------------------------------------------------
@@ -147,8 +147,8 @@ proc layersUpdateVtx*(
   db.layersResKey(rvid)


-func layersPutAccPayload*(db: AristoDbRef; accPath: Hash256; pyl: PayloadRef) =
-  db.top.delta.accPyls[accPath] = pyl
+func layersPutAccLeaf*(db: AristoDbRef; accPath: Hash256; pyl: VertexRef) =
+  db.top.delta.accLeaves[accPath] = pyl

 # ------------------------------------------------------------------------------
 # Public functions
@@ -165,8 +165,8 @@ func layersMergeOnto*(src: LayerRef; trg: var LayerObj) =
   for (vid,key) in src.delta.kMap.pairs:
     trg.delta.kMap[vid] = key
   trg.delta.vTop = src.delta.vTop
-  for (accPath,pyl) in src.delta.accPyls.pairs:
-    trg.delta.accPyls[accPath] = pyl
+  for (accPath,pyl) in src.delta.accLeaves.pairs:
+    trg.delta.accLeaves[accPath] = pyl

 func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
   ## Provide a collapsed copy of layers up to a particular transaction level.
@@ -182,7 +182,7 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
       sTab: layers[0].delta.sTab.dup,   # explicit dup for ref values
       kMap: layers[0].delta.kMap,
       vTop: layers[^1].delta.vTop,
-      accPyls: layers[0].delta.accPyls,
+      accLeaves: layers[0].delta.accLeaves,
     ))

   # Consecutively merge other layers on top
@@ -191,8 +191,8 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
       result.delta.sTab[vid] = vtx
     for (vid,key) in layers[n].delta.kMap.pairs:
       result.delta.kMap[vid] = key
-    for (accPath,pyl) in layers[n].delta.accPyls.pairs:
-      result.delta.accPyls[accPath] = pyl
+    for (accPath,pyl) in layers[n].delta.accLeaves.pairs:
+      result.delta.accLeaves[accPath] = pyl

 # ------------------------------------------------------------------------------
 # Public iterators
```
```diff
@@ -51,10 +51,10 @@ proc mergeAccountRecord*(
   ## otherwise.
   ##
   let
-    pyl = PayloadRef(pType: AccountData, account: accRec)
+    pyl = LeafPayload(pType: AccountData, account: accRec)
     rc = db.mergePayloadImpl(VertexID(1), accPath.data, pyl)
   if rc.isOk:
-    db.layersPutAccPayload(accPath, pyl)
+    db.layersPutAccLeaf(accPath, rc.value)
     ok true
   elif rc.error in MergeNoAction:
     ok false
@@ -84,7 +84,7 @@ proc mergeGenericData*(
     return err(MergeStoRootNotAccepted)

   let
-    pyl = PayloadRef(pType: RawData, rawBlob: @data)
+    pyl = LeafPayload(pType: RawData, rawBlob: @data)
     rc = db.mergePayloadImpl(root, path, pyl)
   if rc.isOk:
     ok true
@@ -130,19 +130,18 @@ proc mergeStorageData*(
     useID = if stoID.isValid: stoID else: db.vidFetch()

     # Call merge
-    pyl = PayloadRef(pType: StoData, stoData: stoData)
+    pyl = LeafPayload(pType: StoData, stoData: stoData)
     rc = db.mergePayloadImpl(useID, stoPath.data, pyl)

   if rc.isOk:
     # Mark account path Merkle keys for update
     resetKeys()

-
     if not stoID.isValid:
       # Make sure that there is an account that refers to that storage trie
       let leaf = vtx.dup # Dup on modify
       leaf.lData.stoID = useID
-      db.layersPutAccPayload(accPath, leaf.lData)
+      db.layersPutAccLeaf(accPath, leaf)
       db.layersPutVtx((VertexID(1), touched[pos - 1]), leaf)

     return ok()
```
```diff
@@ -28,18 +28,19 @@ proc xPfx(vtx: VertexRef): NibblesBuf =
 # -----------

 proc layersPutLeaf(
-    db: AristoDbRef, rvid: RootedVertexID, path: NibblesBuf, payload: PayloadRef
-) =
+    db: AristoDbRef, rvid: RootedVertexID, path: NibblesBuf, payload: LeafPayload
+): VertexRef =
   let vtx = VertexRef(vType: Leaf, lPfx: path, lData: payload)
   db.layersPutVtx(rvid, vtx)
+  vtx

 proc insertBranch(
     db: AristoDbRef,                   # Database, top layer
     linkID: RootedVertexID,            # Vertex ID to insert
     linkVtx: VertexRef,                # Vertex to insert
     path: NibblesBuf,
-    payload: PayloadRef,               # Leaf data payload
-): Result[void, AristoError] =
+    payload: LeafPayload,              # Leaf data payload
+): Result[VertexRef, AristoError] =
   ##
   ## Insert `Extension->Branch` vertex chain or just a `Branch` vertex
   ##
@@ -97,7 +98,7 @@ proc insertBranch(
     forkVtx.bVid[linkInx] = local
     db.layersPutVtx((linkID.root, local), linkDup)

-  block:
+  let leafVtx = block:
     let local = db.vidFetch(pristine = true)
     forkVtx.bVid[leafInx] = local
     db.layersPutLeaf((linkID.root, local), path.slice(1 + n), payload)
@@ -112,15 +113,15 @@ proc insertBranch(
   else:
     db.layersPutVtx(linkID, forkVtx)

-  ok()
+  ok(leafVtx)

 proc concatBranchAndLeaf(
     db: AristoDbRef,                   # Database, top layer
     brVid: RootedVertexID,             # Branch vertex ID from from `Hike` top
     brVtx: VertexRef,                  # Branch vertex, linked to from `Hike`
     path: NibblesBuf,
-    payload: PayloadRef,               # Leaf data payload
-): Result[void, AristoError] =
+    payload: LeafPayload,              # Leaf data payload
+): Result[VertexRef, AristoError] =
   ## Append argument branch vertex passed as argument `(brID,brVtx)` and then
   ## a `Leaf` vertex derived from the argument `payload`.
   ##
@@ -137,9 +138,7 @@ proc concatBranchAndLeaf(
   brDup.bVid[nibble] = vid

   db.layersPutVtx(brVid, brDup)
-  db.layersPutLeaf((brVid.root, vid), path.slice(1), payload)
-
-  ok()
+  ok db.layersPutLeaf((brVid.root, vid), path.slice(1), payload)

 # ------------------------------------------------------------------------------
 # Public functions
@@ -149,8 +148,8 @@ proc mergePayloadImpl*(
     db: AristoDbRef,                   # Database, top layer
     root: VertexID,                    # MPT state root
     path: openArray[byte],             # Leaf item to add to the database
-    payload: PayloadRef,               # Payload value
-): Result[void, AristoError] =
+    payload: LeafPayload,              # Payload value
+): Result[VertexRef, AristoError] =
   ## Merge the argument `(root,path)` key-value-pair into the top level vertex
   ## table of the database `db`. The `path` argument is used to address the
   ## leaf vertex with the payload. It is stored or updated on the database
@@ -167,8 +166,7 @@ proc mergePayloadImpl*(

     # We're at the root vertex and there is no data - this must be a fresh
     # VertexID!
-    db.layersPutLeaf((root, cur), path, payload)
-    return ok()
+    return ok db.layersPutLeaf((root, cur), path, payload)

   template resetKeys() =
     # Reset cached hashes of touched verticies
@@ -182,6 +180,7 @@ proc mergePayloadImpl*(

     case vtx.vType
     of Leaf:
+      let leafVtx =
         if path == vtx.lPfx:
           # Replace the current vertex with a new payload

@@ -190,6 +189,7 @@ proc mergePayloadImpl*(
           # these checks
           return err(MergeLeafPathCachedAlready)

+          var payload = payload
           if root == VertexID(1):
             # TODO can we avoid this hack? it feels like the caller should already
             # have set an appropriate stoID - this "fixup" feels risky,
@@ -203,7 +203,7 @@ proc mergePayloadImpl*(
           ? db.insertBranch((root, cur), vtx, path, payload)

       resetKeys()
-      return ok()
+      return ok(leafVtx)

     of Extension:
       if vtx.ePfx.len == path.sharedPrefixLen(vtx.ePfx):
@@ -211,10 +211,10 @@ proc mergePayloadImpl*(
         path = path.slice(vtx.ePfx.len)
         vtx = ?db.getVtxRc((root, cur))
       else:
-        ? db.insertBranch((root, cur), vtx, path, payload)
+        let leafVtx = ? db.insertBranch((root, cur), vtx, path, payload)

         resetKeys()
-        return ok()
+        return ok(leafVtx)

     of Branch:
       let
         nibble = path[0]
@@ -225,9 +225,9 @@ proc mergePayloadImpl*(
         path = path.slice(1)
         vtx = ?db.getVtxRc((root, next))
       else:
-        ? db.concatBranchAndLeaf((root, cur), vtx, path, payload)
+        let leafVtx = ? db.concatBranchAndLeaf((root, cur), vtx, path, payload)
         resetKeys()
-        return ok()
+        return ok(leafVtx)

   err(MergeHikeFailed)

```
```diff
@@ -72,7 +72,7 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 =

 # ------------------

-proc toLeafTiePayload(hike: Hike): (LeafTie,PayloadRef) =
+proc toLeafTiePayload(hike: Hike): (LeafTie,LeafPayload) =
   ## Shortcut for iterators. This function will gloriously crash unless the
   ## `hike` argument is complete.
   (LeafTie(root: hike.root, path: hike.to(NibblesBuf).pathToTag.value),
@@ -414,7 +414,7 @@ proc right*(
 iterator rightPairs*(
     db: AristoDbRef;                   # Database layer
     start = low(LeafTie);              # Before or at first value
-    ): (LeafTie,PayloadRef) =
+    ): (LeafTie,LeafPayload) =
   ## Traverse the sub-trie implied by the argument `start` with increasing
   ## order.
   var
@@ -507,7 +507,7 @@ proc left*(
 iterator leftPairs*(
     db: AristoDbRef;                   # Database layer
     start = high(LeafTie);             # Before or at first value
-    ): (LeafTie,PayloadRef) =
+    ): (LeafTie,LeafPayload) =
   ## Traverse the sub-trie implied by the argument `start` with decreasing
   ## order. It will stop at any error. In order to reproduce an error, one
   ## can run the function `left()` on the last returned `LiefTie` item with
```
```diff
@@ -32,7 +32,7 @@ proc aristoError(error: AristoError): NodeRef =
   NodeRef(vType: Leaf, error: error)

 proc serialise(
-    pyl: PayloadRef;
+    pyl: LeafPayload;
     getKey: ResolveVidFn;
     ): Result[Blob,(VertexID,AristoError)] =
   ## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
@@ -105,7 +105,7 @@ proc read*(rlp: var Rlp; T: type NodeRef): T {.gcsafe, raises: [RlpError].} =
       return NodeRef(
         vType: Leaf,
         lPfx: pathSegment,
-        lData: PayloadRef(
+        lData: LeafPayload(
           pType: RawData,
           rawBlob: blobs[1]))
     else:
@@ -169,7 +169,7 @@ proc digestTo*(node: NodeRef; T: type HashKey): T =
 proc serialise*(
     db: AristoDbRef;
     root: VertexID;
-    pyl: PayloadRef;
+    pyl: LeafPayload;
     ): Result[Blob,(VertexID,AristoError)] =
   ## Encode the data payload of the argument `pyl` as RLP `Blob` if it is of
   ## account type, otherwise pass the data as is.
```
```diff
@@ -58,8 +58,8 @@ type
     ## `Aristo` journal entry
     blind: bool                        ## Marked `true` for `fetch()` logs
     accPath: PathID                    ## Account path needed for storage data
-    old: PayloadRef                    ## Deleted or just cached payload version
-    cur: PayloadRef                    ## Updated/current or just cached
+    old: LeafPayload                   ## Deleted or just cached payload version
+    cur: LeafPayload                   ## Updated/current or just cached
     curBlob: Blob                      ## Serialised version for `cur` accounts data

   TracerBlobTabRef* =
@@ -116,7 +116,7 @@ when EnableDebugLog:
     else:
       "@" & q

-  func `$`(pyl: PayloadRef): string =
+  func `$`(pyl: LeafPayload): string =
     case pyl.pType:
     of RawData:
       pyl.rawBlob.toStr
@@ -172,7 +172,7 @@ func leafTie(
   ok LeafTie(root: root, path: tag)

 proc blobify(
-    pyl: PayloadRef;
+    pyl: LeafPayload;
     api: AristoApiRef;
     mpt: AristoDbRef;
     ): Result[Blob,(VertexID,AristoError)] =
@@ -598,7 +598,7 @@ proc traceRecorder(
     proc(mpt: AristoDbRef;
          root: VertexID;
          path: openArray[byte];
-         ): Result[PayloadRef,(VertexID,AristoError)] =
+         ): Result[LeafPayload,(VertexID,AristoError)] =
       when EnableDebugLog:
         const
           logTxt = "trace fetchPayload"
@@ -714,7 +714,7 @@ proc traceRecorder(
         flags = tr.inst[^1].flags

       # TODO: collect all paths on this tree
-      var deletedRows: seq[(LeafTie,PayloadRef)]
+      var deletedRows: seq[(LeafTie,LeafPayload)]

       # Delete from DB
       api.delTree(mpt, root, accPath).isOkOr:
@@ -769,7 +769,7 @@ proc traceRecorder(
         # Create journal entry, `pType` same as generated by `merge()`
         tpl = TracerPylRef(
           accPath: accPath,
-          cur: PayloadRef(pType: RawData, rawBlob: @data))
+          cur: LeafPayload(pType: RawData, rawBlob: @data))

       # Update journal
       let jrn = tr.mptJournalGet(mpt, key)
@@ -805,7 +805,7 @@ proc traceRecorder(
     proc(mpt: AristoDbRef;
          root: VertexID;
          path: openArray[byte];
-         pyl: PayloadRef;
+         pyl: LeafPayload;
          accPath = VOID_PATH_ID;
          ): Result[bool,AristoError] =
       when EnableDebugLog:
@@ -888,9 +888,9 @@ func kLog*(inst: TracerLogInstRef): TableRef[Blob,Blob] =
       if tbl.cur.len != 0:
         result[key] = tbl.cur

-func mLog*(inst: TracerLogInstRef): TableRef[LeafTie,PayloadRef] =
+func mLog*(inst: TracerLogInstRef): TableRef[LeafTie,LeafPayload] =
   ## Export `mpt` journal
-  result = newTable[LeafTie,PayloadRef]()
+  result = newTable[LeafTie,LeafPayload]()
   for (_,mptTab) in inst.mptJournal.pairs:
     for (key,tpl) in mptTab.pairs:
       if not tpl.cur.isNil:
```
```diff
@@ -168,7 +168,7 @@ func to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
         leafTie: LeafTie(
           root: rootVid,
           path: it.accKey.to(PathID)),
-        payload: PayloadRef(pType: RawData, rawBlob: it.accBlob))))
+        payload: LeafPayload(pType: RawData, rawBlob: it.accBlob))))

 func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
   var (rootKey, rootVid) = (Hash256(), VertexID(0))
@@ -185,7 +185,7 @@ func to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
         leafTie: LeafTie(
           root: rootVid,
           path: it.slotHash.to(PathID)),
-        payload: PayloadRef(pType: RawData, rawBlob: it.slotData))))
+        payload: LeafPayload(pType: RawData, rawBlob: it.slotData))))
     if 0 < result.len:
       result[^1].proof = s.data.proof

```