Core db use differential tx layers for aristo and kvt (#1949)

* Fix kvt headers

* Provide differential layers for KVT transaction stack

why:
  Significant performance improvement

* Provide abstraction layer for database top cache layer

why:
  This will eventually be implemented as differential database layers
  or transaction layers. The latter is needed to improve performance.

behavioural changes:
  Zero vertex and keys (i.e. delete requests) are not optimised out
  until the last layer is written to the database.

* Provide differential layers for Aristo transaction stack

why:
  Significant performance improvement
This commit is contained in:
Jordan Hrycaj 2023-12-19 12:39:23 +00:00 committed by GitHub
parent 3675cd6736
commit ffa8ad2246
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
45 changed files with 1033 additions and 631 deletions

View File

@ -16,7 +16,7 @@ import
stew/interval_set,
../../aristo,
../aristo_walk/persistent,
".."/[aristo_desc, aristo_get, aristo_vid]
".."/[aristo_desc, aristo_get, aristo_layers, aristo_vid]
const
Vid2 = @[VertexID(2)].toHashSet
@ -148,24 +148,23 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
# Check top layer cache against backend
if cache:
if db.top.dirty:
if db.dirty:
return err((VertexID(0),CheckBeCacheIsDirty))
# Check structural table
for (vid,vtx) in db.top.sTab.pairs:
for (vid,vtx) in db.layersWalkVtx:
let lbl = db.layersGetLabel(vid).valueOr:
# A `kMap[]` entry must exist.
if not db.top.kMap.hasKey vid:
return err((vid,CheckBeCacheKeyMissing))
if vtx.isValid:
# Register existing vid against backend generator state
discard vids.reduce Interval[VertexID,uint64].new(vid,vid)
else:
# Some vertex is to be deleted, the key must be empty
let lbl = db.top.kMap.getOrVoid vid
if lbl.isValid:
return err((vid,CheckBeCacheKeyNonEmpty))
# There must be a representation on the backend DB
if db.getVtxBE(vid).isErr:
# There must be a representation on the backend DB unless in a TX
if db.getVtxBE(vid).isErr and db.stack.len == 0:
return err((vid,CheckBeCacheVidUnsynced))
# Register deleted vid against backend generator state
discard vids.merge Interval[VertexID,uint64].new(vid,vid)
@ -185,10 +184,10 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
# Check key table
var list: seq[VertexID]
for (vid,lbl) in db.top.kMap.pairs:
for (vid,lbl) in db.layersWalkLabel:
list.add vid
let vtx = db.getVtx vid
if not db.top.sTab.hasKey(vid) and not vtx.isValid:
if db.layersGetVtx(vid).isErr and not vtx.isValid:
return err((vid,CheckBeCacheKeyDangling))
if not lbl.isValid or relax:
continue
@ -202,7 +201,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
# Check vGen
let
vGen = db.top.vGen.vidReorg.toHashSet
vGen = db.vGen.vidReorg.toHashSet
vGenExpected = vids.invTo(HashSet[VertexID])
delta = vGenExpected -+- vGen # symmetric difference
if 0 < delta.len:

View File

@ -11,10 +11,10 @@
{.push raises: [].}
import
std/[sequtils, sets, tables],
std/[sequtils, sets],
eth/[common, trie/nibbles],
results,
".."/[aristo_desc, aristo_get, aristo_serialise, aristo_utils]
".."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise, aristo_utils]
# ------------------------------------------------------------------------------
# Public functions
@ -26,8 +26,8 @@ proc checkTopStrict*(
# No need to specify zero keys if implied by a leaf path with valid target
# vertex ID (i.e. not deleted).
var zeroKeys: HashSet[VertexID]
for (vid,vtx) in db.top.sTab.pairs:
let lbl = db.top.kMap.getOrVoid vid
for (vid,vtx) in db.layersWalkVtx:
let lbl = db.layersGetLabelOrVoid vid
if not vtx.isValid:
if lbl.isValid:
@ -42,13 +42,13 @@ proc checkTopStrict*(
if lbl.key != node.digestTo(HashKey):
return err((vid,CheckStkVtxKeyMismatch))
let revVids = db.top.pAmk.getOrVoid lbl
let revVids = db.layersGetLebalOrVoid lbl
if not revVids.isValid:
return err((vid,CheckStkRevKeyMissing))
if vid notin revVids:
return err((vid,CheckStkRevKeyMismatch))
elif not db.top.dirty or not db.top.kMap.hasKey vid:
elif not db.dirty or db.layersGetLabel(vid).isErr:
# So `vtx` exists but not `lbl`, so the cache is supposed to be dirty and
# vertex has a zero entry.
return err((vid,CheckStkVtxKeyMissing))
@ -56,14 +56,14 @@ proc checkTopStrict*(
else: # Empty key flags key is for update
zeroKeys.incl vid
for (vid,key) in db.top.kMap.pairs:
for (vid,key) in db.layersWalkLabel:
if not key.isValid and vid notin zeroKeys:
if not db.getVtx(vid).isValid:
return err((vid,CheckStkKeyStrayZeroEntry))
let
pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0)
sTabVtxCount = db.top.sTab.values.toSeq.filterIt(it.isValid).len
pAmkVtxCount = db.layersWalkLebal.toSeq.mapIt(it[1]).foldl(a + b.len, 0)
sTabVtxCount = db.layersWalkVtx.toSeq.mapIt(it[1]).filterIt(it.isValid).len
# Non-zero values must sum up the same
if pAmkVtxCount + zeroKeys.len < sTabVtxCount:
@ -75,26 +75,26 @@ proc checkTopStrict*(
proc checkTopProofMode*(
db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] =
if 0 < db.top.pPrf.len:
for vid in db.top.pPrf:
let vtx = db.top.sTab.getOrVoid vid
if 0 < db.pPrf.len:
for vid in db.pPrf:
let vtx = db.layersGetVtxOrVoid vid
if vtx.isValid:
let node = vtx.toNode(db).valueOr:
return err((vid,CheckRlxVtxIncomplete))
let lbl = db.top.kMap.getOrVoid vid
let lbl = db.layersGetlabelOrVoid vid
if not lbl.isValid:
return err((vid,CheckRlxVtxKeyMissing))
if lbl.key != node.digestTo(HashKey):
return err((vid,CheckRlxVtxKeyMismatch))
let revVids = db.top.pAmk.getOrVoid lbl
let revVids = db.layersGetLebalOrVoid lbl
if not revVids.isValid:
return err((vid,CheckRlxRevKeyMissing))
if vid notin revVids:
return err((vid,CheckRlxRevKeyMismatch))
else:
for (vid,lbl) in db.top.kMap.pairs:
for (vid,lbl) in db.layersWalkLabel:
if lbl.isValid: # Otherwise to be deleted
let vtx = db.getVtx vid
if vtx.isValid:
@ -103,25 +103,24 @@ proc checkTopProofMode*(
if lbl.key != node.digestTo(HashKey):
return err((vid,CheckRlxVtxKeyMismatch))
let revVids = db.top.pAmk.getOrVoid lbl
let revVids = db.layersGetLebalOrVoid lbl
if not revVids.isValid:
return err((vid,CheckRlxRevKeyMissing))
if vid notin revVids:
return err((vid,CheckRlxRevKeyMismatch))
ok()
proc checkTopCommon*(
db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] =
# Some `kMap[]` entries may be void indicating backend deletion
let
kMapCount = db.top.kMap.values.toSeq.filterIt(it.isValid).len
kMapNilCount = db.top.kMap.len - kMapCount
kMapCount = db.layersWalkLabel.toSeq.mapIt(it[1]).filterIt(it.isValid).len
kMapNilCount = db.layersWalkLabel.toSeq.len - kMapCount
# Collect leafs and check deleted entries
var nNilVtx = 0
for (vid,vtx) in db.top.sTab.pairs:
for (vid,vtx) in db.layersWalkVtx:
if vtx.isValid:
case vtx.vType:
of Leaf:
@ -140,11 +139,10 @@ proc checkTopCommon*(
return err((vid,CheckAnyVtxExtPfxMissing))
else:
nNilVtx.inc
discard db.getVtxBE(vid).valueOr:
return err((vid,CheckAnyVidVtxMissing))
if not db.top.kMap.hasKey vid:
let rc = db.layersGetLabel vid
if rc.isErr:
return err((vid,CheckAnyVtxEmptyKeyMissing))
if db.top.kMap.getOrVoid(vid).isValid:
if rc.value.isValid:
return err((vid,CheckAnyVtxEmptyKeyExpected))
# If present, there are at least as many deleted hashes as there are deleted
@ -152,20 +150,20 @@ proc checkTopCommon*(
if kMapNilCount != 0 and kMapNilCount < nNilVtx:
return err((VertexID(0),CheckAnyVtxEmptyKeyMismatch))
let pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0)
let pAmkVtxCount = db.layersWalkLebal.toSeq.mapIt(it[1]).foldl(a + b.len, 0)
if pAmkVtxCount != kMapCount:
var knownKeys: HashSet[VertexID]
for (key,vids) in db.top.pAmk.pairs:
for (key,vids) in db.layersWalkLebal:
for vid in vids:
if not db.top.kMap.hasKey(vid):
if db.layersGetLabel(vid).isErr:
return err((vid,CheckAnyRevVtxMissing))
if vid in knownKeys:
return err((vid,CheckAnyRevVtxDup))
knownKeys.incl vid
return err((VertexID(0),CheckAnyRevCountMismatch)) # should not apply(!)
for vid in db.top.pPrf:
if not db.top.kMap.hasKey(vid):
for vid in db.pPrf:
if db.layersGetLabel(vid).isErr:
return err((vid,CheckAnyVtxLockWithoutKey))
ok()

View File

@ -15,15 +15,45 @@ import
eth/[common, trie/nibbles],
results,
stew/byteutils,
"."/[aristo_constants, aristo_desc, aristo_get, aristo_hike],
./aristo_desc/desc_backend,
./aristo_init/[memory_db, memory_only, rocks_db],
./aristo_filter/filter_scheduler
./aristo_filter/filter_scheduler,
"."/[aristo_constants, aristo_desc, aristo_hike, aristo_layers]
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc orDefault(db: AristoDbRef): AristoDbRef =
if db.isNil: AristoDbRef(top: LayerRef()) else: db
proc del(xMap: var VidsByLabelTab; lbl: HashLabel; vid: VertexID) =
# Update `xMap`
var vidsLen = -1
xMap.withValue(lbl,value):
value[].excl vid
vidsLen = value[].len
if vidsLen == 0:
xMap.del lbl
proc del(xMap: var VidsByLabelTab; lbl: HashLabel; vids: HashSet[VertexID]) =
for vid in vids:
xMap.del(lbl, vid)
proc add(xMap: var VidsByLabelTab; lbl: HashLabel; vid: VertexID) =
xMap.withValue(lbl,value):
value[].incl vid
do: # else if not found
xMap[lbl] = @[vid].toHashSet
func cmp(a, b: HashLabel): int =
if a.root != b.root:
a.root.cmp b.root
else:
a.key.cmp b.key
# --------------------------
proc toHex(w: VertexID): string =
w.uint64.toHex
@ -45,31 +75,16 @@ proc sortedKeys(sTab: Table[VertexID,VertexRef]): seq[VertexID] =
proc sortedKeys(pPrf: HashSet[VertexID]): seq[VertexID] =
pPrf.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys(pAmk: Table[HashLabel,VertexID]): seq[HashLabel] =
pAmk.keys.toSeq.sorted cmp
proc sortedKeys(pAmk: VidsByLabelTab): seq[HashLabel] =
pAmk.keys.toSeq.sorted cmp
proc toPfx(indent: int; offset = 0): string =
if 0 < indent+offset: "\n" & " ".repeat(indent+offset) else: ""
proc lidVidUpdate(
db: AristoDbRef;
root: VertexID;
lid: HashKey;
vid: VertexID;
): string =
if lid.isValid and vid.isValid:
let lbl = HashLabel(root: root, key: lid)
if not db.top.isNil:
let vids = db.top.pAmk.getOrVoid lbl
if vids.isValid:
if vid notin vids:
result = "(!)"
return
block:
let vids = db.xMap.getOrVoid lbl
if vids.isValid:
if vid notin vids:
result = "(!)"
return
db.xMap.append(lbl, vid)
proc squeeze(s: string; hex = false; ignLen = false): string =
## For long strings print `begin..end` only
if hex:
@ -105,9 +120,10 @@ proc stripZeros(a: string; toExp = false): string =
proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 =
if lbl.isValid:
if not db.top.isNil:
let vids = db.top.pAmk.getOrVoid lbl
block:
let vids = db.layersGetLebalOrVoid lbl
if vids.isValid:
db.xMap.del(lbl, vids)
return vids.sortedKeys[0].uint64
block:
let vids = db.xMap.getOrVoid lbl
@ -116,6 +132,29 @@ proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 =
# ---------------------
proc ppLabelOk(
db: AristoDbRef;
root: VertexID;
key: HashKey;
vid: VertexID;
): string =
if key.isValid and vid.isValid:
let
lbl = HashLabel(root: root, key: key)
vids = db.layersGetLebalOrVoid lbl
if vids.isValid:
db.xMap.del(lbl, vids)
if vid notin vids:
result = "(!)"
return
block:
let vids = db.xMap.getOrVoid lbl
if vids.isValid:
if vid notin vids:
result = "(!)"
return
db.xMap.add(lbl,vid)
proc ppVid(vid: VertexID; pfx = true): string =
if pfx:
result = "$"
@ -124,6 +163,15 @@ proc ppVid(vid: VertexID; pfx = true): string =
else:
result &= "ø"
proc ppVids(vids: HashSet[VertexID]): string =
result = "{"
for vid in vids.toSeq.sorted:
result = "$"
if vid.isValid:
result &= vid.toHex.stripZeros.toLowerAscii
else:
result &= "ø"
func ppCodeHash(h: Hash256): string =
result = "¢"
if h == Hash256():
@ -168,15 +216,17 @@ proc ppVidList(vGen: openArray[VertexID]): string =
# "{" & vGen.sortedKeys.mapIt(it.ppVid).join(",") & "}"
proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string =
proc getVids: HashSet[VertexID] =
if not db.top.isNil:
let vids = db.top.pAmk.getOrVoid HashLabel(root: root, key: key)
if vids.isValid:
return vids
proc getVids(): tuple[vids: HashSet[VertexID], xMapTag: string] =
let lbl = HashLabel(root: root, key: key)
block:
let vids = db.xMap.getOrVoid HashLabel(root: root, key: key)
let vids = db.layersGetLebalOrVoid lbl
if vids.isValid:
return vids
db.xMap.del(lbl, vids)
return (vids, "")
block:
let vids = db.xMap.getOrVoid lbl
if vids.isValid:
return (vids, "+")
if pfx:
result = "£"
if key.len == 0 or key.to(Hash256) == Hash256():
@ -186,12 +236,12 @@ proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string =
else:
let
tag = if key.len < 32: "[#" & $key.len & "]" else: ""
vids = getVids()
(vids, xMapTag) = getVids()
if vids.isValid:
if not pfx and 0 < tag.len:
result &= "$"
if 1 < vids.len: result &= "{"
result &= vids.sortedKeys.mapIt(it.ppVid(pfx=false)).join(",")
result &= vids.sortedKeys.mapIt(it.ppVid(pfx=false) & xMapTag).join(",")
if 1 < vids.len: result &= "}"
result &= tag
return
@ -236,9 +286,9 @@ proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
if not nd.isValid:
result = "ø"
else:
if db.top.isNil or not vid.isValid or vid in db.top.pPrf:
if not vid.isValid or vid in db.pPrf:
result = ["L(", "X(", "B("][nd.vType.ord]
elif vid in db.top.kMap:
elif db.layersGetLabel(vid).isOk:
result = ["l(", "x(", "b("][nd.vType.ord]
else:
result = ["ł(", "€(", "þ("][nd.vType.ord]
@ -257,7 +307,7 @@ proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
proc ppSTab(
sTab: Table[VertexID,VertexRef];
db = AristoDbRef();
db: AristoDbRef;
indent = 4;
): string =
"{" & sTab.sortedKeys
@ -267,9 +317,9 @@ proc ppSTab(
proc ppLTab(
lTab: Table[LeafTie,VertexID];
db: AristoDbRef;
indent = 4;
): string =
let db = AristoDbRef()
"{" & lTab.sortedKeys
.mapIt((it, lTab.getOrVoid it))
.mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")")
@ -281,16 +331,25 @@ proc ppPPrf(pPrf: HashSet[VertexID]): string =
proc ppXMap*(
db: AristoDbRef;
kMap: Table[VertexID,HashLabel];
pAmk: Table[HashLabel,HashSet[VertexID]];
pAmk: VidsByLabelTab;
indent: int;
): string =
let pfx = indent.toPfx(1)
var dups: HashSet[VertexID]
for vids in pAmk.values:
if 1 < vids.len:
dups = dups + vids
var
multi: HashSet[VertexID]
oops: HashSet[VertexID]
block:
var vids: HashSet[VertexID]
for w in pAmk.values:
for v in w:
if v in vids:
oops.incl v
else:
vids.incl v
if 1 < w.len:
multi = multi + w
# Vertex IDs without forward mapping `kMap: VertexID -> HashLabel`
var revOnly: Table[VertexID,HashLabel]
@ -347,7 +406,7 @@ proc ppXMap*(
for vid in kMap.sortedKeys:
let lbl = kMap.getOrVoid vid
if lbl.isValid:
cache.add (vid.uint64, lbl.vidCode(db), vid in dups)
cache.add (vid.uint64, lbl.vidCode(db), vid in multi)
let vids = pAmk.getOrVoid lbl
if (0 < vids.len and vid notin vids) or lbl.key.len < 32:
cache[^1][2] = true
@ -452,7 +511,7 @@ proc ppLayer(
proc doPrefix(s: string; dataOk: bool): string =
var rc: string
if tagOk:
rc = pfy & s & (if dataOk: pfx2 else: " ")
rc = pfy & s & (if dataOk: pfx2 else: "")
pfy = pfx1
else:
rc = pfy
@ -464,35 +523,35 @@ proc ppLayer(
result &= "<layer>".doPrefix(false)
if vGenOk:
let
tLen = layer.vGen.len
tLen = layer.final.vGen.len
info = "vGen(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.vGen.ppVidList
result &= info.doPrefix(0 < tLen) & layer.final.vGen.ppVidList
if sTabOk:
let
tLen = layer.sTab.len
tLen = layer.delta.sTab.len
info = "sTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.sTab.ppSTab(db,indent+2)
result &= info.doPrefix(0 < tLen) & layer.delta.sTab.ppSTab(db,indent+2)
if lTabOk:
let
tlen = layer.lTab.len
tlen = layer.final.lTab.len
info = "lTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.lTab.ppLTab(indent+2)
result &= info.doPrefix(0 < tLen) & layer.final.lTab.ppLTab(db,indent+2)
if kMapOk:
let
tLen = layer.kMap.len
ulen = layer.pAmk.len
tLen = layer.delta.kMap.len
ulen = layer.delta.pAmk.len
lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen
info = "kMap(" & lInf & ")"
result &= info.doPrefix(0 < tLen + uLen)
result &= db.ppXMap(layer.kMap, layer.pAmk, indent+2)
result &= db.ppXMap(layer.delta.kMap, layer.delta.pAmk, indent+2)
if pPrfOk:
let
tLen = layer.pPrf.len
tLen = layer.final.pPrf.len
info = "pPrf(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.pPrf.ppPPrf
result &= info.doPrefix(0 < tLen) & layer.final.pPrf.ppPPrf
if 0 < nOKs:
let
info = if layer.dirty: "dirty" else: "clean"
info = if layer.final.dirty: "dirty" else: "clean"
result &= info.doPrefix(false)
# ------------------------------------------------------------------------------
@ -510,14 +569,14 @@ proc pp*(w: Hash256): string =
proc pp*(w: HashKey; sig: MerkleSignRef): string =
w.ppKey(sig.db, sig.root)
proc pp*(w: HashKey; db = AristoDbRef(); root = VertexID(1)): string =
w.ppKey(db, root)
proc pp*(w: HashKey; db = AristoDbRef(nil); root = VertexID(1)): string =
w.ppKey(db.orDefault, root)
proc pp*(lbl: HashLabel, db = AristoDbRef()): string =
lbl.ppLabel(db)
proc pp*(lbl: HashLabel, db = AristoDbRef(nil)): string =
lbl.ppLabel(db.orDefault)
proc pp*(lty: LeafTie, db = AristoDbRef()): string =
lty.ppLeafTie(db)
proc pp*(lty: LeafTie, db = AristoDbRef(nil)): string =
lty.ppLeafTie(db.orDefault)
proc pp*(vid: VertexID): string =
vid.ppVid
@ -540,11 +599,11 @@ proc pp*(a: openArray[QidAction]): string =
proc pp*(vGen: openArray[VertexID]): string =
vGen.ppVidList
proc pp*(p: PayloadRef, db = AristoDbRef()): string =
p.ppPayload(db)
proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string =
p.ppPayload(db.orDefault)
proc pp*(nd: VertexRef, db = AristoDbRef()): string =
nd.ppVtx(db, VertexID(0))
proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string =
nd.ppVtx(db.orDefault, VertexID(0))
proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
if not nd.isValid:
@ -560,14 +619,14 @@ proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
of Extension:
result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & ","
result &= nd.key[0].ppKey(db,root)
result &= db.lidVidUpdate(root, nd.key[0], nd.eVid)
result &= db.ppLabelOk(root, nd.key[0], nd.eVid)
of Branch:
result &= "["
for n in 0..15:
if nd.bVid[n].isValid or nd.key[n].isValid:
result &= nd.bVid[n].ppVid
result &= db.lidVidUpdate(root, nd.key[n], nd.bVid[n]) & ","
result &= db.ppLabelOk(root, nd.key[n], nd.bVid[n]) & ","
result[^1] = ']'
result &= ",["
@ -579,40 +638,43 @@ proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
result &= ")"
proc pp*(nd: NodeRef): string =
var db = AristoDbRef()
nd.pp(db)
nd.pp(AristoDbRef(nil).orDefault)
proc pp*(
sTab: Table[VertexID,VertexRef];
db = AristoDbRef();
db = AristoDbRef(nil);
indent = 4;
): string =
sTab.ppSTab
sTab.ppSTab(db.orDefault)
proc pp*(lTab: Table[LeafTie,VertexID]; indent = 4): string =
lTab.ppLTab
proc pp*(
lTab: Table[LeafTie,VertexID];
db = AristoDbRef(nil);
indent = 4;
): string =
lTab.ppLTab(db.orDefault, indent)
proc pp*(pPrf: HashSet[VertexID]): string =
pPrf.ppPPrf
proc pp*(leg: Leg; db = AristoDbRef()): string =
proc pp*(leg: Leg; db = AristoDbRef(nil)): string =
let db = db.orDefault()
result = "(" & leg.wp.vid.ppVid & ","
if not db.top.isNil:
let lbl = db.top.kMap.getOrVoid leg.wp.vid
block:
let lbl = db.layersGetLabelOrVoid leg.wp.vid
if not lbl.isValid:
result &= "ø"
elif leg.wp.vid notin db.top.pAmk.getOrVoid lbl:
elif leg.wp.vid notin db.layersGetLebalOrVoid lbl:
result &= lbl.ppLabel(db)
result &= ","
if leg.backend:
result &= ""
result &= ","
if 0 <= leg.nibble:
result &= $leg.nibble.ppNibble
result &= "," & leg.wp.vtx.pp(db) & ")"
proc pp*(hike: Hike; db = AristoDbRef(); indent = 4): string =
let pfx = indent.toPfx(1)
proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string =
let
db = db.orDefault()
pfx = indent.toPfx(1)
result = "["
if hike.legs.len == 0:
result &= "(" & hike.root.ppVid & ")"
@ -624,30 +686,37 @@ proc pp*(hike: Hike; db = AristoDbRef(); indent = 4): string =
result &= "]"
proc pp*(kMap: Table[VertexID,Hashlabel]; indent = 4): string =
let db = AristoDbRef()
let db = AristoDbRef(nil).orDefault
"{" & kMap.sortedKeys
.mapIt((it,kMap.getOrVoid it))
.filterIt(it[1].isValid)
.mapIt((it, kMap.getOrVoid it))
.mapIt("(" & it[0].ppVid & "," & it[1].ppLabel(db) & ")")
.join("," & indent.toPfx(1)) & "}"
proc pp*(pAmk: Table[Hashlabel,VertexID]; indent = 4): string =
let db = AristoDbRef()
var rev = pAmk.pairs.toSeq.mapIt((it[1],it[0])).toTable
"{" & rev.sortedKeys
.mapIt((it,rev.getOrVoid it))
.filterIt(it[1].isValid)
.mapIt("(" & it[1].ppLabel(db) & "," & it[0].ppVid & ")")
proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string =
db.ppXMap(kMap, db.layersCc.delta.pAmk, indent)
proc pp*(
pAmk: Table[HashLabel,VertexID];
db = AristoDbRef(nil);
indent = 4;
): string =
let db = db.orDefault
"{" & pAmk.sortedkeys
.mapIt((it, pAmk.getOrVoid it))
.mapIt("(" & it[0].ppLabel(db) & "," & it[1].ppVid & ")")
.join("," & indent.toPfx(1)) & "}"
proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string =
db.ppXMap(kMap, db.top.pAmk, indent)
proc pp*(pAmk: VidsByLabelTab; db = AristoDbRef(nil); indent = 4): string =
let db = db.orDefault
"{" & pAmk.sortedkeys
.mapIt((it, pAmk.getOrVoid it))
.mapIt("(" & it[0].ppLabel(db) & "," & it[1].ppVids & ")")
.join("," & indent.toPfx(1)) & "}"
proc pp*(pAmk: VidsByLabel; db: AristoDbRef; indent = 4): string =
db.ppXMap(db.top.kMap, pAmk, indent)
# ---------------------
proc pp*(tx: AristoTxRef): string =
result = "(uid=" & $tx.txUid & ",lvl=" & $tx.level
result = "(uid=" & $tx.txUid & ",level=" & $tx.level
if not tx.parent.isNil:
result &= ", par=" & $tx.parent.txUid
result &= ")"
@ -655,7 +724,6 @@ proc pp*(tx: AristoTxRef): string =
proc pp*(wp: VidVtxPair; db: AristoDbRef): string =
"(" & wp.vid.pp & "," & wp.vtx.pp(db) & ")"
# ---------------------
proc pp*(
layer: LayerRef;
@ -691,7 +759,7 @@ proc pp*(
xTabOk: bool;
indent = 4;
): string =
db.top.pp(db, xTabOk=xTabOk, indent=indent)
db.layersCc.pp(db, xTabOk=xTabOk, indent=indent)
proc pp*(
db: AristoDbRef;
@ -700,15 +768,15 @@ proc pp*(
other = false;
indent = 4;
): string =
db.top.pp(db, xTabOk=xTabOk, kMapOk=kMapOk, other=other, indent=indent)
db.layersCc.pp(db, xTabOk=xTabOk, kMapOk=kMapOk, other=other, indent=indent)
proc pp*(
filter: FilterRef;
db = AristoDbRef();
db = AristoDbRef(nil);
root = VertexID(1);
indent = 4;
): string =
filter.ppFilter(db, root, indent)
filter.ppFilter(db.orDefault(), root, indent)
proc pp*(
be: BackendRef;
@ -732,7 +800,9 @@ proc pp*(
backendOk = false;
filterOk = true;
): string =
result = db.top.pp(db, indent=indent) & indent.toPfx
result = db.layersCc.pp(db, indent=indent) & indent.toPfx
if 0 < db.stack.len:
result &= " level=" & $db.stack.len & indent.toPfx
if backendOk:
result &= db.backend.pp(db)
elif filterOk:

View File

@ -20,7 +20,8 @@ import
chronicles,
eth/[common, trie/nibbles],
results,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_vid]
"."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_path,
aristo_vid]
logScope:
topics = "aristo-delete"
@ -59,24 +60,16 @@ proc nullifyKey(
vid: VertexID; # Vertex IDs to clear
) =
# Register for void hash (to be recompiled)
let lbl = db.top.kMap.getOrVoid vid
db.top.pAmk.del lbl
db.top.kMap[vid] = VOID_HASH_LABEL
db.top.dirty = true # Modified top level cache
db.layersResLabel vid
proc disposeOfVtx(
db: AristoDbRef; # Database, top layer
vid: VertexID; # Vertex IDs to clear
) =
# Remove entry
if db.getVtxBE(vid).isOk:
db.top.sTab[vid] = VertexRef(nil) # Will be propagated to backend
db.nullifyKey vid
else:
db.top.sTab.del vid
db.top.kMap.del vid
db.top.dirty = true # Modified top level cache
db.vidDispose vid
db.layersResVtx vid
db.layersResLabel vid
db.vidDispose vid # Recycle ID
# ------------------------------------------------------------------------------
# Private functions
@ -129,7 +122,7 @@ proc collapseBranch(
# Replace `br` (use `xt` as-is)
discard
db.top.sTab[xt.vid] = xt.vtx
db.layersPutVtx(xt.vid, xt.vtx)
ok()
@ -179,7 +172,7 @@ proc collapseExt(
# Replace ^2 by `^2 & vtx` (use `xt` as-is)
discard
db.top.sTab[xt.vid] = xt.vtx
db.layersPutVtx(xt.vid, xt.vtx)
ok()
@ -220,15 +213,15 @@ proc collapseLeaf(
of Branch: # (1)
# Replace `vtx` by `^2 & vtx` (use `lf` as-is)
par.vtx.bVid[hike.legs[^3].nibble] = lf.vid
db.top.sTab[par.vid] = par.vtx
db.top.sTab[lf.vid] = lf.vtx
db.layersPutVtx(par.vid, par.vtx)
db.layersPutVtx(lf.vid, lf.vtx)
# Make sure that there is a cache entry in case the leaf was pulled from
# the backend!
let
lfPath = hike.legsTo(hike.legs.len - 2, NibblesSeq) & lf.vtx.lPfx
tag = lfPath.pathToTag.valueOr:
return err((lf.vid,error))
db.top.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid
db.top.final.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid
return ok()
of Extension: # (2) or (3)
@ -243,19 +236,19 @@ proc collapseLeaf(
return err((gpr.vid,DelBranchExpexted))
db.disposeOfVtx par.vid # `par` is obsolete now
gpr.vtx.bVid[hike.legs[^4].nibble] = lf.vid
db.top.sTab[gpr.vid] = gpr.vtx
db.top.sTab[lf.vid] = lf.vtx
db.layersPutVtx(gpr.vid, gpr.vtx)
db.layersPutVtx(lf.vid, lf.vtx)
# Make sure that there is a cache entry in case the leaf was pulled from
# the backend!
let
lfPath = hike.legsTo(hike.legs.len - 3, NibblesSeq) & lf.vtx.lPfx
tag = lfPath.pathToTag.valueOr:
return err((lf.vid,error))
db.top.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid
db.top.final.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid
return ok()
# No grandparent, so ^3 is root vertex # (3)
db.top.sTab[par.vid] = lf.vtx
db.layersPutVtx(par.vid, lf.vtx)
# Continue below
of Leaf:
@ -264,7 +257,7 @@ proc collapseLeaf(
else: # (4)
# Replace ^2 by `^2 & vtx` (use `lf` as-is) # `br` is root vertex
db.nullifyKey br.vid # root was changed
db.top.sTab[br.vid] = lf.vtx
db.layersPutVtx(br.vid, lf.vtx)
# Continue below
# Common part for setting up `lf` as root vertex # Rest of (3) or (4)
@ -275,8 +268,8 @@ proc collapseLeaf(
# No need to update the cache unless `lf` is present there. The leaf path
# as well as the value associated with the leaf path has not been changed.
let lfTie = LeafTie(root: hike.root, path: rc.value)
if db.top.lTab.hasKey lfTie:
db.top.lTab[lfTie] = lf.vid
if db.top.final.lTab.hasKey lfTie:
db.top.final.lTab[lfTie] = lf.vid
# Clean up stale leaf vertex which has moved to root position
db.disposeOfVtx lf.vid
@ -289,7 +282,7 @@ proc collapseLeaf(
rootVtx.vType == Leaf:
let tag = rootVtx.lPfx.pathToTag.valueOr:
return err((hike.root,error))
db.top.lTab[LeafTie(root: hike.root, path: tag)] = hike.root
db.top.final.lTab[LeafTie(root: hike.root, path: tag)] = hike.root
ok()
@ -306,7 +299,7 @@ proc deleteImpl(
let lf = hike.legs[^1].wp
if lf.vtx.vType != Leaf:
return err((lf.vid,DelLeafExpexted))
if lf.vid in db.top.pPrf:
if lf.vid in db.pPrf:
return err((lf.vid, DelLeafLocked))
# Will be needed at the end. Just detect an error early enough
@ -332,12 +325,12 @@ proc deleteImpl(
# Unlink child vertex from structural table
br.vtx.bVid[hike.legs[^2].nibble] = VertexID(0)
db.top.sTab[br.vid] = br.vtx
db.layersPutVtx(br.vid, br.vtx)
# Clear all keys up to the root key
for n in 0 .. hike.legs.len - 2:
let vid = hike.legs[n].wp.vid
if vid in db.top.pPrf:
if vid in db.top.final.pPrf:
return err((vid, DelBranchLocked))
db.nullifyKey vid
@ -368,10 +361,10 @@ proc deleteImpl(
# Delete leaf entry
if leafVidBe.isValid:
# To be recorded on change history
db.top.lTab[lty] = VertexID(0)
db.top.final.lTab[lty] = VertexID(0)
else:
# No need to keep it any longer in cache
db.top.lTab.del lty
db.top.final.lTab.del lty
ok()

View File

@ -61,6 +61,10 @@ type
else:
rwDb: AristoDbRef ## Link to writable descriptor
VidVtxPair* = object
vid*: VertexID ## Table lookup vertex ID (if any)
vtx*: VertexRef ## Reference to vertex
AristoDbRef* = ref AristoDbObj
AristoDbObj* = object
## Three tier database object supporting distributed instances.
@ -74,7 +78,7 @@ type
dudes: DudesRef ## Related DB descriptors
# Debugging data below, might go away in future
xMap*: VidsByLabel ## For pretty printing, extends `pAmk`
xMap*: VidsByLabelTab ## For pretty printing, extends `pAmk`
AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].}
## Generic call back function/closure.
@ -148,6 +152,12 @@ func hash*(db: AristoDbRef): Hash =
## Table/KeyedQueue/HashSet mixin
cast[pointer](db).hash
func dup*(wp: VidVtxPair): VidVtxPair =
## Safe copy of `wp` argument
VidVtxPair(
vid: wp.vid,
vtx: wp.vtx.dup)
# ------------------------------------------------------------------------------
# Public functions, `dude` related
# ------------------------------------------------------------------------------
@ -226,7 +236,7 @@ proc fork*(
if not rawTopLayer:
let rc = clone.backend.getIdgFn()
if rc.isOk:
clone.top.vGen = rc.value
clone.top.final.vGen = rc.value
elif rc.error != GetIdgNotFound:
return err(rc.error)

View File

@ -130,7 +130,6 @@ type
CheckRlxRevKeyMissing
CheckRlxRevKeyMismatch
CheckAnyVidVtxMissing
CheckAnyVtxEmptyKeyMissing
CheckAnyVtxEmptyKeyExpected
CheckAnyVtxEmptyKeyMismatch
@ -216,10 +215,12 @@ type
FilSiblingsCommitUnfinshed
# Get functions from `aristo_get.nim`
GetLeafMissing
GetKeyUpdateNeeded
GetLeafNotFound
GetVtxNotFound
GetKeyNotFound
GetKeyTempLocked
GetFilNotFound
GetIdgNotFound
GetFqsNotFound

View File

@ -33,20 +33,7 @@ type
codeHash*: Hash256
PayloadType* = enum
## Type of leaf data. On the Aristo backend, data are serialised as
## follows:
##
## * Opaque data => opaque data, marked `0xff`
## * `Account` object => RLP encoded data, marked `0xaa`
## * `AristoAccount` object => serialised account, marked `0x99` or smaller
##
## On deserialisation from the Aristo backend, there is no reverse for an
## `Account` object. It rather is kept as an RLP encoded `Blob`.
##
## * opaque data, marked `0xff` => `RawData`
## * RLP encoded data, marked `0xaa` => `RlpData`
## * serialised account, marked `0x99` or smaller => `AccountData`
##
## Type of leaf data.
RawData ## Generic data
RlpData ## Marked RLP encoded
AccountData ## `Aristo account` with vertex IDs links
@ -81,7 +68,7 @@ type
# ----------------------
FilterRef* = ref object
## Delta layer with expanded sequences for quick access
## Delta layer with expanded sequences for quick access.
fid*: FilterID ## Filter identifier
src*: Hash256 ## Applicable to this state root
trg*: Hash256 ## Resulting state root (i.e. `kMap[1]`)
@ -89,20 +76,55 @@ type
kMap*: Table[VertexID,HashKey] ## Filter Merkle hash key mapping
vGen*: seq[VertexID] ## Filter unique vertex ID generator
VidsByLabel* = Table[HashLabel,HashSet[VertexID]]
VidsByLabelTab* = Table[HashLabel,HashSet[VertexID]]
## Reverse lookup searching `VertexID` by the hash key/label.
LayerDelta* = object
## Delta layers are stacked implying a tables hierarchy. Table entries on
## a higher level take precedence over lower layer table entries. So an
## existing key-value table entry of a layer on top supersedes same key
## entries on all lower layers. A missing entry on a higher layer indicates
## that the key-value pair might be fond on some lower layer.
##
## A zero value (`nil`, empty hash etc.) is considered a missing key-value
## pair. Tables on the `LayerDelta` may have stray zero key-value pairs for
## missing entries due to repeated transactions while adding and deleting
## entries. There is no need to purge redundant zero entries.
##
## As for `kMap[]` entries, there might be a zero value entry relating
## (i.e. indexed by the same vertex ID) to an `sTab[]` non-zero value entry
## (of the same layer or a lower layer whatever comes first.) This entry
## is kept as a reminder that the hash value of the `kMap[]` entry needs
## to be re-compiled.
##
## The reasoning behind the above scenario is that every vertex held on the
## `sTab[]` tables must correspond to a hash entry held on the `kMap[]`
## tables. So a corresponding zero value or missing entry produces an
## inconsistent state that must be resolved.
##
sTab*: Table[VertexID,VertexRef] ## Structural vertex table
kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping
pAmk*: VidsByLabelTab ## Reverse `kMap` entries, hash key lookup
LayerFinal* = object
## Final tables fully supersede tables on lower layers when stacked as a
## whole. Missing entries on a higher layer are the final state (for the
## top layer version of the table.)
##
## These structures are used for tables which are typically smaller than
## the ones on the `LayerDelta` object.
##
lTab*: Table[LeafTie,VertexID] ## Access path to leaf vertex
pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator
dirty*: bool ## Needs to be hashified if `true`
LayerRef* = ref object
## Hexary trie database layer structures. Any layer holds the full
## change relative to the backend.
sTab*: Table[VertexID,VertexRef] ## Structural vertex table
lTab*: Table[LeafTie,VertexID] ## Direct access, path to leaf vertex
kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping
pAmk*: VidsByLabel ## Reverse `kMap` entries, hash key lookup
pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator
delta*: LayerDelta ## Most structural tables held as deltas
final*: LayerFinal ## Stored as latest version
txUid*: uint ## Transaction identifier if positive
dirty*: bool ## Needs to be hashified if `true`
# ----------------------
@ -138,25 +160,6 @@ const
func max(a, b, c: int): int =
max(max(a,b),c)
# ------------------------------------------------------------------------------
# Public helpers: `Table[HashLabel,seq[VertexID]]`
# ------------------------------------------------------------------------------
proc append*(pAmk: var VidsByLabel; lbl: HashLabel; vid: VertexID) =
pAmk.withValue(lbl,value):
value[].incl vid
do: # else if not found
pAmk[lbl] = @[vid].toHashSet
proc delete*(pAmk: var VidsByLabel; lbl: HashLabel; vid: VertexID) =
var deleteItem = false
pAmk.withValue(lbl,value):
value[].excl vid
if value[].len == 0:
deleteItem = true
if deleteItem:
pAmk.del lbl
# ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef`
# ------------------------------------------------------------------------------
@ -293,18 +296,6 @@ func dup*(node: NodeRef): NodeRef =
bVid: node.bVid,
key: node.key)
func dup*(layer: LayerRef): LayerRef =
## Duplicate layer.
result = LayerRef(
lTab: layer.lTab,
kMap: layer.kMap,
pAmk: layer.pAmk,
pPrf: layer.pPrf,
vGen: layer.vGen,
txUid: layer.txUid)
for (k,v) in layer.sTab.pairs:
result.sTab[k] = v.dup
# ---------------
func to*(node: NodeRef; T: type VertexRef): T =

View File

@ -21,25 +21,6 @@ import
./aristo_filter/[
filter_fifos, filter_helpers, filter_merge, filter_reverse, filter_siblings]
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
func bulk*(filter: FilterRef): int =
## Some measurement for the size of the filter calculated as the length of
## the `sTab[]` table plus the lengthof the `kMap[]` table. This can be used
## to set a threshold when to flush the staging area to the backend DB to
## be used in `stow()`.
##
## The `filter` argument may be `nil`, i.e. `FilterRef(nil).bulk == 0`
if filter.isNil: 0 else: filter.sTab.len + filter.kMap.len
func bulk*(layer: LayerRef): int =
## Variant of `bulk()` for layers rather than filters.
##
## The `layer` argument may be `nil`, i.e. `LayerRef(nil).bulk == 0`
if layer.isNil: 0 else: layer.sTab.len + layer.kMap.len
# ------------------------------------------------------------------------------
# Public functions, construct filters
# ------------------------------------------------------------------------------
@ -64,7 +45,7 @@ proc fwdFilter*(
# Register the Merkle hash keys of the MPT where this reverse filter will be
# applicable: `be => fg`
let (srcRoot, trgRoot) = block:
let rc = db.getLayerStateRoots(layer, chunkedMpt)
let rc = db.getLayerStateRoots(layer.delta, chunkedMpt)
if rc.isOK:
(rc.value.be, rc.value.fg)
elif rc.error == FilPrettyPointlessLayer:
@ -74,9 +55,9 @@ proc fwdFilter*(
ok FilterRef(
src: srcRoot,
sTab: layer.sTab,
kMap: layer.kMap.pairs.toSeq.mapIt((it[0],it[1].key)).toTable,
vGen: layer.vGen.vidReorg, # Compact recycled IDs
sTab: layer.delta.sTab,
kMap: layer.delta.kMap.pairs.toSeq.mapIt((it[0],it[1].key)).toTable,
vGen: layer.final.vGen.vidReorg, # Compact recycled IDs
trg: trgRoot)
# ------------------------------------------------------------------------------
@ -212,7 +193,7 @@ proc forkBackLog*(
let
instr = ? be.fifosFetch(backSteps = episode+1)
clone = ? db.fork(rawToplayer = true)
clone.top.vGen = instr.fil.vGen
clone.top.final.vGen = instr.fil.vGen
clone.roFilter = instr.fil
ok clone

View File

@ -32,7 +32,7 @@ type
proc getLayerStateRoots*(
db: AristoDbRef;
layer: LayerRef;
delta: LayerDelta;
chunkedMpt: bool;
): Result[StateRootPair,AristoError] =
## Get the Merkle hash key for target state root to arrive at after this
@ -51,7 +51,7 @@ proc getLayerStateRoots*(
spr.be = sprBeKey.to(Hash256)
spr.fg = block:
let lbl = layer.kMap.getOrVoid VertexID(1)
let lbl = delta.kMap.getOrVoid VertexID(1)
if lbl.isValid:
lbl.key.to(Hash256)
else:
@ -60,14 +60,14 @@ proc getLayerStateRoots*(
return ok(spr)
if chunkedMpt:
let vids = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: sprBeKey)
if VertexID(1) in vids:
let lbl = HashLabel(root: VertexID(1), key: sprBeKey)
if VertexID(1) in delta.pAmk.getOrVoid lbl:
spr.fg = spr.be
return ok(spr)
if layer.sTab.len == 0 and
layer.kMap.len == 0 and
layer.pAmk.len == 0:
if delta.sTab.len == 0 and
delta.kMap.len == 0 and
delta.pAmk.len == 0:
return err(FilPrettyPointlessLayer)
err(FilStateRootMismatch)

View File

@ -16,21 +16,7 @@
import
std/tables,
results,
./aristo_desc
type
VidVtxPair* = object
vid*: VertexID ## Table lookup vertex ID (if any)
vtx*: VertexRef ## Reference to vertex
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
func dup*(wp: VidVtxPair): VidVtxPair =
VidVtxPair(
vid: wp.vid,
vtx: wp.vtx.dup)
"."/[aristo_desc, aristo_layers]
# ------------------------------------------------------------------------------
# Public functions
@ -124,17 +110,21 @@ proc getLeaf*(
db: AristoDbRef;
lty: LeafTie;
): Result[VidVtxPair,AristoError] =
## Get the vertex from the top layer by the `Patricia Trie` path. This
## function does not search on the `backend` layer.
let vid = db.top.lTab.getOrVoid lty
## Get the leaf path from the cache layers and look up the database for a
## leaf node.
let vid = db.lTab.getOrVoid lty
if not vid.isValid:
return err(GetLeafNotFound)
let vtx = db.top.sTab.getOrVoid vid
if not vtx.isValid:
return err(GetVtxNotFound)
block body:
let vtx = db.layersGetVtx(vid).valueOr:
break body
if vtx.isValid:
return ok(VidVtxPair(vid: vid, vtx: vtx))
ok VidVtxPair(vid: vid, vtx: vtx)
# The leaf node cannot be on the backend. It was produced by a `merge()`
# action. So this is a system problem.
err(GetLeafMissing)
proc getLeafVtx*(db: AristoDbRef; lty: LeafTie): VertexRef =
## Variant of `getLeaf()` returning `nil` on error (while ignoring the
@ -147,44 +137,59 @@ proc getLeafVtx*(db: AristoDbRef; lty: LeafTie): VertexRef =
# ------------------
proc getVtxRc*(db: AristoDbRef; vid: VertexID): Result[VertexRef,AristoError] =
## Cascaded attempt to fetch a vertex from the top layer or the backend.
## Cascaded attempt to fetch a vertex from the cache layers or the backend.
##
if db.top.sTab.hasKey vid:
# If the vertex is to be deleted on the backend, a `VertexRef(nil)` entry
# is kept in the local table in which case it is OK to return this value.
let vtx = db.top.sTab.getOrVoid vid
block body:
# If the vertex marked is to be deleted on the backend, a `VertexRef(nil)`
# entry is kept in the local table in which case it is returned as the
# error symbol `GetVtxNotFound`.
let vtx = db.layersGetVtx(vid).valueOr:
break body
if vtx.isValid:
return ok(vtx)
return ok vtx
else:
return err(GetVtxNotFound)
db.getVtxBE vid
proc getVtx*(db: AristoDbRef; vid: VertexID): VertexRef =
## Cascaded attempt to fetch a vertex from the top layer or the backend.
## Cascaded attempt to fetch a vertex from the cache layers or the backend.
## The function returns `nil` on error or failure.
##
let rc = db.getVtxRc vid
if rc.isOk:
return rc.value
VertexRef(nil)
db.getVtxRc(vid).valueOr: VertexRef(nil)
proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
## Cascaded attempt to fetch a Merkle hash from the top layer or the backend.
## Cascaded attempt to fetch a Merkle hash from the cache layers or the
## backend.
##
if db.top.kMap.hasKey vid:
# If the key is to be deleted on the backend, a `VOID_HASH_LABEL` entry
# is kept on the local table in which case it is OK to return this value.
let lbl = db.top.kMap.getOrVoid vid
if lbl.isValid:
return ok lbl.key
return err(GetKeyTempLocked)
block body:
let key = db.layersGetKey(vid).valueOr:
break body
# If there is a zero value label, the entry is either marked for being
# updated or for deletion on the database. So check below.
if key.isValid:
return ok key
# The zero value label does not refer to an update mark if there is no
# valid vertex (either on the cache or the backend whatever comes first.)
let vtx = db.layersGetVtx(vid).valueOr:
# There was no vertex on the cache. So there must be one on the backend
# (the reason for the key label to exist, at all.)
return err(GetKeyUpdateNeeded)
if vtx.isValid:
return err(GetKeyUpdateNeeded)
else:
# The vertex is to be deleted. So is the value label.
return err(GetVtxNotFound)
db.getKeyBE vid
proc getKey*(db: AristoDbRef; vid: VertexID): HashKey =
## Cascaded attempt to fetch a vertex from the top layer or the backend.
## Cascaded attempt to fetch a vertex from the cache layers or the backend.
## The function returns `nil` on error or failure.
##
db.getKeyRc(vid).valueOr:
return VOID_HASH_KEY
db.getKeyRc(vid).valueOr: VOID_HASH_KEY
# ------------------------------------------------------------------------------
# End

View File

@ -60,8 +60,8 @@ import
eth/common,
results,
stew/byteutils,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_serialise, aristo_utils,
aristo_vid]
"."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_serialise,
aristo_utils]
type
FollowUpVid = object
@ -213,8 +213,9 @@ proc updateSchedule(
unresolved = error
break findlegInx
vid = leaf.vid
if not db.top.kMap.getOrVoid(vid).key.isValid:
db.vidAttach(HashLabel(root: root, key: node.digestTo(HashKey)), vid)
if not db.layersGetKeyOrVoid(vid).isValid:
db.layersPutLabel(vid, HashLabel(root: root, key: node.digestTo(HashKey)))
# Clean up unnecessary leaf node from previous session
wff.base.del vid
wff.setNextLink(wff.pool, wff.base.getOrVoid vid)
@ -231,7 +232,7 @@ proc updateSchedule(
break findlegInx
# All done this `hike`
if db.top.kMap.getOrVoid(root).key.isValid:
if db.layersGetKeyOrVoid(root).isValid:
wff.root.excl root
wff.completed.incl root
return
@ -294,10 +295,10 @@ proc hashify*(
deleted = false # Need extra check for orphaned vertices
wff: WidthFirstForest # Leaf-to-root traversal structure
if not db.top.dirty:
if not db.dirty:
return ok wff.completed
for (lky,lfVid) in db.top.lTab.pairs:
for (lky,lfVid) in db.lTab.pairs:
let
rc = lky.hikeUp db
hike = rc.to(Hike)
@ -329,10 +330,11 @@ proc hashify*(
# is the task to search for unresolved node keys and add glue paths to
# the width-first schedule.
var unresolved: HashSet[VertexID]
for (vid,lbl) in db.top.kMap.pairs:
for (vid,lbl) in db.layersWalkLabel:
if not lbl.isValid and
vid notin wff and
(vid notin db.top.sTab or db.top.sTab.getOrVoid(vid).isValid):
vid notin wff:
let rc = db.layersGetVtx vid
if rc.isErr or rc.value.isValid:
unresolved.incl vid
let glue = unresolved.cloudConnect(db, wff.base)
@ -376,7 +378,7 @@ proc hashify*(
# Add the child vertices to `redo[]` for the schedule `base[]` list.
for w in error:
if w notin wff.base:
if not db.top.sTab.hasKey w:
if db.layersGetVtx(w).isErr:
# Ooops, should have been marked for update
return err((w,HashifyNodeUnresolved))
redo[w] = FollowUpVid(root: val.root, toVid: vid)
@ -384,7 +386,7 @@ proc hashify*(
# Could resolve => update Merkle hash
let key = node.digestTo(HashKey)
db.vidAttach(HashLabel(root: val.root, key: key), vid)
db.layersPutLabel(vid, HashLabel(root: val.root, key: key))
# Set follow up link for next round
wff.setNextLink(redo, val)
@ -393,15 +395,15 @@ proc hashify*(
wff.base.swap redo
# Update root nodes
for vid in wff.root - db.top.pPrf:
for vid in wff.root - db.pPrf:
# Convert root vertex to a node.
let node = db.getVtx(vid).toNode(db,stopEarly=false).valueOr:
return err((vid,HashifyRootNodeUnresolved))
db.vidAttach(HashLabel(root: vid, key: node.digestTo(HashKey)), vid)
db.layersPutLabel(vid, HashLabel(root: vid, key: node.digestTo(HashKey)))
wff.completed.incl vid
db.top.dirty = false
db.top.lTab.clear
db.top.final.dirty = false
db.top.final.lTab.clear
ok wff.completed
# ------------------------------------------------------------------------------

View File

@ -20,7 +20,6 @@ type
## For constructing a `VertexPath`
wp*: VidVtxPair ## Vertex ID and data ref
nibble*: int8 ## Next vertex selector for `Branch` (if any)
backend*: bool ## Sources from backend if `true`
Hike* = object
## Trie traversal path
@ -86,16 +85,13 @@ proc hikeUp*(
while vid.isValid:
var leg = Leg(wp: VidVtxPair(vid: vid), nibble: -1)
# Fetch vertex to be checked on this lap
leg.wp.vtx = db.top.sTab.getOrVoid vid
if not leg.wp.vtx.isValid:
# Register vertex fetched from backend (if any)
let rc = db.getVtxBE vid
if rc.isErr:
# Fetch next vertex
leg.wp.vtx = db.getVtxRc(vid).valueOr:
if error != GetVtxNotFound:
return err((hike,error))
if hike.legs.len == 0:
return err((hike,HikeEmptyPath))
break
leg.backend = true
leg.wp.vtx = rc.value
case leg.wp.vtx.vType:
of Leaf:

View File

@ -42,7 +42,7 @@ proc newAristoRdbDbRef(
be.closeFn(flush = false)
return err(rc.error)
rc.value
ok AristoDbRef(top: LayerRef(vGen: vGen), backend: be)
ok AristoDbRef(top: LayerRef(final: LayerFinal(vGen: vGen)), backend: be)
# ------------------------------------------------------------------------------
# Public database constuctors, destructor

View File

@ -0,0 +1,306 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/[algorithm, sequtils, sets, tables],
eth/common,
results,
./aristo_desc
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
func dup(sTab: Table[VertexID,VertexRef]): Table[VertexID,VertexRef] =
  ## Deep-copy helper: clone a structural vertex table so the copy owns its
  ## own `VertexRef` instances instead of sharing references with the source.
  for vid, vtx in sTab.pairs:
    result[vid] = vtx.dup
func dup(delta: LayerDelta): LayerDelta =
  ## Deep-copy helper for a delta layer. Only the vertex table needs a
  ## per-entry copy (its values are refs); the label tables hold value
  ## types and may be copied wholesale.
  LayerDelta(
    sTab: delta.sTab.dup, # explicit dup for ref values
    kMap: delta.kMap,
    pAmk: delta.pAmk)
func stackGetLebalOrVoid(db: AristoDbRef; lbl: HashLabel): HashSet[VertexID] =
  ## Helper: scan the transaction stack from the newest to the oldest layer
  ## and return the first reverse-lookup vertex ID set registered for `lbl`.
  ## An empty set is returned when no stack layer holds an entry.
  for layer in db.stack.reversed:
    if layer.delta.pAmk.hasKey lbl:
      return layer.delta.pAmk.getOrVoid lbl
  # fall through: `result` stays the zero-initialised (empty) set
# ------------------------------------------------------------------------------
# Public getters: lazy value lookup for read only versions
# ------------------------------------------------------------------------------
func lTab*(db: AristoDbRef): Table[LeafTie,VertexID] =
  ## Getter: leaf path lookup table held on the final (non-delta) part of
  ## the top layer.
  db.top.final.lTab

func pPrf*(db: AristoDbRef): HashSet[VertexID] =
  ## Getter: set of locked vertices (proof nodes) held on the top layer.
  db.top.final.pPrf

func vGen*(db: AristoDbRef): seq[VertexID] =
  ## Getter: unique vertex ID generator state held on the top layer.
  db.top.final.vGen

func dirty*(db: AristoDbRef): bool =
  ## Getter: `true` when the cache layers were modified, i.e. the database
  ## needs to be hashified.
  db.top.final.dirty
# ------------------------------------------------------------------------------
# Public getters/helpers
# ------------------------------------------------------------------------------
func nLayersVtx*(db: AristoDbRef): int =
  ## Total number of vertex entries over all cache layers (top plus stack.)
  result = db.top.delta.sTab.len
  for layer in db.stack:
    result += layer.delta.sTab.len

func nLayersLabel*(db: AristoDbRef): int =
  ## Total number of key/label entries over all cache layers (top plus
  ## stack.)
  result = db.top.delta.kMap.len
  for layer in db.stack:
    result += layer.delta.kMap.len

func nLayersLebal*(db: AristoDbRef): int =
  ## Total number of key/label reverse lookup entries over all cache layers
  ## (top plus stack.)
  result = db.top.delta.pAmk.len
  for layer in db.stack:
    result += layer.delta.pAmk.len
# ------------------------------------------------------------------------------
# Public functions: get variants
# ------------------------------------------------------------------------------
proc layersGetVtx*(db: AristoDbRef; vid: VertexID): Result[VertexRef,void] =
  ## Find a vertex on the cache layers, searching the top layer first and
  ## then the transaction stack from newest to oldest. An `ok()` result may
  ## wrap a `nil` vertex when the cache stores a deletion marker that way;
  ## `err()` means no cache layer has an entry at all.
  for layer in @[db.top] & db.stack.reversed:
    if layer.delta.sTab.hasKey vid:
      return ok(layer.delta.sTab.getOrVoid vid)
  err()
proc layersGetVtxOrVoid*(db: AristoDbRef; vid: VertexID): VertexRef =
  ## Convenience wrapper around `layersGetVtx()` returning `nil` when the
  ## cache layers have no entry.
  let rc = db.layersGetVtx vid
  if rc.isOk: rc.value else: VertexRef(nil)
proc layersGetLabel*(db: AristoDbRef; vid: VertexID): Result[HashLabel,void] =
  ## Find a hash label (containing the `HashKey`) on the cache layers,
  ## searching the top layer first, then the stack from newest to oldest.
  ## An `ok()` result may wrap a void label when the cache stores it that
  ## way; `err()` means no cache layer has an entry for `vid`.
  for layer in @[db.top] & db.stack.reversed:
    if layer.delta.kMap.hasKey vid:
      return ok(layer.delta.kMap.getOrVoid vid)
  err()
proc layersGetlabelOrVoid*(db: AristoDbRef; vid: VertexID): HashLabel =
  ## Convenience wrapper around `layersGetLabel()` returning the void hash
  ## label when the cache layers have no entry.
  let rc = db.layersGetLabel vid
  if rc.isOk: rc.value else: VOID_HASH_LABEL
proc layersGetKey*(db: AristoDbRef; vid: VertexID): Result[HashKey,void] =
  ## Variant of `layersGetLabel()` returning only the `HashKey` part of the
  ## label (note that `lbl.isValid == lbl.key.isValid`.)
  let rc = db.layersGetLabel vid
  if rc.isErr:
    return err()
  ok(rc.value.key)
proc layersGetKeyOrVoid*(db: AristoDbRef; vid: VertexID): HashKey =
  ## Convenience wrapper around `layersGetKey()` returning the void hash
  ## key when the cache layers have no entry.
  let rc = db.layersGetKey vid
  if rc.isOk: rc.value else: VOID_HASH_KEY
proc layersGetLebal*(
    db: AristoDbRef;
    lbl: HashLabel;
      ): Result[HashSet[VertexID],void] =
  ## Inverse of `layersGetKey()`: for the argument `lbl`, find the set of
  ## vertex IDs for which `layersGetLabel()` would return this very `lbl`.
  ## The top layer is searched first, then the stack from newest to oldest;
  ## `err()` means no cache layer has a reverse lookup entry for `lbl`.
  for layer in @[db.top] & db.stack.reversed:
    if layer.delta.pAmk.hasKey lbl:
      return ok(layer.delta.pAmk.getOrVoid lbl)
  err()
proc layersGetLebalOrVoid*(db: AristoDbRef; lbl: HashLabel): HashSet[VertexID] =
  ## Convenience wrapper around `layersGetLebal()` returning the empty
  ## vertex ID set when the cache layers have no reverse lookup entry.
  let rc = db.layersGetLebal lbl
  if rc.isOk: rc.value else: EmptyVidSet
# ------------------------------------------------------------------------------
# Public functions: put variants
# ------------------------------------------------------------------------------
proc layersPutVtx*(db: AristoDbRef; vid: VertexID; vtx: VertexRef) =
  ## Store a (potentially nil) vertex `vtx` for `vid` on the top layer. A
  ## nil vertex acts as a delete request which is kept on the cache layers
  ## rather than optimised out (it is resolved when writing to the backend.)
  db.top.delta.sTab[vid] = vtx
  db.top.final.dirty = true # flag top cache layers as modified
proc layersResVtx*(db: AristoDbRef; vid: VertexID) =
  ## Shortcut for `db.layersPutVtx(vid, VertexRef(nil))`, i.e. register a
  ## cache-level delete request for the vertex `vid`.
  db.layersPutVtx(vid, VertexRef(nil))
proc layersPutLabel*(db: AristoDbRef; vid: VertexID; lbl: HashLabel) =
  ## Store a (potentially void) hash label for `vid` on the top layer and
  ## keep the reverse lookup table `pAmk` consistent with the change.
  # Get previous label (void if the top layer has none)
  let blb = db.top.delta.kMap.getOrVoid vid

  # Update label on `vid->label` mapping table
  db.top.delta.kMap[vid] = lbl
  db.top.final.dirty = true # Modified top cache layers

  # Clear previous value on reverse table if it has changed
  if blb.isValid and blb != lbl:
    db.top.delta.pAmk.withValue(blb, value):
      value[].excl vid
    do: # no top-layer entry yet: shadow the lower layers' set minus `vid`,
        # so the stale mapping cannot resurface from the stack
      db.top.delta.pAmk[blb] = db.stackGetLebalOrVoid(blb) - @[vid].toHashSet

  # Add updated value on reverse table if non-zero
  if lbl.isValid:
    db.top.delta.pAmk.withValue(lbl, value):
      value[].incl vid
    do: # no top-layer entry yet: merge `vid` into the set inherited from
        # the lower layers (the top layer is only a differential)
      db.top.delta.pAmk[lbl] = db.stackGetLebalOrVoid(lbl) + @[vid].toHashSet
proc layersResLabel*(db: AristoDbRef; vid: VertexID) =
  ## Shortcut for `db.layersPutLabel(vid, VOID_HASH_LABEL)`, i.e. register
  ## a cache-level delete/update request for the label of `vid`.
  db.layersPutLabel(vid, VOID_HASH_LABEL)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc layersMergeOnto*(src: LayerRef; trg: LayerRef): LayerRef {.discardable.} =
  ## Merge the argument layer `src` into `trg` and return `trg`. On
  ## conflicting `sTab`/`kMap` entries the `src` value wins; reverse lookup
  ## sets are unioned. The result layer's `txUid` is reset to `0`.
  trg.final = src.final
  trg.txUid = 0

  for vid, vtx in src.delta.sTab.pairs:
    trg.delta.sTab[vid] = vtx
  for vid, lbl in src.delta.kMap.pairs:
    trg.delta.kMap[vid] = lbl
  for lbl, vids in src.delta.pAmk.pairs:
    if trg.delta.pAmk.hasKey lbl:
      trg.delta.pAmk[lbl] = trg.delta.pAmk.getOrVoid(lbl) + vids
    else:
      trg.delta.pAmk[lbl] = vids
  trg
proc layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
  ## Provide a collapsed copy of layers up to a particular transaction level.
  ## If the `level` argument is too large, the maximum transaction level is
  ## used. For the result layer, the `txUid` value is set to `0`.
  ##
  ## NOTE(review): after clamping, `level` is only used to choose the branch
  ## below — the merge loops always fold in the whole stack plus the top
  ## layer. Confirm this is intended for partial (`level < db.stack.len`)
  ## requests.
  ## NOTE(review): the stack is folded via `reversed`, i.e. newest first,
  ## so entries from older layers overwrite newer ones in `layersMergeOnto`
  ## — verify the intended precedence.
  let level = min(level, db.stack.len)

  result = LayerRef(final: db.top.final) # Pre-merged/final values

  # Merge stack into its bottom layer
  if level <= 0 and db.stack.len == 0:
    result.delta = db.top.delta.dup # Explicit dup for ref values
  else:
    # now: 0 < level <= db.stack.len (assumes callers pass level > 0
    # whenever the stack is non-empty — TODO confirm)
    result.delta = db.stack[0].delta.dup # Explicit dup for ref values

    # Merge stack: structural vertex table and hash key mapping
    for w in db.stack.reversed:
      w.layersMergeOnto result

    # Merge top layer
    db.top.layersMergeOnto result
# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------
iterator layersWalkVtx*(
    db: AristoDbRef;
    seen: var HashSet[VertexID];
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Walk over all `(VertexID,VertexRef)` pairs on the cache layers in
  ## unsorted order. Newer layers shadow older ones: each vertex ID is
  ## yielded at most once, with the value from the newest layer holding it
  ## (nil deletion markers included.)
  ##
  ## The argument `seen` accumulates every visited vertex ID. It must not
  ## be modified from outside while the iterator is active.
  for vid, vtx in db.top.delta.sTab.pairs:
    yield (vid, vtx)
    seen.incl vid
  for layer in db.stack.reversed:
    for vid, vtx in layer.delta.sTab.pairs:
      if vid notin seen:
        yield (vid, vtx)
        seen.incl vid
iterator layersWalkVtx*(
    db: AristoDbRef;
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Variant of `layersWalkVtx()` that manages the `seen` set internally.
  var visited: HashSet[VertexID]
  for pair in db.layersWalkVtx visited:
    yield pair
iterator layersWalkLabel*(
    db: AristoDbRef;
      ): tuple[vid: VertexID, lbl: HashLabel] =
  ## Walk over all `(VertexID,HashLabel)` pairs on the cache layers in
  ## unsorted order. Newer layers shadow older ones, so each vertex ID is
  ## yielded at most once.
  var visited: HashSet[VertexID]
  for vid, lbl in db.top.delta.kMap.pairs:
    yield (vid, lbl)
    visited.incl vid
  for layer in db.stack.reversed:
    for vid, lbl in layer.delta.kMap.pairs:
      if vid notin visited:
        yield (vid, lbl)
        visited.incl vid
iterator layersWalkLebal*(
    db: AristoDbRef;
      ): tuple[lbl: HashLabel, vids: HashSet[VertexID]] =
  ## Walk over all `(HashLabel,HashSet[VertexID])` reverse lookup pairs on
  ## the cache layers. Newer layers shadow older ones, so each label is
  ## yielded at most once.
  var visited: HashSet[HashLabel]
  for lbl, vids in db.top.delta.pAmk.pairs:
    yield (lbl, vids)
    visited.incl lbl
  for layer in db.stack.reversed:
    for lbl, vids in layer.delta.pAmk.pairs:
      if lbl notin visited:
        yield (lbl, vids)
        visited.incl lbl
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -31,8 +31,8 @@ import
results,
stew/keyed_queue,
../../sync/protocol/snap/snap_types,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_serialise,
aristo_vid]
"."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_path,
aristo_serialise, aristo_vid]
logScope:
topics = "aristo-merge"
@ -81,10 +81,7 @@ proc nullifyKey(
vid: VertexID; # Vertex IDs to clear
) =
# Register for void hash (to be recompiled)
let lbl = db.top.kMap.getOrVoid vid
db.top.pAmk.del lbl
db.top.kMap[vid] = VOID_HASH_LABEL
db.top.dirty = true # Modified top level cache
db.layersResLabel vid
proc clearMerkleKeys(
db: AristoDbRef; # Database, top layer
@ -99,8 +96,8 @@ proc setVtxAndKey(
vid: VertexID; # Vertex IDs to add/clear
vtx: VertexRef; # Vertex to add
) =
db.top.sTab[vid] = vtx
db.nullifyKey vid
db.layersPutVtx(vid, vtx)
db.layersResLabel vid
# -----------
@ -150,15 +147,12 @@ proc insertBranch(
var
leafLeg = Leg(nibble: -1)
# Will modify top level cache
db.top.dirty = true
# Install `forkVtx`
block:
# Clear Merkle hashes (aka hash keys) unless proof mode.
if db.top.pPrf.len == 0:
if db.pPrf.len == 0:
db.clearMerkleKeys(hike, linkID)
elif linkID in db.top.pPrf:
elif linkID in db.pPrf:
return err(MergeNonBranchProofModeLock)
if linkVtx.vType == Leaf:
@ -174,7 +168,7 @@ proc insertBranch(
local = db.vidFetch(pristine = true)
lty = LeafTie(root: hike.root, path: rc.value)
db.top.lTab[lty] = local # update leaf path lookup cache
db.top.final.lTab[lty] = local # update leaf path lookup cache
db.setVtxAndKey(local, linkVtx)
linkVtx.lPfx = linkVtx.lPfx.slice(1+n)
forkVtx.bVid[linkInx] = local
@ -254,18 +248,15 @@ proc concatBranchAndLeaf(
return err(MergeRootBranchLinkBusy)
# Clear Merkle hashes (aka hash keys) unless proof mode.
if db.top.pPrf.len == 0:
if db.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid)
elif brVid in db.top.pPrf:
elif brVid in db.pPrf:
return err(MergeBranchProofModeLock) # Ooops
# Append branch vertex
var okHike = Hike(root: hike.root, legs: hike.legs)
okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
# Will modify top level cache
db.top.dirty = true
# Append leaf vertex
let
vid = db.vidFetch(pristine = true)
@ -310,14 +301,11 @@ proc topIsBranchAddLeaf(
#
# <-------- immutable ------------> <---- mutable ----> ..
#
if db.top.pPrf.len == 0:
if db.pPrf.len == 0:
# Not much else that can be done here
debug "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid,
nibble, linkID, leafPfx=hike.tail
# Will modify top level cache
db.top.dirty = true
# Reuse placeholder entry in table
let vtx = VertexRef(
vType: Leaf,
@ -365,9 +353,6 @@ proc topIsExtAddLeaf(
# <-------- immutable -------------->
#
# Will modify top level cache
db.top.dirty = true
let vtx = VertexRef(
vType: Leaf,
lPfx: extVtx.ePfx & hike.tail,
@ -392,13 +377,10 @@ proc topIsExtAddLeaf(
if linkID.isValid:
return err(MergeRootBranchLinkBusy)
# Will modify top level cache
db.top.dirty = true
# Clear Merkle hashes (aka hash keys) unless proof mode
if db.top.pPrf.len == 0:
if db.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid)
elif brVid in db.top.pPrf:
elif brVid in db.pPrf:
return err(MergeBranchProofModeLock)
let
@ -410,7 +392,6 @@ proc topIsExtAddLeaf(
brVtx.bVid[nibble] = vid
db.setVtxAndKey(brVid, brVtx)
db.setVtxAndKey(vid, vtx)
db.top.dirty = true # Modified top level cache
okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
okHike.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)
@ -430,13 +411,10 @@ proc topIsEmptyAddLeaf(
if rootVtx.bVid[nibble].isValid:
return err(MergeRootBranchLinkBusy)
# Will modify top level cache
db.top.dirty = true
# Clear Merkle hashes (aka hash keys) unless proof mode
if db.top.pPrf.len == 0:
if db.pPrf.len == 0:
db.clearMerkleKeys(hike, hike.root)
elif hike.root in db.top.pPrf:
elif hike.root in db.pPrf:
return err(MergeBranchProofModeLock)
let
@ -476,17 +454,15 @@ proc updatePayload(
lPfx: leafLeg.wp.vtx.lPfx,
lData: payload)
var hike = hike
hike.legs[^1].backend = false
hike.legs[^1].wp.vtx = vtx
# Modify top level cache
db.top.dirty = true
db.setVtxAndKey(vid, vtx)
db.top.lTab[leafTie] = vid
db.top.final.lTab[leafTie] = vid
db.clearMerkleKeys(hike, vid)
ok hike
elif leafLeg.backend:
elif db.layersGetVtx(leafLeg.wp.vid).isErr:
err(MergeLeafPathOnBackendAlready)
else:
@ -537,7 +513,7 @@ proc mergeNodeImpl(
# order `root->.. ->leaf`.
let
hashLbl = HashLabel(root: rootVid, key: hashKey)
vids = db.top.pAmk.getOrVoid(hashLbl).toSeq
vids = db.layersGetLebalOrVoid(hashLbl).toSeq
isRoot = rootVid in vids
if vids.len == 0:
return err(MergeRevVidMustHaveBeenCached)
@ -546,11 +522,11 @@ proc mergeNodeImpl(
return err(MergeHashKeyRevLookUpGarbled)
# Use the first vertex ID from the `vis` list as representant for all others
let lbl = db.top.kMap.getOrVoid vids[0]
let lbl = db.layersGetLabelOrVoid vids[0]
if lbl == hashLbl:
if db.top.sTab.hasKey vids[0]:
if db.layersGetVtx(vids[0]).isOk:
for n in 1 ..< vids.len:
if not db.top.sTab.hasKey vids[n]:
if db.layersGetVtx(vids[n]).isErr:
return err(MergeHashKeyRevLookUpGarbled)
# This is typically considered OK
return err(MergeHashKeyCachedAlready)
@ -572,7 +548,7 @@ proc mergeNodeImpl(
# Verify that all `vids` entries are similar
for n in 1 ..< vids.len:
let w = vids[n]
if lbl != db.top.kMap.getOrVoid(w) or db.top.sTab.hasKey(w):
if lbl != db.layersGetLabelOrVoid(w) or db.layersGetVtx(w).isOk:
return err(MergeHashKeyRevLookUpGarbled)
if not hasVtx:
# Prefer existing node which has all links available, already.
@ -589,26 +565,27 @@ proc mergeNodeImpl(
let eLbl = HashLabel(root: rootVid, key: node.key[0])
if not hasVtx:
# Brand new reverse lookup link for this vertex
vtx.eVid = db.vidAttach eLbl
vtx.eVid = db.vidFetch
db.layersPutLabel(vtx.eVid, eLbl)
elif not vtx.eVid.isValid:
return err(MergeNodeVtxDiffersFromExisting)
db.top.pAmk.append(eLbl, vtx.eVid)
db.layersPutLabel(vtx.eVid, eLbl)
of Branch:
for n in 0..15:
if node.key[n].isValid:
let bLbl = HashLabel(root: rootVid, key: node.key[n])
if not hasVtx:
# Brand new reverse lookup link for this vertex
vtx.bVid[n] = db.vidAttach bLbl
vtx.bVid[n] = db.vidFetch
db.layersPutLabel(vtx.bVid[n], bLbl)
elif not vtx.bVid[n].isValid:
return err(MergeNodeVtxDiffersFromExisting)
db.top.pAmk.append(bLbl, vtx.bVid[n])
db.layersPutLabel(vtx.bVid[n], bLbl)
for w in vids:
db.top.pPrf.incl w
db.top.final.pPrf.incl w
if not hasVtx or db.getKey(w) != hashKey:
db.top.sTab[w] = vtx.dup
db.top.dirty = true # Modified top level cache
db.layersPutVtx(w, vtx.dup)
ok()
@ -629,7 +606,7 @@ proc merge*(
##
# Check whether the leaf is on the database and payloads match
block:
let vid = db.top.lTab.getOrVoid leafTie
let vid = db.lTab.getOrVoid leafTie
if vid.isValid:
let vtx = db.getVtx vid
if vtx.isValid and vtx.lData == payload:
@ -672,7 +649,7 @@ proc merge*(
return err(MergeAssemblyFailed) # Ooops
# Update leaf acccess cache
db.top.lTab[leafTie] = okHike.legs[^1].wp.vid
db.top.final.lTab[leafTie] = okHike.legs[^1].wp.vid
ok okHike
@ -820,10 +797,9 @@ proc merge*(
block:
let
lbl = HashLabel(root: rootVid, key: rootKey)
vids = db.top.pAmk.getOrVoid lbl
vids = db.layersGetLebalOrVoid lbl
if not vids.isValid:
db.top.pAmk.append(lbl, rootVid)
db.top.dirty = true # Modified top level cache
db.layersPutlabel(rootVid, lbl)
# Process over chains in reverse mode starting with the root node. This
# allows the algorithm to find existing nodes on the backend.
@ -875,7 +851,7 @@ proc merge*(
return ok rootVid
if not key.isValid:
db.vidAttach(HashLabel(root: rootVid, key: rootLink), rootVid)
db.layersPutLabel(rootVid, HashLabel(root: rootVid, key: rootLink))
return ok rootVid
else:
let key = db.getKey VertexID(1)
@ -884,13 +860,13 @@ proc merge*(
# Otherwise assign unless valid
if not key.isValid:
db.vidAttach(HashLabel(root: VertexID(1), key: rootLink), VertexID(1))
db.layersPutLabel(VertexID(1),HashLabel(root: VertexID(1), key: rootLink))
return ok VertexID(1)
# Create and assign a new root key
if not rootVid.isValid:
let vid = db.vidFetch
db.vidAttach(HashLabel(root: vid, key: rootLink), vid)
db.layersPutLabel(vid, HashLabel(root: vid, key: rootLink))
return ok vid
err(MergeRootKeyDiffersForVid)

View File

@ -15,7 +15,7 @@
import
results,
"."/[aristo_desc, aristo_filter, aristo_get, aristo_hashify]
"."/[aristo_desc, aristo_filter, aristo_get, aristo_layers, aristo_hashify]
func isTop*(tx: AristoTxRef): bool
func level*(db: AristoDbRef): int
@ -24,10 +24,6 @@ func level*(db: AristoDbRef): int
# Private helpers
# ------------------------------------------------------------------------------
func fromVae(err: (VertexID,AristoError)): AristoError =
## Map error pair to error reason component
err[1]
func getDbDescFromTopTx(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
if not tx.isTop():
return err(TxNotTopTx)
@ -108,35 +104,32 @@ proc forkTx*(
##
let db = tx.db
# Provide new top layer
var topLayer: LayerRef
# Verify `tx` argument
if db.txRef == tx:
topLayer = db.top.dup
elif tx.level < db.stack.len:
topLayer = db.stack[tx.level].dup
else:
if db.top.txUid != tx.txUid:
return err(TxArgStaleTx)
if topLayer.txUid != tx.txUid:
elif db.stack.len <= tx.level:
return err(TxArgStaleTx)
elif db.stack[tx.level].txUid != tx.txUid:
return err(TxArgStaleTx)
topLayer.txUid = 1
# Provide new empty stack layer
let stackLayer = block:
let rc = db.getIdgBE()
if rc.isOk:
LayerRef(vGen: rc.value)
LayerRef(final: LayerFinal(vGen: rc.value))
elif rc.error == GetIdgNotFound:
LayerRef()
else:
return err(rc.error)
let txClone = ? db.fork(rawToplayer = true)
# Set up clone associated to `db`
txClone.top = topLayer # is a deep copy
txClone.stack = @[stackLayer]
txClone.roFilter = db.roFilter # no need to copy contents (done when updated)
let txClone = ? db.fork(rawToplayer = true)
txClone.top = db.layersCc tx.level # Provide tx level 1 stack
txClone.stack = @[stackLayer] # Zero level stack
txClone.roFilter = db.roFilter # No need to copy (done when updated)
txClone.backend = db.backend
txClone.top.txUid = 1
txClone.txUidGen = 1
# Install transaction similar to `tx` on clone
@ -146,10 +139,9 @@ proc forkTx*(
level: 1)
if not dontHashify:
let rc = txClone.hashify()
if rc.isErr:
discard txClone.hashify().valueOr:
discard txClone.forget()
return err(rc.error.fromVae)
return err(error[1])
ok(txClone)
@ -166,15 +158,14 @@ proc forkTop*(
if db.txRef.isNil:
let dbClone = ? db.fork(rawToplayer = true)
dbClone.top = db.top.dup # is a deep copy
dbClone.roFilter = db.roFilter # no need to copy contents when updated
dbClone.top = db.layersCc # Is a deep copy
dbClone.roFilter = db.roFilter # No need to copy contents when updated
dbClone.backend = db.backend
if not dontHashify:
let rc = dbClone.hashify()
if rc.isErr:
discard dbClone.hashify().valueOr:
discard dbClone.forget()
return err(rc.error.fromVae)
return err(error[1])
return ok(dbClone)
db.txRef.forkTx dontHashify
@ -215,8 +206,10 @@ proc txBegin*(db: AristoDbRef): Result[AristoTxRef,AristoError] =
if db.level != db.stack.len:
return err(TxStackGarbled)
db.stack.add db.top.dup # push (save and use top later)
db.top.txUid = db.getTxUid()
db.stack.add db.top
db.top = LayerRef(
final: db.top.final,
txUid: db.getTxUid)
db.txRef = AristoTxRef(
db: db,
@ -252,13 +245,20 @@ proc commit*(
## previous transaction is returned if there was any.
##
let db = ? tx.getDbDescFromTopTx()
discard ? db.hashify().mapErr fromVae
discard db.hashify().valueOr:
return err(error[1])
# Keep top and discard layer below
db.top.txUid = db.stack[^1].txUid
# Replace the top two layers by its merged version
let merged = db.top.layersMergeOnto db.stack[^1]
# Install `merged` layer
db.top = merged
db.stack.setLen(db.stack.len-1)
db.txRef = tx.parent
if 0 < db.stack.len:
db.txRef.txUid = db.getTxUid
db.top.txUid = db.txRef.txUid
db.txRef = db.txRef.parent
ok()
@ -278,7 +278,8 @@ proc collapse*(
if commit:
# For commit, hashify the current layer if requested and install it
discard ? db.hashify().mapErr fromVae
discard db.hashify().valueOr:
return err(error[1])
db.top.txUid = 0
db.stack.setLen(0)
@ -316,36 +317,29 @@ proc stow*(
if persistent and not db.canResolveBackendFilter():
return err(TxBackendNotWritable)
discard ? db.hashify().mapErr fromVae
discard db.hashify().valueOr:
return err(error[1])
let fwd = ? db.fwdFilter(db.top, chunkedMpt).mapErr fromVae
let fwd = db.fwdFilter(db.top, chunkedMpt).valueOr:
return err(error[1])
if fwd.isValid:
# Merge `top` layer into `roFilter`
? db.merge(fwd).mapErr fromVae
db.top = LayerRef(vGen: db.roFilter.vGen)
db.merge(fwd).isOkOr:
return err(error[1])
db.top = LayerRef(final: LayerFinal(vGen: db.roFilter.vGen))
if persistent:
? db.resolveBackendFilter()
db.roFilter = FilterRef(nil)
# Delete or clear stack and clear top
db.stack.setLen(0)
db.top = LayerRef(vGen: db.top.vGen, txUid: db.top.txUid)
# Delete/clear top
db.top = LayerRef(
final: LayerFinal(vGen: db.vGen),
txUid: db.top.txUid)
ok()
proc stow*(
db: AristoDbRef; # Database
stageLimit: int; # Policy based persistent storage
chunkedMpt = false; # Partial data (e.g. from `snap`)
): Result[void,AristoError] =
## Variant of `stow()` with the `persistent` argument replaced by
## `stageLimit < max(db.roFilter.bulk, db.top.bulk)`.
db.stow(
persistent = (stageLimit < max(db.roFilter.bulk, db.top.bulk)),
chunkedMpt = chunkedMpt)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -16,7 +16,7 @@
import
eth/common,
results,
"."/[aristo_desc, aristo_get]
"."/[aristo_desc, aristo_get, aristo_layers]
# ------------------------------------------------------------------------------
# Public functions, converters
@ -106,10 +106,13 @@ proc toNode*(
## storage root.
##
proc getKey(db: AristoDbRef; vid: VertexID; beOk: bool): HashKey =
block:
let lbl = db.top.kMap.getOrVoid vid
block body:
let lbl = db.layersGetLabel(vid).valueOr:
break body
if lbl.isValid:
return lbl.key
else:
return VOID_HASH_KEY
if beOk:
let rc = db.getKeyBE vid
if rc.isOk:
@ -137,7 +140,7 @@ proc toNode*(
for n in 0 .. 15:
let vid = vtx.bVid[n]
if vid.isValid:
let key = db.getKey(vid, beKeyOk)
let key = db.getKey(vid, beOk=beKeyOk)
if key.isValid:
node.key[n] = key
elif stopEarly:
@ -151,7 +154,7 @@ proc toNode*(
of Extension:
let
vid = vtx.eVid
key = db.getKey(vid, beKeyOk)
key = db.getKey(vid, beOk=beKeyOk)
if not key.isValid:
return err(@[vid])
let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)

View File

@ -14,8 +14,8 @@
{.push raises: [].}
import
std/[algorithm, sequtils, tables],
./aristo_desc
std/[algorithm, sequtils],
"."/[aristo_desc, aristo_layers]
# ------------------------------------------------------------------------------
# Public functions
@ -30,45 +30,44 @@ proc vidFetch*(db: AristoDbRef; pristine = false): VertexID =
## When the argument `pristine` is set `true`, the function guarantees to
## return a non-recycled, brand new vertex *ID* which is the preferred mode
## when creating leaf vertices.
let top = db.top
if top.vGen.len == 0:
if db.vGen.len == 0:
# Note that `VertexID(1)` is the root of the main trie
top.vGen = @[VertexID(3)]
db.top.final.vGen = @[VertexID(3)]
result = VertexID(2)
elif top.vGen.len == 1 or pristine:
result = top.vGen[^1]
top.vGen[^1] = result + 1
elif db.vGen.len == 1 or pristine:
result = db.vGen[^1]
db.top.final.vGen[^1] = result + 1
else:
result = top.vGen[^2]
top.vGen[^2] = top.vGen[^1]
top.vGen.setLen(top.vGen.len-1)
result = db.vGen[^2]
db.top.final.vGen[^2] = db.top.final.vGen[^1]
db.top.final.vGen.setLen(db.vGen.len-1)
proc vidPeek*(db: AristoDbRef): VertexID =
## Like `new()` without consuming this *ID*. It will return the *ID* that
## would be returned by the `new()` function.
case db.top.vGen.len:
case db.vGen.len:
of 0:
VertexID(2)
of 1:
db.top.vGen[^1]
db.vGen[^1]
else:
db.top.vGen[^2]
db.vGen[^2]
proc vidDispose*(db: AristoDbRef; vid: VertexID) =
## Recycle the argument `vtxID` which is useful after deleting entries from
## the vertex table to prevent the `VertexID` type key values small.
if VertexID(1) < vid:
if db.top.vGen.len == 0:
db.top.vGen = @[vid]
if db.vGen.len == 0:
db.top.final.vGen = @[vid]
else:
let topID = db.top.vGen[^1]
let topID = db.vGen[^1]
# Only store smaller numbers: all numberts larger than `topID`
# are free numbers
if vid < topID:
db.top.vGen[^1] = vid
db.top.vGen.add topID
db.top.final.vGen[^1] = vid
db.top.final.vGen.add topID
proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] =
## Return a compacted version of the argument vertex ID generator state
@ -95,17 +94,6 @@ proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] =
vGen
proc vidAttach*(db: AristoDbRef; lbl: HashLabel; vid: VertexID) =
## Attach (i.r. register) a Merkle hash key to a vertex ID.
db.top.pAmk.append(lbl, vid)
db.top.kMap[vid] = lbl
db.top.dirty = true # Modified top level cache
proc vidAttach*(db: AristoDbRef; lbl: HashLabel): VertexID {.discardable.} =
## Variant of `vidAttach()` with auto-generated vertex ID
result = db.vidFetch
db.vidAttach(lbl, result)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -10,9 +10,9 @@
# distributed except according to those terms.
import
std/[algorithm, sequtils, tables],
std/[sequtils, sets, tables],
results,
".."/[aristo_desc, aristo_get, aristo_init, aristo_utils]
".."/[aristo_desc, aristo_get, aristo_layers, aristo_init, aristo_utils]
# ------------------------------------------------------------------------------
# Public generic iterators
@ -129,11 +129,13 @@ iterator walkPairsImpl*[T](
): tuple[vid: VertexID, vtx: VertexRef] =
## Walk over all `(VertexID,VertexRef)` in the database. Note that entries
## are unsorted.
for (vid,vtx) in db.top.sTab.pairs:
var seen: HashSet[VertexID]
for (vid,vtx) in db.layersWalkVtx seen:
if vtx.isValid:
yield (vid,vtx)
for (_,vid,vtx) in walkVtxBeImpl[T](db):
if vid notin db.top.sTab and vtx.isValid:
if vid notin seen:
yield (vid,vtx)
iterator replicateImpl*[T](

View File

@ -579,7 +579,7 @@ func txTop*(
): CoreDbRc[AristoTxRef] =
base.adb.txTop.toRc(base.parent, info)
func txBegin*(
proc txBegin*(
base: AristoBaseRef;
info: static[string];
): CoreDbRc[AristoTxRef] =
@ -610,7 +610,7 @@ proc getHash*(
let key = block:
let rc = mpt.getKeyRc aVid
if rc.isErr:
doAssert rc.error in {GetKeyNotFound,GetKeyTempLocked}
doAssert rc.error in {GetKeyNotFound,GetKeyUpdateNeeded}
return err(rc.error.toError(db, info, HashNotAvailable))
rc.value

View File

@ -322,7 +322,7 @@ func txTop*(
): CoreDbRc[KvtTxRef] =
base.kdb.txTop.toRc(base.parent, info)
func txBegin*(
proc txBegin*(
base: KvtBaseRef;
info: static[string];
): CoreDbRc[KvtTxRef] =

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -15,9 +15,9 @@ import
eth/common,
results,
stew/byteutils,
./kvt_desc,
./kvt_desc/desc_backend,
./kvt_init/[memory_db, memory_only, rocks_db]
./kvt_init/[memory_db, memory_only, rocks_db],
"."/[kvt_desc, kvt_layers]
# ------------------------------------------------------------------------------
# Private functions
@ -127,18 +127,17 @@ proc ppBe[T](be: T; db: KvtDbRef; indent: int): string =
spc = if 0 < data.len: pfx2 else: " "
"<" & $be.kind & ">" & pfx1 & "tab" & spc & "{" & data & "}"
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc pp*(layer: LayerRef; db: KvtDbRef; indent = 4): string =
proc ppLayer(layer: LayerRef; db: KvtDbRef; indent = 4): string =
let
tLen = layer.tab.len
tLen = layer.dTab.len
info = "tab(" & $tLen & ")"
pfx1 = indent.toPfx(1)
pfx2 = if 0 < tLen: indent.toPfx(2) else: " "
"<layer>" & pfx1 & info & pfx2 & layer.tab.ppTab(db,indent+2)
"<layer>" & pfx1 & info & pfx2 & layer.dTab.ppTab(db,indent+2)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc pp*(
be: BackendRef;
@ -162,7 +161,7 @@ proc pp*(
let
pfx = indent.toPfx
pfx1 = indent.toPfx(1)
result = db.top.pp(db, indent=indent)
result = db.layersCc.ppLayer(db, indent=indent)
if backendOk:
result &= pfx & db.backend.pp(db, indent=indent)
if keysOk:

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -18,23 +18,16 @@ import
eth/common
type
LayerDelta* = object
## Delta tables relative to previous layer
sTab*: Table[Blob,Blob] ## Structural data table
LayerRef* = ref object
## Kvt database layer structures. Any layer holds the full
## change relative to the backend.
tab*: Table[Blob,Blob] ## Structural table
delta*: LayerDelta ## Structural tables held as deltas
txUid*: uint ## Transaction identifier if positive
# ------------------------------------------------------------------------------
# Public helpers, miscellaneous functions
# ------------------------------------------------------------------------------
proc dup*(layer: LayerRef): LayerRef =
## Duplicate layer.
result = LayerRef(
txUid: layer.txUid)
for (k,v) in layer.tab.pairs:
result.tab[k] = v
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -0,0 +1,46 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/tables,
eth/common,
./kvt_desc
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc layersCc*(db: KvtDbRef; level = high(int)): LayerRef =
## Provide a collapsed copy of layers up to a particular transaction level.
## If the `level` argument is too large, the maximum transaction level is
## returned. For the result layer, the `txUid` value set to `0`.
let level = min(level, db.stack.len)
# Merge stack into its bottom layer
if level <= 0 and db.stack.len == 0:
result = LayerRef(delta: LayerDelta(sTab: db.top.delta.sTab))
else:
# now: 0 < level <= db.stack.len
result = LayerRef(delta: LayerDelta(sTab: db.stack[0].delta.sTab))
for n in 1 ..< level:
for (key,val) in db.stack[n].delta.sTab.pairs:
result.delta.sTab[key] = val
# Merge top layer if needed
if level == db.stack.len:
for (key,val) in db.top.delta.sTab.pairs:
result.delta.sTab[key] = val
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -15,9 +15,10 @@
import
std/[sequtils, tables],
eth/common,
results,
./kvt_desc/desc_backend,
./kvt_desc
"."/[kvt_desc, kvt_layers]
func isTop*(tx: KvtTxRef): bool
@ -86,25 +87,21 @@ proc forkTx*(tx: KvtTxRef): Result[KvtDbRef,KvtError] =
##
let db = tx.db
# Provide new top layer
var topLayer: LayerRef
# Verify `tx` argument
if db.txRef == tx:
topLayer = db.top.dup
elif tx.level < db.stack.len:
topLayer = db.stack[tx.level].dup
else:
if db.top.txUid != tx.txUid:
return err(TxArgStaleTx)
if topLayer.txUid != tx.txUid:
elif db.stack.len <= tx.level:
return err(TxArgStaleTx)
elif db.stack[tx.level].txUid != tx.txUid:
return err(TxArgStaleTx)
topLayer.txUid = 1
let txClone = ? db.fork()
# Set up clone associated to `db`
txClone.top = topLayer # is a deep copy
txClone.stack = @[LayerRef()]
txClone.backend = db.backend
txClone.txUidGen = 1
let txClone = ? db.fork()
txClone.top = db.layersCc tx.level
txClone.stack = @[LayerRef()] # Provide tx level 1 stack
txClone.top.txUid = 1
txClone.txUidGen = 1 # Used value of `txClone.top.txUid`
# Install transaction similar to `tx` on clone
txClone.txRef = KvtTxRef(
@ -122,10 +119,7 @@ proc forkTop*(db: KvtDbRef): Result[KvtDbRef,KvtError] =
##
if db.txRef.isNil:
let dbClone = ? db.fork()
dbClone.top = db.top.dup # is a deep copy
dbClone.backend = db.backend
dbClone.top = db.layersCc
return ok(dbClone)
db.txRef.forkTx()
@ -162,9 +156,8 @@ proc txBegin*(db: KvtDbRef): Result[KvtTxRef,KvtError] =
if db.level != db.stack.len:
return err(TxStackGarbled)
db.stack.add db.top.dup # push (save and use top later)
db.top.txUid = db.getTxUid()
db.stack.add db.top
db.top = LayerRef(txUid: db.getTxUid)
db.txRef = KvtTxRef(
db: db,
txUid: db.top.txUid,
@ -199,11 +192,19 @@ proc commit*(
##
let db = ? tx.getDbDescFromTopTx()
# Keep top and discard layer below
db.top.txUid = db.stack[^1].txUid
db.stack.setLen(db.stack.len-1)
# Replace the top two layers by its merged version
let merged = db.stack[^1]
for (key,val) in db.top.delta.sTab.pairs:
merged.delta.sTab[key] = val
# Install `merged` layer
db.top = merged
db.stack.setLen(db.stack.len-1)
db.txRef = tx.parent
if 0 < db.stack.len:
db.txRef.txUid = db.getTxUid
db.top.txUid = db.txRef.txUid
ok()
@ -221,12 +222,13 @@ proc collapse*(
##
let db = ? tx.getDbDescFromTopTx()
# If commit, then leave the current layer and clear the stack, otherwise
# install the stack bottom.
if not commit:
db.stack[0].swap db.top
if commit:
db.top = db.layersCc
else:
db.top = db.stack[0]
db.top.txUid = 0
# Clean up
db.stack.setLen(0)
db.txRef = KvtTxRef(nil)
ok()
@ -255,12 +257,11 @@ proc stow*(
# Save structural and other table entries
let txFrame = be.putBegFn()
be.putKvpFn(txFrame, db.top.tab.pairs.toSeq)
be.putKvpFn(txFrame, db.top.delta.sTab.pairs.toSeq)
? be.putEndFn txFrame
# Delete or clear stack and clear top
db.stack.setLen(0)
db.top = LayerRef(txUid: db.top.txUid)
# Clean up
db.top.delta.sTab.clear
ok()

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -14,6 +14,7 @@
{.push raises: [].}
import
std/algorithm,
eth/common,
results,
./kvt_desc/desc_backend,
@ -23,7 +24,7 @@ import
# Private helpers
# ------------------------------------------------------------------------------
proc getBE*(
proc getBE(
db: KvtDbRef; # Database
key: openArray[byte]; # Key of database record
): Result[Blob,KvtError] =
@ -51,7 +52,7 @@ proc put*(
if data.len == 0:
return err(DataInvalid)
db.top.tab[@key] = @data
db.top.delta.sTab[@key] = @data
ok()
@ -64,14 +65,22 @@ proc del*(
if key.len == 0:
return err(KeyInvalid)
let rc = db.getBE(key)
block haveKey:
for w in db.stack.reversed:
if w.delta.sTab.hasKey @key:
break haveKey
# Do this one last as it is the most expensive lookup
let rc = db.getBE key
if rc.isOk:
db.top.tab[@key] = EmptyBlob
elif rc.error == GetNotFound:
db.top.tab.del @key
else:
break haveKey
if rc.error != GetNotFound:
return err(rc.error)
db.top.delta.sTab.del @key # No such key anywhere => delete now
return ok()
db.top.delta.sTab[@key] = EmptyBlob # Mark for deletion
ok()
# ------------
@ -85,11 +94,21 @@ proc get*(
##
if key.len == 0:
return err(KeyInvalid)
let data = db.top.tab.getOrVoid @key
block:
let data = db.top.delta.sTab.getOrVoid @key
if data.isValid:
return ok(data)
block:
for w in db.stack.reversed:
let data = w.delta.sTab.getOrVoid @key
if data.isValid:
return ok(data)
db.getBE key
proc hasKey*(
db: KvtDbRef; # Database
key: openArray[byte]; # Key of database record
@ -99,9 +118,14 @@ proc hasKey*(
##
if key.len == 0:
return err(KeyInvalid)
let data = db.top.tab.getOrVoid @key
if data.isValid:
if db.top.delta.sTab.hasKey @key:
return ok(true)
for w in db.stack.reversed:
if w.delta.sTab.haskey @key:
return ok(true)
let rc = db.getBE key
if rc.isOk:
return ok(true)

View File

@ -1,6 +1,5 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,6 +1,5 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,6 +1,5 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,6 +1,5 @@
# Nimbus - Types, data structures and shared utilities used in network sync
#
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -10,7 +9,7 @@
# distributed except according to those terms.
import
std/tables,
std/[algorithm, sets, tables],
eth/common,
".."/[kvt_desc, kvt_init]
@ -24,17 +23,28 @@ iterator walkPairsImpl*[T](
## Walk over all `(VertexID,VertexRef)` in the database. Note that entries
## are unsorted.
var i = 0
for (key,data) in db.top.tab.pairs:
var
seen: HashSet[Blob]
i = 0
for (key,data) in db.top.delta.sTab.pairs:
if data.isValid:
yield (i,key,data)
inc i
i.inc
seen.incl key
for w in db.stack.reversed:
for (key,data) in w.delta.sTab.pairs:
if key notin seen:
if data.isValid:
yield (i,key,data)
i.inc
seen.incl key
when T isnot VoidBackendRef:
mixin walk
for (n,key,data) in db.backend.T.walk:
if key notin db.top.tab and data.isValid:
if key notin seen and data.isValid:
yield (n+i,key,data)
# ------------------------------------------------------------------------------

View File

@ -107,7 +107,7 @@ proc verify(
for vid in beSTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
let
nVtx = ly.sTab.getOrVoid vid
nVtx = ly.delta.sTab.getOrVoid vid
mVtx = beSTab.getOrVoid vid
xCheck (nVtx != VertexRef(nil))
@ -119,8 +119,8 @@ proc verify(
" nVtx=", nVtx.pp,
" mVtx=", mVtx.pp
xCheck beSTab.len == ly.sTab.len
xCheck beKMap.len == ly.kMap.len
xCheck beSTab.len == ly.delta.sTab.len
xCheck beKMap.len == ly.delta.kMap.len
true

View File

@ -18,7 +18,7 @@ import
unittest2,
../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get,
aristo_merge, aristo_persistent, aristo_blobify],
aristo_layers, aristo_merge, aristo_persistent, aristo_blobify],
../../nimbus/db/aristo,
../../nimbus/db/aristo/aristo_desc/desc_backend,
../../nimbus/db/aristo/aristo_filter/[filter_fifos, filter_scheduler],
@ -336,7 +336,7 @@ proc checkBeOk(
## ..
for n in 0 ..< dx.len:
let
cache = if forceCache: true else: not dx[n].top.dirty
cache = if forceCache: true else: not dx[n].dirty
rc = dx[n].checkBE(relax=relax, cache=cache)
xCheckRc rc.error == (0,0):
noisy.say "***", "db check failed",

View File

@ -19,7 +19,8 @@ import
unittest2,
../../nimbus/db/aristo,
../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_desc, aristo_blobify, aristo_vid],
aristo_check, aristo_debug, aristo_desc, aristo_blobify, aristo_layers,
aristo_vid],
../../nimbus/db/aristo/aristo_filter/filter_scheduler,
../replay/xcheck,
./test_helpers
@ -275,41 +276,41 @@ proc testVidRecycleLists*(noisy = true; seed = 42): bool =
expectedVids += (vid < first).ord
db.vidDispose vid
xCheck db.top.vGen.len == expectedVids
noisy.say "***", "vids=", db.top.vGen.len, " discarded=", count-expectedVids
xCheck db.vGen.len == expectedVids
noisy.say "***", "vids=", db.vGen.len, " discarded=", count-expectedVids
# Serialise/deserialise
block:
let dbBlob = db.top.vGen.blobify
let dbBlob = db.vGen.blobify
# Deserialise
let
db1 = AristoDbRef.init()
rc = dbBlob.deblobify seq[VertexID]
xCheckRc rc.error == 0
db1.top.vGen = rc.value
db1.top.final.vGen = rc.value
xCheck db.top.vGen == db1.top.vGen
xCheck db.vGen == db1.vGen
# Make sure that recycled numbers are fetched first
let topVid = db.top.vGen[^1]
while 1 < db.top.vGen.len:
let topVid = db.vGen[^1]
while 1 < db.vGen.len:
let w = db.vidFetch()
xCheck w < topVid
xCheck db.top.vGen.len == 1 and db.top.vGen[0] == topVid
xCheck db.vGen.len == 1 and db.vGen[0] == topVid
# Get some consecutive vertex IDs
for n in 0 .. 5:
let w = db.vidFetch()
xCheck w == topVid + n
xCheck db.top.vGen.len == 1
xCheck db.vGen.len == 1
# Repeat last test after clearing the cache
db.top.vGen.setLen(0)
db.top.final.vGen.setLen(0)
for n in 0 .. 5:
let w = db.vidFetch()
xCheck w == VertexID(2) + n # VertexID(1) is default root ID
xCheck db.top.vGen.len == 1
xCheck db.vGen.len == 1
# Recycling and re-org tests
func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(it))
@ -491,6 +492,8 @@ proc testShortKeys*(
"\n k=", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig),
"\n ", sig.pp(),
"\n",
"\n pAmk=", sig.db.layersWalkLebal.toSeq.toTable.pp(sig.db),
"\n"
let w = sig.merkleSignCommit().value
gossip.say "*** testShortkeys (2)", "n=", n, " inx=", inx,
@ -499,6 +502,8 @@ proc testShortKeys*(
"\n R=", w.pp(sig),
"\n ", sig.pp(),
"\n",
"\n pAmk=", sig.db.layersWalkLebal.toSeq.toTable.pp(sig.db),
"\n",
"\n ----------------",
"\n"
let rc = sig.db.check

View File

@ -18,7 +18,7 @@ import
stew/endians2,
../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_delete, aristo_desc, aristo_get,
aristo_merge],
aristo_layers, aristo_merge],
../../nimbus/db/[aristo, aristo/aristo_init/persistent],
../replay/xcheck,
./test_helpers
@ -88,7 +88,7 @@ proc randomisedLeafs(
db: AristoDbRef;
td: var PrngDesc;
): seq[(LeafTie,VertexID)] =
result = db.top.lTab.pairs.toSeq.filterIt(it[1].isvalid).sorted(
result = db.lTab.pairs.toSeq.filterIt(it[1].isvalid).sorted(
cmp = proc(a,b: (LeafTie,VertexID)): int = cmp(a[0], b[0]))
if 2 < result.len:
for n in 0 ..< result.len-1:
@ -103,6 +103,18 @@ proc innerCleanUp(db: AristoDbRef): bool {.discardable.} =
xCheckRc rc.error == 0
db.finish(flush=true)
proc schedStow(
db: AristoDbRef; # Database
chunkedMpt = false; # Partial data (e.g. from `snap`)
): Result[void,AristoError] =
## Scheduled storage
let
layersMeter = db.nLayersVtx + db.nLayersLabel
filterMeter = if db.roFilter.isNil: 0
else: db.roFilter.sTab.len + db.roFilter.kMap.len
persistent = MaxFilterBulk < max(layersMeter, filterMeter)
db.stow(persistent = persistent, chunkedMpt = chunkedMpt)
proc saveToBackend(
tx: var AristoTxRef;
chunkedMpt: bool;
@ -125,7 +137,7 @@ proc saveToBackend(
xCheckRc rc.error == 0
# Make sure MPT hashes are OK
xCheck db.top.dirty == false
xCheck db.dirty == false
block:
let rc = db.txTop()
@ -145,14 +157,14 @@ proc saveToBackend(
xCheckRc rc.error == 0
# Make sure MPT hashes are OK
xCheck db.top.dirty == false
xCheck db.dirty == false
block:
let rc = db.txTop()
xCheckErr rc.value.level < 0 # force error
block:
let rc = db.stow(stageLimit=MaxFilterBulk, chunkedMpt=chunkedMpt)
let rc = db.schedStow(chunkedMpt=chunkedMpt)
xCheckRc rc.error == 0
block:
@ -183,7 +195,7 @@ proc saveToBackendWithOops(
xCheckRc rc.error == 0
# Make sure MPT hashes are OK
xCheck db.top.dirty == false
xCheck db.dirty == false
block:
let rc = db.txTop()
@ -199,14 +211,14 @@ proc saveToBackendWithOops(
xCheckRc rc.error == 0
# Make sure MPT hashes are OK
xCheck db.top.dirty == false
xCheck db.dirty == false
block:
let rc = db.txTop()
xCheckErr rc.value.level < 0 # force error
block:
let rc = db.stow(stageLimit=MaxFilterBulk, chunkedMpt=chunkedMpt)
let rc = db.schedStow(chunkedMpt=chunkedMpt)
xCheckRc rc.error == 0
# Update layers to original level
@ -449,8 +461,8 @@ proc testTxMergeProofAndKvpList*(
testId = idPfx & "#" & $w.id & "." & $n
runID = n
lstLen = list.len
sTabLen = db.top.sTab.len
lTabLen = db.top.lTab.len
sTabLen = db.nLayersVtx()
lTabLen = db.lTab.len
leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
var
@ -463,14 +475,14 @@ proc testTxMergeProofAndKvpList*(
xCheck proved.error in {AristoError(0),MergeHashKeyCachedAlready}
xCheck w.proof.len == proved.merged + proved.dups
xCheck db.top.lTab.len == lTabLen
xCheck db.top.sTab.len <= proved.merged + sTabLen
xCheck proved.merged < db.top.pAmk.len
xCheck db.lTab.len == lTabLen
xCheck db.nLayersVtx() <= proved.merged + sTabLen
xCheck proved.merged < db.nLayersLebal()
let
merged = db.merge leafs
xCheck db.top.lTab.len == lTabLen + merged.merged
xCheck db.lTab.len == lTabLen + merged.merged
xCheck merged.merged + merged.dups == leafs.len
xCheck merged.error in {AristoError(0), MergeLeafPathCachedAlready}