Core db use differential tx layers for aristo and kvt (#1949)

* Fix kvt headers

* Provide differential layers for KVT transaction stack

why:
  Significant performance improvement

* Provide abstraction layer for database top cache layer

why:
  This will eventually be implemented as differential database layers
  or transaction layers. The latter is needed to improve performance.

behavioural changes:
  Zero vertex and keys (i.e. delete requests) are not optimised out
  until the last layer is written to the database.

* Provide differential layers for Aristo transaction stack

why:
  Significant performance improvement
This commit is contained in:
Jordan Hrycaj 2023-12-19 12:39:23 +00:00 committed by GitHub
parent 3675cd6736
commit ffa8ad2246
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
45 changed files with 1033 additions and 631 deletions

View File

@ -16,7 +16,7 @@ import
stew/interval_set, stew/interval_set,
../../aristo, ../../aristo,
../aristo_walk/persistent, ../aristo_walk/persistent,
".."/[aristo_desc, aristo_get, aristo_vid] ".."/[aristo_desc, aristo_get, aristo_layers, aristo_vid]
const const
Vid2 = @[VertexID(2)].toHashSet Vid2 = @[VertexID(2)].toHashSet
@ -148,24 +148,23 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
# Check top layer cache against backend # Check top layer cache against backend
if cache: if cache:
if db.top.dirty: if db.dirty:
return err((VertexID(0),CheckBeCacheIsDirty)) return err((VertexID(0),CheckBeCacheIsDirty))
# Check structural table # Check structural table
for (vid,vtx) in db.top.sTab.pairs: for (vid,vtx) in db.layersWalkVtx:
# A `kMap[]` entry must exist. let lbl = db.layersGetLabel(vid).valueOr:
if not db.top.kMap.hasKey vid: # A `kMap[]` entry must exist.
return err((vid,CheckBeCacheKeyMissing)) return err((vid,CheckBeCacheKeyMissing))
if vtx.isValid: if vtx.isValid:
# Register existing vid against backend generator state # Register existing vid against backend generator state
discard vids.reduce Interval[VertexID,uint64].new(vid,vid) discard vids.reduce Interval[VertexID,uint64].new(vid,vid)
else: else:
# Some vertex is to be deleted, the key must be empty # Some vertex is to be deleted, the key must be empty
let lbl = db.top.kMap.getOrVoid vid
if lbl.isValid: if lbl.isValid:
return err((vid,CheckBeCacheKeyNonEmpty)) return err((vid,CheckBeCacheKeyNonEmpty))
# There must be a representation on the backend DB # There must be a representation on the backend DB unless in a TX
if db.getVtxBE(vid).isErr: if db.getVtxBE(vid).isErr and db.stack.len == 0:
return err((vid,CheckBeCacheVidUnsynced)) return err((vid,CheckBeCacheVidUnsynced))
# Register deleted vid against backend generator state # Register deleted vid against backend generator state
discard vids.merge Interval[VertexID,uint64].new(vid,vid) discard vids.merge Interval[VertexID,uint64].new(vid,vid)
@ -185,10 +184,10 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
# Check key table # Check key table
var list: seq[VertexID] var list: seq[VertexID]
for (vid,lbl) in db.top.kMap.pairs: for (vid,lbl) in db.layersWalkLabel:
list.add vid list.add vid
let vtx = db.getVtx vid let vtx = db.getVtx vid
if not db.top.sTab.hasKey(vid) and not vtx.isValid: if db.layersGetVtx(vid).isErr and not vtx.isValid:
return err((vid,CheckBeCacheKeyDangling)) return err((vid,CheckBeCacheKeyDangling))
if not lbl.isValid or relax: if not lbl.isValid or relax:
continue continue
@ -202,7 +201,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
# Check vGen # Check vGen
let let
vGen = db.top.vGen.vidReorg.toHashSet vGen = db.vGen.vidReorg.toHashSet
vGenExpected = vids.invTo(HashSet[VertexID]) vGenExpected = vids.invTo(HashSet[VertexID])
delta = vGenExpected -+- vGen # symmetric difference delta = vGenExpected -+- vGen # symmetric difference
if 0 < delta.len: if 0 < delta.len:

View File

@ -11,10 +11,10 @@
{.push raises: [].} {.push raises: [].}
import import
std/[sequtils, sets, tables], std/[sequtils, sets],
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
results, results,
".."/[aristo_desc, aristo_get, aristo_serialise, aristo_utils] ".."/[aristo_desc, aristo_get, aristo_layers, aristo_serialise, aristo_utils]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
@ -26,8 +26,8 @@ proc checkTopStrict*(
# No need to specify zero keys if implied by a leaf path with valid target # No need to specify zero keys if implied by a leaf path with valid target
# vertex ID (i.e. not deleted). # vertex ID (i.e. not deleted).
var zeroKeys: HashSet[VertexID] var zeroKeys: HashSet[VertexID]
for (vid,vtx) in db.top.sTab.pairs: for (vid,vtx) in db.layersWalkVtx:
let lbl = db.top.kMap.getOrVoid vid let lbl = db.layersGetLabelOrVoid vid
if not vtx.isValid: if not vtx.isValid:
if lbl.isValid: if lbl.isValid:
@ -42,13 +42,13 @@ proc checkTopStrict*(
if lbl.key != node.digestTo(HashKey): if lbl.key != node.digestTo(HashKey):
return err((vid,CheckStkVtxKeyMismatch)) return err((vid,CheckStkVtxKeyMismatch))
let revVids = db.top.pAmk.getOrVoid lbl let revVids = db.layersGetLebalOrVoid lbl
if not revVids.isValid: if not revVids.isValid:
return err((vid,CheckStkRevKeyMissing)) return err((vid,CheckStkRevKeyMissing))
if vid notin revVids: if vid notin revVids:
return err((vid,CheckStkRevKeyMismatch)) return err((vid,CheckStkRevKeyMismatch))
elif not db.top.dirty or not db.top.kMap.hasKey vid: elif not db.dirty or db.layersGetLabel(vid).isErr:
# So `vtx` exists but not `lbl`, so cache is supposed dirty and the # So `vtx` exists but not `lbl`, so cache is supposed dirty and the
# vertex has a zero entry. # vertex has a zero entry.
return err((vid,CheckStkVtxKeyMissing)) return err((vid,CheckStkVtxKeyMissing))
@ -56,14 +56,14 @@ proc checkTopStrict*(
else: # Empty key flags key is for update else: # Empty key flags key is for update
zeroKeys.incl vid zeroKeys.incl vid
for (vid,key) in db.top.kMap.pairs: for (vid,key) in db.layersWalkLabel:
if not key.isValid and vid notin zeroKeys: if not key.isValid and vid notin zeroKeys:
if not db.getVtx(vid).isValid: if not db.getVtx(vid).isValid:
return err((vid,CheckStkKeyStrayZeroEntry)) return err((vid,CheckStkKeyStrayZeroEntry))
let let
pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0) pAmkVtxCount = db.layersWalkLebal.toSeq.mapIt(it[1]).foldl(a + b.len, 0)
sTabVtxCount = db.top.sTab.values.toSeq.filterIt(it.isValid).len sTabVtxCount = db.layersWalkVtx.toSeq.mapIt(it[1]).filterIt(it.isValid).len
# Non-zero values mist sum up the same # Non-zero values mist sum up the same
if pAmkVtxCount + zeroKeys.len < sTabVtxCount: if pAmkVtxCount + zeroKeys.len < sTabVtxCount:
@ -75,26 +75,26 @@ proc checkTopStrict*(
proc checkTopProofMode*( proc checkTopProofMode*(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
if 0 < db.top.pPrf.len: if 0 < db.pPrf.len:
for vid in db.top.pPrf: for vid in db.pPrf:
let vtx = db.top.sTab.getOrVoid vid let vtx = db.layersGetVtxOrVoid vid
if vtx.isValid: if vtx.isValid:
let node = vtx.toNode(db).valueOr: let node = vtx.toNode(db).valueOr:
return err((vid,CheckRlxVtxIncomplete)) return err((vid,CheckRlxVtxIncomplete))
let lbl = db.top.kMap.getOrVoid vid let lbl = db.layersGetlabelOrVoid vid
if not lbl.isValid: if not lbl.isValid:
return err((vid,CheckRlxVtxKeyMissing)) return err((vid,CheckRlxVtxKeyMissing))
if lbl.key != node.digestTo(HashKey): if lbl.key != node.digestTo(HashKey):
return err((vid,CheckRlxVtxKeyMismatch)) return err((vid,CheckRlxVtxKeyMismatch))
let revVids = db.top.pAmk.getOrVoid lbl let revVids = db.layersGetLebalOrVoid lbl
if not revVids.isValid: if not revVids.isValid:
return err((vid,CheckRlxRevKeyMissing)) return err((vid,CheckRlxRevKeyMissing))
if vid notin revVids: if vid notin revVids:
return err((vid,CheckRlxRevKeyMismatch)) return err((vid,CheckRlxRevKeyMismatch))
else: else:
for (vid,lbl) in db.top.kMap.pairs: for (vid,lbl) in db.layersWalkLabel:
if lbl.isValid: # Otherwise to be deleted if lbl.isValid: # Otherwise to be deleted
let vtx = db.getVtx vid let vtx = db.getVtx vid
if vtx.isValid: if vtx.isValid:
@ -103,25 +103,24 @@ proc checkTopProofMode*(
if lbl.key != node.digestTo(HashKey): if lbl.key != node.digestTo(HashKey):
return err((vid,CheckRlxVtxKeyMismatch)) return err((vid,CheckRlxVtxKeyMismatch))
let revVids = db.top.pAmk.getOrVoid lbl let revVids = db.layersGetLebalOrVoid lbl
if not revVids.isValid: if not revVids.isValid:
return err((vid,CheckRlxRevKeyMissing)) return err((vid,CheckRlxRevKeyMissing))
if vid notin revVids: if vid notin revVids:
return err((vid,CheckRlxRevKeyMismatch)) return err((vid,CheckRlxRevKeyMismatch))
ok() ok()
proc checkTopCommon*( proc checkTopCommon*(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
# Some `kMap[]` entries may ne void indicating backend deletion # Some `kMap[]` entries may ne void indicating backend deletion
let let
kMapCount = db.top.kMap.values.toSeq.filterIt(it.isValid).len kMapCount = db.layersWalkLabel.toSeq.mapIt(it[1]).filterIt(it.isValid).len
kMapNilCount = db.top.kMap.len - kMapCount kMapNilCount = db.layersWalkLabel.toSeq.len - kMapCount
# Collect leafs and check deleted entries # Collect leafs and check deleted entries
var nNilVtx = 0 var nNilVtx = 0
for (vid,vtx) in db.top.sTab.pairs: for (vid,vtx) in db.layersWalkVtx:
if vtx.isValid: if vtx.isValid:
case vtx.vType: case vtx.vType:
of Leaf: of Leaf:
@ -140,11 +139,10 @@ proc checkTopCommon*(
return err((vid,CheckAnyVtxExtPfxMissing)) return err((vid,CheckAnyVtxExtPfxMissing))
else: else:
nNilVtx.inc nNilVtx.inc
discard db.getVtxBE(vid).valueOr: let rc = db.layersGetLabel vid
return err((vid,CheckAnyVidVtxMissing)) if rc.isErr:
if not db.top.kMap.hasKey vid:
return err((vid,CheckAnyVtxEmptyKeyMissing)) return err((vid,CheckAnyVtxEmptyKeyMissing))
if db.top.kMap.getOrVoid(vid).isValid: if rc.value.isValid:
return err((vid,CheckAnyVtxEmptyKeyExpected)) return err((vid,CheckAnyVtxEmptyKeyExpected))
# If present, there are at least as many deleted hashes as there are deleted # If present, there are at least as many deleted hashes as there are deleted
@ -152,20 +150,20 @@ proc checkTopCommon*(
if kMapNilCount != 0 and kMapNilCount < nNilVtx: if kMapNilCount != 0 and kMapNilCount < nNilVtx:
return err((VertexID(0),CheckAnyVtxEmptyKeyMismatch)) return err((VertexID(0),CheckAnyVtxEmptyKeyMismatch))
let pAmkVtxCount = db.top.pAmk.values.toSeq.foldl(a + b.len, 0) let pAmkVtxCount = db.layersWalkLebal.toSeq.mapIt(it[1]).foldl(a + b.len, 0)
if pAmkVtxCount != kMapCount: if pAmkVtxCount != kMapCount:
var knownKeys: HashSet[VertexID] var knownKeys: HashSet[VertexID]
for (key,vids) in db.top.pAmk.pairs: for (key,vids) in db.layersWalkLebal:
for vid in vids: for vid in vids:
if not db.top.kMap.hasKey(vid): if db.layersGetLabel(vid).isErr:
return err((vid,CheckAnyRevVtxMissing)) return err((vid,CheckAnyRevVtxMissing))
if vid in knownKeys: if vid in knownKeys:
return err((vid,CheckAnyRevVtxDup)) return err((vid,CheckAnyRevVtxDup))
knownKeys.incl vid knownKeys.incl vid
return err((VertexID(0),CheckAnyRevCountMismatch)) # should not apply(!) return err((VertexID(0),CheckAnyRevCountMismatch)) # should not apply(!)
for vid in db.top.pPrf: for vid in db.pPrf:
if not db.top.kMap.hasKey(vid): if db.layersGetLabel(vid).isErr:
return err((vid,CheckAnyVtxLockWithoutKey)) return err((vid,CheckAnyVtxLockWithoutKey))
ok() ok()

View File

@ -15,15 +15,45 @@ import
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
results, results,
stew/byteutils, stew/byteutils,
"."/[aristo_constants, aristo_desc, aristo_get, aristo_hike],
./aristo_desc/desc_backend, ./aristo_desc/desc_backend,
./aristo_init/[memory_db, memory_only, rocks_db], ./aristo_init/[memory_db, memory_only, rocks_db],
./aristo_filter/filter_scheduler ./aristo_filter/filter_scheduler,
"."/[aristo_constants, aristo_desc, aristo_hike, aristo_layers]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc orDefault(db: AristoDbRef): AristoDbRef =
if db.isNil: AristoDbRef(top: LayerRef()) else: db
proc del(xMap: var VidsByLabelTab; lbl: HashLabel; vid: VertexID) =
# Update `xMap`
var vidsLen = -1
xMap.withValue(lbl,value):
value[].excl vid
vidsLen = value[].len
if vidsLen == 0:
xMap.del lbl
proc del(xMap: var VidsByLabelTab; lbl: HashLabel; vids: HashSet[VertexID]) =
for vid in vids:
xMap.del(lbl, vid)
proc add(xMap: var VidsByLabelTab; lbl: HashLabel; vid: VertexID) =
xMap.withValue(lbl,value):
value[].incl vid
do: # else if not found
xMap[lbl] = @[vid].toHashSet
func cmp(a, b: HashLabel): int =
if a.root != b.root:
a.root.cmp b.root
else:
a.key.cmp b.key
# --------------------------
proc toHex(w: VertexID): string = proc toHex(w: VertexID): string =
w.uint64.toHex w.uint64.toHex
@ -45,31 +75,16 @@ proc sortedKeys(sTab: Table[VertexID,VertexRef]): seq[VertexID] =
proc sortedKeys(pPrf: HashSet[VertexID]): seq[VertexID] = proc sortedKeys(pPrf: HashSet[VertexID]): seq[VertexID] =
pPrf.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID) pPrf.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys(pAmk: Table[HashLabel,VertexID]): seq[HashLabel] =
pAmk.keys.toSeq.sorted cmp
proc sortedKeys(pAmk: VidsByLabelTab): seq[HashLabel] =
pAmk.keys.toSeq.sorted cmp
proc toPfx(indent: int; offset = 0): string = proc toPfx(indent: int; offset = 0): string =
if 0 < indent+offset: "\n" & " ".repeat(indent+offset) else: "" if 0 < indent+offset: "\n" & " ".repeat(indent+offset) else: ""
proc lidVidUpdate(
db: AristoDbRef;
root: VertexID;
lid: HashKey;
vid: VertexID;
): string =
if lid.isValid and vid.isValid:
let lbl = HashLabel(root: root, key: lid)
if not db.top.isNil:
let vids = db.top.pAmk.getOrVoid lbl
if vids.isValid:
if vid notin vids:
result = "(!)"
return
block:
let vids = db.xMap.getOrVoid lbl
if vids.isValid:
if vid notin vids:
result = "(!)"
return
db.xMap.append(lbl, vid)
proc squeeze(s: string; hex = false; ignLen = false): string = proc squeeze(s: string; hex = false; ignLen = false): string =
## For long strings print `begin..end` only ## For long strings print `begin..end` only
if hex: if hex:
@ -105,9 +120,10 @@ proc stripZeros(a: string; toExp = false): string =
proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 = proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 =
if lbl.isValid: if lbl.isValid:
if not db.top.isNil: block:
let vids = db.top.pAmk.getOrVoid lbl let vids = db.layersGetLebalOrVoid lbl
if vids.isValid: if vids.isValid:
db.xMap.del(lbl, vids)
return vids.sortedKeys[0].uint64 return vids.sortedKeys[0].uint64
block: block:
let vids = db.xMap.getOrVoid lbl let vids = db.xMap.getOrVoid lbl
@ -116,6 +132,29 @@ proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 =
# --------------------- # ---------------------
proc ppLabelOk(
db: AristoDbRef;
root: VertexID;
key: HashKey;
vid: VertexID;
): string =
if key.isValid and vid.isValid:
let
lbl = HashLabel(root: root, key: key)
vids = db.layersGetLebalOrVoid lbl
if vids.isValid:
db.xMap.del(lbl, vids)
if vid notin vids:
result = "(!)"
return
block:
let vids = db.xMap.getOrVoid lbl
if vids.isValid:
if vid notin vids:
result = "(!)"
return
db.xMap.add(lbl,vid)
proc ppVid(vid: VertexID; pfx = true): string = proc ppVid(vid: VertexID; pfx = true): string =
if pfx: if pfx:
result = "$" result = "$"
@ -124,6 +163,15 @@ proc ppVid(vid: VertexID; pfx = true): string =
else: else:
result &= "ø" result &= "ø"
proc ppVids(vids: HashSet[VertexID]): string =
result = "{"
for vid in vids.toSeq.sorted:
result = "$"
if vid.isValid:
result &= vid.toHex.stripZeros.toLowerAscii
else:
result &= "ø"
func ppCodeHash(h: Hash256): string = func ppCodeHash(h: Hash256): string =
result = "¢" result = "¢"
if h == Hash256(): if h == Hash256():
@ -168,15 +216,17 @@ proc ppVidList(vGen: openArray[VertexID]): string =
# "{" & vGen.sortedKeys.mapIt(it.ppVid).join(",") & "}" # "{" & vGen.sortedKeys.mapIt(it.ppVid).join(",") & "}"
proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string = proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string =
proc getVids: HashSet[VertexID] = proc getVids(): tuple[vids: HashSet[VertexID], xMapTag: string] =
if not db.top.isNil: let lbl = HashLabel(root: root, key: key)
let vids = db.top.pAmk.getOrVoid HashLabel(root: root, key: key)
if vids.isValid:
return vids
block: block:
let vids = db.xMap.getOrVoid HashLabel(root: root, key: key) let vids = db.layersGetLebalOrVoid lbl
if vids.isValid: if vids.isValid:
return vids db.xMap.del(lbl, vids)
return (vids, "")
block:
let vids = db.xMap.getOrVoid lbl
if vids.isValid:
return (vids, "+")
if pfx: if pfx:
result = "£" result = "£"
if key.len == 0 or key.to(Hash256) == Hash256(): if key.len == 0 or key.to(Hash256) == Hash256():
@ -186,12 +236,12 @@ proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string =
else: else:
let let
tag = if key.len < 32: "[#" & $key.len & "]" else: "" tag = if key.len < 32: "[#" & $key.len & "]" else: ""
vids = getVids() (vids, xMapTag) = getVids()
if vids.isValid: if vids.isValid:
if not pfx and 0 < tag.len: if not pfx and 0 < tag.len:
result &= "$" result &= "$"
if 1 < vids.len: result &= "{" if 1 < vids.len: result &= "{"
result &= vids.sortedKeys.mapIt(it.ppVid(pfx=false)).join(",") result &= vids.sortedKeys.mapIt(it.ppVid(pfx=false) & xMapTag).join(",")
if 1 < vids.len: result &= "}" if 1 < vids.len: result &= "}"
result &= tag result &= tag
return return
@ -236,9 +286,9 @@ proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
if not nd.isValid: if not nd.isValid:
result = "ø" result = "ø"
else: else:
if db.top.isNil or not vid.isValid or vid in db.top.pPrf: if not vid.isValid or vid in db.pPrf:
result = ["L(", "X(", "B("][nd.vType.ord] result = ["L(", "X(", "B("][nd.vType.ord]
elif vid in db.top.kMap: elif db.layersGetLabel(vid).isOk:
result = ["l(", "x(", "b("][nd.vType.ord] result = ["l(", "x(", "b("][nd.vType.ord]
else: else:
result = ["ł(", "€(", "þ("][nd.vType.ord] result = ["ł(", "€(", "þ("][nd.vType.ord]
@ -257,7 +307,7 @@ proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
proc ppSTab( proc ppSTab(
sTab: Table[VertexID,VertexRef]; sTab: Table[VertexID,VertexRef];
db = AristoDbRef(); db: AristoDbRef;
indent = 4; indent = 4;
): string = ): string =
"{" & sTab.sortedKeys "{" & sTab.sortedKeys
@ -267,9 +317,9 @@ proc ppSTab(
proc ppLTab( proc ppLTab(
lTab: Table[LeafTie,VertexID]; lTab: Table[LeafTie,VertexID];
db: AristoDbRef;
indent = 4; indent = 4;
): string = ): string =
let db = AristoDbRef()
"{" & lTab.sortedKeys "{" & lTab.sortedKeys
.mapIt((it, lTab.getOrVoid it)) .mapIt((it, lTab.getOrVoid it))
.mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")") .mapIt("(" & it[0].ppLeafTie(db) & "," & it[1].ppVid & ")")
@ -281,16 +331,25 @@ proc ppPPrf(pPrf: HashSet[VertexID]): string =
proc ppXMap*( proc ppXMap*(
db: AristoDbRef; db: AristoDbRef;
kMap: Table[VertexID,HashLabel]; kMap: Table[VertexID,HashLabel];
pAmk: Table[HashLabel,HashSet[VertexID]]; pAmk: VidsByLabelTab;
indent: int; indent: int;
): string = ): string =
let pfx = indent.toPfx(1) let pfx = indent.toPfx(1)
var dups: HashSet[VertexID] var
for vids in pAmk.values: multi: HashSet[VertexID]
if 1 < vids.len: oops: HashSet[VertexID]
dups = dups + vids block:
var vids: HashSet[VertexID]
for w in pAmk.values:
for v in w:
if v in vids:
oops.incl v
else:
vids.incl v
if 1 < w.len:
multi = multi + w
# Vertex IDs without forward mapping `kMap: VertexID -> HashLabel` # Vertex IDs without forward mapping `kMap: VertexID -> HashLabel`
var revOnly: Table[VertexID,HashLabel] var revOnly: Table[VertexID,HashLabel]
@ -347,7 +406,7 @@ proc ppXMap*(
for vid in kMap.sortedKeys: for vid in kMap.sortedKeys:
let lbl = kMap.getOrVoid vid let lbl = kMap.getOrVoid vid
if lbl.isValid: if lbl.isValid:
cache.add (vid.uint64, lbl.vidCode(db), vid in dups) cache.add (vid.uint64, lbl.vidCode(db), vid in multi)
let vids = pAmk.getOrVoid lbl let vids = pAmk.getOrVoid lbl
if (0 < vids.len and vid notin vids) or lbl.key.len < 32: if (0 < vids.len and vid notin vids) or lbl.key.len < 32:
cache[^1][2] = true cache[^1][2] = true
@ -452,7 +511,7 @@ proc ppLayer(
proc doPrefix(s: string; dataOk: bool): string = proc doPrefix(s: string; dataOk: bool): string =
var rc: string var rc: string
if tagOk: if tagOk:
rc = pfy & s & (if dataOk: pfx2 else: " ") rc = pfy & s & (if dataOk: pfx2 else: "")
pfy = pfx1 pfy = pfx1
else: else:
rc = pfy rc = pfy
@ -464,35 +523,35 @@ proc ppLayer(
result &= "<layer>".doPrefix(false) result &= "<layer>".doPrefix(false)
if vGenOk: if vGenOk:
let let
tLen = layer.vGen.len tLen = layer.final.vGen.len
info = "vGen(" & $tLen & ")" info = "vGen(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.vGen.ppVidList result &= info.doPrefix(0 < tLen) & layer.final.vGen.ppVidList
if sTabOk: if sTabOk:
let let
tLen = layer.sTab.len tLen = layer.delta.sTab.len
info = "sTab(" & $tLen & ")" info = "sTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.sTab.ppSTab(db,indent+2) result &= info.doPrefix(0 < tLen) & layer.delta.sTab.ppSTab(db,indent+2)
if lTabOk: if lTabOk:
let let
tlen = layer.lTab.len tlen = layer.final.lTab.len
info = "lTab(" & $tLen & ")" info = "lTab(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.lTab.ppLTab(indent+2) result &= info.doPrefix(0 < tLen) & layer.final.lTab.ppLTab(db,indent+2)
if kMapOk: if kMapOk:
let let
tLen = layer.kMap.len tLen = layer.delta.kMap.len
ulen = layer.pAmk.len ulen = layer.delta.pAmk.len
lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen lInf = if tLen == uLen: $tLen else: $tLen & "," & $ulen
info = "kMap(" & lInf & ")" info = "kMap(" & lInf & ")"
result &= info.doPrefix(0 < tLen + uLen) result &= info.doPrefix(0 < tLen + uLen)
result &= db.ppXMap(layer.kMap, layer.pAmk, indent+2) result &= db.ppXMap(layer.delta.kMap, layer.delta.pAmk, indent+2)
if pPrfOk: if pPrfOk:
let let
tLen = layer.pPrf.len tLen = layer.final.pPrf.len
info = "pPrf(" & $tLen & ")" info = "pPrf(" & $tLen & ")"
result &= info.doPrefix(0 < tLen) & layer.pPrf.ppPPrf result &= info.doPrefix(0 < tLen) & layer.final.pPrf.ppPPrf
if 0 < nOKs: if 0 < nOKs:
let let
info = if layer.dirty: "dirty" else: "clean" info = if layer.final.dirty: "dirty" else: "clean"
result &= info.doPrefix(false) result &= info.doPrefix(false)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -510,14 +569,14 @@ proc pp*(w: Hash256): string =
proc pp*(w: HashKey; sig: MerkleSignRef): string = proc pp*(w: HashKey; sig: MerkleSignRef): string =
w.ppKey(sig.db, sig.root) w.ppKey(sig.db, sig.root)
proc pp*(w: HashKey; db = AristoDbRef(); root = VertexID(1)): string = proc pp*(w: HashKey; db = AristoDbRef(nil); root = VertexID(1)): string =
w.ppKey(db, root) w.ppKey(db.orDefault, root)
proc pp*(lbl: HashLabel, db = AristoDbRef()): string = proc pp*(lbl: HashLabel, db = AristoDbRef(nil)): string =
lbl.ppLabel(db) lbl.ppLabel(db.orDefault)
proc pp*(lty: LeafTie, db = AristoDbRef()): string = proc pp*(lty: LeafTie, db = AristoDbRef(nil)): string =
lty.ppLeafTie(db) lty.ppLeafTie(db.orDefault)
proc pp*(vid: VertexID): string = proc pp*(vid: VertexID): string =
vid.ppVid vid.ppVid
@ -540,11 +599,11 @@ proc pp*(a: openArray[QidAction]): string =
proc pp*(vGen: openArray[VertexID]): string = proc pp*(vGen: openArray[VertexID]): string =
vGen.ppVidList vGen.ppVidList
proc pp*(p: PayloadRef, db = AristoDbRef()): string = proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string =
p.ppPayload(db) p.ppPayload(db.orDefault)
proc pp*(nd: VertexRef, db = AristoDbRef()): string = proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string =
nd.ppVtx(db, VertexID(0)) nd.ppVtx(db.orDefault, VertexID(0))
proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string = proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
if not nd.isValid: if not nd.isValid:
@ -560,14 +619,14 @@ proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
of Extension: of Extension:
result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & "," result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & ","
result &= nd.key[0].ppKey(db,root) result &= nd.key[0].ppKey(db,root)
result &= db.lidVidUpdate(root, nd.key[0], nd.eVid) result &= db.ppLabelOk(root, nd.key[0], nd.eVid)
of Branch: of Branch:
result &= "[" result &= "["
for n in 0..15: for n in 0..15:
if nd.bVid[n].isValid or nd.key[n].isValid: if nd.bVid[n].isValid or nd.key[n].isValid:
result &= nd.bVid[n].ppVid result &= nd.bVid[n].ppVid
result &= db.lidVidUpdate(root, nd.key[n], nd.bVid[n]) & "," result &= db.ppLabelOk(root, nd.key[n], nd.bVid[n]) & ","
result[^1] = ']' result[^1] = ']'
result &= ",[" result &= ",["
@ -579,40 +638,43 @@ proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
result &= ")" result &= ")"
proc pp*(nd: NodeRef): string = proc pp*(nd: NodeRef): string =
var db = AristoDbRef() nd.pp(AristoDbRef(nil).orDefault)
nd.pp(db)
proc pp*( proc pp*(
sTab: Table[VertexID,VertexRef]; sTab: Table[VertexID,VertexRef];
db = AristoDbRef(); db = AristoDbRef(nil);
indent = 4; indent = 4;
): string = ): string =
sTab.ppSTab sTab.ppSTab(db.orDefault)
proc pp*(lTab: Table[LeafTie,VertexID]; indent = 4): string = proc pp*(
lTab.ppLTab lTab: Table[LeafTie,VertexID];
db = AristoDbRef(nil);
indent = 4;
): string =
lTab.ppLTab(db.orDefault, indent)
proc pp*(pPrf: HashSet[VertexID]): string = proc pp*(pPrf: HashSet[VertexID]): string =
pPrf.ppPPrf pPrf.ppPPrf
proc pp*(leg: Leg; db = AristoDbRef()): string = proc pp*(leg: Leg; db = AristoDbRef(nil)): string =
let db = db.orDefault()
result = "(" & leg.wp.vid.ppVid & "," result = "(" & leg.wp.vid.ppVid & ","
if not db.top.isNil: block:
let lbl = db.top.kMap.getOrVoid leg.wp.vid let lbl = db.layersGetLabelOrVoid leg.wp.vid
if not lbl.isValid: if not lbl.isValid:
result &= "ø" result &= "ø"
elif leg.wp.vid notin db.top.pAmk.getOrVoid lbl: elif leg.wp.vid notin db.layersGetLebalOrVoid lbl:
result &= lbl.ppLabel(db) result &= lbl.ppLabel(db)
result &= "," result &= ","
if leg.backend:
result &= ""
result &= ","
if 0 <= leg.nibble: if 0 <= leg.nibble:
result &= $leg.nibble.ppNibble result &= $leg.nibble.ppNibble
result &= "," & leg.wp.vtx.pp(db) & ")" result &= "," & leg.wp.vtx.pp(db) & ")"
proc pp*(hike: Hike; db = AristoDbRef(); indent = 4): string = proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string =
let pfx = indent.toPfx(1) let
db = db.orDefault()
pfx = indent.toPfx(1)
result = "[" result = "["
if hike.legs.len == 0: if hike.legs.len == 0:
result &= "(" & hike.root.ppVid & ")" result &= "(" & hike.root.ppVid & ")"
@ -624,30 +686,37 @@ proc pp*(hike: Hike; db = AristoDbRef(); indent = 4): string =
result &= "]" result &= "]"
proc pp*(kMap: Table[VertexID,Hashlabel]; indent = 4): string = proc pp*(kMap: Table[VertexID,Hashlabel]; indent = 4): string =
let db = AristoDbRef() let db = AristoDbRef(nil).orDefault
"{" & kMap.sortedKeys "{" & kMap.sortedKeys
.mapIt((it,kMap.getOrVoid it)) .mapIt((it, kMap.getOrVoid it))
.filterIt(it[1].isValid)
.mapIt("(" & it[0].ppVid & "," & it[1].ppLabel(db) & ")") .mapIt("(" & it[0].ppVid & "," & it[1].ppLabel(db) & ")")
.join("," & indent.toPfx(1)) & "}" .join("," & indent.toPfx(1)) & "}"
proc pp*(pAmk: Table[Hashlabel,VertexID]; indent = 4): string =
let db = AristoDbRef()
var rev = pAmk.pairs.toSeq.mapIt((it[1],it[0])).toTable
"{" & rev.sortedKeys
.mapIt((it,rev.getOrVoid it))
.filterIt(it[1].isValid)
.mapIt("(" & it[1].ppLabel(db) & "," & it[0].ppVid & ")")
.join("," & indent.toPfx(1)) & "}"
proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string = proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string =
db.ppXMap(kMap, db.top.pAmk, indent) db.ppXMap(kMap, db.layersCc.delta.pAmk, indent)
proc pp*(pAmk: VidsByLabel; db: AristoDbRef; indent = 4): string = proc pp*(
db.ppXMap(db.top.kMap, pAmk, indent) pAmk: Table[HashLabel,VertexID];
db = AristoDbRef(nil);
indent = 4;
): string =
let db = db.orDefault
"{" & pAmk.sortedkeys
.mapIt((it, pAmk.getOrVoid it))
.mapIt("(" & it[0].ppLabel(db) & "," & it[1].ppVid & ")")
.join("," & indent.toPfx(1)) & "}"
proc pp*(pAmk: VidsByLabelTab; db = AristoDbRef(nil); indent = 4): string =
let db = db.orDefault
"{" & pAmk.sortedkeys
.mapIt((it, pAmk.getOrVoid it))
.mapIt("(" & it[0].ppLabel(db) & "," & it[1].ppVids & ")")
.join("," & indent.toPfx(1)) & "}"
# ---------------------
proc pp*(tx: AristoTxRef): string = proc pp*(tx: AristoTxRef): string =
result = "(uid=" & $tx.txUid & ",lvl=" & $tx.level result = "(uid=" & $tx.txUid & ",level=" & $tx.level
if not tx.parent.isNil: if not tx.parent.isNil:
result &= ", par=" & $tx.parent.txUid result &= ", par=" & $tx.parent.txUid
result &= ")" result &= ")"
@ -655,7 +724,6 @@ proc pp*(tx: AristoTxRef): string =
proc pp*(wp: VidVtxPair; db: AristoDbRef): string = proc pp*(wp: VidVtxPair; db: AristoDbRef): string =
"(" & wp.vid.pp & "," & wp.vtx.pp(db) & ")" "(" & wp.vid.pp & "," & wp.vtx.pp(db) & ")"
# ---------------------
proc pp*( proc pp*(
layer: LayerRef; layer: LayerRef;
@ -691,7 +759,7 @@ proc pp*(
xTabOk: bool; xTabOk: bool;
indent = 4; indent = 4;
): string = ): string =
db.top.pp(db, xTabOk=xTabOk, indent=indent) db.layersCc.pp(db, xTabOk=xTabOk, indent=indent)
proc pp*( proc pp*(
db: AristoDbRef; db: AristoDbRef;
@ -700,15 +768,15 @@ proc pp*(
other = false; other = false;
indent = 4; indent = 4;
): string = ): string =
db.top.pp(db, xTabOk=xTabOk, kMapOk=kMapOk, other=other, indent=indent) db.layersCc.pp(db, xTabOk=xTabOk, kMapOk=kMapOk, other=other, indent=indent)
proc pp*( proc pp*(
filter: FilterRef; filter: FilterRef;
db = AristoDbRef(); db = AristoDbRef(nil);
root = VertexID(1); root = VertexID(1);
indent = 4; indent = 4;
): string = ): string =
filter.ppFilter(db, root, indent) filter.ppFilter(db.orDefault(), root, indent)
proc pp*( proc pp*(
be: BackendRef; be: BackendRef;
@ -732,7 +800,9 @@ proc pp*(
backendOk = false; backendOk = false;
filterOk = true; filterOk = true;
): string = ): string =
result = db.top.pp(db, indent=indent) & indent.toPfx result = db.layersCc.pp(db, indent=indent) & indent.toPfx
if 0 < db.stack.len:
result &= " level=" & $db.stack.len & indent.toPfx
if backendOk: if backendOk:
result &= db.backend.pp(db) result &= db.backend.pp(db)
elif filterOk: elif filterOk:

View File

@ -20,7 +20,8 @@ import
chronicles, chronicles,
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
results, results,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_vid] "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_path,
aristo_vid]
logScope: logScope:
topics = "aristo-delete" topics = "aristo-delete"
@ -59,24 +60,16 @@ proc nullifyKey(
vid: VertexID; # Vertex IDs to clear vid: VertexID; # Vertex IDs to clear
) = ) =
# Register for void hash (to be recompiled) # Register for void hash (to be recompiled)
let lbl = db.top.kMap.getOrVoid vid db.layersResLabel vid
db.top.pAmk.del lbl
db.top.kMap[vid] = VOID_HASH_LABEL
db.top.dirty = true # Modified top level cache
proc disposeOfVtx( proc disposeOfVtx(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
vid: VertexID; # Vertex IDs to clear vid: VertexID; # Vertex IDs to clear
) = ) =
# Remove entry # Remove entry
if db.getVtxBE(vid).isOk: db.layersResVtx vid
db.top.sTab[vid] = VertexRef(nil) # Will be propagated to backend db.layersResLabel vid
db.nullifyKey vid db.vidDispose vid # Recycle ID
else:
db.top.sTab.del vid
db.top.kMap.del vid
db.top.dirty = true # Modified top level cache
db.vidDispose vid
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
@ -129,7 +122,7 @@ proc collapseBranch(
# Replace `br` (use `xt` as-is) # Replace `br` (use `xt` as-is)
discard discard
db.top.sTab[xt.vid] = xt.vtx db.layersPutVtx(xt.vid, xt.vtx)
ok() ok()
@ -179,7 +172,7 @@ proc collapseExt(
# Replace ^2 by `^2 & vtx` (use `xt` as-is) # Replace ^2 by `^2 & vtx` (use `xt` as-is)
discard discard
db.top.sTab[xt.vid] = xt.vtx db.layersPutVtx(xt.vid, xt.vtx)
ok() ok()
@ -220,15 +213,15 @@ proc collapseLeaf(
of Branch: # (1) of Branch: # (1)
# Replace `vtx` by `^2 & vtx` (use `lf` as-is) # Replace `vtx` by `^2 & vtx` (use `lf` as-is)
par.vtx.bVid[hike.legs[^3].nibble] = lf.vid par.vtx.bVid[hike.legs[^3].nibble] = lf.vid
db.top.sTab[par.vid] = par.vtx db.layersPutVtx(par.vid, par.vtx)
db.top.sTab[lf.vid] = lf.vtx db.layersPutVtx(lf.vid, lf.vtx)
# Make sure that there is a cache enty in case the leaf was pulled from # Make sure that there is a cache enty in case the leaf was pulled from
# the backend.! # the backend.!
let let
lfPath = hike.legsTo(hike.legs.len - 2, NibblesSeq) & lf.vtx.lPfx lfPath = hike.legsTo(hike.legs.len - 2, NibblesSeq) & lf.vtx.lPfx
tag = lfPath.pathToTag.valueOr: tag = lfPath.pathToTag.valueOr:
return err((lf.vid,error)) return err((lf.vid,error))
db.top.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid db.top.final.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid
return ok() return ok()
of Extension: # (2) or (3) of Extension: # (2) or (3)
@ -243,19 +236,19 @@ proc collapseLeaf(
return err((gpr.vid,DelBranchExpexted)) return err((gpr.vid,DelBranchExpexted))
db.disposeOfVtx par.vid # `par` is obsolete now db.disposeOfVtx par.vid # `par` is obsolete now
gpr.vtx.bVid[hike.legs[^4].nibble] = lf.vid gpr.vtx.bVid[hike.legs[^4].nibble] = lf.vid
db.top.sTab[gpr.vid] = gpr.vtx db.layersPutVtx(gpr.vid, gpr.vtx)
db.top.sTab[lf.vid] = lf.vtx db.layersPutVtx(lf.vid, lf.vtx)
# Make sure that there is a cache enty in case the leaf was pulled from # Make sure that there is a cache enty in case the leaf was pulled from
# the backend.! # the backend.!
let let
lfPath = hike.legsTo(hike.legs.len - 3, NibblesSeq) & lf.vtx.lPfx lfPath = hike.legsTo(hike.legs.len - 3, NibblesSeq) & lf.vtx.lPfx
tag = lfPath.pathToTag.valueOr: tag = lfPath.pathToTag.valueOr:
return err((lf.vid,error)) return err((lf.vid,error))
db.top.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid db.top.final.lTab[LeafTie(root: hike.root, path: tag)] = lf.vid
return ok() return ok()
# No grandparent, so ^3 is root vertex # (3) # No grandparent, so ^3 is root vertex # (3)
db.top.sTab[par.vid] = lf.vtx db.layersPutVtx(par.vid, lf.vtx)
# Continue below # Continue below
of Leaf: of Leaf:
@ -264,7 +257,7 @@ proc collapseLeaf(
else: # (4) else: # (4)
# Replace ^2 by `^2 & vtx` (use `lf` as-is) # `br` is root vertex # Replace ^2 by `^2 & vtx` (use `lf` as-is) # `br` is root vertex
db.nullifyKey br.vid # root was changed db.nullifyKey br.vid # root was changed
db.top.sTab[br.vid] = lf.vtx db.layersPutVtx(br.vid, lf.vtx)
# Continue below # Continue below
# Common part for setting up `lf` as root vertex # Rest of (3) or (4) # Common part for setting up `lf` as root vertex # Rest of (3) or (4)
@ -275,8 +268,8 @@ proc collapseLeaf(
# No need to update the cache unless `lf` is present there. The leaf path # No need to update the cache unless `lf` is present there. The leaf path
# as well as the value associated with the leaf path has not been changed. # as well as the value associated with the leaf path has not been changed.
let lfTie = LeafTie(root: hike.root, path: rc.value) let lfTie = LeafTie(root: hike.root, path: rc.value)
if db.top.lTab.hasKey lfTie: if db.top.final.lTab.hasKey lfTie:
db.top.lTab[lfTie] = lf.vid db.top.final.lTab[lfTie] = lf.vid
# Clean up stale leaf vertex which has moved to root position # Clean up stale leaf vertex which has moved to root position
db.disposeOfVtx lf.vid db.disposeOfVtx lf.vid
@ -289,7 +282,7 @@ proc collapseLeaf(
rootVtx.vType == Leaf: rootVtx.vType == Leaf:
let tag = rootVtx.lPfx.pathToTag.valueOr: let tag = rootVtx.lPfx.pathToTag.valueOr:
return err((hike.root,error)) return err((hike.root,error))
db.top.lTab[LeafTie(root: hike.root, path: tag)] = hike.root db.top.final.lTab[LeafTie(root: hike.root, path: tag)] = hike.root
ok() ok()
@ -306,7 +299,7 @@ proc deleteImpl(
let lf = hike.legs[^1].wp let lf = hike.legs[^1].wp
if lf.vtx.vType != Leaf: if lf.vtx.vType != Leaf:
return err((lf.vid,DelLeafExpexted)) return err((lf.vid,DelLeafExpexted))
if lf.vid in db.top.pPrf: if lf.vid in db.pPrf:
return err((lf.vid, DelLeafLocked)) return err((lf.vid, DelLeafLocked))
# Will be needed at the end. Just detect an error early enouhh # Will be needed at the end. Just detect an error early enouhh
@ -332,12 +325,12 @@ proc deleteImpl(
# Unlink child vertex from structural table # Unlink child vertex from structural table
br.vtx.bVid[hike.legs[^2].nibble] = VertexID(0) br.vtx.bVid[hike.legs[^2].nibble] = VertexID(0)
db.top.sTab[br.vid] = br.vtx db.layersPutVtx(br.vid, br.vtx)
# Clear all keys up to the root key # Clear all keys up to the root key
for n in 0 .. hike.legs.len - 2: for n in 0 .. hike.legs.len - 2:
let vid = hike.legs[n].wp.vid let vid = hike.legs[n].wp.vid
if vid in db.top.pPrf: if vid in db.top.final.pPrf:
return err((vid, DelBranchLocked)) return err((vid, DelBranchLocked))
db.nullifyKey vid db.nullifyKey vid
@ -368,10 +361,10 @@ proc deleteImpl(
# Delete leaf entry # Delete leaf entry
if leafVidBe.isValid: if leafVidBe.isValid:
# To be recorded on change history # To be recorded on change history
db.top.lTab[lty] = VertexID(0) db.top.final.lTab[lty] = VertexID(0)
else: else:
# No need to keep it any longer in cache # No need to keep it any longer in cache
db.top.lTab.del lty db.top.final.lTab.del lty
ok() ok()

View File

@ -61,6 +61,10 @@ type
else: else:
rwDb: AristoDbRef ## Link to writable descriptor rwDb: AristoDbRef ## Link to writable descriptor
VidVtxPair* = object
vid*: VertexID ## Table lookup vertex ID (if any)
vtx*: VertexRef ## Reference to vertex
AristoDbRef* = ref AristoDbObj AristoDbRef* = ref AristoDbObj
AristoDbObj* = object AristoDbObj* = object
## Three tier database object supporting distributed instances. ## Three tier database object supporting distributed instances.
@ -74,7 +78,7 @@ type
dudes: DudesRef ## Related DB descriptors dudes: DudesRef ## Related DB descriptors
# Debugging data below, might go away in future # Debugging data below, might go away in future
xMap*: VidsByLabel ## For pretty printing, extends `pAmk` xMap*: VidsByLabelTab ## For pretty printing, extends `pAmk`
AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].} AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].}
## Generic call back function/closure. ## Generic call back function/closure.
@ -148,6 +152,12 @@ func hash*(db: AristoDbRef): Hash =
## Table/KeyedQueue/HashSet mixin ## Table/KeyedQueue/HashSet mixin
cast[pointer](db).hash cast[pointer](db).hash
func dup*(wp: VidVtxPair): VidVtxPair =
## Safe copy of `wp` argument
VidVtxPair(
vid: wp.vid,
vtx: wp.vtx.dup)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions, `dude` related # Public functions, `dude` related
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -220,13 +230,13 @@ proc fork*(
## database lookup for cases where the top layer is redefined anyway. ## database lookup for cases where the top layer is redefined anyway.
## ##
let clone = AristoDbRef( let clone = AristoDbRef(
top: LayerRef(), top: LayerRef(),
backend: db.backend) backend: db.backend)
if not rawTopLayer: if not rawTopLayer:
let rc = clone.backend.getIdgFn() let rc = clone.backend.getIdgFn()
if rc.isOk: if rc.isOk:
clone.top.vGen = rc.value clone.top.final.vGen = rc.value
elif rc.error != GetIdgNotFound: elif rc.error != GetIdgNotFound:
return err(rc.error) return err(rc.error)

View File

@ -130,7 +130,6 @@ type
CheckRlxRevKeyMissing CheckRlxRevKeyMissing
CheckRlxRevKeyMismatch CheckRlxRevKeyMismatch
CheckAnyVidVtxMissing
CheckAnyVtxEmptyKeyMissing CheckAnyVtxEmptyKeyMissing
CheckAnyVtxEmptyKeyExpected CheckAnyVtxEmptyKeyExpected
CheckAnyVtxEmptyKeyMismatch CheckAnyVtxEmptyKeyMismatch
@ -216,10 +215,12 @@ type
FilSiblingsCommitUnfinshed FilSiblingsCommitUnfinshed
# Get functions from `aristo_get.nim` # Get functions from `aristo_get.nim`
GetLeafMissing
GetKeyUpdateNeeded
GetLeafNotFound GetLeafNotFound
GetVtxNotFound GetVtxNotFound
GetKeyNotFound GetKeyNotFound
GetKeyTempLocked
GetFilNotFound GetFilNotFound
GetIdgNotFound GetIdgNotFound
GetFqsNotFound GetFqsNotFound

View File

@ -33,20 +33,7 @@ type
codeHash*: Hash256 codeHash*: Hash256
PayloadType* = enum PayloadType* = enum
## Type of leaf data. On the Aristo backend, data are serialised as ## Type of leaf data.
## follows:
##
## * Opaque data => opaque data, marked `0xff`
## * `Account` object => RLP encoded data, marked `0xaa`
## * `AristoAccount` object => serialised account, marked `0x99` or smaller
##
## On deserialisation from the Aristo backend, there is no reverese for an
## `Account` object. It rather is kept as an RLP encoded `Blob`.
##
## * opaque data, marked `0xff` => `RawData`
## * RLP encoded data, marked `0xaa` => `RlpData`
## * erialised account, marked `0x99` or smaller => `AccountData`
##
RawData ## Generic data RawData ## Generic data
RlpData ## Marked RLP encoded RlpData ## Marked RLP encoded
AccountData ## `Aristo account` with vertex IDs links AccountData ## `Aristo account` with vertex IDs links
@ -81,7 +68,7 @@ type
# ---------------------- # ----------------------
FilterRef* = ref object FilterRef* = ref object
## Delta layer with expanded sequences for quick access ## Delta layer with expanded sequences for quick access.
fid*: FilterID ## Filter identifier fid*: FilterID ## Filter identifier
src*: Hash256 ## Applicable to this state root src*: Hash256 ## Applicable to this state root
trg*: Hash256 ## Resulting state root (i.e. `kMap[1]`) trg*: Hash256 ## Resulting state root (i.e. `kMap[1]`)
@ -89,20 +76,55 @@ type
kMap*: Table[VertexID,HashKey] ## Filter Merkle hash key mapping kMap*: Table[VertexID,HashKey] ## Filter Merkle hash key mapping
vGen*: seq[VertexID] ## Filter unique vertex ID generator vGen*: seq[VertexID] ## Filter unique vertex ID generator
VidsByLabel* = Table[HashLabel,HashSet[VertexID]] VidsByLabelTab* = Table[HashLabel,HashSet[VertexID]]
## Reverse lookup searching `VertexID` by the hash key/label. ## Reverse lookup searching `VertexID` by the hash key/label.
LayerDelta* = object
## Delta layers are stacked implying a tables hierarchy. Table entries on
## a higher level take precedence over lower layer table entries. So an
## existing key-value table entry of a layer on top supersedes same key
## entries on all lower layers. A missing entry on a higher layer indicates
## that the key-value pair might be fond on some lower layer.
##
## A zero value (`nil`, empty hash etc.) is considered am missing key-value
## pair. Tables on the `LayerDelta` may have stray zero key-value pairs for
## missing entries due to repeated transactions while adding and deleting
## entries. There is no need to purge redundant zero entries.
##
## As for `kMap[]` entries, there might be a zero value entriy relating
## (i.e. indexed by the same vertex ID) to an `sMap[]` non-zero value entry
## (of the same layer or a lower layer whatever comes first.) This entry
## is kept as a reminder that the hash value of the `kMap[]` entry needs
## to be re-compiled.
##
## The reasoning behind the above scenario is that every vertex held on the
## `sTab[]` tables must correspond to a hash entry held on the `kMap[]`
## tables. So a corresponding zero value or missing entry produces an
## inconsistent state that must be resolved.
##
sTab*: Table[VertexID,VertexRef] ## Structural vertex table
kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping
pAmk*: VidsByLabelTab ## Reverse `kMap` entries, hash key lookup
LayerFinal* = object
## Final tables fully supersede tables on lower layers when stacked as a
## whole. Missing entries on a higher layers are the final state (for the
## the top layer version of the table.)
##
## These structures are used for tables which are typically smaller then
## the ones on the `LayerDelta` object.
##
lTab*: Table[LeafTie,VertexID] ## Access path to leaf vertex
pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator
dirty*: bool ## Needs to be hashified if `true`
LayerRef* = ref object LayerRef* = ref object
## Hexary trie database layer structures. Any layer holds the full ## Hexary trie database layer structures. Any layer holds the full
## change relative to the backend. ## change relative to the backend.
sTab*: Table[VertexID,VertexRef] ## Structural vertex table delta*: LayerDelta ## Most structural tables held as deltas
lTab*: Table[LeafTie,VertexID] ## Direct access, path to leaf vertex final*: LayerFinal ## Stored as latest version
kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping txUid*: uint ## Transaction identifier if positive
pAmk*: VidsByLabel ## Reverse `kMap` entries, hash key lookup
pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator
txUid*: uint ## Transaction identifier if positive
dirty*: bool ## Needs to be hashified if `true`
# ---------------------- # ----------------------
@ -138,25 +160,6 @@ const
func max(a, b, c: int): int = func max(a, b, c: int): int =
max(max(a,b),c) max(max(a,b),c)
# ------------------------------------------------------------------------------
# Public helpers: `Table[HashLabel,seq[VertexID]]`
# ------------------------------------------------------------------------------
proc append*(pAmk: var VidsByLabel; lbl: HashLabel; vid: VertexID) =
pAmk.withValue(lbl,value):
value[].incl vid
do: # else if not found
pAmk[lbl] = @[vid].toHashSet
proc delete*(pAmk: var VidsByLabel; lbl: HashLabel; vid: VertexID) =
var deleteItem = false
pAmk.withValue(lbl,value):
value[].excl vid
if value[].len == 0:
deleteItem = true
if deleteItem:
pAmk.del lbl
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef` # Public helpers: `NodeRef` and `PayloadRef`
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -293,18 +296,6 @@ func dup*(node: NodeRef): NodeRef =
bVid: node.bVid, bVid: node.bVid,
key: node.key) key: node.key)
func dup*(layer: LayerRef): LayerRef =
## Duplicate layer.
result = LayerRef(
lTab: layer.lTab,
kMap: layer.kMap,
pAmk: layer.pAmk,
pPrf: layer.pPrf,
vGen: layer.vGen,
txUid: layer.txUid)
for (k,v) in layer.sTab.pairs:
result.sTab[k] = v.dup
# --------------- # ---------------
func to*(node: NodeRef; T: type VertexRef): T = func to*(node: NodeRef; T: type VertexRef): T =

View File

@ -21,25 +21,6 @@ import
./aristo_filter/[ ./aristo_filter/[
filter_fifos, filter_helpers, filter_merge, filter_reverse, filter_siblings] filter_fifos, filter_helpers, filter_merge, filter_reverse, filter_siblings]
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
func bulk*(filter: FilterRef): int =
## Some measurement for the size of the filter calculated as the length of
## the `sTab[]` table plus the lengthof the `kMap[]` table. This can be used
## to set a threshold when to flush the staging area to the backend DB to
## be used in `stow()`.
##
## The `filter` argument may be `nil`, i.e. `FilterRef(nil).bulk == 0`
if filter.isNil: 0 else: filter.sTab.len + filter.kMap.len
func bulk*(layer: LayerRef): int =
## Variant of `bulk()` for layers rather than filters.
##
## The `layer` argument may be `nil`, i.e. `LayerRef(nil).bulk == 0`
if layer.isNil: 0 else: layer.sTab.len + layer.kMap.len
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions, construct filters # Public functions, construct filters
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -64,7 +45,7 @@ proc fwdFilter*(
# Register the Merkle hash keys of the MPT where this reverse filter will be # Register the Merkle hash keys of the MPT where this reverse filter will be
# applicable: `be => fg` # applicable: `be => fg`
let (srcRoot, trgRoot) = block: let (srcRoot, trgRoot) = block:
let rc = db.getLayerStateRoots(layer, chunkedMpt) let rc = db.getLayerStateRoots(layer.delta, chunkedMpt)
if rc.isOK: if rc.isOK:
(rc.value.be, rc.value.fg) (rc.value.be, rc.value.fg)
elif rc.error == FilPrettyPointlessLayer: elif rc.error == FilPrettyPointlessLayer:
@ -74,9 +55,9 @@ proc fwdFilter*(
ok FilterRef( ok FilterRef(
src: srcRoot, src: srcRoot,
sTab: layer.sTab, sTab: layer.delta.sTab,
kMap: layer.kMap.pairs.toSeq.mapIt((it[0],it[1].key)).toTable, kMap: layer.delta.kMap.pairs.toSeq.mapIt((it[0],it[1].key)).toTable,
vGen: layer.vGen.vidReorg, # Compact recycled IDs vGen: layer.final.vGen.vidReorg, # Compact recycled IDs
trg: trgRoot) trg: trgRoot)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -212,7 +193,7 @@ proc forkBackLog*(
let let
instr = ? be.fifosFetch(backSteps = episode+1) instr = ? be.fifosFetch(backSteps = episode+1)
clone = ? db.fork(rawToplayer = true) clone = ? db.fork(rawToplayer = true)
clone.top.vGen = instr.fil.vGen clone.top.final.vGen = instr.fil.vGen
clone.roFilter = instr.fil clone.roFilter = instr.fil
ok clone ok clone

View File

@ -32,7 +32,7 @@ type
proc getLayerStateRoots*( proc getLayerStateRoots*(
db: AristoDbRef; db: AristoDbRef;
layer: LayerRef; delta: LayerDelta;
chunkedMpt: bool; chunkedMpt: bool;
): Result[StateRootPair,AristoError] = ): Result[StateRootPair,AristoError] =
## Get the Merkle hash key for target state root to arrive at after this ## Get the Merkle hash key for target state root to arrive at after this
@ -51,7 +51,7 @@ proc getLayerStateRoots*(
spr.be = sprBeKey.to(Hash256) spr.be = sprBeKey.to(Hash256)
spr.fg = block: spr.fg = block:
let lbl = layer.kMap.getOrVoid VertexID(1) let lbl = delta.kMap.getOrVoid VertexID(1)
if lbl.isValid: if lbl.isValid:
lbl.key.to(Hash256) lbl.key.to(Hash256)
else: else:
@ -60,14 +60,14 @@ proc getLayerStateRoots*(
return ok(spr) return ok(spr)
if chunkedMpt: if chunkedMpt:
let vids = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: sprBeKey) let lbl = HashLabel(root: VertexID(1), key: sprBeKey)
if VertexID(1) in vids: if VertexID(1) in delta.pAmk.getOrVoid lbl:
spr.fg = spr.be spr.fg = spr.be
return ok(spr) return ok(spr)
if layer.sTab.len == 0 and if delta.sTab.len == 0 and
layer.kMap.len == 0 and delta.kMap.len == 0 and
layer.pAmk.len == 0: delta.pAmk.len == 0:
return err(FilPrettyPointlessLayer) return err(FilPrettyPointlessLayer)
err(FilStateRootMismatch) err(FilStateRootMismatch)

View File

@ -16,21 +16,7 @@
import import
std/tables, std/tables,
results, results,
./aristo_desc "."/[aristo_desc, aristo_layers]
type
VidVtxPair* = object
vid*: VertexID ## Table lookup vertex ID (if any)
vtx*: VertexRef ## Reference to vertex
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
func dup*(wp: VidVtxPair): VidVtxPair =
VidVtxPair(
vid: wp.vid,
vtx: wp.vtx.dup)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
@ -124,17 +110,21 @@ proc getLeaf*(
db: AristoDbRef; db: AristoDbRef;
lty: LeafTie; lty: LeafTie;
): Result[VidVtxPair,AristoError] = ): Result[VidVtxPair,AristoError] =
## Get the vertex from the top layer by the `Patricia Trie` path. This ## Get the leaf path from the cache layers and look up the database for a
## function does not search on the `backend` layer. ## leaf node.
let vid = db.top.lTab.getOrVoid lty let vid = db.lTab.getOrVoid lty
if not vid.isValid: if not vid.isValid:
return err(GetLeafNotFound) return err(GetLeafNotFound)
let vtx = db.top.sTab.getOrVoid vid block body:
if not vtx.isValid: let vtx = db.layersGetVtx(vid).valueOr:
return err(GetVtxNotFound) break body
if vtx.isValid:
return ok(VidVtxPair(vid: vid, vtx: vtx))
ok VidVtxPair(vid: vid, vtx: vtx) # The leaf node cannot be on the backend. It was produced by a `merge()`
# action. So this is a system problem.
err(GetLeafMissing)
proc getLeafVtx*(db: AristoDbRef; lty: LeafTie): VertexRef = proc getLeafVtx*(db: AristoDbRef; lty: LeafTie): VertexRef =
## Variant of `getLeaf()` returning `nil` on error (while ignoring the ## Variant of `getLeaf()` returning `nil` on error (while ignoring the
@ -147,44 +137,59 @@ proc getLeafVtx*(db: AristoDbRef; lty: LeafTie): VertexRef =
# ------------------ # ------------------
proc getVtxRc*(db: AristoDbRef; vid: VertexID): Result[VertexRef,AristoError] = proc getVtxRc*(db: AristoDbRef; vid: VertexID): Result[VertexRef,AristoError] =
## Cascaded attempt to fetch a vertex from the top layer or the backend. ## Cascaded attempt to fetch a vertex from the cache layers or the backend.
## ##
if db.top.sTab.hasKey vid: block body:
# If the vertex is to be deleted on the backend, a `VertexRef(nil)` entry # If the vertex marked is to be deleted on the backend, a `VertexRef(nil)`
# is kept in the local table in which case it is OK to return this value. # entry is kept in the local table in which case it isis returned as the
let vtx = db.top.sTab.getOrVoid vid # error symbol `GetVtxNotFound`.
let vtx = db.layersGetVtx(vid).valueOr:
break body
if vtx.isValid: if vtx.isValid:
return ok(vtx) return ok vtx
return err(GetVtxNotFound) else:
return err(GetVtxNotFound)
db.getVtxBE vid db.getVtxBE vid
proc getVtx*(db: AristoDbRef; vid: VertexID): VertexRef = proc getVtx*(db: AristoDbRef; vid: VertexID): VertexRef =
## Cascaded attempt to fetch a vertex from the top layer or the backend. ## Cascaded attempt to fetch a vertex from the cache layers or the backend.
## The function returns `nil` on error or failure. ## The function returns `nil` on error or failure.
## ##
let rc = db.getVtxRc vid db.getVtxRc(vid).valueOr: VertexRef(nil)
if rc.isOk:
return rc.value
VertexRef(nil)
proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] = proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
## Cascaded attempt to fetch a Merkle hash from the top layer or the backend. ## Cascaded attempt to fetch a Merkle hash from the cache layers or the
## backend.
## ##
if db.top.kMap.hasKey vid: block body:
# If the key is to be deleted on the backend, a `VOID_HASH_LABEL` entry let key = db.layersGetKey(vid).valueOr:
# is kept on the local table in which case it is OK to return this value. break body
let lbl = db.top.kMap.getOrVoid vid # If there is a zero value label, the entry is either marked for being
if lbl.isValid: # updated or for deletion on the database. So check below.
return ok lbl.key if key.isValid:
return err(GetKeyTempLocked) return ok key
# The zero value label does not refer to an update mark if there is no
# valid vertex (either on the cache or the backend whatever comes first.)
let vtx = db.layersGetVtx(vid).valueOr:
# There was no vertex on the cache. So there must be one the backend (the
# reason for the key lable to exists, at all.)
return err(GetKeyUpdateNeeded)
if vtx.isValid:
return err(GetKeyUpdateNeeded)
else:
# The vertex is to be deleted. So is the value label.
return err(GetVtxNotFound)
db.getKeyBE vid db.getKeyBE vid
proc getKey*(db: AristoDbRef; vid: VertexID): HashKey = proc getKey*(db: AristoDbRef; vid: VertexID): HashKey =
## Cascaded attempt to fetch a vertex from the top layer or the backend. ## Cascaded attempt to fetch a vertex from the cache layers or the backend.
## The function returns `nil` on error or failure. ## The function returns `nil` on error or failure.
## ##
db.getKeyRc(vid).valueOr: db.getKeyRc(vid).valueOr: VOID_HASH_KEY
return VOID_HASH_KEY
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -60,8 +60,8 @@ import
eth/common, eth/common,
results, results,
stew/byteutils, stew/byteutils,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_serialise, aristo_utils, "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_serialise,
aristo_vid] aristo_utils]
type type
FollowUpVid = object FollowUpVid = object
@ -213,8 +213,9 @@ proc updateSchedule(
unresolved = error unresolved = error
break findlegInx break findlegInx
vid = leaf.vid vid = leaf.vid
if not db.top.kMap.getOrVoid(vid).key.isValid:
db.vidAttach(HashLabel(root: root, key: node.digestTo(HashKey)), vid) if not db.layersGetKeyOrVoid(vid).isValid:
db.layersPutLabel(vid, HashLabel(root: root, key: node.digestTo(HashKey)))
# Clean up unnecessay leaf node from previous session # Clean up unnecessay leaf node from previous session
wff.base.del vid wff.base.del vid
wff.setNextLink(wff.pool, wff.base.getOrVoid vid) wff.setNextLink(wff.pool, wff.base.getOrVoid vid)
@ -231,7 +232,7 @@ proc updateSchedule(
break findlegInx break findlegInx
# All done this `hike` # All done this `hike`
if db.top.kMap.getOrVoid(root).key.isValid: if db.layersGetKeyOrVoid(root).isValid:
wff.root.excl root wff.root.excl root
wff.completed.incl root wff.completed.incl root
return return
@ -294,10 +295,10 @@ proc hashify*(
deleted = false # Need extra check for orphaned vertices deleted = false # Need extra check for orphaned vertices
wff: WidthFirstForest # Leaf-to-root traversal structure wff: WidthFirstForest # Leaf-to-root traversal structure
if not db.top.dirty: if not db.dirty:
return ok wff.completed return ok wff.completed
for (lky,lfVid) in db.top.lTab.pairs: for (lky,lfVid) in db.lTab.pairs:
let let
rc = lky.hikeUp db rc = lky.hikeUp db
hike = rc.to(Hike) hike = rc.to(Hike)
@ -329,11 +330,12 @@ proc hashify*(
# is the task to search for unresolved node keys and add glue paths to # is the task to search for unresolved node keys and add glue paths to
# the width-first schedule. # the width-first schedule.
var unresolved: HashSet[VertexID] var unresolved: HashSet[VertexID]
for (vid,lbl) in db.top.kMap.pairs: for (vid,lbl) in db.layersWalkLabel:
if not lbl.isValid and if not lbl.isValid and
vid notin wff and vid notin wff:
(vid notin db.top.sTab or db.top.sTab.getOrVoid(vid).isValid): let rc = db.layersGetVtx vid
unresolved.incl vid if rc.isErr or rc.value.isValid:
unresolved.incl vid
let glue = unresolved.cloudConnect(db, wff.base) let glue = unresolved.cloudConnect(db, wff.base)
if 0 < glue.unresolved.len: if 0 < glue.unresolved.len:
@ -376,7 +378,7 @@ proc hashify*(
# Add the child vertices to `redo[]` for the schedule `base[]` list. # Add the child vertices to `redo[]` for the schedule `base[]` list.
for w in error: for w in error:
if w notin wff.base: if w notin wff.base:
if not db.top.sTab.hasKey w: if db.layersGetVtx(w).isErr:
# Ooops, should have been marked for update # Ooops, should have been marked for update
return err((w,HashifyNodeUnresolved)) return err((w,HashifyNodeUnresolved))
redo[w] = FollowUpVid(root: val.root, toVid: vid) redo[w] = FollowUpVid(root: val.root, toVid: vid)
@ -384,7 +386,7 @@ proc hashify*(
# Could resolve => update Merkle hash # Could resolve => update Merkle hash
let key = node.digestTo(HashKey) let key = node.digestTo(HashKey)
db.vidAttach(HashLabel(root: val.root, key: key), vid) db.layersPutLabel(vid, HashLabel(root: val.root, key: key))
# Set follow up link for next round # Set follow up link for next round
wff.setNextLink(redo, val) wff.setNextLink(redo, val)
@ -393,15 +395,15 @@ proc hashify*(
wff.base.swap redo wff.base.swap redo
# Update root nodes # Update root nodes
for vid in wff.root - db.top.pPrf: for vid in wff.root - db.pPrf:
# Convert root vertex to a node. # Convert root vertex to a node.
let node = db.getVtx(vid).toNode(db,stopEarly=false).valueOr: let node = db.getVtx(vid).toNode(db,stopEarly=false).valueOr:
return err((vid,HashifyRootNodeUnresolved)) return err((vid,HashifyRootNodeUnresolved))
db.vidAttach(HashLabel(root: vid, key: node.digestTo(HashKey)), vid) db.layersPutLabel(vid, HashLabel(root: vid, key: node.digestTo(HashKey)))
wff.completed.incl vid wff.completed.incl vid
db.top.dirty = false db.top.final.dirty = false
db.top.lTab.clear db.top.final.lTab.clear
ok wff.completed ok wff.completed
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -20,7 +20,6 @@ type
## For constructing a `VertexPath` ## For constructing a `VertexPath`
wp*: VidVtxPair ## Vertex ID and data ref wp*: VidVtxPair ## Vertex ID and data ref
nibble*: int8 ## Next vertex selector for `Branch` (if any) nibble*: int8 ## Next vertex selector for `Branch` (if any)
backend*: bool ## Sources from backend if `true`
Hike* = object Hike* = object
## Trie traversal path ## Trie traversal path
@ -86,16 +85,13 @@ proc hikeUp*(
while vid.isValid: while vid.isValid:
var leg = Leg(wp: VidVtxPair(vid: vid), nibble: -1) var leg = Leg(wp: VidVtxPair(vid: vid), nibble: -1)
# Fetch vertex to be checked on this lap # Fetch next vertex
leg.wp.vtx = db.top.sTab.getOrVoid vid leg.wp.vtx = db.getVtxRc(vid).valueOr:
if not leg.wp.vtx.isValid: if error != GetVtxNotFound:
return err((hike,error))
# Register vertex fetched from backend (if any) if hike.legs.len == 0:
let rc = db.getVtxBE vid return err((hike,HikeEmptyPath))
if rc.isErr: break
break
leg.backend = true
leg.wp.vtx = rc.value
case leg.wp.vtx.vType: case leg.wp.vtx.vType:
of Leaf: of Leaf:

View File

@ -42,7 +42,7 @@ proc newAristoRdbDbRef(
be.closeFn(flush = false) be.closeFn(flush = false)
return err(rc.error) return err(rc.error)
rc.value rc.value
ok AristoDbRef(top: LayerRef(vGen: vGen), backend: be) ok AristoDbRef(top: LayerRef(final: LayerFinal(vGen: vGen)), backend: be)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public database constuctors, destructor # Public database constuctors, destructor

View File

@ -0,0 +1,306 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/[algorithm, sequtils, sets, tables],
eth/common,
results,
./aristo_desc
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
func dup(sTab: Table[VertexID,VertexRef]): Table[VertexID,VertexRef] =
## Explicit dup for `VertexRef` values
for (k,v) in sTab.pairs:
result[k] = v.dup
func dup(delta: LayerDelta): LayerDelta =
result = LayerDelta(
sTab: delta.sTab.dup, # explicit dup for ref values
kMap: delta.kMap,
pAmk: delta.pAmk)
func stackGetLebalOrVoid(db: AristoDbRef; lbl: HashLabel): HashSet[VertexID] =
  ## Helper: search the transaction stack (newest layer first, the top
  ## layer is not consulted) for the reverse lookup set of vertex IDs
  ## mapped to the label `lbl`. An empty set is returned when no stack
  ## layer has an entry for `lbl`.
  for layer in db.stack.reversed:
    layer.delta.pAmk.withValue(lbl, vids):
      return vids[]
# ------------------------------------------------------------------------------
# Public getters: lazy value lookup for read only versions
# ------------------------------------------------------------------------------
func lTab*(db: AristoDbRef): Table[LeafTie,VertexID] =
  ## Getter: leaf path lookup cache, taken from the top layer's pre-merged
  ## `final` section.
  db.top.final.lTab

func pPrf*(db: AristoDbRef): HashSet[VertexID] =
  ## Getter: set of proof-mode locked vertex IDs, taken from the top
  ## layer's pre-merged `final` section.
  db.top.final.pPrf

func vGen*(db: AristoDbRef): seq[VertexID] =
  ## Getter: vertex ID generator state, taken from the top layer's
  ## pre-merged `final` section.
  db.top.final.vGen

func dirty*(db: AristoDbRef): bool =
  ## Getter: `true` if the top cache layer has been modified.
  db.top.final.dirty
# ------------------------------------------------------------------------------
# Public getters/helpers
# ------------------------------------------------------------------------------
func nLayersVtx*(db: AristoDbRef): int =
  ## Total number of vertex entries on the cache layers (top layer plus
  ## all transaction stack layers.)
  result = db.top.delta.sTab.len
  for layer in db.stack:
    result += layer.delta.sTab.len

func nLayersLabel*(db: AristoDbRef): int =
  ## Total number of key/label entries on the cache layers (top layer plus
  ## all transaction stack layers.)
  result = db.top.delta.kMap.len
  for layer in db.stack:
    result += layer.delta.kMap.len

func nLayersLebal*(db: AristoDbRef): int =
  ## Total number of key/label reverse lookup entries on the cache layers
  ## (top layer plus all transaction stack layers.)
  result = db.top.delta.pAmk.len
  for layer in db.stack:
    result += layer.delta.pAmk.len
# ------------------------------------------------------------------------------
# Public functions: get variants
# ------------------------------------------------------------------------------
proc layersGetVtx*(db: AristoDbRef; vid: VertexID): Result[VertexRef,void] =
  ## Look up a vertex on the cache layers, top layer first, then down the
  ## transaction stack (newest first.) An `ok()` result might contain a
  ## `nil` vertex if it is stored on the cache that way.
  ##
  db.top.delta.sTab.withValue(vid, item):
    return ok(item[])
  for layer in db.stack.reversed:
    layer.delta.sTab.withValue(vid, item):
      return ok(item[])
  err()
proc layersGetVtxOrVoid*(db: AristoDbRef; vid: VertexID): VertexRef =
  ## Simplified version of `layersGetVtx()`: a cache miss maps to `nil`.
  let rc = db.layersGetVtx vid
  if rc.isOk: rc.value else: VertexRef(nil)
proc layersGetLabel*(db: AristoDbRef; vid: VertexID): Result[HashLabel,void] =
  ## Look up the hash label (containing the `HashKey`) for `vid` on the
  ## cache layers, top layer first, then down the transaction stack
  ## (newest first.) An `ok()` result might contain a void hash label if
  ## it is stored on the cache that way.
  ##
  db.top.delta.kMap.withValue(vid, item):
    return ok(item[])
  for layer in db.stack.reversed:
    layer.delta.kMap.withValue(vid, item):
      return ok(item[])
  err()
proc layersGetlabelOrVoid*(db: AristoDbRef; vid: VertexID): HashLabel =
  ## Simplified version of `layersGetLabel()`: a cache miss maps to the
  ## void hash label.
  let rc = db.layersGetLabel vid
  if rc.isOk: rc.value else: VOID_HASH_LABEL
proc layersGetKey*(db: AristoDbRef; vid: VertexID): Result[HashKey,void] =
  ## Variant of `layersGetLabel()` returning only the `HashKey` part of
  ## the label. Note that `lbl.isValid == lbl.key.isValid`, so a void
  ## label yields a void key.
  let lbl = ? db.layersGetLabel(vid)
  ok(lbl.key)
proc layersGetKeyOrVoid*(db: AristoDbRef; vid: VertexID): HashKey =
  ## Simplified version of `layersGetKey()`: a cache miss maps to the void
  ## hash key.
  let rc = db.layersGetKey vid
  if rc.isOk: rc.value else: VOID_HASH_KEY
proc layersGetLebal*(
    db: AristoDbRef;
    lbl: HashLabel;
      ): Result[HashSet[VertexID],void] =
  ## Inverse of `layersGetKey()`: for a given argument `lbl`, find the set
  ## of all vertex IDs that have `layersGetLabel()` return this very `lbl`
  ## value. ("Lebal" is "label" spelled backwards, marking the reverse
  ## mapping.) Lookup is top layer first, then down the transaction stack.
  db.top.delta.pAmk.withValue(lbl, item):
    return ok(item[])
  for layer in db.stack.reversed:
    layer.delta.pAmk.withValue(lbl, item):
      return ok(item[])
  err()
proc layersGetLebalOrVoid*(db: AristoDbRef; lbl: HashLabel): HashSet[VertexID] =
  ## Simplified version of `layersGetLebal()`: a cache miss maps to the
  ## empty vertex ID set.
  let rc = db.layersGetLebal lbl
  if rc.isOk: rc.value else: EmptyVidSet
# ------------------------------------------------------------------------------
# Public functions: put variants
# ------------------------------------------------------------------------------
proc layersPutVtx*(db: AristoDbRef; vid: VertexID; vtx: VertexRef) =
  ## Store a (potentially empty/`nil`) vertex on the top layer. A `nil`
  ## entry acts as a delete marker shadowing lower layers and the backend.
  db.top.delta.sTab[vid] = vtx
  db.top.final.dirty = true # Modified top cache layers
proc layersResVtx*(db: AristoDbRef; vid: VertexID) =
  ## Shortcut for `db.layersPutVtx(vid, VertexRef(nil))`. It is sort of the
  ## equivalent of a delete function: the `nil` entry is kept on the top
  ## layer so it shadows any vertex for `vid` on lower layers or backend.
  db.layersPutVtx(vid, VertexRef(nil))
proc layersPutLabel*(db: AristoDbRef; vid: VertexID; lbl: HashLabel) =
  ## Store a (potentially void) hash label for `vid` on the top layer and
  ## keep the reverse lookup table `pAmk` (label -> vertex ID set) in sync.

  # Get previous label
  # NOTE(review): only the top layer's `kMap` is consulted here; a label
  # for `vid` stored on a lower stack layer is not unlinked from the
  # reverse table -- confirm this shadowing behaviour is intended.
  let blb = db.top.delta.kMap.getOrVoid vid

  # Update label on `label->vid` mapping table
  db.top.delta.kMap[vid] = lbl
  db.top.final.dirty = true # Modified top cache layers

  # Clear previous value on reverse table if it has changed
  if blb.isValid and blb != lbl:
    db.top.delta.pAmk.withValue(blb, value):
      value[].excl vid
    do: # provide empty lookup
      # Top layer had no reverse entry: materialise one from the stack
      # layers with `vid` removed, so `blb` no longer resolves to `vid`.
      db.top.delta.pAmk[blb] = db.stackGetLebalOrVoid(blb) - @[vid].toHashSet

  # Add updated value on reverse table if non-zero
  if lbl.isValid:
    db.top.delta.pAmk.withValue(lbl, value):
      value[].incl vid
    do: # else if not found: need to merge with value set from lower layer
      db.top.delta.pAmk[lbl] = db.stackGetLebalOrVoid(lbl) + @[vid].toHashSet
proc layersResLabel*(db: AristoDbRef; vid: VertexID) =
  ## Shortcut for `db.layersPutLabel(vid, VOID_HASH_LABEL)`. It is sort of
  ## the equivalent of a delete function: the void label is kept on the
  ## top layer and shadows any label on lower layers or the backend.
  db.layersPutLabel(vid, VOID_HASH_LABEL)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc layersMergeOnto*(src: LayerRef; trg: LayerRef): LayerRef {.discardable.} =
  ## Merge the argument layer `src` into `trg` and return `trg`. Entries
  ## from `src` override entries already present in `trg`; reverse lookup
  ## sets are joined. For the result layer, the `txUid` value is set to `0`.
  trg.final = src.final
  trg.txUid = 0
  for (id, vtx) in src.delta.sTab.pairs:
    trg.delta.sTab[id] = vtx
  for (id, hashLbl) in src.delta.kMap.pairs:
    trg.delta.kMap[id] = hashLbl
  for (hashLbl, ids) in src.delta.pAmk.pairs:
    # Union with any set already registered for this label
    trg.delta.pAmk.mgetOrPut(hashLbl, EmptyVidSet).incl ids
  trg
proc layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
  ## Provide a collapsed copy of layers up to a particular transaction
  ## level. If the `level` argument is too large, the maximum transaction
  ## level is used. For the result layer, the `txUid` value is set to `0`.
  ##
  ## Fixes over the previous implementation:
  ## * the `level` argument was ignored when merging (all stack layers were
  ##   always folded in),
  ## * the stack was merged in `reversed` order, letting stale entries from
  ##   older layers override newer ones.
  let level = min(level, db.stack.len)

  # Layers to be collapsed, ordered bottom (oldest) to top (newest). The
  # top cache layer is only included when all stack layers are merged.
  let layers =
    if db.stack.len <= level: db.stack & @[db.top]
    else: db.stack[0 .. level]

  # Start out with a copy of the bottom layer; the pre-merged/final values
  # come from the newest layer involved.
  result = LayerRef(
    final: layers[^1].final,
    delta: layers[0].delta.dup)          # Explicit dup for ref values

  # Merge the remaining layers bottom-up so that newer entries override
  # older ones. `layersMergeOnto()` also resets `txUid` to `0`.
  for n in 1 ..< layers.len:
    layers[n].layersMergeOnto result
# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------
iterator layersWalkVtx*(
    db: AristoDbRef;
    seen: var HashSet[VertexID];
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Walk over all `(VertexID,VertexRef)` pairs on the cache layers. Note
  ## that entries are unsorted.
  ##
  ## Each vertex ID is visited at most once: the value yielded is the one
  ## from the newest layer holding an entry for that ID, which may be a
  ## `nil` vertex (i.e. a pending delete marker.)
  ##
  ## The argument `seen` collects the set of all visited vertex IDs. It
  ## must not be modified while the iterator is active.
  ##
  for (vid,vtx) in db.top.delta.sTab.pairs:
    yield (vid,vtx)
    seen.incl vid
  for w in db.stack.reversed:
    for (vid,vtx) in w.delta.sTab.pairs:
      if vid notin seen:
        yield (vid,vtx)
        seen.incl vid
iterator layersWalkVtx*(
    db: AristoDbRef;
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Variant of `layersWalkVtx()` which keeps the `seen` set internally
  ## rather than exposing it to the caller.
  var seen: HashSet[VertexID]
  for (vid,vtx) in db.layersWalkVtx seen:
    yield (vid,vtx)
iterator layersWalkLabel*(
    db: AristoDbRef;
      ): tuple[vid: VertexID, lbl: HashLabel] =
  ## Walk over all `(VertexID,HashLabel)` pairs on the cache layers. Note
  ## that entries are unsorted. Each vertex ID is visited at most once,
  ## with the label taken from the newest layer holding an entry for it.
  var seen: HashSet[VertexID]
  for (vid,lbl) in db.top.delta.kMap.pairs:
    yield (vid,lbl)
    seen.incl vid
  for w in db.stack.reversed:
    for (vid,lbl) in w.delta.kMap.pairs:
      if vid notin seen:
        yield (vid,lbl)
        seen.incl vid
iterator layersWalkLebal*(
    db: AristoDbRef;
      ): tuple[lbl: HashLabel, vids: HashSet[VertexID]] =
  ## Walk over `(HashLabel,HashSet[VertexID])` reverse lookup pairs on the
  ## cache layers ("lebal" is "label" spelled backwards.)
  ##
  ## NOTE(review): a label present on a newer layer shadows the whole ID
  ## set of the same label on older layers -- no union is taken here.
  ## Confirm this is intended, as `layersPutLabel()` merges stack sets
  ## into the top layer when updating.
  var seen: HashSet[HashLabel]
  for (lbl,vids) in db.top.delta.pAmk.pairs:
    yield (lbl,vids)
    seen.incl lbl
  for w in db.stack.reversed:
    for (lbl,vids) in w.delta.pAmk.pairs:
      if lbl notin seen:
        yield (lbl,vids)
        seen.incl lbl
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -31,8 +31,8 @@ import
results, results,
stew/keyed_queue, stew/keyed_queue,
../../sync/protocol/snap/snap_types, ../../sync/protocol/snap/snap_types,
"."/[aristo_desc, aristo_get, aristo_hike, aristo_path, aristo_serialise, "."/[aristo_desc, aristo_get, aristo_hike, aristo_layers, aristo_path,
aristo_vid] aristo_serialise, aristo_vid]
logScope: logScope:
topics = "aristo-merge" topics = "aristo-merge"
@ -81,10 +81,7 @@ proc nullifyKey(
vid: VertexID; # Vertex IDs to clear vid: VertexID; # Vertex IDs to clear
) = ) =
# Register for void hash (to be recompiled) # Register for void hash (to be recompiled)
let lbl = db.top.kMap.getOrVoid vid db.layersResLabel vid
db.top.pAmk.del lbl
db.top.kMap[vid] = VOID_HASH_LABEL
db.top.dirty = true # Modified top level cache
proc clearMerkleKeys( proc clearMerkleKeys(
db: AristoDbRef; # Database, top layer db: AristoDbRef; # Database, top layer
@ -99,8 +96,8 @@ proc setVtxAndKey(
vid: VertexID; # Vertex IDs to add/clear vid: VertexID; # Vertex IDs to add/clear
vtx: VertexRef; # Vertex to add vtx: VertexRef; # Vertex to add
) = ) =
db.top.sTab[vid] = vtx db.layersPutVtx(vid, vtx)
db.nullifyKey vid db.layersResLabel vid
# ----------- # -----------
@ -150,15 +147,12 @@ proc insertBranch(
var var
leafLeg = Leg(nibble: -1) leafLeg = Leg(nibble: -1)
# Will modify top level cache
db.top.dirty = true
# Install `forkVtx` # Install `forkVtx`
block: block:
# Clear Merkle hashes (aka hash keys) unless proof mode. # Clear Merkle hashes (aka hash keys) unless proof mode.
if db.top.pPrf.len == 0: if db.pPrf.len == 0:
db.clearMerkleKeys(hike, linkID) db.clearMerkleKeys(hike, linkID)
elif linkID in db.top.pPrf: elif linkID in db.pPrf:
return err(MergeNonBranchProofModeLock) return err(MergeNonBranchProofModeLock)
if linkVtx.vType == Leaf: if linkVtx.vType == Leaf:
@ -174,7 +168,7 @@ proc insertBranch(
local = db.vidFetch(pristine = true) local = db.vidFetch(pristine = true)
lty = LeafTie(root: hike.root, path: rc.value) lty = LeafTie(root: hike.root, path: rc.value)
db.top.lTab[lty] = local # update leaf path lookup cache db.top.final.lTab[lty] = local # update leaf path lookup cache
db.setVtxAndKey(local, linkVtx) db.setVtxAndKey(local, linkVtx)
linkVtx.lPfx = linkVtx.lPfx.slice(1+n) linkVtx.lPfx = linkVtx.lPfx.slice(1+n)
forkVtx.bVid[linkInx] = local forkVtx.bVid[linkInx] = local
@ -254,18 +248,15 @@ proc concatBranchAndLeaf(
return err(MergeRootBranchLinkBusy) return err(MergeRootBranchLinkBusy)
# Clear Merkle hashes (aka hash keys) unless proof mode. # Clear Merkle hashes (aka hash keys) unless proof mode.
if db.top.pPrf.len == 0: if db.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid) db.clearMerkleKeys(hike, brVid)
elif brVid in db.top.pPrf: elif brVid in db.pPrf:
return err(MergeBranchProofModeLock) # Ooops return err(MergeBranchProofModeLock) # Ooops
# Append branch vertex # Append branch vertex
var okHike = Hike(root: hike.root, legs: hike.legs) var okHike = Hike(root: hike.root, legs: hike.legs)
okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble) okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
# Will modify top level cache
db.top.dirty = true
# Append leaf vertex # Append leaf vertex
let let
vid = db.vidFetch(pristine = true) vid = db.vidFetch(pristine = true)
@ -310,14 +301,11 @@ proc topIsBranchAddLeaf(
# #
# <-------- immutable ------------> <---- mutable ----> .. # <-------- immutable ------------> <---- mutable ----> ..
# #
if db.top.pPrf.len == 0: if db.pPrf.len == 0:
# Not much else that can be done here # Not much else that can be done here
debug "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid, debug "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid,
nibble, linkID, leafPfx=hike.tail nibble, linkID, leafPfx=hike.tail
# Will modify top level cache
db.top.dirty = true
# Reuse placeholder entry in table # Reuse placeholder entry in table
let vtx = VertexRef( let vtx = VertexRef(
vType: Leaf, vType: Leaf,
@ -365,9 +353,6 @@ proc topIsExtAddLeaf(
# <-------- immutable --------------> # <-------- immutable -------------->
# #
# Will modify top level cache
db.top.dirty = true
let vtx = VertexRef( let vtx = VertexRef(
vType: Leaf, vType: Leaf,
lPfx: extVtx.ePfx & hike.tail, lPfx: extVtx.ePfx & hike.tail,
@ -392,13 +377,10 @@ proc topIsExtAddLeaf(
if linkID.isValid: if linkID.isValid:
return err(MergeRootBranchLinkBusy) return err(MergeRootBranchLinkBusy)
# Will modify top level cache
db.top.dirty = true
# Clear Merkle hashes (aka hash keys) unless proof mode # Clear Merkle hashes (aka hash keys) unless proof mode
if db.top.pPrf.len == 0: if db.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid) db.clearMerkleKeys(hike, brVid)
elif brVid in db.top.pPrf: elif brVid in db.pPrf:
return err(MergeBranchProofModeLock) return err(MergeBranchProofModeLock)
let let
@ -410,7 +392,6 @@ proc topIsExtAddLeaf(
brVtx.bVid[nibble] = vid brVtx.bVid[nibble] = vid
db.setVtxAndKey(brVid, brVtx) db.setVtxAndKey(brVid, brVtx)
db.setVtxAndKey(vid, vtx) db.setVtxAndKey(vid, vtx)
db.top.dirty = true # Modified top level cache
okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble) okHike.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)
okHike.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1) okHike.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)
@ -430,13 +411,10 @@ proc topIsEmptyAddLeaf(
if rootVtx.bVid[nibble].isValid: if rootVtx.bVid[nibble].isValid:
return err(MergeRootBranchLinkBusy) return err(MergeRootBranchLinkBusy)
# Will modify top level cache
db.top.dirty = true
# Clear Merkle hashes (aka hash keys) unless proof mode # Clear Merkle hashes (aka hash keys) unless proof mode
if db.top.pPrf.len == 0: if db.pPrf.len == 0:
db.clearMerkleKeys(hike, hike.root) db.clearMerkleKeys(hike, hike.root)
elif hike.root in db.top.pPrf: elif hike.root in db.pPrf:
return err(MergeBranchProofModeLock) return err(MergeBranchProofModeLock)
let let
@ -476,17 +454,15 @@ proc updatePayload(
lPfx: leafLeg.wp.vtx.lPfx, lPfx: leafLeg.wp.vtx.lPfx,
lData: payload) lData: payload)
var hike = hike var hike = hike
hike.legs[^1].backend = false
hike.legs[^1].wp.vtx = vtx hike.legs[^1].wp.vtx = vtx
# Modify top level cache # Modify top level cache
db.top.dirty = true
db.setVtxAndKey(vid, vtx) db.setVtxAndKey(vid, vtx)
db.top.lTab[leafTie] = vid db.top.final.lTab[leafTie] = vid
db.clearMerkleKeys(hike, vid) db.clearMerkleKeys(hike, vid)
ok hike ok hike
elif leafLeg.backend: elif db.layersGetVtx(leafLeg.wp.vid).isErr:
err(MergeLeafPathOnBackendAlready) err(MergeLeafPathOnBackendAlready)
else: else:
@ -537,7 +513,7 @@ proc mergeNodeImpl(
# order `root->.. ->leaf`. # order `root->.. ->leaf`.
let let
hashLbl = HashLabel(root: rootVid, key: hashKey) hashLbl = HashLabel(root: rootVid, key: hashKey)
vids = db.top.pAmk.getOrVoid(hashLbl).toSeq vids = db.layersGetLebalOrVoid(hashLbl).toSeq
isRoot = rootVid in vids isRoot = rootVid in vids
if vids.len == 0: if vids.len == 0:
return err(MergeRevVidMustHaveBeenCached) return err(MergeRevVidMustHaveBeenCached)
@ -546,11 +522,11 @@ proc mergeNodeImpl(
return err(MergeHashKeyRevLookUpGarbled) return err(MergeHashKeyRevLookUpGarbled)
# Use the first vertex ID from the `vis` list as representant for all others # Use the first vertex ID from the `vis` list as representant for all others
let lbl = db.top.kMap.getOrVoid vids[0] let lbl = db.layersGetLabelOrVoid vids[0]
if lbl == hashLbl: if lbl == hashLbl:
if db.top.sTab.hasKey vids[0]: if db.layersGetVtx(vids[0]).isOk:
for n in 1 ..< vids.len: for n in 1 ..< vids.len:
if not db.top.sTab.hasKey vids[n]: if db.layersGetVtx(vids[n]).isErr:
return err(MergeHashKeyRevLookUpGarbled) return err(MergeHashKeyRevLookUpGarbled)
# This is tyically considered OK # This is tyically considered OK
return err(MergeHashKeyCachedAlready) return err(MergeHashKeyCachedAlready)
@ -572,7 +548,7 @@ proc mergeNodeImpl(
# Verify that all `vids` entries are similar # Verify that all `vids` entries are similar
for n in 1 ..< vids.len: for n in 1 ..< vids.len:
let w = vids[n] let w = vids[n]
if lbl != db.top.kMap.getOrVoid(w) or db.top.sTab.hasKey(w): if lbl != db.layersGetLabelOrVoid(w) or db.layersGetVtx(w).isOk:
return err(MergeHashKeyRevLookUpGarbled) return err(MergeHashKeyRevLookUpGarbled)
if not hasVtx: if not hasVtx:
# Prefer existing node which has all links available, already. # Prefer existing node which has all links available, already.
@ -589,26 +565,27 @@ proc mergeNodeImpl(
let eLbl = HashLabel(root: rootVid, key: node.key[0]) let eLbl = HashLabel(root: rootVid, key: node.key[0])
if not hasVtx: if not hasVtx:
# Brand new reverse lookup link for this vertex # Brand new reverse lookup link for this vertex
vtx.eVid = db.vidAttach eLbl vtx.eVid = db.vidFetch
db.layersPutLabel(vtx.eVid, eLbl)
elif not vtx.eVid.isValid: elif not vtx.eVid.isValid:
return err(MergeNodeVtxDiffersFromExisting) return err(MergeNodeVtxDiffersFromExisting)
db.top.pAmk.append(eLbl, vtx.eVid) db.layersPutLabel(vtx.eVid, eLbl)
of Branch: of Branch:
for n in 0..15: for n in 0..15:
if node.key[n].isValid: if node.key[n].isValid:
let bLbl = HashLabel(root: rootVid, key: node.key[n]) let bLbl = HashLabel(root: rootVid, key: node.key[n])
if not hasVtx: if not hasVtx:
# Brand new reverse lookup link for this vertex # Brand new reverse lookup link for this vertex
vtx.bVid[n] = db.vidAttach bLbl vtx.bVid[n] = db.vidFetch
db.layersPutLabel(vtx.bVid[n], bLbl)
elif not vtx.bVid[n].isValid: elif not vtx.bVid[n].isValid:
return err(MergeNodeVtxDiffersFromExisting) return err(MergeNodeVtxDiffersFromExisting)
db.top.pAmk.append(bLbl, vtx.bVid[n]) db.layersPutLabel(vtx.bVid[n], bLbl)
for w in vids: for w in vids:
db.top.pPrf.incl w db.top.final.pPrf.incl w
if not hasVtx or db.getKey(w) != hashKey: if not hasVtx or db.getKey(w) != hashKey:
db.top.sTab[w] = vtx.dup db.layersPutVtx(w, vtx.dup)
db.top.dirty = true # Modified top level cache
ok() ok()
@ -629,7 +606,7 @@ proc merge*(
## ##
# Check whether the leaf is on the database and payloads match # Check whether the leaf is on the database and payloads match
block: block:
let vid = db.top.lTab.getOrVoid leafTie let vid = db.lTab.getOrVoid leafTie
if vid.isValid: if vid.isValid:
let vtx = db.getVtx vid let vtx = db.getVtx vid
if vtx.isValid and vtx.lData == payload: if vtx.isValid and vtx.lData == payload:
@ -672,7 +649,7 @@ proc merge*(
return err(MergeAssemblyFailed) # Ooops return err(MergeAssemblyFailed) # Ooops
# Update leaf acccess cache # Update leaf acccess cache
db.top.lTab[leafTie] = okHike.legs[^1].wp.vid db.top.final.lTab[leafTie] = okHike.legs[^1].wp.vid
ok okHike ok okHike
@ -820,10 +797,9 @@ proc merge*(
block: block:
let let
lbl = HashLabel(root: rootVid, key: rootKey) lbl = HashLabel(root: rootVid, key: rootKey)
vids = db.top.pAmk.getOrVoid lbl vids = db.layersGetLebalOrVoid lbl
if not vids.isValid: if not vids.isValid:
db.top.pAmk.append(lbl, rootVid) db.layersPutlabel(rootVid, lbl)
db.top.dirty = true # Modified top level cache
# Process over chains in reverse mode starting with the root node. This # Process over chains in reverse mode starting with the root node. This
# allows the algorithm to find existing nodes on the backend. # allows the algorithm to find existing nodes on the backend.
@ -875,7 +851,7 @@ proc merge*(
return ok rootVid return ok rootVid
if not key.isValid: if not key.isValid:
db.vidAttach(HashLabel(root: rootVid, key: rootLink), rootVid) db.layersPutLabel(rootVid, HashLabel(root: rootVid, key: rootLink))
return ok rootVid return ok rootVid
else: else:
let key = db.getKey VertexID(1) let key = db.getKey VertexID(1)
@ -884,13 +860,13 @@ proc merge*(
# Otherwise assign unless valid # Otherwise assign unless valid
if not key.isValid: if not key.isValid:
db.vidAttach(HashLabel(root: VertexID(1), key: rootLink), VertexID(1)) db.layersPutLabel(VertexID(1),HashLabel(root: VertexID(1), key: rootLink))
return ok VertexID(1) return ok VertexID(1)
# Create and assign a new root key # Create and assign a new root key
if not rootVid.isValid: if not rootVid.isValid:
let vid = db.vidFetch let vid = db.vidFetch
db.vidAttach(HashLabel(root: vid, key: rootLink), vid) db.layersPutLabel(vid, HashLabel(root: vid, key: rootLink))
return ok vid return ok vid
err(MergeRootKeyDiffersForVid) err(MergeRootKeyDiffersForVid)

View File

@ -15,7 +15,7 @@
import import
results, results,
"."/[aristo_desc, aristo_filter, aristo_get, aristo_hashify] "."/[aristo_desc, aristo_filter, aristo_get, aristo_layers, aristo_hashify]
func isTop*(tx: AristoTxRef): bool func isTop*(tx: AristoTxRef): bool
func level*(db: AristoDbRef): int func level*(db: AristoDbRef): int
@ -24,10 +24,6 @@ func level*(db: AristoDbRef): int
# Private helpers # Private helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
func fromVae(err: (VertexID,AristoError)): AristoError =
## Map error pair to error reason component
err[1]
func getDbDescFromTopTx(tx: AristoTxRef): Result[AristoDbRef,AristoError] = func getDbDescFromTopTx(tx: AristoTxRef): Result[AristoDbRef,AristoError] =
if not tx.isTop(): if not tx.isTop():
return err(TxNotTopTx) return err(TxNotTopTx)
@ -108,35 +104,32 @@ proc forkTx*(
## ##
let db = tx.db let db = tx.db
# Provide new top layer # Verify `tx` argument
var topLayer: LayerRef
if db.txRef == tx: if db.txRef == tx:
topLayer = db.top.dup if db.top.txUid != tx.txUid:
elif tx.level < db.stack.len: return err(TxArgStaleTx)
topLayer = db.stack[tx.level].dup elif db.stack.len <= tx.level:
else:
return err(TxArgStaleTx) return err(TxArgStaleTx)
if topLayer.txUid != tx.txUid: elif db.stack[tx.level].txUid != tx.txUid:
return err(TxArgStaleTx) return err(TxArgStaleTx)
topLayer.txUid = 1
# Provide new empty stack layer # Provide new empty stack layer
let stackLayer = block: let stackLayer = block:
let rc = db.getIdgBE() let rc = db.getIdgBE()
if rc.isOk: if rc.isOk:
LayerRef(vGen: rc.value) LayerRef(final: LayerFinal(vGen: rc.value))
elif rc.error == GetIdgNotFound: elif rc.error == GetIdgNotFound:
LayerRef() LayerRef()
else: else:
return err(rc.error) return err(rc.error)
let txClone = ? db.fork(rawToplayer = true)
# Set up clone associated to `db` # Set up clone associated to `db`
txClone.top = topLayer # is a deep copy let txClone = ? db.fork(rawToplayer = true)
txClone.stack = @[stackLayer] txClone.top = db.layersCc tx.level # Provide tx level 1 stack
txClone.roFilter = db.roFilter # no need to copy contents (done when updated) txClone.stack = @[stackLayer] # Zero level stack
txClone.roFilter = db.roFilter # No need to copy (done when updated)
txClone.backend = db.backend txClone.backend = db.backend
txClone.top.txUid = 1
txClone.txUidGen = 1 txClone.txUidGen = 1
# Install transaction similar to `tx` on clone # Install transaction similar to `tx` on clone
@ -146,10 +139,9 @@ proc forkTx*(
level: 1) level: 1)
if not dontHashify: if not dontHashify:
let rc = txClone.hashify() discard txClone.hashify().valueOr:
if rc.isErr:
discard txClone.forget() discard txClone.forget()
return err(rc.error.fromVae) return err(error[1])
ok(txClone) ok(txClone)
@ -166,15 +158,14 @@ proc forkTop*(
if db.txRef.isNil: if db.txRef.isNil:
let dbClone = ? db.fork(rawToplayer = true) let dbClone = ? db.fork(rawToplayer = true)
dbClone.top = db.top.dup # is a deep copy dbClone.top = db.layersCc # Is a deep copy
dbClone.roFilter = db.roFilter # no need to copy contents when updated dbClone.roFilter = db.roFilter # No need to copy contents when updated
dbClone.backend = db.backend dbClone.backend = db.backend
if not dontHashify: if not dontHashify:
let rc = dbClone.hashify() discard dbClone.hashify().valueOr:
if rc.isErr:
discard dbClone.forget() discard dbClone.forget()
return err(rc.error.fromVae) return err(error[1])
return ok(dbClone) return ok(dbClone)
db.txRef.forkTx dontHashify db.txRef.forkTx dontHashify
@ -215,8 +206,10 @@ proc txBegin*(db: AristoDbRef): Result[AristoTxRef,AristoError] =
if db.level != db.stack.len: if db.level != db.stack.len:
return err(TxStackGarbled) return err(TxStackGarbled)
db.stack.add db.top.dup # push (save and use top later) db.stack.add db.top
db.top.txUid = db.getTxUid() db.top = LayerRef(
final: db.top.final,
txUid: db.getTxUid)
db.txRef = AristoTxRef( db.txRef = AristoTxRef(
db: db, db: db,
@ -252,13 +245,20 @@ proc commit*(
## previous transaction is returned if there was any. ## previous transaction is returned if there was any.
## ##
let db = ? tx.getDbDescFromTopTx() let db = ? tx.getDbDescFromTopTx()
discard ? db.hashify().mapErr fromVae discard db.hashify().valueOr:
return err(error[1])
# Keep top and discard layer below # Replace the top two layers by its merged version
db.top.txUid = db.stack[^1].txUid let merged = db.top.layersMergeOnto db.stack[^1]
# Install `merged` layer
db.top = merged
db.stack.setLen(db.stack.len-1) db.stack.setLen(db.stack.len-1)
db.txRef = tx.parent
if 0 < db.stack.len:
db.txRef.txUid = db.getTxUid
db.top.txUid = db.txRef.txUid
db.txRef = db.txRef.parent
ok() ok()
@ -278,7 +278,8 @@ proc collapse*(
if commit: if commit:
# For commit, hashify the current layer if requested and install it # For commit, hashify the current layer if requested and install it
discard ? db.hashify().mapErr fromVae discard db.hashify().valueOr:
return err(error[1])
db.top.txUid = 0 db.top.txUid = 0
db.stack.setLen(0) db.stack.setLen(0)
@ -316,36 +317,29 @@ proc stow*(
if persistent and not db.canResolveBackendFilter(): if persistent and not db.canResolveBackendFilter():
return err(TxBackendNotWritable) return err(TxBackendNotWritable)
discard ? db.hashify().mapErr fromVae discard db.hashify().valueOr:
return err(error[1])
let fwd = ? db.fwdFilter(db.top, chunkedMpt).mapErr fromVae let fwd = db.fwdFilter(db.top, chunkedMpt).valueOr:
return err(error[1])
if fwd.isValid: if fwd.isValid:
# Merge `top` layer into `roFilter` # Merge `top` layer into `roFilter`
? db.merge(fwd).mapErr fromVae db.merge(fwd).isOkOr:
db.top = LayerRef(vGen: db.roFilter.vGen) return err(error[1])
db.top = LayerRef(final: LayerFinal(vGen: db.roFilter.vGen))
if persistent: if persistent:
? db.resolveBackendFilter() ? db.resolveBackendFilter()
db.roFilter = FilterRef(nil) db.roFilter = FilterRef(nil)
# Delete or clear stack and clear top # Delete/clear top
db.stack.setLen(0) db.top = LayerRef(
db.top = LayerRef(vGen: db.top.vGen, txUid: db.top.txUid) final: LayerFinal(vGen: db.vGen),
txUid: db.top.txUid)
ok() ok()
proc stow*(
db: AristoDbRef; # Database
stageLimit: int; # Policy based persistent storage
chunkedMpt = false; # Partial data (e.g. from `snap`)
): Result[void,AristoError] =
## Variant of `stow()` with the `persistent` argument replaced by
## `stageLimit < max(db.roFilter.bulk, db.top.bulk)`.
db.stow(
persistent = (stageLimit < max(db.roFilter.bulk, db.top.bulk)),
chunkedMpt = chunkedMpt)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -16,7 +16,7 @@
import import
eth/common, eth/common,
results, results,
"."/[aristo_desc, aristo_get] "."/[aristo_desc, aristo_get, aristo_layers]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions, converters # Public functions, converters
@ -106,10 +106,13 @@ proc toNode*(
## storage root. ## storage root.
## ##
proc getKey(db: AristoDbRef; vid: VertexID; beOk: bool): HashKey = proc getKey(db: AristoDbRef; vid: VertexID; beOk: bool): HashKey =
block: block body:
let lbl = db.top.kMap.getOrVoid vid let lbl = db.layersGetLabel(vid).valueOr:
break body
if lbl.isValid: if lbl.isValid:
return lbl.key return lbl.key
else:
return VOID_HASH_KEY
if beOk: if beOk:
let rc = db.getKeyBE vid let rc = db.getKeyBE vid
if rc.isOk: if rc.isOk:
@ -137,7 +140,7 @@ proc toNode*(
for n in 0 .. 15: for n in 0 .. 15:
let vid = vtx.bVid[n] let vid = vtx.bVid[n]
if vid.isValid: if vid.isValid:
let key = db.getKey(vid, beKeyOk) let key = db.getKey(vid, beOk=beKeyOk)
if key.isValid: if key.isValid:
node.key[n] = key node.key[n] = key
elif stopEarly: elif stopEarly:
@ -151,7 +154,7 @@ proc toNode*(
of Extension: of Extension:
let let
vid = vtx.eVid vid = vtx.eVid
key = db.getKey(vid, beKeyOk) key = db.getKey(vid, beOk=beKeyOk)
if not key.isValid: if not key.isValid:
return err(@[vid]) return err(@[vid])
let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid) let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vid)

View File

@ -14,8 +14,8 @@
{.push raises: [].} {.push raises: [].}
import import
std/[algorithm, sequtils, tables], std/[algorithm, sequtils],
./aristo_desc "."/[aristo_desc, aristo_layers]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
@ -30,45 +30,44 @@ proc vidFetch*(db: AristoDbRef; pristine = false): VertexID =
## When the argument `pristine` is set `true`, the function guarantees to ## When the argument `pristine` is set `true`, the function guarantees to
## return a non-recycled, brand new vertex *ID* which is the preferred mode ## return a non-recycled, brand new vertex *ID* which is the preferred mode
## when creating leaf vertices. ## when creating leaf vertices.
let top = db.top if db.vGen.len == 0:
if top.vGen.len == 0:
# Note that `VertexID(1)` is the root of the main trie # Note that `VertexID(1)` is the root of the main trie
top.vGen = @[VertexID(3)] db.top.final.vGen = @[VertexID(3)]
result = VertexID(2) result = VertexID(2)
elif top.vGen.len == 1 or pristine: elif db.vGen.len == 1 or pristine:
result = top.vGen[^1] result = db.vGen[^1]
top.vGen[^1] = result + 1 db.top.final.vGen[^1] = result + 1
else: else:
result = top.vGen[^2] result = db.vGen[^2]
top.vGen[^2] = top.vGen[^1] db.top.final.vGen[^2] = db.top.final.vGen[^1]
top.vGen.setLen(top.vGen.len-1) db.top.final.vGen.setLen(db.vGen.len-1)
proc vidPeek*(db: AristoDbRef): VertexID = proc vidPeek*(db: AristoDbRef): VertexID =
## Like `new()` without consuming this *ID*. It will return the *ID* that ## Like `new()` without consuming this *ID*. It will return the *ID* that
## would be returned by the `new()` function. ## would be returned by the `new()` function.
case db.top.vGen.len: case db.vGen.len:
of 0: of 0:
VertexID(2) VertexID(2)
of 1: of 1:
db.top.vGen[^1] db.vGen[^1]
else: else:
db.top.vGen[^2] db.vGen[^2]
proc vidDispose*(db: AristoDbRef; vid: VertexID) = proc vidDispose*(db: AristoDbRef; vid: VertexID) =
## Recycle the argument `vtxID` which is useful after deleting entries from ## Recycle the argument `vtxID` which is useful after deleting entries from
## the vertex table to prevent the `VertexID` type key values small. ## the vertex table to prevent the `VertexID` type key values small.
if VertexID(1) < vid: if VertexID(1) < vid:
if db.top.vGen.len == 0: if db.vGen.len == 0:
db.top.vGen = @[vid] db.top.final.vGen = @[vid]
else: else:
let topID = db.top.vGen[^1] let topID = db.vGen[^1]
# Only store smaller numbers: all numberts larger than `topID` # Only store smaller numbers: all numberts larger than `topID`
# are free numbers # are free numbers
if vid < topID: if vid < topID:
db.top.vGen[^1] = vid db.top.final.vGen[^1] = vid
db.top.vGen.add topID db.top.final.vGen.add topID
proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] = proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] =
## Return a compacted version of the argument vertex ID generator state ## Return a compacted version of the argument vertex ID generator state
@ -95,17 +94,6 @@ proc vidReorg*(vGen: seq[VertexID]): seq[VertexID] =
vGen vGen
proc vidAttach*(db: AristoDbRef; lbl: HashLabel; vid: VertexID) =
## Attach (i.r. register) a Merkle hash key to a vertex ID.
db.top.pAmk.append(lbl, vid)
db.top.kMap[vid] = lbl
db.top.dirty = true # Modified top level cache
proc vidAttach*(db: AristoDbRef; lbl: HashLabel): VertexID {.discardable.} =
## Variant of `vidAttach()` with auto-generated vertex ID
result = db.vidFetch
db.vidAttach(lbl, result)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -10,9 +10,9 @@
# distributed except according to those terms. # distributed except according to those terms.
import import
std/[algorithm, sequtils, tables], std/[sequtils, sets, tables],
results, results,
".."/[aristo_desc, aristo_get, aristo_init, aristo_utils] ".."/[aristo_desc, aristo_get, aristo_layers, aristo_init, aristo_utils]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public generic iterators # Public generic iterators
@ -129,11 +129,13 @@ iterator walkPairsImpl*[T](
): tuple[vid: VertexID, vtx: VertexRef] = ): tuple[vid: VertexID, vtx: VertexRef] =
## Walk over all `(VertexID,VertexRef)` in the database. Note that entries ## Walk over all `(VertexID,VertexRef)` in the database. Note that entries
## are unsorted. ## are unsorted.
for (vid,vtx) in db.top.sTab.pairs: var seen: HashSet[VertexID]
for (vid,vtx) in db.layersWalkVtx seen:
if vtx.isValid: if vtx.isValid:
yield (vid,vtx) yield (vid,vtx)
for (_,vid,vtx) in walkVtxBeImpl[T](db): for (_,vid,vtx) in walkVtxBeImpl[T](db):
if vid notin db.top.sTab and vtx.isValid: if vid notin seen:
yield (vid,vtx) yield (vid,vtx)
iterator replicateImpl*[T]( iterator replicateImpl*[T](

View File

@ -579,7 +579,7 @@ func txTop*(
): CoreDbRc[AristoTxRef] = ): CoreDbRc[AristoTxRef] =
base.adb.txTop.toRc(base.parent, info) base.adb.txTop.toRc(base.parent, info)
func txBegin*( proc txBegin*(
base: AristoBaseRef; base: AristoBaseRef;
info: static[string]; info: static[string];
): CoreDbRc[AristoTxRef] = ): CoreDbRc[AristoTxRef] =
@ -610,7 +610,7 @@ proc getHash*(
let key = block: let key = block:
let rc = mpt.getKeyRc aVid let rc = mpt.getKeyRc aVid
if rc.isErr: if rc.isErr:
doAssert rc.error in {GetKeyNotFound,GetKeyTempLocked} doAssert rc.error in {GetKeyNotFound,GetKeyUpdateNeeded}
return err(rc.error.toError(db, info, HashNotAvailable)) return err(rc.error.toError(db, info, HashNotAvailable))
rc.value rc.value

View File

@ -322,7 +322,7 @@ func txTop*(
): CoreDbRc[KvtTxRef] = ): CoreDbRc[KvtTxRef] =
base.kdb.txTop.toRc(base.parent, info) base.kdb.txTop.toRc(base.parent, info)
func txBegin*( proc txBegin*(
base: KvtBaseRef; base: KvtBaseRef;
info: static[string]; info: static[string];
): CoreDbRc[KvtTxRef] = ): CoreDbRc[KvtTxRef] =

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -15,9 +15,9 @@ import
eth/common, eth/common,
results, results,
stew/byteutils, stew/byteutils,
./kvt_desc,
./kvt_desc/desc_backend, ./kvt_desc/desc_backend,
./kvt_init/[memory_db, memory_only, rocks_db] ./kvt_init/[memory_db, memory_only, rocks_db],
"."/[kvt_desc, kvt_layers]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
@ -127,18 +127,17 @@ proc ppBe[T](be: T; db: KvtDbRef; indent: int): string =
spc = if 0 < data.len: pfx2 else: " " spc = if 0 < data.len: pfx2 else: " "
"<" & $be.kind & ">" & pfx1 & "tab" & spc & "{" & data & "}" "<" & $be.kind & ">" & pfx1 & "tab" & spc & "{" & data & "}"
# ------------------------------------------------------------------------------ proc ppLayer(layer: LayerRef; db: KvtDbRef; indent = 4): string =
# Public functions
# ------------------------------------------------------------------------------
proc pp*(layer: LayerRef; db: KvtDbRef; indent = 4): string =
let let
tLen = layer.tab.len tLen = layer.dTab.len
info = "tab(" & $tLen & ")" info = "tab(" & $tLen & ")"
pfx1 = indent.toPfx(1) pfx1 = indent.toPfx(1)
pfx2 = if 0 < tLen: indent.toPfx(2) else: " " pfx2 = if 0 < tLen: indent.toPfx(2) else: " "
"<layer>" & pfx1 & info & pfx2 & layer.tab.ppTab(db,indent+2) "<layer>" & pfx1 & info & pfx2 & layer.dTab.ppTab(db,indent+2)
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc pp*( proc pp*(
be: BackendRef; be: BackendRef;
@ -162,7 +161,7 @@ proc pp*(
let let
pfx = indent.toPfx pfx = indent.toPfx
pfx1 = indent.toPfx(1) pfx1 = indent.toPfx(1)
result = db.top.pp(db, indent=indent) result = db.layersCc.ppLayer(db, indent=indent)
if backendOk: if backendOk:
result &= pfx & db.backend.pp(db, indent=indent) result &= pfx & db.backend.pp(db, indent=indent)
if keysOk: if keysOk:

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -18,23 +18,16 @@ import
eth/common eth/common
type type
LayerDelta* = object
## Delta tables relative to previous layer
sTab*: Table[Blob,Blob] ## Structural data table
LayerRef* = ref object LayerRef* = ref object
## Kvt database layer structures. Any layer holds the full ## Kvt database layer structures. Any layer holds the full
## change relative to the backend. ## change relative to the backend.
tab*: Table[Blob,Blob] ## Structural table delta*: LayerDelta ## Structural tables held as deltas
txUid*: uint ## Transaction identifier if positive txUid*: uint ## Transaction identifier if positive
# ------------------------------------------------------------------------------
# Public helpers, miscellaneous functions
# ------------------------------------------------------------------------------
proc dup*(layer: LayerRef): LayerRef =
## Duplicate layer.
result = LayerRef(
txUid: layer.txUid)
for (k,v) in layer.tab.pairs:
result.tab[k] = v
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -0,0 +1,46 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/tables,
eth/common,
./kvt_desc
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc layersCc*(db: KvtDbRef; level = high(int)): LayerRef =
## Provide a collapsed copy of layers up to a particular transaction level.
## If the `level` argument is too large, the maximum transaction level is
## returned. For the result layer, the `txUid` value set to `0`.
let level = min(level, db.stack.len)
# Merge stack into its bottom layer
if level <= 0 and db.stack.len == 0:
result = LayerRef(delta: LayerDelta(sTab: db.top.delta.sTab))
else:
# now: 0 < level <= db.stack.len
result = LayerRef(delta: LayerDelta(sTab: db.stack[0].delta.sTab))
for n in 1 ..< level:
for (key,val) in db.stack[n].delta.sTab.pairs:
result.delta.sTab[key] = val
# Merge top layer if needed
if level == db.stack.len:
for (key,val) in db.top.delta.sTab.pairs:
result.delta.sTab[key] = val
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -15,9 +15,10 @@
import import
std/[sequtils, tables], std/[sequtils, tables],
eth/common,
results, results,
./kvt_desc/desc_backend, ./kvt_desc/desc_backend,
./kvt_desc "."/[kvt_desc, kvt_layers]
func isTop*(tx: KvtTxRef): bool func isTop*(tx: KvtTxRef): bool
@ -86,25 +87,21 @@ proc forkTx*(tx: KvtTxRef): Result[KvtDbRef,KvtError] =
## ##
let db = tx.db let db = tx.db
# Provide new top layer # Verify `tx` argument
var topLayer: LayerRef
if db.txRef == tx: if db.txRef == tx:
topLayer = db.top.dup if db.top.txUid != tx.txUid:
elif tx.level < db.stack.len: return err(TxArgStaleTx)
topLayer = db.stack[tx.level].dup elif db.stack.len <= tx.level:
else:
return err(TxArgStaleTx) return err(TxArgStaleTx)
if topLayer.txUid != tx.txUid: elif db.stack[tx.level].txUid != tx.txUid:
return err(TxArgStaleTx) return err(TxArgStaleTx)
topLayer.txUid = 1
let txClone = ? db.fork()
# Set up clone associated to `db` # Set up clone associated to `db`
txClone.top = topLayer # is a deep copy let txClone = ? db.fork()
txClone.stack = @[LayerRef()] txClone.top = db.layersCc tx.level
txClone.backend = db.backend txClone.stack = @[LayerRef()] # Provide tx level 1 stack
txClone.txUidGen = 1 txClone.top.txUid = 1
txClone.txUidGen = 1 # Used value of `txClone.top.txUid`
# Install transaction similar to `tx` on clone # Install transaction similar to `tx` on clone
txClone.txRef = KvtTxRef( txClone.txRef = KvtTxRef(
@ -122,10 +119,7 @@ proc forkTop*(db: KvtDbRef): Result[KvtDbRef,KvtError] =
## ##
if db.txRef.isNil: if db.txRef.isNil:
let dbClone = ? db.fork() let dbClone = ? db.fork()
dbClone.top = db.layersCc
dbClone.top = db.top.dup # is a deep copy
dbClone.backend = db.backend
return ok(dbClone) return ok(dbClone)
db.txRef.forkTx() db.txRef.forkTx()
@ -162,9 +156,8 @@ proc txBegin*(db: KvtDbRef): Result[KvtTxRef,KvtError] =
if db.level != db.stack.len: if db.level != db.stack.len:
return err(TxStackGarbled) return err(TxStackGarbled)
db.stack.add db.top.dup # push (save and use top later) db.stack.add db.top
db.top.txUid = db.getTxUid() db.top = LayerRef(txUid: db.getTxUid)
db.txRef = KvtTxRef( db.txRef = KvtTxRef(
db: db, db: db,
txUid: db.top.txUid, txUid: db.top.txUid,
@ -199,11 +192,19 @@ proc commit*(
## ##
let db = ? tx.getDbDescFromTopTx() let db = ? tx.getDbDescFromTopTx()
# Keep top and discard layer below # Replace the top two layers by its merged version
db.top.txUid = db.stack[^1].txUid let merged = db.stack[^1]
db.stack.setLen(db.stack.len-1) for (key,val) in db.top.delta.sTab.pairs:
merged.delta.sTab[key] = val
# Install `merged` layer
db.top = merged
db.stack.setLen(db.stack.len-1)
db.txRef = tx.parent db.txRef = tx.parent
if 0 < db.stack.len:
db.txRef.txUid = db.getTxUid
db.top.txUid = db.txRef.txUid
ok() ok()
@ -221,12 +222,13 @@ proc collapse*(
## ##
let db = ? tx.getDbDescFromTopTx() let db = ? tx.getDbDescFromTopTx()
# If commit, then leave the current layer and clear the stack, otherwise if commit:
# install the stack bottom. db.top = db.layersCc
if not commit: else:
db.stack[0].swap db.top db.top = db.stack[0]
db.top.txUid = 0
db.top.txUid = 0 # Clean up
db.stack.setLen(0) db.stack.setLen(0)
db.txRef = KvtTxRef(nil) db.txRef = KvtTxRef(nil)
ok() ok()
@ -255,12 +257,11 @@ proc stow*(
# Save structural and other table entries # Save structural and other table entries
let txFrame = be.putBegFn() let txFrame = be.putBegFn()
be.putKvpFn(txFrame, db.top.tab.pairs.toSeq) be.putKvpFn(txFrame, db.top.delta.sTab.pairs.toSeq)
? be.putEndFn txFrame ? be.putEndFn txFrame
# Delete or clear stack and clear top # Clean up
db.stack.setLen(0) db.top.delta.sTab.clear
db.top = LayerRef(txUid: db.top.txUid)
ok() ok()

View File

@ -1,5 +1,5 @@
# nimbus-eth1 # nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH # Copyright (c) 2023 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -14,6 +14,7 @@
{.push raises: [].} {.push raises: [].}
import import
std/algorithm,
eth/common, eth/common,
results, results,
./kvt_desc/desc_backend, ./kvt_desc/desc_backend,
@ -23,7 +24,7 @@ import
# Private helpers # Private helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc getBE*( proc getBE(
db: KvtDbRef; # Database db: KvtDbRef; # Database
key: openArray[byte]; # Key of database record key: openArray[byte]; # Key of database record
): Result[Blob,KvtError] = ): Result[Blob,KvtError] =
@ -51,7 +52,7 @@ proc put*(
if data.len == 0: if data.len == 0:
return err(DataInvalid) return err(DataInvalid)
db.top.tab[@key] = @data db.top.delta.sTab[@key] = @data
ok() ok()
@ -64,14 +65,22 @@ proc del*(
if key.len == 0: if key.len == 0:
return err(KeyInvalid) return err(KeyInvalid)
let rc = db.getBE(key) block haveKey:
if rc.isOk: for w in db.stack.reversed:
db.top.tab[@key] = EmptyBlob if w.delta.sTab.hasKey @key:
elif rc.error == GetNotFound: break haveKey
db.top.tab.del @key
else:
return err(rc.error)
# Do this one last as it is the most expensive lookup
let rc = db.getBE key
if rc.isOk:
break haveKey
if rc.error != GetNotFound:
return err(rc.error)
db.top.delta.sTab.del @key # No such key anywhere => delete now
return ok()
db.top.delta.sTab[@key] = EmptyBlob # Mark for deletion
ok() ok()
# ------------ # ------------
@ -85,11 +94,21 @@ proc get*(
## ##
if key.len == 0: if key.len == 0:
return err(KeyInvalid) return err(KeyInvalid)
let data = db.top.tab.getOrVoid @key
if data.isValid: block:
return ok(data) let data = db.top.delta.sTab.getOrVoid @key
if data.isValid:
return ok(data)
block:
for w in db.stack.reversed:
let data = w.delta.sTab.getOrVoid @key
if data.isValid:
return ok(data)
db.getBE key db.getBE key
proc hasKey*( proc hasKey*(
db: KvtDbRef; # Database db: KvtDbRef; # Database
key: openArray[byte]; # Key of database record key: openArray[byte]; # Key of database record
@ -99,9 +118,14 @@ proc hasKey*(
## ##
if key.len == 0: if key.len == 0:
return err(KeyInvalid) return err(KeyInvalid)
let data = db.top.tab.getOrVoid @key
if data.isValid: if db.top.delta.sTab.hasKey @key:
return ok(true) return ok(true)
for w in db.stack.reversed:
if w.delta.sTab.haskey @key:
return ok(true)
let rc = db.getBE key let rc = db.getBE key
if rc.isOk: if rc.isOk:
return ok(true) return ok(true)

View File

@ -1,6 +1,5 @@
# Nimbus - Types, data structures and shared utilities used in network sync # Nimbus-eth1
# # Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,6 +1,5 @@
# Nimbus - Types, data structures and shared utilities used in network sync # Nimbus-eth1
# # Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,6 +1,5 @@
# Nimbus - Types, data structures and shared utilities used in network sync # Nimbus-eth1
# # Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)

View File

@ -1,6 +1,5 @@
# Nimbus - Types, data structures and shared utilities used in network sync # Nimbus-eth1
# # Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed under either of # Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or # * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0) # http://www.apache.org/licenses/LICENSE-2.0)
@ -10,7 +9,7 @@
# distributed except according to those terms. # distributed except according to those terms.
import import
std/tables, std/[algorithm, sets, tables],
eth/common, eth/common,
".."/[kvt_desc, kvt_init] ".."/[kvt_desc, kvt_init]
@ -24,17 +23,28 @@ iterator walkPairsImpl*[T](
## Walk over all `(VertexID,VertexRef)` in the database. Note that entries ## Walk over all `(VertexID,VertexRef)` in the database. Note that entries
## are unsorted. ## are unsorted.
var i = 0 var
for (key,data) in db.top.tab.pairs: seen: HashSet[Blob]
i = 0
for (key,data) in db.top.delta.sTab.pairs:
if data.isValid: if data.isValid:
yield (i,key,data) yield (i,key,data)
inc i i.inc
seen.incl key
for w in db.stack.reversed:
for (key,data) in w.delta.sTab.pairs:
if key notin seen:
if data.isValid:
yield (i,key,data)
i.inc
seen.incl key
when T isnot VoidBackendRef: when T isnot VoidBackendRef:
mixin walk mixin walk
for (n,key,data) in db.backend.T.walk: for (n,key,data) in db.backend.T.walk:
if key notin db.top.tab and data.isValid: if key notin seen and data.isValid:
yield (n+i,key,data) yield (n+i,key,data)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -107,7 +107,7 @@ proc verify(
for vid in beSTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID): for vid in beSTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
let let
nVtx = ly.sTab.getOrVoid vid nVtx = ly.delta.sTab.getOrVoid vid
mVtx = beSTab.getOrVoid vid mVtx = beSTab.getOrVoid vid
xCheck (nVtx != VertexRef(nil)) xCheck (nVtx != VertexRef(nil))
@ -119,8 +119,8 @@ proc verify(
" nVtx=", nVtx.pp, " nVtx=", nVtx.pp,
" mVtx=", mVtx.pp " mVtx=", mVtx.pp
xCheck beSTab.len == ly.sTab.len xCheck beSTab.len == ly.delta.sTab.len
xCheck beKMap.len == ly.kMap.len xCheck beKMap.len == ly.delta.kMap.len
true true

View File

@ -18,7 +18,7 @@ import
unittest2, unittest2,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get, aristo_check, aristo_debug, aristo_desc, aristo_filter, aristo_get,
aristo_merge, aristo_persistent, aristo_blobify], aristo_layers, aristo_merge, aristo_persistent, aristo_blobify],
../../nimbus/db/aristo, ../../nimbus/db/aristo,
../../nimbus/db/aristo/aristo_desc/desc_backend, ../../nimbus/db/aristo/aristo_desc/desc_backend,
../../nimbus/db/aristo/aristo_filter/[filter_fifos, filter_scheduler], ../../nimbus/db/aristo/aristo_filter/[filter_fifos, filter_scheduler],
@ -336,7 +336,7 @@ proc checkBeOk(
## .. ## ..
for n in 0 ..< dx.len: for n in 0 ..< dx.len:
let let
cache = if forceCache: true else: not dx[n].top.dirty cache = if forceCache: true else: not dx[n].dirty
rc = dx[n].checkBE(relax=relax, cache=cache) rc = dx[n].checkBE(relax=relax, cache=cache)
xCheckRc rc.error == (0,0): xCheckRc rc.error == (0,0):
noisy.say "***", "db check failed", noisy.say "***", "db check failed",

View File

@ -19,7 +19,8 @@ import
unittest2, unittest2,
../../nimbus/db/aristo, ../../nimbus/db/aristo,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_desc, aristo_blobify, aristo_vid], aristo_check, aristo_debug, aristo_desc, aristo_blobify, aristo_layers,
aristo_vid],
../../nimbus/db/aristo/aristo_filter/filter_scheduler, ../../nimbus/db/aristo/aristo_filter/filter_scheduler,
../replay/xcheck, ../replay/xcheck,
./test_helpers ./test_helpers
@ -275,41 +276,41 @@ proc testVidRecycleLists*(noisy = true; seed = 42): bool =
expectedVids += (vid < first).ord expectedVids += (vid < first).ord
db.vidDispose vid db.vidDispose vid
xCheck db.top.vGen.len == expectedVids xCheck db.vGen.len == expectedVids
noisy.say "***", "vids=", db.top.vGen.len, " discarded=", count-expectedVids noisy.say "***", "vids=", db.vGen.len, " discarded=", count-expectedVids
# Serialise/deserialise # Serialise/deserialise
block: block:
let dbBlob = db.top.vGen.blobify let dbBlob = db.vGen.blobify
# Deserialise # Deserialise
let let
db1 = AristoDbRef.init() db1 = AristoDbRef.init()
rc = dbBlob.deblobify seq[VertexID] rc = dbBlob.deblobify seq[VertexID]
xCheckRc rc.error == 0 xCheckRc rc.error == 0
db1.top.vGen = rc.value db1.top.final.vGen = rc.value
xCheck db.top.vGen == db1.top.vGen xCheck db.vGen == db1.vGen
# Make sure that recycled numbers are fetched first # Make sure that recycled numbers are fetched first
let topVid = db.top.vGen[^1] let topVid = db.vGen[^1]
while 1 < db.top.vGen.len: while 1 < db.vGen.len:
let w = db.vidFetch() let w = db.vidFetch()
xCheck w < topVid xCheck w < topVid
xCheck db.top.vGen.len == 1 and db.top.vGen[0] == topVid xCheck db.vGen.len == 1 and db.vGen[0] == topVid
# Get some consecutive vertex IDs # Get some consecutive vertex IDs
for n in 0 .. 5: for n in 0 .. 5:
let w = db.vidFetch() let w = db.vidFetch()
xCheck w == topVid + n xCheck w == topVid + n
xCheck db.top.vGen.len == 1 xCheck db.vGen.len == 1
# Repeat last test after clearing the cache # Repeat last test after clearing the cache
db.top.vGen.setLen(0) db.top.final.vGen.setLen(0)
for n in 0 .. 5: for n in 0 .. 5:
let w = db.vidFetch() let w = db.vidFetch()
xCheck w == VertexID(2) + n # VertexID(1) is default root ID xCheck w == VertexID(2) + n # VertexID(1) is default root ID
xCheck db.top.vGen.len == 1 xCheck db.vGen.len == 1
# Recycling and re-org tests # Recycling and re-org tests
func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(it)) func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(it))
@ -491,6 +492,8 @@ proc testShortKeys*(
"\n k=", k.toHex, " v=", v.toHex, "\n k=", k.toHex, " v=", v.toHex,
"\n r=", r.pp(sig), "\n r=", r.pp(sig),
"\n ", sig.pp(), "\n ", sig.pp(),
"\n",
"\n pAmk=", sig.db.layersWalkLebal.toSeq.toTable.pp(sig.db),
"\n" "\n"
let w = sig.merkleSignCommit().value let w = sig.merkleSignCommit().value
gossip.say "*** testShortkeys (2)", "n=", n, " inx=", inx, gossip.say "*** testShortkeys (2)", "n=", n, " inx=", inx,
@ -499,6 +502,8 @@ proc testShortKeys*(
"\n R=", w.pp(sig), "\n R=", w.pp(sig),
"\n ", sig.pp(), "\n ", sig.pp(),
"\n", "\n",
"\n pAmk=", sig.db.layersWalkLebal.toSeq.toTable.pp(sig.db),
"\n",
"\n ----------------", "\n ----------------",
"\n" "\n"
let rc = sig.db.check let rc = sig.db.check

View File

@ -18,7 +18,7 @@ import
stew/endians2, stew/endians2,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_check, aristo_debug, aristo_delete, aristo_desc, aristo_get, aristo_check, aristo_debug, aristo_delete, aristo_desc, aristo_get,
aristo_merge], aristo_layers, aristo_merge],
../../nimbus/db/[aristo, aristo/aristo_init/persistent], ../../nimbus/db/[aristo, aristo/aristo_init/persistent],
../replay/xcheck, ../replay/xcheck,
./test_helpers ./test_helpers
@ -88,7 +88,7 @@ proc randomisedLeafs(
db: AristoDbRef; db: AristoDbRef;
td: var PrngDesc; td: var PrngDesc;
): seq[(LeafTie,VertexID)] = ): seq[(LeafTie,VertexID)] =
result = db.top.lTab.pairs.toSeq.filterIt(it[1].isvalid).sorted( result = db.lTab.pairs.toSeq.filterIt(it[1].isvalid).sorted(
cmp = proc(a,b: (LeafTie,VertexID)): int = cmp(a[0], b[0])) cmp = proc(a,b: (LeafTie,VertexID)): int = cmp(a[0], b[0]))
if 2 < result.len: if 2 < result.len:
for n in 0 ..< result.len-1: for n in 0 ..< result.len-1:
@ -103,6 +103,18 @@ proc innerCleanUp(db: AristoDbRef): bool {.discardable.} =
xCheckRc rc.error == 0 xCheckRc rc.error == 0
db.finish(flush=true) db.finish(flush=true)
proc schedStow(
db: AristoDbRef; # Database
chunkedMpt = false; # Partial data (e.g. from `snap`)
): Result[void,AristoError] =
## Scheduled storage
let
layersMeter = db.nLayersVtx + db.nLayersLabel
filterMeter = if db.roFilter.isNil: 0
else: db.roFilter.sTab.len + db.roFilter.kMap.len
persistent = MaxFilterBulk < max(layersMeter, filterMeter)
db.stow(persistent = persistent, chunkedMpt = chunkedMpt)
proc saveToBackend( proc saveToBackend(
tx: var AristoTxRef; tx: var AristoTxRef;
chunkedMpt: bool; chunkedMpt: bool;
@ -125,7 +137,7 @@ proc saveToBackend(
xCheckRc rc.error == 0 xCheckRc rc.error == 0
# Make sure MPT hashes are OK # Make sure MPT hashes are OK
xCheck db.top.dirty == false xCheck db.dirty == false
block: block:
let rc = db.txTop() let rc = db.txTop()
@ -145,14 +157,14 @@ proc saveToBackend(
xCheckRc rc.error == 0 xCheckRc rc.error == 0
# Make sure MPT hashes are OK # Make sure MPT hashes are OK
xCheck db.top.dirty == false xCheck db.dirty == false
block: block:
let rc = db.txTop() let rc = db.txTop()
xCheckErr rc.value.level < 0 # force error xCheckErr rc.value.level < 0 # force error
block: block:
let rc = db.stow(stageLimit=MaxFilterBulk, chunkedMpt=chunkedMpt) let rc = db.schedStow(chunkedMpt=chunkedMpt)
xCheckRc rc.error == 0 xCheckRc rc.error == 0
block: block:
@ -183,7 +195,7 @@ proc saveToBackendWithOops(
xCheckRc rc.error == 0 xCheckRc rc.error == 0
# Make sure MPT hashes are OK # Make sure MPT hashes are OK
xCheck db.top.dirty == false xCheck db.dirty == false
block: block:
let rc = db.txTop() let rc = db.txTop()
@ -199,14 +211,14 @@ proc saveToBackendWithOops(
xCheckRc rc.error == 0 xCheckRc rc.error == 0
# Make sure MPT hashes are OK # Make sure MPT hashes are OK
xCheck db.top.dirty == false xCheck db.dirty == false
block: block:
let rc = db.txTop() let rc = db.txTop()
xCheckErr rc.value.level < 0 # force error xCheckErr rc.value.level < 0 # force error
block: block:
let rc = db.stow(stageLimit=MaxFilterBulk, chunkedMpt=chunkedMpt) let rc = db.schedStow(chunkedMpt=chunkedMpt)
xCheckRc rc.error == 0 xCheckRc rc.error == 0
# Update layers to original level # Update layers to original level
@ -449,8 +461,8 @@ proc testTxMergeProofAndKvpList*(
testId = idPfx & "#" & $w.id & "." & $n testId = idPfx & "#" & $w.id & "." & $n
runID = n runID = n
lstLen = list.len lstLen = list.len
sTabLen = db.top.sTab.len sTabLen = db.nLayersVtx()
lTabLen = db.top.lTab.len lTabLen = db.lTab.len
leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
var var
@ -463,14 +475,14 @@ proc testTxMergeProofAndKvpList*(
xCheck proved.error in {AristoError(0),MergeHashKeyCachedAlready} xCheck proved.error in {AristoError(0),MergeHashKeyCachedAlready}
xCheck w.proof.len == proved.merged + proved.dups xCheck w.proof.len == proved.merged + proved.dups
xCheck db.top.lTab.len == lTabLen xCheck db.lTab.len == lTabLen
xCheck db.top.sTab.len <= proved.merged + sTabLen xCheck db.nLayersVtx() <= proved.merged + sTabLen
xCheck proved.merged < db.top.pAmk.len xCheck proved.merged < db.nLayersLebal()
let let
merged = db.merge leafs merged = db.merge leafs
xCheck db.top.lTab.len == lTabLen + merged.merged xCheck db.lTab.len == lTabLen + merged.merged
xCheck merged.merged + merged.dups == leafs.len xCheck merged.merged + merged.dups == leafs.len
xCheck merged.error in {AristoError(0), MergeLeafPathCachedAlready} xCheck merged.error in {AristoError(0), MergeLeafPathCachedAlready}