Aristo db: remove over-engineered object type (#2027)

* CoreDb: update test suite

* Aristo: Simplify reverse key map

why:
  The reverse key map `pAmk: (root,key) -> {vid,..}` has been simplified to
  `pAmk: key -> {vid,..}`, as the state `root` domain argument is no longer
  used.
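
  As an illustration, a minimal sketch of the affected reverse-lookup type
  (the table names are taken from the diff below; the stand-in definitions of
  `VertexID`, `HashKey` and `HashLabel` are simplified assumptions, not the
  real Aristo declarations):

    import std/[tables, sets]

    type
      VertexID  = uint64             # stand-in; the real type is a distinct uint64
      HashKey   = array[32, byte]    # stand-in for the Merkle hash key
      HashLabel = object             # old wrapper: hash key plus sub-trie root
        root: VertexID
        key:  HashKey

      # before: reverse lookup keyed by the full (root,key) pair
      VidsByLabelTab = Table[HashLabel, HashSet[VertexID]]
      # after: the unused `root` domain argument is dropped, a bare key suffices
      VidsByKeyTab   = Table[HashKey, HashSet[VertexID]]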

* Aristo: Remove `HashLabel` object type and replace it by `HashKey`

why:
  The `HashLabel` object pairs a hash key with the root vertex ID of its
  sub-trie. This pairing is no longer used anywhere.
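
  With the wrapper gone, both the forward `kMap` and the reverse `pAmk` tables
  operate on bare `HashKey` values. A small usage sketch under the same
  simplified stand-in types as above (illustrative only, not the real Aristo
  API):

    import std/[tables, sets]

    type
      VertexID = uint64              # stand-in
      HashKey  = array[32, byte]     # stand-in

    var
      kMap: Table[VertexID, HashKey]           # forward Merkle hash key mapping
      pAmk: Table[HashKey, HashSet[VertexID]]  # reverse `kMap` lookup

    let
      vid = VertexID(1)
      key = default(HashKey)

    # was: kMap[vid] = HashLabel(root: someRoot, key: key)
    kMap[vid] = key
    pAmk.mgetOrPut(key, initHashSet[VertexID]()).incl vid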

* Fix copyright
Jordan Hrycaj 2024-02-14 19:11:59 +00:00 committed by GitHub
parent d5a54f66ee
commit 1b4a43c140
21 changed files with 282 additions and 331 deletions

View File

@ -155,7 +155,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
# Check structural table
for (vid,vtx) in db.layersWalkVtx:
let lbl = db.layersGetLabel(vid).valueOr:
let key = db.layersGetKey(vid).valueOr:
# A `kMap[]` entry must exist.
return err((vid,CheckBeCacheKeyMissing))
if vtx.isValid:
@ -163,7 +163,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
discard vids.reduce Interval[VertexID,uint64].new(vid,vid)
else:
# Some vertex is to be deleted, the key must be empty
if lbl.isValid:
if key.isValid:
return err((vid,CheckBeCacheKeyNonEmpty))
# There must be a representation on the backend DB unless in a TX
if db.getVtxBE(vid).isErr and db.stack.len == 0:
@ -186,19 +186,19 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
# Check key table
var list: seq[VertexID]
for (vid,lbl) in db.layersWalkLabel:
for (vid,key) in db.layersWalkKey:
list.add vid
let vtx = db.getVtx vid
if db.layersGetVtx(vid).isErr and not vtx.isValid:
return err((vid,CheckBeCacheKeyDangling))
if not lbl.isValid or relax:
if not key.isValid or relax:
continue
if not vtx.isValid:
return err((vid,CheckBeCacheVtxDangling))
let node = vtx.toNode(db).valueOr: # compile cache first
return err((vid,CheckBeCacheKeyCantCompile))
let expected = node.digestTo(HashKey)
if expected != lbl.key:
if expected != key:
return err((vid,CheckBeCacheKeyMismatch))
# Check vGen

View File

@ -1,5 +1,5 @@
# nimbus-eth1
# Copyright (c) 2023 Status Research & Development GmbH
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
@ -27,42 +27,42 @@ proc checkTopStrict*(
# vertex ID (i.e. not deleted).
var zeroKeys: HashSet[VertexID]
for (vid,vtx) in db.layersWalkVtx:
let lbl = db.layersGetLabelOrVoid vid
let key = db.layersGetKeyOrVoid vid
if not vtx.isValid:
if lbl.isValid:
if key.isValid:
return err((vid,CheckStkVtxKeyMismatch))
else: # Empty key flags key is for update
zeroKeys.incl vid
elif lbl.isValid:
# So `vtx` and `lbl` exist
elif key.isValid:
# So `vtx` and `key` exist
let node = vtx.toNode(db).valueOr:
return err((vid,CheckStkVtxIncomplete))
if lbl.key != node.digestTo(HashKey):
if key != node.digestTo(HashKey):
return err((vid,CheckStkVtxKeyMismatch))
let revVids = db.layersGetLebalOrVoid lbl
let revVids = db.layersGetYekOrVoid key
if not revVids.isValid:
return err((vid,CheckStkRevKeyMissing))
if vid notin revVids:
return err((vid,CheckStkRevKeyMismatch))
elif not db.dirty or db.layersGetLabel(vid).isErr:
# So `vtx` exists but not `lbl`, so cache is supposed dirty and the
elif not db.dirty or db.layersGetKey(vid).isErr:
# So `vtx` exists but not `key`, so cache is supposed dirty and the
# vertex has a zero entry.
return err((vid,CheckStkVtxKeyMissing))
else: # Empty key flags key is for update
zeroKeys.incl vid
for (vid,key) in db.layersWalkLabel:
for (vid,key) in db.layersWalkKey:
if not key.isValid and vid notin zeroKeys:
if not db.getVtx(vid).isValid:
return err((vid,CheckStkKeyStrayZeroEntry))
let
pAmkVtxCount = db.layersWalkLebal.toSeq.mapIt(it[1]).foldl(a + b.len, 0)
pAmkVtxCount = db.layersWalkYek.toSeq.mapIt(it[1]).foldl(a + b.len, 0)
sTabVtxCount = db.layersWalkVtx.toSeq.mapIt(it[1]).filterIt(it.isValid).len
  # Non-zero values must sum up the same
@ -73,7 +73,7 @@ proc checkTopStrict*(
proc checkTopProofMode*(
db: AristoDbRef; # Database, top layer
db: AristoDbRef; # Database, top layer
): Result[void,(VertexID,AristoError)] =
if 0 < db.pPrf.len:
for vid in db.pPrf:
@ -82,28 +82,28 @@ proc checkTopProofMode*(
let node = vtx.toNode(db).valueOr:
return err((vid,CheckRlxVtxIncomplete))
let lbl = db.layersGetlabelOrVoid vid
if not lbl.isValid:
let key = db.layersGetKeyOrVoid vid
if not key.isValid:
return err((vid,CheckRlxVtxKeyMissing))
if lbl.key != node.digestTo(HashKey):
if key != node.digestTo(HashKey):
return err((vid,CheckRlxVtxKeyMismatch))
let revVids = db.layersGetLebalOrVoid lbl
let revVids = db.layersGetYekOrVoid key
if not revVids.isValid:
return err((vid,CheckRlxRevKeyMissing))
if vid notin revVids:
return err((vid,CheckRlxRevKeyMismatch))
else:
for (vid,lbl) in db.layersWalkLabel:
if lbl.isValid: # Otherwise to be deleted
for (vid,key) in db.layersWalkKey:
if key.isValid: # Otherwise to be deleted
let vtx = db.getVtx vid
if vtx.isValid:
let node = vtx.toNode(db).valueOr:
continue
if lbl.key != node.digestTo(HashKey):
if key != node.digestTo(HashKey):
return err((vid,CheckRlxVtxKeyMismatch))
let revVids = db.layersGetLebalOrVoid lbl
let revVids = db.layersGetYekOrVoid key
if not revVids.isValid:
return err((vid,CheckRlxRevKeyMissing))
if vid notin revVids:
@ -115,8 +115,8 @@ proc checkTopCommon*(
): Result[void,(VertexID,AristoError)] =
  # Some `kMap[]` entries may be void indicating backend deletion
let
kMapCount = db.layersWalkLabel.toSeq.mapIt(it[1]).filterIt(it.isValid).len
kMapNilCount = db.layersWalkLabel.toSeq.len - kMapCount
kMapCount = db.layersWalkKey.toSeq.mapIt(it[1]).filterIt(it.isValid).len
kMapNilCount = db.layersWalkKey.toSeq.len - kMapCount
# Collect leafs and check deleted entries
var nNilVtx = 0
@ -139,7 +139,7 @@ proc checkTopCommon*(
return err((vid,CheckAnyVtxExtPfxMissing))
else:
nNilVtx.inc
let rc = db.layersGetLabel vid
let rc = db.layersGetKey vid
if rc.isErr:
return err((vid,CheckAnyVtxEmptyKeyMissing))
if rc.value.isValid:
@ -150,12 +150,12 @@ proc checkTopCommon*(
if kMapNilCount != 0 and kMapNilCount < nNilVtx:
return err((VertexID(0),CheckAnyVtxEmptyKeyMismatch))
let pAmkVtxCount = db.layersWalkLebal.toSeq.mapIt(it[1]).foldl(a + b.len, 0)
let pAmkVtxCount = db.layersWalkYek.toSeq.mapIt(it[1]).foldl(a + b.len, 0)
if pAmkVtxCount != kMapCount:
var knownKeys: HashSet[VertexID]
for (key,vids) in db.layersWalkLebal:
for (key,vids) in db.layersWalkYek:
for vid in vids:
if db.layersGetLabel(vid).isErr:
if db.layersGetKey(vid).isErr:
return err((vid,CheckAnyRevVtxMissing))
if vid in knownKeys:
return err((vid,CheckAnyRevVtxDup))
@ -163,7 +163,7 @@ proc checkTopCommon*(
return err((VertexID(0),CheckAnyRevCountMismatch)) # should not apply(!)
for vid in db.pPrf:
if db.layersGetLabel(vid).isErr:
if db.layersGetKey(vid).isErr:
return err((vid,CheckAnyVtxLockWithoutKey))
ok()

View File

@ -34,9 +34,6 @@ const
VOID_HASH_KEY* = HashKey()
## Void equivalent for Merkle hash value
VOID_HASH_LABEL* = HashLabel(key: VOID_HASH_KEY)
## Void equivalent for Merkle hash value
VOID_PATH_ID* = PathID()
## Void equivalent for Merkle hash value

View File

@ -27,30 +27,24 @@ import
proc orDefault(db: AristoDbRef): AristoDbRef =
if db.isNil: AristoDbRef(top: LayerRef.init()) else: db
proc del(xMap: var VidsByLabelTab; lbl: HashLabel; vid: VertexID) =
proc del(xMap: var VidsByKeyTab; key: HashKey; vid: VertexID) =
# Update `xMap`
var vidsLen = -1
xMap.withValue(lbl,value):
xMap.withValue(key,value):
value[].excl vid
vidsLen = value[].len
if vidsLen == 0:
xMap.del lbl
xMap.del key
proc del(xMap: var VidsByLabelTab; lbl: HashLabel; vids: HashSet[VertexID]) =
proc del(xMap: var VidsByKeyTab; key: HashKey; vids: HashSet[VertexID]) =
for vid in vids:
xMap.del(lbl, vid)
xMap.del(key, vid)
proc add(xMap: var VidsByLabelTab; lbl: HashLabel; vid: VertexID) =
xMap.withValue(lbl,value):
proc add(xMap: var VidsByKeyTab; key: Hashkey; vid: VertexID) =
xMap.withValue(key,value):
value[].incl vid
do: # else if not found
xMap[lbl] = @[vid].toHashSet
func cmp(a, b: HashLabel): int =
if a.root != b.root:
a.root.cmp b.root
else:
a.key.cmp b.key
xMap[key] = @[vid].toHashSet
# --------------------------
@ -63,22 +57,13 @@ proc toHexLsb(w: int8): string =
proc sortedKeys(lTab: Table[LeafTie,VertexID]): seq[LeafTie] =
lTab.keys.toSeq.sorted(cmp = proc(a,b: LeafTie): int = cmp(a,b))
proc sortedKeys(kMap: Table[VertexID,HashLabel]): seq[VertexID] =
kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys(kMap: Table[VertexID,HashKey]): seq[VertexID] =
kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys(sTab: Table[VertexID,VertexRef]): seq[VertexID] =
sTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys[T](tab: Table[VertexID,T]): seq[VertexID] =
tab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys(pPrf: HashSet[VertexID]): seq[VertexID] =
pPrf.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc sortedKeys(pAmk: Table[HashLabel,VertexID]): seq[HashLabel] =
pAmk.keys.toSeq.sorted cmp
proc sortedKeys(pAmk: VidsByLabelTab): seq[HashLabel] =
proc sortedKeys[T](pAmk: Table[HashKey,T]): seq[HashKey] =
pAmk.keys.toSeq.sorted cmp
@ -118,42 +103,40 @@ proc stripZeros(a: string; toExp = false): string =
elif 2 < n:
result &= "" & $n
proc vidCode(lbl: HashLabel, db: AristoDbRef): uint64 =
if lbl.isValid:
proc vidCode(key: HashKey, db: AristoDbRef): uint64 =
if key.isValid:
block:
let vids = db.layersGetLebalOrVoid lbl
let vids = db.layersGetYekOrVoid key
if vids.isValid:
db.xMap.del(lbl, vids)
db.xMap.del(key, vids)
return vids.sortedKeys[0].uint64
block:
let vids = db.xMap.getOrVoid lbl
let vids = db.xMap.getOrVoid key
if vids.isValid:
return vids.sortedKeys[0].uint64
# ---------------------
proc ppLabelOk(
proc ppKeyOk(
db: AristoDbRef;
root: VertexID;
key: HashKey;
vid: VertexID;
): string =
if key.isValid and vid.isValid:
let
lbl = HashLabel(root: root, key: key)
vids = db.layersGetLebalOrVoid lbl
vids = db.layersGetYekOrVoid key
if vids.isValid:
db.xMap.del(lbl, vids)
db.xMap.del(key, vids)
if vid notin vids:
result = "(!)"
return
block:
let vids = db.xMap.getOrVoid lbl
let vids = db.xMap.getOrVoid key
if vids.isValid:
if vid notin vids:
result = "(!)"
return
db.xMap.add(lbl,vid)
db.xMap.add(key,vid)
proc ppVid(vid: VertexID; pfx = true): string =
if pfx:
@ -215,16 +198,15 @@ proc ppVidList(vGen: openArray[VertexID]): string =
#proc ppVidList(vGen: HashSet[VertexID]): string =
# "{" & vGen.sortedKeys.mapIt(it.ppVid).join(",") & "}"
proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string =
proc ppKey(key: HashKey; db: AristoDbRef; pfx = true): string =
proc getVids(): tuple[vids: HashSet[VertexID], xMapTag: string] =
let lbl = HashLabel(root: root, key: key)
block:
let vids = db.layersGetLebalOrVoid lbl
let vids = db.layersGetYekOrVoid key
if vids.isValid:
db.xMap.del(lbl, vids)
db.xMap.del(key, vids)
return (vids, "")
block:
let vids = db.xMap.getOrVoid lbl
let vids = db.xMap.getOrVoid key
if vids.isValid:
return (vids, "+")
if pfx:
@ -247,13 +229,6 @@ proc ppKey(key: HashKey; db: AristoDbRef; root: VertexID; pfx = true): string =
return
result &= @key.toHex.squeeze(hex=true,ignLen=true) & tag
proc ppLabel(lbl: HashLabel; db: AristoDbRef): string =
if lbl.isValid:
"%" & ($lbl.root.toHex).stripZeros &
":" & lbl.key.ppKey(db, lbl.root, pfx=false)
else:
""
proc ppLeafTie(lty: LeafTie, db: AristoDbRef): string =
let pfx = lty.path.to(NibblesSeq)
"@" & lty.root.ppVid(pfx=false) & ":" &
@ -288,7 +263,7 @@ proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
else:
if not vid.isValid or vid in db.pPrf:
result = ["L(", "X(", "B("][nd.vType.ord]
elif db.layersGetLabel(vid).isOk:
elif db.layersGetKey(vid).isOk:
result = ["l(", "x(", "b("][nd.vType.ord]
else:
result = ["ł(", "€(", "þ("][nd.vType.ord]
@ -330,8 +305,8 @@ proc ppPPrf(pPrf: HashSet[VertexID]): string =
proc ppXMap*(
db: AristoDbRef;
kMap: Table[VertexID,HashLabel];
pAmk: VidsByLabelTab;
kMap: Table[VertexID,HashKey];
pAmk: VidsByKeyTab;
indent: int;
): string =
@ -351,24 +326,24 @@ proc ppXMap*(
if 1 < w.len:
multi = multi + w
# Vertex IDs without forward mapping `kMap: VertexID -> HashLabel`
var revOnly: Table[VertexID,HashLabel]
for (lbl,vids) in pAmk.pairs:
# Vertex IDs without forward mapping `kMap: VertexID -> HashKey`
var revOnly: Table[VertexID,HashKey]
for (key,vids) in pAmk.pairs:
for vid in vids:
if not kMap.hasKey vid:
revOnly[vid] = lbl
revOnly[vid] = key
let revKeys =
revOnly.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc ppNtry(n: uint64): string =
var s = VertexID(n).ppVid
let lbl = kMap.getOrVoid VertexID(n)
if lbl.isValid:
let vids = pAmk.getOrVoid lbl
let key = kMap.getOrVoid VertexID(n)
if key.isValid:
let vids = pAmk.getOrVoid key
if VertexID(n) notin vids or 1 < vids.len:
s = "(" & s & "," & lbl.key.ppKey(db,lbl.root)
elif lbl.key.len < 32:
s &= "[#" & $lbl.key.len & "]"
s = "(" & s & "," & key.ppKey(db)
elif key.len < 32:
s &= "[#" & $key.len & "]"
else:
s &= "£ø"
if s[0] == '(':
@ -378,10 +353,10 @@ proc ppXMap*(
result = "{"
# Extra reverse lookups
if 0 < revKeys.len:
proc ppRevlabel(vid: VertexID): string =
"(ø," & revOnly.getOrVoid(vid).ppLabel(db) & ")"
proc ppRevKey(vid: VertexID): string =
"(ø," & revOnly.getOrVoid(vid).ppkey(db) & ")"
var (i, r) = (0, revKeys[0])
result &= revKeys[0].ppRevlabel
result &= revKeys[0].ppRevKey
for n in 1 ..< revKeys.len:
let vid = revKeys[n]
r.inc
@ -391,24 +366,24 @@ proc ppXMap*(
result &= pfx
else:
result &= ".. "
result &= revKeys[n-1].ppRevlabel
result &= pfx & vid.ppRevlabel
result &= revKeys[n-1].ppRevKey
result &= pfx & vid.ppRevKey
(i, r) = (n, vid)
if i < revKeys.len - 1:
if i+1 != revKeys.len - 1:
result &= ".. "
else:
result &= pfx
result &= revKeys[^1].ppRevlabel
result &= revKeys[^1].ppRevKey
# Forward lookups
var cache: seq[(uint64,uint64,bool)]
for vid in kMap.sortedKeys:
let lbl = kMap.getOrVoid vid
if lbl.isValid:
cache.add (vid.uint64, lbl.vidCode(db), vid in multi)
let vids = pAmk.getOrVoid lbl
if (0 < vids.len and vid notin vids) or lbl.key.len < 32:
let key = kMap.getOrVoid vid
if key.isValid:
cache.add (vid.uint64, key.vidCode(db), vid in multi)
let vids = pAmk.getOrVoid key
if (0 < vids.len and vid notin vids) or key.len < 32:
cache[^1][2] = true
else:
cache.add (vid.uint64, 0u64, true)
@ -445,7 +420,6 @@ proc ppXMap*(
proc ppFilter(
fl: FilterRef;
db: AristoDbRef;
root: VertexID;
indent: int;
): string =
## Walk over filter tables
@ -458,8 +432,8 @@ proc ppFilter(
result &= " n/a"
return
result &= pfx & "fid=" & fl.fid.ppFid
result &= pfx & "src=" & fl.src.to(HashKey).ppKey(db,root)
result &= pfx & "trg=" & fl.trg.to(HashKey).ppKey(db,root)
result &= pfx & "src=" & fl.src.to(HashKey).ppKey(db)
result &= pfx & "trg=" & fl.trg.to(HashKey).ppKey(db)
result &= pfx & "vGen" & pfx1 & "[" &
fl.vGen.mapIt(it.ppVid).join(",") & "]"
result &= pfx & "sTab" & pfx1 & "{"
@ -471,10 +445,10 @@ proc ppFilter(
for n,vid in fl.kMap.sortedKeys:
let key = fl.kMap.getOrVoid vid
if 0 < n: result &= pfx2
result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey(db,root) & ")"
result &= $(1+n) & "(" & vid.ppVid & "," & key.ppKey(db) & ")"
result &= "}"
proc ppBe[T](be: T; db: AristoDbRef; root: VertexID; indent: int): string =
proc ppBe[T](be: T; db: AristoDbRef; indent: int): string =
## Walk over backend tables
let
pfx = indent.toPfx
@ -497,7 +471,7 @@ proc ppBe[T](be: T; db: AristoDbRef; root: VertexID; indent: int): string =
for (vid,key) in be.walkKey:
if 0 < n: result &= pfx2
n.inc
result &= $n & "(" & vid.ppVid & "," & key.ppKey(db,root) & ")"
result &= $n & "(" & vid.ppVid & "," & key.ppKey(db) & ")"
result &= "}"
proc ppLayer(
@ -577,13 +551,10 @@ proc pp*(w: Hash256): string =
w.data.toHex.squeeze(hex=true,ignLen=true)
proc pp*(w: HashKey; sig: MerkleSignRef): string =
w.ppKey(sig.db, sig.root)
w.ppKey(sig.db)
proc pp*(w: HashKey; db = AristoDbRef(nil); root = VertexID(1)): string =
w.ppKey(db.orDefault, root)
proc pp*(lbl: HashLabel, db = AristoDbRef(nil)): string =
lbl.ppLabel(db.orDefault)
proc pp*(w: HashKey; db = AristoDbRef(nil)): string =
w.ppKey(db.orDefault)
proc pp*(lty: LeafTie, db = AristoDbRef(nil)): string =
lty.ppLeafTie(db.orDefault)
@ -615,7 +586,7 @@ proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string =
proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string =
nd.ppVtx(db.orDefault, VertexID(0))
proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
proc pp*(nd: NodeRef; db: AristoDbRef): string =
if not nd.isValid:
result = "n/a"
elif nd.error != AristoError(0):
@ -628,21 +599,21 @@ proc pp*(nd: NodeRef; root: VertexID; db: AristoDbRef): string =
of Extension:
result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & ","
result &= nd.key[0].ppKey(db,root)
result &= db.ppLabelOk(root, nd.key[0], nd.eVid)
result &= nd.key[0].ppKey(db)
result &= db.ppKeyOk(nd.key[0], nd.eVid)
of Branch:
result &= "["
for n in 0..15:
if nd.bVid[n].isValid or nd.key[n].isValid:
result &= nd.bVid[n].ppVid
result &= db.ppLabelOk(root, nd.key[n], nd.bVid[n]) & ","
result &= db.ppKeyOk(nd.key[n], nd.bVid[n]) & ","
result[^1] = ']'
result &= ",["
for n in 0..15:
if nd.bVid[n].isValid or nd.key[n].isValid:
result &= nd.key[n].ppKey(db,root)
result &= nd.key[n].ppKey(db)
result &= ","
result[^1] = ']'
result &= ")"
@ -680,11 +651,11 @@ proc pp*(leg: Leg; db = AristoDbRef(nil)): string =
let db = db.orDefault()
result = "(" & leg.wp.vid.ppVid & ","
block:
let lbl = db.layersGetLabelOrVoid leg.wp.vid
if not lbl.isValid:
let key = db.layersGetKeyOrVoid leg.wp.vid
if not key.isValid:
result &= "ø"
elif leg.wp.vid notin db.layersGetLebalOrVoid lbl:
result &= lbl.ppLabel(db)
elif leg.wp.vid notin db.layersGetYekOrVoid key:
result &= key.ppKey(db)
result &= ","
if 0 <= leg.nibble:
result &= $leg.nibble.ppNibble
@ -704,32 +675,32 @@ proc pp*(hike: Hike; db = AristoDbRef(nil); indent = 4): string =
result &= pfx & "(" & hike.tail.ppPathPfx & ")"
result &= "]"
proc pp*(kMap: Table[VertexID,Hashlabel]; indent = 4): string =
proc pp*(kMap: Table[VertexID,HashKey]; indent = 4): string =
let db = AristoDbRef(nil).orDefault
"{" & kMap.sortedKeys
.mapIt((it, kMap.getOrVoid it))
.mapIt("(" & it[0].ppVid & "," & it[1].ppLabel(db) & ")")
.mapIt("(" & it[0].ppVid & "," & it[1].ppKey(db) & ")")
.join("," & indent.toPfx(1)) & "}"
proc pp*(kMap: Table[VertexID,Hashlabel]; db: AristoDbRef; indent = 4): string =
proc pp*(kMap: Table[VertexID,HashKey]; db: AristoDbRef; indent = 4): string =
db.ppXMap(kMap, db.layersCc.delta.pAmk, indent)
proc pp*(
pAmk: Table[HashLabel,VertexID];
pAmk: Table[HashKey,VertexID];
db = AristoDbRef(nil);
indent = 4;
): string =
let db = db.orDefault
"{" & pAmk.sortedkeys
.mapIt((it, pAmk.getOrVoid it))
.mapIt("(" & it[0].ppLabel(db) & "," & it[1].ppVid & ")")
.mapIt("(" & it[0].ppKey(db) & "," & it[1].ppVid & ")")
.join("," & indent.toPfx(1)) & "}"
proc pp*(pAmk: VidsByLabelTab; db = AristoDbRef(nil); indent = 4): string =
proc pp*(pAmk: VidsByKeyTab; db = AristoDbRef(nil); indent = 4): string =
let db = db.orDefault
"{" & pAmk.sortedkeys
.mapIt((it, pAmk.getOrVoid it))
.mapIt("(" & it[0].ppLabel(db) & "," & it[1].ppVids & ")")
.mapIt("(" & it[0].ppKey(db) & "," & it[1].ppVids & ")")
.join("," & indent.toPfx(1)) & "}"
# ---------------------
@ -792,29 +763,26 @@ proc pp*(
proc pp*(
filter: FilterRef;
db = AristoDbRef(nil);
root = VertexID(1);
indent = 4;
): string =
filter.ppFilter(db.orDefault(), root, indent)
filter.ppFilter(db.orDefault(), indent)
proc pp*(
be: BackendRef;
db: AristoDbRef;
root = VertexID(1);
indent = 4;
): string =
result = db.roFilter.ppFilter(db, root, indent+1) & indent.toPfx
result = db.roFilter.ppFilter(db, indent+1) & indent.toPfx
case be.kind:
of BackendMemory:
result &= be.MemBackendRef.ppBe(db, root, indent+1)
result &= be.MemBackendRef.ppBe(db, indent+1)
of BackendRocksDB:
result &= be.RdbBackendRef.ppBe(db, root, indent+1)
result &= be.RdbBackendRef.ppBe(db, indent+1)
of BackendVoid:
result &= "<NoBackend>"
proc pp*(
db: AristoDbRef;
root = VertexID(1);
indent = 4;
backendOk = false;
filterOk = true;
@ -842,13 +810,13 @@ proc pp*(
if backendOk:
result &= db.backend.pp(db)
elif filterOk:
result &= db.roFilter.ppFilter(db, root, indent+1)
result &= db.roFilter.ppFilter(db, indent+1)
proc pp*(sdb: MerkleSignRef; indent = 4): string =
"count=" & $sdb.count &
" root=" & sdb.root.pp &
" error=" & $sdb.error &
"\n db\n " & sdb.db.pp(root=sdb.root, indent=indent+1)
"\n db\n " & sdb.db.pp(indent=indent+1)
# ------------------------------------------------------------------------------
# End

View File

@ -67,7 +67,7 @@ proc nullifyKey(
vid: VertexID; # Vertex IDs to clear
) =
# Register for void hash (to be recompiled)
db.layersResLabel vid
db.layersResKey vid
proc disposeOfVtx(
db: AristoDbRef; # Database, top layer
@ -75,7 +75,7 @@ proc disposeOfVtx(
) =
# Remove entry
db.layersResVtx vid
db.layersResLabel vid
db.layersResKey vid
db.vidDispose vid # Recycle ID
# ------------------------------------------------------------------------------

View File

@ -78,7 +78,7 @@ type
dudes: DudesRef ## Related DB descriptors
# Debugging data below, might go away in future
xMap*: VidsByLabelTab ## For pretty printing, extends `pAmk`
xMap*: VidsByKeyTab ## For pretty printing, extends `pAmk`
AristoDbAction* = proc(db: AristoDbRef) {.gcsafe, raises: [].}
## Generic call back function/closure.
@ -90,9 +90,6 @@ type
func getOrVoid*[W](tab: Table[W,VertexRef]; w: W): VertexRef =
tab.getOrDefault(w, VertexRef(nil))
func getOrVoid*[W](tab: Table[W,HashLabel]; w: W): HashLabel =
tab.getOrDefault(w, VOID_HASH_LABEL)
func getOrVoid*[W](tab: Table[W,NodeRef]; w: W): NodeRef =
tab.getOrDefault(w, NodeRef(nil))
@ -134,9 +131,6 @@ func isValid*(key: HashKey): bool =
func isValid*(vid: VertexID): bool =
vid != VertexID(0)
func isValid*(lbl: HashLabel): bool =
lbl.key.isValid
func isValid*(sqv: HashSet[VertexID]): bool =
sqv != EmptyVidSet

View File

@ -86,18 +86,6 @@ type
root*: VertexID ## Root ID for the sub-trie
path*: PathID ## Path into the `Patricia Trie`
HashLabel* = object
## Merkle hash key uniquely associated with a vertex ID. As hashes in a
## `Merkle Patricia Tree` are unique only on a particular sub-trie, the
## hash key is paired with the top vertex of the relevant sub-trie. This
## construction is similar to the one of a `LeafTie` object.
##
## Note that `HashLabel` objects have no representation in the
## `Aristo Trie`. They are used temporarily and in caches or backlog
## tables.
root*: VertexID ## Root ID for the sub-trie.
key*: HashKey ## Merkle hash or encoded small node data
# ------------------------------------------------------------------------------
# Chronicles formatters
# ------------------------------------------------------------------------------
@ -382,13 +370,6 @@ func hash*(a: HashKey): Hash =
h = h !& a.blob.hash
!$h
func hash*(lbl: HashLabel): Hash =
## Table/KeyedQueue/HashSet mixin
var h: Hash = 0
h = h !& lbl.root.hash
h = h !& lbl.key.hash
!$h
# ------------------------------------------------------------------------------
# Miscellaneous helpers
# ------------------------------------------------------------------------------

View File

@ -76,8 +76,8 @@ type
kMap*: Table[VertexID,HashKey] ## Filter Merkle hash key mapping
vGen*: seq[VertexID] ## Filter unique vertex ID generator
VidsByLabelTab* = Table[HashLabel,HashSet[VertexID]]
## Reverse lookup searching `VertexID` by the hash key/label.
VidsByKeyTab* = Table[HashKey,HashSet[VertexID]]
## Reverse lookup searching `VertexID` by the hash key.
LayerDeltaRef* = ref object
## Delta layers are stacked implying a tables hierarchy. Table entries on
@ -103,8 +103,8 @@ type
## inconsistent state that must be resolved.
##
sTab*: Table[VertexID,VertexRef] ## Structural vertex table
kMap*: Table[VertexID,HashLabel] ## Merkle hash key mapping
pAmk*: VidsByLabelTab ## Reverse `kMap` entries, hash key lookup
kMap*: Table[VertexID,HashKey] ## Merkle hash key mapping
pAmk*: VidsByKeyTab ## Reverse `kMap` entries, hash key lookup
LayerFinalRef* = ref object
## Final tables fully supersede tables on lower layers when stacked as a

View File

@ -56,7 +56,7 @@ proc fwdFilter*(
ok FilterRef(
src: srcRoot,
sTab: layer.delta.sTab,
kMap: layer.delta.kMap.pairs.toSeq.mapIt((it[0],it[1].key)).toTable,
kMap: layer.delta.kMap,
vGen: layer.final.vGen.vidReorg, # Compact recycled IDs
trg: trgRoot)

View File

@ -51,9 +51,9 @@ proc getLayerStateRoots*(
spr.be = sprBeKey.to(Hash256)
spr.fg = block:
let lbl = delta.kMap.getOrVoid VertexID(1)
if lbl.isValid:
lbl.key.to(Hash256)
let key = delta.kMap.getOrVoid VertexID(1)
if key.isValid:
key.to(Hash256)
else:
EMPTY_ROOT_HASH
if spr.fg.isValid:
@ -68,8 +68,7 @@ proc getLayerStateRoots*(
return ok(spr)
if chunkedMpt:
let lbl = HashLabel(root: VertexID(1), key: sprBeKey)
if VertexID(1) in delta.pAmk.getOrVoid lbl:
if VertexID(1) in delta.pAmk.getOrVoid sprBeKey:
spr.fg = spr.be
return ok(spr)

View File

@ -166,12 +166,12 @@ proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
block body:
let key = db.layersGetKey(vid).valueOr:
break body
# If there is a zero value label, the entry is either marked for being
# If there is a zero key value, the entry is either marked for being
# updated or for deletion on the database. So check below.
if key.isValid:
return ok key
# The zero value label does not refer to an update mark if there is no
# The zero key value does not refer to an update mark if there is no
# valid vertex (either on the cache or the backend whatever comes first.)
let vtx = db.layersGetVtx(vid).valueOr:
# There was no vertex on the cache. So there must be one the backend (the
@ -180,7 +180,7 @@ proc getKeyRc*(db: AristoDbRef; vid: VertexID): Result[HashKey,AristoError] =
if vtx.isValid:
return err(GetKeyUpdateNeeded)
else:
# The vertex is to be deleted. So is the value label.
# The vertex is to be deleted. So is the value key.
return err(GetKeyNotFound)
db.getKeyBE vid

View File

@ -25,7 +25,7 @@
## `lTab[]` entry with `(root-vertex,path,VertexID(0))`.
##
## * All vertices where the key (aka Merkle hash) has changed must have a
## top layer cache `kMap[]` entry `(vertex-ID,VOID_HASH_LABEL)` indicating
## top layer cache `kMap[]` entry `(vertex-ID,VOID_HASH_KEY)` indicating
## that there is no key available for this vertex. This also applies for
##   backend vertices where the key has changed while the structural logic
## did not change.
@ -333,8 +333,8 @@ proc hashify*(
# is the task to search for unresolved node keys and add glue paths to
# the width-first schedule.
var unresolved: HashSet[VertexID]
for (vid,lbl) in db.layersWalkLabel:
if not lbl.isValid and
for (vid,key) in db.layersWalkKey:
if not key.isValid and
vid notin wff:
let rc = db.layersGetVtx vid
if rc.isErr or rc.value.isValid:
@ -390,8 +390,7 @@ proc hashify*(
# End `valueOr` terminates error clause
# Could resolve => update Merkle hash
let key = node.digestTo(HashKey)
db.layersPutLabel(vid, HashLabel(root: val.root, key: key))
db.layersPutKey(vid, node.digestTo HashKey)
# Set follow up link for next round
wff.setNextLink(redo, val)
@ -410,7 +409,7 @@ proc hashify*(
# Convert root vertex to a node.
let node = db.getVtx(vid).toNode(db,stopEarly=false).valueOr:
return err((vid,HashifyRootNodeUnresolved))
db.layersPutLabel(vid, HashLabel(root: vid, key: node.digestTo(HashKey)))
db.layersPutKey(vid, node.digestTo(HashKey))
wff.completed.incl vid
db.top.final.dirty = false # Mark top layer clean

View File

@ -25,21 +25,21 @@ func dup(sTab: Table[VertexID,VertexRef]): Table[VertexID,VertexRef] =
for (k,v) in sTab.pairs:
result[k] = v.dup
func getLebalOrVoid(stack: seq[LayerRef]; lbl: HashLabel): HashSet[VertexID] =
func getLebalOrVoid(stack: seq[LayerRef]; key: HashKey): HashSet[VertexID] =
# Helper: get next set of vertex IDs from stack.
for w in stack.reversed:
w.delta.pAmk.withValue(lbl,value):
w.delta.pAmk.withValue(key,value):
return value[]
proc recalcLebal(layer: var LayerObj) =
## Calculate reverse `kMap[]` for final (aka zero) layer
layer.delta.pAmk.clear
for (vid,lbl) in layer.delta.kMap.pairs:
if lbl.isValid:
layer.delta.pAmk.withValue(lbl, value):
for (vid,key) in layer.delta.kMap.pairs:
if key.isValid:
layer.delta.pAmk.withValue(key, value):
value[].incl vid
do:
layer.delta.pAmk[lbl] = @[vid].toHashSet
layer.delta.pAmk[key] = @[vid].toHashSet
# ------------------------------------------------------------------------------
# Public getters: lazy value lookup for read only versions
@ -62,24 +62,25 @@ func dirty*(db: AristoDbRef): bool =
# ------------------------------------------------------------------------------
func nLayersVtx*(db: AristoDbRef): int =
## Number of vertex ID/vertex entries on the cache layers. This is an upper
## bound for the number of effective vertex ID mappings held on the cache
## layers as there might be duplicate entries for the same vertex ID on
## different layers.
## Number of vertex ID/vertex entries on the cache layers. This is an upper bound
## for the number of effective vertex ID mappings held on the cache layers as
## there might be duplicate entries for the same vertex ID on different layers.
##
db.stack.mapIt(it.delta.sTab.len).foldl(a + b, db.top.delta.sTab.len)
func nLayersLabel*(db: AristoDbRef): int =
## Number of vertex ID/label entries on the cache layers. This is an upper
## bound for the number of effective vertex ID mappingss held on the cache
## layers as there might be duplicate entries for the same vertex ID on
## different layers.
func nLayersKey*(db: AristoDbRef): int =
## Number of vertex ID/key entries on the cache layers. This is an upper bound
  ## for the number of effective vertex ID mappings held on the cache layers as
## there might be duplicate entries for the same vertex ID on different layers.
##
db.stack.mapIt(it.delta.kMap.len).foldl(a + b, db.top.delta.kMap.len)
func nLayersLebal*(db: AristoDbRef): int =
## Number of label/vertex IDs reverse lookup entries on the cache layers.
## This is an upper bound for the number of effective label mappingss held
## on the cache layers as there might be duplicate entries for the same label
## on different layers.
func nLayersYek*(db: AristoDbRef): int =
## Number of key/vertex IDs reverse lookup entries on the cache layers. This
  ## is an upper bound for the number of effective key mappings held on the
## cache layers as there might be duplicate entries for the same key on
## different layers.
##
db.stack.mapIt(it.delta.pAmk.len).foldl(a + b, db.top.delta.pAmk.len)
# ------------------------------------------------------------------------------
@ -104,10 +105,9 @@ proc layersGetVtxOrVoid*(db: AristoDbRef; vid: VertexID): VertexRef =
db.layersGetVtx(vid).valueOr: VertexRef(nil)
proc layersGetLabel*(db: AristoDbRef; vid: VertexID): Result[HashLabel,void] =
## Find a hash label (containh the `HashKey`) on the cache layers. An
## `ok()` result might contain a void hash label if it is stored on the
## cache that way.
proc layersGetKey*(db: AristoDbRef; vid: VertexID): Result[HashKey,void] =
## Find a hash key on the cache layers. An `ok()` result might contain a void
## hash key if it is stored on the cache that way.
##
if db.top.delta.kMap.hasKey vid:
# This is ok regardless of the `dirty` flag. If this vertex has become
@ -121,43 +121,30 @@ proc layersGetLabel*(db: AristoDbRef; vid: VertexID): Result[HashLabel,void] =
err()
proc layersGetlabelOrVoid*(db: AristoDbRef; vid: VertexID): HashLabel =
## Simplified version of `layersGetLabel()`
db.layersGetLabel(vid).valueOr: VOID_HASH_LABEL
proc layersGetKey*(db: AristoDbRef; vid: VertexID): Result[HashKey,void] =
## Variant of `layersGetLabel()` for returning the `HashKey` part of the
## label only.
let lbl = db.layersGetLabel(vid).valueOr:
return err()
# Note that `lbl.isValid == lbl.key.isValid`
ok(lbl.key)
proc layersGetKeyOrVoid*(db: AristoDbRef; vid: VertexID): HashKey =
## Simplified version of `layersGetKey()`
## Simplified version of `layersGetkey()`
db.layersGetKey(vid).valueOr: VOID_HASH_KEY
proc layersGetLebal*(
proc layersGetYek*(
db: AristoDbRef;
lbl: HashLabel;
key: HashKey;
): Result[HashSet[VertexID],void] =
## Inverse of `layersGetKey()`. For a given argumnt `lbl`, find all vertex
## IDs that have `layersGetLbl()` return this very `lbl` value for the these
  ## Inverse of `layersGetKey()`. For a given argument `key`, finds all vertex IDs
  ## for which `layersGetKey()` returns this very `key` value for the argument
## vertex IDs.
if db.top.delta.pAmk.hasKey lbl:
return ok(db.top.delta.pAmk.getOrVoid lbl)
if db.top.delta.pAmk.hasKey key:
return ok(db.top.delta.pAmk.getOrVoid key)
for w in db.stack.reversed:
if w.delta.pAmk.hasKey lbl:
return ok(w.delta.pAmk.getOrVoid lbl)
if w.delta.pAmk.hasKey key:
return ok(w.delta.pAmk.getOrVoid key)
err()
proc layersGetLebalOrVoid*(db: AristoDbRef; lbl: HashLabel): HashSet[VertexID] =
proc layersGetYekOrVoid*(db: AristoDbRef; key: HashKey): HashSet[VertexID] =
## Simplified version of `layersGetVidsOrVoid()`
db.layersGetLebal(lbl).valueOr: EmptyVidSet
db.layersGetYek(key).valueOr: EmptyVidSet
# ------------------------------------------------------------------------------
# Public functions: put variants
@ -174,43 +161,43 @@ proc layersResVtx*(db: AristoDbRef; vid: VertexID) =
db.layersPutVtx(vid, VertexRef(nil))
proc layersPutLabel*(db: AristoDbRef; vid: VertexID; lbl: HashLabel) =
## Store a (potentally void) hash label on the top layer
proc layersPutKey*(db: AristoDbRef; vid: VertexID; key: HashKey) =
  ## Store a (potentially void) hash key on the top layer
# Get previous label
let blb = db.top.delta.kMap.getOrVoid vid
# Update label on `label->vid` mapping table
db.top.delta.kMap[vid] = lbl
# Get previous key
let prvKey = db.top.delta.kMap.getOrVoid vid
# Update key on `kMap:key->vid` mapping table
db.top.delta.kMap[vid] = key
db.top.final.dirty = true # Modified top cache layers
# Clear previous value on reverse table if it has changed
if blb.isValid and blb != lbl:
if prvKey.isValid and prvKey != key:
var vidsLen = -1
db.top.delta.pAmk.withValue(blb, value):
db.top.delta.pAmk.withValue(prvKey, value):
value[].excl vid
vidsLen = value[].len
do: # provide empty lookup
let vids = db.stack.getLebalOrVoid(blb)
let vids = db.stack.getLebalOrVoid(prvKey)
if vids.isValid and vid in vids:
        # This entry supersedes non-empty changed ones from lower levels
db.top.delta.pAmk[blb] = vids - @[vid].toHashSet
if vidsLen == 0 and not db.stack.getLebalOrVoid(blb).isValid:
db.top.delta.pAmk[prvKey] = vids - @[vid].toHashSet
if vidsLen == 0 and not db.stack.getLebalOrVoid(prvKey).isValid:
      # There is no non-empty entry on lower levels, so delete this one
db.top.delta.pAmk.del blb
db.top.delta.pAmk.del prvKey
# Add updated value on reverse table if non-zero
if lbl.isValid:
db.top.delta.pAmk.withValue(lbl, value):
if key.isValid:
db.top.delta.pAmk.withValue(key, value):
value[].incl vid
do: # else if not found: need to merge with value set from lower layer
db.top.delta.pAmk[lbl] = db.stack.getLebalOrVoid(lbl) + @[vid].toHashSet
db.top.delta.pAmk[key] = db.stack.getLebalOrVoid(key) + @[vid].toHashSet
proc layersResLabel*(db: AristoDbRef; vid: VertexID) =
## Shortcut for `db.layersPutLabel(vid, VOID_HASH_LABEL)`. It is sort of the
proc layersResKey*(db: AristoDbRef; vid: VertexID) =
## Shortcut for `db.layersPutKey(vid, VOID_HASH_KEY)`. It is sort of the
## equivalent of a delete function.
db.layersPutLabel(vid, VOID_HASH_LABEL)
db.layersPutKey(vid, VOID_HASH_KEY)
# ------------------------------------------------------------------------------
# Public functions
@ -225,18 +212,18 @@ proc layersMergeOnto*(src: LayerRef; trg: var LayerObj; stack: seq[LayerRef]) =
for (vid,vtx) in src.delta.sTab.pairs:
trg.delta.sTab[vid] = vtx
for (vid,lbl) in src.delta.kMap.pairs:
trg.delta.kMap[vid] = lbl
for (vid,key) in src.delta.kMap.pairs:
trg.delta.kMap[vid] = key
if stack.len == 0:
# Re-calculate `pAmk[]`
trg.recalcLebal()
else:
# Merge reverse `kMap[]` layers. Empty label image sets are ignored unless
    # Merge reverse `kMap[]` layers. Empty key image sets are ignored unless
# they supersede non-empty values on the argument `stack[]`.
for (lbl,vids) in src.delta.pAmk.pairs:
if 0 < vids.len or stack.getLebalOrVoid(lbl).isValid:
trg.delta.pAmk[lbl] = vids
for (key,vids) in src.delta.pAmk.pairs:
if 0 < vids.len or stack.getLebalOrVoid(key).isValid:
trg.delta.pAmk[key] = vids
func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
@ -258,8 +245,8 @@ func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
for n in 1 ..< layers.len:
for (vid,vtx) in layers[n].delta.sTab.pairs:
result.delta.sTab[vid] = vtx
for (vid,lbl) in layers[n].delta.kMap.pairs:
result.delta.kMap[vid] = lbl
for (vid,key) in layers[n].delta.kMap.pairs:
result.delta.kMap[vid] = key
# Re-calculate `pAmk[]`
result[].recalcLebal()
@ -298,37 +285,37 @@ iterator layersWalkVtx*(
yield (vid,vtx)
iterator layersWalkLabel*(
iterator layersWalkKey*(
db: AristoDbRef;
): tuple[vid: VertexID, lbl: HashLabel] =
## Walk over all `(VertexID,HashLabel)` pairs on the cache layers. Note that
): tuple[vid: VertexID, key: HashKey] =
## Walk over all `(VertexID,HashKey)` pairs on the cache layers. Note that
## entries are unsorted.
var seen: HashSet[VertexID]
for (vid,lbl) in db.top.delta.kMap.pairs:
yield (vid,lbl)
for (vid,key) in db.top.delta.kMap.pairs:
yield (vid,key)
seen.incl vid
for w in db.stack.reversed:
for (vid,lbl) in w.delta.kMap.pairs:
for (vid,key) in w.delta.kMap.pairs:
if vid notin seen:
yield (vid,lbl)
yield (vid,key)
seen.incl vid
iterator layersWalkLebal*(
iterator layersWalkYek*(
db: AristoDbRef;
): tuple[lbl: HashLabel, vids: HashSet[VertexID]] =
## Walk over `(HashLabel,HashSet[VertexID])` pairs.
var seen: HashSet[HashLabel]
for (lbl,vids) in db.top.delta.pAmk.pairs:
yield (lbl,vids)
seen.incl lbl
): tuple[key: HashKey, vids: HashSet[VertexID]] =
## Walk over `(HashKey,HashSet[VertexID])` pairs.
var seen: HashSet[HashKey]
for (key,vids) in db.top.delta.pAmk.pairs:
yield (key,vids)
seen.incl key
for w in db.stack.reversed:
for (lbl,vids) in w.delta.pAmk.pairs:
if lbl notin seen:
yield (lbl,vids)
seen.incl lbl
for (key,vids) in w.delta.pAmk.pairs:
if key notin seen:
yield (key,vids)
seen.incl key
# ------------------------------------------------------------------------------
# End

View File

@ -81,7 +81,7 @@ proc nullifyKey(
vid: VertexID; # Vertex IDs to clear
) =
# Register for void hash (to be recompiled)
db.layersResLabel vid
db.layersResKey vid
proc clearMerkleKeys(
db: AristoDbRef; # Database, top layer
@ -97,7 +97,7 @@ proc setVtxAndKey(
vtx: VertexRef; # Vertex to add
) =
db.layersPutVtx(vid, vtx)
db.layersResLabel vid
db.layersResKey vid
# -----------
@ -512,8 +512,7 @@ proc mergeNodeImpl(
# already. This is provided for if the `nodes` are processed in the right
# order `root->.. ->leaf`.
let
hashLbl = HashLabel(root: rootVid, key: hashKey)
vids = db.layersGetLebalOrVoid(hashLbl).toSeq
vids = db.layersGetYekOrVoid(hashKey).toSeq
isRoot = rootVid in vids
if vids.len == 0:
return err(MergeRevVidMustHaveBeenCached)
@ -522,8 +521,8 @@ proc mergeNodeImpl(
return err(MergeHashKeyRevLookUpGarbled)
  # Use the first vertex ID from the `vids` list as representative for all others
let lbl = db.layersGetLabelOrVoid vids[0]
if lbl == hashLbl:
let key = db.layersGetKeyOrVoid vids[0]
if key == hashKey:
if db.layersGetVtx(vids[0]).isOk:
for n in 1 ..< vids.len:
if db.layersGetVtx(vids[n]).isErr:
@ -531,7 +530,7 @@ proc mergeNodeImpl(
      # This is typically considered OK
return err(MergeHashKeyCachedAlready)
# Otherwise proceed
elif lbl.isValid:
elif key.isValid:
# Different key assigned => error
return err(MergeHashKeyDiffersFromCached)
@ -548,7 +547,7 @@ proc mergeNodeImpl(
# Verify that all `vids` entries are similar
for n in 1 ..< vids.len:
let w = vids[n]
if lbl != db.layersGetLabelOrVoid(w) or db.layersGetVtx(w).isOk:
if key != db.layersGetKeyOrVoid(w) or db.layersGetVtx(w).isOk:
return err(MergeHashKeyRevLookUpGarbled)
if not hasVtx:
# Prefer existing node which has all links available, already.
@ -556,31 +555,31 @@ proc mergeNodeImpl(
if u.isValid:
(vtx, hasVtx) = (u, true)
# The `vertexID <-> hashLabel` mappings need to be set up now (if any)
# The `vertexID <-> hashKey` mappings need to be set up now (if any)
case node.vType:
of Leaf:
discard
of Extension:
if node.key[0].isValid:
let eLbl = HashLabel(root: rootVid, key: node.key[0])
let eKey = node.key[0]
if not hasVtx:
# Brand new reverse lookup link for this vertex
vtx.eVid = db.vidFetch
db.layersPutLabel(vtx.eVid, eLbl)
db.layersPutKey(vtx.eVid, eKey)
elif not vtx.eVid.isValid:
return err(MergeNodeVtxDiffersFromExisting)
db.layersPutLabel(vtx.eVid, eLbl)
db.layersPutKey(vtx.eVid, eKey)
of Branch:
for n in 0..15:
if node.key[n].isValid:
let bLbl = HashLabel(root: rootVid, key: node.key[n])
let bKey = node.key[n]
if not hasVtx:
# Brand new reverse lookup link for this vertex
vtx.bVid[n] = db.vidFetch
db.layersPutLabel(vtx.bVid[n], bLbl)
db.layersPutKey(vtx.bVid[n], bKey)
elif not vtx.bVid[n].isValid:
return err(MergeNodeVtxDiffersFromExisting)
db.layersPutLabel(vtx.bVid[n], bLbl)
db.layersPutKey(vtx.bVid[n], bKey)
for w in vids:
db.top.final.pPrf.incl w
@ -824,13 +823,11 @@ proc merge*(
if 0 < chain.len and chain[^1] == rootKey:
chains.add chain
# Make sure that the reverse lookup for the root vertex label is available.
# Make sure that the reverse lookup for the root vertex key is available.
block:
let
lbl = HashLabel(root: rootVid, key: rootKey)
vids = db.layersGetLebalOrVoid lbl
let vids = db.layersGetYekOrVoid rootKey
if not vids.isValid:
db.layersPutlabel(rootVid, lbl)
db.layersPutKey(rootVid, rootKey)
# Process over chains in reverse mode starting with the root node. This
# allows the algorithm to find existing nodes on the backend.
@ -882,7 +879,7 @@ proc merge*(
return ok rootVid
if not key.isValid:
db.layersPutLabel(rootVid, HashLabel(root: rootVid, key: rootLink))
db.layersPutKey(rootVid, rootLink)
return ok rootVid
else:
let key = db.getKey VertexID(1)
@ -891,13 +888,13 @@ proc merge*(
# Otherwise assign unless valid
if not key.isValid:
db.layersPutLabel(VertexID(1),HashLabel(root: VertexID(1), key: rootLink))
db.layersPutKey(VertexID(1), rootLink)
return ok VertexID(1)
# Create and assign a new root key
if not rootVid.isValid:
let vid = db.vidFetch
db.layersPutLabel(vid, HashLabel(root: vid, key: rootLink))
db.layersPutKey(vid, rootLink)
return ok vid
err(MergeRootKeyDiffersForVid)

View File

@ -108,10 +108,10 @@ proc toNode*(
##
proc getKey(db: AristoDbRef; vid: VertexID; beOk: bool): HashKey =
block body:
let lbl = db.layersGetLabel(vid).valueOr:
let key = db.layersGetKey(vid).valueOr:
break body
if lbl.isValid:
return lbl.key
if key.isValid:
return key
else:
return VOID_HASH_KEY
if beOk:
@ -225,7 +225,7 @@ proc registerAccount*(
# Clear Merkle keys and store leaf record
for w in hike.legs.mapIt(it.wp.vid):
db.layersResLabel w
db.layersResKey w
db.top.final.lTab[lty] = wp.vid
ok()

View File

@ -76,8 +76,7 @@ func ppFil(w: FilterRef; db = AristoDbRef(nil)): string =
let n = key.to(UInt256)
if n == 0: "£ø" else: "£" & $n
else:
let keyLink = HashKey.fromBytes(key.data).value
HashLabel(root: VertexID(1), key: keyLink).pp(db)
HashKey.fromBytes(key.data).value.pp(db)
"(" & w.fid.pp & "," & w.src.qq(db) & "->" & w.trg.qq(db) & ")"
func pp(qf: (QueueID,FilterRef); db = AristoDbRef(nil)): string =

View File

@ -59,7 +59,7 @@ func pp*(
let
pfx = indent.toPfx
rootLink = w.root.to(HashKey)
result = "(" & HashLabel(root: rootID, key: rootLink).pp(db)
result = "(" & rootLink.pp(db)
result &= "," & $w.id & ",[" & $w.proof.len & "],"
result &= pfx & " ["
for n,kvp in w.kvpLst:

View File

@ -493,7 +493,7 @@ proc testShortKeys*(
"\n r=", r.pp(sig),
"\n ", sig.pp(),
"\n",
"\n pAmk=", sig.db.layersWalkLebal.toSeq.toTable.pp(sig.db),
"\n pAmk=", sig.db.layersWalkYek.toSeq.toTable.pp(sig.db),
"\n"
let w = sig.merkleSignCommit().value
gossip.say "*** testShortkeys (2)", "n=", n, " inx=", inx,
@ -502,7 +502,7 @@ proc testShortKeys*(
"\n R=", w.pp(sig),
"\n ", sig.pp(),
"\n",
"\n pAmk=", sig.db.layersWalkLebal.toSeq.toTable.pp(sig.db),
"\n pAmk=", sig.db.layersWalkYek.toSeq.toTable.pp(sig.db),
"\n",
"\n ----------------",
"\n"

View File

@ -109,7 +109,7 @@ proc schedStow(
): Result[void,AristoError] =
## Scheduled storage
let
layersMeter = db.nLayersVtx + db.nLayersLabel
layersMeter = db.nLayersVtx() + db.nLayersKey()
filterMeter = if db.roFilter.isNil: 0
else: db.roFilter.sTab.len + db.roFilter.kMap.len
persistent = MaxFilterBulk < max(layersMeter, filterMeter)
@ -557,7 +557,7 @@ proc testTxMergeProofAndKvpList*(
xCheck w.proof.len == proved.merged + proved.dups
xCheck db.lTab.len == lTabLen
xCheck db.nLayersVtx() <= proved.merged + sTabLen
xCheck proved.merged < db.nLayersLebal()
xCheck proved.merged < db.nLayersYek()
let
merged = db.mergeList leafs

View File

@ -46,7 +46,24 @@ let
when unittest2DisableParamFiltering:
# Filter out local options and pass on the rest to `unittest2`
proc cmdLineConfig(): tuple[samples: seq[CaptureSpecs]] =
  ## This helper allows passing additional command line options to the
## unit test.
##
## Example:
## ::
## nim c -r ...\
## -d:unittest2DisableParamFiltering \
## ./tests/test_coredb.nim \
## --output-level=VERBOSE \
## --sample=goerli-lp,goerli-ar
## or
## ::
## nim c ... -d:unittest2DisableParamFiltering ./tests/test_coredb.nim
## ./tests/test_coredb.out --output-level=VERBOSE --sample=goerli-ar
## ...
##
## At the moment, only the `--sample=` additional option is provided.
##
# Define sample list from the command line (if any)
const optPfx = "--sample=" # Custom option with sample list
@ -182,8 +199,10 @@ proc chainSyncRunner(
capture = bChainCapture;
dbType = CoreDbType(0);
ldgType = ldgTypeDefault;
enaLogging = false;
lastOneExtra = true;
profilingOk = false;
finalDiskCleanUpOk = true;
enaLoggingOk = false;
lastOneExtraOk = true;
) =
## Test backend database and ledger
@ -215,9 +234,9 @@ proc chainSyncRunner(
let
com = initRunnerDB(dbDir, capture, dbType, ldgType)
defer:
com.db.finish(flush = true)
#noisy.testChainSyncProfilingPrint numBlocks
if persistent: dbDir.flushDbDir
com.db.finish(flush = finalDiskCleanUpOk)
if profilingOk: noisy.testChainSyncProfilingPrint numBlocks
if persistent and finalDiskCleanUpOk: dbDir.flushDbDir
if noisy:
com.db.trackNewApi = true
@ -226,7 +245,7 @@ proc chainSyncRunner(
com.db.localDbOnly = true
check noisy.testChainSync(filePaths, com, numBlocks,
lastOneExtra=lastOneExtra, enaLogging=enaLogging)
lastOneExtra=lastOneExtraOk, enaLogging=enaLoggingOk)
# ------------------------------------------------------------------------------
# Main function(s)
@ -258,10 +277,13 @@ when isMainModule:
for n,capture in sampleList:
noisy.profileSection("@testList #" & $n, state):
noisy.chainSyncRunner(
capture=capture,
#dbType = ...,
capture = capture,
#dbType = ..,
ldgType=LedgerCache,
#enaLogging = true
#profilingOk = ..,
finalDiskCleanUpOk = false,
#enaLoggingOk = ..,
#lastOneExtraOk = ..,
)
noisy.say "***", "total elapsed: ", state[0].pp, " sections: ", state[1]

View File

@ -92,6 +92,14 @@ let
# To be compared against the proof-of-concept implementation as reference
legaTest0* = CaptureSpecs(
builtIn: true,
name: ariTest0.name.replace("-am", "-lm"),
network: ariTest0.network,
files: ariTest0.files,
numBlocks: ariTest0.numBlocks,
dbType: LegacyDbMemory)
legaTest1* = CaptureSpecs(
builtIn: true,
name: ariTest1.name.replace("-ar", "-lp"),
@ -112,6 +120,6 @@ let
allSamples* = [
bulkTest0, bulkTest1, bulkTest2, bulkTest3,
ariTest0, ariTest1, ariTest2,
legaTest1, legaTest2]
legaTest0, legaTest1, legaTest2]
# End