Aristo db supporting forest and layered tx architecture (#1598)

* Exclude some storage tests

why:
  These test running on external dumps slipped through. The particular
  dumps were reported earlier as somehow dodgy.

  This was changed in `#1457` but having a second look, the change on
  hexary_interpolate.nim(350) might be incorrect.

* Redesign `Aristo DB` descriptor for transaction based layers

why:
  Previous descriptor layout made it cumbersome to push/pop
  database delta layers.

  The new architecture keeps each layer with the full delta set
  relative to the database backend.

* Keep root ID as part of the `Patricia Trie` leaf path

why:
  That way, forests are supported
This commit is contained in:
Jordan Hrycaj 2023-06-09 12:17:37 +01:00 committed by GitHub
parent 67aaf92c1d
commit 932a2140f2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 1090 additions and 752 deletions

View File

@ -20,15 +20,26 @@ import
# Private functions # Private functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc toPfx(indent: int): string = proc sortedKeys(lTab: Table[LeafKey,VertexID]): seq[LeafKey] =
"\n" & " ".repeat(indent) lTab.keys.toSeq.sorted(cmp = proc(a,b: LeafKey): int = cmp(a,b))
proc keyVidUpdate(db: AristoDbRef, key: NodeKey, vid: VertexID): string = proc sortedKeys(kMap: Table[VertexID,NodeKey]): seq[VertexID] =
if not key.isEmpty and kMap.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
not vid.isZero and
not db.isNil: proc sortedKeys(sTab: Table[VertexID,VertexRef]): seq[VertexID] =
block: sTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
let keyVid = db.pAmk.getOrDefault(key, VertexID(0))
proc sortedKeys(pPrf: HashSet[VertexID]): seq[VertexID] =
pPrf.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
proc toPfx(indent: int; offset = 0): string =
if 0 < indent: "\n" & " ".repeat(indent+offset) else: ""
proc keyVidUpdate(db: var AristoDb, key: NodeKey, vid: VertexID): string =
if key != EMPTY_ROOT_KEY and
vid != VertexID(0):
if not db.top.isNil:
let keyVid = db.top.pAmk.getOrDefault(key, VertexID(0))
if keyVid != VertexID(0): if keyVid != VertexID(0):
if keyVid != vid: if keyVid != vid:
result = "(!)" result = "(!)"
@ -57,20 +68,17 @@ proc squeeze(s: string; hex = false; ignLen = false): string =
result &= ".." & s[s.len-16 .. ^1] result &= ".." & s[s.len-16 .. ^1]
proc stripZeros(a: string): string = proc stripZeros(a: string): string =
for n in 0 ..< a.len: a.strip(leading=true, trailing=false, chars={'0'}).toLowerAscii
if a[n] != '0':
return a[n .. ^1]
return a
proc ppVid(vid: VertexID): string = proc ppVid(vid: VertexID): string =
if vid.isZero: "ø" else: "$" & vid.uint64.toHex.stripZeros.toLowerAscii if vid == VertexID(0): "ø"
else: "$" & vid.uint64.toHex.stripZeros.toLowerAscii
proc vidCode(key: NodeKey, db: AristoDbRef): uint64 = proc vidCode(key: NodeKey, db: AristoDb): uint64 =
if not db.isNil and if key != EMPTY_ROOT_KEY and
key != EMPTY_ROOT_KEY and
key != EMPTY_CODE_KEY: key != EMPTY_CODE_KEY:
block: if not db.top.isNil:
let vid = db.pAmk.getOrDefault(key, VertexID(0)) let vid = db.top.pAmk.getOrDefault(key, VertexID(0))
if vid != VertexID(0): if vid != VertexID(0):
return vid.uint64 return vid.uint64
block: block:
@ -78,7 +86,7 @@ proc vidCode(key: NodeKey, db: AristoDbRef): uint64 =
if vid != VertexID(0): if vid != VertexID(0):
return vid.uint64 return vid.uint64
proc ppKey(key: NodeKey, db: AristoDbRef): string = proc ppKey(key: NodeKey, db: AristoDb): string =
if key == NodeKey.default: if key == NodeKey.default:
return "£ø" return "£ø"
if key == EMPTY_ROOT_KEY: if key == EMPTY_ROOT_KEY:
@ -86,38 +94,49 @@ proc ppKey(key: NodeKey, db: AristoDbRef): string =
if key == EMPTY_CODE_KEY: if key == EMPTY_CODE_KEY:
return "£c" return "£c"
if not db.isNil: if not db.top.isNil:
block: let vid = db.top.pAmk.getOrDefault(key, VertexID(0))
let vid = db.pAmk.getOrDefault(key, VertexID(0)) if vid != VertexID(0):
if vid != VertexID(0): return "£" & vid.uint64.toHex.stripZeros.toLowerAscii
return "£" & vid.uint64.toHex.stripZeros.toLowerAscii block:
block: let vid = db.xMap.getOrDefault(key, VertexID(0))
let vid = db.xMap.getOrDefault(key, VertexID(0)) if vid != VertexID(0):
if vid != VertexID(0): return "£" & vid.uint64.toHex.stripZeros.toLowerAscii
return "£" & vid.uint64.toHex.stripZeros.toLowerAscii
"%" & key.ByteArray32 "%" & key.ByteArray32
.mapIt(it.toHex(2)).join.tolowerAscii .mapIt(it.toHex(2)).join.tolowerAscii
.squeeze(hex=true,ignLen=true) .squeeze(hex=true,ignLen=true)
proc ppRootKey(a: NodeKey, db: AristoDbRef): string = proc ppRootKey(a: NodeKey, db: AristoDb): string =
if a != EMPTY_ROOT_KEY: if a != EMPTY_ROOT_KEY:
return a.ppKey(db) return a.ppKey(db)
proc ppCodeKey(a: NodeKey, db: AristoDbRef): string = proc ppCodeKey(a: NodeKey, db: AristoDb): string =
if a != EMPTY_CODE_KEY: if a != EMPTY_CODE_KEY:
return a.ppKey(db) return a.ppKey(db)
proc ppPathTag(tag: NodeTag, db: AristoDbRef): string = proc ppPathTag(tag: NodeTag, db: AristoDb): string =
## Raw key, for referenced key dump use `key.pp(db)` below ## Raw key, for referenced key dump use `key.pp(db)` below
if not db.isNil: if not db.top.isNil:
let vid = db.lTab.getOrDefault(tag, VertexID(0)) let
lky = LeafKey(root: VertexID(1), path: tag)
vid = db.top.lTab.getOrDefault(lky, VertexID(0))
if vid != VertexID(0): if vid != VertexID(0):
return "@" & vid.ppVid return "@" & vid.ppVid
"@" & tag.to(NodeKey).ByteArray32 "@" & tag.to(NodeKey).ByteArray32
.mapIt(it.toHex(2)).join.toLowerAscii .mapIt(it.toHex(2)).join.squeeze(hex=true,ignLen=true)
.squeeze(hex=true,ignLen=true)
proc ppLeafKey(lky: LeafKey, db: AristoDb): string =
## Raw key, for referenced key dump use `key.pp(db)` below
if not db.top.isNil:
let vid = db.top.lTab.getOrDefault(lky, VertexID(0))
if vid != VertexID(0):
return "@" & vid.ppVid
"@" & ($lky.root.uint64.toHex).stripZeros & ":" &
lky.path.to(NodeKey).ByteArray32
.mapIt(it.toHex(2)).join.squeeze(hex=true,ignLen=true)
proc ppPathPfx(pfx: NibblesSeq): string = proc ppPathPfx(pfx: NibblesSeq): string =
let s = $pfx let s = $pfx
@ -126,7 +145,7 @@ proc ppPathPfx(pfx: NibblesSeq): string =
proc ppNibble(n: int8): string = proc ppNibble(n: int8): string =
if n < 0: "ø" elif n < 10: $n else: n.toHex(1).toLowerAscii if n < 0: "ø" elif n < 10: $n else: n.toHex(1).toLowerAscii
proc ppPayload(p: PayloadRef, db: AristoDbRef): string = proc ppPayload(p: PayloadRef, db: AristoDb): string =
if p.isNil: if p.isNil:
result = "n/a" result = "n/a"
else: else:
@ -140,11 +159,13 @@ proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
result &= p.account.storageRoot.to(NodeKey).ppRootKey(db) & "," result &= p.account.storageRoot.to(NodeKey).ppRootKey(db) & ","
result &= p.account.codeHash.to(NodeKey).ppCodeKey(db) & ")" result &= p.account.codeHash.to(NodeKey).ppCodeKey(db) & ")"
proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string = proc ppVtx(nd: VertexRef, db: AristoDb, vid: VertexID): string =
if nd.isNil: if nd.isNil:
result = "n/a" result = "n/a"
else: else:
if db.isNil or vid.isZero or vid in db.pPrf: if db.top.isNil or vid == VertexID(0) or vid in db.top.pPrf:
result = ["L(", "X(", "B("][nd.vType.ord]
elif vid in db.top.kMap:
result = ["l(", "x(", "b("][nd.vType.ord] result = ["l(", "x(", "b("][nd.vType.ord]
else: else:
result = ["ł(", "€(", "þ("][nd.vType.ord] result = ["ł(", "€(", "þ("][nd.vType.ord]
@ -155,14 +176,14 @@ proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
result &= nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid result &= nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid
of Branch: of Branch:
for n in 0..15: for n in 0..15:
if not nd.bVid[n].isZero: if nd.bVid[n] != VertexID(0):
result &= nd.bVid[n].ppVid result &= nd.bVid[n].ppVid
if n < 15: if n < 15:
result &= "," result &= ","
result &= ")" result &= ")"
proc ppXMap*( proc ppXMap*(
db: AristoDbRef; db: AristoDb;
kMap: Table[VertexID,NodeKey]; kMap: Table[VertexID,NodeKey];
pAmk: Table[NodeKey,VertexID]; pAmk: Table[NodeKey,VertexID];
indent: int; indent: int;
@ -193,7 +214,7 @@ proc ppXMap*(
s & ")," s & "),"
var cache: seq[(uint64,uint64,bool)] var cache: seq[(uint64,uint64,bool)]
for vid in toSeq(kMap.keys).mapIt(it.uint64).sorted.mapIt(it.VertexID): for vid in kMap.sortedKeys:
let key = kMap.getOrDefault(vid, EMPTY_ROOT_KEY) let key = kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
cache.add (vid.uint64, key.vidCode(db), 0 < dups.getOrDefault(vid, 0)) cache.add (vid.uint64, key.vidCode(db), 0 < dups.getOrDefault(vid, 0))
@ -206,7 +227,7 @@ proc ppXMap*(
result = "{" result = "{"
if 0 < cache.len: if 0 < cache.len:
let let
pfx = indent.toPfx pfx = indent.toPfx(1)
var var
(i, r) = (0, cache[0]) (i, r) = (0, cache[0])
result &= cache[i][0].ppNtry result &= cache[i][0].ppNtry
@ -217,13 +238,13 @@ proc ppXMap*(
if r != w or w[2]: if r != w or w[2]:
if i+1 != n: if i+1 != n:
result &= ".. " & cache[n-1][0].ppNtry result &= ".. " & cache[n-1][0].ppNtry
result &= pfx & " " & cache[n][0].ppNtry result &= pfx & cache[n][0].ppNtry
(i, r) = (n, w) (i, r) = (n, w)
if i < cache.len - 1: if i < cache.len - 1:
if i+1 != cache.len - 1: if i+1 != cache.len - 1:
result &= ".. " result &= ".. "
else: else:
result &= pfx & " " result &= pfx
result &= cache[^1][0].ppNtry result &= cache[^1][0].ppNtry
result[^1] = '}' result[^1] = '}'
else: else:
@ -233,10 +254,9 @@ proc ppXMap*(
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc keyToVtxID*(db: AristoDbRef, key: NodeKey): VertexID = proc keyToVtxID*(db: var AristoDb, key: NodeKey): VertexID =
## Associate a vertex ID with the argument `key` for pretty printing. ## Associate a vertex ID with the argument `key` for pretty printing.
if not db.isNil and if key != EMPTY_ROOT_KEY and
key != EMPTY_ROOT_KEY and
key != EMPTY_CODE_KEY: key != EMPTY_CODE_KEY:
let vid = db.xMap.getOrDefault(key, VertexID(0)) let vid = db.xMap.getOrDefault(key, VertexID(0))
if vid != VertexID(0): if vid != VertexID(0):
@ -245,28 +265,31 @@ proc keyToVtxID*(db: AristoDbRef, key: NodeKey): VertexID =
result = db.vidFetch() result = db.vidFetch()
db.xMap[key] = result db.xMap[key] = result
proc pp*(vid: NodeKey, db = AristoDbRef(nil)): string = proc pp*(vid: NodeKey, db = AristoDb()): string =
vid.ppKey(db) vid.ppKey(db)
proc pp*(tag: NodeTag, db = AristoDbRef(nil)): string = proc pp*(tag: NodeTag, db = AristoDb()): string =
tag.ppPathTag(db) tag.ppPathTag(db)
proc pp*(lky: LeafKey, db = AristoDb()): string =
lky.ppLeafKey(db)
proc pp*(vid: VertexID): string = proc pp*(vid: VertexID): string =
vid.ppVid vid.ppVid
proc pp*(vid: openArray[VertexID]): string = proc pp*(vid: openArray[VertexID]): string =
"[" & vid.mapIt(it.ppVid).join(",") & "]" "[" & vid.mapIt(it.ppVid).join(",") & "]"
proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string = proc pp*(p: PayloadRef, db = AristoDb()): string =
p.ppPayload(db) p.ppPayload(db)
proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string = proc pp*(nd: VertexRef, db = AristoDb()): string =
nd.ppVtx(db, VertexID(0)) nd.ppVtx(db, VertexID(0))
proc pp*(nd: NodeRef, db = AristoDbRef(nil)): string = proc pp*(nd: NodeRef, db: var AristoDB): string =
if nd.isNil: if nd.isNil:
result = "n/a" result = "n/a"
elif nd.isError: elif nd.error != AristoError(0):
result = "(!" & $nd.error result = "(!" & $nd.error
else: else:
result = ["L(", "X(", "B("][nd.vType.ord] result = ["L(", "X(", "B("][nd.vType.ord]
@ -282,158 +305,104 @@ proc pp*(nd: NodeRef, db = AristoDbRef(nil)): string =
of Branch: of Branch:
result &= "[" result &= "["
for n in 0..15: for n in 0..15:
if not nd.bVid[n].isZero or nd.key[n] != EMPTY_ROOT_KEY: if nd.bVid[n] != VertexID(0) or nd.key[n] != EMPTY_ROOT_KEY:
result &= nd.bVid[n].ppVid result &= nd.bVid[n].ppVid
result &= db.keyVidUpdate(nd.key[n], nd.bVid[n]) & "," result &= db.keyVidUpdate(nd.key[n], nd.bVid[n]) & ","
result[^1] = ']' result[^1] = ']'
result &= ",[" result &= ",["
for n in 0..15: for n in 0..15:
if not nd.bVid[n].isZero or nd.key[n] != EMPTY_ROOT_KEY: if nd.bVid[n] != VertexID(0) or nd.key[n] != EMPTY_ROOT_KEY:
result &= nd.key[n].ppKey(db) result &= nd.key[n].ppKey(db)
result &= "," result &= ","
result[^1] = ']' result[^1] = ']'
result &= ")" result &= ")"
proc pp*( proc pp*(nd: NodeRef): string =
sTab: Table[VertexID,VertexRef]; var db = AristoDB()
db = AristoDbRef(nil); nd.pp(db)
indent = 4;
): string =
let pfx = indent.toPfx
var first = true
result = "{"
for vid in toSeq(sTab.keys).mapIt(it.uint64).sorted.mapIt(it.VertexID):
let vtx = sTab.getOrDefault(vid, VertexRef(nil))
if vtx != VertexRef(nil):
if first:
first = false
else:
result &= pfx & " "
result &= "(" & vid.ppVid & "," & vtx.ppVtx(db,vid) & ")"
result &= "}"
proc pp*( proc pp*(sTab: Table[VertexID,VertexRef]; db = AristoDb(); indent = 4): string =
lTab: Table[NodeTag,VertexID]; "{" & sTab.sortedKeys
indent = 4; .mapIt((it, sTab.getOrDefault(it, VertexRef(nil))))
): string = .filterIt(it[1] != VertexRef(nil))
let pfx = indent.toPfx .mapIt("(" & it[0].ppVid & "," & it[1].ppVtx(db,it[0]) & ")")
var first = true .join("," & indent.toPfx(1)) & "}"
result = "{"
for tag in toSeq(lTab.keys).mapIt(it.UInt256).sorted.mapIt(it.NodeTag): proc pp*(lTab: Table[LeafKey,VertexID]; indent = 4): string =
let vid = lTab.getOrDefault(tag, VertexID(0)) var db = AristoDb()
if vid != VertexID(0): "{" & lTab.sortedKeys
if first: .mapIt((it, lTab.getOrDefault(it, VertexID(0))))
first = false .filterIt(it[1] != VertexID(0))
else: .mapIt("(" & it[0].ppLeafKey(db) & "," & it[1].ppVid & ")")
result &= pfx & " " .join("," & indent.toPfx(1)) & "}"
result &= "(" & tag.ppPathTag(nil) & "," & vid.ppVid & ")"
result &= "}"
proc pp*(vGen: seq[VertexID]): string = proc pp*(vGen: seq[VertexID]): string =
result = "[" "[" & vGen.mapIt(it.ppVid).join(",") & "]"
for vid in vGen:
result &= vid.ppVid & ","
if result[^1] == ',':
result[^1] = ']'
else:
result &= "]"
proc pp*(pPrf: HashSet[VertexID]): string = proc pp*(pPrf: HashSet[VertexID]): string =
result = "{" "{" & pPrf.sortedKeys.mapIt(it.ppVid).join(",") & "}"
for vid in pPrf.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
result &= vid.ppVid & ","
if result[^1] == ',':
result[^1] = '}'
else:
result &= "}"
proc pp*( proc pp*(leg: Leg; db = AristoDb()): string =
leg: Leg; result = "(" & leg.wp.vid.ppVid & ","
db = AristoDbRef(nil); if not db.top.isNil:
): string = let key = db.top.kMap.getOrDefault(leg.wp.vid, EMPTY_ROOT_KEY)
result = " (" & leg.wp.vid.ppVid & "," result &= (if key != EMPTY_ROOT_KEY: key.ppKey(db) else: "ø")
if not db.isNil:
let key = db.kMap.getOrDefault(leg.wp.vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY:
result &= key.ppKey(db)
else:
result &= "ø"
result &= "," & $leg.nibble.ppNibble & "," & leg.wp.vtx.pp(db) & ")" result &= "," & $leg.nibble.ppNibble & "," & leg.wp.vtx.pp(db) & ")"
proc pp*( proc pp*(hike: Hike; db = AristoDb(); indent = 4): string =
hike: Hike; let pfx = indent.toPfx(1)
db = AristoDbRef(nil); result = "["
indent = 4; if hike.legs.len == 0:
): string = result &= "(" & hike.root.ppVid & ")"
let pfx = indent.toPfx else:
var first = true if hike.legs[0].wp.vid != hike.root:
result = "[(" & hike.root.ppVid & ")" result &= "(" & hike.root.ppVid & ")" & pfx
for leg in hike.legs: result &= hike.legs.mapIt(it.pp(db)).join(pfx)
result &= "," & pfx & leg.pp(db) result &= pfx & "(" & hike.tail.ppPathPfx & ")"
result &= "," & pfx & " (" & hike.tail.ppPathPfx & ")"
if hike.error != AristoError(0): if hike.error != AristoError(0):
result &= "," & pfx & " (" & $hike.error & ")" result &= pfx & "(" & $hike.error & ")"
result &= "]" result &= "]"
proc pp*( proc pp*(kMap: Table[VertexID,NodeKey]; indent = 4): string =
kMap: Table[VertexID,NodeKey]; var db: AristoDb
db = AristoDbRef(nil); "{" & kMap.sortedKeys
indent = 4; .mapIt((it,kMap.getOrDefault(it, EMPTY_ROOT_KEY)))
): string = .filterIt(it[1] != EMPTY_ROOT_KEY)
let pfx = indent.toPfx .mapIt("(" & it[0].ppVid & "," & it[1].ppKey(db) & ")")
var first = true .join("," & indent.toPfx(1)) & "}"
result = "{"
for vid in toSeq(kMap.keys).mapIt(it.uint64).sorted.mapIt(it.VertexID):
let key = kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY:
if first:
first = false
else:
result &= pfx & " "
result &= "(" & vid.ppVid & "," & key.ppKey(db) & "),"
if result[^1] == ',':
result[^1] = '}'
else:
result &= "}"
proc pp*( proc pp*(pAmk: Table[NodeKey,VertexID]; indent = 4): string =
pAmk: Table[NodeKey,VertexID];
db = AristoDbRef(nil);
indent = 4;
): string =
let pfx = indent.toPfx
var var
db: AristoDb
rev = pAmk.pairs.toSeq.mapIt((it[1],it[0])).toTable rev = pAmk.pairs.toSeq.mapIt((it[1],it[0])).toTable
first = true "{" & rev.sortedKeys
result = "{" .mapIt((it,rev.getOrDefault(it, EMPTY_ROOT_KEY)))
for vid in rev.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID): .filterIt(it[1] != EMPTY_ROOT_KEY)
let key = rev.getOrDefault(vid, EMPTY_ROOT_KEY) .mapIt("(" & it[1].ppKey(db) & "," & it[0].ppVid & ")")
if key != EMPTY_ROOT_KEY: .join("," & indent.toPfx(1)) & "}"
if first:
first = false proc pp*(kMap: Table[VertexID,NodeKey]; db: AristoDb; indent = 4): string =
else: db.ppXMap(kMap, db.top.pAmk, indent)
result &= pfx & " "
result &= "(" & key.ppKey(db) & "," & vid.ppVid & ")," proc pp*(pAmk: Table[NodeKey,VertexID]; db: AristoDb; indent = 4): string =
if result[^1] == ',': db.ppXMap(db.top.kMap, pAmk, indent)
result[^1] = '}'
else:
result &= "}"
# --------------------- # ---------------------
proc pp*( proc pp*(
db: AristoDbRef; db: AristoDb;
sTabOk = true; sTabOk = true;
lTabOk = true; lTabOk = true;
kMapOk = true; kMapOk = true;
dKeyOk = true;
pPrfOk = true; pPrfOk = true;
indent = 4; indent = 4;
): string = ): string =
let let
pfx1 = max(indent-1,0).toPfx pfx1 = max(indent-1,0).toPfx
pfx2 = indent.toPfx pfx2 = indent.toPfx
labelOk = 1 < sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord labelOk = 1 < sTabOk.ord + lTabOk.ord + kMapOk.ord + dKeyOk.ord + pPrfOk.ord
var var
pfy1 = "" pfy1 = ""
pfy2 = "" pfy2 = ""
@ -448,18 +417,22 @@ proc pp*(
pfy2 = pfx2 pfy2 = pfx2
rc rc
if sTabOk: if not db.top.isNil:
let info = "sTab(" & $db.sTab.len & ")" if sTabOk:
result &= info.doPrefix & db.sTab.pp(db,indent) let info = "sTab(" & $db.top.sTab.len & ")"
if lTabOk: result &= info.doPrefix & db.top.sTab.pp(db,indent)
let info = "lTab(" & $db.lTab.len & "),root=" & db.lRoot.ppVid if lTabOk:
result &= info.doPrefix & db.lTab.pp(indent) let info = "lTab(" & $db.top.lTab.len & ")"
if kMapOk: result &= info.doPrefix & db.top.lTab.pp(indent)
let info = "kMap(" & $db.kMap.len & "," & $db.pAmk.len & ")" if kMapOk:
result &= info.doPrefix & db.ppXMap(db.kMap,db.pAmk,indent) let info = "kMap(" & $db.top.kMap.len & "," & $db.top.pAmk.len & ")"
if pPrfOk: result &= info.doPrefix & db.ppXMap(db.top.kMap,db.top.pAmk,indent)
let info = "pPrf(" & $db.pPrf.len & ")" if dKeyOk:
result &= info.doPrefix & db.pPrf.pp let info = "dKey(" & $db.top.dkey.len & ")"
result &= info.doPrefix & db.top.dKey.pp
if pPrfOk:
let info = "pPrf(" & $db.top.pPrf.len & ")"
result &= info.doPrefix & db.top.pPrf.pp
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -35,23 +35,30 @@ proc branchStillNeeded(vtx: VertexRef): bool =
if vtx.bVid[n] != VertexID(0): if vtx.bVid[n] != VertexID(0):
return true return true
proc clearKey(db: AristoDbRef; vid: VertexID) = proc clearKey(db: AristoDb; vid: VertexID) =
let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
db.kMap.del vid db.top.kMap.del vid
db.pAmk.del key db.top.pAmk.del key
elif db.getKeyBackend(vid).isOK:
# Register for deleting on backend
db.top.dKey.incl vid
proc doneWith(db: AristoDbRef; vid: VertexID) = proc doneWith(db: AristoDb; vid: VertexID) =
# Remove entry # Remove entry
db.vidDispose vid db.top.dKey.excl vid # No need to register for deleting on backend
db.sTab.del vid db.vidDispose vid # Will be propagated to backend
db.clearKey vid # Update Merkle hash db.top.sTab.del vid
let key = db.top.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY:
db.top.kMap.del vid
db.top.pAmk.del key
proc deleteImpl( proc deleteImpl(
hike: Hike; # Fully expanded path hike: Hike; # Fully expanded path
pathTag: NodeTag; # `Patricia Trie` path root-to-leaf lky: LeafKey; # `Patricia Trie` path root-to-leaf
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
## Implementation of *delete* functionality. ## Implementation of *delete* functionality.
if hike.error != AristoError(0): if hike.error != AristoError(0):
@ -66,7 +73,7 @@ proc deleteImpl(
let lf = hike.legs[inx].wp let lf = hike.legs[inx].wp
if lf.vtx.vType != Leaf: if lf.vtx.vType != Leaf:
return err((lf.vid,DelLeafExpexted)) return err((lf.vid,DelLeafExpexted))
if lf.vid in db.pPrf: if lf.vid in db.top.pPrf:
return err((lf.vid, DelLeafLocked)) return err((lf.vid, DelLeafLocked))
db.doneWith lf.vid db.doneWith lf.vid
inx.dec inx.dec
@ -76,7 +83,7 @@ proc deleteImpl(
let br = hike.legs[inx].wp let br = hike.legs[inx].wp
if br.vtx.vType != Branch: if br.vtx.vType != Branch:
return err((br.vid,DelBranchExpexted)) return err((br.vid,DelBranchExpexted))
if br.vid in db.pPrf: if br.vid in db.top.pPrf:
return err((br.vid, DelBranchLocked)) return err((br.vid, DelBranchLocked))
br.vtx.bVid[hike.legs[inx].nibble] = VertexID(0) br.vtx.bVid[hike.legs[inx].nibble] = VertexID(0)
@ -94,15 +101,19 @@ proc deleteImpl(
# There might be an optional `Extension` to remove # There might be an optional `Extension` to remove
let ext = hike.legs[inx].wp let ext = hike.legs[inx].wp
if ext.vtx.vType == Extension: if ext.vtx.vType == Extension:
if br.vid in db.pPrf: if br.vid in db.top.pPrf:
return err((ext.vid, DelExtLocked)) return err((ext.vid, DelExtLocked))
db.doneWith ext.vid db.doneWith ext.vid
inx.dec inx.dec
# Delete leaf entry # Delete leaf entry
db.lTab.del pathTag let rc = db.getVtxBackend lf.vid
if db.lTab.len == 0: if rc.isErr and rc.error == GetVtxNotFound:
db.lRoot = VertexID(0) # No need to keep it any longer
db.top.lTab.del lky
else:
# To be deleted in backend when it is updated
db.top.lTab[lky] = VertexID(0)
ok() ok()
@ -112,23 +123,23 @@ proc deleteImpl(
proc delete*( proc delete*(
hike: Hike; # Fully expanded chain of vertices hike: Hike; # Fully expanded chain of vertices
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
## Delete argument `hike` chain of vertices from the database ## Delete argument `hike` chain of vertices from the database
# Need path in order to remove it from `lTab[]` # Need path in order to remove it from `lTab[]`
let pathTag = block: let lky = block:
let rc = hike.to(NibblesSeq).pathToTag() let rc = hike.to(NibblesSeq).pathToTag()
if rc.isErr: if rc.isErr:
return err((VertexID(0),DelPathTagError)) return err((VertexID(0),DelPathTagError))
rc.value LeafKey(root: hike.root, path: rc.value)
hike.deleteImpl(pathTag, db) hike.deleteImpl(lky, db)
proc delete*( proc delete*(
pathTag: NodeTag; # `Patricia Trie` path root-to-leaf lky: LeafKey; # `Patricia Trie` path root-to-leaf
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
## Variant of `delete()` ## Variant of `delete()`
pathTag.hikeUp(db.lRoot, db).deleteImpl(pathTag, db) lky.hikeUp(db).deleteImpl(lky, db)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -22,10 +22,10 @@
{.push raises: [].} {.push raises: [].}
import import
std/[sets, tables], std/[sets, strutils, tables],
eth/[common, trie/nibbles], eth/[common, trie/nibbles],
stew/results, stew/results,
"."/[aristo_constants, aristo_error] "."/aristo_error
import import
../../sync/snap/range_desc ../../sync/snap/range_desc
@ -37,33 +37,75 @@ type
## Tip of edge towards child object in the `Patricia Trie` logic. It is ## Tip of edge towards child object in the `Patricia Trie` logic. It is
## also the key into the structural table of the `Aristo Trie`. ## also the key into the structural table of the `Aristo Trie`.
LeafKey* = object
## Generalised access key for a leaf vertex on a dedicated sub-trie
## defined by the `root` field. The main trie is the sub-trie with
## root ID `VertexID(1)`.
root*: VertexID ## Root ID for the sub-trie
path*: NodeTag ## Path into the `Patricia Trie`
# -------------
GetVtxFn* = GetVtxFn* =
proc(vid: VertexID): Result[VertexRef,AristoError] proc(vid: VertexID): Result[VertexRef,AristoError] {.gcsafe, raises: [].}
{.gcsafe, raises: [].}
## Generic backend database retrieval function for a single structural ## Generic backend database retrieval function for a single structural
## `Aristo DB` data record. ## `Aristo DB` data record.
GetKeyFn* = GetKeyFn* =
proc(vid: VertexID): Result[NodeKey,AristoError] proc(vid: VertexID): Result[NodeKey,AristoError] {.gcsafe, raises: [].}
{.gcsafe, raises: [].}
## Generic backend database retrieval function for a single ## Generic backend database retrieval function for a single
## `Aristo DB` hash lookup value. ## `Aristo DB` hash lookup value.
GetIdgFn* =
proc(): Result[seq[VertexID],AristoError] {.gcsafe, raises: [].}
## Generic backend database retrieval function for a the ID generator
## `Aristo DB` state record.
# -------------
PutHdlRef* = ref object of RootRef
## Persistent database transaction frame handle. This handle is used to
## wrap any of `PutVtxFn`, `PutKeyFn`, and `PutIdgFn` into and atomic
## transaction frame. These transaction frames must not be interleaved
## by any library function using the backend.
PutBegFn* =
proc(): PutHdlRef {.gcsafe, raises: [].}
## Generic transaction initialisation function
PutVtxFn* = PutVtxFn* =
proc(vrps: openArray[(VertexID,VertexRef)]): AristoError proc(hdl: PutHdlRef; vrps: openArray[(VertexID,VertexRef)])
{.gcsafe, raises: [].} {.gcsafe, raises: [].}
## Generic backend database bulk storage function. ## Generic backend database bulk storage function.
PutKeyFn* = PutKeyFn* =
proc(vkps: openArray[(VertexID,NodeKey)]): AristoError proc(hdl: PutHdlRef; vkps: openArray[(VertexID,NodeKey)])
{.gcsafe, raises: [].} {.gcsafe, raises: [].}
## Generic backend database bulk storage function. ## Generic backend database bulk storage function.
DelFn* = PutIdgFn* =
proc(hdl: PutHdlRef; vs: openArray[VertexID]) {.gcsafe, raises: [].}
## Generic backend database ID generator state storage function.
PutEndFn* =
proc(hdl: PutHdlRef): AristoError {.gcsafe, raises: [].}
## Generic transaction termination function
# -------------
DelVtxFn* =
proc(vids: openArray[VertexID]) proc(vids: openArray[VertexID])
{.gcsafe, raises: [].} {.gcsafe, raises: [].}
## Generic backend database delete function for both, the structural ## Generic backend database delete function for the structural
## `Aristo DB` data record and the hash lookup value. ## `Aristo DB` data records
DelKeyFn* =
proc(vids: openArray[VertexID])
{.gcsafe, raises: [].}
## Generic backend database delete function for the `Aristo DB`
## Merkle hash key mappings.
# -------------
VertexType* = enum VertexType* = enum
## Type of `Aristo Trie` vertex ## Type of `Aristo Trie` vertex
@ -105,29 +147,32 @@ type
AristoBackendRef* = ref object AristoBackendRef* = ref object
## Backend interface. ## Backend interface.
getVtxFn*: GetVtxFn ## Read vertex record getVtxFn*: GetVtxFn ## Read vertex record
getKeyFn*: GetKeyFn ## Read vertex hash getKeyFn*: GetKeyFn ## Read Merkle hash/key
getIdgFn*: GetIdgFn ## Read ID generator state
putBegFn*: PutBegFn ## Start bulk store session
putVtxFn*: PutVtxFn ## Bulk store vertex records putVtxFn*: PutVtxFn ## Bulk store vertex records
putKeyFn*: PutKeyFn ## Bulk store vertex hashes putKeyFn*: PutKeyFn ## Bulk store vertex hashes
delFn*: DelFn ## Bulk delete vertex records and hashes putIdgFn*: PutIdgFn ## Store ID generator state
putEndFn*: PutEndFn ## Commit bulk store session
delVtxFn*: DelVtxFn ## Bulk delete vertex records
delKeyFn*: DelKeyFn ## Bulk delete vertex Merkle hashes
AristoDbRef* = ref AristoDbObj AristoLayerRef* = ref object
AristoDbObj = object ## Hexary trie database layer structures. Any layer holds the full
## Hexary trie plus helper structures ## change relative to the backend.
sTab*: Table[VertexID,VertexRef] ## Structural vertex table making up a trie sTab*: Table[VertexID,VertexRef] ## Structural vertex table
lTab*: Table[NodeTag,VertexID] ## Direct access, path to leaf node lTab*: Table[LeafKey,VertexID] ## Direct access, path to leaf vertex
lRoot*: VertexID ## Root vertex for `lTab[]`
kMap*: Table[VertexID,NodeKey] ## Merkle hash key mapping kMap*: Table[VertexID,NodeKey] ## Merkle hash key mapping
dKey*: HashSet[VertexID] ## Locally deleted Merkle hash keys
pAmk*: Table[NodeKey,VertexID] ## Reverse mapper for data import pAmk*: Table[NodeKey,VertexID] ## Reverse mapper for data import
pPrf*: HashSet[VertexID] ## Locked vertices (from proof vertices) pPrf*: HashSet[VertexID] ## Locked vertices (proof nodes)
vGen*: seq[VertexID] ## Unique vertex ID generator vGen*: seq[VertexID] ## Unique vertex ID generator
case cascaded*: bool ## Cascaded delta databases, tx layer AristoDb* = object
of true: ## Set of database layers, supporting transaction frames
level*: int ## Positive number of stack layers top*: AristoLayerRef ## Database working layer
stack*: AristoDbRef ## Down the chain, not `nil` stack*: seq[AristoLayerRef] ## Stashed parent layers
base*: AristoDbRef ## Backend level descriptor, maybe unneeded backend*: AristoBackendRef ## Backend database (may well be `nil`)
else:
backend*: AristoBackendRef ## backend database (maybe `nil`)
# Debugging data below, might go away in future # Debugging data below, might go away in future
xMap*: Table[NodeKey,VertexID] ## For pretty printing, extends `pAmk` xMap*: Table[NodeKey,VertexID] ## For pretty printing, extends `pAmk`
@ -145,6 +190,23 @@ proc `==`*(a, b: VertexID): bool {.borrow.}
proc cmp*(a, b: VertexID): int {.borrow.} proc cmp*(a, b: VertexID): int {.borrow.}
proc `$`*(a: VertexID): string = $a.uint64 proc `$`*(a: VertexID): string = $a.uint64
# ------------------------------------------------------------------------------
# Public helpers: `LeafKey` scalar data model
# ------------------------------------------------------------------------------
proc `<`*(a, b: LeafKey): bool =
a.root < b.root or (a.root == b.root and a.path < b.path)
proc `==`*(a, b: LeafKey): bool =
a.root == b.root and a.path == b.path
proc cmp*(a, b: LeafKey): int =
if a < b: -1 elif a == b: 0 else: 1
proc `$`*(a: LeafKey): string =
let w = $a.root.uint64.toHex & ":" & $a.path.Uint256.toHex
w.strip(leading=true, trailing=false, chars={'0'}).toLowerAscii
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers: `NodeRef` and `PayloadRef` # Public helpers: `NodeRef` and `PayloadRef`
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -209,15 +271,6 @@ proc `==`*(a, b: NodeRef): bool =
# Public helpers, miscellaneous functions # Public helpers, miscellaneous functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc isZero*(a: VertexID): bool =
a == VertexID(0)
proc isEmpty*(a: NodeKey): bool =
a == EMPTY_ROOT_KEY
proc isError*(a: NodeRef): bool =
a.error != AristoError(0)
proc convertTo*(payload: PayloadRef; T: type Blob): T = proc convertTo*(payload: PayloadRef; T: type Blob): T =
## Probably lossy conversion as the storage type `kind` gets missing ## Probably lossy conversion as the storage type `kind` gets missing
case payload.pType: case payload.pType:

View File

@ -13,7 +13,7 @@ type
NothingSerious = 0 NothingSerious = 0
GenericError GenericError
# Rlp decoder, `fromRlpRecord()` # Rlp decoder, `read()`
Rlp2Or17ListEntries Rlp2Or17ListEntries
RlpBlobExpected RlpBlobExpected
RlpBranchLinkExpected RlpBranchLinkExpected
@ -23,7 +23,7 @@ type
RlpRlpException RlpRlpException
RlpOtherException RlpOtherException
# Db record decoder, `fromDbRecord()` # Db record decoder, `blobify()`
DbrNilArgument DbrNilArgument
DbrUnknown DbrUnknown
DbrTooShort DbrTooShort
@ -36,22 +36,23 @@ type
DbrLeafSizeGarbled DbrLeafSizeGarbled
DbrLeafGotExtPrefix DbrLeafGotExtPrefix
# Db admin data decoder, `fromAristoDb()` # Db admin data decoder, `deblobify()`
ADbGarbledSize ADbGarbledSize
ADbWrongType ADbWrongType
# Db record encoder, `toDbRecord()` # Db record encoder, `blobify()`
VtxExPathOverflow VtxExPathOverflow
VtxLeafPathOverflow VtxLeafPathOverflow
# Converter `asNode()` # Converter `asNode()`, currenly for unit tests only
CacheMissingNodekeys CacheMissingNodekeys
# Get function `getVtxCascaded()` # Get function `getVtxCascaded()`
GetVtxNotFound GetVtxNotFound
GetTagNotFound GetTagNotFound
GetKeyNotFound
# Path function hikeUp()` # Path function `hikeUp()`
PathRootMissing PathRootMissing
PathLeafTooEarly PathLeafTooEarly
PathBranchTailEmpty PathBranchTailEmpty
@ -83,6 +84,10 @@ type
MergeNodeKeyEmpty MergeNodeKeyEmpty
MergeNodeKeyCachedAlready MergeNodeKeyCachedAlready
MergeNodeKeyDiffersFromCached
MergeRootKeyEmpty
MergeRootKeyDiffersForVid
# Update `Merkle` hashes `hashify()` # Update `Merkle` hashes `hashify()`
HashifyCannotComplete HashifyCannotComplete
@ -126,4 +131,7 @@ type
DelBranchLocked DelBranchLocked
DelExtLocked DelExtLocked
# Save permanently, `save()`
BackendMissing
# End # End

View File

@ -8,15 +8,15 @@
# at your option. This file may not be copied, modified, or distributed # at your option. This file may not be copied, modified, or distributed
# except according to those terms. # except according to those terms.
## Read vertex recorfd on the layered Aristo DB delta architecture ## Read vertex record on the layered Aristo DB delta architecture
## =============================================================== ## ==============================================================
{.push raises: [].} {.push raises: [].}
import import
std/tables, std/[sets, tables],
stew/results, stew/results,
"."/[aristo_desc, aristo_error] "."/[aristo_constants, aristo_desc, aristo_error]
type type
VidVtxPair* = object VidVtxPair* = object
@ -27,56 +27,86 @@ type
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc getVtxCascaded*( proc getVtxBackend*(
db: AristoDbRef; db: AristoDb;
vid: VertexID; vid: VertexID;
): Result[VertexRef,AristoError] = ): Result[VertexRef,AristoError] =
## Cascaded lookup for data record down the transaction cascade. This ## Get the vertex from the `backened` layer if available.
## function will return a potential error code from the backend (if any). let be = db.backend
db.sTab.withValue(vid, vtxPtr):
return ok vtxPtr[]
# Down the rabbit hole of transaction layers
var lDb = db
while lDb.cascaded:
lDb = lDb.stack
lDb.sTab.withValue(vid, vtxPtr):
return ok vtxPtr[]
let be = lDb.backend
if not be.isNil: if not be.isNil:
return be.getVtxFn vid return be.getVtxFn vid
err(GetVtxNotFound) err(GetVtxNotFound)
proc getVtxCascaded*( proc getKeyBackend*(
db: AristoDbRef; db: AristoDb;
tag: NodeTag; vid: VertexID;
): Result[VidVtxPair,AristoError] = ): Result[NodeKey,AristoError] =
## Cascaded lookup for data record down the transaction cascade using ## Get the merkle hash/key from the backend
## the Patricia path. # key must not have been locally deleted (but not saved, yet)
db.lTab.withValue(tag, vidPtr): if vid notin db.top.dKey:
db.sTab.withValue(vidPtr[], vtxPtr): let be = db.backend
return ok VidVtxPair(vid: vidPtr[], vtx: vtxPtr[]) if not be.isNil:
return err(GetTagNotFound) return be.getKeyFn vid
# Down the rabbit hole of transaction layers err(GetKeyNotFound)
var lDb = db
while lDb.cascaded:
lDb = lDb.stack proc getVtxCascaded*(
lDb.lTab.withValue(tag, vidPtr): db: AristoDb;
lDb.sTab.withValue(vidPtr[], vtxPtr): vid: VertexID;
return ok VidVtxPair(vid: vidPtr[], vtx: vtxPtr[]) ): Result[VertexRef,AristoError] =
return err(GetTagNotFound) ## Get the vertex from the top layer or the `backened` layer if available.
let vtx = db.top.sTab.getOrDefault(vid, VertexRef(nil))
if vtx != VertexRef(nil):
return ok vtx
db.getVtxBackend vid
proc getKeyCascaded*(
db: AristoDb;
vid: VertexID;
): Result[NodeKey,AristoError] =
## Get the Merkle hash/key from the top layer or the `backened` layer if
## available.
let key = db.top.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY:
return ok key
db.getKeyBackend vid
proc getLeaf*(
db: AristoDb;
lky: LeafKey;
): Result[VidVtxPair,AristoError] =
## Get the vertex from the top layer by the `Patricia Trie` path. This
## function does not search on the `backend` layer.
let vid = db.top.lTab.getOrDefault(lky, VertexID(0))
if vid != VertexID(0):
let vtx = db.top.sTab.getOrDefault(vid, VertexRef(nil))
if vtx != VertexRef(nil):
return ok VidVtxPair(vid: vid, vtx: vtx)
err(GetTagNotFound) err(GetTagNotFound)
proc getVtx*(db: AristoDbRef; vid: VertexID): VertexRef = # ---------
## Variant of `getVtxCascaded()` with returning `nil` on error ignoring the
## error type information. proc getVtx*(db: AristoDb; vid: VertexID): VertexRef =
let rc = db.getVtxCascaded vid ## Variant of `getVtxCascaded()` returning `nil` on error (while
## ignoring the detailed error type information.)
db.getVtxCascaded(vid).get(otherwise = VertexRef(nil))
proc getVtx*(db: AristoDb; lky: LeafKey): VertexRef =
## Variant of `getLeaf()` returning `nil` on error (while
## ignoring the detailed error type information.)
let rc = db.getLeaf lky
if rc.isOk: if rc.isOk:
return rc.value return rc.value.vtx
proc getKey*(db: AristoDb; vid: VertexID): NodeKey =
## Variant of `getKeyCascaded()` returning `EMPTY_ROOT_KEY` on error (while
## ignoring the detailed error type information.)
db.getKeyCascaded(vid).get(otherwise = EMPTY_ROOT_KEY)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -42,62 +42,46 @@
{.push raises: [].} {.push raises: [].}
import import
std/[algorithm, sequtils, sets, tables], std/[sets, tables],
chronicles, chronicles,
eth/common, eth/common,
stew/results, stew/results,
./aristo_debug, "."/[aristo_constants, aristo_debug, aristo_desc, aristo_error, aristo_get,
"."/[aristo_constants, aristo_desc, aristo_error, aristo_get, aristo_hike, aristo_hike, aristo_transcode, aristo_vid]
aristo_transcode]
logScope: logScope:
topics = "aristo-hashify" topics = "aristo-hashify"
# ------------------------------------------------------------------------------
# Private helper, debugging
# ------------------------------------------------------------------------------
proc pp(t: Table[VertexID,VertexID]): string =
result = "{"
for a in toSeq(t.keys).mapIt(it.uint64).sorted.mapIt(it.VertexID):
let b = t.getOrDefault(a, VertexID(0))
if b != VertexID(0):
result &= "(" & a.pp & "," & b.pp & "),"
if result[^1] == ',':
result[^1] = '}'
else:
result &= "}"
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc toNode(vtx: VertexRef; db: AristoDbRef): Result[NodeRef,void] = proc toNode(vtx: VertexRef; db: AristoDb): Result[NodeRef,void] =
case vtx.vType: case vtx.vType:
of Leaf: of Leaf:
return ok NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData) return ok NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
of Branch: of Branch:
let node = NodeRef(vType: Branch, bVid: vtx.bVid) let node = NodeRef(vType: Branch, bVid: vtx.bVid)
for n in 0 .. 15: for n in 0 .. 15:
if vtx.bVid[n].isZero: if vtx.bVid[n] == VertexID(0):
node.key[n] = EMPTY_ROOT_KEY node.key[n] = EMPTY_ROOT_KEY
else: else:
let key = db.kMap.getOrDefault(vtx.bVid[n], EMPTY_ROOT_KEY) let key = db.getKey vtx.bVid[n]
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
node.key[n] = key node.key[n] = key
continue continue
return err() return err()
return ok node return ok node
of Extension: of Extension:
if not vtx.eVid.isZero: if vtx.eVid != VertexID(0):
let key = db.kMap.getOrDefault(vtx.eVid, EMPTY_ROOT_KEY) let key = db.getKey vtx.eVid
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vtx.eVid) let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vtx.eVid)
node.key[0] = key node.key[0] = key
return ok node return ok node
proc leafToRootHasher( proc leafToRootHasher(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
hike: Hike; # Hike for labelling leaf..root hike: Hike; # Hike for labelling leaf..root
): Result[int,(VertexID,AristoError)] = ): Result[int,(VertexID,AristoError)] =
## Returns the index of the first node that could not be hashed ## Returns the index of the first node that could not be hashed
@ -107,19 +91,20 @@ proc leafToRootHasher(
rc = wp.vtx.toNode db rc = wp.vtx.toNode db
if rc.isErr: if rc.isErr:
return ok n return ok n
# Vertices marked proof nodes need not be checked # Vertices marked proof nodes need not be checked
if wp.vid in db.pPrf: if wp.vid in db.top.pPrf:
continue continue
# Check against existing key, or store new key # Check against existing key, or store new key
let key = rc.value.encode.digestTo(NodeKey) let
let vfyKey = db.kMap.getOrDefault(wp.vid, EMPTY_ROOT_KEY) key = rc.value.encode.digestTo(NodeKey)
if vfyKey == EMPTY_ROOT_KEY: vfy = db.getKey wp.vid
db.pAmk[key] = wp.vid if vfy == EMPTY_ROOT_KEY:
db.kMap[wp.vid] = key db.vidAttach(key, wp.vid)
elif key != vfyKey: elif key != vfy:
let error = HashifyExistingHashMismatch let error = HashifyExistingHashMismatch
debug "hashify failed", vid=wp.vid, key, expected=vfyKey, error debug "hashify failed", vid=wp.vid, key, expected=vfy, error
return err((wp.vid,error)) return err((wp.vid,error))
ok -1 # all could be hashed ok -1 # all could be hashed
@ -129,44 +114,48 @@ proc leafToRootHasher(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hashifyClear*( proc hashifyClear*(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
locksOnly = false; # If `true`, then clear only proof locks locksOnly = false; # If `true`, then clear only proof locks
) = ) =
## Clear all `Merkle` hashes from the argument database layer `db`. ## Clear all `Merkle` hashes from the `db` argument database top layer.
if not locksOnly: if not locksOnly:
db.pAmk.clear db.top.pAmk.clear
db.kMap.clear db.top.kMap.clear
db.pPrf.clear db.top.dKey.clear
db.top.pPrf.clear
proc hashify*( proc hashify*(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
rootKey = EMPTY_ROOT_KEY; # Optional root key ): Result[HashSet[VertexID],(VertexID,AristoError)] =
): Result[NodeKey,(VertexID,AristoError)] =
## Add keys to the `Patricia Trie` so that it becomes a `Merkle Patricia ## Add keys to the `Patricia Trie` so that it becomes a `Merkle Patricia
## Tree`. If successful, the function returns the key (aka Merkle hash) of ## Tree`. If successful, the function returns the key (aka Merkle hash) of
## the root vertex. ## the root vertex.
var var
thisRootKey = EMPTY_ROOT_KEY roots: HashSet[VertexID]
completed: HashSet[VertexID]
# Width-first leaf-to-root traversal structure # Width-first leaf-to-root traversal structure
backLink: Table[VertexID,VertexID] backLink: Table[VertexID,VertexID]
downMost: Table[VertexID,VertexID] downMost: Table[VertexID,VertexID]
for (pathTag,vid) in db.lTab.pairs: for (lky,vid) in db.top.lTab.pairs:
let hike = pathTag.hikeUp(db.lRoot,db) let hike = lky.hikeUp(db)
if hike.error != AristoError(0): if hike.error != AristoError(0):
return err((VertexID(0),hike.error)) return err((hike.root,hike.error))
roots.incl hike.root
# Hash as much of the `hike` as possible # Hash as much of the `hike` as possible
let n = block: let n = block:
let rc = db.leafToRootHasher hike let rc = db.leafToRootHasher(hike)
if rc.isErr: if rc.isErr:
return err(rc.error) return err(rc.error)
rc.value rc.value
if 0 < n: if 0 < n:
# Backtrack and register remaining nodes # Backtrack and register remaining nodes. Note that in case *n == 0*, the
# root vertex has not been fully resolved yet.
# #
# hike.legs: (leg[0], leg[1], .., leg[n-1], leg[n], ..) # hike.legs: (leg[0], leg[1], .., leg[n-1], leg[n], ..)
# | | | | # | | | |
@ -178,25 +167,15 @@ proc hashify*(
for u in (n-1).countDown(1): for u in (n-1).countDown(1):
backLink[hike.legs[u].wp.vid] = hike.legs[u-1].wp.vid backLink[hike.legs[u].wp.vid] = hike.legs[u-1].wp.vid
elif thisRootKey == EMPTY_ROOT_KEY: elif n < 0:
let rootVid = hike.legs[0].wp.vid completed.incl hike.root
thisRootKey = db.kMap.getOrDefault(rootVid, EMPTY_ROOT_KEY)
if thisRootKey != EMPTY_ROOT_KEY:
if rootKey != EMPTY_ROOT_KEY and rootKey != thisRootKey:
return err((rootVid, HashifyRootHashMismatch))
if db.lRoot == VertexID(0):
db.lRoot = rootVid
elif db.lRoot != rootVid:
return err((rootVid,HashifyRootVidMismatch))
# At least one full path leaf..root should have succeeded with labelling # At least one full path leaf..root should have succeeded with labelling
if thisRootKey == EMPTY_ROOT_KEY: # for each root.
if completed.len < roots.len:
return err((VertexID(0),HashifyLeafToRootAllFailed)) return err((VertexID(0),HashifyLeafToRootAllFailed))
# Update remaining hashes # Update remaining hashes
var n = 0 # for logging
while 0 < downMost.len: while 0 < downMost.len:
var var
redo: Table[VertexID,VertexID] redo: Table[VertexID,VertexID]
@ -217,10 +196,9 @@ proc hashify*(
let nodeKey = rc.value.encode.digestTo(NodeKey) let nodeKey = rc.value.encode.digestTo(NodeKey)
# Update Merkle hash (aka `nodeKey`) # Update Merkle hash (aka `nodeKey`)
let fromKey = db.kMap.getOrDefault(fromVid, EMPTY_ROOT_KEY) let fromKey = db.top.kMap.getOrDefault(fromVid, EMPTY_ROOT_KEY)
if fromKey == EMPTY_ROOT_KEY: if fromKey == EMPTY_ROOT_KEY:
db.pAmk[nodeKey] = fromVid db.vidAttach(nodeKey, fromVid)
db.kMap[fromVid] = nodeKey
elif nodeKey != fromKey: elif nodeKey != fromKey:
let error = HashifyExistingHashMismatch let error = HashifyExistingHashMismatch
debug "hashify failed", vid=fromVid, key=nodeKey, debug "hashify failed", vid=fromVid, key=nodeKey,
@ -244,39 +222,39 @@ proc hashify*(
backLink.del vid backLink.del vid
downMost = redo downMost = redo
ok thisRootKey ok completed
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public debugging functions # Public debugging functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc hashifyCheck*( proc hashifyCheck*(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
relax = false; # Check existing hashes only relax = false; # Check existing hashes only
): Result[void,(VertexID,AristoError)] = ): Result[void,(VertexID,AristoError)] =
## Verify that the Merkle hash keys are either completely missing or ## Verify that the Merkle hash keys are either completely missing or
## match all known vertices on the argument database layer `db`. ## match all known vertices on the argument database layer `db`.
if not relax: if not relax:
for (vid,vtx) in db.sTab.pairs: for (vid,vtx) in db.top.sTab.pairs:
let rc = vtx.toNode(db) let rc = vtx.toNode(db)
if rc.isErr: if rc.isErr:
return err((vid,HashifyCheckVtxIncomplete)) return err((vid,HashifyCheckVtxIncomplete))
let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key == EMPTY_ROOT_KEY: if key == EMPTY_ROOT_KEY:
return err((vid,HashifyCheckVtxHashMissing)) return err((vid,HashifyCheckVtxHashMissing))
if key != rc.value.encode.digestTo(NodeKey): if key != rc.value.encode.digestTo(NodeKey):
return err((vid,HashifyCheckVtxHashMismatch)) return err((vid,HashifyCheckVtxHashMismatch))
let revVid = db.pAmk.getOrDefault(key, VertexID(0)) let revVid = db.top.pAmk.getOrDefault(key, VertexID(0))
if revVid == VertexID(0): if revVid == VertexID(0):
return err((vid,HashifyCheckRevHashMissing)) return err((vid,HashifyCheckRevHashMissing))
if revVid != vid: if revVid != vid:
return err((vid,HashifyCheckRevHashMismatch)) return err((vid,HashifyCheckRevHashMismatch))
elif 0 < db.pPrf.len: elif 0 < db.top.pPrf.len:
for vid in db.pPrf: for vid in db.top.pPrf:
let vtx = db.sTab.getOrDefault(vid, VertexRef(nil)) let vtx = db.top.sTab.getOrDefault(vid, VertexRef(nil))
if vtx == VertexRef(nil): if vtx == VertexRef(nil):
return err((vid,HashifyCheckVidVtxMismatch)) return err((vid,HashifyCheckVidVtxMismatch))
@ -284,20 +262,20 @@ proc hashifyCheck*(
if rc.isErr: if rc.isErr:
return err((vid,HashifyCheckVtxIncomplete)) return err((vid,HashifyCheckVtxIncomplete))
let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key == EMPTY_ROOT_KEY: if key == EMPTY_ROOT_KEY:
return err((vid,HashifyCheckVtxHashMissing)) return err((vid,HashifyCheckVtxHashMissing))
if key != rc.value.encode.digestTo(NodeKey): if key != rc.value.encode.digestTo(NodeKey):
return err((vid,HashifyCheckVtxHashMismatch)) return err((vid,HashifyCheckVtxHashMismatch))
let revVid = db.pAmk.getOrDefault(key, VertexID(0)) let revVid = db.top.pAmk.getOrDefault(key, VertexID(0))
if revVid == VertexID(0): if revVid == VertexID(0):
return err((vid,HashifyCheckRevHashMissing)) return err((vid,HashifyCheckRevHashMissing))
if revVid != vid: if revVid != vid:
return err((vid,HashifyCheckRevHashMismatch)) return err((vid,HashifyCheckRevHashMismatch))
else: else:
for (vid,key) in db.kMap.pairs: for (vid,key) in db.top.kMap.pairs:
let vtx = db.getVtx vid let vtx = db.getVtx vid
if not vtx.isNil: if not vtx.isNil:
let rc = vtx.toNode(db) let rc = vtx.toNode(db)
@ -305,27 +283,27 @@ proc hashifyCheck*(
if key != rc.value.encode.digestTo(NodeKey): if key != rc.value.encode.digestTo(NodeKey):
return err((vid,HashifyCheckVtxHashMismatch)) return err((vid,HashifyCheckVtxHashMismatch))
let revVid = db.pAmk.getOrDefault(key, VertexID(0)) let revVid = db.top.pAmk.getOrDefault(key, VertexID(0))
if revVid == VertexID(0): if revVid == VertexID(0):
return err((vid,HashifyCheckRevHashMissing)) return err((vid,HashifyCheckRevHashMissing))
if revVid != vid: if revVid != vid:
return err((vid,HashifyCheckRevHashMismatch)) return err((vid,HashifyCheckRevHashMismatch))
if db.pAmk.len != db.kMap.len: if db.top.pAmk.len != db.top.kMap.len:
var knownKeys: HashSet[VertexID] var knownKeys: HashSet[VertexID]
for (key,vid) in db.pAmk.pairs: for (key,vid) in db.top.pAmk.pairs:
if not db.kMap.hasKey(vid): if not db.top.kMap.hasKey(vid):
return err((vid,HashifyCheckRevVtxMissing)) return err((vid,HashifyCheckRevVtxMissing))
if vid in knownKeys: if vid in knownKeys:
return err((vid,HashifyCheckRevVtxDup)) return err((vid,HashifyCheckRevVtxDup))
knownKeys.incl vid knownKeys.incl vid
return err((VertexID(0),HashifyCheckRevCountMismatch)) # should not apply(!) return err((VertexID(0),HashifyCheckRevCountMismatch)) # should not apply(!)
if 0 < db.pAmk.len and not relax and db.pAmk.len != db.sTab.len: if 0 < db.top.pAmk.len and not relax and db.top.pAmk.len != db.top.sTab.len:
return err((VertexID(0),HashifyCheckVtxCountMismatch)) return err((VertexID(0),HashifyCheckVtxCountMismatch))
for vid in db.pPrf: for vid in db.top.pPrf:
if not db.kMap.hasKey(vid): if not db.top.kMap.hasKey(vid):
return err((vid,HashifyCheckVtxLockWithoutKey)) return err((vid,HashifyCheckVtxLockWithoutKey))
ok() ok()

View File

@ -60,7 +60,7 @@ func legsTo*(hike: Hike; T: type NibblesSeq): T =
proc hikeUp*( proc hikeUp*(
path: NibblesSeq; # Partial path path: NibblesSeq; # Partial path
root: VertexID; # Start vertex root: VertexID; # Start vertex
db: AristoDbRef; # Database db: AristoDb; # Database
): Hike = ): Hike =
## For the argument `path`, find and return the logest possible path in the ## For the argument `path`, find and return the logest possible path in the
## argument database `db`. ## argument database `db`.
@ -68,12 +68,12 @@ proc hikeUp*(
root: root, root: root,
tail: path) tail: path)
if root.isZero: if root == VertexID(0):
result.error = PathRootMissing result.error = PathRootMissing
else: else:
var vid = root var vid = root
while not vid.isZero: while vid != VertexID(0):
var vtx = db.getVtx vid var vtx = db.getVtx vid
if vtx.isNil: if vtx.isNil:
break break
@ -100,7 +100,7 @@ proc hikeUp*(
nibble = result.tail[0].int8 nibble = result.tail[0].int8
nextVid = vtx.bVid[nibble] nextVid = vtx.bVid[nibble]
if nextVid.isZero: if nextVid == VertexID(0):
result.error = PathBranchBlindEdge # Ooops result.error = PathBranchBlindEdge # Ooops
break break
@ -124,9 +124,9 @@ proc hikeUp*(
result.tail = result.tail.slice(vtx.ePfx.len) result.tail = result.tail.slice(vtx.ePfx.len)
vid = vtx.eVid vid = vtx.eVid
proc hikeUp*(keyOrTag: NodeKey|NodeTag; root: VertexID; db: AristoDbRef): Hike = proc hikeUp*(lky: LeafKey; db: AristoDb): Hike =
## Variant of `hike()` ## Variant of `hike()`
keyOrTag.pathAsNibbles.hikeUp(root, db) lky.path.pathAsNibbles.hikeUp(lky.root, db)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -23,23 +23,10 @@ import
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc init*(T: type AristoDbRef): T = proc init*(T: type AristoDb): T =
## Constructor with memory backend. ## Constructor with memory backend.
T(cascaded: false, backend: memoryBackend()) T(top: AristoLayerRef(),
backend: memoryBackend())
proc init*(T: type AristoDbRef; db: T): T =
## Cascaded constructor, a new layer is pushed and returned.
result = T(
cascaded: true,
lRoot: db.lRoot,
vGen: db.vGen,
stack: db)
if db.cascaded:
result.level = db.level + 1
result.base = db.base
else:
result.level = 1
result.base = db
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -14,7 +14,7 @@
{.push raises: [].} {.push raises: [].}
import import
std/tables, std/[sequtils, tables],
stew/results, stew/results,
".."/[aristo_constants, aristo_desc, aristo_error] ".."/[aristo_constants, aristo_desc, aristo_error]
@ -22,6 +22,15 @@ type
MemBackendRef = ref object MemBackendRef = ref object
sTab: Table[VertexID,VertexRef] ## Structural vertex table making up a trie sTab: Table[VertexID,VertexRef] ## Structural vertex table making up a trie
kMap: Table[VertexID,NodeKey] ## Merkle hash key mapping kMap: Table[VertexID,NodeKey] ## Merkle hash key mapping
vGen: seq[VertexID]
txGen: uint ## Transaction ID generator (for debugging)
txId: uint ## Active transaction ID (for debugging)
MemPutHdlRef = ref object of PutHdlRef
txId: uint ## Transaction ID (for debugging)
const
VerifyIxId = true # for debugging
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private functions # Private functions
@ -43,23 +52,66 @@ proc getKeyFn(db: MemBackendRef): GetKeyFn =
return ok key return ok key
err(MemBeKeyNotFound) err(MemBeKeyNotFound)
proc getIdgFn(db: MemBackendRef): GetIdgFn =
result =
proc(): Result[seq[VertexID],AristoError]=
ok db.vGen
# -------------
proc putBegFn(db: MemBackendRef): PutBegFn =
result =
proc(): PutHdlRef =
when VerifyIxId:
doAssert db.txId == 0
db.txGen.inc
MemPutHdlRef(txId: db.txGen)
proc putVtxFn(db: MemBackendRef): PutVtxFn = proc putVtxFn(db: MemBackendRef): PutVtxFn =
result = result =
proc(vrps: openArray[(VertexID,VertexRef)]): AristoError = proc(hdl: PutHdlRef; vrps: openArray[(VertexID,VertexRef)]) =
when VerifyIxId:
doAssert db.txId == hdl.MemPutHdlRef.txId
for (vid,vtx) in vrps: for (vid,vtx) in vrps:
db.sTab[vid] = vtx db.sTab[vid] = vtx
proc putKeyFn(db: MemBackendRef): PutKeyFn = proc putKeyFn(db: MemBackendRef): PutKeyFn =
result = result =
proc(vkps: openArray[(VertexID,NodeKey)]): AristoError = proc(hdl: PutHdlRef; vkps: openArray[(VertexID,NodeKey)]) =
when VerifyIxId:
doAssert db.txId == hdl.MemPutHdlRef.txId
for (vid,key) in vkps: for (vid,key) in vkps:
db.kMap[vid] = key db.kMap[vid] = key
proc delFn(db: MemBackendRef): DelFn = proc putIdgFn(db: MemBackendRef): PutIdgFn =
result =
proc(hdl: PutHdlRef; vs: openArray[VertexID]) =
when VerifyIxId:
doAssert db.txId == hdl.MemPutHdlRef.txId
db.vGen = vs.toSeq
proc putEndFn(db: MemBackendRef): PutEndFn =
result =
proc(hdl: PutHdlRef): AristoError =
when VerifyIxId:
doAssert db.txId == hdl.MemPutHdlRef.txId
db.txId = 0
AristoError(0)
# -------------
proc delVtxFn(db: MemBackendRef): DelVtxFn =
result = result =
proc(vids: openArray[VertexID]) = proc(vids: openArray[VertexID]) =
for vid in vids: for vid in vids:
db.sTab.del vid db.sTab.del vid
proc delKeyFn(db: MemBackendRef): DelKeyFn =
result =
proc(vids: openArray[VertexID]) =
for vid in vids:
db.kMap.del vid db.kMap.del vid
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -72,9 +124,16 @@ proc memoryBackend*(): AristoBackendRef =
AristoBackendRef( AristoBackendRef(
getVtxFn: getVtxFn db, getVtxFn: getVtxFn db,
getKeyFn: getKeyFn db, getKeyFn: getKeyFn db,
getIdgFn: getIdgFn db,
putBegFn: putBegFn db,
putVtxFn: putVtxFn db, putVtxFn: putVtxFn db,
putKeyFn: putKeyFn db, putKeyFn: putKeyFn db,
delFn: delFn db) putIdgFn: putIdgFn db,
putEndFn: putEndFn db,
delVtxFn: delVtxFn db,
delKeyFn: delKeyFn db)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -37,9 +37,15 @@ logScope:
topics = "aristo-merge" topics = "aristo-merge"
type type
LeafKVP* = object LeafSubKVP* = object
## Generalised key-value pair ## Generalised key-value pair for a sub-trie. The main trie is the
pathTag*: NodeTag ## `Patricia Trie` path root-to-leaf ## sub-trie with `root=VertexID(1)`.
leafKey*: LeafKey ## Full `Patricia Trie` path root-to-leaf
payload*: PayloadRef ## Leaf data payload
LeafMainKVP* = object
## Variant of `LeafSubKVP` for the main trie, implies: `root=VertexID(1)`
pathTag*: NodeTag ## Path root-to-leaf in main trie
payload*: PayloadRef ## Leaf data payload payload*: PayloadRef ## Leaf data payload
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -69,23 +75,26 @@ proc `xPfx=`(vtx: VertexRef, val: NibblesSeq) =
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc clearMerkleKeys( proc clearMerkleKeys(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
hike: Hike; # Implied vertex IDs to clear hashes for hike: Hike; # Implied vertex IDs to clear hashes for
vid: VertexID; # Additionall vertex IDs to clear vid: VertexID; # Additionall vertex IDs to clear
) = ) =
for vid in hike.legs.mapIt(it.wp.vid) & @[vid]: for vid in hike.legs.mapIt(it.wp.vid) & @[vid]:
let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
db.kMap.del vid db.top.kMap.del vid
db.pAmk.del key db.top.pAmk.del key
elif db.getKeyBackend(vid).isOK:
# Register for deleting on backend
db.top.dKey.incl vid
# ----------- # -----------
proc insertBranch( proc insertBranch(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
hike: Hike; hike: Hike; # Current state
linkID: VertexID; linkID: VertexID; # Vertex ID to insert
linkVtx: VertexRef; linkVtx: VertexRef; # Vertex to insert
payload: PayloadRef; # Leaf data payload payload: PayloadRef; # Leaf data payload
): Hike = ): Hike =
## ##
@ -130,9 +139,9 @@ proc insertBranch(
# Install `forkVtx` # Install `forkVtx`
block: block:
# Clear Merkle hashes (aka node keys) unless proof mode. # Clear Merkle hashes (aka node keys) unless proof mode.
if db.pPrf.len == 0: if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, linkID) db.clearMerkleKeys(hike, linkID)
elif linkID in db.pPrf: elif linkID in db.top.pPrf:
return Hike(error: MergeNonBranchProofModeLock) return Hike(error: MergeNonBranchProofModeLock)
if linkVtx.vType == Leaf: if linkVtx.vType == Leaf:
@ -144,9 +153,11 @@ proc insertBranch(
debug "Branch link leaf path garbled", linkID, path debug "Branch link leaf path garbled", linkID, path
return Hike(error: MergeBrLinkLeafGarbled) return Hike(error: MergeBrLinkLeafGarbled)
let local = db.vidFetch let
db.lTab[rc.value] = local # update leaf path lookup cache local = db.vidFetch
db.sTab[local] = linkVtx lky = LeafKey(root: hike.root, path: rc.value)
db.top.lTab[lky] = local # update leaf path lookup cache
db.top.sTab[local] = linkVtx
linkVtx.lPfx = linkVtx.lPfx.slice(1+n) linkVtx.lPfx = linkVtx.lPfx.slice(1+n)
forkVtx.bVid[linkInx] = local forkVtx.bVid[linkInx] = local
@ -156,7 +167,7 @@ proc insertBranch(
else: else:
let local = db.vidFetch let local = db.vidFetch
db.sTab[local] = linkVtx db.top.sTab[local] = linkVtx
linkVtx.ePfx = linkVtx.ePfx.slice(1+n) linkVtx.ePfx = linkVtx.ePfx.slice(1+n)
forkVtx.bVid[linkInx] = local forkVtx.bVid[linkInx] = local
@ -168,7 +179,7 @@ proc insertBranch(
vType: Leaf, vType: Leaf,
lPfx: hike.tail.slice(1+n), lPfx: hike.tail.slice(1+n),
lData: payload) lData: payload)
db.sTab[local] = leafLeg.wp.vtx db.top.sTab[local] = leafLeg.wp.vtx
# Update branch leg, ready to append more legs # Update branch leg, ready to append more legs
result = Hike(root: hike.root, legs: hike.legs) result = Hike(root: hike.root, legs: hike.legs)
@ -180,7 +191,7 @@ proc insertBranch(
ePfx: hike.tail.slice(0,n), ePfx: hike.tail.slice(0,n),
eVid: db.vidFetch) eVid: db.vidFetch)
db.sTab[linkID] = extVtx db.top.sTab[linkID] = extVtx
result.legs.add Leg( result.legs.add Leg(
nibble: -1, nibble: -1,
@ -188,14 +199,14 @@ proc insertBranch(
vid: linkID, vid: linkID,
vtx: extVtx)) vtx: extVtx))
db.sTab[extVtx.eVid] = forkVtx db.top.sTab[extVtx.eVid] = forkVtx
result.legs.add Leg( result.legs.add Leg(
nibble: leafInx.int8, nibble: leafInx.int8,
wp: VidVtxPair( wp: VidVtxPair(
vid: extVtx.eVid, vid: extVtx.eVid,
vtx: forkVtx)) vtx: forkVtx))
else: else:
db.sTab[linkID] = forkVtx db.top.sTab[linkID] = forkVtx
result.legs.add Leg( result.legs.add Leg(
nibble: leafInx.int8, nibble: leafInx.int8,
wp: VidVtxPair( wp: VidVtxPair(
@ -206,7 +217,7 @@ proc insertBranch(
proc concatBranchAndLeaf( proc concatBranchAndLeaf(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
hike: Hike; # Path top has a `Branch` vertex hike: Hike; # Path top has a `Branch` vertex
brVid: VertexID; # Branch vertex ID from from `Hike` top brVid: VertexID; # Branch vertex ID from from `Hike` top
brVtx: VertexRef; # Branch vertex, linked to from `Hike` brVtx: VertexRef; # Branch vertex, linked to from `Hike`
@ -219,13 +230,13 @@ proc concatBranchAndLeaf(
return Hike(error: MergeBranchGarbledTail) return Hike(error: MergeBranchGarbledTail)
let nibble = hike.tail[0].int8 let nibble = hike.tail[0].int8
if not brVtx.bVid[nibble].isZero: if brVtx.bVid[nibble] != VertexID(0):
return Hike(error: MergeRootBranchLinkBusy) return Hike(error: MergeRootBranchLinkBusy)
# Clear Merkle hashes (aka node keys) unless proof mode. # Clear Merkle hashes (aka node keys) unless proof mode.
if db.pPrf.len == 0: if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid) db.clearMerkleKeys(hike, brVid)
elif brVid in db.pPrf: elif brVid in db.top.pPrf:
return Hike(error: MergeBranchProofModeLock) # Ooops return Hike(error: MergeBranchProofModeLock) # Ooops
# Append branch node # Append branch node
@ -240,7 +251,7 @@ proc concatBranchAndLeaf(
lPfx: hike.tail.slice(1), lPfx: hike.tail.slice(1),
lData: payload) lData: payload)
brVtx.bVid[nibble] = vid brVtx.bVid[nibble] = vid
db.sTab[vid] = vtx db.top.sTab[vid] = vtx
result.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1) result.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
@ -248,7 +259,7 @@ proc concatBranchAndLeaf(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc topIsBranchAddLeaf( proc topIsBranchAddLeaf(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
hike: Hike; # Path top has a `Branch` vertex hike: Hike; # Path top has a `Branch` vertex
payload: PayloadRef; # Leaf data payload payload: PayloadRef; # Leaf data payload
): Hike = ): Hike =
@ -273,7 +284,7 @@ proc topIsBranchAddLeaf(
# #
# <-------- immutable ------------> <---- mutable ----> .. # <-------- immutable ------------> <---- mutable ----> ..
# #
if db.pPrf.len == 0: if db.top.pPrf.len == 0:
# Not much else that can be done here # Not much else that can be done here
debug "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid, debug "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid,
nibble, linkID, leafPfx=hike.tail nibble, linkID, leafPfx=hike.tail
@ -283,7 +294,7 @@ proc topIsBranchAddLeaf(
vType: Leaf, vType: Leaf,
lPfx: hike.tail, lPfx: hike.tail,
lData: payload) lData: payload)
db.sTab[linkID] = vtx db.top.sTab[linkID] = vtx
result = Hike(root: hike.root, legs: hike.legs) result = Hike(root: hike.root, legs: hike.legs)
result.legs.add Leg(wp: VidVtxPair(vid: linkID, vtx: vtx), nibble: -1) result.legs.add Leg(wp: VidVtxPair(vid: linkID, vtx: vtx), nibble: -1)
return return
@ -301,7 +312,7 @@ proc topIsBranchAddLeaf(
proc topIsExtAddLeaf( proc topIsExtAddLeaf(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
hike: Hike; # Path top has an `Extension` vertex hike: Hike; # Path top has an `Extension` vertex
payload: PayloadRef; # Leaf data payload payload: PayloadRef; # Leaf data payload
): Hike = ): Hike =
@ -328,7 +339,7 @@ proc topIsExtAddLeaf(
vType: Leaf, vType: Leaf,
lPfx: extVtx.ePfx & hike.tail, lPfx: extVtx.ePfx & hike.tail,
lData: payload) lData: payload)
db.sTab[extVid] = vtx db.top.sTab[extVid] = vtx
result.legs[^1].wp.vtx = vtx result.legs[^1].wp.vtx = vtx
elif brVtx.vType != Branch: elif brVtx.vType != Branch:
@ -345,13 +356,13 @@ proc topIsExtAddLeaf(
# #
# <-------- immutable --------------> <-------- mutable ----------> .. # <-------- immutable --------------> <-------- mutable ----------> ..
# #
if not linkID.isZero: if linkID != VertexID(0):
return Hike(error: MergeRootBranchLinkBusy) return Hike(error: MergeRootBranchLinkBusy)
# Clear Merkle hashes (aka node keys) unless proof mode # Clear Merkle hashes (aka node keys) unless proof mode
if db.pPrf.len == 0: if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, brVid) db.clearMerkleKeys(hike, brVid)
elif brVid in db.pPrf: elif brVid in db.top.pPrf:
return Hike(error: MergeBranchProofModeLock) return Hike(error: MergeBranchProofModeLock)
let let
@ -361,13 +372,13 @@ proc topIsExtAddLeaf(
lPfx: hike.tail.slice(1), lPfx: hike.tail.slice(1),
lData: payload) lData: payload)
brVtx.bVid[nibble] = vid brVtx.bVid[nibble] = vid
db.sTab[vid] = vtx db.top.sTab[vid] = vtx
result.legs[^1].nibble = nibble result.legs[^1].nibble = nibble
result.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1) result.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)
proc topIsEmptyAddLeaf( proc topIsEmptyAddLeaf(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
hike: Hike; # No path legs hike: Hike; # No path legs
rootVtx: VertexRef; # Root vertex rootVtx: VertexRef; # Root vertex
payload: PayloadRef; # Leaf data payload payload: PayloadRef; # Leaf data payload
@ -375,14 +386,15 @@ proc topIsEmptyAddLeaf(
## Append a `Leaf` vertex derived from the argument `payload` after the ## Append a `Leaf` vertex derived from the argument `payload` after the
## argument vertex `rootVtx` and append both the empty arguent `hike`. ## argument vertex `rootVtx` and append both the empty arguent `hike`.
if rootVtx.vType == Branch: if rootVtx.vType == Branch:
let nibble = hike.tail[0].int8 let nibble = hike.tail[0].int8
if not rootVtx.bVid[nibble].isZero: if rootVtx.bVid[nibble] != VertexID(0):
return Hike(error: MergeRootBranchLinkBusy) return Hike(error: MergeRootBranchLinkBusy)
# Clear Merkle hashes (aka node keys) unless proof mode # Clear Merkle hashes (aka node keys) unless proof mode
if db.pPrf.len == 0: if db.top.pPrf.len == 0:
db.clearMerkleKeys(hike, hike.root) db.clearMerkleKeys(hike, hike.root)
elif hike.root in db.pPrf: elif hike.root in db.top.pPrf:
return Hike(error: MergeBranchProofModeLock) return Hike(error: MergeBranchProofModeLock)
let let
@ -392,7 +404,7 @@ proc topIsEmptyAddLeaf(
lPfx: hike.tail.slice(1), lPfx: hike.tail.slice(1),
lData: payload) lData: payload)
rootVtx.bVid[nibble] = leafVid rootVtx.bVid[nibble] = leafVid
db.sTab[leafVid] = leafVtx db.top.sTab[leafVid] = leafVtx
return Hike( return Hike(
root: hike.root, root: hike.root,
legs: @[Leg(wp: VidVtxPair(vtx: rootVtx, vid: hike.root), nibble: nibble), legs: @[Leg(wp: VidVtxPair(vtx: rootVtx, vid: hike.root), nibble: nibble),
@ -405,8 +417,8 @@ proc topIsEmptyAddLeaf(
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc merge*( proc merge*(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
leaf: LeafKVP; # Leaf item to add to the database leaf: LeafSubKVP; # Leaf item to add to the database
): Hike = ): Hike =
## Merge the argument `leaf` key-value-pair into the top level vertex table ## Merge the argument `leaf` key-value-pair into the top level vertex table
## of the database `db`. The field `pathKey` of the `leaf` argument is used ## of the database `db`. The field `pathKey` of the `leaf` argument is used
@ -414,25 +426,11 @@ proc merge*(
## stored with the leaf vertex in the database unless the leaf vertex exists ## stored with the leaf vertex in the database unless the leaf vertex exists
## already. ## already.
## ##
proc setUpAsRoot(vid: VertexID): Hike = if db.top.lTab.hasKey leaf.leafKey:
let
vtx = VertexRef(
vType: Leaf,
lPfx: leaf.pathTag.pathAsNibbles,
lData: leaf.payload)
wp = VidVtxPair(vid: vid, vtx: vtx)
db.sTab[vid] = vtx
Hike(root: vid, legs: @[Leg(wp: wp, nibble: -1)])
if db.lRoot.isZero:
result = db.vidFetch.setUpAsRoot() # bootstrap: new root ID
db.lRoot = result.root
elif db.lTab.haskey leaf.pathTag:
result.error = MergeLeafPathCachedAlready result.error = MergeLeafPathCachedAlready
else: else:
let hike = leaf.pathTag.hikeUp(db.lRoot, db) let hike = leaf.leafKey.hikeUp(db)
if 0 < hike.legs.len: if 0 < hike.legs.len:
case hike.legs[^1].wp.vtx.vType: case hike.legs[^1].wp.vtx.vType:
@ -447,25 +445,54 @@ proc merge*(
else: else:
# Empty hike # Empty hike
let rootVtx = db.getVtx db.lRoot let rootVtx = db.getVtx hike.root
if rootVtx.isNil: if not rootVtx.isNil:
result = db.lRoot.setUpAsRoot() # bootstrap for existing root ID
else:
result = db.topIsEmptyAddLeaf(hike,rootVtx,leaf.payload) result = db.topIsEmptyAddLeaf(hike,rootVtx,leaf.payload)
else:
# Bootstrap for existing root ID
let wp = VidVtxPair(
vid: hike.root,
vtx: VertexRef(
vType: Leaf,
lPfx: leaf.leafKey.path.pathAsNibbles,
lData: leaf.payload))
db.top.sTab[wp.vid] = wp.vtx
result = Hike(root: wp.vid, legs: @[Leg(wp: wp, nibble: -1)])
# Update leaf acccess cache # Update leaf acccess cache
if result.error == AristoError(0): if result.error == AristoError(0):
db.lTab[leaf.pathTag] = result.legs[^1].wp.vid db.top.lTab[leaf.leafKey] = result.legs[^1].wp.vid
# End else (1st level)
proc merge*( proc merge*(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
leafs: openArray[LeafKVP]; # Leaf items to add to the database leafs: openArray[LeafSubKVP]; # Leaf items to add to the database
): tuple[merged: int, dups: int, error: AristoError] = ): tuple[merged: int, dups: int, error: AristoError] =
## Variant of `merge()` for leaf lists. ## Variant of `merge()` for leaf lists.
var (merged, dups) = (0, 0) var (merged, dups) = (0, 0)
for n,w in leafs: for n,w in leafs:
let hike = db.merge w let hike = db.merge(w)
if hike.error == AristoError(0):
merged.inc
elif hike.error == MergeLeafPathCachedAlready:
dups.inc
else:
return (n,dups,hike.error)
(merged, dups, AristoError(0))
proc merge*(
db: AristoDb; # Database, top layer
leafs: openArray[LeafMainKVP]; # Leaf items to add to the database
): tuple[merged: int, dups: int, error: AristoError] =
## Variant of `merge()` for leaf lists on the main trie
var (merged, dups) = (0, 0)
for n,w in leafs:
let hike = db.merge(LeafSubKVP(
leafKey: LeafKey(root: VertexID(1), path: w.pathTag),
payload: w.payload))
if hike.error == AristoError(0): if hike.error == AristoError(0):
merged.inc merged.inc
elif hike.error == MergeLeafPathCachedAlready: elif hike.error == MergeLeafPathCachedAlready:
@ -478,7 +505,7 @@ proc merge*(
# --------------------- # ---------------------
proc merge*( proc merge*(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
nodeKey: NodeKey; # Merkel hash of node nodeKey: NodeKey; # Merkel hash of node
node: NodeRef; # Node derived from RLP representation node: NodeRef; # Node derived from RLP representation
): Result[VertexID,AristoError] = ): Result[VertexID,AristoError] =
@ -492,11 +519,9 @@ proc merge*(
## decoder as expected, these vertex IDs will be all zero. ## decoder as expected, these vertex IDs will be all zero.
## ##
proc register(key: NodeKey): VertexID = proc register(key: NodeKey): VertexID =
var vid = db.pAmk.getOrDefault(key, VertexID(0)) var vid = db.top.pAmk.getOrDefault(key, VertexID(0))
if vid == VertexID(0): if vid == VertexID(0):
vid = db.vidFetch vid = db.vidAttach key
db.pAmk[key] = vid
db.kMap[vid] = key
vid vid
# Check whether the record is correct # Check whether the record is correct
@ -507,40 +532,48 @@ proc merge*(
if nodeKey == EMPTY_ROOT_KEY: if nodeKey == EMPTY_ROOT_KEY:
return err(MergeNodeKeyEmpty) return err(MergeNodeKeyEmpty)
# Check whether the node exists, already # Check whether the node exists, already. If not then create a new vertex ID
let nodeVid = db.pAmk.getOrDefault(nodeKey, VertexID(0)) var vid = db.top.pAmk.getOrDefault(nodeKey, VertexID(0))
if nodeVid != VertexID(0) and db.sTab.hasKey nodeVid: if vid == VertexID(0):
return err(MergeNodeKeyCachedAlready)
let
vid = nodeKey.register vid = nodeKey.register
vtx = node.to(VertexRef) # the vertex IDs need to be set up now (if any) else:
let key = db.top.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key == nodeKey:
if db.top.sTab.hasKey vid:
# This is tyically considered OK
return err(MergeNodeKeyCachedAlready)
# Otherwise proceed
elif key != EMPTY_ROOT_KEY:
# Different key assigned => error
return err(MergeNodeKeyDiffersFromCached)
let vtx = node.to(VertexRef) # the vertex IDs need to be set up now (if any)
case node.vType: case node.vType:
of Leaf: of Leaf:
discard discard
of Extension: of Extension:
if not node.key[0].isEmpty: if node.key[0] != EMPTY_ROOT_KEY:
let eVid = db.pAmk.getOrDefault(node.key[0], VertexID(0)) let eVid = db.top.pAmk.getOrDefault(node.key[0], VertexID(0))
if eVid != VertexID(0): if eVid != VertexID(0):
vtx.eVid = eVid vtx.eVid = eVid
else: else:
vtx.eVid = node.key[0].register vtx.eVid = node.key[0].register
of Branch: of Branch:
for n in 0..15: for n in 0..15:
if not node.key[n].isEmpty: if node.key[n] != EMPTY_ROOT_KEY:
let bVid = db.pAmk.getOrDefault(node.key[n], VertexID(0)) let bVid = db.top.pAmk.getOrDefault(node.key[n], VertexID(0))
if bVid != VertexID(0): if bVid != VertexID(0):
vtx.bVid[n] = bVid vtx.bVid[n] = bVid
else: else:
vtx.bVid[n] = node.key[n].register vtx.bVid[n] = node.key[n].register
db.pPrf.incl vid db.top.pPrf.incl vid
db.sTab[vid] = vtx db.top.sTab[vid] = vtx
ok vid ok vid
proc merge*( proc merge*(
db: AristoDbRef; # Database, top layer db: AristoDb; # Database, top layer
proof: openArray[SnapProof]; # RLP encoded node records proof: openArray[SnapProof]; # RLP encoded node records
): tuple[merged: int, dups: int, error: AristoError] ): tuple[merged: int, dups: int, error: AristoError]
{.gcsafe, raises: [RlpError].} = {.gcsafe, raises: [RlpError].} =
@ -562,6 +595,54 @@ proc merge*(
(merged, dups, AristoError(0)) (merged, dups, AristoError(0))
proc merge*(
db: AristoDb; # Database, top layer
rootKey: NodeKey; # Merkle hash for root
rootVid = VertexID(0) # Optionally, force root vertex ID
): Result[VertexID,AristoError] =
## Set up a `rootKey` associated with a vertex ID.
##
## If argument `rootVid` is unset (defaults to `VertexID(0)`) then the main
## trie is tested for `VertexID(1)`. If assigned with a different Merkle key
## already, a new vertex ID is created and the argument root key is assigned
## to this vertex ID.
##
## If the argument `rootVid` is set (to a value different from `VertexID(0)`),
## then a sub-trie with root `rootVid` is checked for. If it exists with a
## diffent root key assigned, then an error is returned. Otherwise a new
## vertex ID is created and the argument root key is assigned.
##
## Upon successful return, the vertex ID assigned to the root key is returned.
##
if rootKey == EMPTY_ROOT_KEY:
return err(MergeRootKeyEmpty)
if rootVid == VertexID(0) or
rootVid == VertexID(1):
let key = db.getKey VertexID(1)
if key == rootKey:
return ok VertexID(1)
# Otherwise assign if empty
if key == EMPTY_ROOT_KEY:
db.vidAttach(rootKey, VertexID(1))
return ok VertexID(1)
# Create new root key
if rootVid == VertexID(0):
return ok db.vidAttach(rootKey)
else:
let key = db.getKey rootVid
if key == rootKey:
return ok rootVid
if key == EMPTY_ROOT_KEY:
db.vidAttach(rootKey, rootVid)
return ok rootVid
err(MergeRootKeyDiffersForVid)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -57,7 +57,7 @@ proc branchNibbleMin*(vtx: VertexRef; minInx: int8): int8 =
## greater or equal the argument `nibble`. ## greater or equal the argument `nibble`.
if vtx.vType == Branch: if vtx.vType == Branch:
for n in minInx .. 15: for n in minInx .. 15:
if not vtx.bVid[n].isZero: if vtx.bVid[n] != VertexID(0):
return n return n
-1 -1
@ -66,7 +66,7 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 =
## less or equal the argument `nibble`. ## less or equal the argument `nibble`.
if vtx.vType == Branch: if vtx.vType == Branch:
for n in maxInx.countDown 0: for n in maxInx.countDown 0:
if not vtx.bVid[n].isZero: if vtx.bVid[n] != VertexID(0):
return n return n
-1 -1
@ -77,7 +77,7 @@ proc branchNibbleMax*(vtx: VertexRef; maxInx: int8): int8 =
proc complete( proc complete(
hike: Hike; # Partially expanded chain of vertices hike: Hike; # Partially expanded chain of vertices
vid: VertexID; # Start ID vid: VertexID; # Start ID
db: AristoDbRef; # Database layer db: AristoDb; # Database layer
hikeLenMax: static[int]; # Beware of loops (if any) hikeLenMax: static[int]; # Beware of loops (if any)
doLeast: static[bool]; # Direction: *least* or *most* doLeast: static[bool]; # Direction: *least* or *most*
): Hike = ): Hike =
@ -99,7 +99,7 @@ proc complete(
of Extension: of Extension:
vid = vtx.eVid vid = vtx.eVid
if not vid.isZero: if vid != VertexID(0):
vtx = db.getVtx vid vtx = db.getVtx vid
if not vtx.isNil: if not vtx.isNil:
uHike.legs.add leg uHike.legs.add leg
@ -124,7 +124,7 @@ proc complete(
proc zeroAdjust( proc zeroAdjust(
hike: Hike; # Partially expanded chain of vertices hike: Hike; # Partially expanded chain of vertices
db: AristoDbRef; # Database layer db: AristoDb; # Database layer
doLeast: static[bool]; # Direction: *least* or *most* doLeast: static[bool]; # Direction: *least* or *most*
): Hike = ): Hike =
## Adjust empty argument path to the first node entry to the right. Ths ## Adjust empty argument path to the first node entry to the right. Ths
@ -142,7 +142,7 @@ proc zeroAdjust(
else: else:
w.branchNibbleMax n w.branchNibbleMax n
proc toHike(pfx: NibblesSeq, root: VertexID, db: AristoDbRef): Hike = proc toHike(pfx: NibblesSeq, root: VertexID, db: AristoDb): Hike =
when doLeast: when doLeast:
pfx.pathPfxPad(0).hikeUp(root, db) pfx.pathPfxPad(0).hikeUp(root, db)
else: else:
@ -205,7 +205,7 @@ proc zeroAdjust(
proc finalise( proc finalise(
hike: Hike; # Partially expanded chain of vertices hike: Hike; # Partially expanded chain of vertices
db: AristoDbRef; # Database layer db: AristoDb; # Database layer
moveRight: static[bool]; # Direction of next vertex moveRight: static[bool]; # Direction of next vertex
): Hike = ): Hike =
## Handle some pathological cases after main processing failed ## Handle some pathological cases after main processing failed
@ -259,7 +259,7 @@ proc finalise(
proc nearbyNext( proc nearbyNext(
hike: Hike; # Partially expanded chain of vertices hike: Hike; # Partially expanded chain of vertices
db: AristoDbRef; # Database layer db: AristoDb; # Database layer
hikeLenMax: static[int]; # Beware of loops (if any) hikeLenMax: static[int]; # Beware of loops (if any)
moveRight: static[bool]; # Direction of next vertex moveRight: static[bool]; # Direction of next vertex
): Hike = ): Hike =
@ -317,7 +317,7 @@ proc nearbyNext(
# Look ahead checking next node # Look ahead checking next node
if start: if start:
let vid = top.wp.vtx.bVid[top.nibble] let vid = top.wp.vtx.bVid[top.nibble]
if vid.isZero: if vid == VertexID(0):
return Hike(error: NearbyDanglingLink) # error return Hike(error: NearbyDanglingLink) # error
let vtx = db.getVtx vid let vtx = db.getVtx vid
@ -364,14 +364,13 @@ proc nearbyNext(
proc nearbyNext( proc nearbyNext(
baseTag: NodeTag; # Some `Patricia Trie` path lky: LeafKey; # Some `Patricia Trie` path
root: VertexID; # State root db: AristoDb; # Database layer
db: AristoDbRef; # Database layer
hikeLenMax: static[int]; # Beware of loops (if any) hikeLenMax: static[int]; # Beware of loops (if any)
moveRight:static[ bool]; # Direction of next vertex moveRight:static[bool]; # Direction of next vertex
): Result[NodeTag,AristoError] = ): Result[NodeTag,AristoError] =
## Variant of `nearbyNext()`, convenience wrapper ## Variant of `nearbyNext()`, convenience wrapper
let hike = baseTag.hikeUp(root,db).nearbyNext(db, hikeLenMax, moveRight) let hike = lky.hikeUp(db).nearbyNext(db, hikeLenMax, moveRight)
if hike.error != AristoError(0): if hike.error != AristoError(0):
return err(hike.error) return err(hike.error)
@ -389,7 +388,7 @@ proc nearbyNext(
proc nearbyRight*( proc nearbyRight*(
hike: Hike; # Partially expanded chain of vertices hike: Hike; # Partially expanded chain of vertices
db: AristoDbRef; # Database layer db: AristoDb; # Database layer
): Hike = ): Hike =
## Extends the maximally extended argument nodes `hike` to the right (i.e. ## Extends the maximally extended argument nodes `hike` to the right (i.e.
## with non-decreasing path value). This function does not backtrack if ## with non-decreasing path value). This function does not backtrack if
@ -403,17 +402,19 @@ proc nearbyRight*(
hike.nearbyNext(db, 64, moveRight=true) hike.nearbyNext(db, 64, moveRight=true)
proc nearbyRight*( proc nearbyRight*(
nodeTag: NodeTag; # Some `Patricia Trie` path lky: LeafKey; # Some `Patricia Trie` path
root: VertexID; # State root db: AristoDb; # Database layer
db: AristoDbRef; # Database layer ): Result[LeafKey,AristoError] =
): Result[NodeTag,AristoError] =
## Variant of `nearbyRight()` working with a `NodeTag` argument instead ## Variant of `nearbyRight()` working with a `NodeTag` argument instead
## of a `Hike`. ## of a `Hike`.
nodeTag.nearbyNext(root, db, 64, moveRight=true) let rc = lky.nearbyNext(db, 64, moveRight=true)
if rc.isErr:
return err(rc.error)
ok LeafKey(root: lky.root, path: rc.value)
proc nearbyLeft*( proc nearbyLeft*(
hike: Hike; # Partially expanded chain of vertices hike: Hike; # Partially expanded chain of vertices
db: AristoDbRef; # Database layer db: AristoDb; # Database layer
): Hike = ): Hike =
## Similar to `nearbyRight()`. ## Similar to `nearbyRight()`.
## ##
@ -422,13 +423,15 @@ proc nearbyLeft*(
hike.nearbyNext(db, 64, moveRight=false) hike.nearbyNext(db, 64, moveRight=false)
proc nearbyLeft*( proc nearbyLeft*(
nodeTag: NodeTag; # Some `Patricia Trie` path lky: LeafKey; # Some `Patricia Trie` path
root: VertexID; # State root db: AristoDb; # Database layer
db: AristoDbRef; # Database layer ): Result[LeafKey,AristoError] =
): Result[NodeTag,AristoError] =
## Similar to `nearbyRight()` for `NodeTag` argument instead ## Similar to `nearbyRight()` for `NodeTag` argument instead
## of a `Hike`. ## of a `Hike`.
nodeTag.nearbyNext(root, db, 64, moveRight=false) let rc = lky.nearbyNext(db, 64, moveRight=false)
if rc.isErr:
return err(rc.error)
ok LeafKey(root: lky.root, path: rc.value)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public debugging helpers # Public debugging helpers
@ -436,7 +439,7 @@ proc nearbyLeft*(
proc nearbyRightMissing*( proc nearbyRightMissing*(
hike: Hike; # Partially expanded chain of vertices hike: Hike; # Partially expanded chain of vertices
db: AristoDbRef; # Database layer db: AristoDb; # Database layer
): Result[bool,AristoError] = ): Result[bool,AristoError] =
## Returns `true` if the maximally extended argument nodes `hike` is the ## Returns `true` if the maximally extended argument nodes `hike` is the
## rightmost on the hexary trie database. It verifies that there is no more ## rightmost on the hexary trie database. It verifies that there is no more
@ -464,7 +467,7 @@ proc nearbyRightMissing*(
return err(NearbyBranchError) return err(NearbyBranchError)
let vid = top.wp.vtx.bVid[top.nibble] let vid = top.wp.vtx.bVid[top.nibble]
if vid.isZero: if vid == VertexID(0):
return err(NearbyDanglingLink) # error return err(NearbyDanglingLink) # error
let vtx = db.getVtx vid let vtx = db.getVtx vid

View File

@ -67,9 +67,9 @@ proc pathToTag*(partPath: NibblesSeq|Blob): Result[NodeTag,AristoError] =
# -------------------- # --------------------
proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NodeKey = proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NibblesSeq =
## Extend (or cut) the argument nibbles sequence `pfx` for generating a ## Extend (or cut) the argument nibbles sequence `pfx` for generating a
## `NodeKey`. ## `NibblesSeq` with exactly 64 nibbles, the equivalent of a path key.
## ##
## This function must be handled with some care regarding a meaningful value ## This function must be handled with some care regarding a meaningful value
## for the `dblNibble` argument. Currently, only static values `0` and `255` ## for the `dblNibble` argument. Currently, only static values `0` and `255`
@ -77,19 +77,21 @@ proc pathPfxPad*(pfx: NibblesSeq; dblNibble: static[byte]): NodeKey =
static: static:
doAssert dblNibble == 0 or dblNibble == 255 doAssert dblNibble == 0 or dblNibble == 255
# Pad with zeroes
var padded: NibblesSeq
let padLen = 64 - pfx.len let padLen = 64 - pfx.len
if 0 <= padLen: if 0 <= padLen:
padded = pfx & dblNibble.repeat(padlen div 2).mapIt(it.byte).initNibbleRange result = pfx & dblNibble.repeat(padlen div 2).mapIt(it.byte).initNibbleRange
if (padLen and 1) == 1: if (padLen and 1) == 1:
padded = padded & @[dblNibble.byte].initNibbleRange.slice(1) result = result & @[dblNibble.byte].initNibbleRange.slice(1)
else: else:
let nope = seq[byte].default.initNibbleRange let nope = seq[byte].default.initNibbleRange
padded = pfx.slice(0,64) & nope # nope forces re-alignment result = pfx.slice(0,64) & nope # nope forces re-alignment
let bytes = padded.getBytes proc pathPfxPadKey*(pfx: NibblesSeq; dblNibble: static[byte]): NodeKey =
## Variant of `pathPfxPad()`.
##
## Extend (or cut) the argument nibbles sequence `pfx` for generating a
## `NodeKey`.
let bytes = pfx.pathPfxPad(dblNibble).getBytes
(addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len) (addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -110,12 +110,12 @@ proc append*(writer: var RlpWriter; node: NodeRef) =
## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty ## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
## list. ## list.
proc addNodeKey(writer: var RlpWriter; key: NodeKey) = proc addNodeKey(writer: var RlpWriter; key: NodeKey) =
if key.isEmpty: if key == EMPTY_ROOT_KEY:
writer.append EmptyBlob writer.append EmptyBlob
else: else:
writer.append key.to(Hash256) writer.append key.to(Hash256)
if node.isError: if node.error != AristoError(0):
writer.startList(0) writer.startList(0)
else: else:
case node.vType: case node.vType:
@ -170,7 +170,7 @@ proc blobify*(node: VertexRef; data: var Blob): AristoError =
refs: Blob refs: Blob
keys: Blob keys: Blob
for n in 0..15: for n in 0..15:
if not node.bVid[n].isZero: if node.bVid[n] != VertexID(0):
access = access or (1u16 shl n) access = access or (1u16 shl n)
refs &= node.bVid[n].uint64.toBytesBE.toSeq refs &= node.bVid[n].uint64.toBytesBE.toSeq
data = refs & access.toBytesBE.toSeq & @[0u8] data = refs & access.toBytesBE.toSeq & @[0u8]
@ -199,7 +199,7 @@ proc blobify*(node: VertexRef): Result[Blob, AristoError] =
ok(data) ok(data)
proc blobify*(db: AristoDbRef; data: var Blob) = proc blobify*(db: AristoDb; data: var Blob) =
## This function serialises some maintenance data for the `AristoDb` ## This function serialises some maintenance data for the `AristoDb`
## descriptor. At the moment, this contains the recycliing table for the ## descriptor. At the moment, this contains the recycliing table for the
## `VertexID` values, only. ## `VertexID` values, only.
@ -212,11 +212,12 @@ proc blobify*(db: AristoDbRef; data: var Blob) =
## 0x40 ## 0x40
## ##
data.setLen(0) data.setLen(0)
for w in db.vGen: if not db.top.isNil:
data &= w.uint64.toBytesBE.toSeq for w in db.top.vGen:
data &= w.uint64.toBytesBE.toSeq
data.add 0x40u8 data.add 0x40u8
proc blobify*(db: AristoDbRef): Blob = proc blobify*(db: AristoDb): Blob =
## Variant of `toDescRecord()` ## Variant of `toDescRecord()`
db.blobify result db.blobify result
@ -289,14 +290,15 @@ proc deblobify*(record: Blob; vtx: var VertexRef): AristoError =
return DbrUnknown return DbrUnknown
proc deblobify*(data: Blob; db: var AristoDbRef): AristoError = proc deblobify*(data: Blob; db: var AristoDb): AristoError =
## De-serialise the data record encoded with `blobify()`. The second ## De-serialise the data record encoded with `blobify()` into a new current
## argument `db` can be `nil` in which case a new `AristoDbRef` type ## top layer. If present, the previous top layer of the `db` descriptor is
## descriptor will be created. ## pushed onto the parent layers stack.
if db.isNil: if not db.top.isNil:
db = AristoDbRef() db.stack.add db.top
db.top = AristoLayerRef()
if data.len == 0: if data.len == 0:
db.vGen = @[1.VertexID] db.top.vGen = @[1.VertexID]
else: else:
if (data.len mod 8) != 1: if (data.len mod 8) != 1:
return ADbGarbledSize return ADbGarbledSize
@ -304,14 +306,13 @@ proc deblobify*(data: Blob; db: var AristoDbRef): AristoError =
return ADbWrongType return ADbWrongType
for n in 0 ..< (data.len div 8): for n in 0 ..< (data.len div 8):
let w = n * 8 let w = n * 8
db.vGen.add (uint64.fromBytesBE data[w ..< w + 8]).VertexID db.top.vGen.add (uint64.fromBytesBE data[w ..< w + 8]).VertexID
proc deblobify*[W: VertexRef|AristoDb](
proc deblobify*[W: VertexRef|AristoDbRef](
record: Blob; record: Blob;
T: type W; T: type W;
): Result[T,AristoError] = ): Result[T,AristoError] =
## Variant of `deblobify()` for either `VertexRef` or `AristoDbRef` ## Variant of `deblobify()` for either `VertexRef` or `AristoDb`
var obj: T # isNil, will be auto-initialised var obj: T # isNil, will be auto-initialised
let info = record.deblobify obj let info = record.deblobify obj
if info != AristoError(0): if info != AristoError(0):

View File

@ -14,53 +14,92 @@
{.push raises: [].} {.push raises: [].}
import import
std/[algorithm, sequtils, sets, tables],
./aristo_desc ./aristo_desc
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions # Public functions
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc vidFetch*(db: AristoDbRef): VertexID = proc vidFetch*(db: AristoDb): VertexID =
## Create a new `VertexID`. Reusable *ID*s are kept in a list where the top ## Create a new `VertexID`. Reusable *ID*s are kept in a list where the top
## entry *ID0* has the property that any other *ID* larger *ID0* is also not ## entry *ID0* has the property that any other *ID* larger *ID0* is also not
## not used on the database. ## not used on the database.
case db.vGen.len: let top = db.top
case top.vGen.len:
of 0: of 0:
db.vGen = @[2.VertexID] # Note that `VertexID(1)` is the root of the main trie
result = 1.VertexID top.vGen = @[VertexID(3)]
result = VertexID(2)
of 1: of 1:
result = db.vGen[^1] result = top.vGen[^1]
db.vGen = @[(result.uint64 + 1).VertexID] top.vGen = @[VertexID(result.uint64 + 1)]
else: else:
result = db.vGen[^2] result = top.vGen[^2]
db.vGen[^2] = db.vGen[^1] top.vGen[^2] = top.vGen[^1]
db.vGen.setLen(db.vGen.len-1) top.vGen.setLen(top.vGen.len-1)
proc vidPeek*(db: AristoDbRef): VertexID = proc vidPeek*(db: AristoDb): VertexID =
## Like `new()` without consuming this *ID*. It will return the *ID* that ## Like `new()` without consuming this *ID*. It will return the *ID* that
## would be returned by the `new()` function. ## would be returned by the `new()` function.
case db.vGen.len: case db.top.vGen.len:
of 0: of 0:
1.VertexID VertexID(2)
of 1: of 1:
db.vGen[^1] db.top.vGen[^1]
else: else:
db.vGen[^2] db.top.vGen[^2]
proc vidDispose*(db: AristoDbRef; vid: VertexID) = proc vidDispose*(db: AristoDb; vid: VertexID) =
## Recycle the argument `vtxID` which is useful after deleting entries from ## Recycle the argument `vtxID` which is useful after deleting entries from
## the vertex table to prevent the `VertexID` type key values small. ## the vertex table to prevent the `VertexID` type key values small.
if db.vGen.len == 0: if VertexID(1) < vid:
db.vGen = @[vid] if db.top.vGen.len == 0:
else: db.top.vGen = @[vid]
let topID = db.vGen[^1] else:
# Only store smaller numbers: all numberts larger than `topID` let topID = db.top.vGen[^1]
# are free numbers # Only store smaller numbers: all numberts larger than `topID`
if vid < topID: # are free numbers
db.vGen[^1] = vid if vid < topID:
db.vGen.add topID db.top.vGen[^1] = vid
db.top.vGen.add topID
proc vidReorg*(db: AristoDb) =
## Remove redundant items from the recycle queue. All recycled entries are
## typically kept in the queue until the backend database is committed.
if 1 < db.top.vGen.len:
let lst = db.top.vGen.mapIt(uint64(it)).sorted.mapIt(VertexID(it))
for n in (lst.len-1).countDown(1):
if lst[n-1].uint64 + 1 != lst[n].uint64:
# All elements larger than `lst[n-1` are in increasing order. For
# the last continuously increasing sequence, only the smallest item
# is needed and the rest can be removed
#
# Example:
# ..3, 5, 6, 7 => ..3, 5
# ^
# |
# n
#
if n < lst.len-1:
db.top.vGen.shallowCopy lst
db.top.vGen.setLen(n+1)
return
# All entries are continuously increasing
db.top.vGen = @[lst[0]]
proc vidAttach*(db: AristoDb; key: NodeKey; vid: VertexID) =
## Attach (i.r. register) a Merkle hash key to a vertex ID.
db.top.dKey.excl vid
db.top.pAmk[key] = vid
db.top.kMap[vid] = key
proc vidAttach*(db: AristoDb; key: NodeKey): VertexID {.discardable.} =
## Variant of `vidAttach()` with auto-generated vertex ID
result = db.vidFetch
db.vidAttach(key, result)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -18,7 +18,7 @@ import
rocksdb, rocksdb,
unittest2, unittest2,
../nimbus/db/select_backend, ../nimbus/db/select_backend,
../nimbus/db/aristo/[aristo_desc, aristo_error, aristo_merge], ../nimbus/db/aristo/[aristo_desc, aristo_merge],
../nimbus/core/chain, ../nimbus/core/chain,
../nimbus/sync/snap/worker/db/[rocky_bulk_load, snapdb_accounts, snapdb_desc], ../nimbus/sync/snap/worker/db/[rocky_bulk_load, snapdb_accounts, snapdb_desc],
./replay/[pp, undump_accounts, undump_storages], ./replay/[pp, undump_accounts, undump_storages],
@ -147,6 +147,12 @@ proc snapDbAccountsRef(cdb:ChainDb; root:Hash256; pers:bool):SnapDbAccountsRef =
# Test Runners: accounts and accounts storages # Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc miscRunner(noisy =true) =
suite &"Aristo: Miscellaneous tests":
test &"VertexID recyling lists":
noisy.test_transcodeVidRecycleLists()
proc transcodeRunner(noisy =true; sample=accSample; stopAfter=high(int)) = proc transcodeRunner(noisy =true; sample=accSample; stopAfter=high(int)) =
let let
accLst = sample.to(seq[UndumpAccounts]) accLst = sample.to(seq[UndumpAccounts])
@ -162,9 +168,6 @@ proc transcodeRunner(noisy =true; sample=accSample; stopAfter=high(int)) =
suite &"Aristo: transcoding {fileInfo} accounts for {info}": suite &"Aristo: transcoding {fileInfo} accounts for {info}":
test &"Trancoding VertexID recyling lists (seed={accLst.len})":
noisy.test_transcodeVidRecycleLists(accLst.len)
# New common descriptor for this sub-group of tests # New common descriptor for this sub-group of tests
let let
desc = db.cdb[0].snapDbAccountsRef(root, db.persistent) desc = db.cdb[0].snapDbAccountsRef(root, db.persistent)
@ -193,16 +196,16 @@ proc accountsRunner(noisy=true; sample=accSample, resetDb=false) =
suite &"Aristo: accounts data dump from {fileInfo}{listMode}": suite &"Aristo: accounts data dump from {fileInfo}{listMode}":
test &"Merge {accLst.len} account lists to database": test &"Merge {accLst.len} account lists to database":
noisy.test_mergeKvpList(accLst, resetDb) check noisy.test_mergeKvpList(accLst, resetDb)
test &"Merge {accLst.len} proof & account lists to database": test &"Merge {accLst.len} proof & account lists to database":
noisy.test_mergeProofAndKvpList(accLst, resetDb) check noisy.test_mergeProofAndKvpList(accLst, resetDb)
test &"Traverse accounts database w/{accLst.len} account lists": test &"Traverse accounts database w/{accLst.len} account lists":
noisy.test_nearbyKvpList(accLst, resetDb) check noisy.test_nearbyKvpList(accLst, resetDb)
test &"Delete accounts database, successively {accLst.len} entries": test &"Delete accounts database, successively {accLst.len} entries":
noisy.test_delete accLst check noisy.test_delete accLst
proc storagesRunner( proc storagesRunner(
@ -219,22 +222,23 @@ proc storagesRunner(
suite &"Aristo: storages data dump from {fileInfo}{listMode}": suite &"Aristo: storages data dump from {fileInfo}{listMode}":
test &"Merge {stoLst.len} storage slot lists to database": test &"Merge {stoLst.len} storage slot lists to database":
noisy.test_mergeKvpList(stoLst, resetDb) check noisy.test_mergeKvpList(stoLst, resetDb)
test &"Merge {stoLst.len} proof & slots lists to database": test &"Merge {stoLst.len} proof & slots lists to database":
noisy.test_mergeProofAndKvpList(stoLst, resetDb, fileInfo, oops) check noisy.test_mergeProofAndKvpList(stoLst, resetDb, fileInfo, oops)
test &"Traverse storage slots database w/{stoLst.len} account lists": test &"Traverse storage slots database w/{stoLst.len} account lists":
noisy.test_nearbyKvpList(stoLst, resetDb) check noisy.test_nearbyKvpList(stoLst, resetDb)
test &"Delete storage database, successively {stoLst.len} entries": test &"Delete storage database, successively {stoLst.len} entries":
noisy.test_delete stoLst check noisy.test_delete stoLst
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Main function(s) # Main function(s)
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc aristoMain*(noisy = defined(debug)) = proc aristoMain*(noisy = defined(debug)) =
noisy.miscRunner()
noisy.transcodeRunner() noisy.transcodeRunner()
noisy.accountsRunner() noisy.accountsRunner()
noisy.storagesRunner() noisy.storagesRunner()
@ -243,6 +247,11 @@ when isMainModule:
const const
noisy = defined(debug) or true noisy = defined(debug) or true
setErrorLevel()
when true: # and false:
noisy.miscRunner()
# Borrowed from `test_sync_snap.nim` # Borrowed from `test_sync_snap.nim`
when true: # and false: when true: # and false:
for n,sam in snapTestList: for n,sam in snapTestList:
@ -259,9 +268,13 @@ when isMainModule:
# This one usues dumps from the external `nimbus-eth1-blob` repo # This one usues dumps from the external `nimbus-eth1-blob` repo
when true and false: when true and false:
import ./test_sync_snap/snap_storage_xx import ./test_sync_snap/snap_storage_xx, ../nimbus/db/aristo/aristo_error
let knownFailures: KnownHasherFailure = @[ let knownFailures: KnownHasherFailure = @[
("storages5__34__41_dump#10.20512",(VertexID(1),HashifyRootHashMismatch)), ("storages3__18__25_dump#11.27367",(3,HashifyExistingHashMismatch)),
("storages4__26__33_dump#11.23924",(6,HashifyExistingHashMismatch)),
("storages5__34__41_dump#10.20512",(1,HashifyRootHashMismatch)),
("storagesB__84__92_dump#6.9709", (7,HashifyExistingHashMismatch)),
("storagesD_102_109_dump#17.28287",(9,HashifyExistingHashMismatch)),
] ]
noisy.showElapsed("@snap_storage_xx"): noisy.showElapsed("@snap_storage_xx"):
for n,sam in snapStorageList: for n,sam in snapStorageList:

View File

@ -24,7 +24,7 @@ import
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc convertPartially( proc convertPartially(
db: AristoDbRef; db: AristoDb;
vtx: VertexRef; vtx: VertexRef;
nd: var NodeRef; nd: var NodeRef;
): seq[VertexID] = ): seq[VertexID] =
@ -42,7 +42,7 @@ proc convertPartially(
vType: Extension, vType: Extension,
ePfx: vtx.ePfx, ePfx: vtx.ePfx,
eVid: vtx.eVid) eVid: vtx.eVid)
let key = db.kMap.getOrDefault(vtx.eVid, EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vtx.eVid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
nd.key[0] = key nd.key[0] = key
return return
@ -52,15 +52,15 @@ proc convertPartially(
vType: Branch, vType: Branch,
bVid: vtx.bVid) bVid: vtx.bVid)
for n in 0..15: for n in 0..15:
if not vtx.bVid[n].isZero: if vtx.bVid[n] != VertexID(0):
let key = db.kMap.getOrDefault(vtx.bVid[n], EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vtx.bVid[n], EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
nd.key[n] = key nd.key[n] = key
continue continue
result.add vtx.bVid[n] result.add vtx.bVid[n]
proc convertPartiallyOk( proc convertPartiallyOk(
db: AristoDbRef; db: AristoDb;
vtx: VertexRef; vtx: VertexRef;
nd: var NodeRef; nd: var NodeRef;
): bool = ): bool =
@ -77,7 +77,7 @@ proc convertPartiallyOk(
vType: Extension, vType: Extension,
ePfx: vtx.ePfx, ePfx: vtx.ePfx,
eVid: vtx.eVid) eVid: vtx.eVid)
let key = db.kMap.getOrDefault(vtx.eVid, EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vtx.eVid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
nd.key[0] = key nd.key[0] = key
result = true result = true
@ -87,54 +87,54 @@ proc convertPartiallyOk(
bVid: vtx.bVid) bVid: vtx.bVid)
result = true result = true
for n in 0..15: for n in 0..15:
if not vtx.bVid[n].isZero: if vtx.bVid[n] != VertexID(0):
let key = db.kMap.getOrDefault(vtx.bVid[n], EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vtx.bVid[n], EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
nd.key[n] = key nd.key[n] = key
continue continue
return false return false
proc cachedVID(db: AristoDbRef; nodeKey: NodeKey): VertexID = proc cachedVID(db: AristoDb; nodeKey: NodeKey): VertexID =
## Get vertex ID from reverse cache ## Get vertex ID from reverse cache
let vid = db.pAmk.getOrDefault(nodeKey, VertexID(0)) let vid = db.top.pAmk.getOrDefault(nodeKey, VertexID(0))
if vid != VertexID(0): if vid != VertexID(0):
result = vid result = vid
else: else:
result = db.vidFetch() result = db.vidFetch()
db.pAmk[nodeKey] = result db.top.pAmk[nodeKey] = result
db.kMap[result] = nodeKey db.top.kMap[result] = nodeKey
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public functions for `VertexID` => `NodeKey` mapping # Public functions for `VertexID` => `NodeKey` mapping
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc pal*(db: AristoDbRef; vid: VertexID): NodeKey = proc pal*(db: AristoDb; vid: VertexID): NodeKey =
## Retrieve the cached `Merkel` hash (aka `NodeKey` object) associated with ## Retrieve the cached `Merkel` hash (aka `NodeKey` object) associated with
## the argument `VertexID` type argument `vid`. Return a zero `NodeKey` if ## the argument `VertexID` type argument `vid`. Return a zero `NodeKey` if
## there is none. ## there is none.
## ##
## If the vertex ID `vid` is not found in the cache, then the structural ## If the vertex ID `vid` is not found in the cache, then the structural
## table is checked whether the cache can be updated. ## table is checked whether the cache can be updated.
if not db.isNil: if not db.top.isNil:
let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY) let key = db.top.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
if key != EMPTY_ROOT_KEY: if key != EMPTY_ROOT_KEY:
return key return key
let vtx = db.sTab.getOrDefault(vid, VertexRef(nil)) let vtx = db.top.sTab.getOrDefault(vid, VertexRef(nil))
if vtx != VertexRef(nil): if vtx != VertexRef(nil):
var node: NodeRef var node: NodeRef
if db.convertPartiallyOk(vtx,node): if db.convertPartiallyOk(vtx,node):
var w = initRlpWriter() var w = initRlpWriter()
w.append node w.append node
result = w.finish.keccakHash.data.NodeKey result = w.finish.keccakHash.data.NodeKey
db.kMap[vid] = result db.top.kMap[vid] = result
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public funcions extending/completing vertex records # Public funcions extending/completing vertex records
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc updated*(nd: NodeRef; db: AristoDbRef): NodeRef = proc updated*(nd: NodeRef; db: AristoDb): NodeRef =
## Return a copy of the argument node `nd` with updated missing vertex IDs. ## Return a copy of the argument node `nd` with updated missing vertex IDs.
## ##
## For a `Leaf` node, the payload data `PayloadRef` type reference is *not* ## For a `Leaf` node, the payload data `PayloadRef` type reference is *not*
@ -153,7 +153,7 @@ proc updated*(nd: NodeRef; db: AristoDbRef): NodeRef =
result = NodeRef( result = NodeRef(
vType: Extension, vType: Extension,
ePfx: nd.ePfx) ePfx: nd.ePfx)
if not nd.key[0].isEmpty: if nd.key[0] != EMPTY_ROOT_KEY:
result.eVid = db.cachedVID nd.key[0] result.eVid = db.cachedVID nd.key[0]
result.key[0] = nd.key[0] result.key[0] = nd.key[0]
of Branch: of Branch:
@ -161,10 +161,10 @@ proc updated*(nd: NodeRef; db: AristoDbRef): NodeRef =
vType: Branch, vType: Branch,
key: nd.key) key: nd.key)
for n in 0..15: for n in 0..15:
if not nd.key[n].isEmpty: if nd.key[n] != EMPTY_ROOT_KEY:
result.bVid[n] = db.cachedVID nd.key[n] result.bVid[n] = db.cachedVID nd.key[n]
proc asNode*(vtx: VertexRef; db: AristoDbRef): NodeRef = proc asNode*(vtx: VertexRef; db: AristoDb): NodeRef =
## Return a `NodeRef` object by augmenting missing `Merkel` hashes (aka ## Return a `NodeRef` object by augmenting missing `Merkel` hashes (aka
## `NodeKey` objects) from the cache or from calculated cached vertex ## `NodeKey` objects) from the cache or from calculated cached vertex
## entries, if available. ## entries, if available.
@ -174,7 +174,7 @@ proc asNode*(vtx: VertexRef; db: AristoDbRef): NodeRef =
if not db.convertPartiallyOk(vtx, result): if not db.convertPartiallyOk(vtx, result):
return NodeRef(error: CacheMissingNodekeys) return NodeRef(error: CacheMissingNodekeys)
proc asNode*(rc: Result[VertexRef,AristoError]; db: AristoDbRef): NodeRef = proc asNode*(rc: Result[VertexRef,AristoError]; db: AristoDb): NodeRef =
## Variant of `asNode()`. ## Variant of `asNode()`.
if rc.isErr: if rc.isErr:
return NodeRef(error: rc.error) return NodeRef(error: rc.error)

View File

@ -30,6 +30,11 @@ type
# Private helpers # Private helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc sortedKeys(lTab: Table[LeafKey,VertexID]): seq[LeafKey] =
lTab.keys.toSeq.sorted(cmp = proc(a,b: LeafKey): int = cmp(a,b))
# --------------
proc posixPrngRand(state: var uint32): byte = proc posixPrngRand(state: var uint32): byte =
## POSIX.1-2001 example of a rand() implementation, see manual page rand(3). ## POSIX.1-2001 example of a rand() implementation, see manual page rand(3).
state = state * 1103515245 + 12345; state = state * 1103515245 + 12345;
@ -70,17 +75,18 @@ proc rand(td: var TesterDesc; top: int): int =
# ----------------------- # -----------------------
proc fwdWalkVerify( proc fwdWalkVerify(
db: AristoDbRef; db: AristoDb;
root: VertexID;
noisy: bool; noisy: bool;
): tuple[visited: int, error: AristoError] = ): tuple[visited: int, error: AristoError] =
let let
lTabLen = db.lTab.len lTabLen = db.top.lTab.len
var var
error = AristoError(0) error = AristoError(0)
tag: NodeTag lky = LeafKey(root: root)
n = 0 n = 0
while n < lTabLen + 1: while n < lTabLen + 1:
let rc = tag.nearbyRight(db.lRoot, db) # , noisy) let rc = lky.nearbyRight(db)
#noisy.say "=================== ", n #noisy.say "=================== ", n
if rc.isErr: if rc.isErr:
if rc.error != NearbyBeyondRange: if rc.error != NearbyBeyondRange:
@ -88,8 +94,8 @@ proc fwdWalkVerify(
error = rc.error error = rc.error
check rc.error == AristoError(0) check rc.error == AristoError(0)
break break
if rc.value < high(NodeTag): if rc.value.path < high(NodeTag):
tag = (rc.value.u256 + 1).NodeTag lky.path = NodeTag(rc.value.path.u256 + 1)
n.inc n.inc
if error != AristoError(0): if error != AristoError(0):
@ -108,13 +114,14 @@ proc fwdWalkVerify(
proc test_delete*( proc test_delete*(
noisy: bool; noisy: bool;
list: openArray[ProofTrieData]; list: openArray[ProofTrieData];
) = ): bool =
var td = TesterDesc.init 42 var td = TesterDesc.init 42
for n,w in list: for n,w in list:
let let
db = AristoDbRef() db = AristoDb(top: AristoLayerRef())
lstLen = list.len lstLen = list.len
added = db.merge w.kvpLst leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
added = db.merge leafs
if added.error != AristoError(0): if added.error != AristoError(0):
check added.error == AristoError(0) check added.error == AristoError(0)
@ -127,24 +134,24 @@ proc test_delete*(
# Now `db` represents a (fully labelled) `Merkle Patricia Tree` # Now `db` represents a (fully labelled) `Merkle Patricia Tree`
# Provide a (reproducible) peudo-random copy of the leafs list # Provide a (reproducible) peudo-random copy of the leafs list
var leafs = db.lTab.keys.toSeq.mapIt(it.Uint256).sorted.mapIt(it.NodeTag) var leafKeys = db.top.lTab.sortedKeys
if 2 < leafs.len: if 2 < leafKeys.len:
for n in 0 ..< leafs.len-1: for n in 0 ..< leafKeys.len-1:
let r = n + td.rand(leafs.len - n) let r = n + td.rand(leafKeys.len - n)
leafs[n].swap leafs[r] leafKeys[n].swap leafKeys[r]
let uMax = leafs.len - 1 let uMax = leafKeys.len - 1
for u,pathTag in leafs: for u,leafKey in leafKeys:
let rc = pathTag.delete(db) # , noisy=(tags.len < 2)) let rc = leafKey.delete(db)
if rc.isErr: if rc.isErr:
check rc.error == (VertexID(0),AristoError(0)) check rc.error == (VertexID(0),AristoError(0))
return return
if pathTag in db.lTab: if leafKey in db.top.lTab:
check pathTag notin db.lTab check leafKey notin db.top.lTab
return return
if uMax != db.lTab.len + u: if uMax != db.top.lTab.len + u:
check uMax == db.lTab.len + u check uMax == db.top.lTab.len + u
return return
# Walking the database is too slow for large tables. So the hope is that # Walking the database is too slow for large tables. So the hope is that
@ -152,12 +159,12 @@ proc test_delete*(
const tailCheck = 999 const tailCheck = 999
if uMax < u + tailCheck: if uMax < u + tailCheck:
if u < uMax: if u < uMax:
let vfy = db.fwdWalkVerify(noisy) let vfy = db.fwdWalkVerify(leafKey.root, noisy)
if vfy.error != AristoError(0): if vfy.error != AristoError(0):
check vfy == (0, AristoError(0)) check vfy == (0, AristoError(0))
return return
elif 0 < db.sTab.len: elif 0 < db.top.sTab.len:
check db.sTab.len == 0 check db.top.sTab.len == 0
return return
let rc = db.hashifyCheck(relax=true) let rc = db.hashifyCheck(relax=true)
if rc.isErr: if rc.isErr:
@ -166,11 +173,12 @@ proc test_delete*(
when true and false: when true and false:
if uMax < u + tailCheck or (u mod 777) == 3: if uMax < u + tailCheck or (u mod 777) == 3:
noisy.say "***", "step lTab=", db.lTab.len noisy.say "***", "step lTab=", db.top.lTab.len
when true and false: when true and false:
noisy.say "***", "sample <", n, "/", list.len-1, ">", noisy.say "***", "sample <", n, "/", list.len-1, ">",
" lstLen=", w.kvpLst.len " lstLen=", leafs.len
true
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -13,10 +13,10 @@ import
std/sequtils, std/sequtils,
eth/common, eth/common,
rocksdb, rocksdb,
../../nimbus/db/aristo/[aristo_desc, aristo_merge], ../../nimbus/db/aristo/[
aristo_constants, aristo_debug, aristo_desc, aristo_merge],
../../nimbus/db/kvstore_rocksdb, ../../nimbus/db/kvstore_rocksdb,
../../nimbus/sync/protocol/snap/snap_types, ../../nimbus/sync/protocol/snap/snap_types,
../../nimbus/sync/snap/[constants, range_desc],
../test_sync_snap/test_types, ../test_sync_snap/test_types,
../replay/[pp, undump_accounts, undump_storages] ../replay/[pp, undump_accounts, undump_storages]
@ -25,34 +25,43 @@ type
root*: NodeKey root*: NodeKey
id*: int id*: int
proof*: seq[SnapProof] proof*: seq[SnapProof]
kvpLst*: seq[LeafKVP] kvpLst*: seq[LeafSubKVP]
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private helpers # Private helpers
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc to(w: UndumpAccounts; T: type ProofTrieData): T = proc toPfx(indent: int): string =
T(root: w.root.to(NodeKey), "\n" & " ".repeat(indent)
proof: w.data.proof,
kvpLst: w.data.accounts.mapIt(LeafKVP(
pathTag: it.accKey.to(NodeTag),
payload: PayloadRef(pType: BlobData, blob: it.accBlob))))
proc to(s: UndumpStorages; id: int; T: type seq[ProofTrieData]): T =
for w in s.data.storages:
result.add ProofTrieData(
root: w.account.storageRoot.to(NodeKey),
id: id,
kvpLst: w.data.mapIt(LeafKVP(
pathTag: it.slotHash.to(NodeTag),
payload: PayloadRef(pType: BlobData, blob: it.slotData))))
if 0 < result.len:
result[^1].proof = s.data.proof
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public helpers # Public pretty printing
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc pp*(w: ProofTrieData; db: var AristoDb; indent = 4): string =
let pfx = indent.toPfx
result = "(" & w.root.pp(db) & "," & $w.id & ",[" & $w.proof.len & "],"
result &= pfx & " ["
for n,kvp in w.kvpLst:
if 0 < n:
result &= "," & pfx & " "
result &= "(" & kvp.leafKey.pp(db) & "," & $kvp.payload.pType & ")"
result &= "])"
proc pp*(w: ProofTrieData; indent = 4): string =
var db = AristoDB()
w.pp(db, indent)
proc pp*(w: openArray[ProofTrieData]; db: var AristoDb; indent = 4): string =
let pfx = indent.toPfx
"[" & w.mapIt(it.pp(db, indent + 1)).join("," & pfx & " ") & "]"
proc pp*(w: openArray[ProofTrieData]; indent = 4): string =
let pfx = indent.toPfx
"[" & w.mapIt(it.pp(indent + 1)).join("," & pfx & " ") & "]"
# ----------
proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) = proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
if noisy: if noisy:
if args.len == 0: if args.len == 0:
@ -62,7 +71,9 @@ proc say*(noisy = false; pfx = "***"; args: varargs[string, `$`]) =
else: else:
echo pfx, args.toSeq.join echo pfx, args.toSeq.join
# ----------------------- # ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T = proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T =
## Convert test data into usable in-memory format ## Convert test data into usable in-memory format
@ -96,12 +107,39 @@ proc to*(sample: AccountsSample; T: type seq[UndumpStorages]): T =
break break
result.add w result.add w
proc to*(w: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T = proc to*(ua: seq[UndumpAccounts]; T: type seq[ProofTrieData]): T =
w.mapIt(it.to(ProofTrieData)) var (rootKey, rootVid) = (EMPTY_ROOT_KEY, VertexID(0))
for w in ua:
let thisRoot = w.root.to(NodeKey)
if rootKey != thisRoot:
(rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
result.add ProofTrieData(
root: rootKey,
proof: w.data.proof,
kvpLst: w.data.accounts.mapIt(LeafSubKVP(
leafKey: LeafKey(root: rootVid, path: it.accKey.to(NodeTag)),
payload: PayloadRef(pType: BlobData, blob: it.accBlob))))
proc to*(s: seq[UndumpStorages]; T: type seq[ProofTrieData]): T = proc to*(us: seq[UndumpStorages]; T: type seq[ProofTrieData]): T =
for n,w in s: var (rootKey, rootVid) = (EMPTY_ROOT_KEY, VertexID(0))
result &= w.to(n,seq[ProofTrieData]) for n,s in us:
for w in s.data.storages:
let thisRoot = w.account.storageRoot.to(NodeKey)
if rootKey != thisRoot:
(rootKey, rootVid) = (thisRoot, VertexID(rootVid.uint64 + 1))
result.add ProofTrieData(
root: thisRoot,
id: n + 1,
kvpLst: w.data.mapIt(LeafSubKVP(
leafKey: LeafKey(root: rootVid, path: it.slotHash.to(NodeTag)),
payload: PayloadRef(pType: BlobData, blob: it.slotData))))
if 0 < result.len:
result[^1].proof = s.data.proof
proc mapRootVid*(a: openArray[LeafSubKVP]; toVid: VertexID): seq[LeafSubKVP] =
a.mapIt(LeafSubKVP(
leafKey: LeafKey(root: toVid, path: it.leafKey.path),
payload: it.payload))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Public iterators # Public iterators

View File

@ -22,8 +22,8 @@ import
./test_helpers ./test_helpers
type type
KnownHasherFailure* = seq[(string,(VertexID,AristoError))] KnownHasherFailure* = seq[(string,(int,AristoError))]
## (<sample-name> & "#" <instance>, @[(<slot-id>, <error-symbol>)), ..]) ## (<sample-name> & "#" <instance>, (<vertex-id>,<error-symbol>))
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# Private helpers # Private helpers
@ -36,12 +36,12 @@ proc pp(w: tuple[merged: int, dups: int, error: AristoError]): string =
result &= ")" result &= ")"
proc mergeStepwise( proc mergeStepwise(
db: AristoDbRef; db: AristoDb;
leafs: openArray[LeafKVP]; leafs: openArray[LeafSubKVP];
noisy: bool; noisy: bool;
): tuple[merged: int, dups: int, error: AristoError] = ): tuple[merged: int, dups: int, error: AristoError] =
let let
lTabLen = db.lTab.len lTabLen = db.top.lTab.len
var var
(merged, dups, error) = (0, 0, AristoError(0)) (merged, dups, error) = (0, 0, AristoError(0))
@ -53,7 +53,9 @@ proc mergeStepwise(
let let
preState = db.pp preState = db.pp
hike = db.merge leaf hike = db.merge leaf
ekih = leaf.pathTag.hikeUp(db.lRoot, db) ekih = leaf.leafKey.hikeUp(db)
noisy.say "***", "step <", n, "/", leafs.len-1, "> "
case hike.error: case hike.error:
of AristoError(0): of AristoError(0):
@ -81,7 +83,7 @@ proc mergeStepwise(
rc.error rc.error
if dumpOk: if dumpOk:
noisy.say "***", "<", n, "/", leafs.len-1, "> ", leaf.pathTag.pp, noisy.say "***", "<", n, "/", leafs.len-1, "> ", leaf.leafKey.pp,
"\n pre-state ", preState, "\n pre-state ", preState,
"\n --------", "\n --------",
"\n merge => hike", "\n merge => hike",
@ -103,10 +105,10 @@ proc mergeStepwise(
elif hike.error != MergeLeafPathCachedAlready: elif hike.error != MergeLeafPathCachedAlready:
check ekih.legs[^1].wp.vtx.lData.blob == leaf.payload.blob check ekih.legs[^1].wp.vtx.lData.blob == leaf.payload.blob
if db.lTab.len != lTabLen + merged: if db.top.lTab.len != lTabLen + merged:
error = GenericError error = GenericError
check db.lTab.len == lTabLen + merged # quick leaf access table check db.top.lTab.len == lTabLen + merged # quick leaf access table
stopOk = true # makes no sense to go on further stopOk = true # makes no sense to go on
if stopOk: if stopOk:
noisy.say "***", "<", n, "/", leafs.len-1, "> stop" noisy.say "***", "<", n, "/", leafs.len-1, "> stop"
@ -122,27 +124,28 @@ proc test_mergeKvpList*(
noisy: bool; noisy: bool;
list: openArray[ProofTrieData]; list: openArray[ProofTrieData];
resetDb = false; resetDb = false;
) = ): bool =
var db = AristoDbRef()
var db = AristoDb(top: AristoLayerRef())
for n,w in list: for n,w in list:
if resetDb: if resetDb:
db = AristoDbRef() db.top = AristoLayerRef()
let let
lstLen = list.len lstLen = list.len
lTabLen = db.lTab.len lTabLen = db.top.lTab.len
leafs = w.kvpLst leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
#prePreDb = db.pp #prePreDb = db.pp
added = db.merge leafs added = db.merge leafs
#added = db.mergeStepwise(leafs, noisy=(6 < n)) #added = db.mergeStepwise(leafs, noisy=true)
check added.error == AristoError(0) check added.error == AristoError(0)
check db.lTab.len == lTabLen + added.merged check db.top.lTab.len == lTabLen + added.merged
check added.merged + added.dups == leafs.len check added.merged + added.dups == leafs.len
let let
#preDb = db.pp #preDb = db.pp
preKMap = (db.kMap.len, db.pp(sTabOk=false, lTabOk=false)) preKMap = (db.top.kMap.len, db.pp(sTabOk=false, lTabOk=false))
prePAmk = (db.pAmk.len, db.pAmk.pp(db)) prePAmk = (db.top.pAmk.len, db.top.pAmk.pp(db))
block: block:
let rc = db.hashify # (noisy=true) let rc = db.hashify # (noisy=true)
@ -175,8 +178,9 @@ proc test_mergeKvpList*(
when true and false: when true and false:
noisy.say "***", "sample ", n, "/", lstLen-1, noisy.say "***", "sample ", n, "/", lstLen-1,
" leafs merged=", added.merged, " merged=", added.merged,
" dup=", added.dups " dup=", added.dups
true
proc test_mergeProofAndKvpList*( proc test_mergeProofAndKvpList*(
@ -185,14 +189,14 @@ proc test_mergeProofAndKvpList*(
resetDb = false; resetDb = false;
idPfx = ""; idPfx = "";
oops: KnownHasherFailure = @[]; oops: KnownHasherFailure = @[];
) = ): bool =
var var
db = AristoDbRef(nil) db: AristoDb
rootKey = NodeKey.default rootKey = NodeKey.default
count = 0 count = 0
for n,w in list: for n,w in list:
if resetDb or w.root != rootKey or w.proof.len == 0: if resetDb or w.root != rootKey or w.proof.len == 0:
db = AristoDbRef() db.top = AristoLayerRef()
rootKey = w.root rootKey = w.root
count = 0 count = 0
count.inc count.inc
@ -201,44 +205,44 @@ proc test_mergeProofAndKvpList*(
testId = idPfx & "#" & $w.id & "." & $n testId = idPfx & "#" & $w.id & "." & $n
oopsTab = oops.toTable oopsTab = oops.toTable
lstLen = list.len lstLen = list.len
sTabLen = db.sTab.len sTabLen = db.top.sTab.len
lTabLen = db.lTab.len lTabLen = db.top.lTab.len
leafs = w.kvpLst leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
when true and false: when true and false:
noisy.say "***", "sample <", n, "/", lstLen-1, ">", noisy.say "***", "sample(1) <", n, "/", lstLen-1, ">",
" groups=", count, " nLeafs=", leafs.len " groups=", count, " nLeafs=", leafs.len,
" db-dump\n ", db.pp
var proved: tuple[merged: int, dups: int, error: AristoError] var proved: tuple[merged: int, dups: int, error: AristoError]
if 0 < w.proof.len: if 0 < w.proof.len:
let rc = db.merge(rootKey, VertexID(1))
if rc.isErr:
check rc.error == AristoError(0)
return
proved = db.merge w.proof proved = db.merge w.proof
check proved.error in {AristoError(0),MergeNodeKeyCachedAlready} check proved.error in {AristoError(0),MergeNodeKeyCachedAlready}
check w.proof.len == proved.merged + proved.dups check w.proof.len == proved.merged + proved.dups
check db.lTab.len == lTabLen check db.top.lTab.len == lTabLen
check db.sTab.len == proved.merged + sTabLen check db.top.sTab.len == proved.merged + sTabLen
check proved.merged < db.pAmk.len check proved.merged < db.top.pAmk.len
check proved.merged < db.kMap.len check proved.merged < db.top.kMap.len
# Set up root ID
db.lRoot = db.pAmk.getOrDefault(rootKey, VertexID(0))
if db.lRoot == VertexID(0):
check db.lRoot != VertexID(0)
return
when true and false: when true and false:
noisy.say "***", "sample <", n, "/", lstLen-1, ">", if 0 < w.proof.len:
" groups=", count, " nLeafs=", leafs.len, " proved=", proved noisy.say "***", "sample(2) <", n, "/", lstLen-1, ">",
" groups=", count, " nLeafs=", leafs.len, " proved=", proved,
" db-dump\n ", db.pp
let let
merged = db.merge leafs merged = db.merge leafs
#merged = db.mergeStepwise(leafs, noisy=false) #merged = db.mergeStepwise(leafs, noisy=false)
check db.lTab.len == lTabLen + merged.merged check db.top.lTab.len == lTabLen + merged.merged
check merged.merged + merged.dups == leafs.len check merged.merged + merged.dups == leafs.len
if w.proof.len == 0: if w.proof.len == 0:
let vtx = db.getVtx VertexID(1) let vtx = db.getVtx VertexID(1)
#check db.pAmk.getOrDefault(rootKey, VertexID(0)) != VertexID(0)
block: block:
if merged.error notin {AristoError(0), MergeLeafPathCachedAlready}: if merged.error notin {AristoError(0), MergeLeafPathCachedAlready}:
@ -246,21 +250,24 @@ proc test_mergeProofAndKvpList*(
check merged.error in {AristoError(0), MergeLeafPathCachedAlready} check merged.error in {AristoError(0), MergeLeafPathCachedAlready}
return return
#noisy.say "***", "sample ", n, "/", lstLen-1, " merged=", merged when true and false:
noisy.say "***", "sample(3) <", n, "/", lstLen-1, ">",
" groups=", count, " nLeafs=", leafs.len, " merged=", merged,
" db-dump\n ", db.pp
block: block:
let let
preRoot = db.lRoot
preDb = db.pp(sTabOk=false, lTabOk=false) preDb = db.pp(sTabOk=false, lTabOk=false)
rc = db.hashify rootKey rc = db.hashify() # noisy=true)
# Handle known errors # Handle known errors
if oopsTab.hasKey(testId): if oopsTab.hasKey(testId):
if rc.isOK: if rc.isOK:
check rc.isErr check rc.isErr
return return
if oopsTab[testId] != rc.error: let oops = (VertexID(oopsTab[testId][0]), oopsTab[testId][1])
check oopsTab[testId] == rc.error if oops != rc.error:
check oops == rc.error
return return
# Otherwise, check for correctness # Otherwise, check for correctness
@ -269,20 +276,21 @@ proc test_mergeProofAndKvpList*(
" testId=", testId, " testId=", testId,
" groups=", count, " groups=", count,
"\n pre-DB", "\n pre-DB",
" lRoot=", preRoot.pp,
"\n ", preDb, "\n ", preDb,
"\n --------", "\n --------",
"\n ", db.pp "\n ", db.pp
check rc.error == (VertexID(0),AristoError(0)) check rc.error == (VertexID(0),AristoError(0))
return return
if db.lRoot == VertexID(0): when true and false:
check db.lRoot != VertexID(0) noisy.say "***", "sample(4) <", n, "/", lstLen-1, ">",
return " groups=", count,
" db-dump\n ", db.pp
when true and false: when true and false:
noisy.say "***", "sample <", n, "/", lstLen-1, ">", noisy.say "***", "sample(5) <", n, "/", lstLen-1, ">",
" groups=", count, " proved=", proved.pp, " merged=", merged.pp " groups=", count, " proved=", proved.pp, " merged=", merged.pp
true
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End

View File

@ -26,7 +26,8 @@ import
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
proc fwdWalkLeafsCompleteDB( proc fwdWalkLeafsCompleteDB(
db: AristoDbRef; db: AristoDb;
root: VertexID;
tags: openArray[NodeTag]; tags: openArray[NodeTag];
noisy: bool; noisy: bool;
): tuple[visited: int, error: AristoError] = ): tuple[visited: int, error: AristoError] =
@ -34,10 +35,10 @@ proc fwdWalkLeafsCompleteDB(
tLen = tags.len tLen = tags.len
var var
error = AristoError(0) error = AristoError(0)
tag = (tags[0].u256 div 2).NodeTag lky = LeafKey(root: root, path: NodeTag(tags[0].u256 div 2))
n = 0 n = 0
while true: while true:
let rc = tag.nearbyRight(db.lRoot, db) # , noisy) let rc = lky.nearbyRight(db)
#noisy.say "=================== ", n #noisy.say "=================== ", n
if rc.isErr: if rc.isErr:
if rc.error != NearbyBeyondRange: if rc.error != NearbyBeyondRange:
@ -54,34 +55,35 @@ proc fwdWalkLeafsCompleteDB(
error = AristoError(1) error = AristoError(1)
check n < tlen check n < tlen
break break
if rc.value != tags[n]: if rc.value.path != tags[n]:
noisy.say "***", "[", n, "/", tLen-1, "] fwd-walk -- leafs differ,", noisy.say "***", "[", n, "/", tLen-1, "] fwd-walk -- leafs differ,",
" got=", rc.value.pp(db), " got=", rc.value.pp(db),
" wanted=", tags[n].pp(db) #, " db-dump\n ", db.pp " wanted=", tags[n].pp(db) #, " db-dump\n ", db.pp
error = AristoError(1) error = AristoError(1)
check rc.value == tags[n] check rc.value.path == tags[n]
break break
if rc.value < high(NodeTag): if rc.value.path < high(NodeTag):
tag = (rc.value.u256 + 1).NodeTag lky.path = NodeTag(rc.value.path.u256 + 1)
n.inc n.inc
(n,error) (n,error)
proc revWalkLeafsCompleteDB( proc revWalkLeafsCompleteDB(
db: AristoDbRef; db: AristoDb;
root: VertexID;
tags: openArray[NodeTag]; tags: openArray[NodeTag];
noisy: bool; noisy: bool;
): tuple[visited: int, error: AristoError] = ): tuple[visited: int, error: AristoError] =
let let
tLen = tags.len tLen = tags.len
var var
error = AristoError(0) error = AristoError(0)
delta = ((high(UInt256) - tags[^1].u256) div 2) delta = ((high(UInt256) - tags[^1].u256) div 2)
tag = (tags[^1].u256 + delta).NodeTag lky = LeafKey(root: root, path: NodeTag(tags[^1].u256 + delta))
n = tLen-1 n = tLen-1
while true: # and false: while true: # and false:
let rc = tag.nearbyLeft(db.lRoot, db) # , noisy) let rc = lky.nearbyLeft(db)
if rc.isErr: if rc.isErr:
if rc.error != NearbyBeyondRange: if rc.error != NearbyBeyondRange:
noisy.say "***", "[", n, "/", tLen-1, "] rev-walk error=", rc.error noisy.say "***", "[", n, "/", tLen-1, "] rev-walk error=", rc.error
@ -97,15 +99,15 @@ proc revWalkLeafsCompleteDB(
error = AristoError(1) error = AristoError(1)
check 0 <= n check 0 <= n
break break
if rc.value != tags[n]: if rc.value.path != tags[n]:
noisy.say "***", "[", n, "/", tLen-1, "] rev-walk -- leafs differ,", noisy.say "***", "[", n, "/", tLen-1, "] rev-walk -- leafs differ,",
" got=", rc.value.pp(db), " got=", rc.value.pp(db),
" wanted=", tags[n]..pp(db) #, " db-dump\n ", db.pp " wanted=", tags[n]..pp(db) #, " db-dump\n ", db.pp
error = AristoError(1) error = AristoError(1)
check rc.value == tags[n] check rc.value.path == tags[n]
break break
if low(NodeTag) < rc.value: if low(NodeTag) < rc.value.path:
tag = (rc.value.u256 - 1).NodeTag lky.path = NodeTag(rc.value.path.u256 - 1)
n.dec n.dec
(tLen-1 - n, error) (tLen-1 - n, error)
@ -118,44 +120,57 @@ proc test_nearbyKvpList*(
noisy: bool; noisy: bool;
list: openArray[ProofTrieData]; list: openArray[ProofTrieData];
resetDb = false; resetDb = false;
) = ): bool =
var var
db = AristoDbRef() db: AristoDb
rootKey = NodeKey.default
tagSet: HashSet[NodeTag] tagSet: HashSet[NodeTag]
count = 0
for n,w in list: for n,w in list:
if resetDb: if resetDb or w.root != rootKey:
db = AristoDbRef() db.top = AristoLayerRef()
rootKey = w.root
tagSet.reset tagSet.reset
count = 0
count.inc
let let
lstLen = list.len lstLen = list.len
lTabLen = db.lTab.len lTabLen = db.top.lTab.len
leafs = w.kvpLst leafs = w.kvpLst.mapRootVid VertexID(1) # merge into main trie
added = db.merge leafs added = db.merge leafs
check added.error == AristoError(0) if added.error != AristoError(0):
check db.lTab.len == lTabLen + added.merged check added.error == AristoError(0)
return
check db.top.lTab.len == lTabLen + added.merged
check added.merged + added.dups == leafs.len check added.merged + added.dups == leafs.len
for w in leafs: for kvp in leafs:
tagSet.incl w.pathTag tagSet.incl kvp.leafKey.path
let let
tags = tagSet.toSeq.sorted tags = tagSet.toSeq.sorted
fwdWalk = db.fwdWalkLeafsCompleteDB(tags, noisy=true) rootVid = leafs[0].leafKey.root
revWalk = db.revWalkLeafsCompleteDB(tags, noisy=true) fwdWalk = db.fwdWalkLeafsCompleteDB(rootVid, tags, noisy=true)
revWalk = db.revWalkLeafsCompleteDB(rootVid, tags, noisy=true)
check fwdWalk.error == AristoError(0) check fwdWalk.error == AristoError(0)
check revWalk.error == AristoError(0) check revWalk.error == AristoError(0)
check fwdWalk == revWalk check fwdWalk == revWalk
if {fwdWalk.error, revWalk.error} != {AristoError(0)}: if {fwdWalk.error, revWalk.error} != {AristoError(0)}:
noisy.say "***", "<", n, "/", lstLen-1, "> db dump", noisy.say "***", "<", n, "/", lstLen-1, ">",
" groups=", count, " db dump",
"\n post-state ", db.pp, "\n post-state ", db.pp,
"\n" "\n"
break return
#noisy.say "***", "sample ",n,"/",lstLen-1, " visited=", fwdWalk.visited #noisy.say "***", "sample ",n,"/",lstLen-1, " visited=", fwdWalk.visited
true
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------

View File

@ -12,12 +12,14 @@
## Aristo (aka Patricia) DB trancoder test ## Aristo (aka Patricia) DB trancoder test
import import
std/sequtils,
eth/common, eth/common,
stew/byteutils, stew/byteutils,
unittest2, unittest2,
../../nimbus/db/kvstore_rocksdb, ../../nimbus/db/kvstore_rocksdb,
../../nimbus/db/aristo/[ ../../nimbus/db/aristo/[
aristo_desc, aristo_debug, aristo_error, aristo_transcode, aristo_vid], aristo_constants, aristo_desc, aristo_debug, aristo_error,
aristo_transcode, aristo_vid],
"."/[test_aristo_cache, test_helpers] "."/[test_aristo_cache, test_helpers]
type type
@ -86,7 +88,7 @@ proc test_transcodeAccounts*(
) = ) =
## Transcoder tests on accounts database ## Transcoder tests on accounts database
var var
adb = AristoDbRef() adb = AristoDb(top: AristoLayerRef())
count = -1 count = -1
for (n, key,value) in rocky.walkAllDb(): for (n, key,value) in rocky.walkAllDb():
if stopAfter < n: if stopAfter < n:
@ -106,7 +108,7 @@ proc test_transcodeAccounts*(
# Provide DbRecord with dummy links and expanded payload. Registering the # Provide DbRecord with dummy links and expanded payload. Registering the
# node as vertex and re-converting it does the job # node as vertex and re-converting it does the job
var node = node0.updated(adb) var node = node0.updated(adb)
if node.isError: if node.error != AristoError(0):
check node.error == AristoError(0) check node.error == AristoError(0)
else: else:
case node.vType: case node.vType:
@ -118,13 +120,13 @@ proc test_transcodeAccounts*(
of aristo_desc.Extension: of aristo_desc.Extension:
# key <-> vtx correspondence # key <-> vtx correspondence
check node.key[0] == node0.key[0] check node.key[0] == node0.key[0]
check not node.eVid.isZero check node.eVid != VertexID(0)
of aristo_desc.Branch: of aristo_desc.Branch:
for n in 0..15: for n in 0..15:
# key[n] <-> vtx[n] correspondence # key[n] <-> vtx[n] correspondence
check node.key[n] == node0.key[n] check node.key[n] == node0.key[n]
check node.key[n].isEmpty == node.bVid[n].isZero if (node.key[n]==EMPTY_ROOT_KEY) != (node.bVid[n]==VertexID(0)):
if node.key[n].isEmpty != node.bVid[n].isZero: check (node.key[n]==EMPTY_ROOT_KEY) == (node.bVid[n]==VertexID(0))
echo ">>> node=", node.pp echo ">>> node=", node.pp
# This NIM object must match to the same RLP encoded byte stream # This NIM object must match to the same RLP encoded byte stream
@ -139,7 +141,7 @@ proc test_transcodeAccounts*(
# NIM object <-> DbRecord mapping # NIM object <-> DbRecord mapping
let dbr = node.blobify.getOrEmpty(noisy) let dbr = node.blobify.getOrEmpty(noisy)
var node1 = dbr.deblobify.asNode(adb) var node1 = dbr.deblobify.asNode(adb)
if node1.isError: if node1.error != AristoError(0):
check node1.error == AristoError(0) check node1.error == AristoError(0)
block: block:
@ -175,7 +177,7 @@ proc test_transcodeAccounts*(
proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) = proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
## Transcode VID lists held in `AristoDb` descriptor ## Transcode VID lists held in `AristoDb` descriptor
var td = TesterDesc.init seed var td = TesterDesc.init seed
let db = AristoDbRef() let db = AristoDb(top: AristoLayerRef())
# Add some randum numbers # Add some randum numbers
block: block:
@ -192,8 +194,8 @@ proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
expectedVids += (vid < first).ord expectedVids += (vid < first).ord
db.vidDispose vid db.vidDispose vid
check db.vGen.len == expectedVids check db.top.vGen.len == expectedVids
noisy.say "***", "vids=", db.vGen.len, " discarded=", count-expectedVids noisy.say "***", "vids=", db.top.vGen.len, " discarded=", count-expectedVids
# Serialise/deserialise # Serialise/deserialise
block: block:
@ -201,32 +203,61 @@ proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
# Deserialise # Deserialise
let db1 = block: let db1 = block:
let rc = dbBlob.deblobify AristoDbRef let rc = dbBlob.deblobify AristoDb
if rc.isErr: if rc.isErr:
check rc.isOk check rc.isOk
rc.get(otherwise = AristoDbRef()) rc.get(otherwise = AristoDb(top: AristoLayerRef()))
check db.vGen == db1.vGen check db.top.vGen == db1.top.vGen
# Make sure that recycled numbers are fetched first # Make sure that recycled numbers are fetched first
let topVid = db.vGen[^1] let topVid = db.top.vGen[^1]
while 1 < db.vGen.len: while 1 < db.top.vGen.len:
let w = db.vidFetch() let w = db.vidFetch()
check w < topVid check w < topVid
check db.vGen.len == 1 and db.vGen[0] == topVid check db.top.vGen.len == 1 and db.top.vGen[0] == topVid
# Get some consecutive vertex IDs # Get some consecutive vertex IDs
for n in 0 .. 5: for n in 0 .. 5:
let w = db.vidFetch() let w = db.vidFetch()
check w == topVid + n check w == topVid + n
check db.vGen.len == 1 check db.top.vGen.len == 1
# Repeat last test after clearing the cache # Repeat last test after clearing the cache
db.vGen.setLen(0) db.top.vGen.setLen(0)
for n in 0 .. 5: for n in 0 .. 5:
let w = db.vidFetch() let w = db.vidFetch()
check w == 1.VertexID + n check w == VertexID(2) + n # VertexID(1) is default root ID
check db.vGen.len == 1 check db.top.vGen.len == 1
# Recycling and re-org tests
db.top.vGen = @[8, 7, 3, 4, 5, 9].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3, 4, 5, 7].mapIt(VertexID(it))
db.top.vGen = @[8, 7, 6, 3, 4, 5, 9].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3].mapIt(VertexID(it))
db.top.vGen = @[5, 4, 3, 7].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[5, 4, 3, 7].mapIt(VertexID(it))
db.top.vGen = @[5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[5].mapIt(VertexID(it))
db.top.vGen = @[3, 5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[3, 5].mapIt(VertexID(it))
db.top.vGen = @[4, 5].mapIt(VertexID(it))
db.vidReorg()
check db.top.vGen == @[4].mapIt(VertexID(it))
db.top.vGen.setLen(0)
db.vidReorg()
check db.top.vGen.len == 0
# ------------------------------------------------------------------------------ # ------------------------------------------------------------------------------
# End # End