mirror of https://github.com/status-im/nimbus-eth1.git
synced 2025-01-11 21:04:11 +00:00

Aristo db merkle hashify functionality added (#1593)

* Keep vertex ID generator state with each db-layer
  why: The vertex ID generator state is part of the difference to the below layer
* Move otherwise unused source to test directory
* Add Merkle hash generator, also:
  * Verification facility for debugging
  * Empty Merkle key hashes encoded as `EMPTY_ROOT_HASH`

This commit is contained in:
parent cd78458123
commit 2fc349feb9
@ -11,10 +11,9 @@
{.push raises: [].}

import
  std/[algorithm, sets, sequtils, strutils, tables],
  std/[algorithm, sequtils, sets, strutils, tables],
  eth/[common, trie/nibbles],
  stew/byteutils,
  ../../sync/snap/range_desc,
  "."/[aristo_constants, aristo_desc, aristo_error, aristo_hike, aristo_vid]

# ------------------------------------------------------------------------------

@ -25,24 +24,28 @@ proc toPfx(indent: int): string =
  "\n" & " ".repeat(indent)

proc keyVidUpdate(db: AristoDbRef, key: NodeKey, vid: VertexID): string =
  if not key.isZero and
  if not key.isEmpty and
     not vid.isZero and
     not db.isNil:
    db.pAmk.withValue(key, vidRef):
      if vidRef[] != vid:
        result = "(!)"
      return
    db.xMap.withValue(key, vidRef):
      if vidRef[] == vid:
        result = "(!)"
      return
    block:
      let keyVid = db.pAmk.getOrDefault(key, VertexID(0))
      if keyVid != VertexID(0):
        if keyVid != vid:
          result = "(!)"
        return
    block:
      let keyVid = db.xMap.getOrDefault(key, VertexID(0))
      if keyVid != VertexID(0):
        if keyVid != vid:
          result = "(!)"
        return
    db.xMap[key] = vid

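(A note on the rewrite above, which recurs throughout this commit: Nim's `tables.withValue` template requires mutable `var` table access, whereas `getOrDefault` also works on immutable table arguments, which lines up with the `var Table` parameters being dropped elsewhere in this diff. A minimal sketch of the two lookup styles, with `uint64` standing in for `VertexID` and zero as the "absent" sentinel:)

  import std/tables
  var tab = {"vtx": 7u64}.toTable
  tab.withValue("vtx", vidRef):        # before: requires var table access
    echo vidRef[]
  let vid = tab.getOrDefault("vtx", 0u64)
  if vid != 0u64:                      # after: plain read-only lookup
    echo vid
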
proc squeeze(s: string; hex = false; ignLen = false): string =
  ## For long strings print `begin..end` only
  if hex:
    let n = (s.len + 1) div 2
    result = if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. s.len-1]
    result = if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. ^1]
    if not ignLen:
      result &= "[" & (if 0 < n: "#" & $n else: "") & "]"
  elif s.len <= 30:

@ -51,7 +54,7 @@ proc squeeze(s: string; hex = false; ignLen = false): string =
    result = if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]
    if not ignLen:
      result &= "..(" & $s.len & ")"
    result &= ".." & s[s.len-16 ..< s.len]
    result &= ".." & s[s.len-16 .. ^1]

proc stripZeros(a: string): string =
  for n in 0 ..< a.len:

@ -62,78 +65,68 @@ proc stripZeros(a: string): string =
proc ppVid(vid: VertexID): string =
  if vid.isZero: "ø" else: "$" & vid.uint64.toHex.stripZeros.toLowerAscii

proc ppKey(key: NodeKey, db = AristoDbRef(nil)): string =
  if key.isZero:
    return "ø"
proc vidCode(key: NodeKey, db: AristoDbRef): uint64 =
  if not db.isNil and
     key != EMPTY_ROOT_KEY and
     key != EMPTY_CODE_KEY:
    block:
      let vid = db.pAmk.getOrDefault(key, VertexID(0))
      if vid != VertexID(0):
        return vid.uint64
    block:
      let vid = db.xMap.getOrDefault(key, VertexID(0))
      if vid != VertexID(0):
        return vid.uint64

proc ppKey(key: NodeKey, db: AristoDbRef): string =
  if key == NodeKey.default:
    return "£ø"
  if key == EMPTY_ROOT_KEY:
    return "£r"
  if key == EMPTY_CODE_KEY:
    return "£c"

  if not db.isNil:
    db.pAmk.withValue(key, pRef):
      return "£" & pRef[].uint64.toHex.stripZeros.toLowerAscii
    db.xMap.withValue(key, xRef):
      return "£" & xRef[].uint64.toHex.stripZeros.toLowerAscii
    block:
      let vid = db.pAmk.getOrDefault(key, VertexID(0))
      if vid != VertexID(0):
        return "£" & vid.uint64.toHex.stripZeros.toLowerAscii
    block:
      let vid = db.xMap.getOrDefault(key, VertexID(0))
      if vid != VertexID(0):
        return "£" & vid.uint64.toHex.stripZeros.toLowerAscii

  "%" & key.ByteArray32
           .mapIt(it.toHex(2)).join.tolowerAscii
           .squeeze(hex=true,ignLen=true)

proc ppRootKey(a: NodeKey, db = AristoDbRef(nil)): string =
proc ppRootKey(a: NodeKey, db: AristoDbRef): string =
  if a != EMPTY_ROOT_KEY:
    return a.ppKey(db)

proc ppCodeKey(a: NodeKey, db = AristoDbRef(nil)): string =
proc ppCodeKey(a: NodeKey, db: AristoDbRef): string =
  if a != EMPTY_CODE_KEY:
    return a.ppKey(db)

proc ppPathTag(tag: NodeTag, db = AristoDbRef(nil)): string =
proc ppPathTag(tag: NodeTag, db: AristoDbRef): string =
  ## Raw key, for referenced key dump use `key.pp(db)` below
  if not db.isNil:
    db.lTab.withValue(tag, keyPtr):
      return "@" & keyPtr[].ppVid
    let vid = db.lTab.getOrDefault(tag, VertexID(0))
    if vid != VertexID(0):
      return "@" & vid.ppVid

  "@" & tag.to(NodeKey).ByteArray32
           .mapIt(it.toHex(2)).join.toLowerAscii
           .squeeze(hex=true,ignLen=true)

proc ppPathPfx(pfx: NibblesSeq): string =
  ($(pfx & EmptyNibbleSeq)).squeeze(hex=true)
  let s = $pfx
  if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. ^1] & ":" & $s.len

proc ppNibble(n: int8): string =
  if n < 0: "ø" elif n < 10: $n else: n.toHex(1).toLowerAscii

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc keyToVtxID*(db: AristoDbRef, key: NodeKey): VertexID =
  ## Associate a vertex ID with the argument `key` for pretty printing.
  if not key.isZero and
     key != EMPTY_ROOT_KEY and
     key != EMPTY_CODE_KEY and
     not db.isNil:

    db.xMap.withValue(key, vidPtr):
      return vidPtr[]

    result = db.vidFetch()
    db.xMap[key] = result

proc pp*(vid: NodeKey): string =
  vid.ppKey

proc pp*(tag: NodeTag, db = AristoDbRef(nil)): string =
  tag.ppPathTag(db)

proc pp*(vid: VertexID): string =
  vid.ppVid

proc pp*(vid: openArray[VertexID]): string =
  "[" & vid.mapIt(it.ppVid).join(",") & "]"

proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string =
proc ppPayload(p: PayloadRef, db: AristoDbRef): string =
  if p.isNil:
    result = "n/a"
  else:

@ -147,25 +140,129 @@ proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string =
    result &= p.account.storageRoot.to(NodeKey).ppRootKey(db) & ","
    result &= p.account.codeHash.to(NodeKey).ppCodeKey(db) & ")"

proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string =
proc ppVtx(nd: VertexRef, db: AristoDbRef, vid: VertexID): string =
  if nd.isNil:
    result = "n/a"
  else:
    result = ["l(", "x(", "b("][nd.vType.ord]
    if db.isNil or vid.isZero or vid in db.pPrf:
      result = ["l(", "x(", "b("][nd.vType.ord]
    else:
      result = ["ł(", "€(", "þ("][nd.vType.ord]
    case nd.vType:
    of Leaf:
      result &= nd.lPfx.ppPathPfx & "," & nd.lData.pp(db)
      result &= nd.lPfx.ppPathPfx & "," & nd.lData.ppPayload(db)
    of Extension:
      result &= nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid
    of Branch:
      result &= "["
      for n in 0..15:
        if not nd.bVid[n].isZero:
          result &= nd.bVid[n].ppVid
        result &= ","
      result[^1] = ']'
        if n < 15:
          result &= ","
    result &= ")"

proc ppXMap*(
    db: AristoDbRef;
    kMap: Table[VertexID,NodeKey];
    pAmk: Table[NodeKey,VertexID];
    indent: int;
      ): string =

  let dups = pAmk.values.toSeq.toCountTable.pairs.toSeq
                 .filterIt(1 < it[1]).toTable

  proc ppNtry(n: uint64): string =
    let
      vid = n.VertexID
      key = kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
    var s = "(" & vid.ppVid & ","
    if key != EMPTY_ROOT_KEY:
      s &= key.ppKey(db)

      let keyVid = pAmk.getOrDefault(key, VertexID(0))
      if keyVid == VertexID(0):
        s &= ",ø"
      elif keyVid != vid:
        s &= "," & keyVid.ppVid

      let count = dups.getOrDefault(vid, 0)
      if 0 < count:
        s &= ",*" & $count
    else:
      s &= "£r(!)"
    s & "),"

  var cache: seq[(uint64,uint64,bool)]
  for vid in toSeq(kMap.keys).mapIt(it.uint64).sorted.mapIt(it.VertexID):
    let key = kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
    if key != EMPTY_ROOT_KEY:
      cache.add (vid.uint64, key.vidCode(db), 0 < dups.getOrDefault(vid, 0))
      let keyVid = pAmk.getOrDefault(key, VertexID(0))
      if keyVid != VertexID(0) and keyVid != vid:
        cache[^1][2] = true
    else:
      cache.add (vid.uint64, 0u64, true)

  result = "{"
  if 0 < cache.len:
    let
      pfx = indent.toPfx
    var
      (i, r) = (0, cache[0])
    result &= cache[i][0].ppNtry
    for n in 1 ..< cache.len:
      let w = cache[n]
      r[0].inc
      r[1].inc
      if r != w or w[2]:
        if i+1 != n:
          result &= ".. " & cache[n-1][0].ppNtry
        result &= pfx & " " & cache[n][0].ppNtry
        (i, r) = (n, w)
    if i < cache.len - 1:
      if i+1 != cache.len - 1:
        result &= ".. "
      else:
        result &= pfx & " "
      result &= cache[^1][0].ppNtry
    result[^1] = '}'
  else:
    result &= "}"

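(The `dups` expression above packs duplicate detection into a single pipeline; the same idiom in isolation, a sketch with plain integers standing in for vertex IDs:)

  import std/[sequtils, tables]
  let vids = @[1, 2, 2, 3, 3, 3]
  # count occurrences, keep only the values seen more than once
  let dups = vids.toCountTable.pairs.toSeq.filterIt(1 < it[1]).toTable
  doAssert dups == {2: 2, 3: 3}.toTable
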
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc keyToVtxID*(db: AristoDbRef, key: NodeKey): VertexID =
  ## Associate a vertex ID with the argument `key` for pretty printing.
  if not db.isNil and
     key != EMPTY_ROOT_KEY and
     key != EMPTY_CODE_KEY:
    let vid = db.xMap.getOrDefault(key, VertexID(0))
    if vid != VertexID(0):
      result = vid
    else:
      result = db.vidFetch()
      db.xMap[key] = result

proc pp*(vid: NodeKey, db = AristoDbRef(nil)): string =
  vid.ppKey(db)

proc pp*(tag: NodeTag, db = AristoDbRef(nil)): string =
  tag.ppPathTag(db)

proc pp*(vid: VertexID): string =
  vid.ppVid

proc pp*(vid: openArray[VertexID]): string =
  "[" & vid.mapIt(it.ppVid).join(",") & "]"

proc pp*(p: PayloadRef, db = AristoDbRef(nil)): string =
  p.ppPayload(db)

proc pp*(nd: VertexRef, db = AristoDbRef(nil)): string =
  nd.ppVtx(db, VertexID(0))

proc pp*(nd: NodeRef, db = AristoDbRef(nil)): string =
  if nd.isNil:
    result = "n/a"

@ -178,26 +275,28 @@ proc pp*(nd: NodeRef, db = AristoDbRef(nil)): string =
      result &= $nd.lPfx.ppPathPfx & "," & nd.lData.pp(db)

    of Extension:
      result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & "," & nd.key[0].ppKey
      result &= $nd.ePfx.ppPathPfx & "," & nd.eVid.ppVid & ","
      result &= nd.key[0].ppKey(db)
      result &= db.keyVidUpdate(nd.key[0], nd.eVid)

    of Branch:
      result &= "["
      for n in 0..15:
        if not nd.bVid[n].isZero or not nd.key[n].isZero:
        if not nd.bVid[n].isZero or nd.key[n] != EMPTY_ROOT_KEY:
          result &= nd.bVid[n].ppVid
        result &= db.keyVidUpdate(nd.key[n], nd.bVid[n]) & ","
      result[^1] = ']'

      result &= ",["
      for n in 0..15:
        if not nd.bVid[n].isZero or not nd.key[n].isZero:
        if not nd.bVid[n].isZero or nd.key[n] != EMPTY_ROOT_KEY:
          result &= nd.key[n].ppKey(db)
        result &= ","
      result[^1] = ']'
    result &= ")"

proc pp*(
    sTab: var Table[VertexID,VertexRef];
    sTab: Table[VertexID,VertexRef];
    db = AristoDbRef(nil);
    indent = 4;
      ): string =
@ -205,42 +304,63 @@ proc pp*(
  var first = true
  result = "{"
  for vid in toSeq(sTab.keys).mapIt(it.uint64).sorted.mapIt(it.VertexID):
    sTab.withValue(vid, vtxPtr):
    let vtx = sTab.getOrDefault(vid, VertexRef(nil))
    if vtx != VertexRef(nil):
      if first:
        first = false
      else:
        result &= pfx & " "
      result &= "(" & vid.ppVid & "," & vtxPtr[].pp(db) & "),"
  if result[^1] == ',':
    result[^1] = '}'
  else:
    result &= "}"
      result &= "(" & vid.ppVid & "," & vtx.ppVtx(db,vid) & ")"
  result &= "}"

proc pp*(lTab: var Table[NodeTag,VertexID]; indent = 4): string =
proc pp*(
    lTab: Table[NodeTag,VertexID];
    indent = 4;
      ): string =
  let pfx = indent.toPfx
  var first = true
  result = "{"
  for tag in toSeq(lTab.keys).mapIt(it.UInt256).sorted.mapIt(it.NodeTag):
    lTab.withValue(tag,vidPtr):
    let vid = lTab.getOrDefault(tag, VertexID(0))
    if vid != VertexID(0):
      if first:
        first = false
      else:
        result &= pfx & " "
      result &= "(" & tag.ppPathTag & "," & vidPtr[].ppVid & "),"
  if result[^1] == ',':
    result[^1] = '}'
  else:
    result &= "}"
      result &= "(" & tag.ppPathTag(nil) & "," & vid.ppVid & ")"
  result &= "}"

proc pp*(sDel: HashSet[VertexID]): string =
proc pp*(vGen: seq[VertexID]): string =
  result = "["
  for vid in vGen:
    result &= vid.ppVid & ","
  if result[^1] == ',':
    result[^1] = ']'
  else:
    result &= "]"

proc pp*(pPrf: HashSet[VertexID]): string =
  result = "{"
  for vid in toSeq(sDel.items).mapIt(it.uint64).sorted.mapIt(it.VertexID):
  for vid in pPrf.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
    result &= vid.ppVid & ","
  if result[^1] == ',':
    result[^1] = '}'
  else:
    result &= "}"

proc pp*(
    leg: Leg;
    db = AristoDbRef(nil);
      ): string =
  result = " (" & leg.wp.vid.ppVid & ","
  if not db.isNil:
    let key = db.kMap.getOrDefault(leg.wp.vid, EMPTY_ROOT_KEY)
    if key != EMPTY_ROOT_KEY:
      result &= key.ppKey(db)
    else:
      result &= "ø"
  result &= "," & $leg.nibble.ppNibble & "," & leg.wp.vtx.pp(db) & ")"

proc pp*(
    hike: Hike;
    db = AristoDbRef(nil);

@ -250,20 +370,14 @@ proc pp*(
  var first = true
  result = "[(" & hike.root.ppVid & ")"
  for leg in hike.legs:
    result &= "," & pfx & " (" & leg.wp.vid.ppVid
    if not db.isNil:
      var key = "ø"
      db.kMap.withValue(leg.wp.vid, keyPtr):
        key = keyPtr[].ppKey(db)
      result &= "," & key
    result &= "," & $leg.nibble.ppNibble & "," & leg.wp.vtx.pp(db) & ")"
  result &= "," & pfx & " (" & $hike.tail & ")"
    result &= "," & pfx & leg.pp(db)
  result &= "," & pfx & " (" & hike.tail.ppPathPfx & ")"
  if hike.error != AristoError(0):
    result &= "," & pfx & " (" & $hike.error & ")"
  result &= "]"

proc pp*(
    kMap: var Table[VertexID,NodeKey];
    kMap: Table[VertexID,NodeKey];
    db = AristoDbRef(nil);
    indent = 4;
      ): string =
@ -271,38 +385,82 @@ proc pp*(
  var first = true
  result = "{"
  for vid in toSeq(kMap.keys).mapIt(it.uint64).sorted.mapIt(it.VertexID):
    kMap.withValue(vid, keyPtr):
    let key = kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
    if key != EMPTY_ROOT_KEY:
      if first:
        first = false
      else:
        result &= pfx & " "
      result &= "(" & vid.ppVid & "," & keyPtr[].ppKey(db) & "),"
      result &= "(" & vid.ppVid & "," & key.ppKey(db) & "),"
  if result[^1] == ',':
    result[^1] = '}'
  else:
    result &= "}"

proc pp*(
    pAmk: var Table[NodeKey,VertexID];
    pAmk: Table[NodeKey,VertexID];
    db = AristoDbRef(nil);
    indent = 4;
      ): string =
  let pfx = indent.toPfx
  var first = true
  var
    rev = pAmk.pairs.toSeq.mapIt((it[1],it[0])).toTable
    first = true
  result = "{"
  for key in toSeq(pAmk.keys).mapIt(it.to(NodeTag).UInt256)
                             .sorted.mapIt(it.NodeTag.to(NodeKey)):
    pAmk.withValue(key,vidPtr):
  for vid in rev.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
    let key = rev.getOrDefault(vid, EMPTY_ROOT_KEY)
    if key != EMPTY_ROOT_KEY:
      if first:
        first = false
      else:
        result &= pfx & " "
      result &= "(" & key.ppKey(db) & "," & vidPtr[].ppVid & "),"
      result &= "(" & key.ppKey(db) & "," & vid.ppVid & "),"
  if result[^1] == ',':
    result[^1] = '}'
  else:
    result &= "}"

# ---------------------

proc pp*(
    db: AristoDbRef;
    sTabOk = true;
    lTabOk = true;
    kMapOk = true;
    pPrfOk = true;
    indent = 4;
      ): string =
  let
    pfx1 = max(indent-1,0).toPfx
    pfx2 = indent.toPfx
    labelOk = 1 < sTabOk.ord + lTabOk.ord + kMapOk.ord + pPrfOk.ord
  var
    pfy1 = ""
    pfy2 = ""

  proc doPrefix(s: string): string =
    var rc: string
    if labelOk:
      rc = pfy1 & s & pfx2
      pfy1 = pfx1
    else:
      rc = pfy2
      pfy2 = pfx2
    rc

  if sTabOk:
    let info = "sTab(" & $db.sTab.len & ")"
    result &= info.doPrefix & db.sTab.pp(db,indent)
  if lTabOk:
    let info = "lTab(" & $db.lTab.len & "),root=" & db.lRoot.ppVid
    result &= info.doPrefix & db.lTab.pp(indent)
  if kMapOk:
    let info = "kMap(" & $db.kMap.len & "," & $db.pAmk.len & ")"
    result &= info.doPrefix & db.ppXMap(db.kMap,db.pAmk,indent)
  if pPrfOk:
    let info = "pPrf(" & $db.pPrf.len & ")"
    result &= info.doPrefix & db.pPrf.pp

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

@ -25,8 +25,12 @@ import
  std/[sets, tables],
  eth/[common, trie/nibbles],
  stew/results,
  ../../sync/snap/range_desc,
  ./aristo_error
  "."/[aristo_constants, aristo_error]

import
  ../../sync/snap/range_desc
export
  ByteArray32, NodeKey, NodeTag, digestTo, hash, to, `==`, `$`

type
  VertexID* = distinct uint64

@ -111,17 +115,18 @@
    ## Hexary trie plus helper structures
    sTab*: Table[VertexID,VertexRef]   ## Structural vertex table making up a trie
    lTab*: Table[NodeTag,VertexID]     ## Direct access, path to leaf node
    sDel*: HashSet[VertexID]           ## Deleted vertices
    lRoot*: VertexID                   ## Root vertex for `lTab[]`
    kMap*: Table[VertexID,NodeKey]     ## Merkle hash key mapping
    pAmk*: Table[NodeKey,VertexID]     ## Reverse mapper for data import
    pPrf*: HashSet[VertexID]           ## Locked vertices (from proof vertices)
    vGen*: seq[VertexID]               ## Unique vertex ID generator

    case cascaded*: bool               ## Cascaded delta databases, tx layer
    of true:
      level*: int                      ## Positive number of stack layers
      stack*: AristoDbRef              ## Down the chain, not `nil`
      base*: AristoDbRef               ## Backend level descriptor
      base*: AristoDbRef               ## Backend level descriptor, maybe unneeded
    else:
      vidGen*: seq[VertexID]           ## Unique vertex ID generator
      backend*: AristoBackendRef       ## backend database (maybe `nil`)

    # Debugging data below, might go away in future

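(The `kMap`/`pAmk` pair above is a table and its inverse. A toy sketch of that invariant, with plain types standing in for `VertexID` and `NodeKey`; names here are illustrative, not the Aristo API:)

  import std/tables
  var
    kMap: Table[uint64, string]   # vertex ID -> Merkle key
    pAmk: Table[string, uint64]   # Merkle key -> vertex ID (reverse mapper)
  proc label(vid: uint64; key: string) =
    kMap[vid] = key
    pAmk[key] = vid               # keep both directions in sync
  label(1, "aa..ff")
  doAssert pAmk[kMap[1]] == 1
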
@ -204,8 +209,11 @@ proc `==`*(a, b: NodeRef): bool =
# Public helpers, miscellaneous functions
# ------------------------------------------------------------------------------

proc isZero*[T: NodeKey|VertexID](a: T): bool =
  a == typeof(a).default
proc isZero*(a: VertexID): bool =
  a == VertexID(0)

proc isEmpty*(a: NodeKey): bool =
  a == EMPTY_ROOT_KEY

proc isError*(a: NodeRef): bool =
  a.error != AristoError(0)

@ -11,6 +11,7 @@
type
  AristoError* = enum
    NothingSerious = 0
    GenericError

    # Rlp decoder, `fromRlpRecord()`
    Rlp2Or17ListEntries

@ -71,10 +72,35 @@ type
    MergeBrLinkVtxPfxTooShort
    MergeBranchGarbledNibble
    MergeBranchGarbledTail
    MergeBranchLinkLockedKey
    MergeBranchLinkProofModeLock
    MergeBranchProofModeLock
    MergeBranchRootExpected
    MergeLeafGarbledHike
    MergeLeafPathCachedAlready
    MergeNonBranchProofModeLock
    MergeRootBranchLinkBusy

    MergeNodeKeyZero
    MergeNodeKeyEmpty
    MergeNodeKeyCachedAlready

    # Update `Merkle` hashes `hashify()`
    HashifyCannotComplete
    HashifyCannotHashRoot
    HashifyExistingHashMismatch
    HashifyLeafToRootAllFailed
    HashifyRootHashMismatch

    HashifyCheckRevCountMismatch
    HashifyCheckRevHashMismatch
    HashifyCheckRevHashMissing
    HashifyCheckRevVtxDup
    HashifyCheckRevVtxMissing
    HashifyCheckVidVtxMismatch
    HashifyCheckVtxCountMismatch
    HashifyCheckVtxHashMismatch
    HashifyCheckVtxHashMissing
    HashifyCheckVtxIncomplete
    HashifyCheckVtxLockWithoutKey

    # End

@ -16,7 +16,6 @@
import
  std/tables,
  stew/results,
  ../../sync/snap/range_desc,
  "."/[aristo_desc, aristo_error]

type

@ -32,7 +31,8 @@ proc getVtxCascaded*(
    db: AristoDbRef;
    vid: VertexID;
      ): Result[VertexRef,AristoError] =
  ## Cascaded lookup for data record down the transaction cascade.
  ## Cascaded lookup for data record down the transaction cascade. This
  ## function will return a potential error code from the backend (if any).
  db.sTab.withValue(vid, vtxPtr):
    return ok vtxPtr[]

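(The transaction cascade walked here is a stack of layers, each shadowing the one below. A generic sketch of the lookup order, with plain tables rather than the actual implementation:)

  import std/[options, tables]
  proc cascadedLookup[K, V](layers: seq[Table[K, V]]; key: K): Option[V] =
    for t in layers:          # topmost (most recent) layer first
      if key in t:
        return some t[key]
    none(V)                   # the real code falls through to the backend
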
nimbus/db/aristo/aristo_hashify.nim (326 lines, new file)
@ -0,0 +1,326 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Patricia Trie Merkleisation
## ========================================
##
## For the current state of the `Patricia Trie`, keys (equivalent to hashes)
## are associated with the vertex IDs. Existing key associations are checked
## (i.e. recalculated and compared) unless the ID is locked. In the latter
## case, the key is assumed to be correct without checking.
##
## The association algorithm is an optimised version of:
##
## * For all leaf vertices, label them with their parent vertex so that there
##   are chains from the leaves to the root vertex.
##
## * Apply a width-first traversal starting with the set of leaf vertices,
##   compiling the keys to associate with by hashing the current vertex.
##
## Apparently, keys (aka hashes) can be compiled for leaf vertices. For the
## other vertices, the keys can be compiled if all the children keys are
## known, which is assured by the nature of the width-first traversal method.
##
## For production, this algorithm is slightly optimised:
##
## * For each leaf vertex, calculate the chain from the leaf to the root
##   vertex.
##   + Starting at the leaf, calculate the key for each vertex towards the
##     root vertex as long as possible.
##   + Stash the rest of the partial chain to be completed later.
##
## * While there are partial chains left, use the ends towards the leaf
##   vertices and calculate the remaining keys (which results in a
##   width-first traversal, again.)

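(A self-contained sketch of that width-first sweep, with plain integers and strings standing in for `VertexID` and `NodeKey`; the names `children`, `digest` and `hashify` below are illustrative only, not the Aristo API:)

  import std/[sequtils, sets, strutils, tables]

  # Toy trie topology: parent -> children (leaf vertices have no entry)
  let children = {1: @[2, 3], 3: @[4, 5]}.toTable

  proc digest(vid: int; sub: seq[string]): string =
    ## Stand-in for RLP-encoding a vertex and hashing it into a key
    "h" & $vid & "(" & sub.join(",") & ")"

  proc hashify(root: int): Table[int, string] =
    ## Label a vertex once all its children are labelled, re-queueing
    ## the rest: the leaf-to-root width-first sweep described above
    var pending = [root].toHashSet
    while 0 < pending.len:
      var redo: HashSet[int]
      for vid in pending:
        let subVids = children.getOrDefault(vid, @[])
        if subVids.allIt(it in result):
          result[vid] = digest(vid, subVids.mapIt(result[it]))
        else:
          redo.incl vid
          for w in subVids:
            if w notin result: redo.incl w
      pending = redo
    result

  echo hashify(1)   # leaves 2, 4, 5 first, then vertex 3, finally the root
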
{.push raises: [].}

import
  std/[algorithm, sequtils, sets, tables],
  chronicles,
  eth/common,
  stew/results,
  ./aristo_debug,
  "."/[aristo_constants, aristo_desc, aristo_error, aristo_get, aristo_hike,
       aristo_transcode]

logScope:
  topics = "aristo-hashify"

# ------------------------------------------------------------------------------
# Private helper, debugging
# ------------------------------------------------------------------------------

proc pp(t: Table[VertexID,VertexID]): string =
  result = "{"
  for a in toSeq(t.keys).mapIt(it.uint64).sorted.mapIt(it.VertexID):
    let b = t.getOrDefault(a, VertexID(0))
    if b != VertexID(0):
      result &= "(" & a.pp & "," & b.pp & "),"
  if result[^1] == ',':
    result[^1] = '}'
  else:
    result &= "}"

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc toNode(vtx: VertexRef; db: AristoDbRef): Result[NodeRef,void] =
  case vtx.vType:
  of Leaf:
    return ok NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
  of Branch:
    let node = NodeRef(vType: Branch, bVid: vtx.bVid)
    for n in 0 .. 15:
      if vtx.bVid[n].isZero:
        node.key[n] = EMPTY_ROOT_KEY
      else:
        let key = db.kMap.getOrDefault(vtx.bVid[n], EMPTY_ROOT_KEY)
        if key != EMPTY_ROOT_KEY:
          node.key[n] = key
          continue
        return err()
    return ok node
  of Extension:
    if not vtx.eVid.isZero:
      let key = db.kMap.getOrDefault(vtx.eVid, EMPTY_ROOT_KEY)
      if key != EMPTY_ROOT_KEY:
        let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vtx.eVid)
        node.key[0] = key
        return ok node

proc leafToRootHasher(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Hike for labelling leaf..root
      ): Result[int,AristoError] =
  ## Returns the index of the first node that could not be hashed
  for n in (hike.legs.len-1).countDown(0):
    let
      wp = hike.legs[n].wp
      rc = wp.vtx.toNode db
    if rc.isErr:
      return ok n
    # Vertices marked proof nodes need not be checked
    if wp.vid in db.pPrf:
      continue

    # Check against existing key, or store new key
    let key = rc.value.encode.digestTo(NodeKey)
    let vfyKey = db.kMap.getOrDefault(wp.vid, EMPTY_ROOT_KEY)
    if vfyKey == EMPTY_ROOT_KEY:
      db.pAmk[key] = wp.vid
      db.kMap[wp.vid] = key
    elif key != vfyKey:
      let error = HashifyExistingHashMismatch
      debug "hashify failed", vid=wp.vid, key, expected=vfyKey, error
      return err(error)

  ok -1 # all could be hashed

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc hashifyClear*(
    db: AristoDbRef;                   # Database, top layer
    locksOnly = false;                 # If `true`, then clear only proof locks
      ) =
  ## Clear all `Merkle` hashes from the argument database layer `db`.
  if not locksOnly:
    db.pAmk.clear
    db.kMap.clear
  db.pPrf.clear


proc hashify*(
    db: AristoDbRef;                   # Database, top layer
      ): Result[NodeKey,AristoError] =
  ## Add keys to the `Patricia Trie` so that it becomes a `Merkle Patricia
  ## Tree`. If successful, the function returns the key (aka Merkle hash) of
  ## the root vertex.
  var
    fullPath = false
    rootKey: NodeKey

    # Width-first leaf-to-root traversal structure
    backLink: Table[VertexID,VertexID]
    downMost: Table[VertexID,VertexID]

  for (pathTag,vid) in db.lTab.pairs:
    let hike = pathTag.hikeUp(db.lRoot,db)
    if hike.error != AristoError(0):
      return err(hike.error)

    # Hash as much of the `hike` as possible
    let n = block:
      let rc = db.leafToRootHasher hike
      if rc.isErr:
        return err(rc.error)
      rc.value

    if 0 < n:
      # Backtrack and register remaining nodes
      #
      # hike.legs: (leg[0], leg[1], .., leg[n-1], leg[n], ..)
      #               |       |           |         |
      #               | <---- |   <----   | <----   |
      #               |                   |         |
      #               |    backLink[]     | downMost
      #
      downMost[hike.legs[n].wp.vid] = hike.legs[n-1].wp.vid
      for u in (n-1).countDown(1):
        backLink[hike.legs[u].wp.vid] = hike.legs[u-1].wp.vid

    elif not fullPath:
      rootKey = db.kMap.getOrDefault(hike.legs[0].wp.vid, EMPTY_ROOT_KEY)
      fullPath = (rootKey != EMPTY_ROOT_KEY)

  # At least one full path leaf..root should have succeeded with labelling
  if not fullPath:
    return err(HashifyLeafToRootAllFailed)

  # Update remaining hashes
  var n = 0 # for logging
  while 0 < downMost.len:
    var
      redo: Table[VertexID,VertexID]
      done: HashSet[VertexID]

    for (fromVid,toVid) in downMost.pairs:
      # Try to convert vertex to a node. This is possible only if all link
      # references have Merkle hashes.
      #
      # Also `db.getVtx(fromVid)` => not nil as it was fetched earlier, already
      let rc = db.getVtx(fromVid).toNode(db)
      if rc.isErr:
        # Cannot complete with this node, so do it later
        redo[fromVid] = toVid

      else:
        # Register Hashes
        let nodeKey = rc.value.encode.digestTo(NodeKey)

        # Update Merkle hash (aka `nodeKey`)
        let fromKey = db.kMap.getOrDefault(fromVid, EMPTY_ROOT_KEY)
        if fromKey == EMPTY_ROOT_KEY:
          db.pAmk[nodeKey] = fromVid
          db.kMap[fromVid] = nodeKey
        elif nodeKey != fromKey:
          let error = HashifyExistingHashMismatch
          debug "hashify failed", vid=fromVid, key=nodeKey,
            expected=fromKey.pp, error
          return err(error)

        done.incl fromVid

        # Proceed with back link
        let nextVid = backLink.getOrDefault(toVid, VertexID(0))
        if nextVid != VertexID(0):
          redo[toVid] = nextVid

    # Make sure that the algorithm proceeds
    if done.len == 0:
      let error = HashifyCannotComplete
      return err(error)

    # Clean up dups from `backLink` and restart `downMost`
    for vid in done.items:
      backLink.del vid
    downMost = redo

  ok rootKey

# ------------------------------------------------------------------------------
# Public debugging functions
# ------------------------------------------------------------------------------

proc hashifyCheck*(
    db: AristoDbRef;                   # Database, top layer
    relax = false;                     # Check existing hashes only
      ): Result[void,(VertexID,AristoError)] =
  ## Verify that the Merkle hash keys are either completely missing or
  ## match all known vertices on the argument database layer `db`.
  if not relax:
    for (vid,vtx) in db.sTab.pairs:
      let rc = vtx.toNode(db)
      if rc.isErr:
        return err((vid,HashifyCheckVtxIncomplete))

      let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
      if key == EMPTY_ROOT_KEY:
        return err((vid,HashifyCheckVtxHashMissing))
      if key != rc.value.encode.digestTo(NodeKey):
        return err((vid,HashifyCheckVtxHashMismatch))

      let revVid = db.pAmk.getOrDefault(key, VertexID(0))
      if revVid == VertexID(0):
        return err((vid,HashifyCheckRevHashMissing))
      if revVid != vid:
        return err((vid,HashifyCheckRevHashMismatch))

  elif 0 < db.pPrf.len:
    for vid in db.pPrf:
      let vtx = db.sTab.getOrDefault(vid, VertexRef(nil))
      if vtx == VertexRef(nil):
        return err((vid,HashifyCheckVidVtxMismatch))

      let rc = vtx.toNode(db)
      if rc.isErr:
        return err((vid,HashifyCheckVtxIncomplete))

      let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
      if key == EMPTY_ROOT_KEY:
        return err((vid,HashifyCheckVtxHashMissing))
      if key != rc.value.encode.digestTo(NodeKey):
        return err((vid,HashifyCheckVtxHashMismatch))

      let revVid = db.pAmk.getOrDefault(key, VertexID(0))
      if revVid == VertexID(0):
        return err((vid,HashifyCheckRevHashMissing))
      if revVid != vid:
        return err((vid,HashifyCheckRevHashMismatch))

  else:
    for (vid,key) in db.kMap.pairs:
      let vtx = db.getVtx vid
      if not vtx.isNil:
        let rc = vtx.toNode(db)
        if rc.isOk:
          if key != rc.value.encode.digestTo(NodeKey):
            return err((vid,HashifyCheckVtxHashMismatch))

          let revVid = db.pAmk.getOrDefault(key, VertexID(0))
          if revVid == VertexID(0):
            return err((vid,HashifyCheckRevHashMissing))
          if revVid != vid:
            return err((vid,HashifyCheckRevHashMismatch))

  if db.pAmk.len != db.kMap.len:
    var knownKeys: HashSet[VertexID]
    for (key,vid) in db.pAmk.pairs:
      if not db.kMap.hasKey(vid):
        return err((vid,HashifyCheckRevVtxMissing))
      if vid in knownKeys:
        return err((vid,HashifyCheckRevVtxDup))
      knownKeys.incl vid
    return err((VertexID(0),HashifyCheckRevCountMismatch)) # should not apply(!)

  if 0 < db.pAmk.len and not relax and db.pAmk.len != db.sTab.len:
    return err((VertexID(0),HashifyCheckVtxCountMismatch))

  for vid in db.pPrf:
    if not db.kMap.hasKey(vid):
      return err((vid,HashifyCheckVtxLockWithoutKey))

  ok()

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
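(Putting the new module together with the `aristo_merge` changes from this same commit, a typical call sequence would look roughly like this; a sketch with error handling trimmed:)

  import ./aristo/[aristo_desc, aristo_error, aristo_hashify, aristo_merge]

  proc stateRootOf(leafs: openArray[LeafKVP]): NodeKey =
    let db = AristoDbRef.init()
    let (merged, dups, error) = db.merge leafs   # build the trie vertices
    doAssert error == AristoError(0)
    let rc = db.hashify()                        # add the Merkle keys
    doAssert rc.isOk
    rc.value                                     # Merkle hash of the root
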
@ -12,7 +12,6 @@

import
  eth/[common, trie/nibbles],
  ../../sync/snap/range_desc,
  "."/[aristo_constants, aristo_desc, aristo_error, aristo_get, aristo_path]

type

@ -29,7 +29,11 @@ proc init*(T: type AristoDbRef): T =

proc init*(T: type AristoDbRef; db: T): T =
  ## Cascaded constructor, a new layer is pushed and returned.
  result = T(cascaded: true, stack: db)
  result = T(
    cascaded: true,
    lRoot:    db.lRoot,
    vGen:     db.vGen,
    stack:    db)
  if db.cascaded:
    result.level = db.level + 1
    result.base = db.base

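(This is the commit-message point about keeping the vertex ID generator state with each db-layer: a pushed layer starts with its own copy of `lRoot` and `vGen`, so allocating vertex IDs no longer reaches down to the base descriptor. A usage sketch:)

  let
    base  = AristoDbRef.init()
    layer = AristoDbRef.init(base)   # copies base.lRoot and base.vGen
  doAssert layer.cascaded and layer.stack == base
  discard layer.vidFetch()           # updates layer.vGen only, not base.vGen
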
@ -16,8 +16,7 @@
import
  std/tables,
  stew/results,
  ../../../sync/snap/range_desc,
  ".."/[aristo_desc, aristo_error]
  ".."/[aristo_constants, aristo_desc, aristo_error]

type
  MemBackendRef = ref object

@ -31,15 +30,17 @@ type
proc getVtxFn(db: MemBackendRef): GetVtxFn =
  result =
    proc(vid: VertexID): Result[VertexRef,AristoError] =
      db.sTab.withValue(vid, vtxPtr):
        return ok vtxPtr[]
      let vtx = db.sTab.getOrDefault(vid, VertexRef(nil))
      if vtx != VertexRef(nil):
        return ok vtx
      err(MemBeVtxNotFound)

proc getKeyFn(db: MemBackendRef): GetKeyFn =
  result =
    proc(vid: VertexID): Result[NodeKey,AristoError] =
      db.kMap.withValue(vid, keyPtr):
        return ok keyPtr[]
      let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
      if key != EMPTY_ROOT_KEY:
        return ok key
      err(MemBeKeyNotFound)

proc putVtxFn(db: MemBackendRef): PutVtxFn =

@ -8,20 +8,39 @@
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Patricia Trie builder, raw node insertion
## ======================================================
##
## This module merges `NodeTag` values as hexary lookup paths into the
## `Patricia Trie`. When changing vertices (aka nodes without Merkle hashes),
## associated (but separated) Merkle hashes will be deleted unless locked.
## Instead of deleting locked hashes, an error is returned.
##
## Also, nodes (vertices plus Merkle hashes) can be added, which is needed for
## boundary proofing after `snap/1` download. The vertices are split from the
## nodes and stored as-is on the table holding `Patricia Trie` entries. The
## hashes are stored in a separate table and the vertices are labelled
## `locked`.

{.push raises: [].}

import
  std/tables,
  std/[sequtils, sets, tables],
  chronicles,
  eth/[common, trie/nibbles],
  stew/results,
  ../../sync/snap/range_desc,
  ./aristo_debug,
  "."/[aristo_desc, aristo_error, aristo_get, aristo_hike, aristo_path,
       aristo_vid]
  ../../sync/protocol,
  "."/[aristo_constants, aristo_desc, aristo_error, aristo_get, aristo_hike,
       aristo_path, aristo_transcode, aristo_vid]

logScope:
  topics = "aristo-leaf"
  topics = "aristo-merge"

type
  LeafKVP* = object
    ## Generalised key-value pair
    pathTag*: NodeTag                  ## `Patricia Trie` path root-to-leaf
    payload*: PayloadRef               ## Leaf data payload

# ------------------------------------------------------------------------------
# Private getters & setters

@ -45,6 +64,18 @@ proc `xPfx=`(vtx: VertexRef, val: NibblesSeq) =
  of Branch:
    doAssert vtx.vType != Branch # Ooops


proc clearMerkleKeys(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Implied vertex IDs to clear hashes for
    vid: VertexID;                     # Additional vertex IDs to clear
      ) =
  for vid in hike.legs.mapIt(it.wp.vid) & @[vid]:
    let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
    if key != EMPTY_ROOT_KEY:
      db.kMap.del vid
      db.pAmk.del key

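(The idea behind `clearMerkleKeys`: editing a vertex stales every Merkle key along the path reaching it, so those keys are dropped now and recomputed by `hashify()` later. A toy sketch of that staleness propagation, with plain tables and an invented `parent` link map rather than the hike-based walk above:)

  import std/tables
  proc invalidate(keys: var Table[int, string];
                  parent: Table[int, int]; vid: int) =
    var v = vid
    while true:           # drop stale keys from `vid` up to the root
      keys.del v
      if v notin parent: break
      v = parent[v]
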
# -----------

proc insertBranch(

@ -57,21 +88,25 @@ proc insertBranch(
  ##
  ## Insert `Extension->Branch` vertex chain or just a `Branch` vertex
  ##
  ##   --(linkID)--> <linkVtx>
  ##   ... --(linkID)--> <linkVtx>
  ##
  ##   <-- immutable --> <---- mutable ----> ..
  ##
  ## will become either
  ##
  ##   --(linkID)-->
  ##        <extVtx>             --(local1)-->
  ##          <forkVtx>[linkInx] --(local2)--> <linkVtx>
  ##          <forkVtx>[linkInx] --(local2)--> <linkVtx*>
  ##                   [leafInx] --(local3)--> <leafVtx>
  ##
  ## or in case that there is no common prefix
  ##
  ##   --(linkID)-->
  ##          <forkVtx>[linkInx] --(local2)--> <linkVtx>
  ##          <forkVtx>[linkInx] --(local2)--> <linkVtx*>
  ##                   [leafInx] --(local3)--> <leafVtx>
  ##
  ## *) vertex was slightly modified or removed if obsolete `Extension`
  ##
  let n = linkVtx.xPfx.sharedPrefixLen hike.tail

  # Verify minimum requirements

@ -91,21 +126,37 @@ proc insertBranch(

  # Install `forkVtx`
  block:
    let local = db.vidFetch
    # Clear Merkle hashes (aka node keys) unless proof mode.
    if db.pPrf.len == 0:
      db.clearMerkleKeys(hike, linkID)
    elif linkID in db.pPrf:
      return Hike(error: MergeNonBranchProofModeLock)

    # Update vertex path lookup
    if linkVtx.vType == Leaf:
      # Update vertex path lookup
      let
        path = hike.legsTo(NibblesSeq) & linkVtx.lPfx
        rc = path.pathToTag()
      if rc.isErr:
        error "Branch link leaf path garbled", linkID, path
        debug "Branch link leaf path garbled", linkID, path
        return Hike(error: MergeBrLinkLeafGarbled)
      db.lTab[rc.value] = local # update leaf path lookup cache

      forkVtx.bVid[linkInx] = local
      db.sTab[local] = linkVtx
      linkVtx.xPfx = linkVtx.xPfx.slice(1+n)
      let local = db.vidFetch
      db.lTab[rc.value] = local # update leaf path lookup cache
      db.sTab[local] = linkVtx
      linkVtx.lPfx = linkVtx.lPfx.slice(1+n)
      forkVtx.bVid[linkInx] = local

    elif linkVtx.ePfx.len == n + 1:
      # This extension `linkVtx` becomes obsolete
      forkVtx.bVid[linkInx] = linkVtx.eVid

    else:
      let local = db.vidFetch
      db.sTab[local] = linkVtx
      linkVtx.ePfx = linkVtx.ePfx.slice(1+n)
      forkVtx.bVid[linkInx] = local

  block:
    let local = db.vidFetch
    forkVtx.bVid[leafInx] = local

@ -151,25 +202,32 @@ proc insertBranch(
  result.legs.add leafLeg


proc appendBranchAndLeaf(
proc concatBranchAndLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has a `Branch` vertex
    brID: VertexID;                    # Branch vertex ID from `Hike` top
    brVid: VertexID;                   # Branch vertex ID from `Hike` top
    brVtx: VertexRef;                  # Branch vertex, linked to from `Hike`
    payload: PayloadRef;               # Leaf data payload
      ): Hike =
  ## Append the branch vertex passed as `(brID,brVtx)` and then a `Leaf`
  ## vertex derived from the argument `payload`.

  ##
  if hike.tail.len == 0:
    return Hike(error: MergeBranchGarbledTail)

  let nibble = hike.tail[0].int8
  if not brVtx.bVid[nibble].isZero:
    return Hike(error: MergeRootBranchLinkBusy)

  # Clear Merkle hashes (aka node keys) unless proof mode.
  if db.pPrf.len == 0:
    db.clearMerkleKeys(hike, brVid)
  elif brVid in db.pPrf:
    return Hike(error: MergeBranchProofModeLock) # Ooops

  # Append branch node
  result = Hike(root: hike.root, legs: hike.legs)
  result.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brID), nibble: nibble)
  result.legs.add Leg(wp: VidVtxPair(vtx: brVtx, vid: brVid), nibble: nibble)

  # Append leaf node
  let

@ -186,11 +244,10 @@ proc appendBranchAndLeaf(
# Private functions
# ------------------------------------------------------------------------------

proc hikeTopBranchAppendLeaf(
proc topIsBranchAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has a `Branch` vertex
    payload: PayloadRef;               # Leaf data payload
    proofMode: bool;                   # May have dangling links
      ): Hike =
  ## Append a `Leaf` vertex derived from the argument `payload` after the top
  ## leg of the `hike` argument which is assumed to refer to a `Branch`

@ -205,34 +262,42 @@ proc hikeTopBranchAppendLeaf(
  let
    branch = hike.legs[^1].wp.vtx
    linkID = branch.bVid[nibble]
    linkVtx = db.getVtx linkID

    # Busy slot, check for dangling link
    linkVtx = block:
      let rc = db.getVtxCascaded linkID
      if rc.isErr and not proofMode:
        # Not much else that can be done here
        error "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid,
          nibble, linkID, leafPfx=hike.tail
      if rc.isErr or rc.value.isNil:
        # Reuse placeholder entry in table
        let vtx = VertexRef(
          vType: Leaf,
          lPfx: hike.tail,
          lData: payload)
        db.sTab[linkID] = vtx
        result = Hike(root: hike.root, legs: hike.legs)
        result.legs.add Leg(wp: VidVtxPair(vid: linkID, vtx: vtx), nibble: -1)
        return
      rc.value
  if linkVtx.isNil:
    #
    #  .. <branch>[nibble] --(linkID)--> nil
    #
    #  <-------- immutable ------------> <---- mutable ----> ..
    #
    if db.pPrf.len == 0:
      # Not much else that can be done here
      debug "Dangling leaf link, reused", branch=hike.legs[^1].wp.vid,
        nibble, linkID, leafPfx=hike.tail

    # Reuse placeholder entry in table
    let vtx = VertexRef(
      vType: Leaf,
      lPfx: hike.tail,
      lData: payload)
    db.sTab[linkID] = vtx
    result = Hike(root: hike.root, legs: hike.legs)
    result.legs.add Leg(wp: VidVtxPair(vid: linkID, vtx: vtx), nibble: -1)
    return

  # Slot link to a branch vertex should be handled by `hikeUp()`
  if linkVtx.vType == Branch:
    return db.appendBranchAndLeaf(hike, linkID, linkVtx, payload)
    # Slot link to a branch vertex should be handled by `hikeUp()`
    #
    #  .. <branch>[nibble] --(linkID)--> <linkVtx>[]
    #
    #  <-------- immutable ------------> <---- mutable ----> ..
    #
    return db.concatBranchAndLeaf(hike, linkID, linkVtx, payload)

  db.insertBranch(hike, linkID, linkVtx, payload)


proc hikeTopExtensionAppendLeaf(
proc topIsExtAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # Path top has an `Extension` vertex
    payload: PayloadRef;               # Leaf data payload

@ -242,28 +307,50 @@ proc hikeTopExtensionAppendLeaf(
  ## vertex. If successful, the function returns the
  ## updated `hike` trail.
  let
    parVtx = hike.legs[^1].wp.vtx
    parID = hike.legs[^1].wp.vid
    brVtx = db.getVtx parVtx.eVid
    extVtx = hike.legs[^1].wp.vtx
    extVid = hike.legs[^1].wp.vid
    brVid = extVtx.eVid
    brVtx = db.getVtx brVid

  result = Hike(root: hike.root, legs: hike.legs)

  if brVtx.isNil:
    # Blind vertex, promote to leaf node.
    #
    #  --(extVid)--> <extVtx> --(brVid)--> nil
    #
    #  <-------- immutable -------------->
    #
    let vtx = VertexRef(
      vType: Leaf,
      lPfx: parVtx.ePfx & hike.tail,
      lPfx: extVtx.ePfx & hike.tail,
      lData: payload)
    db.sTab[parID] = vtx
    db.sTab[extVid] = vtx
    result.legs[^1].wp.vtx = vtx

  elif brVtx.vType != Branch:
    return Hike(error: MergeBranchRootExpected)

  else:
    let nibble = hike.tail[0].int8
    if not brVtx.bVid[nibble].isZero:
    let
      nibble = hike.tail[0].int8
      linkID = brVtx.bVid[nibble]
    #
    # Required
    #
    #  --(extVid)--> <extVtx> --(brVid)--> <brVtx>[nibble] --(linkID)--> nil
    #
    #  <-------- immutable --------------> <-------- mutable ----------> ..
    #
    if not linkID.isZero:
      return Hike(error: MergeRootBranchLinkBusy)

    # Clear Merkle hashes (aka node keys) unless proof mode
    if db.pPrf.len == 0:
      db.clearMerkleKeys(hike, brVid)
    elif brVid in db.pPrf:
      return Hike(error: MergeBranchProofModeLock)

    let
      vid = db.vidFetch
      vtx = VertexRef(

@ -276,7 +363,7 @@ proc hikeTopExtensionAppendLeaf(
    result.legs.add Leg(wp: VidVtxPair(vtx: vtx, vid: vid), nibble: -1)


proc emptyHikeAppendLeaf(
proc topIsEmptyAddLeaf(
    db: AristoDbRef;                   # Database, top layer
    hike: Hike;                        # No path legs
    rootVtx: VertexRef;                # Root vertex

@ -309,83 +396,97 @@ proc emptyHikeAppendLeaf(

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    pathTag: NodeTag;                  # `Patricia Trie` path root-to-leaf
    payload: PayloadRef;               # Leaf data payload
    root = VertexID(0);                # Root node reference
    proofMode = false;                 # May have dangling links
    noisy = false;
    leaf: LeafKVP;                     # Leaf item to add to the database
      ): Hike =
  ## Merge the argument `leaf` record into the top level vertex table of the
  ## database `db`. The argument `pathKey` is used to index the leaf on the
  ## `Patricia Tree`. The argument `payload` is stored with the leaf vertex in
  ## the database unless the leaf vertex exists already.

  ## Merge the argument `leaf` key-value-pair into the top level vertex table
  ## of the database `db`. The field `pathKey` of the `leaf` argument is used
  ## to index the leaf vertex on the `Patricia Trie`. The field `payload` is
  ## stored with the leaf vertex in the database unless the leaf vertex exists
  ## already.
  ##
  proc setUpAsRoot(vid: VertexID): Hike =
    let
      vtx = VertexRef(
        vType: Leaf,
        lPfx: pathTag.pathAsNibbles,
        lData: payload)
        lPfx: leaf.pathTag.pathAsNibbles,
        lData: leaf.payload)
      wp = VidVtxPair(vid: vid, vtx: vtx)
    db.sTab[vid] = vtx
    Hike(root: vid, legs: @[Leg(wp: wp, nibble: -1)])

  if root.isZero:
    if noisy: echo ">>> merge (1)"
  if db.lRoot.isZero:
    result = db.vidFetch.setUpAsRoot() # bootstrap: new root ID
    db.lRoot = result.root

  elif db.lTab.haskey leaf.pathTag:
    result.error = MergeLeafPathCachedAlready

  else:
    let hike = pathTag.hikeUp(root, db)
    if noisy: echo "<<< merge (2) >>>", "\n ", hike.pp(db)
    let hike = leaf.pathTag.hikeUp(db.lRoot, db)

    if 0 < hike.legs.len:
      case hike.legs[^1].wp.vtx.vType:
      of Branch:
        if noisy: echo ">>> merge (3)"
        result = db.hikeTopBranchAppendLeaf(hike, payload, proofMode)
        result = db.topIsBranchAddLeaf(hike, leaf.payload)
      of Leaf:
        if noisy: echo ">>> merge (4)"
        if 0 < hike.tail.len:          # `Leaf` vertex problem?
          return Hike(error: MergeLeafGarbledHike)
        result = hike
      of Extension:
        if noisy: echo ">>> merge (5)"
        result = db.hikeTopExtensionAppendLeaf(hike, payload)
        result = db.topIsExtAddLeaf(hike, leaf.payload)

    else:
      # Empty hike
      let rootVtx = db.getVtx root
      let rootVtx = db.getVtx db.lRoot

      if rootVtx.isNil:
        if noisy: echo ">>> merge (6)"
        result = root.setUpAsRoot()    # bootstrap for existing root ID
        result = db.lRoot.setUpAsRoot() # bootstrap for existing root ID
      else:
        if noisy: echo ">>> merge (7)"
        result = db.emptyHikeAppendLeaf(hike, rootVtx, payload)
        result = db.topIsEmptyAddLeaf(hike,rootVtx,leaf.payload)

  # Update leaf access cache
  if result.error == AristoError(0):
    db.lTab[pathTag] = result.legs[^1].wp.vid
    db.lTab[leaf.pathTag] = result.legs[^1].wp.vid

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    leafs: openArray[LeafKVP];         # Leaf items to add to the database
      ): tuple[merged: int, dups: int, error: AristoError] =
  ## Variant of `merge()` for leaf lists.
  var (merged, dups) = (0, 0)
  for n,w in leafs:
    let hike = db.merge w
    if hike.error == AristoError(0):
      merged.inc
    elif hike.error == MergeLeafPathCachedAlready:
      dups.inc
    else:
      return (n,dups,hike.error)

  (merged, dups, AristoError(0))

# ---------------------

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    nodeKey: NodeKey;                  # Merkle hash of node
    node: NodeRef;                     # Node derived from RLP representation
      ): Result[VertexID,AristoError] =
  ## Merge a node key expanded from its RLP representation into the database.
  ## The function merges a node key `nodeKey` expanded from its RLP
  ## representation into the `Aristo Trie` database. The vertex is split off
  ## from the node and stored separately. So are the Merkle hashes. The
  ## vertex is labelled `locked`.
  ##
  ## The `node` argument is *not* checked, whether the vertex IDs have been
  ## allocated, already. If the node comes straight from the `decode()` RLP
  ## decoder as expected, these vertex IDs will be all zero.
  ##
  ## There is some rudimentary check whether the `node` is consistent. It is
  ## *not* checked, whether the vertex IDs have been allocated, already. If
  ## the node comes straight from the `decode()` RLP decoder, these vertex IDs
  ## will be all zero.

  proc register(key: NodeKey): VertexID =
    db.pAmk.withValue(key,vidPtr):
      return vidPtr[]
    let vid = db.vidFetch
    db.pAmk[key] = vid
    db.kMap[vid] = key
    var vid = db.pAmk.getOrDefault(key, VertexID(0))
    if vid == VertexID(0):
      vid = db.vidFetch
      db.pAmk[key] = vid
      db.kMap[vid] = key
    vid

  # Check whether the record is correct

@ -393,13 +494,13 @@ proc merge*(
    return err(node.error)

  # Verify `nodeKey`
  if nodeKey.isZero:
    return err(MergeNodeKeyZero)
  if nodeKey == EMPTY_ROOT_KEY:
    return err(MergeNodeKeyEmpty)

  # Check whether the node exists, already
  db.pAmk.withValue(nodeKey,vidPtr):
    if db.sTab.hasKey vidPtr[]:
      return ok vidPtr[]
  let nodeVid = db.pAmk.getOrDefault(nodeKey, VertexID(0))
  if nodeVid != VertexID(0) and db.sTab.hasKey nodeVid:
    return err(MergeNodeKeyCachedAlready)

  let
    vid = nodeKey.register

@ -409,22 +510,48 @@ proc merge*(
  of Leaf:
    discard
  of Extension:
    if not node.key[0].isZero:
      db.pAmk.withValue(node.key[0],vidPtr):
        vtx.eVid = vidPtr[]
      do:
    if not node.key[0].isEmpty:
      let eVid = db.pAmk.getOrDefault(node.key[0], VertexID(0))
      if eVid != VertexID(0):
        vtx.eVid = eVid
      else:
        vtx.eVid = node.key[0].register
  of Branch:
    for n in 0..15:
      if not node.key[n].isZero:
        db.pAmk.withValue(node.key[n],vidPtr):
          vtx.bVid[n] = vidPtr[]
        do:
      if not node.key[n].isEmpty:
        let bVid = db.pAmk.getOrDefault(node.key[n], VertexID(0))
        if bVid != VertexID(0):
          vtx.bVid[n] = bVid
        else:
          vtx.bVid[n] = node.key[n].register

  db.pPrf.incl vid
  db.sTab[vid] = vtx
  ok vid

proc merge*(
    db: AristoDbRef;                   # Database, top layer
    proof: openArray[SnapProof];       # RLP encoded node records
      ): tuple[merged: int, dups: int, error: AristoError]
      {.gcsafe, raises: [RlpError].} =
  ## The function merges the argument `proof` list of RLP encoded node records
  ## into the `Aristo Trie` database. This function is intended to be used with
  ## the proof nodes as returned by `snap/1` messages.
  var (merged, dups) = (0, 0)
  for n,w in proof:
    let
      key = w.Blob.digestTo(NodeKey)
      node = w.Blob.decode(NodeRef)
      rc = db.merge(key, node)
    if rc.isOK:
      merged.inc
    elif rc.error == MergeNodeKeyCachedAlready:
      dups.inc
    else:
      return (n, dups, rc.error)

  (merged, dups, AristoError(0))

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

@ -13,8 +13,7 @@
import
  eth/[common, trie/nibbles],
  stew/results,
  ../../sync/snap/range_desc,
  "."/[aristo_constants, aristo_error]
  "."/[aristo_constants, aristo_desc, aristo_error]

# Info snippet (just a reminder to keep somewhere)
#

@ -14,7 +14,6 @@ import
  std/[bitops, sequtils],
  eth/[common, trie/nibbles],
  stew/results,
  ../../sync/snap/range_desc,
  "."/[aristo_constants, aristo_desc, aristo_error]

# ------------------------------------------------------------------------------

@ -25,6 +24,16 @@ proc aristoError(error: AristoError): NodeRef =
  ## Allows returning de
  NodeRef(vType: Leaf, error: error)

proc aInit(key: var NodeKey; data: openArray[byte]): bool =
  ## Import argument `data` into `key` which must have length either `32`, or
  ## `0`. The latter case is equivalent to an all zero byte array of size `32`.
  if data.len == 32:
    (addr key.ByteArray32[0]).copyMem(unsafeAddr data[0], data.len)
    return true
  elif data.len == 0:
    key = EMPTY_ROOT_KEY
    return true

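(Expected behaviour of the helper above, as a sketch; `aInit` is module-private, so this is illustrative only:)

  var key: NodeKey
  doAssert key.aInit newSeq[byte](32)      # 32 bytes: copied verbatim
  doAssert key.aInit newSeq[byte](0)       # empty: mapped to EMPTY_ROOT_KEY
  doAssert not key.aInit newSeq[byte](3)   # any other length fails
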
# ------------------------------------------------------------------------------
|
||||
# Public RLP transcoder mixins
|
||||
# ------------------------------------------------------------------------------
|
||||
@ -53,7 +62,7 @@ proc read*(
|
||||
return aristoError(RlpBlobExpected)
|
||||
blobs[top] = rlp.read(Blob)
|
||||
of 2 .. 15:
|
||||
if not links[top].init(rlp.read(Blob)):
|
||||
if not links[top].aInit(rlp.read(Blob)):
|
||||
return aristoError(RlpBranchLinkExpected)
|
||||
of 16:
|
||||
if not w.isBlob:
|
||||
@ -81,12 +90,12 @@ proc read*(
|
||||
var node = NodeRef(
|
||||
vType: Extension,
|
||||
ePfx: pathSegment)
|
||||
if not node.key[0].init(blobs[1]):
|
||||
if not node.key[0].aInit(blobs[1]):
|
||||
return aristoError(RlpExtPathEncoding)
|
||||
return node
|
||||
of 17:
|
||||
for n in [0,1]:
|
||||
if not links[n].init(blobs[n]):
|
||||
if not links[n].aInit(blobs[n]):
|
||||
return aristoError(RlpBranchLinkExpected)
|
||||
return NodeRef(
|
||||
vType: Branch,
|
||||
@ -101,7 +110,7 @@ proc append*(writer: var RlpWriter; node: NodeRef) =
|
||||
## Mixin for RLP writer. Note that a `Dummy` node is encoded as an empty
|
||||
## list.
|
||||
proc addNodeKey(writer: var RlpWriter; key: NodeKey) =
|
||||
if key.isZero:
|
||||
if key.isEmpty:
|
||||
writer.append EmptyBlob
|
||||
else:
|
||||
writer.append key.to(Hash256)
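
A sketch of the round trip the two mixins provide, mirroring the writer
pattern used in the cache module below (the `node` variable is an assumed,
fully populated `NodeRef`):

  var w = initRlpWriter()
  w.append node                         # empty keys are written as `EmptyBlob`
  let node1 = w.finish.decode(NodeRef)  # `read` mixin rebuilds the node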
@ -203,7 +212,7 @@ proc blobify*(db: AristoDbRef; data: var Blob) =
  ## 0x40
  ##
  data.setLen(0)
  for w in db.vidGen:
  for w in db.vGen:
    data &= w.uint64.toBytesBE.toSeq
  data.add 0x40u8

@ -287,7 +296,7 @@ proc deblobify*(data: Blob; db: var AristoDbRef): AristoError =
  if db.isNil:
    db = AristoDbRef()
  if data.len == 0:
    db.vidGen = @[1.VertexID]
    db.vGen = @[1.VertexID]
  else:
    if (data.len mod 8) != 1:
      return ADbGarbledSize
@ -295,7 +304,7 @@ proc deblobify*(data: Blob; db: var AristoDbRef): AristoError =
      return ADbWrongType
    for n in 0 ..< (data.len div 8):
      let w = n * 8
      db.vidGen.add (uint64.fromBytesBE data[w ..< w + 8]).VertexID
      db.vGen.add (uint64.fromBytesBE data[w ..< w + 8]).VertexID


proc deblobify*[W: VertexRef|AristoDbRef](
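
A round trip sketch for the ID generator state serialisation above, assuming
the `AristoError` returning `deblobify` variant shown in this hunk (each
`vGen` entry is stored as eight big endian bytes, terminated by the `0x40`
type marker):

  var data: Blob
  db.blobify data                        # n*8 bytes of vertex IDs plus 0x40
  var db1 = AristoDbRef()
  doAssert data.deblobify(db1) == AristoError(0)
  doAssert db1.vGen == db.vGen           # generator state survives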

@ -24,55 +24,43 @@ proc vidFetch*(db: AristoDbRef): VertexID =
  ## Create a new `VertexID`. Reusable *ID*s are kept in a list where the top
  ## entry *ID0* has the property that any other *ID* larger than *ID0* is
  ## also not used on the database.

  # Down the rabbit hole of transaction layers
  let xDb = if db.cascaded: db.base else: db

  case xDb.vidGen.len:
  case db.vGen.len:
  of 0:
    xDb.vidGen = @[2.VertexID]
    db.vGen = @[2.VertexID]
    result = 1.VertexID
  of 1:
    result = xDb.vidGen[^1]
    xDb.vidGen = @[(result.uint64 + 1).VertexID]
    result = db.vGen[^1]
    db.vGen = @[(result.uint64 + 1).VertexID]
  else:
    result = xDb.vidGen[^2]
    xDb.vidGen[^2] = xDb.vidGen[^1]
    xDb.vidGen.setLen(xDb.vidGen.len-1)
    result = db.vGen[^2]
    db.vGen[^2] = db.vGen[^1]
    db.vGen.setLen(db.vGen.len-1)


proc vidPeek*(db: AristoDbRef): VertexID =
  ## Like `vidFetch()` without consuming this *ID*. It will return the *ID*
  ## that would be returned by the `vidFetch()` function.

  # Down the rabbit hole of transaction layers
  let xDb = if db.cascaded: db.base else: db

  case xDb.vidGen.len:
  case db.vGen.len:
  of 0:
    1.VertexID
  of 1:
    xDb.vidGen[^1]
    db.vGen[^1]
  else:
    xDb.vidGen[^2]
    db.vGen[^2]


proc vidDispose*(db: AristoDbRef; vtxID: VertexID) =
proc vidDispose*(db: AristoDbRef; vid: VertexID) =
  ## Recycle the argument `vtxID` which is useful after deleting entries from
  ## the vertex table in order to keep the `VertexID` type key values small.

  # Down the rabbit hole of transaction layers
  let xDb = if db.cascaded: db.base else: db

  if xDb.vidGen.len == 0:
    xDb.vidGen = @[vtxID]
  if db.vGen.len == 0:
    db.vGen = @[vid]
  else:
    let topID = xDb.vidGen[^1]
    # No need to store smaller numbers: all numbers larger than `topID`
    let topID = db.vGen[^1]
    # Only store smaller numbers: all numbers larger than `topID`
    # are free numbers
    if vtxID < topID:
      xDb.vidGen[^1] = vtxID
      xDb.vidGen.add topID
    if vid < topID:
      db.vGen[^1] = vid
      db.vGen.add topID
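
A behavioural sketch of the three functions above on a freshly created
database, following directly from the recycle list invariant (recycled *ID*s
are handed out before the top entry):

  let db = AristoDbRef()
  doAssert db.vidFetch() == 1.VertexID   # vGen is now @[2]
  doAssert db.vidFetch() == 2.VertexID   # vGen is now @[3]
  db.vidDispose 1.VertexID               # vGen is now @[1,3]
  doAssert db.vidPeek() == 1.VertexID    # recycled entry comes first
  doAssert db.vidFetch() == 1.VertexID   # vGen is back to @[3]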

# ------------------------------------------------------------------------------
# End

@ -162,7 +162,7 @@ proc snapDbAccountsRef(cdb:ChainDb; root:Hash256; pers:bool):SnapDbAccountsRef =
# Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------

proc trancodeRunner(noisy = true; sample = accSample; stopAfter = high(int)) =
proc transcodeRunner(noisy = true; sample = accSample; stopAfter = high(int)) =
  let
    accLst = sample.to(seq[UndumpAccounts])
    root = accLst[0].root
@ -175,17 +175,7 @@ proc trancodeRunner(noisy = true; sample = accSample; stopAfter = high(int)) =
  defer:
    db.flushDbs

  suite &"Aristo: transcoding {fileInfo} accounts and proofs for {info}":

    # --- Merging ---

    test &"Merge {accLst.len} account lists to database":
      noisy.test_mergeAccounts accLst.mapIt(it.data)

    test &"Merge {accLst.len} proof & account lists to database":
      noisy.test_mergeProofsAndAccounts accLst

    # --- Transcoding ---
  suite &"Aristo: transcoding {fileInfo} accounts for {info}":

    test &"Transcoding VertexID recycling lists (seed={accLst.len})":
      noisy.test_transcodeVidRecycleLists(accLst.len)
@ -208,12 +198,28 @@ proc trancodeRunner(noisy = true; sample = accSample; stopAfter = high(int)) =
      noisy.showElapsed("test_transcoder()"):
        noisy.test_transcodeAccounts(db.cdb[0].rocksStoreRef, stopAfter)


proc dataRunner(noisy = true; sample = accSample) =
  let
    accLst = sample.to(seq[UndumpAccounts])
    fileInfo = sample.file.splitPath.tail.replace(".txt.gz","")

  suite &"Aristo: accounts data import from {fileInfo}":

    test &"Merge {accLst.len} account lists to database":
      noisy.test_mergeAccounts accLst

    test &"Merge {accLst.len} proof & account lists to database":
      noisy.test_mergeProofsAndAccounts accLst


# ------------------------------------------------------------------------------
# Main function(s)
# ------------------------------------------------------------------------------

proc aristoMain*(noisy = defined(debug)) =
  noisy.trancodeRunner()
  noisy.transcodeRunner()
  noisy.dataRunner()

when isMainModule:
  const
@ -222,9 +228,29 @@ when isMainModule:
  # Borrowed from `test_sync_snap.nim`
  when true: # and false:
    for n,sam in snapTestList:
      noisy.trancodeRunner(sam)
      noisy.transcodeRunner(sam)
    for n,sam in snapTestStorageList:
      noisy.trancodeRunner(sam)
      noisy.transcodeRunner(sam)

  # This one uses dumps from the external `nimbus-eth1-blob` repo
  when true and false:
    import ./test_sync_snap/snap_other_xx
    noisy.showElapsed("dataRunner() @snap_other_xx"):
      for n,sam in snapOtherList:
        noisy.dataRunner(sam)

  # This one uses dumps from the external `nimbus-eth1-blob` repo
  when true and false:
    import ./test_sync_snap/snap_storage_xx
    noisy.showElapsed("dataRunner() @snap_storage_xx"):
      for n,sam in snapStorageList:
        noisy.dataRunner(sam)

  when true: # and false:
    for n,sam in snapTestList:
      noisy.dataRunner(sam)
    for n,sam in snapTestStorageList:
      noisy.dataRunner(sam)

# ------------------------------------------------------------------------------
# End

@ -10,12 +10,14 @@

{.push raises: [].}

## Parked here, currently used only for transcode tests

import
  std/tables,
  eth/common,
  stew/results,
  ../../sync/snap/range_desc,
  "."/[aristo_desc, aristo_error, aristo_transcode, aristo_vid]
  ../../nimbus/db/aristo/[
    aristo_constants, aristo_desc, aristo_error, aristo_transcode, aristo_vid]

# ------------------------------------------------------------------------------
# Private helpers
@ -40,8 +42,9 @@ proc convertPartially(
      vType: Extension,
      ePfx: vtx.ePfx,
      eVid: vtx.eVid)
    db.kMap.withValue(vtx.eVid, keyPtr):
      nd.key[0] = keyPtr[]
    let key = db.kMap.getOrDefault(vtx.eVid, EMPTY_ROOT_KEY)
    if key != EMPTY_ROOT_KEY:
      nd.key[0] = key
      return
    result.add vtx.eVid
  of Branch:
@ -49,11 +52,11 @@ proc convertPartially(
      vType: Branch,
      bVid: vtx.bVid)
    for n in 0..15:
      if vtx.bVid[n].isZero:
        continue
      db.kMap.withValue(vtx.bVid[n], kPtr):
        nd.key[n] = kPtr[]
        continue
      if not vtx.bVid[n].isZero:
        let key = db.kMap.getOrDefault(vtx.bVid[n], EMPTY_ROOT_KEY)
        if key != EMPTY_ROOT_KEY:
          nd.key[n] = key
          continue
        result.add vtx.bVid[n]

proc convertPartiallyOk(
@ -74,8 +77,9 @@ proc convertPartiallyOk(
      vType: Extension,
      ePfx: vtx.ePfx,
      eVid: vtx.eVid)
    db.kMap.withValue(vtx.eVid, keyPtr):
      nd.key[0] = keyPtr[]
    let key = db.kMap.getOrDefault(vtx.eVid, EMPTY_ROOT_KEY)
    if key != EMPTY_ROOT_KEY:
      nd.key[0] = key
      result = true
  of Branch:
    nd = NodeRef(
@ -84,18 +88,21 @@ proc convertPartiallyOk(
    result = true
    for n in 0..15:
      if not vtx.bVid[n].isZero:
        db.kMap.withValue(vtx.bVid[n], kPtr):
          nd.key[n] = kPtr[]
        let key = db.kMap.getOrDefault(vtx.bVid[n], EMPTY_ROOT_KEY)
        if key != EMPTY_ROOT_KEY:
          nd.key[n] = key
          continue
        return false

proc cachedVID(db: AristoDbRef; nodeKey: NodeKey): VertexID =
  ## Get vertex ID from reverse cache
  db.pAmk.withValue(nodeKey, vidPtr):
    return vidPtr[]
  result = db.vidFetch()
  db.pAmk[nodeKey] = result
  db.kMap[result] = nodeKey
  let vid = db.pAmk.getOrDefault(nodeKey, VertexID(0))
  if vid != VertexID(0):
    result = vid
  else:
    result = db.vidFetch()
    db.pAmk[nodeKey] = result
    db.kMap[result] = nodeKey
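
A minimal sketch of the invariant `cachedVID` maintains between the two
lookup tables (`someNodeKey` is a hypothetical, non empty `NodeKey`):

  let vid = db.cachedVID someNodeKey
  doAssert db.pAmk[someNodeKey] == vid   # reverse cache: key -> vertex ID
  doAssert db.kMap[vid] == someNodeKey   # forward map: vertex ID -> key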

# ------------------------------------------------------------------------------
# Public functions for `VertexID` => `NodeKey` mapping
@ -110,12 +117,14 @@ proc pal*(db: AristoDbRef; vid: VertexID): NodeKey =
  ## table is checked whether the cache can be updated.
  if not db.isNil:

    db.kMap.withValue(vid, keyPtr):
      return keyPtr[]
    let key = db.kMap.getOrDefault(vid, EMPTY_ROOT_KEY)
    if key != EMPTY_ROOT_KEY:
      return key

    db.sTab.withValue(vid, vtxPtr):
    let vtx = db.sTab.getOrDefault(vid, VertexRef(nil))
    if vtx != VertexRef(nil):
      var node: NodeRef
      if db.convertPartiallyOk(vtxPtr[],node):
      if db.convertPartiallyOk(vtx,node):
        var w = initRlpWriter()
        w.append node
        result = w.finish.keccakHash.data.NodeKey
@ -144,7 +153,7 @@ proc updated*(nd: NodeRef; db: AristoDbRef): NodeRef =
    result = NodeRef(
      vType: Extension,
      ePfx: nd.ePfx)
    if not nd.key[0].isZero:
    if not nd.key[0].isEmpty:
      result.eVid = db.cachedVID nd.key[0]
      result.key[0] = nd.key[0]
  of Branch:
@ -152,7 +161,7 @@ proc updated*(nd: NodeRef; db: AristoDbRef): NodeRef =
      vType: Branch,
      key: nd.key)
    for n in 0..15:
      if not nd.key[n].isZero:
      if not nd.key[n].isEmpty:
        result.bVid[n] = db.cachedVID nd.key[n]

proc asNode*(vtx: VertexRef; db: AristoDbRef): NodeRef =

@ -12,12 +12,13 @@
## Aristo (aka Patricia) DB records merge test

import
  std/sequtils,
  eth/common,
  stew/results,
  unittest2,
  ../../nimbus/db/kvstore_rocksdb,
  ../../nimbus/db/aristo/[
    aristo_desc, aristo_debug, aristo_error, aristo_hike,
    aristo_merge, aristo_transcode],
    aristo_desc, aristo_debug, aristo_error, aristo_hashify,
    aristo_hike, aristo_merge],
  ../../nimbus/sync/snap/range_desc,
  ../replay/undump_accounts,
  ./test_helpers
@ -26,143 +27,204 @@ import
# Private helpers
# ------------------------------------------------------------------------------

proc to(w: PackedAccount; T: type LeafKVP): T =
  T(pathTag: w.accKey.to(NodeTag),
    payload: PayloadRef(pType: BlobData, blob: w.accBlob))

proc to[T](w: openArray[PackedAccount]; W: type seq[T]): W =
  w.toSeq.mapIt(it.to(T))


proc mergeStepwise(
    db: AristoDbRef;
    leafs: openArray[LeafKVP];
    noisy: bool;
      ): tuple[merged: int, dups: int, error: AristoError] =
  let
    lTabLen = db.lTab.len
  var
    (merged, dups, error) = (0, 0, AristoError(0))

  for n,leaf in leafs:
    var
      event = false # or (2 < u) or true
      dumpOk = false or event
      stopOk = false
    let
      preState = db.pp
      hike = db.merge leaf
      ekih = leaf.pathTag.hikeUp(db.lRoot, db)

    case hike.error:
    of AristoError(0):
      merged.inc
    of MergeLeafPathCachedAlready:
      dups.inc
    else:
      error = hike.error
      dumpOk = true
      stopOk = true

    if ekih.error != AristoError(0):
      dumpOk = true
      stopOk = true

    let hashesOk = block:
      let rc = db.hashifyCheck(relax = true)
      if rc.isOk:
        (VertexID(0),AristoError(0))
      else:
        dumpOk = true
        stopOk = true
        if error == AristoError(0):
          error = rc.error[1]
        rc.error

    if dumpOk:
      noisy.say "***", "<", n, "/", leafs.len-1, "> ", leaf.pathTag.pp,
        "\n pre-state ", preState,
        "\n --------",
        "\n merge => hike",
        "\n ", hike.pp(db),
        "\n --------",
        "\n ekih", ekih.pp(db),
        "\n --------",
        "\n post-state ", db.pp,
        "\n"

    check hike.error in {AristoError(0), MergeLeafPathCachedAlready}
    check ekih.error == AristoError(0)
    check hashesOk == (VertexID(0),AristoError(0))

    if ekih.legs.len == 0:
      check 0 < ekih.legs.len
    elif ekih.legs[^1].wp.vtx.vType != Leaf:
      check ekih.legs[^1].wp.vtx.vType == Leaf
    else:
      check ekih.legs[^1].wp.vtx.lData.blob == leaf.payload.blob

    if db.lTab.len != lTabLen + merged:
      error = GenericError
      check db.lTab.len == lTabLen + merged # quick leaf access table
      stopOk = true # makes no sense to go on further

    if stopOk:
      noisy.say "***", "<", n, "/", leafs.len-1, "> stop"
      break

  (merged,dups,error)

# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------

proc test_mergeAccounts*(
    noisy: bool;
    lst: openArray[PackedAccountRange];
    lst: openArray[UndumpAccounts];
      ) =
  for u,par in lst:
    let db = AristoDbRef()
    var
      root = VertexID(0)
      count = 0
  let
    db = AristoDbRef()

    for n,w in par.accounts:
      let
        sTabState = db.sTab.pp(db)
        payload = PayloadRef(pType: BlobData, blob: w.accBlob)
        pathTag = w.accKey.to(NodeTag)
        hike = db.merge(pathTag, payload, root, proofMode=false)
        ekih = pathTag.hikeUp(hike.root, db)
  for n,par in lst:
    let
      lTabLen = db.lTab.len
      leafs = par.data.accounts.to(seq[LeafKVP])
      added = db.merge leafs
      #added = db.mergeStepwise(leafs, noisy=false)

      if hike.error == AristoError(0):
        root = hike.root
    check added.error == AristoError(0)
    check db.lTab.len == lTabLen + added.merged
    check added.merged + added.dups == leafs.len

      count = n
      if hike.error != AristoError(0): # or true:
        noisy.say "***", "<", n, "> ", pathTag.pp,
          "\n hike",
          "\n ", hike.pp(db),
          "\n sTab (prev)",
          "\n ", sTabState,
          "\n sTab",
          "\n ", db.sTab.pp(db),
          "\n lTab",
          "\n ", db.lTab.pp,
    let
      preKMap = (db.kMap.len, db.pp(sTabOk=false, lTabOk=false))
      prePAmk = (db.pAmk.len, db.pAmk.pp(db))

    block:
      let rc = db.hashify # (noisy=true)
      if rc.isErr: # or true:
        noisy.say "***", "<", n, "> db dump",
          "\n pre-kMap(", preKMap[0], ")\n ", preKMap[1],
          "\n --------",
          "\n post-state ", db.pp,
          "\n"
      if rc.isErr:
        check rc.error == AristoError(0) # force message
        return

      check hike.error == AristoError(0)
      check ekih.error == AristoError(0)
    block:
      let rc = db.hashifyCheck()
      if rc.isErr:
        noisy.say "***", "<", n, "/", lst.len-1, "> db dump",
          "\n pre-kMap(", preKMap[0], ")\n ", preKMap[1],
          "\n --------",
          "\n pre-pAmk(", prePAmk[0], ")\n ", prePAmk[1],
          "\n --------",
          "\n post-state ", db.pp,
          "\n"
      if rc.isErr:
        check rc == Result[void,(VertexID,AristoError)].ok()
        return

      if ekih.legs.len == 0:
        check 0 < ekih.legs.len
      elif ekih.legs[^1].wp.vtx.vType != Leaf:
        check ekih.legs[^1].wp.vtx.vType == Leaf
      else:
        check ekih.legs[^1].wp.vtx.lData.blob == w.accBlob

      if db.lTab.len != n + 1:
        check db.lTab.len == n + 1 # quick leaf access table
        break # makes no sense to go on further

    noisy.say "***", "sample ", u, "/", lst.len ," leafs merged: ", count+1
    #noisy.say "***", "sample ",n,"/",lst.len-1," leafs merged: ", added.merged

proc test_mergeProofsAndAccounts*(
    noisy: bool;
    lst: openArray[UndumpAccounts];
      ) =
  for u,par in lst:
  let
    db = AristoDbRef()

  for n,par in lst:
    let
      sTabLen = db.sTab.len
      lTabLen = db.lTab.len
      leafs = par.data.accounts.to(seq[LeafKVP])

    noisy.say "***", "sample ", n, "/", lst.len-1, " start, nLeafs=", leafs.len

    let
      db = AristoDbRef()
      rootKey = par.root.to(NodeKey)
    var
      rootID: VertexID
      count = 0
      proved = db.merge par.data.proof

    for n,w in par.data.proof:
      let
        key = w.Blob.digestTo(NodeKey)
        node = w.Blob.decode(NodeRef)
        rc = db.merge(key, node)
      if rc.isErr:
        check rc.isOK # provoke message and error
        check rc.error == AristoError(0)
        continue

      check n + 1 < db.pAmk.len
      check n + 1 < db.kMap.len
      check db.sTab.len == n + 1
    check proved.error in {AristoError(0),MergeNodeKeyCachedAlready}
    check par.data.proof.len == proved.merged + proved.dups
    check db.lTab.len == lTabLen
    check db.sTab.len == proved.merged + sTabLen
    check proved.merged < db.pAmk.len
    check proved.merged < db.kMap.len

    # Set up root ID
    db.pAmk.withValue(rootKey, vidPtr):
      rootID = vidPtr[]
    db.lRoot = db.pAmk.getOrDefault(rootKey, VertexID(0))
    check db.lRoot != VertexID(0)

    check not rootID.isZero
    noisy.say "***", "sample ", n, "/", lst.len-1, " proved=", proved
    #noisy.say "***", "<", n, "/", lst.len-1, ">\n ", db.pp

    if true and false:
      noisy.say "***", count, " proof nodes, root=", rootID.pp,
        #"\n pAmk",
        #"\n ", db.pAmk.pp(db),
        "\n kMap",
        "\n ", db.kMap.pp(db),
        "\n sTab",
        "\n ", db.sTab.pp(db),
        "\n"
    let
      added = db.merge leafs
      #added = db.mergeStepwise(leafs, noisy=false)

    for n,w in par.data.accounts:
      let
        sTabState = db.sTab.pp(db)
        payload = PayloadRef(pType: BlobData, blob: w.accBlob)
        pathTag = w.accKey.to(NodeTag)
        hike = db.merge(pathTag, payload, rootID, proofMode=true) #, noisy=true)
        ekih = pathTag.hikeUp(rootID, db)
    check db.lTab.len == lTabLen + added.merged
    check added.merged + added.dups == leafs.len

      count = n
      if hike.error != AristoError(0): # or true:
        noisy.say "***", "<", n, "> ", pathTag.pp,
          "\n hike",
          "\n ", hike.pp(db),
          "\n sTab (prev)",
          "\n ", sTabState,
          "\n sTab",
          "\n ", db.sTab.pp(db),
          "\n lTab",
          "\n ", db.lTab.pp,
          "\n"
    block:
      if added.error notin {AristoError(0), MergeLeafPathCachedAlready}:
        noisy.say "***", "<", n, "/", lst.len-1, ">\n ", db.pp
        check added.error in {AristoError(0), MergeLeafPathCachedAlready}
        return

      check hike.error == AristoError(0)
      check ekih.error == AristoError(0)
    noisy.say "***", "sample ", n, "/", lst.len-1, " added=", added

      if ekih.legs.len == 0:
        check 0 < ekih.legs.len
      elif ekih.legs[^1].wp.vtx.vType != Leaf:
        check ekih.legs[^1].wp.vtx.vType == Leaf
      else:
        check ekih.legs[^1].wp.vtx.lData.blob == w.accBlob
    block:
      let rc = db.hashify # (noisy=false or (7 <= n))
      if rc.isErr:
        noisy.say "***", "<", n, "/", lst.len-1, ">\n ", db.pp
        check rc.error == AristoError(0)
        return

      if db.lTab.len != n + 1:
        check db.lTab.len == n + 1 # quick leaf access table
        break # makes no sense to go on further

      #if 10 < n:
      #  break

    noisy.say "***", "sample ", u, "/", lst.len ," leafs merged: ", count+1
    #break
    noisy.say "***", "sample ",n,"/",lst.len-1," leafs merged: ", added.merged

# ------------------------------------------------------------------------------
# End

@ -17,10 +17,8 @@ import
  unittest2,
  ../../nimbus/db/kvstore_rocksdb,
  ../../nimbus/db/aristo/[
    aristo_desc, aristo_cache, aristo_debug, aristo_error, aristo_transcode,
    aristo_vid],
  ../../nimbus/sync/snap/range_desc,
  ./test_helpers
    aristo_desc, aristo_debug, aristo_error, aristo_transcode, aristo_vid],
  "."/[test_aristo_cache, test_helpers]

type
  TesterDesc = object
@ -125,7 +123,9 @@ proc test_transcodeAccounts*(
      for n in 0..15:
        # key[n] <-> vtx[n] correspondence
        check node.key[n] == node0.key[n]
        check node.key[n].isZero == node.bVid[n].isZero
        check node.key[n].isEmpty == node.bVid[n].isZero
        if node.key[n].isEmpty != node.bVid[n].isZero:
          echo ">>> node=", node.pp

    # This NIM object must match the same RLP encoded byte stream
    block:
@ -192,8 +192,8 @@ proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
    expectedVids += (vid < first).ord
    db.vidDispose vid

  check db.vidGen.len == expectedVids
  noisy.say "***", "vids=", db.vidGen.len, " discarded=", count-expectedVids
  check db.vGen.len == expectedVids
  noisy.say "***", "vids=", db.vGen.len, " discarded=", count-expectedVids

  # Serialise/deserialise
  block:
@ -206,27 +206,27 @@ proc test_transcodeVidRecycleLists*(noisy = true; seed = 42) =
      check rc.isOk
      rc.get(otherwise = AristoDbRef())

  check db.vidGen == db1.vidGen
  check db.vGen == db1.vGen

  # Make sure that recycled numbers are fetched first
  let topVid = db.vidGen[^1]
  while 1 < db.vidGen.len:
  let topVid = db.vGen[^1]
  while 1 < db.vGen.len:
    let w = db.vidFetch()
    check w < topVid
  check db.vidGen.len == 1 and db.vidGen[0] == topVid
  check db.vGen.len == 1 and db.vGen[0] == topVid

  # Get some consecutive vertex IDs
  for n in 0 .. 5:
    let w = db.vidFetch()
    check w == topVid + n
    check db.vidGen.len == 1
    check db.vGen.len == 1

  # Repeat last test after clearing the cache
  db.vidGen.setLen(0)
  db.vGen.setLen(0)
  for n in 0 .. 5:
    let w = db.vidFetch()
    check w == 1.VertexID + n
    check db.vidGen.len == 1
    check db.vGen.len == 1

# ------------------------------------------------------------------------------
# End