# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Aristo DB -- Patricia Trie Merkleisation
## ========================================
##
## For the current state of the `Patricia Trie`, keys (equivalent to hashes)
## are associated with the vertex IDs. Existing key associations are checked
## (i.e. recalculated and compared) unless the ID is locked. In the latter
## case, the key is assumed to be correct without checking.
##
## The association algorithm is an optimised version of:
##
## * For all leaf vertices, label them with the parent vertex so that there
##   are chains from the leafs to the root vertex.
##
## * Apply a width-first traversal starting with the set of leaf vertices,
##   compiling the keys to associate with by hashing the current vertex.
##
## Apparently, keys (aka hashes) can be compiled for leaf vertices. For the
## other vertices, the keys can be compiled if all the children keys are
## known, which is assured by the nature of the width-first traversal method.
##
## For production, this algorithm is slightly optimised (see the sketch
## after this list):
##
## * For each leaf vertex, calculate the chain from the leaf to the root
##   vertex.
##   + Starting at the leaf, calculate the key for each vertex towards the
##     root vertex as long as possible.
##   + Stash the rest of the partial chain to be completed later.
##
## * While there is a partial chain left, use the ends towards the leaf
##   vertices and calculate the remaining keys (which results in a
##   width-first traversal, again.)
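##
## A minimal sketch of these two phases in pseudocode; the helpers
## `leafVertices`, `chainToRoot`, `childKeysKnown`, and `stashRestOfChain`
## are illustrative only and not part of this module:
##
## ::
##   # Phase 1: hash each leaf-to-root chain as far as possible
##   for leaf in leafVertices:
##     for vtx in leaf.chainToRoot:
##       if not vtx.childKeysKnown:
##         stashRestOfChain vtx     # partial chain, completed in phase 2
##         break
##       vtx.key = vtx.encode.digestTo(HashKey)
##
##   # Phase 2: width-first completion of the stashed partial chains
##   while 0 < partialChains.len:
##     resume each partial chain at its leaf-most end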

{.push raises: [].}

import
  std/[algorithm, sequtils, strutils],
  std/[sets, tables],
  chronicles,
  eth/common,
  stew/results,
  "."/[aristo_constants, aristo_debug, aristo_desc, aristo_get,
       aristo_hike, aristo_transcode, aristo_vid]

type
  BackVidValRef = ref object
    root: VertexID                 ## Root vertex
    onBe: bool                     ## Table key vid refers to backend
    toVid: VertexID                ## Next/follow up vertex

  BackVidTab =
    Table[VertexID,BackVidValRef]
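      # Table of vertices still waiting for their Merkle hash, each mapped
      # to the next (follow-up) vertex towards the root; see `backLink` and
      # `downMost` in `hashify()` below.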

logScope:
  topics = "aristo-hashify"

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

template logTxt(info: static[string]): static[string] =
  "Hashify " & info

func getOrVoid(tab: BackVidTab; vid: VertexID): BackVidValRef =
  tab.getOrDefault(vid, BackVidValRef(nil))

func isValid(brv: BackVidValRef): bool =
  brv != BackVidValRef(nil)

# ------------------------------------------------------------------------------
# Private helpers, debugging
# ------------------------------------------------------------------------------

proc pp(w: BackVidValRef): string =
  if w.isNil:
    return "n/a"
  result = "(" & w.root.pp & ","
  if w.onBe:
    result &= "*"
  result &= "," & w.toVid.pp & ")"

proc pp(t: BackVidTab): string =
  proc pp(b: bool): string =
    if b: "*" else: ""
  "{" & t.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID)
          .mapIt("(" & it.pp & "," & t.getOrVoid(it).pp & ")")
          .join(",") & "}"

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc toNode(vtx: VertexRef; db: AristoDb): Result[NodeRef,void] =
  case vtx.vType:
  of Leaf:
    return ok NodeRef(vType: Leaf, lPfx: vtx.lPfx, lData: vtx.lData)
  of Branch:
    let node = NodeRef(vType: Branch, bVid: vtx.bVid)
    for n in 0 .. 15:
      if vtx.bVid[n].isValid:
        let key = db.getKey vtx.bVid[n]
        if key.isValid:
          node.key[n] = key
          continue
        return err()
      else:
        node.key[n] = VOID_HASH_KEY
    return ok node
  of Extension:
    if vtx.eVid.isValid:
      let key = db.getKey vtx.eVid
      if key.isValid:
        let node = NodeRef(vType: Extension, ePfx: vtx.ePfx, eVid: vtx.eVid)
        node.key[0] = key
        return ok node

proc updateHashKey(
    db: AristoDb;                  # Database, top layer
    root: VertexID;                # Root ID
    vid: VertexID;                 # Vertex ID to check for
    expected: HashKey;             # Hash key for the vertex addressed by `vid`
    backend: bool;                 # Set `true` if the vertex is on the backend
      ): Result[void,AristoError] =
  ## Update the argument hash key `expected` for the vertex addressed by `vid`.
  ##
  # If the Merkle hash has been cached locally already, it must match.
  block:
    let key = db.top.kMap.getOrVoid(vid).key
    if key.isValid:
      if key != expected:
        let error = HashifyExistingHashMismatch
        debug logTxt "hash update failed", vid, key, expected, error
        return err(error)
      return ok()

  # If the vertex had been cached locally, there would be no locally cached
  # Merkle hash key. It will be created at the bottom end of this function.
  #
  # So there remains the case when the vertex is available on the backend
  # only. The Merkle hash is not cached locally. It might be overloaded (and
  # eventually overwritten.)
  if backend:
    # Ok, the vertex is on the backend.
    let rc = db.getKeyBackend vid
    if rc.isOk:
      let key = rc.value
      if key == expected:
        return ok()

      # This step is an error in the sense that something on the backend
      # is fishy. There should not be contradicting Merkle hashes. Throwing
      # an error here would lead to a deadlock, so we correct it.
      debug "correcting backend hash key mismatch", vid, key, expected
      # Proceed with `vidAttach()`, below

    elif rc.error != GetKeyNotFound:
      debug logTxt "backend key fetch failed", vid, expected, error=rc.error
      return err(rc.error)

    else:
      discard
      # Proceed with `vidAttach()`, below

  # Otherwise there is no Merkle hash, so create one with the `expected` key
  db.vidAttach(HashLabel(root: root, key: expected), vid)
  ok()

proc leafToRootHasher(
    db: AristoDb;                  # Database, top layer
    hike: Hike;                    # Hike for labelling leaf..root
      ): Result[int,(VertexID,AristoError)] =
  ## Returns the index of the first vertex on the `hike` that could not be
  ## hashed, or -1 if all could be hashed.
  for n in (hike.legs.len-1).countDown(0):
    let
      wp = hike.legs[n].wp
      bg = hike.legs[n].backend
      rc = wp.vtx.toNode db
    if rc.isErr:
      return ok n

    # Vertices marked as proof nodes need not be checked
    if wp.vid in db.top.pPrf:
      continue

    # Check against the existing key, or store the new key
    let
      key = rc.value.encode.digestTo(HashKey)
      rx = db.updateHashKey(hike.root, wp.vid, key, bg)
    if rx.isErr:
      return err((wp.vid,rx.error))

  ok -1 # all could be hashed

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc hashifyClear*(
    db: AristoDb;                  # Database, top layer
    locksOnly = false;             # If `true`, then clear only proof locks
      ) =
  ## Clear all `Merkle` hashes from the `db` argument database top layer.
  if not locksOnly:
    db.top.pAmk.clear
    db.top.kMap.clear
  db.top.pPrf.clear


proc hashify*(
    db: AristoDb;                  # Database, top layer
      ): Result[HashSet[VertexID],(VertexID,AristoError)] =
  ## Add keys to the `Patricia Trie` so that it becomes a `Merkle Patricia
  ## Tree`. If successful, the function returns the set of root vertex IDs
  ## for which the trie could be fully labelled with keys (aka Merkle
  ## hashes.)
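  ##
  ## A minimal usage sketch (assuming the top layer of `db` holds some
  ## pending leaf updates in `lTab`):
  ##
  ## ::
  ##   let rc = db.hashify()
  ##   if rc.isOk:
  ##     for root in rc.value:
  ##       discard db.getKey root  # each root is now labelled with its key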
  var
    roots: HashSet[VertexID]
    completed: HashSet[VertexID]

    # Width-first leaf-to-root traversal structure
    backLink: BackVidTab
    downMost: BackVidTab

  for (lky,vid) in db.top.lTab.pairs:
    let hike = lky.hikeUp(db)
    if hike.error != AristoError(0):
      return err((hike.root,hike.error))

    roots.incl hike.root

    # Hash as much of the `hike` as possible
    let n = block:
      let rc = db.leafToRootHasher hike
      if rc.isErr:
        return err(rc.error)
      rc.value

    if 0 < n:
      # Backtrack and register remaining nodes. Note that in case *n == 0*,
      # the root vertex has not been fully resolved yet.
      #
      # hike.legs: (leg[0], leg[1], .., leg[n-1], leg[n], ..)
      #               |       |          |         |
      #               | <---- | <------- | <-----  |
      #               |       |          |
      #               |      backLink[]  | downMost |
      #
      downMost[hike.legs[n].wp.vid] = BackVidValRef(
        root: hike.root,
        onBe: hike.legs[n].backend,
        toVid: hike.legs[n-1].wp.vid)
      for u in (n-1).countDown(1):
        backLink[hike.legs[u].wp.vid] = BackVidValRef(
          root: hike.root,
          onBe: hike.legs[u].backend,
          toVid: hike.legs[u-1].wp.vid)

    elif n < 0:
      completed.incl hike.root

  # At least one full path leaf..root should have succeeded with labelling
  # for each root.
  if completed.len < roots.len:
    return err((VertexID(0),HashifyLeafToRootAllFailed))

  # Update remaining hashes
  while 0 < downMost.len:
    var
      redo: BackVidTab
      done: HashSet[VertexID]

    for (vid,val) in downMost.pairs:
      # Try to convert the vertex to a node. This is possible only if all
      # link references have Merkle hashes.
      #
      # Also, `db.getVtx(vid)` is not nil as the vertex was fetched earlier
      # already.
      let rc = db.getVtx(vid).toNode(db)
      if rc.isErr:
        # Cannot complete with this vertex, so do it later
        redo[vid] = val

      else:
        # Update the Merkle hash
        let
          key = rc.value.encode.digestTo(HashKey)
          rx = db.updateHashKey(val.root, vid, key, val.onBe)
        if rx.isErr:
          return err((vid,rx.error))

        done.incl vid

        # Proceed with the back link
        let nextItem = backLink.getOrVoid val.toVid
        if nextItem.isValid:
          redo[val.toVid] = nextItem

    # Make sure that the algorithm proceeds
    if done.len == 0:
      let error = HashifyCannotComplete
      return err((VertexID(0),error))

    # Clean up dups from `backLink` and restart `downMost`
    for vid in done.items:
      backLink.del vid
    downMost = redo

  ok completed

# ------------------------------------------------------------------------------
# Public debugging functions
# ------------------------------------------------------------------------------

proc hashifyCheck*(
    db: AristoDb;                  # Database, top layer
    relax = false;                 # Check existing hashes only
      ): Result[void,(VertexID,AristoError)] =
  ## Verify that the Merkle hash keys are either completely missing or
  ## match all known vertices on the argument database layer `db`.
  if not relax:
    for (vid,vtx) in db.top.sTab.pairs:
      let rc = vtx.toNode(db)
      if rc.isErr:
        return err((vid,HashifyCheckVtxIncomplete))

      let lbl = db.top.kMap.getOrVoid vid
      if not lbl.isValid:
        return err((vid,HashifyCheckVtxHashMissing))
      if lbl.key != rc.value.encode.digestTo(HashKey):
        return err((vid,HashifyCheckVtxHashMismatch))

      let revVid = db.top.pAmk.getOrVoid lbl
      if not revVid.isValid:
        return err((vid,HashifyCheckRevHashMissing))
      if revVid != vid:
        return err((vid,HashifyCheckRevHashMismatch))

  elif 0 < db.top.pPrf.len:
    for vid in db.top.pPrf:
      let vtx = db.top.sTab.getOrVoid vid
      if not vtx.isValid:
        return err((vid,HashifyCheckVidVtxMismatch))

      let rc = vtx.toNode(db)
      if rc.isErr:
        return err((vid,HashifyCheckVtxIncomplete))

      let lbl = db.top.kMap.getOrVoid vid
      if not lbl.isValid:
        return err((vid,HashifyCheckVtxHashMissing))
      if lbl.key != rc.value.encode.digestTo(HashKey):
        return err((vid,HashifyCheckVtxHashMismatch))

      let revVid = db.top.pAmk.getOrVoid lbl
      if not revVid.isValid:
        return err((vid,HashifyCheckRevHashMissing))
      if revVid != vid:
        return err((vid,HashifyCheckRevHashMismatch))

  else:
    for (vid,lbl) in db.top.kMap.pairs:
      let vtx = db.getVtx vid
      if vtx.isValid:
        let rc = vtx.toNode(db)
        if rc.isOk:
          if lbl.key != rc.value.encode.digestTo(HashKey):
            return err((vid,HashifyCheckVtxHashMismatch))

          let revVid = db.top.pAmk.getOrVoid lbl
          if not revVid.isValid:
            return err((vid,HashifyCheckRevHashMissing))
          if revVid != vid:
            return err((vid,HashifyCheckRevHashMismatch))

  if db.top.pAmk.len != db.top.kMap.len:
    var knownKeys: HashSet[VertexID]
    for (key,vid) in db.top.pAmk.pairs:
      if not db.top.kMap.hasKey(vid):
        return err((vid,HashifyCheckRevVtxMissing))
      if vid in knownKeys:
        return err((vid,HashifyCheckRevVtxDup))
      knownKeys.incl vid
    return err((VertexID(0),HashifyCheckRevCountMismatch)) # should not apply(!)

  if 0 < db.top.pAmk.len and not relax and db.top.pAmk.len != db.top.sTab.len:
    return err((VertexID(0),HashifyCheckVtxCountMismatch))

  for vid in db.top.pPrf:
    if not db.top.kMap.hasKey(vid):
      return err((vid,HashifyCheckVtxLockWithoutKey))

  ok()

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------