# nimbus-eth1
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  std/[algorithm, sequtils, sets, tables],
  eth/common,
  results,
  ./aristo_desc

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

func dup(sTab: Table[VertexID,VertexRef]): Table[VertexID,VertexRef] =
  ## Explicit dup for `VertexRef` values
  for (k,v) in sTab.pairs:
    result[k] = v.dup

func getLebalOrVoid(stack: seq[LayerRef]; lbl: HashLabel): HashSet[VertexID] =
  # Helper: get next set of vertex IDs from stack.
  for w in stack.reversed:
    w.delta.pAmk.withValue(lbl,value):
      return value[]

proc recalcLebal(layer: var LayerObj) =
  ## Calculate reverse `kMap[]` for final (aka zero) layer
  layer.delta.pAmk.clear
  for (vid,lbl) in layer.delta.kMap.pairs:
    if lbl.isValid:
      layer.delta.pAmk.withValue(lbl, value):
        value[].incl vid
      do:
        layer.delta.pAmk[lbl] = @[vid].toHashSet

# ------------------------------------------------------------------------------
# Public getters: lazy value lookup for read only versions
# ------------------------------------------------------------------------------

func lTab*(db: AristoDbRef): Table[LeafTie,VertexID] =
  db.top.final.lTab

func pPrf*(db: AristoDbRef): HashSet[VertexID] =
  db.top.final.pPrf

func vGen*(db: AristoDbRef): seq[VertexID] =
  db.top.final.vGen

func dirty*(db: AristoDbRef): bool =
  db.top.final.dirty

# ------------------------------------------------------------------------------
# Public getters/helpers
# ------------------------------------------------------------------------------

func nLayersVtx*(db: AristoDbRef): int =
  ## Number of vertex ID/vertex entries on the cache layers. This is an upper
  ## bound for the number of effective vertex ID mappings held on the cache
  ## layers as there might be duplicate entries for the same vertex ID on
  ## different layers.
  db.stack.mapIt(it.delta.sTab.len).foldl(a + b, db.top.delta.sTab.len)

func nLayersLabel*(db: AristoDbRef): int =
  ## Number of vertex ID/label entries on the cache layers. This is an upper
  ## bound for the number of effective vertex ID mappings held on the cache
  ## layers as there might be duplicate entries for the same vertex ID on
  ## different layers.
  db.stack.mapIt(it.delta.kMap.len).foldl(a + b, db.top.delta.kMap.len)

func nLayersLebal*(db: AristoDbRef): int =
  ## Number of label/vertex IDs reverse lookup entries on the cache layers.
  ## This is an upper bound for the number of effective label mappings held
  ## on the cache layers as there might be duplicate entries for the same
  ## label on different layers.
  db.stack.mapIt(it.delta.pAmk.len).foldl(a + b, db.top.delta.pAmk.len)
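
# The "upper bound" wording above, illustrated (informal sketch; pushing a
# new top layer happens outside this module, e.g. in the transaction code):
#
#   db.layersPutVtx(VertexID(7), vtxA)   # recorded on the current top layer
#   # ... a new top layer is pushed, the old one moves onto `db.stack` ...
#   db.layersPutVtx(VertexID(7), vtxB)   # recorded again on the new top layer
#
# Now `nLayersVtx()` counts `VertexID(7)` twice, while `layersGetVtx()`
# resolves it exactly once, to `vtxB` (top layer first, then `db.stack` in
# reverse order.) So the counters never under-report, but may over-report.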

# ------------------------------------------------------------------------------
# Public functions: get variants
# ------------------------------------------------------------------------------

proc layersGetVtx*(db: AristoDbRef; vid: VertexID): Result[VertexRef,void] =
  ## Find a vertex on the cache layers. An `ok()` result might contain a
  ## `nil` vertex if it is stored on the cache that way.
  ##
  if db.top.delta.sTab.hasKey vid:
    return ok(db.top.delta.sTab.getOrVoid vid)

  for w in db.stack.reversed:
    if w.delta.sTab.hasKey vid:
      return ok(w.delta.sTab.getOrVoid vid)

  err()

proc layersGetVtxOrVoid*(db: AristoDbRef; vid: VertexID): VertexRef =
  ## Simplified version of `layersGetVtx()`
  db.layersGetVtx(vid).valueOr: VertexRef(nil)

proc layersGetLabel*(db: AristoDbRef; vid: VertexID): Result[HashLabel,void] =
  ## Find a hash label (containing the `HashKey`) on the cache layers. An
  ## `ok()` result might contain a void hash label if it is stored on the
  ## cache that way.
  ##
  if db.top.delta.kMap.hasKey vid:
    # This is ok regardless of the `dirty` flag. If this vertex has become
    # dirty, there is an empty `kMap[]` entry on this layer.
    return ok(db.top.delta.kMap.getOrVoid vid)

  for w in db.stack.reversed:
    if w.delta.kMap.hasKey vid:
      # Same reasoning as above regarding the `dirty` flag.
      return ok(w.delta.kMap.getOrVoid vid)

  err()

proc layersGetlabelOrVoid*(db: AristoDbRef; vid: VertexID): HashLabel =
  ## Simplified version of `layersGetLabel()`
  db.layersGetLabel(vid).valueOr: VOID_HASH_LABEL

proc layersGetKey*(db: AristoDbRef; vid: VertexID): Result[HashKey,void] =
  ## Variant of `layersGetLabel()` for returning the `HashKey` part of the
  ## label only.
  let lbl = db.layersGetLabel(vid).valueOr:
    return err()
  # Note that `lbl.isValid == lbl.key.isValid`
  ok(lbl.key)

proc layersGetKeyOrVoid*(db: AristoDbRef; vid: VertexID): HashKey =
  ## Simplified version of `layersGetKey()`
  db.layersGetKey(vid).valueOr: VOID_HASH_KEY

proc layersGetLebal*(
    db: AristoDbRef;
    lbl: HashLabel;
      ): Result[HashSet[VertexID],void] =
  ## Inverse of `layersGetKey()`. For a given argument `lbl`, find all vertex
  ## IDs that have `layersGetLabel()` return this very `lbl` value for these
  ## vertex IDs.
  if db.top.delta.pAmk.hasKey lbl:
    return ok(db.top.delta.pAmk.getOrVoid lbl)

  for w in db.stack.reversed:
    if w.delta.pAmk.hasKey lbl:
      return ok(w.delta.pAmk.getOrVoid lbl)

  err()

proc layersGetLebalOrVoid*(db: AristoDbRef; lbl: HashLabel): HashSet[VertexID] =
  ## Simplified version of `layersGetLebal()`
  db.layersGetLebal(lbl).valueOr: EmptyVidSet
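
# How the forward and reverse lookups relate (informal sketch, assuming
# `kMap[]` and `pAmk[]` have been kept in sync via `layersPutLabel()`):
#
#   for vid in db.layersGetLebalOrVoid(lbl):
#     doAssert db.layersGetlabelOrVoid(vid) == lbl
#
# i.e. `layersGetLebal()` enumerates the vertex IDs whose current label
# resolves back to `lbl`.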

# ------------------------------------------------------------------------------
# Public functions: put variants
# ------------------------------------------------------------------------------

proc layersPutVtx*(db: AristoDbRef; vid: VertexID; vtx: VertexRef) =
  ## Store a (potentially empty) vertex on the top layer
  db.top.delta.sTab[vid] = vtx
  db.top.final.dirty = true # Modified top cache layers

proc layersResVtx*(db: AristoDbRef; vid: VertexID) =
  ## Shortcut for `db.layersPutVtx(vid, VertexRef(nil))`. It is sort of the
  ## equivalent of a delete function.
  db.layersPutVtx(vid, VertexRef(nil))

proc layersPutLabel*(db: AristoDbRef; vid: VertexID; lbl: HashLabel) =
  ## Store a (potentially void) hash label on the top layer

  # Get previous label
  let blb = db.top.delta.kMap.getOrVoid vid

  # Update label on `label->vid` mapping table
  db.top.delta.kMap[vid] = lbl
  db.top.final.dirty = true # Modified top cache layers

  # Clear previous value on reverse table if it has changed
  if blb.isValid and blb != lbl:
    var vidsLen = -1
    db.top.delta.pAmk.withValue(blb, value):
      value[].excl vid
      vidsLen = value[].len
    do: # provide empty lookup
      let vids = db.stack.getLebalOrVoid(blb)
      if vids.isValid and vid in vids:
        # This entry supersedes non-empty changed ones from lower levels
        db.top.delta.pAmk[blb] = vids - @[vid].toHashSet
    if vidsLen == 0 and not db.stack.getLebalOrVoid(blb).isValid:
      # There is no non-empty entry on lower levels, so delete this one
      db.top.delta.pAmk.del blb

  # Add updated value on reverse table if non-zero
  if lbl.isValid:
    db.top.delta.pAmk.withValue(lbl, value):
      value[].incl vid
    do: # else if not found: need to merge with value set from lower layer
      db.top.delta.pAmk[lbl] = db.stack.getLebalOrVoid(lbl) + @[vid].toHashSet
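
# The `withValue()`/`do:` blocks above use the std/tables idiom "mutate in
# place if the key exists, otherwise handle the miss". A stand-alone sketch
# of the same idiom with plain placeholder types (illustration only, not
# part of this module's API):
#
#   var rev: Table[string, HashSet[int]]
#   rev.withValue("some-label", value):
#     value[].incl 1                       # key present: extend existing set
#   do:
#     rev["some-label"] = @[1].toHashSet   # key missing: create a fresh set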

proc layersResLabel*(db: AristoDbRef; vid: VertexID) =
  ## Shortcut for `db.layersPutLabel(vid, VOID_HASH_LABEL)`. It is sort of the
  ## equivalent of a delete function.
  db.layersPutLabel(vid, VOID_HASH_LABEL)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc layersMergeOnto*(src: LayerRef; trg: var LayerObj; stack: seq[LayerRef]) =
  ## Merges the argument `src` into the argument `trg`. For the result layer,
  ## the `txUid` value is set to `0`.
  ##
  trg.final = src.final
  trg.txUid = 0

  for (vid,vtx) in src.delta.sTab.pairs:
    trg.delta.sTab[vid] = vtx
  for (vid,lbl) in src.delta.kMap.pairs:
    trg.delta.kMap[vid] = lbl

  if stack.len == 0:
    # Re-calculate `pAmk[]`
    trg.recalcLebal()
  else:
    # Merge reverse `kMap[]` layers. Empty label image sets are ignored unless
    # they supersede non-empty values on the argument `stack[]`.
    for (lbl,vids) in src.delta.pAmk.pairs:
      if 0 < vids.len or stack.getLebalOrVoid(lbl).isValid:
        trg.delta.pAmk[lbl] = vids

func layersCc*(db: AristoDbRef; level = high(int)): LayerRef =
  ## Provide a collapsed copy of layers up to a particular transaction level.
  ## If the `level` argument is too large, the maximum transaction level is
  ## returned. For the result layer, the `txUid` value is set to `0`.
  ##
  let layers = if db.stack.len <= level: db.stack & @[db.top]
               else: db.stack[0 .. level]

  # Set up initial layer (bottom layer)
  result = LayerRef(
    final: layers[^1].final.dup,             # Pre-merged/final values
    delta: LayerDeltaRef(
      sTab: layers[0].delta.sTab.dup,        # explicit dup for ref values
      kMap: layers[0].delta.kMap))

  # Consecutively merge other layers on top
  for n in 1 ..< layers.len:
    for (vid,vtx) in layers[n].delta.sTab.pairs:
      result.delta.sTab[vid] = vtx
    for (vid,lbl) in layers[n].delta.kMap.pairs:
      result.delta.kMap[vid] = lbl

  # Re-calculate `pAmk[]`
  result[].recalcLebal()
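
# Typical use (informal): `let snap = db.layersCc()` collapses all cache
# layers, i.e. `db.stack & @[db.top]`, into a single `LayerRef` with
# `txUid == 0`, while `db.layersCc(0)` restricts the copy to the bottom
# layer only.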

# ------------------------------------------------------------------------------
# Public iterators
# ------------------------------------------------------------------------------

iterator layersWalkVtx*(
    db: AristoDbRef;
    seen: var HashSet[VertexID];
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Walk over all `(VertexID,VertexRef)` pairs on the cache layers. Note that
  ## entries are unsorted.
  ##
  ## The argument `seen` collects a set of all visited vertex IDs including
  ## the ones with a zero vertex which are otherwise skipped by the iterator.
  ## The `seen` argument must not be modified while the iterator is active.
  ##
  for (vid,vtx) in db.top.delta.sTab.pairs:
    yield (vid,vtx)
    seen.incl vid

  for w in db.stack.reversed:
    for (vid,vtx) in w.delta.sTab.pairs:
      if vid notin seen:
        yield (vid,vtx)
        seen.incl vid

iterator layersWalkVtx*(
    db: AristoDbRef;
      ): tuple[vid: VertexID, vtx: VertexRef] =
  ## Variant of `layersWalkVtx()`.
  var seen: HashSet[VertexID]
  for (vid,vtx) in db.layersWalkVtx seen:
    yield (vid,vtx)

iterator layersWalkLabel*(
    db: AristoDbRef;
      ): tuple[vid: VertexID, lbl: HashLabel] =
  ## Walk over all `(VertexID,HashLabel)` pairs on the cache layers. Note that
  ## entries are unsorted.
  var seen: HashSet[VertexID]
  for (vid,lbl) in db.top.delta.kMap.pairs:
    yield (vid,lbl)
    seen.incl vid

  for w in db.stack.reversed:
    for (vid,lbl) in w.delta.kMap.pairs:
      if vid notin seen:
        yield (vid,lbl)
        seen.incl vid

iterator layersWalkLebal*(
    db: AristoDbRef;
      ): tuple[lbl: HashLabel, vids: HashSet[VertexID]] =
  ## Walk over `(HashLabel,HashSet[VertexID])` pairs.
  var seen: HashSet[HashLabel]
  for (lbl,vids) in db.top.delta.pAmk.pairs:
    yield (lbl,vids)
    seen.incl lbl

  for w in db.stack.reversed:
    for (lbl,vids) in w.delta.pAmk.pairs:
      if lbl notin seen:
        yield (lbl,vids)
        seen.incl lbl

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------