Aristo db implement filter storage scheduler (#1713)

* Rename FilterID => QueueID

why:
  The current usage does not identify a particular filter but rather serves
  as a storage tag for managing filters on the database (to be organised in
  a set of FIFOs or queues.)

* Split `aristo_filter` source into sub-files

why:
  Make space for filter management API

* Store filter queue IDs in pairs on the backend

why:
  Any pair describes a FIFO accessed by bottom/top IDs

* Reorg some source file names

why:
  The "aristo_" prefix for make local/private files is tedious to
  use, so removed.

* Implement filter slot scheduler

details:
  Filters will be stored on the database in cascaded FIFOs. When a FIFO
  queue is full, some filter items are bundled together and stored in the
  next FIFO.
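
example:
  A minimal usage sketch based on the scheduler API added in this commit
  (`QidSchedRef.init` and `addItem` from `aristo_filter/filter_scheduler`,
  import path abbreviated); the layout numbers are the `DEFAULT_QID_QUEUES`
  values:

    import aristo/aristo_filter/[filter_desc, filter_scheduler]

    let
      scd = QidSchedRef.init [(128, 0), (64, 63), (64, 127), (64, 255)]
      w = scd.addItem()   # instructions for storing one more filter item
    # `w.exec` lists the SaveQid/HoldQid/DequQid actions to be applied to
    # the backend; `w.fifo` is the updated queue state to store alongside.
    scd[] = w.fifo[]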
Jordan Hrycaj 2023-08-25 23:53:59 +01:00 committed by GitHub
parent 91704cd3ae
commit 465d694834
32 changed files with 1088 additions and 318 deletions

View File

@ -25,8 +25,7 @@ import
export
append, read
import aristo/aristo_desc/[
aristo_types_identifiers, aristo_types_structural]
import aristo/aristo_desc/[desc_identifiers, desc_structural]
export
AristoAccount,
PayloadRef,

View File

@ -402,7 +402,8 @@ assumed, i.e. the list with the single vertex ID *1*.
This list is used to control the filters on the database. By holding some IDs
in a dedicated list (e.g. the latest filters) one can quickly access particular
entries without searching through the set of filters.
entries without searching through the set of filters. In the current
implementation this list comes in ID pairs, i.e. the number of entries is even.
### 4.10 Serialisation record identifier identification

View File

@ -12,7 +12,7 @@
import
eth/[common, trie/nibbles],
./aristo_desc/aristo_types_identifiers
./aristo_desc/desc_identifiers
const
EmptyBlob* = seq[byte].default
@ -24,7 +24,7 @@ const
EmptyVidSeq* = seq[VertexID].default
## Useful shortcut
EmptyFidSeq* = seq[FilterID].default
EmptyQidPairSeq* = seq[(QueueID,QueueID)].default
## Useful shortcut
VOID_CODE_HASH* = EMPTY_CODE_HASH
@ -36,4 +36,10 @@ const
VOID_HASH_LABEL* = HashLabel(root: VertexID(0), key: VOID_HASH_KEY)
## Void equivalent for Merkle hash value
DEFAULT_QID_QUEUES* = [
(128, 0), ## Consecutive list of 128 filter slots
( 64, 63), ## Overflow list, 64 filters, skipping 63 filters in-between
( 64, 127), ## ..
( 64, 255)]
# End

View File

@ -16,7 +16,8 @@ import
results,
stew/byteutils,
"."/[aristo_constants, aristo_desc, aristo_hike, aristo_init],
./aristo_init/[aristo_memory, aristo_rocksdb]
./aristo_init/[memory_db, rocks_db],
./aristo_filter/filter_desc
export
TypedBackendRef, aristo_init.to
@ -85,11 +86,28 @@ proc ppVid(vid: VertexID; pfx = true): string =
else:
result &= "ø"
proc ppFid(fid: FilterID): string =
if fid.isValid:
"%" & fid.uint64.toHex.stripZeros.toLowerAscii
else:
"ø"
proc ppQid(qid: QueueID): string =
if not qid.isValid:
return "ø"
let
chn = qid.uint64 shr 62
qid = qid.uint64 and 0x3fff_ffff_ffff_ffffu64
result = "%"
if 0 < chn:
result &= $chn & ":"
if 0x0fff_ffff_ffff_ffffu64 <= qid.uint64:
block here:
if qid.uint64 == 0x0fff_ffff_ffff_ffffu64:
result &= "(2^60-1)"
elif qid.uint64 == 0x1fff_ffff_ffff_ffffu64:
result &= "(2^61-1)"
elif qid.uint64 == 0x3fff_ffff_ffff_ffffu64:
result &= "(2^62-1)"
else:
break here
return
result &= qid.uint64.toHex.stripZeros.toLowerAscii
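when isMainModule:
  # A few renderings implied by the branches above (hand-checked sketch,
  # kept module-local since `ppQid` is private):
  doAssert QueueID(7).ppQid == "%7"                     # queue 0, slot 7
  doAssert QueueID((1u64 shl 62) or 6).ppQid == "%1:6"  # queue 1, slot 6
  doAssert QueueID(0).ppQid == "ø"                      # unset/invalid ID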
proc ppVidList(vGen: openArray[VertexID]): string =
"[" & vGen.mapIt(it.ppVid).join(",") & "]"
@ -450,8 +468,17 @@ proc pp*(lty: LeafTie, db = AristoDbRef()): string =
proc pp*(vid: VertexID): string =
vid.ppVid
proc pp*(fid: FilterID): string =
fid.ppFid
proc pp*(qid: QueueID): string =
qid.ppQid
proc pp*(a: openArray[(QueueID,QueueID)]): string =
"[" & a.toSeq.mapIt("(" & it[0].pp & "," & it[1].pp & ")").join(",") & "]"
proc pp*(a: QidAction): string =
($a.op).replace("Qid", "") & "(" & a.qid.pp & "," & a.xid.pp & ")"
proc pp*(a: openArray[QidAction]): string =
"[" & a.toSeq.mapIt(it.pp).join(",") & "]"
proc pp*(vGen: openArray[VertexID]): string =
vGen.ppVidList

View File

@ -25,16 +25,14 @@ import
std/[hashes, sets, tables],
eth/common,
./aristo_constants,
./aristo_desc/[
aristo_error, aristo_types_identifiers, aristo_types_structural]
./aristo_desc/[desc_error, desc_identifiers, desc_structural]
from ./aristo_desc/aristo_types_backend
from ./aristo_desc/desc_backend
import BackendRef
# Not auto-exporting backend
export
aristo_constants, aristo_error, aristo_types_identifiers,
aristo_types_structural
aristo_constants, desc_error, desc_identifiers, desc_structural
type
AristoTxRef* = ref object
@ -108,8 +106,8 @@ func isValid*(lbl: HashLabel): bool =
func isValid*(vid: VertexID): bool =
vid != VertexID(0)
func isValid*(fid: FilterID): bool =
fid != FilterID(0)
func isValid*(qid: QueueID): bool =
qid != QueueID(0)
# ------------------------------------------------------------------------------
# Public functions, miscellaneous
@ -120,8 +118,8 @@ func hash*(db: AristoDbRef): Hash =
## Table/KeyedQueue/HashSet mixin
cast[pointer](db).hash
# Note that the below `init()` function cannot go into
# `aristo_types_identifiers` as this would result in a circular import.
# Note that the below `init()` function cannot go into `desc_identifiers` as
# this would result in a circular import.
func init*(key: var HashKey; data: openArray[byte]): bool =
## Import argument `data` into `key` which must have length either `32`, or
## `0`. The latter case is equivalent to an all zero byte array of size `32`.

View File

@ -16,7 +16,7 @@
import
results,
"."/[aristo_error, aristo_types_identifiers, aristo_types_structural]
"."/[desc_error, desc_identifiers, desc_structural]
type
GetVtxFn* =
@ -30,7 +30,7 @@ type
## `Aristo DB` hash lookup value.
GetFilFn* =
proc(fid: FilterID): Result[FilterRef,AristoError]
proc(qid: QueueID): Result[FilterRef,AristoError]
{.gcsafe, raises: [].}
## Generic backend database retrieval function for a filter record.
@ -39,10 +39,10 @@ type
## Generic backend database retrieval function for the ID generator
## `Aristo DB` state record.
GetFasFn* =
proc(): Result[seq[FilterID],AristoError] {.gcsafe, raises: [].}
## Generic backend database retrieval function for some administration
## of the filters (e.g. the top ID.)
GetFqsFn* =
proc(): Result[seq[(QueueID,QueueID)],AristoError] {.gcsafe, raises: [].}
## Generic backend database retrieval function for some filter queue
## administration data (e.g. the bottom/top ID.)
# -------------
@ -69,7 +69,7 @@ type
## values indicate that records should be deleted.
PutFilFn* =
proc(hdl: PutHdlRef; vf: openArray[(FilterID,FilterRef)])
proc(hdl: PutHdlRef; qf: openArray[(QueueID,FilterRef)])
{.gcsafe, raises: [].}
## Generic backend database storage function for filter records.
@ -79,11 +79,11 @@ type
## Generic backend database ID generator state storage function. This
## function replaces the current generator state.
PutFasFn* =
proc(hdl: PutHdlRef; vs: openArray[FilterID])
PutFqsFn* =
proc(hdl: PutHdlRef; vs: openArray[(QueueID,QueueID)])
{.gcsafe, raises: [].}
## Generic backend database filter ID state storage function. This
## function replaces the current filter ID state.
## function replaces the current filter ID list.
PutEndFn* =
proc(hdl: PutHdlRef): AristoError {.gcsafe, raises: [].}
@ -106,14 +106,14 @@ type
getKeyFn*: GetKeyFn ## Read Merkle hash/key
getFilFn*: GetFilFn ## Read back log filter
getIdgFn*: GetIdgFn ## Read vertex ID generator state
getFasFn*: GetFasFn ## Read filter ID state
getFqsFn*: GetFqsFn ## Read filter ID state
putBegFn*: PutBegFn ## Start bulk store session
putVtxFn*: PutVtxFn ## Bulk store vertex records
putKeyFn*: PutKeyFn ## Bulk store vertex hashes
putFilFn*: PutFilFn ## Store back log filter
putIdgFn*: PutIdgFn ## Store ID generator state
putFasFn*: PutFasFn ## Store filter ID state
putFqsFn*: PutFqsFn ## Store filter ID state
putEndFn*: PutEndFn ## Commit bulk store session
closeFn*: CloseFn ## Generic destructor

View File

@ -188,7 +188,7 @@ type
GetKeyNotFound
GetFilNotFound
GetIdgNotFound
GetFasNotFound
GetFqsNotFound
# RocksDB backend
RdbBeCantCreateDataDir

View File

@ -23,9 +23,13 @@ type
ByteArray32* = array[32,byte]
## Used for 32 byte hash components repurposed as Merkle hash labels.
FilterID* = distinct uint64
QueueID* = distinct uint64
## Identifier used to tag filter logs stored on the backend.
FilterID* = distinct uint64
## Identifier used to identify a particular filter. It is generated along
## with the filter.
VertexID* = distinct uint64
## Unique identifier for a vertex of the `Aristo Trie`. The vertex is the
## prefix tree (aka `Patricia Trie`) component. When augmented by hash
@ -84,21 +88,44 @@ func `==`*(a, b: VertexID): bool {.borrow.}
func cmp*(a, b: VertexID): int {.borrow.}
func `$`*(a: VertexID): string {.borrow.}
func `==`*(a: VertexID; b: static[uint]): bool =
a == VertexID(b)
func `==`*(a: VertexID; b: static[uint]): bool = (a == VertexID(b))
# Scalar model extension for `IntervalSetRef[VertexID,uint64]`
proc `+`*(a: VertexID; b: uint64): VertexID = (a.uint64+b).VertexID
proc `-`*(a: VertexID; b: uint64): VertexID = (a.uint64-b).VertexID
proc `-`*(a, b: VertexID): uint64 = (a.uint64 - b.uint64)
# Scalar model extension as in `IntervalSetRef[VertexID,uint64]`
func `+`*(a: VertexID; b: uint64): VertexID = (a.uint64+b).VertexID
func `-`*(a: VertexID; b: uint64): VertexID = (a.uint64-b).VertexID
func `-`*(a, b: VertexID): uint64 = (a.uint64 - b.uint64)
# ------------------------------------------------------------------------------
# Public helpers: `QueueID` scalar data model
# ------------------------------------------------------------------------------
func `<`*(a, b: QueueID): bool {.borrow.}
func `<=`*(a, b: QueueID): bool {.borrow.}
func `==`*(a, b: QueueID): bool {.borrow.}
func cmp*(a, b: QueueID): int {.borrow.}
func `$`*(a: QueueID): string {.borrow.}
func `==`*(a: QueueID; b: static[uint]): bool = (a == QueueID(b))
func `+`*(a: QueueID; b: uint64): QueueID = (a.uint64+b).QueueID
func `-`*(a: QueueID; b: uint64): QueueID = (a.uint64-b).QueueID
func `-`*(a, b: QueueID): uint64 = (a.uint64 - b.uint64)
# ------------------------------------------------------------------------------
# Public helpers: `FilterID` scalar data model
# ------------------------------------------------------------------------------
func `<`*(a, b: FilterID): bool {.borrow.}
func `<=`*(a, b: FilterID): bool {.borrow.}
func `==`*(a, b: FilterID): bool {.borrow.}
func `$`*(a: FilterID): string {.borrow.}
func `==`*(a: FilterID; b: static[uint]): bool = (a == FilterID(b))
func `+`*(a: FilterID; b: uint64): FilterID = (a.uint64+b).FilterID
func `-`*(a: FilterID; b: uint64): FilterID = (a.uint64-b).FilterID
func `-`*(a, b: FilterID): uint64 = (a.uint64 - b.uint64)
# ------------------------------------------------------------------------------
# Public helpers: `HashID` scalar data model
# ------------------------------------------------------------------------------

View File

@ -17,7 +17,7 @@
import
std/[sets, tables],
eth/[common, trie/nibbles],
"."/[aristo_error, aristo_types_identifiers]
"."/[desc_error, desc_identifiers]
type
VertexType* = enum

View File

@ -15,131 +15,9 @@
import
std/[sequtils, sets, tables],
results,
./aristo_desc/aristo_types_backend,
"."/[aristo_desc, aristo_get, aristo_vid]
type
StateRootPair = object
be: HashKey
fg: HashKey
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc getLayerStateRoots(
db: AristoDbRef;
layer: LayerRef;
chunkedMpt: bool;
): Result[StateRootPair,AristoError] =
## Get the Merkle hash key for target state root to arrive at after this
## reverse filter was applied.
var spr: StateRootPair
spr.be = block:
let rc = db.getKeyBE VertexID(1)
if rc.isOk:
rc.value
elif rc.error == GetKeyNotFound:
VOID_HASH_KEY
else:
return err(rc.error)
block:
spr.fg = layer.kMap.getOrVoid(VertexID 1).key
if spr.fg.isValid:
return ok(spr)
if chunkedMpt:
let vid = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: spr.be)
if vid == VertexID(1):
spr.fg = spr.be
return ok(spr)
if layer.sTab.len == 0 and
layer.kMap.len == 0 and
layer.pAmk.len == 0:
return err(FilPrettyPointlessLayer)
err(FilStateRootMismatch)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc merge(
db: AristoDbRef;
upper: FilterRef; # Src filter, `nil` is ok
lower: FilterRef; # Trg filter, `nil` is ok
beStateRoot: HashKey; # Merkle hash key
): Result[FilterRef,(VertexID,AristoError)] =
## Merge argument `upper` into the `lower` filter instance.
##
## Comparing before and after merge
## ::
## current | merged
## ----------------------------+--------------------------------
## trg2 --upper-- (src2==trg1) |
## | trg2 --newFilter-- (src1==trg0)
## trg1 --lower-- (src1==trg0) |
## |
## trg0 --beStateRoot | trg0 --beStateRoot
## |
##
# Degenerate case: `upper` is void
if lower.isNil:
if upper.isNil:
# Even more degenerate case when both filters are void
return ok FilterRef(nil)
if upper.src != beStateRoot:
return err((VertexID(1),FilStateRootMismatch))
return ok(upper)
# Degenerate case: `upper` is non-trivial and `lower` is void
if upper.isNil:
if lower.src != beStateRoot:
return err((VertexID(0), FilStateRootMismatch))
return ok(lower)
# Verify stackability
if upper.src != lower.trg or
lower.src != beStateRoot:
return err((VertexID(0), FilStateRootMismatch))
# There is no need to deep copy table vertices as they will not be modified.
let newFilter = FilterRef(
src: lower.src,
sTab: lower.sTab,
kMap: lower.kMap,
vGen: upper.vGen,
trg: upper.trg)
for (vid,vtx) in upper.sTab.pairs:
if vtx.isValid or not newFilter.sTab.hasKey vid:
newFilter.sTab[vid] = vtx
elif newFilter.sTab.getOrVoid(vid).isValid:
let rc = db.getVtxUBE vid
if rc.isOk:
newFilter.sTab[vid] = vtx # VertexRef(nil)
elif rc.error == GetVtxNotFound:
newFilter.sTab.del vid
else:
return err((vid,rc.error))
for (vid,key) in upper.kMap.pairs:
if key.isValid or not newFilter.kMap.hasKey vid:
newFilter.kMap[vid] = key
elif newFilter.kMap.getOrVoid(vid).isValid:
let rc = db.getKeyUBE vid
if rc.isOk:
newFilter.kMap[vid] = key # VOID_HASH_KEY
elif rc.error == GetKeyNotFound:
newFilter.kMap.del vid
else:
return err((vid,rc.error))
ok newFilter
"."/[aristo_desc, aristo_get, aristo_vid],
./aristo_desc/desc_backend,
./aristo_filter/[filter_desc, filter_helpers]
# ------------------------------------------------------------------------------
# Public helpers

View File

@ -0,0 +1,117 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import
../aristo_desc
type
StateRootPair* = object
## Helper structure for analysing state roots.
be*: HashKey ## Backend state root
fg*: HashKey ## Layer or filter implied state root
# ----------------
QidAction* = object
## Instruction for administering filter queue ID slots. The op-code is
## followed by one or two queue ID arguments. In case of a two arguments,
## the value of the second queue ID is never smaller than the first one.
op*: QidOp ## Action, followed by at most two queue IDs
qid*: QueueID ## Action argument
xid*: QueueID ## Second action argument for range argument
QidOp* = enum
Oops = 0
SaveQid ## Store new item
HoldQid ## Move/append range items to local queue
DequQid ## Store merged local queue items
QidLayoutRef* = ref object
## Layout of cascaded list of filter ID slot queues where a slot queue
## with index `N+1` serves as an overflow queue of slot queue `N`.
q*: array[4,QidSpec]
QidSpec* = tuple
## Layout of a filter ID slot queue
size: uint ## Capacity of queue, length within `1..wrap`
width: uint ## Instance gaps (relative to prev. item)
wrap: QueueID ## Range `1..wrap` for round-robin queue
QidSchedRef* = ref object of RootRef
## Current state of the filter queues
ctx*: QidLayoutRef ## Organisation of the FIFO
state*: seq[(QueueID,QueueID)] ## Current fill state
const
DefaultQidWrap = QueueID(0x3fff_ffff_ffff_ffffu64)
QidSpecSizeMax* = high(uint32).uint
## Maximum value allowed for a `size` value of a `QidSpec` object
QidSpecWidthMax* = high(uint32).uint
## Maximum value allowed for a `width` value of a `QidSpec` object
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
func max(a, b, c: int): int =
max(max(a,b),c)
# ------------------------------------------------------------------------------
# Public helpers
# ------------------------------------------------------------------------------
func to*(a: array[4,tuple[size, width: int]]; T: type QidLayoutRef): T =
## Convert a size-width array to a `QidLayoutRef` layout. Overly large
## array field values are clipped to their maximal size.
var q: array[4,QidSpec]
for n in 0..3:
q[n] = (min(a[n].size.uint, QidSpecSizeMax),
min(a[n].width.uint, QidSpecWidthMax),
DefaultQidWrap)
q[0].width = 0
T(q: q)
func to*(a: array[4,tuple[size, width, wrap: int]]; T: type QidLayoutRef): T =
## Convert a size-width-wrap array to a `QidLayoutRef` layout. Overly large
## array field values are clipped to their maximal size. Too small `wrap`
## values are adjusted to their minimal size.
var q: array[4,QidSpec]
for n in 0..2:
q[n] = (min(a[n].size.uint, QidSpecSizeMax),
min(a[n].width.uint, QidSpecWidthMax),
QueueID(max(a[n].size + a[n+1].width, a[n].width+1, a[n].wrap)))
q[0].width = 0
q[3] = (min(a[3].size.uint, QidSpecSizeMax),
min(a[3].width.uint, QidSpecWidthMax),
QueueID(max(a[3].size, a[3].width, a[3].wrap)))
T(q: q)
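when isMainModule:
  # Hand-checked sketch of the clipping rules above, using the `QidSlotLyo`
  # layout from the unit tests:
  let lyo = [(4,0,10), (3,3,10), (3,4,10), (3,5,10)].to(QidLayoutRef)
  # queue 0: wrap = max(4+3, 0+1, 10) = 10 and the width is forced to 0
  doAssert lyo.q[0].width == 0
  # queue 1: wrap = max(3+4, 3+1, 10) = 10, i.e. the given value is kept
  doAssert lyo.q[1].wrap == QueueID(10)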
# ------------------------------------------------------------------------------
# Public constructors
# ------------------------------------------------------------------------------
func init*(T: type QidSchedRef; a: array[4,(int,int)]): T =
## Constructor, see comments at the converter function `to()` for adjustments
## of the layout argument `a`.
T(ctx: a.to(QidLayoutRef))
func init*(T: type QidSchedRef; a: array[4,(int,int,int)]): T =
## Constructor, see comments at the converter function `to()` for adjustments
## of the layout argument `a`.
T(ctx: a.to(QidLayoutRef))
func init*(T: type QidSchedRef; ctx: QidLayoutRef): T =
T(ctx: ctx)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,133 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import
std/tables,
results,
".."/[aristo_desc, aristo_get],
./filter_desc
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc getLayerStateRoots*(
db: AristoDbRef;
layer: LayerRef;
chunkedMpt: bool;
): Result[StateRootPair,AristoError] =
## Get the Merkle hash key for target state root to arrive at after this
## reverse filter was applied.
var spr: StateRootPair
spr.be = block:
let rc = db.getKeyBE VertexID(1)
if rc.isOk:
rc.value
elif rc.error == GetKeyNotFound:
VOID_HASH_KEY
else:
return err(rc.error)
block:
spr.fg = layer.kMap.getOrVoid(VertexID 1).key
if spr.fg.isValid:
return ok(spr)
if chunkedMpt:
let vid = layer.pAmk.getOrVoid HashLabel(root: VertexID(1), key: spr.be)
if vid == VertexID(1):
spr.fg = spr.be
return ok(spr)
if layer.sTab.len == 0 and
layer.kMap.len == 0 and
layer.pAmk.len == 0:
return err(FilPrettyPointlessLayer)
err(FilStateRootMismatch)
proc merge*(
db: AristoDbRef;
upper: FilterRef; # Src filter, `nil` is ok
lower: FilterRef; # Trg filter, `nil` is ok
beStateRoot: HashKey; # Merkle hash key
): Result[FilterRef,(VertexID,AristoError)] =
## Merge argument `upper` into the `lower` filter instance.
##
## Comparing before and after merge
## ::
## current | merged
## ----------------------------+--------------------------------
## trg2 --upper-- (src2==trg1) |
## | trg2 --newFilter-- (src1==trg0)
## trg1 --lower-- (src1==trg0) |
## |
## trg0 --beStateRoot | trg0 --beStateRoot
## |
##
# Degenerate case: `upper` is void
if lower.isNil:
if upper.isNil:
# Even more degenerate case when both filters are void
return ok FilterRef(nil)
if upper.src != beStateRoot:
return err((VertexID(1),FilStateRootMismatch))
return ok(upper)
# Degenerate case: `upper` is non-trivial and `lower` is void
if upper.isNil:
if lower.src != beStateRoot:
return err((VertexID(0), FilStateRootMismatch))
return ok(lower)
# Verify stackability
if upper.src != lower.trg or
lower.src != beStateRoot:
return err((VertexID(0), FilStateRootMismatch))
# There is no need to deep copy table vertices as they will not be modified.
let newFilter = FilterRef(
src: lower.src,
sTab: lower.sTab,
kMap: lower.kMap,
vGen: upper.vGen,
trg: upper.trg)
for (vid,vtx) in upper.sTab.pairs:
if vtx.isValid or not newFilter.sTab.hasKey vid:
newFilter.sTab[vid] = vtx
elif newFilter.sTab.getOrVoid(vid).isValid:
let rc = db.getVtxUBE vid
if rc.isOk:
newFilter.sTab[vid] = vtx # VertexRef(nil)
elif rc.error == GetVtxNotFound:
newFilter.sTab.del vid
else:
return err((vid,rc.error))
for (vid,key) in upper.kMap.pairs:
if key.isValid or not newFilter.kMap.hasKey vid:
newFilter.kMap[vid] = key
elif newFilter.kMap.getOrVoid(vid).isValid:
let rc = db.getKeyUBE vid
if rc.isOk:
newFilter.kMap[vid] = key # VOID_HASH_KEY
elif rc.error == GetKeyNotFound:
newFilter.kMap.del vid
else:
return err((vid,rc.error))
ok newFilter
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -0,0 +1,331 @@
# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
import
std/[algorithm, sequtils],
".."/[aristo_constants, aristo_desc],
./filter_desc
const
ZeroQidPair = (QueueID(0),QueueID(0))
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
func `+`*(a: QueueID; b: uint): QueueID = (a.uint64+b.uint64).QueueID
func `-`*(a: QueueID; b: uint): QueueID = (a.uint64-b.uint64).QueueID
func `<`(a: static[uint]; b: QueueID): bool = QueueID(a) < b
func globalQid(queue: int, qid: QueueID): QueueID =
QueueID((queue.uint64 shl 62) or qid.uint64)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
func fifoLen(
fifo: (QueueID,QueueID);
wrap: QueueID;
): uint =
## Number of entries in the wrap-around fifo, organised so that `fifo[0]`
## is the oldest entry and `fifo[1]` is the latest/newest entry.
##
if fifo[0] == 0:
return 0
if fifo[0] <= fifo[1]:
# Filling up
# ::
# | :
# | fifo[0]--> 3
# | 4
# | 5 <--fifo[1]
# | :
#
return ((fifo[1] + 1) - fifo[0]).uint
else:
# After wrap-around
# ::
# | :
# | 3 <--fifo[1]
# | 4
# | fifo[0]--> 5
# | :
# | wrap
return ((fifo[1] + 1) + (wrap - fifo[0])).uint
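when isMainModule:
  # Hand-checked examples for a queue with `wrap = 10`:
  doAssert fifoLen((QueueID(3), QueueID(5)), QueueID(10)) == 3  # slots 3,4,5
  doAssert fifoLen((QueueID(5), QueueID(3)), QueueID(10)) == 9  # 5..10, 1..3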
func fifoAdd(
fifo: (QueueID,QueueID);
wrap: QueueID;
): tuple[doDel: QueueID, fifo: (QueueID,QueueID)] =
## Add an entry to the wrap-around fifo, organised so that `fifo[0]` is
## the oldest entry and `fifo[1]` is the latest/newest entry.
##
if fifo[0] == 0:
return (QueueID(0), (QueueID(1),QueueID(1)))
if fifo[0] <= fifo[1]:
if fifo[1] < wrap:
# Filling up
# ::
# | :
# | fifo[0]--> 3
# | 4
# | 5 <--fifo[1]
# | :
#
return (QueueID(0), (fifo[0],fifo[1]+1))
elif 1 < fifo[0]:
# Wrapping
# ::
# | :
# | fifo[0]--> 3
# | 4
# | :
# | wrap <--fifo[1]
#
return (QueueID(0), (fifo[0],QueueID(1)))
elif 1 < wrap:
# Wrapping and flushing out
# ::
# | fifo[0]--> 1
# | 2
# | :
# | wrap <--fifo[1]
#
return (QueueID(1), (QueueID(2),QueueID(1)))
else:
# Single entry FIFO
return (QueueID(1), (QueueID(1),QueueID(1)))
else:
if fifo[1] + 1 < fifo[0]:
# Filling up
# ::
# | :
# | 3 <--fifo[1]
# | 4
# | fifo[0]--> 5
# | :
# | wrap
return (QueueID(0), (fifo[0],fifo[1]+1))
elif fifo[0] < wrap:
# Flushing out
# ::
# | :
# | 4 <--fifo[1]
# | fifo[0]--> 5
# | :
# | wrap
return (fifo[0], (fifo[0]+1,fifo[1]+1))
else:
# Wrapping and flushing out
# ::
# | :
# | wrap-1 <--fifo[1]
# | fifo[0]--> wrap
return (wrap, (QueueID(1),wrap))
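when isMainModule:
  # Hand-checked transitions, again with `wrap = 10`:
  # empty queue: the first entry goes into slot 1, nothing deleted
  doAssert fifoAdd((QueueID(0), QueueID(0)), QueueID(10)).fifo ==
    (QueueID(1), QueueID(1))
  # filling up: 3..5 grows to 3..6
  doAssert fifoAdd((QueueID(3), QueueID(5)), QueueID(10)).fifo ==
    (QueueID(3), QueueID(6))
  # full wrapped queue 1..10: slot 1 is flushed out, state becomes 2..10,1
  block:
    let w = fifoAdd((QueueID(1), QueueID(10)), QueueID(10))
    doAssert w.doDel == QueueID(1) and w.fifo == (QueueID(2), QueueID(1))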
func fifoDel(
fifo: (QueueID,QueueID);
nDel: uint;
wrap: QueueID;
): tuple[doDel: seq[(QueueID,QueueID)], fifo: (QueueID,QueueID)] =
## Delete the range `nDel` of filter IDs from the FIFO. The entries to be
## deleted are taken from the oldest ones added.
##
if fifo[0] == 0:
return (EmptyQidPairSeq, ZeroQidPair)
if fifo[0] <= fifo[1]:
# Take off the left end from `fifo[0] .. fifo[1]`
# ::
# | :
# | fifo[0]--> 3 ^
# | 4 | to be deleted
# | 5 v
# | 6 <--fifo[1]
# | :
#
if nDel.uint64 <= fifo[1] - fifo[0]:
return (@[(fifo[0], fifo[0] + nDel - 1)], (fifo[0] + nDel, fifo[1]))
else:
return (@[fifo], ZeroQidPair)
else:
if nDel.uint64 <= (wrap - fifo[0] + 1):
# Take off the left end from `fifo[0] .. wrap`
# ::
# | :
# | 3 <--fifo[1]
# | 4
# | fifo[0]--> 5 ^
# | 6 | to be deleted
# | 7 v
# | :
# | wrap
#
let topRange = (fifo[0], fifo[0] + nDel - 1)
if nDel.uint64 < (wrap - fifo[0] + 1):
return (@[topRange], (fifo[0] + nDel, fifo[1]))
else:
return (@[topRange], (QueueID(1), fifo[1]))
else:
# Interval `fifo[0] .. wrap` fully deleted, check `1 .. fifo[0]`
# ::
# | 1 ^
# | 2 | to be deleted
# | : v
# | 6
# | 7<--fifo[1]
# | fifo[0]--> 8 ^
# | 9 | to be deleted
# | : :
# | wrap v
#
let
topRange = (fifo[0], wrap)
nDelLeft = nDel.uint64 - (wrap - fifo[0] + 1)
# Take off the left end from `QueueID(1) .. fifo[1]`
if nDelLeft <= fifo[1] - QueueID(0):
let bottomRange = (QueueID(1), QueueID(nDelLeft))
if nDelLeft < fifo[1] - QueueID(0):
return (@[bottomRange, topRange], (QueueID(nDelLeft+1), fifo[1]))
else:
return (@[bottomRange, topRange], ZeroQidPair)
else:
# Delete all available
return (@[(QueueID(1), fifo[1]), (fifo[0], wrap)], ZeroQidPair)
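when isMainModule:
  # Hand-checked examples, `wrap = 10` as before:
  block:
    # linear case: delete the two oldest entries from 3..8, leaving 5..8
    let w = fifoDel((QueueID(3), QueueID(8)), 2, QueueID(10))
    doAssert w.doDel == @[(QueueID(3), QueueID(4))]
    doAssert w.fifo == (QueueID(5), QueueID(8))
  block:
    # wrapped case 8..10,1..2: deleting four entries crosses the wrap
    # boundary; note that the bottom range is listed first, as coded above
    let w = fifoDel((QueueID(8), QueueID(2)), 4, QueueID(10))
    doAssert w.doDel == @[(QueueID(1), QueueID(1)), (QueueID(8), QueueID(10))]
    doAssert w.fifo == (QueueID(2), QueueID(2))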
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc stats*(
ctx: openArray[tuple[size, width: int]];
): tuple[maxQueue: int, minCovered: int, maxCovered: int] =
## Number of maximally stored and covered queued entries for the argument
## layout `ctx`. The resulting `maxQueue` entry is the maximal number of
## database slots needed, while the `minCovered` and `maxCovered` entries
## indicate the range of the backlog for a fully populated database.
var step = 1
for n in 0 ..< ctx.len:
step *= ctx[n].width + 1
let size = ctx[n].size + ctx[(n+1) mod ctx.len].width
result.maxQueue += size.int
result.minCovered += (ctx[n].size * step).int
result.maxCovered += (size * step).int
proc stats*(
ctx: openArray[tuple[size, width, wrap: int]];
): tuple[maxQueue: int, minCovered: int, maxCovered: int] =
## Variant of `stats()`
ctx.toSeq.mapIt((it[0],it[1])).stats
proc stats*(
ctx: QidLayoutRef;
): tuple[maxQueue: int, minCovered: int, maxCovered: int] =
## Variant of `stats()`
ctx.q.toSeq.mapIt((it[0].int,it[1].int)).stats
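when isMainModule:
  # Worked example (hand-checked against the loop above) for the layout
  # `QidSlotLyo = [(4,0,10),(3,3,10),(3,4,10),(3,5,10)]` from the unit tests:
  #   n=0: step=1,  size=4+3=7    n=1: step=4,   size=3+4=7
  #   n=2: step=20, size=3+5=8    n=3: step=120, size=3+0=3
  doAssert [(4,0,10), (3,3,10), (3,4,10), (3,5,10)].stats ==
    (maxQueue: 25, minCovered: 436, maxCovered: 555)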
proc addItem*(
fifo: QidSchedRef;
): tuple[exec: seq[QidAction], fifo: QidSchedRef] =
## Get the instructions for adding a new slot to the cascaded queues. The
## argument `fifo` is a complete address state of a cascaded *FIFO* as
## applied to a database. Only the *FIFO* queue addresses are needed in
## order to describe how to add another item.
##
## Return value is a list of instructions what to do when adding a new item
## and the new state of the cascaded *FIFO*.
##
## The following instructions may be returned:
## ::
## SaveQid <queue-id> -- Store a new item under the address
## -- <queue-id> on the database.
##
## HoldQid <from-id>..<to-id> -- Move the records accessed by the argument
## -- addresses from the database to the right
## -- end of the local hold queue. The age of
## -- the items on the hold queue increases
## -- left to right.
##
## DequQid <queue-id> -- Merge items from the hold queue into a
## -- new item and store it under the address
## -- <queue-id> on the database. Clear the
## -- hold queue.
##
let
ctx = fifo.ctx.q
var
state = fifo.state
deferred: seq[QidAction] # carry over to next sub-queue
revActions: seq[QidAction] # instructions in reverse order
for n in 0 ..< ctx.len:
if state.len < n + 1:
state.setlen(n + 1)
let
overlapWidth = ctx[(n+1) mod ctx.len].width
carryOverSize = ctx[n].size + overlapWidth
stateLen = state[n].fifoLen ctx[n].wrap
if stateLen < carryOverSize:
state[n] = state[n].fifoAdd(ctx[n].wrap).fifo
let qQidAdded = n.globalQid state[n][1]
if 0 < n:
revActions.add QidAction(op: DequQid, qid: qQidAdded)
else:
revActions.add QidAction(op: SaveQid, qid: qQidAdded)
if 0 < deferred.len:
revActions &= deferred
break
else:
# Full queue segment, carry over to next one
let
extra = stateLen - carryOverSize # should be zero
qDel = state[n].fifoDel(extra + overlapWidth + 1, ctx[n].wrap)
qAdd = qDel.fifo.fifoAdd ctx[n].wrap
qFidAdded = n.globalQid qAdd.fifo[1]
if 0 < n:
revActions.add QidAction(op: DequQid, qid: qFidAdded)
else:
revActions.add QidAction(op: SaveQid, qid: qFidAdded)
if 0 < deferred.len:
revActions &= deferred
deferred.setLen(0)
for w in qDel.doDel:
deferred.add QidAction(
op: HoldQid,
qid: n.globalQid w[0],
xid: n.globalQid w[1])
state[n] = qAdd.fifo
# End loop
(revActions.reversed, QidSchedRef(ctx: fifo.ctx, state: state))
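when isMainModule:
  # A runnable sketch of how a driver might interpret the instructions
  # returned by `addItem()`. The in-memory `fakeDb` table and the `mergeAll`
  # placeholder are illustrative stubs only; they are not part of this
  # commit.
  import std/tables

  var fakeDb: Table[QueueID,FilterRef]

  proc mergeAll(hold: seq[FilterRef]): FilterRef =
    hold[0]   # placeholder: a real driver would merge the held filters

  let
    scd = QidSchedRef.init [(128, 0), (64, 63), (64, 127), (64, 255)]
    w = scd.addItem()
  var hold: seq[FilterRef]
  for act in w.exec:
    case act.op:
    of SaveQid:
      fakeDb[act.qid] = FilterRef()        # store the new filter item
    of HoldQid:
      for qid in act.qid .. act.xid:       # move a range to the hold queue
        hold.add fakeDb.getOrDefault(qid, FilterRef(nil))
        fakeDb.del qid
    of DequQid:
      fakeDb[act.qid] = hold.mergeAll      # re-store one merged item
      hold.setLen(0)
    of Oops:
      doAssert false                       # scheduler failure
  scd[] = w.fifo[]                         # keep the updated queue state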
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------

View File

@ -36,14 +36,14 @@ proc getIdgUBE*(
return be.getIdgFn()
err(GetIdgNotFound)
proc getFasUBE*(
proc getFqsUBE*(
db: AristoDbRef;
): Result[seq[FilterID],AristoError] =
): Result[seq[(QueueID,QueueID)],AristoError] =
## Get the list of filter queue IDs from the unfiltered backend if available.
let be = db.backend
if not be.isNil:
return be.getFasFn()
err(GetFasNotFound)
return be.getFqsFn()
err(GetFqsNotFound)
proc getVtxUBE*(
db: AristoDbRef;
@ -67,12 +67,12 @@ proc getKeyUBE*(
proc getFilUBE*(
db: AristoDbRef;
fid: FilterID;
qid: QueueID;
): Result[FilterRef,AristoError] =
## Get the filter from the unfiltered backend if available.
let be = db.backend
if not be.isNil:
return be.getFilFn fid
return be.getFilFn qid
err GetFilNotFound
# ------------------

View File

@ -11,7 +11,7 @@
import
../aristo_desc,
../aristo_desc/aristo_types_backend
../aristo_desc/desc_backend
const
verifyIxId = true # and false
@ -46,7 +46,7 @@ type
of VtxPfx, KeyPfx:
vid*: VertexID ## Vertex ID where the error occurred
of FilPfx:
fid*: FilterID ## Ditto
qid*: QueueID ## Ditto
of AdmPfx, Oops:
discard
code*: AristoError ## Error code (if any)
@ -58,7 +58,7 @@ type
const
AdmTabIdIdg* = AdminTabID(0) ## Access key for vertex ID generator state
AdmTabIdFas* = AdminTabID(1) ## Access key for filter state
AdmTabIdFqs* = AdminTabID(1) ## Access key for filter queue states
# ------------------------------------------------------------------------------
# Public helpers

View File

@ -33,25 +33,25 @@ import
stew/results,
../aristo_constants,
../aristo_desc,
../aristo_desc/aristo_types_backend,
../aristo_desc/desc_backend,
../aristo_transcode,
./aristo_init_common
./init_common
type
MemBackendRef* = ref object of TypedBackendRef
## Inheriting table so access can be extended for debugging purposes
sTab: Table[VertexID,Blob] ## Structural vertex table making up a trie
kMap: Table[VertexID,HashKey] ## Merkle hash key mapping
rFil: Table[FilterID,Blob] ## Backend filters
rFil: Table[QueueID,Blob] ## Backend filters
vGen: Option[seq[VertexID]]
vFas: Option[seq[FilterID]]
vFqs: Option[seq[(QueueID,QueueID)]]
MemPutHdlRef = ref object of TypedPutHdlRef
sTab: Table[VertexID,Blob]
kMap: Table[VertexID,HashKey]
rFil: Table[FilterID,Blob]
rFil: Table[QueueID,Blob]
vGen: Option[seq[VertexID]]
vFas: Option[seq[FilterID]]
vFqs: Option[seq[(QueueID,QueueID)]]
# ------------------------------------------------------------------------------
# Private helpers
@ -99,8 +99,8 @@ proc getKeyFn(db: MemBackendRef): GetKeyFn =
proc getFilFn(db: MemBackendRef): GetFilFn =
result =
proc(fid: FilterID): Result[FilterRef,AristoError] =
let data = db.rFil.getOrDefault(fid, EmptyBlob)
proc(qid: QueueID): Result[FilterRef,AristoError] =
let data = db.rFil.getOrDefault(qid, EmptyBlob)
if 0 < data.len:
return data.deblobify FilterRef
err(GetFilNotFound)
@ -112,12 +112,12 @@ proc getIdgFn(db: MemBackendRef): GetIdgFn =
return ok db.vGen.unsafeGet
err(GetIdgNotFound)
proc getFasFn(db: MemBackendRef): GetFasFn =
proc getFqsFn(db: MemBackendRef): GetFqsFn =
result =
proc(): Result[seq[FilterID],AristoError]=
if db.vFas.isSome:
return ok db.vFas.unsafeGet
err(GetFasNotFound)
proc(): Result[seq[(QueueID,QueueID)],AristoError]=
if db.vFqs.isSome:
return ok db.vFqs.unsafeGet
err(GetFqsNotFound)
# -------------
@ -155,18 +155,18 @@ proc putKeyFn(db: MemBackendRef): PutKeyFn =
proc putFilFn(db: MemBackendRef): PutFilFn =
result =
proc(hdl: PutHdlRef; vf: openArray[(FilterID,FilterRef)]) =
proc(hdl: PutHdlRef; vf: openArray[(QueueID,FilterRef)]) =
let hdl = hdl.getSession db
if hdl.error.isNil:
for (fid,filter) in vf:
for (qid,filter) in vf:
let rc = filter.blobify()
if rc.isErr:
hdl.error = TypedPutHdlErrRef(
pfx: FilPfx,
fid: fid,
qid: qid,
code: rc.error)
return
hdl.rFil[fid] = rc.value
hdl.rFil[qid] = rc.value
proc putIdgFn(db: MemBackendRef): PutIdgFn =
result =
@ -175,12 +175,12 @@ proc putIdgFn(db: MemBackendRef): PutIdgFn =
if hdl.error.isNil:
hdl.vGen = some(vs.toSeq)
proc putFasFn(db: MemBackendRef): PutFasFn =
proc putFqsFn(db: MemBackendRef): PutFqsFn =
result =
proc(hdl: PutHdlRef; fs: openArray[FilterID]) =
proc(hdl: PutHdlRef; fs: openArray[(QueueID,QueueID)]) =
let hdl = hdl.getSession db
if hdl.error.isNil:
hdl.vFas = some(fs.toSeq)
hdl.vFqs = some(fs.toSeq)
proc putEndFn(db: MemBackendRef): PutEndFn =
@ -194,7 +194,7 @@ proc putEndFn(db: MemBackendRef): PutEndFn =
pfx=hdl.error.pfx, vid=hdl.error.vid, error=hdl.error.code
of FilPfx:
debug logTxt "putEndFn: filter failed",
pfx=hdl.error.pfx, fid=hdl.error.fid, error=hdl.error.code
pfx=hdl.error.pfx, qid=hdl.error.qid, error=hdl.error.code
else:
debug logTxt "putEndFn: failed",
pfx=hdl.error.pfx, error=hdl.error.code
@ -212,11 +212,11 @@ proc putEndFn(db: MemBackendRef): PutEndFn =
else:
db.kMap.del vid
for (fid,data) in hdl.rFil.pairs:
if fid.isValid:
db.rFil[fid] = data
for (qid,data) in hdl.rFil.pairs:
if qid.isValid:
db.rFil[qid] = data
else:
db.rFil.del fid
db.rFil.del qid
if hdl.vGen.isSome:
let vGen = hdl.vGen.unsafeGet
@ -225,12 +225,12 @@ proc putEndFn(db: MemBackendRef): PutEndFn =
else:
db.vGen = some(vGen)
if hdl.vFas.isSome:
let vFas = hdl.vFas.unsafeGet
if vFas.len == 0:
db.vFas = none(seq[FilterID])
if hdl.vFqs.isSome:
let vFqs = hdl.vFqs.unsafeGet
if vFqs.len == 0:
db.vFqs = none(seq[(QueueID,QueueID)])
else:
db.vFas = some(vFas)
db.vFqs = some(vFqs)
AristoError(0)
@ -252,14 +252,14 @@ proc memoryBackend*(): BackendRef =
db.getKeyFn = getKeyFn db
db.getFilFn = getFilFn db
db.getIdgFn = getIdgFn db
db.getFasFn = getFasFn db
db.getFqsFn = getFqsFn db
db.putBegFn = putBegFn db
db.putVtxFn = putVtxFn db
db.putKeyFn = putKeyFn db
db.putFilFn = putFilFn db
db.putIdgFn = putIdgFn db
db.putFasFn = putFasFn db
db.putFqsFn = putFqsFn db
db.putEndFn = putEndFn db
db.closeFn = closeFn db
@ -294,16 +294,16 @@ iterator walkKey*(
iterator walkFil*(
be: MemBackendRef;
): tuple[n: int, fid: FilterID, filter: FilterRef] =
): tuple[n: int, qid: QueueID, filter: FilterRef] =
## Iteration over the vertex sub-table.
for n,fid in be.rFil.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.FilterID):
let data = be.rFil.getOrDefault(fid, EmptyBlob)
for n,qid in be.rFil.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.QueueID):
let data = be.rFil.getOrDefault(qid, EmptyBlob)
if 0 < data.len:
let rc = data.deblobify FilterRef
if rc.isErr:
debug logTxt "walkFilFn() skip", n, fid, error=rc.error
debug logTxt "walkFilFn() skip", n,qid, error=rc.error
else:
yield (n, fid, rc.value)
yield (n, qid, rc.value)
iterator walk*(
@ -319,8 +319,8 @@ iterator walk*(
yield(0, AdmPfx, AdmTabIdIdg.uint64, be.vGen.unsafeGet.blobify)
n.inc
if be.vFas.isSome:
yield(0, AdmPfx, AdmTabIdFas.uint64, be.vFas.unsafeGet.blobify)
if be.vFqs.isSome:
yield(0, AdmPfx, AdmTabIdFqs.uint64, be.vFqs.unsafeGet.blobify)
n.inc
for vid in be.sTab.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.VertexID):
@ -333,7 +333,7 @@ iterator walk*(
yield (n, KeyPfx, vid.uint64, key.to(Blob))
n.inc
for lid in be.rFil.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.FilterID):
for lid in be.rFil.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.QueueID):
let data = be.rFil.getOrDefault(lid, EmptyBlob)
if 0 < data.len:
yield (n, FilPfx, lid.uint64, data)

View File

@ -17,8 +17,8 @@ import
std/sets,
results,
../aristo_desc,
../aristo_desc/aristo_types_backend,
"."/[aristo_init_common, aristo_memory]
../aristo_desc/desc_backend,
"."/[init_common, memory_db]
type
VoidBackendRef* = ref object of TypedBackendRef

View File

@ -21,7 +21,7 @@
import
results,
../aristo_desc,
"."/[aristo_init_common, aristo_rocksdb, memory_only]
"."/[init_common, rocks_db, memory_only]
export
RdbBackendRef,
memory_only

View File

@ -33,10 +33,10 @@ import
stew/results,
../aristo_constants,
../aristo_desc,
../aristo_desc/aristo_types_backend,
../aristo_desc/desc_backend,
../aristo_transcode,
./aristo_init_common,
./aristo_rocksdb/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk]
./init_common,
./rocks_db/[rdb_desc, rdb_get, rdb_init, rdb_put, rdb_walk]
logScope:
topics = "aristo-backend"
@ -83,8 +83,8 @@ proc `vtxCache=`(hdl: RdbPutHdlRef; val: tuple[vid: VertexID; data: Blob]) =
proc `keyCache=`(hdl: RdbPutHdlRef; val: tuple[vid: VertexID; data: Blob]) =
hdl.cache[KeyPfx][val.vid.uint64] = val.data
proc `filCache=`(hdl: RdbPutHdlRef; val: tuple[fid: FilterID; data: Blob]) =
hdl.cache[FilPfx][val.fid.uint64] = val.data
proc `filCache=`(hdl: RdbPutHdlRef; val: tuple[qid: QueueID; data: Blob]) =
hdl.cache[FilPfx][val.qid.uint64] = val.data
proc `admCache=`(hdl: RdbPutHdlRef; val: tuple[id: AdminTabID; data: Blob]) =
hdl.cache[AdmPfx][val.id.uint64] = val.data
@ -131,12 +131,12 @@ proc getKeyFn(db: RdbBackendRef): GetKeyFn =
proc getFilFn(db: RdbBackendRef): GetFilFn =
result =
proc(fid: FilterID): Result[FilterRef,AristoError] =
proc(qid: QueueID): Result[FilterRef,AristoError] =
# Fetch serialised data record
let rc = db.rdb.get fid.toOpenArray()
let rc = db.rdb.get qid.toOpenArray()
if rc.isErr:
debug logTxt "getFilFn: failed", fid,
debug logTxt "getFilFn: failed", qid,
error=rc.error[0], info=rc.error[1]
return err(rc.error[0])
@ -163,22 +163,22 @@ proc getIdgFn(db: RdbBackendRef): GetIdgFn =
# Decode data record
rc.value.deblobify seq[VertexID]
proc getFasFn(db: RdbBackendRef): GetFasFn =
proc getFqsFn(db: RdbBackendRef): GetFqsFn =
result =
proc(): Result[seq[FilterID],AristoError]=
proc(): Result[seq[(QueueID,QueueID)],AristoError]=
# Fetch serialised data record
let rc = db.rdb.get AdmTabIdFas.toOpenArray()
let rc = db.rdb.get AdmTabIdFqs.toOpenArray()
if rc.isErr:
debug logTxt "getFosFn: failed", error=rc.error[1]
return err(rc.error[0])
if rc.value.len == 0:
let w = EmptyFidSeq
let w = EmptyQidPairSeq
return ok w
# Decode data record
rc.value.deblobify seq[FilterID]
rc.value.deblobify seq[(QueueID,QueueID)]
# -------------
@ -219,21 +219,21 @@ proc putKeyFn(db: RdbBackendRef): PutKeyFn =
proc putFilFn(db: RdbBackendRef): PutFilFn =
result =
proc(hdl: PutHdlRef; vrps: openArray[(FilterID,FilterRef)]) =
proc(hdl: PutHdlRef; vrps: openArray[(QueueID,FilterRef)]) =
let hdl = hdl.getSession db
if hdl.error.isNil:
for (fid,filter) in vrps:
for (qid,filter) in vrps:
if filter.isValid:
let rc = filter.blobify()
if rc.isErr:
hdl.error = TypedPutHdlErrRef(
pfx: FilPfx,
fid: fid,
qid: qid,
code: rc.error)
return
hdl.filCache = (fid, rc.value)
hdl.filCache = (qid, rc.value)
else:
hdl.filCache = (fid, EmptyBlob)
hdl.filCache = (qid, EmptyBlob)
proc putIdgFn(db: RdbBackendRef): PutIdgFn =
result =
@ -245,15 +245,15 @@ proc putIdgFn(db: RdbBackendRef): PutIdgFn =
else:
hdl.admCache = (AdmTabIdIdg, EmptyBlob)
proc putFasFn(db: RdbBackendRef): PutFasFn =
proc putFqsFn(db: RdbBackendRef): PutFqsFn =
result =
proc(hdl: PutHdlRef; vs: openArray[FilterID]) =
proc(hdl: PutHdlRef; vs: openArray[(QueueID,QueueID)]) =
let hdl = hdl.getSession db
if hdl.error.isNil:
if 0 < vs.len:
hdl.admCache = (AdmTabIdFas, vs.blobify)
hdl.admCache = (AdmTabIdFqs, vs.blobify)
else:
hdl.admCache = (AdmTabIdFas, EmptyBlob)
hdl.admCache = (AdmTabIdFqs, EmptyBlob)
proc putEndFn(db: RdbBackendRef): PutEndFn =
@ -301,14 +301,14 @@ proc rocksDbBackend*(path: string): Result[BackendRef,AristoError] =
db.getKeyFn = getKeyFn db
db.getFilFn = getFilFn db
db.getIdgFn = getIdgFn db
db.getFasFn = getFasFn db
db.getFqsFn = getFqsFn db
db.putBegFn = putBegFn db
db.putVtxFn = putVtxFn db
db.putKeyFn = putKeyFn db
db.putFilFn = putFilFn db
db.putIdgFn = putIdgFn db
db.putFasFn = putFasFn db
db.putFqsFn = putFqsFn db
db.putEndFn = putEndFn db
db.closeFn = closeFn db
@ -349,12 +349,12 @@ iterator walkKey*(
iterator walkFil*(
be: RdbBackendRef;
): tuple[n: int, fid: FilterID, filter: FilterRef] =
): tuple[n: int, qid: QueueID, filter: FilterRef] =
## Variant of `walk()` iteration over the filter sub-table.
for (n, xid, data) in be.rdb.walk FilPfx:
let rc = data.deblobify FilterRef
if rc.isOk:
yield (n, FilterID(xid), rc.value)
yield (n, QueueID(xid), rc.value)
# ------------------------------------------------------------------------------
# End

View File

@ -19,7 +19,7 @@ import
rocksdb,
stint,
../../aristo_desc,
../aristo_init_common.nim
../init_common
type
RdbInst* = object
@ -55,8 +55,8 @@ proc toRdbKey*(id: uint64; pfx: StorageType): RdbKey =
template toOpenArray*(vid: VertexID; pfx: StorageType): openArray[byte] =
vid.uint64.toRdbKey(pfx).toOpenArray(0, sizeof uint64)
template toOpenArray*(fid: FilterID): openArray[byte] =
fid.uint64.toRdbKey(FilPfx).toOpenArray(0, sizeof uint64)
template toOpenArray*(qid: QueueID): openArray[byte] =
qid.uint64.toRdbKey(FilPfx).toOpenArray(0, sizeof uint64)
template toOpenArray*(aid: AdminTabID): openArray[byte] =
aid.uint64.toRdbKey(AdmPfx).toOpenArray(0, sizeof uint64)

View File

@ -87,7 +87,7 @@ proc destroy*(rdb: var RdbInst; flush: bool) =
base = rdb.basePath / BaseFolder
try:
(base / TempFolder).removeDir
if flush:
(base / DataFolder).removeDir

View File

@ -20,7 +20,7 @@ import
rocksdb,
stew/results,
"../.."/[aristo_constants, aristo_desc],
../aristo_init_common,
../init_common,
./rdb_desc
logScope:

View File

@ -17,7 +17,7 @@ import
std/sequtils,
eth/common,
rocksdb,
../aristo_init_common,
../init_common,
./rdb_desc
# ------------------------------------------------------------------------------

View File

@ -378,20 +378,21 @@ proc blobify*(filter: FilterRef): Result[Blob, AristoError] =
ok data
proc blobify*(vFos: openArray[FilterID]; data: var Blob) =
## This function serialises a list of filter IDs.
proc blobify*(vFqs: openArray[(QueueID,QueueID)]; data: var Blob) =
## This function serialises a list of filter queue IDs.
## ::
## uint64,uint64, ... -- list of ID pairs
## 0x7e -- marker(8)
##
data.setLen(0)
for w in vFos:
data &= w.uint64.toBytesBE.toSeq
for w in vFqs:
data &= w[0].uint64.toBytesBE.toSeq
data &= w[1].uint64.toBytesBE.toSeq
data.add 0x7Eu8
proc blobify*(vFos: openArray[FilterID]): Blob =
proc blobify*(vFqs: openArray[(QueueID,QueueID)]): Blob =
## Variant of `blobify()`
vFos.blobify result
vFqs.blobify result
# -------------
@ -635,27 +636,33 @@ proc deblobify*(data: Blob; T: type FilterRef): Result[T,AristoError] =
return err(error)
ok filter
proc deblobify*(data: Blob; vFas: var seq[FilterID]): AristoError =
## De-serialise the data record encoded with `blobify()` into a filter ID
argument list `vFas`.
proc deblobify*(data: Blob; vFqs: var seq[(QueueID,QueueID)]): AristoError =
## De-serialise the data record encoded with `blobify()` into a filter queue
## ID argument list `vFqs`.
if data.len == 0:
vFas = @[]
vFqs = @[]
else:
if (data.len mod 8) != 1:
if (data.len mod 16) != 1:
return DeblobSizeGarbled
if data[^1] != 0x7e:
return DeblobWrongType
for n in 0 ..< (data.len div 8):
let w = n * 8
vFas.add (uint64.fromBytesBE data[w ..< w + 8]).FilterID
for n in 0 ..< (data.len div 16):
let
w = n * 16
a = (uint64.fromBytesBE data[w + 0 ..< w + 8]).QueueID
b = (uint64.fromBytesBE data[w + 8 ..< w + 16]).QueueID
vFqs.add (a,b)
proc deblobify*(data: Blob; T: type seq[FilterID]): Result[T,AristoError] =
proc deblobify*(
data: Blob;
T: type seq[(QueueID,QueueID)];
): Result[T,AristoError] =
## Variant of `deblobify()` for deserialising the filter queue ID list
var vFas: seq[FilterID]
let info = data.deblobify vFas
var vFqs: seq[(QueueID,QueueID)]
let info = data.deblobify vFqs
if info != AristoError(0):
return err(info)
ok vFas
ok vFqs
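when isMainModule:
  # Minimal round-trip sketch for the new pair encoding (hand-checked
  # against the size rules above):
  let
    fqs = @[(QueueID(1), QueueID(5)), (QueueID(7), QueueID(7))]
    data = fqs.blobify
  doAssert data.len == 16 * fqs.len + 1   # two uint64 per pair plus marker
  doAssert data[^1] == 0x7Eu8
  doAssert data.deblobify(seq[(QueueID,QueueID)]).value == fqs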
# ------------------------------------------------------------------------------
# End

View File

@ -13,11 +13,11 @@
## =====================================================
##
import
../aristo_init/[aristo_memory, memory_only],
../aristo_init/[memory_db, memory_only],
".."/[aristo_desc, aristo_init],
./aristo_walk_private
./walk_private
export
aristo_memory,
memory_db,
memory_only
# ------------------------------------------------------------------------------
@ -45,10 +45,10 @@ iterator walkKeyBe*[T: MemBackendRef|VoidBackendRef](
iterator walkFilBe*[T: MemBackendRef|VoidBackendRef](
_: type T;
db: AristoDbRef;
): tuple[n: int, fid: FilterID, filter: FilterRef] =
): tuple[n: int, qid: QueueID, filter: FilterRef] =
## Similar to `walkVtxBe()` but for filters.
for (n,fid,filter) in db.to(T).walkFilBeImpl db:
yield (n,fid,filter)
for (n,qid,filter) in db.to(T).walkFilBeImpl db:
yield (n,qid,filter)
# ------------------------------------------------------------------------------
# End

View File

@ -18,11 +18,11 @@
## `./aristo_walk/persistent`.)
##
import
../aristo_init/[aristo_rocksdb, persistent],
../aristo_init/[rocks_db, persistent],
".."/[aristo_desc, aristo_init],
"."/[aristo_walk_private, memory_only]
"."/[walk_private, memory_only]
export
aristo_rocksdb,
rocks_db,
memory_only,
persistent
@ -50,10 +50,10 @@ iterator walkKeyBe*(
iterator walkFilBe*(
T: type RdbBackendRef;
db: AristoDbRef;
): tuple[n: int, fid: FilterID, filter: FilterRef] =
): tuple[n: int, qid: QueueID, filter: FilterRef] =
## Similar to `walkVtxBe()` but for filters.
for (n,fid,filter) in db.to(T).walkFilBeImpl db:
yield (n,fid,filter)
for (n,qid,filter) in db.to(T).walkFilBeImpl db:
yield (n,qid,filter)
# ------------------------------------------------------------------------------
# End

View File

@ -90,13 +90,13 @@ iterator walkKeyBeImpl*[T](
iterator walkFilBeImpl*[T](
be: T; # Backend descriptor
db: AristoDbRef; # Database with optional backend filter
): tuple[n: int, fid: FilterID, filter: FilterRef] =
): tuple[n: int, qid: QueueID, filter: FilterRef] =
## Generic filter iterator
when be isnot VoidBackendRef:
mixin walkFil
for (n,fid,filter) in be.walkFil:
yield (n,fid,filter)
for (n,qid,filter) in be.walkFil:
yield (n,qid,filter)
# ------------------------------------------------------------------------------
# End

View File

@ -73,10 +73,18 @@ proc setErrorLevel {.used.} =
# Test Runners: accounts and accounts storages
# ------------------------------------------------------------------------------
proc miscRunner(noisy =true) =
suite &"Aristo: Miscellaneous tests":
test &"VertexID recyling lists":
noisy.testVidRecycleLists()
proc miscRunner(
noisy = true;
qidSampleSize = QidSample;
) =
suite "Aristo: Miscellaneous tests":
test "VertexID recyling lists":
check noisy.testVidRecycleLists()
test &"QueueID slot management (sample size: {qidSampleSize})":
check noisy.testQidScheduler(sampleSize = qidSampleSize)
proc accountsRunner(
@ -164,7 +172,7 @@ when isMainModule:
setErrorLevel()
when true: # and false:
noisy.miscRunner()
noisy.miscRunner(qidSampleSize = 10_000)
# This one uses dumps from the external `nimbus-eth1-blob` repo
when true and false:

View File

@ -20,10 +20,10 @@ import
../../nimbus/db/aristo/[
aristo_debug,
aristo_desc,
aristo_desc/aristo_types_backend,
aristo_desc/desc_backend,
aristo_hashify,
aristo_init/aristo_memory,
aristo_init/aristo_rocksdb,
aristo_init/memory_db,
aristo_init/rocks_db,
aristo_persistent,
aristo_transcode,
aristo_vid],
@ -142,13 +142,13 @@ proc verify(
proc collectFilter(
db: AristoDbRef;
filter: FilterRef;
tab: var Table[FilterID,Hash];
tab: var Table[QueueID,Hash];
noisy: bool;
): bool =
## Store filter on permanent BE and register digest
if not filter.isNil:
let
fid = FilterID(7 * (tab.len + 1)) # just some number
fid = QueueID(7 * (tab.len + 1)) # just some number
be = db.backend
tx = be.putBegFn()
@ -165,7 +165,7 @@ proc collectFilter(
proc verifyFiltersImpl[T: MemBackendRef|RdbBackendRef](
_: type T;
db: AristoDbRef;
tab: Table[FilterID,Hash];
tab: Table[QueueID,Hash];
noisy: bool;
): bool =
## Compare stored filters against registered ones
@ -195,7 +195,7 @@ proc verifyFiltersImpl[T: MemBackendRef|RdbBackendRef](
proc verifyFilters(
db: AristoDbRef;
tab: Table[FilterID,Hash];
tab: Table[QueueID,Hash];
noisy: bool;
): bool =
## Wrapper
@ -224,7 +224,7 @@ proc test_backendConsistency*(
): bool =
## Import accounts
var
filTab: Table[FilterID,Hash] # Filter register
filTab: Table[QueueID,Hash] # Filter register
ndb = AristoDbRef() # Reference cache
mdb = AristoDbRef() # Memory backend database
rdb = AristoDbRef() # Rocks DB backend database

View File

@ -109,7 +109,7 @@ proc `==`*(a: (int,AristoError), b: (int,int)): bool =
proc `==`*(a: (int,VertexID,AristoError), b: (int,int,int)): bool =
(a[0], a[1].int, a[2].int) == b
proc `==`*(a: (FilterID,Hash), b: (int,Hash)): bool =
proc `==`*(a: (QueueID,Hash), b: (int,Hash)): bool =
(a[0].int,a[1]) == b
proc to*(sample: AccountsSample; T: type seq[UndumpAccounts]): T =

View File

@ -12,23 +12,44 @@
## Aristo (aka Patricia) DB trancoder test
import
std/sequtils,
std/[algorithm, sequtils, strutils],
eth/common,
results,
stew/byteutils,
unittest2,
../../nimbus/db/aristo,
../../nimbus/db/aristo/[aristo_desc, aristo_transcode, aristo_vid],
../../nimbus/db/aristo/[
aristo_debug, aristo_desc, aristo_transcode, aristo_vid],
../../nimbus/db/aristo/aristo_filter/[filter_desc, filter_scheduler],
./test_helpers
type
TesterDesc = object
prng: uint32 ## random state
QValRef = ref object
fid: FilterID
width: uint32
QTab = Table[QueueID,QValRef]
const
QidSlotLyo = [(4,0,10),(3,3,10),(3,4,10),(3,5,10)]
QidSlotLy1 = [(4,0,0),(3,3,0),(3,4,0),(3,5,0)]
QidSample* = (3 * QidSlotLyo.stats.minCovered) div 2
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
template trueOrReturn(expr: untyped): untyped =
if not (expr):
check expr
return
# ---------------------
proc posixPrngRand(state: var uint32): byte =
## POSIX.1-2001 example of a rand() implementation, see manual page rand(3).
state = state * 1103515245 + 12345;
@ -69,12 +90,59 @@ proc init(T: type TesterDesc; seed: int): TesterDesc =
proc `+`(a: VertexID, b: int): VertexID =
(a.uint64 + b.uint64).VertexID
# ---------------------
func sortedPairs(t: QTab): seq[(QueueID,QValRef)] =
t.keys.toSeq.mapIt(it.uint64).sorted.mapIt(it.QueueID).mapIt((it,t[it]))
func fifos(qt: QTab; scd: QidSchedRef): seq[seq[(QueueID,QValRef)]] =
proc kvp(chn: int, qid: QueueID): (QueueID,QValRef) =
let
cid = QueueID((chn.uint64 shl 62) or qid.uint64)
val = qt.getOrDefault(cid, QValRef(nil))
(qid, val)
for i in 0 ..< scd.state.len:
let
left = scd.state[i][0]
right = scd.state[i][1]
result.add newSeq[(QueueID,QValRef)](0)
if left <= right:
for j in left .. right:
result[i].add kvp(i, j)
else:
for j in left .. scd.ctx.q[i].wrap:
result[i].add kvp(i, j)
for j in QueueID(1) .. right:
result[i].add kvp(i, j)
func flatten(a: seq[seq[(QueueID,QValRef)]]): seq[(QueueID,QValRef)] =
for w in a:
result &= w
func pp(val: QValRef): string =
if val.isNil:
return "ø"
result = $val.fid.uint64
if 0 < val.width:
result &= ":" & $val.width
func pp(kvp: (QueueID,QValRef)): string =
kvp[0].pp & "=" & kvp[1].pp
func pp(t: QTab): string =
"{" & t.sortedPairs.mapIt(it.pp).join(",") & "}"
func pp(t: QTab; scd: QidSchedRef): string =
"[" & t.fifos(scd).flatten.mapIt(it.pp).join(",") & "]"
# ------------------------------------------------------------------------------
# Public test function
# ------------------------------------------------------------------------------
proc testVidRecycleLists*(noisy = true; seed = 42) =
proc testVidRecycleLists*(noisy = true; seed = 42): bool =
## Transcode VID lists held in `AristoDb` descriptor
##
var td = TesterDesc.init seed
let db = newAristoDbRef BackendVoid
@ -93,7 +161,7 @@ proc testVidRecycleLists*(noisy = true; seed = 42) =
expectedVids += (vid < first).ord
db.vidDispose vid
check db.top.vGen.len == expectedVids
trueOrReturn db.top.vGen.len == expectedVids
noisy.say "***", "vids=", db.top.vGen.len, " discarded=", count-expectedVids
# Serialise/deserialise
@ -105,43 +173,213 @@ proc testVidRecycleLists*(noisy = true; seed = 42) =
db1 = newAristoDbRef BackendVoid
rc = dbBlob.deblobify seq[VertexID]
if rc.isErr:
check rc.error == AristoError(0)
trueOrReturn rc.error == AristoError(0)
else:
db1.top.vGen = rc.value
check db.top.vGen == db1.top.vGen
trueOrReturn db.top.vGen == db1.top.vGen
# Make sure that recycled numbers are fetched first
let topVid = db.top.vGen[^1]
while 1 < db.top.vGen.len:
let w = db.vidFetch()
check w < topVid
check db.top.vGen.len == 1 and db.top.vGen[0] == topVid
trueOrReturn w < topVid
trueOrReturn db.top.vGen.len == 1 and db.top.vGen[0] == topVid
# Get some consecutive vertex IDs
for n in 0 .. 5:
let w = db.vidFetch()
check w == topVid + n
check db.top.vGen.len == 1
trueOrReturn w == topVid + n
trueOrReturn db.top.vGen.len == 1
# Repeat last test after clearing the cache
db.top.vGen.setLen(0)
for n in 0 .. 5:
let w = db.vidFetch()
check w == VertexID(2) + n # VertexID(1) is default root ID
check db.top.vGen.len == 1
trueOrReturn w == VertexID(2) + n # VertexID(1) is default root ID
trueOrReturn db.top.vGen.len == 1
# Recycling and re-org tests
func toVQ(a: seq[int]): seq[VertexID] = a.mapIt(VertexID(it))
check @[8, 7, 3, 4, 5, 9] .toVQ.vidReorg == @[3, 4, 5, 7] .toVQ
check @[8, 7, 6, 3, 4, 5, 9] .toVQ.vidReorg == @[3] .toVQ
check @[5, 4, 3, 7] .toVQ.vidReorg == @[5, 4, 3, 7] .toVQ
check @[5] .toVQ.vidReorg == @[5] .toVQ
check @[3, 5] .toVQ.vidReorg == @[3, 5] .toVQ
check @[4, 5] .toVQ.vidReorg == @[4] .toVQ
trueOrReturn @[8, 7, 3, 4, 5, 9] .toVQ.vidReorg == @[3, 4, 5, 7] .toVQ
trueOrReturn @[8, 7, 6, 3, 4, 5, 9] .toVQ.vidReorg == @[3] .toVQ
trueOrReturn @[5, 4, 3, 7] .toVQ.vidReorg == @[5, 4, 3, 7] .toVQ
trueOrReturn @[5] .toVQ.vidReorg == @[5] .toVQ
trueOrReturn @[3, 5] .toVQ.vidReorg == @[3, 5] .toVQ
trueOrReturn @[4, 5] .toVQ.vidReorg == @[4] .toVQ
check newSeq[VertexID](0).vidReorg().len == 0
trueOrReturn newSeq[VertexID](0).vidReorg().len == 0
true
proc testQidScheduler*(
noisy = true;
layout = QidSlotLyo;
sampleSize = QidSample;
): bool =
##
## Example table for `QidSlotLyo` layout after 10_000 cycles
## ::
## QueueID | QValRef |
## | FilterID | width | comment
## --------+----------+-------+----------------------------------
## %7 | 9997 | 0 | %7 stands for QueueID(7)
## %8 | 9998 | 0 |
## %9 | 9999 | 0 |
## %a | 10000 | 0 |
## | | |
## %1:6 | 9981 | 3 | %1:6 stands for QueueID((1 shl 62) + 6)
## %1:7 | 9985 | 3 |
## %1:8 | 9989 | 3 |
## %1:9 | 9993 | 3 | 9993 + 3 + 1 => 9997, see %7
## | | |
## %2:3 | 9841 | 19 |
## %2:4 | 9861 | 19 |
## %2:5 | 9881 | 19 |
## %2:6 | 9901 | 19 |
## %2:7 | 9921 | 19 |
## %2:8 | 9941 | 19 |
## %2:9 | 9961 | 19 | 9961 + 19 + 1 => 9981, see %1:6
## | | |
## %3:a | 9481 | 119 |
## %3:1 | 9601 | 119 |
## %3:2 | 9721 | 119 | 9721 + 119 + 1 => 9871, see %2:3
##
var
list: QTab
debug = false # or true
let
scd = QidSchedRef.init layout
ctx = scd.ctx.q
if debug:
noisy.say "***", "testFilterSchedule",
" ctx=", ctx,
" stats=", scd.ctx.stats
for n in 1 .. sampleSize:
let w = scd.addItem()
if debug and false:
noisy.say "***", "testFilterSchedule",
" n=", n,
" => ", w.exec.pp,
" / ", w.fifo.state.pp
var
saved = false
hold: seq[(QueueID,QueueID)]
for act in w.exec:
case act.op:
of Oops:
noisy.say "***", "testFilterSchedule", " n=", n, " act=", act.pp
of SaveQid:
if saved:
noisy.say "***", "testFilterSchedule", " n=", n, " act=", act.pp,
" hold=", hold.pp, " state=", scd.state.pp, " fifo=", list.pp scd
check not saved
return
list[act.qid] = QValRef(fid: FilterID(n))
saved = true
of HoldQid:
hold.add (act.qid, act.xid)
of DequQid:
var merged = QValRef(nil)
for w in hold:
for qid in w[0] .. w[1]:
let val = list.getOrDefault(qid, QValRef(nil))
if val.isNil:
noisy.say "***", "testFilterSchedule", " n=", n, " act=", act.pp,
" hold=", hold.pp, " state=", scd.state.pp, " fifo=", list.pp scd
check not val.isNil
return
if merged.isNil:
merged = val
elif merged.fid + merged.width + 1 == val.fid:
merged.width += val.width + 1
else:
noisy.say "***", "testFilterSchedule", " n=", n, " act=", act.pp,
" hold=", hold.pp, " state=", scd.state.pp, " fifo=", list.pp scd
check merged.fid + merged.width + 1 == val.fid
return
list.del qid
if merged.isNil:
noisy.say "***", "testFilterSchedule", " n=", n, " act=", act.pp,
" hold=", hold.pp, " state=", scd.state.pp, " fifo=", list.pp scd
check not merged.isNil
return
list[act.qid] = merged
hold.setLen(0)
scd[] = w.fifo[]
# Verify that the round-robin queues in `list` are consecutive and in the
# right order.
var
botVal = FilterID(0)
step = 1u
for chn,queue in list.fifos scd:
var lastVal = FilterID(0)
step *= ctx[chn].width + 1 # defined by schedule layout
for kvp in queue:
let (qid,val) = (kvp[0], kvp[1])
# Entries must exist
if val.isNil:
noisy.say "***", "testFilterSchedule", " n=", n, " chn=", chn,
" exec=", w.exec.pp, " kvp=", kvp.pp, " fifo=", list.pp scd
check not val.isNil
return
# Fid value fields must increase within a sub-queue.
if val.fid <= lastVal:
noisy.say "***", "testFilterSchedule", " n=", n, " chn=", chn,
" exec=", w.exec.pp, " kvp=", kvp.pp, " fifo=", list.pp scd
check lastVal < val.fid
return
# Width value must correspond to `step` size
if val.width + 1 != step:
noisy.say "***", "testFilterSchedule", " n=", n, " chn=", chn,
" exec=", w.exec.pp, " kvp=", kvp.pp, " fifo=", list.pp scd
check val.width + 1 == step
return
# Item distances must match the step width
if lastVal != 0:
let dist = val.fid - lastVal
if dist != step.uint64:
noisy.say "***", "testFilterSchedule", " n=", n, " chn=", chn,
" exec=", w.exec.pp, " kvp=", kvp.pp, " fifo=", list.pp scd
check dist == step.uint64
return
lastVal = val.fid
# The top value of the current queue must be smaller than the
# bottom value of the previous one
if 0 < chn and botVal != queue[^1][1].fid + step.uint64:
noisy.say "***", "testFilterSchedule", " n=", n, " chn=", chn,
" exec=", w.exec.pp, " step=", step, " fifo=", list.pp scd
check botVal == queue[^1][1].fid + step.uint64
return
botVal = queue[0][1].fid
if debug:
noisy.say "***", "testFilterSchedule",
" n=", n,
"\n exec=", w.exec.pp,
"\n state=", scd.state.pp,
"\n list=", list.pp,
"\n fifo=", list.pp scd,
"\n"
true
# ------------------------------------------------------------------------------
# End